From b0347f7b8e609420a7055d5fe537cc40ac0d1bb2 Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Fri, 16 Jul 2021 11:08:05 -0500
Subject: [PATCH 1/3] Fix: scheduler: don't schedule probes of unmanaged
 resources on pending nodes

Previously, custom_action() would set an action's optional or runnable flag in
the same, exclusive if-else sequence. This means that if an action should be
optional *and* unrunnable, only one would be set. In particular, this meant that
if a resource is unmanaged *and* its allocated node is pending, any probe would
be set to optional, but not unrunnable, and the controller could wrongly
attempt the probe before the join completed.

Now, optional is checked separately.
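
Roughly, the shape of the change looks like the sketch below (the condition
names are illustrative placeholders for the real checks, not actual Pacemaker
symbols):

    /* before: one exclusive chain, so at most one adjustment could happen */
    if (is_pseudo_action) {
        /* leave untouched */
    } else if (rsc_is_unmanaged && !is_recurring_op) {
        pe__set_action_flags(action, pe_action_optional);
        /* the pending-node branch below is never reached, so
         * runnable is not cleared */
    } else if (node_is_unavailable) {
        pe__clear_action_flags(action, pe_action_runnable);
    }

    /* after: optional is decided separately, so the chain can still
     * mark the same probe unrunnable */
    if (!is_pseudo_action && rsc_is_unmanaged && !is_recurring_op) {
        pe__set_action_flags(action, pe_action_optional);
    }
    if (is_pseudo_action) {
        /* leave untouched */
    } else if (node_is_unavailable) {
        pe__clear_action_flags(action, pe_action_runnable);
    }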
---
 lib/pengine/utils.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
index 5ef742e..965824b 100644
--- a/lib/pengine/utils.c
+++ b/lib/pengine/utils.c
@@ -541,6 +541,20 @@ custom_action(pe_resource_t * rsc, char *key, const char *task,
                                        FALSE, data_set);
         }
 
+        // Make the action optional if its resource is unmanaged
+        if (!pcmk_is_set(action->flags, pe_action_pseudo)
+            && (action->node != NULL)
+            && !pcmk_is_set(action->rsc->flags, pe_rsc_managed)
+            && (g_hash_table_lookup(action->meta,
+                                    XML_LRM_ATTR_INTERVAL_MS) == NULL)) {
+                pe_rsc_debug(rsc, "%s on %s is optional (%s is unmanaged)",
+                             action->uuid, action->node->details->uname,
+                             action->rsc->id);
+                pe__set_action_flags(action, pe_action_optional);
+                // We shouldn't clear runnable here because ... something
+        }
+
+        // Make the action runnable or unrunnable as appropriate
         if (pcmk_is_set(action->flags, pe_action_pseudo)) {
             /* leave untouched */
 
@@ -549,14 +563,6 @@ custom_action(pe_resource_t * rsc, char *key, const char *task,
                          action->uuid);
             pe__clear_action_flags(action, pe_action_runnable);
 
-        } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)
-                   && g_hash_table_lookup(action->meta,
-                                          XML_LRM_ATTR_INTERVAL_MS) == NULL) {
-            pe_rsc_debug(rsc, "%s on %s is optional (%s is unmanaged)",
-                         action->uuid, action->node->details->uname, rsc->id);
-            pe__set_action_flags(action, pe_action_optional);
-            //pe__clear_action_flags(action, pe_action_runnable);
-
         } else if (!pcmk_is_set(action->flags, pe_action_dc)
                    && !(action->node->details->online)
                    && (!pe__is_guest_node(action->node)
-- 
1.8.3.1


From 520303b90eb707f5b7a9afa9b106e4a38b90f0f9 Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Wed, 14 Jul 2021 17:18:44 -0500
Subject: [PATCH 2/3] Test: scheduler: update existing tests for probe
 scheduling change

This is an improvement. Looking at bundle-probe-order-2 for example,
the bundle's first instance has this status to start:

    * Replica[0]
      * galera	(ocf::heartbeat:galera):	 Stopped (unmanaged)
      * galera-bundle-docker-0	(ocf::heartbeat:docker):	 Started centos2 (unmanaged)
      * galera-bundle-0	(ocf::pacemaker:remote):	 Started centos2 (unmanaged)

After the changes, we now schedule recurring monitors for
galera-bundle-docker-0 and galera-bundle-0 on centos2, and a probe of galera:0
on galera-bundle-0, all of which are possible.
---
 cts/scheduler/dot/bundle-probe-order-2.dot         |  3 ++
 cts/scheduler/dot/bundle-probe-order-3.dot         |  1 +
 cts/scheduler/exp/bundle-probe-order-2.exp         | 33 ++++++++++++++++++++--
 cts/scheduler/exp/bundle-probe-order-3.exp         | 21 ++++++++++----
 cts/scheduler/summary/bundle-probe-order-2.summary |  3 ++
 cts/scheduler/summary/bundle-probe-order-3.summary |  1 +
 6 files changed, 53 insertions(+), 9 deletions(-)

diff --git a/cts/scheduler/dot/bundle-probe-order-2.dot b/cts/scheduler/dot/bundle-probe-order-2.dot
index 0cce3fd..7706195 100644
--- a/cts/scheduler/dot/bundle-probe-order-2.dot
+++ b/cts/scheduler/dot/bundle-probe-order-2.dot
@@ -1,6 +1,9 @@
  digraph "g" {
+"galera-bundle-0_monitor_30000 centos2" [ style=bold color="green" fontcolor="black"]
+"galera-bundle-docker-0_monitor_60000 centos2" [ style=bold color="green" fontcolor="black"]
 "galera-bundle-docker-1_monitor_0 centos2" [ style=bold color="green" fontcolor="black"]
 "galera-bundle-docker-2_monitor_0 centos1" [ style=bold color="green" fontcolor="black"]
 "galera-bundle-docker-2_monitor_0 centos2" [ style=bold color="green" fontcolor="black"]
 "galera-bundle-docker-2_monitor_0 centos3" [ style=bold color="green" fontcolor="black"]
+"galera:0_monitor_0 galera-bundle-0" [ style=bold color="green" fontcolor="black"]
 }
diff --git a/cts/scheduler/dot/bundle-probe-order-3.dot b/cts/scheduler/dot/bundle-probe-order-3.dot
index a4b109f..53a384b 100644
--- a/cts/scheduler/dot/bundle-probe-order-3.dot
+++ b/cts/scheduler/dot/bundle-probe-order-3.dot
@@ -2,6 +2,7 @@
 "galera-bundle-0_monitor_0 centos1" [ style=bold color="green" fontcolor="black"]
 "galera-bundle-0_monitor_0 centos2" [ style=bold color="green" fontcolor="black"]
 "galera-bundle-0_monitor_0 centos3" [ style=bold color="green" fontcolor="black"]
+"galera-bundle-docker-0_monitor_60000 centos2" [ style=bold color="green" fontcolor="black"]
 "galera-bundle-docker-1_monitor_0 centos2" [ style=bold color="green" fontcolor="black"]
 "galera-bundle-docker-2_monitor_0 centos1" [ style=bold color="green" fontcolor="black"]
 "galera-bundle-docker-2_monitor_0 centos2" [ style=bold color="green" fontcolor="black"]
diff --git a/cts/scheduler/exp/bundle-probe-order-2.exp b/cts/scheduler/exp/bundle-probe-order-2.exp
index d6174e7..5b28050 100644
--- a/cts/scheduler/exp/bundle-probe-order-2.exp
+++ b/cts/scheduler/exp/bundle-probe-order-2.exp
@@ -1,6 +1,33 @@
 <transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY"  transition_id="0">
   <synapse id="0">
     <action_set>
+      <rsc_op id="14" operation="monitor" operation_key="galera:0_monitor_0" on_node="galera-bundle-0" on_node_uuid="galera-bundle-0" router_node="centos2">
+        <primitive id="galera" long-id="galera:0" class="ocf" provider="heartbeat" type="galera"/>
+        <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="3" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="galera-bundle-0" CRM_meta_on_node_uuid="galera-bundle-0" CRM_meta_op_target_rc="7" CRM_meta_physical_host="centos2" CRM_meta_promoted_max="3" CRM_meta_promoted_node_max="1" CRM_meta_timeout="30000" cluster_host_map="centos1:centos1;centos2:centos2;centos3:centos3"  enable_creation="true" wsrep_cluster_address="gcomm://centos1,centos2,centos3"/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="1">
+    <action_set>
+      <rsc_op id="16" operation="monitor" operation_key="galera-bundle-docker-0_monitor_60000" on_node="centos2" on_node_uuid="2">
+        <primitive id="galera-bundle-docker-0" class="ocf" provider="heartbeat" type="docker"/>
+        <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="centos2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" allow_pull="true"  force_kill="false" image="docker.io/tripleoupstream/centos-binary-mariadb:latest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/galera-bundle-0" reuse="false" run_cmd="/usr/sbin/pacemaker_remoted" run_opts=" --restart=no -e PCMK_stderr=1 --net=host -e PCMK_remote_port=3123 -v /foo:/etc/libqb/force-filesystem-sockets:ro -v /etc/my.cnf.d/galera.cnf:/etc/my.cnf.d/galera.cnf:ro -v /var/lib/mysql:/var/lib/mysql:rw -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/galera-bundle-0:/var/log --user=root --log-driver=journald "/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="2">
+    <action_set>
+      <rsc_op id="18" operation="monitor" operation_key="galera-bundle-0_monitor_30000" on_node="centos2" on_node_uuid="2">
+        <primitive id="galera-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
+        <attributes CRM_meta_container="galera-bundle-docker-0" CRM_meta_interval="30000" CRM_meta_name="monitor" CRM_meta_on_node="centos2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="30000" addr="centos2"  port="3123"/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="3">
+    <action_set>
       <rsc_op id="7" operation="monitor" operation_key="galera-bundle-docker-1_monitor_0" on_node="centos2" on_node_uuid="2">
         <primitive id="galera-bundle-docker-1" class="ocf" provider="heartbeat" type="docker"/>
         <attributes CRM_meta_on_node="centos2" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" allow_pull="true"  force_kill="false" image="docker.io/tripleoupstream/centos-binary-mariadb:latest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/galera-bundle-1" reuse="false" run_cmd="/usr/sbin/pacemaker_remoted" run_opts=" --restart=no -e PCMK_stderr=1 --net=host -e PCMK_remote_port=3123 -v /foo:/etc/libqb/force-filesystem-sockets:ro -v /etc/my.cnf.d/galera.cnf:/etc/my.cnf.d/galera.cnf:ro -v /var/lib/mysql:/var/lib/mysql:rw -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/galera-bundle-1:/var/log --user=root --log-driver=journald "/>
@@ -8,7 +35,7 @@
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="1">
+  <synapse id="4">
     <action_set>
       <rsc_op id="12" operation="monitor" operation_key="galera-bundle-docker-2_monitor_0" on_node="centos3" on_node_uuid="3">
         <primitive id="galera-bundle-docker-2" class="ocf" provider="heartbeat" type="docker"/>
@@ -17,7 +44,7 @@
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="2">
+  <synapse id="5">
     <action_set>
       <rsc_op id="9" operation="monitor" operation_key="galera-bundle-docker-2_monitor_0" on_node="centos2" on_node_uuid="2">
         <primitive id="galera-bundle-docker-2" class="ocf" provider="heartbeat" type="docker"/>
@@ -26,7 +53,7 @@
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="3">
+  <synapse id="6">
     <action_set>
       <rsc_op id="5" operation="monitor" operation_key="galera-bundle-docker-2_monitor_0" on_node="centos1" on_node_uuid="1">
         <primitive id="galera-bundle-docker-2" class="ocf" provider="heartbeat" type="docker"/>
diff --git a/cts/scheduler/exp/bundle-probe-order-3.exp b/cts/scheduler/exp/bundle-probe-order-3.exp
index e1f60e7..69140a4 100644
--- a/cts/scheduler/exp/bundle-probe-order-3.exp
+++ b/cts/scheduler/exp/bundle-probe-order-3.exp
@@ -1,6 +1,15 @@
 <transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY"  transition_id="0">
   <synapse id="0">
     <action_set>
+      <rsc_op id="16" operation="monitor" operation_key="galera-bundle-docker-0_monitor_60000" on_node="centos2" on_node_uuid="2">
+        <primitive id="galera-bundle-docker-0" class="ocf" provider="heartbeat" type="docker"/>
+        <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="centos2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" allow_pull="true"  force_kill="false" image="docker.io/tripleoupstream/centos-binary-mariadb:latest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/galera-bundle-0" reuse="false" run_cmd="/usr/sbin/pacemaker_remoted" run_opts=" --restart=no -e PCMK_stderr=1 --net=host -e PCMK_remote_port=3123 -v /foo:/etc/libqb/force-filesystem-sockets:ro -v /etc/my.cnf.d/galera.cnf:/etc/my.cnf.d/galera.cnf:ro -v /var/lib/mysql:/var/lib/mysql:rw -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/galera-bundle-0:/var/log --user=root --log-driver=journald "/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="1">
+    <action_set>
       <rsc_op id="11" operation="monitor" operation_key="galera-bundle-0_monitor_0" on_node="centos3" on_node_uuid="3">
         <primitive id="galera-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
         <attributes CRM_meta_container="galera-bundle-docker-0" CRM_meta_on_node="centos3" CRM_meta_on_node_uuid="3" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="centos2"  port="3123"/>
@@ -8,7 +17,7 @@
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="1">
+  <synapse id="2">
     <action_set>
       <rsc_op id="6" operation="monitor" operation_key="galera-bundle-0_monitor_0" on_node="centos2" on_node_uuid="2">
         <primitive id="galera-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
@@ -17,7 +26,7 @@
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="2">
+  <synapse id="3">
     <action_set>
       <rsc_op id="3" operation="monitor" operation_key="galera-bundle-0_monitor_0" on_node="centos1" on_node_uuid="1">
         <primitive id="galera-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
@@ -26,7 +35,7 @@
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="3">
+  <synapse id="4">
     <action_set>
       <rsc_op id="7" operation="monitor" operation_key="galera-bundle-docker-1_monitor_0" on_node="centos2" on_node_uuid="2">
         <primitive id="galera-bundle-docker-1" class="ocf" provider="heartbeat" type="docker"/>
@@ -35,7 +44,7 @@
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="4">
+  <synapse id="5">
     <action_set>
       <rsc_op id="13" operation="monitor" operation_key="galera-bundle-docker-2_monitor_0" on_node="centos3" on_node_uuid="3">
         <primitive id="galera-bundle-docker-2" class="ocf" provider="heartbeat" type="docker"/>
@@ -44,7 +53,7 @@
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="5">
+  <synapse id="6">
     <action_set>
       <rsc_op id="9" operation="monitor" operation_key="galera-bundle-docker-2_monitor_0" on_node="centos2" on_node_uuid="2">
         <primitive id="galera-bundle-docker-2" class="ocf" provider="heartbeat" type="docker"/>
@@ -53,7 +62,7 @@
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="6">
+  <synapse id="7">
     <action_set>
       <rsc_op id="4" operation="monitor" operation_key="galera-bundle-docker-2_monitor_0" on_node="centos1" on_node_uuid="1">
         <primitive id="galera-bundle-docker-2" class="ocf" provider="heartbeat" type="docker"/>
diff --git a/cts/scheduler/summary/bundle-probe-order-2.summary b/cts/scheduler/summary/bundle-probe-order-2.summary
index 681d607..024c472 100644
--- a/cts/scheduler/summary/bundle-probe-order-2.summary
+++ b/cts/scheduler/summary/bundle-probe-order-2.summary
@@ -13,6 +13,9 @@ Current cluster status:
 Transition Summary:
 
 Executing Cluster Transition:
+  * Resource action: galera:0        monitor on galera-bundle-0
+  * Resource action: galera-bundle-docker-0 monitor=60000 on centos2
+  * Resource action: galera-bundle-0 monitor=30000 on centos2
   * Resource action: galera-bundle-docker-1 monitor on centos2
   * Resource action: galera-bundle-docker-2 monitor on centos3
   * Resource action: galera-bundle-docker-2 monitor on centos2
diff --git a/cts/scheduler/summary/bundle-probe-order-3.summary b/cts/scheduler/summary/bundle-probe-order-3.summary
index f089618..331bd87 100644
--- a/cts/scheduler/summary/bundle-probe-order-3.summary
+++ b/cts/scheduler/summary/bundle-probe-order-3.summary
@@ -12,6 +12,7 @@ Current cluster status:
 Transition Summary:
 
 Executing Cluster Transition:
+  * Resource action: galera-bundle-docker-0 monitor=60000 on centos2
   * Resource action: galera-bundle-0 monitor on centos3
   * Resource action: galera-bundle-0 monitor on centos2
   * Resource action: galera-bundle-0 monitor on centos1
-- 
1.8.3.1


From cb9c294a7ef22916866e0e42e51e88c2b1a61c2e Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Wed, 14 Jul 2021 17:23:11 -0500
Subject: [PATCH 3/3] Test: scheduler: add test for probe of unmanaged resource
 on pending node

No probes should be scheduled in this case
---
 cts/cts-scheduler.in                             |   1 +
 cts/scheduler/dot/probe-pending-node.dot         |   2 +
 cts/scheduler/exp/probe-pending-node.exp         |   1 +
 cts/scheduler/scores/probe-pending-node.scores   |  61 ++++++
 cts/scheduler/summary/probe-pending-node.summary |  55 +++++
 cts/scheduler/xml/probe-pending-node.xml         | 247 +++++++++++++++++++++++
 6 files changed, 367 insertions(+)
 create mode 100644 cts/scheduler/dot/probe-pending-node.dot
 create mode 100644 cts/scheduler/exp/probe-pending-node.exp
 create mode 100644 cts/scheduler/scores/probe-pending-node.scores
 create mode 100644 cts/scheduler/summary/probe-pending-node.summary
 create mode 100644 cts/scheduler/xml/probe-pending-node.xml

diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index fc9790b..7ba2415 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -110,6 +110,7 @@ TESTS = [
         [ "probe-2", "Correctly re-probe cloned groups" ],
         [ "probe-3", "Probe (pending node)" ],
         [ "probe-4", "Probe (pending node + stopped resource)" ],
+        [ "probe-pending-node", "Probe (pending node + unmanaged resource)" ],
         [ "standby", "Standby" ],
         [ "comments", "Comments" ],
     ],
diff --git a/cts/scheduler/dot/probe-pending-node.dot b/cts/scheduler/dot/probe-pending-node.dot
new file mode 100644
index 0000000..d8f1c9f
--- /dev/null
+++ b/cts/scheduler/dot/probe-pending-node.dot
@@ -0,0 +1,2 @@
+ digraph "g" {
+}
diff --git a/cts/scheduler/exp/probe-pending-node.exp b/cts/scheduler/exp/probe-pending-node.exp
new file mode 100644
index 0000000..56e315f
--- /dev/null
+++ b/cts/scheduler/exp/probe-pending-node.exp
@@ -0,0 +1 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY"  transition_id="0"/>
diff --git a/cts/scheduler/scores/probe-pending-node.scores b/cts/scheduler/scores/probe-pending-node.scores
new file mode 100644
index 0000000..020a1a0
--- /dev/null
+++ b/cts/scheduler/scores/probe-pending-node.scores
@@ -0,0 +1,61 @@
+
+pcmk__clone_allocate: fs_UC5_SAPMNT-clone allocation score on gcdoubwap01: 0
+pcmk__clone_allocate: fs_UC5_SAPMNT-clone allocation score on gcdoubwap02: 0
+pcmk__clone_allocate: fs_UC5_SAPMNT:0 allocation score on gcdoubwap01: 0
+pcmk__clone_allocate: fs_UC5_SAPMNT:0 allocation score on gcdoubwap02: 0
+pcmk__clone_allocate: fs_UC5_SAPMNT:1 allocation score on gcdoubwap01: 0
+pcmk__clone_allocate: fs_UC5_SAPMNT:1 allocation score on gcdoubwap02: 0
+pcmk__clone_allocate: fs_UC5_SYS-clone allocation score on gcdoubwap01: 0
+pcmk__clone_allocate: fs_UC5_SYS-clone allocation score on gcdoubwap02: 0
+pcmk__clone_allocate: fs_UC5_SYS:0 allocation score on gcdoubwap01: 0
+pcmk__clone_allocate: fs_UC5_SYS:0 allocation score on gcdoubwap02: 0
+pcmk__clone_allocate: fs_UC5_SYS:1 allocation score on gcdoubwap01: 0
+pcmk__clone_allocate: fs_UC5_SYS:1 allocation score on gcdoubwap02: 0
+pcmk__group_allocate: fs_UC5_ascs allocation score on gcdoubwap01: 0
+pcmk__group_allocate: fs_UC5_ascs allocation score on gcdoubwap02: 0
+pcmk__group_allocate: fs_UC5_ers allocation score on gcdoubwap01: 0
+pcmk__group_allocate: fs_UC5_ers allocation score on gcdoubwap02: 0
+pcmk__group_allocate: grp_UC5_ascs allocation score on gcdoubwap01: 0
+pcmk__group_allocate: grp_UC5_ascs allocation score on gcdoubwap02: 0
+pcmk__group_allocate: grp_UC5_ers allocation score on gcdoubwap01: 0
+pcmk__group_allocate: grp_UC5_ers allocation score on gcdoubwap02: 0
+pcmk__group_allocate: rsc_sap_UC5_ASCS11 allocation score on gcdoubwap01: 0
+pcmk__group_allocate: rsc_sap_UC5_ASCS11 allocation score on gcdoubwap02: 0
+pcmk__group_allocate: rsc_sap_UC5_ERS12 allocation score on gcdoubwap01: 0
+pcmk__group_allocate: rsc_sap_UC5_ERS12 allocation score on gcdoubwap02: 0
+pcmk__group_allocate: rsc_vip_gcp_ascs allocation score on gcdoubwap01: INFINITY
+pcmk__group_allocate: rsc_vip_gcp_ascs allocation score on gcdoubwap02: 0
+pcmk__group_allocate: rsc_vip_gcp_ers allocation score on gcdoubwap01: 0
+pcmk__group_allocate: rsc_vip_gcp_ers allocation score on gcdoubwap02: 0
+pcmk__group_allocate: rsc_vip_init_ers allocation score on gcdoubwap01: 0
+pcmk__group_allocate: rsc_vip_init_ers allocation score on gcdoubwap02: 0
+pcmk__group_allocate: rsc_vip_int_ascs allocation score on gcdoubwap01: 0
+pcmk__group_allocate: rsc_vip_int_ascs allocation score on gcdoubwap02: 0
+pcmk__native_allocate: fs_UC5_SAPMNT:0 allocation score on gcdoubwap01: 0
+pcmk__native_allocate: fs_UC5_SAPMNT:0 allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: fs_UC5_SAPMNT:1 allocation score on gcdoubwap01: 0
+pcmk__native_allocate: fs_UC5_SAPMNT:1 allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: fs_UC5_SYS:0 allocation score on gcdoubwap01: 0
+pcmk__native_allocate: fs_UC5_SYS:0 allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: fs_UC5_SYS:1 allocation score on gcdoubwap01: 0
+pcmk__native_allocate: fs_UC5_SYS:1 allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: fs_UC5_ascs allocation score on gcdoubwap01: 0
+pcmk__native_allocate: fs_UC5_ascs allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: fs_UC5_ers allocation score on gcdoubwap01: -INFINITY
+pcmk__native_allocate: fs_UC5_ers allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: rsc_sap_UC5_ASCS11 allocation score on gcdoubwap01: -INFINITY
+pcmk__native_allocate: rsc_sap_UC5_ASCS11 allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: rsc_sap_UC5_ERS12 allocation score on gcdoubwap01: -INFINITY
+pcmk__native_allocate: rsc_sap_UC5_ERS12 allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: rsc_vip_gcp_ascs allocation score on gcdoubwap01: -INFINITY
+pcmk__native_allocate: rsc_vip_gcp_ascs allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: rsc_vip_gcp_ers allocation score on gcdoubwap01: -INFINITY
+pcmk__native_allocate: rsc_vip_gcp_ers allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: rsc_vip_init_ers allocation score on gcdoubwap01: 0
+pcmk__native_allocate: rsc_vip_init_ers allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: rsc_vip_int_ascs allocation score on gcdoubwap01: INFINITY
+pcmk__native_allocate: rsc_vip_int_ascs allocation score on gcdoubwap02: -INFINITY
+pcmk__native_allocate: stonith_gcdoubwap01 allocation score on gcdoubwap01: -INFINITY
+pcmk__native_allocate: stonith_gcdoubwap01 allocation score on gcdoubwap02: 0
+pcmk__native_allocate: stonith_gcdoubwap02 allocation score on gcdoubwap01: 0
+pcmk__native_allocate: stonith_gcdoubwap02 allocation score on gcdoubwap02: -INFINITY
diff --git a/cts/scheduler/summary/probe-pending-node.summary b/cts/scheduler/summary/probe-pending-node.summary
new file mode 100644
index 0000000..208186b
--- /dev/null
+++ b/cts/scheduler/summary/probe-pending-node.summary
@@ -0,0 +1,55 @@
+Using the original execution date of: 2021-06-11 13:55:24Z
+
+              *** Resource management is DISABLED ***
+  The cluster will not attempt to start, stop or recover services
+
+Current cluster status:
+  * Node List:
+    * Node gcdoubwap02: pending
+    * Online: [ gcdoubwap01 ]
+
+  * Full List of Resources:
+    * stonith_gcdoubwap01	(stonith:fence_gce):	 Stopped (unmanaged)
+    * stonith_gcdoubwap02	(stonith:fence_gce):	 Stopped (unmanaged)
+    * Clone Set: fs_UC5_SAPMNT-clone [fs_UC5_SAPMNT] (unmanaged):
+      * Stopped: [ gcdoubwap01 gcdoubwap02 ]
+    * Clone Set: fs_UC5_SYS-clone [fs_UC5_SYS] (unmanaged):
+      * Stopped: [ gcdoubwap01 gcdoubwap02 ]
+    * Resource Group: grp_UC5_ascs (unmanaged):
+      * rsc_vip_int_ascs	(ocf:heartbeat:IPaddr2):	 Stopped (unmanaged)
+      * rsc_vip_gcp_ascs	(ocf:heartbeat:gcp-vpc-move-vip):	 Started gcdoubwap01 (unmanaged)
+      * fs_UC5_ascs	(ocf:heartbeat:Filesystem):	 Stopped (unmanaged)
+      * rsc_sap_UC5_ASCS11	(ocf:heartbeat:SAPInstance):	 Stopped (unmanaged)
+    * Resource Group: grp_UC5_ers (unmanaged):
+      * rsc_vip_init_ers	(ocf:heartbeat:IPaddr2):	 Stopped (unmanaged)
+      * rsc_vip_gcp_ers	(ocf:heartbeat:gcp-vpc-move-vip):	 Stopped (unmanaged)
+      * fs_UC5_ers	(ocf:heartbeat:Filesystem):	 Stopped (unmanaged)
+      * rsc_sap_UC5_ERS12	(ocf:heartbeat:SAPInstance):	 Stopped (unmanaged)
+
+Transition Summary:
+
+Executing Cluster Transition:
+Using the original execution date of: 2021-06-11 13:55:24Z
+
+Revised Cluster Status:
+  * Node List:
+    * Node gcdoubwap02: pending
+    * Online: [ gcdoubwap01 ]
+
+  * Full List of Resources:
+    * stonith_gcdoubwap01	(stonith:fence_gce):	 Stopped (unmanaged)
+    * stonith_gcdoubwap02	(stonith:fence_gce):	 Stopped (unmanaged)
+    * Clone Set: fs_UC5_SAPMNT-clone [fs_UC5_SAPMNT] (unmanaged):
+      * Stopped: [ gcdoubwap01 gcdoubwap02 ]
+    * Clone Set: fs_UC5_SYS-clone [fs_UC5_SYS] (unmanaged):
+      * Stopped: [ gcdoubwap01 gcdoubwap02 ]
+    * Resource Group: grp_UC5_ascs (unmanaged):
+      * rsc_vip_int_ascs	(ocf:heartbeat:IPaddr2):	 Stopped (unmanaged)
+      * rsc_vip_gcp_ascs	(ocf:heartbeat:gcp-vpc-move-vip):	 Started gcdoubwap01 (unmanaged)
+      * fs_UC5_ascs	(ocf:heartbeat:Filesystem):	 Stopped (unmanaged)
+      * rsc_sap_UC5_ASCS11	(ocf:heartbeat:SAPInstance):	 Stopped (unmanaged)
+    * Resource Group: grp_UC5_ers (unmanaged):
+      * rsc_vip_init_ers	(ocf:heartbeat:IPaddr2):	 Stopped (unmanaged)
+      * rsc_vip_gcp_ers	(ocf:heartbeat:gcp-vpc-move-vip):	 Stopped (unmanaged)
+      * fs_UC5_ers	(ocf:heartbeat:Filesystem):	 Stopped (unmanaged)
+      * rsc_sap_UC5_ERS12	(ocf:heartbeat:SAPInstance):	 Stopped (unmanaged)
diff --git a/cts/scheduler/xml/probe-pending-node.xml b/cts/scheduler/xml/probe-pending-node.xml
new file mode 100644
index 0000000..9f55c92
--- /dev/null
+++ b/cts/scheduler/xml/probe-pending-node.xml
@@ -0,0 +1,247 @@
+<cib crm_feature_set="3.0.14" validate-with="pacemaker-2.10" epoch="395" num_updates="30" admin_epoch="0" cib-last-written="Thu Jun 10 18:01:13 2021" update-origin="gcdoubwap01" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1" execution-date="1623419724">
+  <configuration>
+    <crm_config>
+      <cluster_property_set id="cib-bootstrap-options">
+        <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+        <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.19-8.el7_6.5-c3c624ea3d"/>
+        <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+        <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="ascscluster"/>
+        <nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="true"/>
+        <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1622815036"/>
+      </cluster_property_set>
+    </crm_config>
+    <nodes>
+      <node id="1" uname="gcdoubwap01"/>
+      <node id="2" uname="gcdoubwap02"/>
+    </nodes>
+    <resources>
+      <primitive class="stonith" id="stonith_gcdoubwap01" type="fence_gce">
+        <instance_attributes id="stonith_gcdoubwap01-instance_attributes">
+          <nvpair id="stonith_gcdoubwap01-instance_attributes-project" name="project" value="pj-uat-do-nane1-01"/>
+          <nvpair id="stonith_gcdoubwap01-instance_attributes-zone" name="zone" value="northamerica-northeast1-b"/>
+        </instance_attributes>
+        <operations>
+          <op id="stonith_gcdoubwap01-monitor-interval-60s" interval="60s" name="monitor"/>
+        </operations>
+      </primitive>
+      <primitive class="stonith" id="stonith_gcdoubwap02" type="fence_gce">
+        <instance_attributes id="stonith_gcdoubwap02-instance_attributes">
+          <nvpair id="stonith_gcdoubwap02-instance_attributes-project" name="project" value="pj-uat-do-nane1-01"/>
+          <nvpair id="stonith_gcdoubwap02-instance_attributes-zone" name="zone" value="northamerica-northeast1-c"/>
+        </instance_attributes>
+        <operations>
+          <op id="stonith_gcdoubwap02-monitor-interval-60s" interval="60s" name="monitor"/>
+        </operations>
+      </primitive>
+      <clone id="fs_UC5_SAPMNT-clone">
+        <primitive class="ocf" id="fs_UC5_SAPMNT" provider="heartbeat" type="Filesystem">
+          <instance_attributes id="fs_UC5_SAPMNT-instance_attributes">
+            <nvpair id="fs_UC5_SAPMNT-instance_attributes-device" name="device" value="uatdoelfs.igmfinancial.net:UC5_sapmnt/root"/>
+            <nvpair id="fs_UC5_SAPMNT-instance_attributes-directory" name="directory" value="/sapmnt/UC5"/>
+            <nvpair id="fs_UC5_SAPMNT-instance_attributes-fstype" name="fstype" value="nfs"/>
+          </instance_attributes>
+          <operations>
+            <op id="fs_UC5_SAPMNT-monitor-interval-20s" interval="20s" name="monitor" timeout="40s"/>
+            <op id="fs_UC5_SAPMNT-notify-interval-0s" interval="0s" name="notify" timeout="60s"/>
+            <op id="fs_UC5_SAPMNT-start-interval-0s" interval="0s" name="start" timeout="60s"/>
+            <op id="fs_UC5_SAPMNT-stop-interval-0s" interval="0s" name="stop" timeout="60s"/>
+          </operations>
+        </primitive>
+        <meta_attributes id="fs_UC5_SAPMNT-clone-meta_attributes">
+          <nvpair id="fs_UC5_SAPMNT-clone-meta_attributes-interleave" name="interleave" value="true"/>
+        </meta_attributes>
+      </clone>
+      <clone id="fs_UC5_SYS-clone">
+        <primitive class="ocf" id="fs_UC5_SYS" provider="heartbeat" type="Filesystem">
+          <instance_attributes id="fs_UC5_SYS-instance_attributes">
+            <nvpair id="fs_UC5_SYS-instance_attributes-device" name="device" value="uatdoelfs.igmfinancial.net:UC5_SYS/root"/>
+            <nvpair id="fs_UC5_SYS-instance_attributes-directory" name="directory" value="/usr/sap/UC5/SYS"/>
+            <nvpair id="fs_UC5_SYS-instance_attributes-fstype" name="fstype" value="nfs"/>
+          </instance_attributes>
+          <operations>
+            <op id="fs_UC5_SYS-monitor-interval-20s" interval="20s" name="monitor" timeout="40s"/>
+            <op id="fs_UC5_SYS-notify-interval-0s" interval="0s" name="notify" timeout="60s"/>
+            <op id="fs_UC5_SYS-start-interval-0s" interval="0s" name="start" timeout="60s"/>
+            <op id="fs_UC5_SYS-stop-interval-0s" interval="0s" name="stop" timeout="60s"/>
+          </operations>
+        </primitive>
+        <meta_attributes id="fs_UC5_SYS-clone-meta_attributes">
+          <nvpair id="fs_UC5_SYS-clone-meta_attributes-interleave" name="interleave" value="true"/>
+        </meta_attributes>
+      </clone>
+      <group id="grp_UC5_ascs">
+        <primitive class="ocf" id="rsc_vip_int_ascs" provider="heartbeat" type="IPaddr2">
+          <instance_attributes id="rsc_vip_int_ascs-instance_attributes">
+            <nvpair id="rsc_vip_int_ascs-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
+            <nvpair id="rsc_vip_int_ascs-instance_attributes-ip" name="ip" value="10.4.130.38"/>
+            <nvpair id="rsc_vip_int_ascs-instance_attributes-nic" name="nic" value="eth0"/>
+          </instance_attributes>
+          <operations>
+            <op id="rsc_vip_int_ascs-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+            <op id="rsc_vip_int_ascs-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+            <op id="rsc_vip_int_ascs-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+          </operations>
+        </primitive>
+        <primitive class="ocf" id="rsc_vip_gcp_ascs" provider="heartbeat" type="gcp-vpc-move-vip">
+          <instance_attributes id="rsc_vip_gcp_ascs-instance_attributes">
+            <nvpair id="rsc_vip_gcp_ascs-instance_attributes-alias_ip" name="alias_ip" value="10.4.130.38/32"/>
+            <nvpair id="rsc_vip_gcp_ascs-instance_attributes-hostlist" name="hostlist" value="gcdoubwap01 gcdoubwap02"/>
+          </instance_attributes>
+          <operations>
+            <op id="rsc_vip_gcp_ascs-monitor-interval-60s" interval="60s" name="monitor" on-fail="ignore"/>
+            <op id="rsc_vip_gcp_ascs-start-interval-0s" interval="0s" name="start" timeout="300s"/>
+            <op id="rsc_vip_gcp_ascs-stop-interval-0s" interval="0s" name="stop" timeout="15s"/>
+          </operations>
+        </primitive>
+        <primitive class="ocf" id="fs_UC5_ascs" provider="heartbeat" type="Filesystem">
+          <instance_attributes id="fs_UC5_ascs-instance_attributes">
+            <nvpair id="fs_UC5_ascs-instance_attributes-device" name="device" value="uatdoelfs.igmfinancial.net:UC5_ASCS/root"/>
+            <nvpair id="fs_UC5_ascs-instance_attributes-directory" name="directory" value="/usr/sap/UC5/ASCS11"/>
+            <nvpair id="fs_UC5_ascs-instance_attributes-force_unmount" name="force_unmount" value="safe"/>
+            <nvpair id="fs_UC5_ascs-instance_attributes-fstype" name="fstype" value="nfs"/>
+          </instance_attributes>
+          <operations>
+            <op id="fs_UC5_ascs-monitor-interval-200" interval="200" name="monitor" timeout="40"/>
+            <op id="fs_UC5_ascs-notify-interval-0s" interval="0s" name="notify" timeout="60s"/>
+            <op id="fs_UC5_ascs-start-interval-0" interval="0" name="start" timeout="60"/>
+            <op id="fs_UC5_ascs-stop-interval-0" interval="0" name="stop" timeout="120"/>
+          </operations>
+        </primitive>
+        <primitive class="ocf" id="rsc_sap_UC5_ASCS11" provider="heartbeat" type="SAPInstance">
+          <instance_attributes id="rsc_sap_UC5_ASCS11-instance_attributes">
+            <nvpair id="rsc_sap_UC5_ASCS11-instance_attributes-AUTOMATIC_RECOVER" name="AUTOMATIC_RECOVER" value="false"/>
+            <nvpair id="rsc_sap_UC5_ASCS11-instance_attributes-InstanceName" name="InstanceName" value="UC5_ASCS11_uatdobwscs"/>
+            <nvpair id="rsc_sap_UC5_ASCS11-instance_attributes-START_PROFILE" name="START_PROFILE" value="/sapmnt/UC5/profile/UC5_ASCS11_uatdobwscs"/>
+          </instance_attributes>
+          <meta_attributes id="rsc_sap_UC5_ASCS11-meta_attributes">
+            <nvpair id="rsc_sap_UC5_ASCS11-meta_attributes-failure-timeout" name="failure-timeout" value="60"/>
+            <nvpair id="rsc_sap_UC5_ASCS11-meta_attributes-migration-threshold" name="migration-threshold" value="1"/>
+            <nvpair id="rsc_sap_UC5_ASCS11-meta_attributes-resource-stickiness" name="resource-stickiness" value="5000"/>
+          </meta_attributes>
+          <operations>
+            <op id="rsc_sap_UC5_ASCS11-demote-interval-0s" interval="0s" name="demote" timeout="320s"/>
+            <op id="rsc_sap_UC5_ASCS11-methods-interval-0s" interval="0s" name="methods" timeout="5s"/>
+            <op id="rsc_sap_UC5_ASCS11-monitor-interval-20" interval="20" name="monitor" on-fail="restart" timeout="60"/>
+            <op id="rsc_sap_UC5_ASCS11-promote-interval-0s" interval="0s" name="promote" timeout="320s"/>
+            <op id="rsc_sap_UC5_ASCS11-start-interval-0" interval="0" name="start" timeout="600"/>
+            <op id="rsc_sap_UC5_ASCS11-stop-interval-0" interval="0" name="stop" timeout="600"/>
+          </operations>
+        </primitive>
+      </group>
+      <group id="grp_UC5_ers">
+        <primitive class="ocf" id="rsc_vip_init_ers" provider="heartbeat" type="IPaddr2">
+          <instance_attributes id="rsc_vip_init_ers-instance_attributes">
+            <nvpair id="rsc_vip_init_ers-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
+            <nvpair id="rsc_vip_init_ers-instance_attributes-ip" name="ip" value="10.4.130.39"/>
+            <nvpair id="rsc_vip_init_ers-instance_attributes-nic" name="nic" value="eth0"/>
+          </instance_attributes>
+          <operations>
+            <op id="rsc_vip_init_ers-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+            <op id="rsc_vip_init_ers-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+            <op id="rsc_vip_init_ers-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+          </operations>
+        </primitive>
+        <primitive class="ocf" id="rsc_vip_gcp_ers" provider="heartbeat" type="gcp-vpc-move-vip">
+          <instance_attributes id="rsc_vip_gcp_ers-instance_attributes">
+            <nvpair id="rsc_vip_gcp_ers-instance_attributes-alias_ip" name="alias_ip" value="10.4.130.39/32"/>
+            <nvpair id="rsc_vip_gcp_ers-instance_attributes-hostlist" name="hostlist" value="gcdoubwap01 gcdoubwap02"/>
+          </instance_attributes>
+          <operations>
+            <op id="rsc_vip_gcp_ers-monitor-interval-60s" interval="60s" name="monitor" on-fail="ignore"/>
+            <op id="rsc_vip_gcp_ers-start-interval-0s" interval="0s" name="start" timeout="300s"/>
+            <op id="rsc_vip_gcp_ers-stop-interval-0s" interval="0s" name="stop" timeout="180s"/>
+          </operations>
+        </primitive>
+        <primitive class="ocf" id="fs_UC5_ers" provider="heartbeat" type="Filesystem">
+          <instance_attributes id="fs_UC5_ers-instance_attributes">
+            <nvpair id="fs_UC5_ers-instance_attributes-device" name="device" value="uatdoelfs.igmfinancial.net:UC5_ERS/root"/>
+            <nvpair id="fs_UC5_ers-instance_attributes-directory" name="directory" value="/usr/sap/UC5/ERS12"/>
+            <nvpair id="fs_UC5_ers-instance_attributes-force_unmount" name="force_unmount" value="safe"/>
+            <nvpair id="fs_UC5_ers-instance_attributes-fstype" name="fstype" value="nfs"/>
+          </instance_attributes>
+          <operations>
+            <op id="fs_UC5_ers-monitor-interval-200" interval="200" name="monitor" timeout="40"/>
+            <op id="fs_UC5_ers-notify-interval-0s" interval="0s" name="notify" timeout="60s"/>
+            <op id="fs_UC5_ers-start-interval-0" interval="0" name="start" timeout="60"/>
+            <op id="fs_UC5_ers-stop-interval-0" interval="0" name="stop" timeout="120"/>
+          </operations>
+        </primitive>
+        <primitive class="ocf" id="rsc_sap_UC5_ERS12" provider="heartbeat" type="SAPInstance">
+          <instance_attributes id="rsc_sap_UC5_ERS12-instance_attributes">
+            <nvpair id="rsc_sap_UC5_ERS12-instance_attributes-AUTOMATIC_RECOVER" name="AUTOMATIC_RECOVER" value="false"/>
+            <nvpair id="rsc_sap_UC5_ERS12-instance_attributes-IS_ERS" name="IS_ERS" value="true"/>
+            <nvpair id="rsc_sap_UC5_ERS12-instance_attributes-InstanceName" name="InstanceName" value="UC5_ERS12_uatdobwers"/>
+            <nvpair id="rsc_sap_UC5_ERS12-instance_attributes-START_PROFILE" name="START_PROFILE" value="/sapmnt/UC5/profile/UC5_ERS12_uatdobwers"/>
+          </instance_attributes>
+          <operations>
+            <op id="rsc_sap_UC5_ERS12-demote-interval-0s" interval="0s" name="demote" timeout="320s"/>
+            <op id="rsc_sap_UC5_ERS12-methods-interval-0s" interval="0s" name="methods" timeout="5s"/>
+            <op id="rsc_sap_UC5_ERS12-monitor-interval-20" interval="20" name="monitor" on-fail="restart" timeout="60"/>
+            <op id="rsc_sap_UC5_ERS12-promote-interval-0s" interval="0s" name="promote" timeout="320s"/>
+            <op id="rsc_sap_UC5_ERS12-start-interval-0" interval="0" name="start" timeout="600"/>
+            <op id="rsc_sap_UC5_ERS12-stop-interval-0" interval="0" name="stop" timeout="600"/>
+          </operations>
+        </primitive>
+      </group>
+    </resources>
+    <constraints>
+      <rsc_colocation id="colocation-grp_UC5_ers-grp_UC5_ascs--5000" rsc="grp_UC5_ers" score="-5000" with-rsc="grp_UC5_ascs"/>
+      <rsc_order first="grp_UC5_ascs" first-action="start" id="order-grp_UC5_ascs-grp_UC5_ers-mandatory" symmetrical="false" then="grp_UC5_ers" then-action="stop"/>
+      <rsc_location id="location-rsc_sap_UC5_ASCS11" rsc="rsc_sap_UC5_ASCS11">
+        <rule id="location-rsc_sap_UC5_ASCS11-rule" score="2000">
+          <expression attribute="runs_ERS_UC5" id="location-rsc_sap_UC5_ASCS11-rule-expr" operation="eq" value="1"/>
+        </rule>
+      </rsc_location>
+      <rsc_location id="location-stonith_gcdoubwap01-gcdoubwap02" node="gcdoubwap01" rsc="stonith_gcdoubwap01" score="-INFINITY"/>
+      <rsc_location id="location-stonith_gcdoubwap02-gcdoubwap01" node="gcdoubwap02" rsc="stonith_gcdoubwap02" score="-INFINITY"/>
+      <rsc_order first="fs_UC5_SAPMNT-clone" first-action="start" id="order-fs_UC5_SAPMNT-clone-grp_UC5_ascs-mandatory" then="grp_UC5_ascs" then-action="start"/>
+      <rsc_order first="fs_UC5_SAPMNT-clone" first-action="start" id="order-fs_UC5_SAPMNT-clone-grp_UC5_ers-mandatory" then="grp_UC5_ers" then-action="start"/>
+    </constraints>
+  </configuration>
+  <status>
+    <node_state id="1" uname="gcdoubwap01" in_ccm="true" crmd="online" crm-debug-origin="post_cache_update" join="member" expected="member">
+      <lrm id="1">
+        <lrm_resources>
+          <lrm_resource id="stonith_gcdoubwap01" type="fence_gce" class="stonith">
+            <lrm_rsc_op id="stonith_gcdoubwap01_last_0" operation_key="stonith_gcdoubwap01_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="1:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:7;1:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="5" rc-code="7" op-status="0" interval="0" last-run="1623419700" last-rc-change="1623419700" exec-time="27" queue-time="0" op-digest="e6935031dfde569ad30fb442953d3d91"/>
+          </lrm_resource>
+          <lrm_resource id="stonith_gcdoubwap02" type="fence_gce" class="stonith">
+            <lrm_rsc_op id="stonith_gcdoubwap02_last_0" operation_key="stonith_gcdoubwap02_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="2:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:7;2:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="9" rc-code="7" op-status="0" interval="0" last-run="1623419700" last-rc-change="1623419700" exec-time="0" queue-time="0" op-digest="064645c51d6d3a802eb6928f6116222c"/>
+          </lrm_resource>
+          <lrm_resource id="fs_UC5_SAPMNT" type="Filesystem" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="fs_UC5_SAPMNT_last_0" operation_key="fs_UC5_SAPMNT_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="3:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:7;3:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="14" rc-code="7" op-status="0" interval="0" last-run="1623419700" last-rc-change="1623419700" exec-time="126" queue-time="1" op-digest="02c74f325691f1af3c3dd9c2ab702b01"/>
+          </lrm_resource>
+          <lrm_resource id="fs_UC5_SYS" type="Filesystem" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="fs_UC5_SYS_last_0" operation_key="fs_UC5_SYS_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="4:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:7;4:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="19" rc-code="7" op-status="0" interval="0" last-run="1623419700" last-rc-change="1623419700" exec-time="130" queue-time="0" op-digest="f1f67b01fc16ed22d8fa1fe030d9c06b"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_vip_int_ascs" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_vip_int_ascs_last_0" operation_key="rsc_vip_int_ascs_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="5:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:7;5:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="23" rc-code="7" op-status="0" interval="0" last-run="1623419700" last-rc-change="1623419700" exec-time="105" queue-time="0" op-digest="da0b35400002727d7281b8f7051fe400"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_vip_gcp_ascs" type="gcp-vpc-move-vip" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_vip_gcp_ascs_last_0" operation_key="rsc_vip_gcp_ascs_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="6:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:0;6:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="27" rc-code="0" op-status="0" interval="0" last-run="1623419705" last-rc-change="1623419705" exec-time="1842" queue-time="0" op-digest="face88a40d76658d0caa541eefc02ca8"/>
+            <lrm_rsc_op id="rsc_vip_gcp_ascs_last_failure_0" operation_key="rsc_vip_gcp_ascs_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="6:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:0;6:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="27" rc-code="0" op-status="0" interval="0" last-run="1623419705" last-rc-change="1623419705" exec-time="1842" queue-time="0" op-digest="face88a40d76658d0caa541eefc02ca8"/>
+          </lrm_resource>
+          <lrm_resource id="fs_UC5_ascs" type="Filesystem" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="fs_UC5_ascs_last_0" operation_key="fs_UC5_ascs_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="7:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:7;7:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="31" rc-code="7" op-status="0" interval="0" last-run="1623419705" last-rc-change="1623419705" exec-time="241" queue-time="0" op-digest="acac63abd6c034d7dad4aae73e2ca95d"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_sap_UC5_ASCS11" type="SAPInstance" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_sap_UC5_ASCS11_last_0" operation_key="rsc_sap_UC5_ASCS11_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="8:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:7;8:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="35" rc-code="7" op-status="0" interval="0" last-run="1623419705" last-rc-change="1623419705" exec-time="969" queue-time="0" op-digest="08c114a33aa3c16b3204ff09cb983107" op-force-restart=" ERS_START_PROFILE  ERS_InstanceName  START_PROFILE  InstanceName " op-restart-digest="315a463141e0ef59afedf7a62a8d6362"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_vip_init_ers" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_vip_init_ers_last_0" operation_key="rsc_vip_init_ers_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="9:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:7;9:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="39" rc-code="7" op-status="0" interval="0" last-run="1623419705" last-rc-change="1623419705" exec-time="1033" queue-time="0" op-digest="7b29d7af6a7baa6015d1eeac471a9b42"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_vip_gcp_ers" type="gcp-vpc-move-vip" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_vip_gcp_ers_last_0" operation_key="rsc_vip_gcp_ers_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="10:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:7;10:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="43" rc-code="7" op-status="0" interval="0" last-run="1623419705" last-rc-change="1623419705" exec-time="1702" queue-time="0" op-digest="10365a97fe5a5864a3679c314bf65bfd"/>
+          </lrm_resource>
+          <lrm_resource id="fs_UC5_ers" type="Filesystem" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="fs_UC5_ers_last_0" operation_key="fs_UC5_ers_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="11:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:7;11:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="47" rc-code="7" op-status="0" interval="0" last-run="1623419706" last-rc-change="1623419706" exec-time="709" queue-time="0" op-digest="61e45529b2da32c1e53055238a00ca99"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_sap_UC5_ERS12" type="SAPInstance" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_sap_UC5_ERS12_last_0" operation_key="rsc_sap_UC5_ERS12_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="12:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" transition-magic="0:7;12:0:7:5e41afa8-15bd-443a-88fb-ec276232a804" exit-reason="" on_node="gcdoubwap01" call-id="51" rc-code="7" op-status="0" interval="0" last-run="1623419706" last-rc-change="1623419706" exec-time="914" queue-time="1" op-digest="b550e70bd4203af88473e4d914b11f87" op-force-restart=" ERS_START_PROFILE  ERS_InstanceName  START_PROFILE  InstanceName " op-restart-digest="2fb6ec6eb77e25302c8dc0dad84dc46f"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+    <node_state id="2" uname="gcdoubwap02" crmd="offline" crm-debug-origin="post_cache_update" in_ccm="true"/>
+  </status>
+</cib>
-- 
1.8.3.1