From 82ae2f1b652c361dadacf25dece42a43340776ee Mon Sep 17 00:00:00 2001
From: Chris Lumens <clumens@redhat.com>
Date: Thu, 11 Feb 2021 09:57:21 -0500
Subject: [PATCH 1/3] Low: tools: Rename the result of cli_resource_search.

The result of cli_resource_search is a list of nodes, not a list of
resources.  Change the variable name appropriately.
---
 tools/crm_resource.c         | 4 ++--
 tools/crm_resource_runtime.c | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index 564600e..78b2246 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -1873,8 +1873,8 @@ main(int argc, char **argv)
             break;
 
         case cmd_locate: {
-            GListPtr resources = cli_resource_search(out, rsc, options.rsc_id, data_set);
-            rc = out->message(out, "resource-search-list", resources, rsc, options.rsc_id);
+            GListPtr nodes = cli_resource_search(out, rsc, options.rsc_id, data_set);
+            rc = out->message(out, "resource-search-list", nodes, rsc, options.rsc_id);
             break;
         }
 
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index b6e4df1..adfdfba 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -1780,8 +1780,8 @@ cli_resource_execute(pcmk__output_t *out, pe_resource_t *rsc,
         action = rsc_action+6;
 
         if(pe_rsc_is_clone(rsc)) {
-            GListPtr rscs = cli_resource_search(out, rsc, requested_name, data_set);
-            if(rscs != NULL && force == FALSE) {
+            GListPtr nodes = cli_resource_search(out, rsc, requested_name, data_set);
+            if(nodes != NULL && force == FALSE) {
                 out->err(out, "It is not safe to %s %s here: the cluster claims it is already active",
                          action, rsc->id);
                 out->err(out, "Try setting target-role=Stopped first or specifying "
-- 
1.8.3.1


From e8b320aaaabdd60b7ac851e5b70a2a1b3c2180a3 Mon Sep 17 00:00:00 2001
From: Chris Lumens <clumens@redhat.com>
Date: Thu, 11 Feb 2021 11:07:07 -0500
Subject: [PATCH 2/3] Test: cts: Add a test for a promotable clone resource.

Note that for the moment, the crm_resource output in
regression.tools.exp is incorrect.  There's a bug in that tool, but I
wanted to get a test case working before fixing it.
---
 cts/cli/crm_mon.xml            |  32 +++-
 cts/cli/regression.crm_mon.exp | 401 +++++++++++++++++++++++++++++------------
 cts/cli/regression.tools.exp   |  18 ++
 cts/cts-cli.in                 |  20 ++
 4 files changed, 357 insertions(+), 114 deletions(-)

diff --git a/cts/cli/crm_mon.xml b/cts/cli/crm_mon.xml
index d8d5d35..f0f14fd 100644
--- a/cts/cli/crm_mon.xml
+++ b/cts/cli/crm_mon.xml
@@ -1,4 +1,4 @@
-<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.3" epoch="1" num_updates="1" admin_epoch="1" cib-last-written="Tue May  5 12:04:36 2020" update-origin="cluster01" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="2">
+<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.3" epoch="1" num_updates="135" admin_epoch="1" cib-last-written="Tue May  5 12:04:36 2020" update-origin="cluster01" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="2">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
@@ -99,9 +99,25 @@
           </primitive>
         </group>
       </clone>
+      <clone id="promotable-clone">
+        <meta_attributes id="promotable-clone-meta_attributes">
+          <nvpair id="promotable-clone-meta_attributes-promotable" name="promotable" value="true"/>
+        </meta_attributes>
+        <primitive id="promotable-rsc" class="ocf" provider="pacemaker" type="Stateful">
+          <operations id="promotable-rsc-operations">
+            <op id="promotable-rsc-monitor-master-5" name="monitor" interval="5" role="Master"/>
+            <op id="promotable-rsc-monitor-slave-10" name="monitor" interval="10" role="Slave"/>
+          </operations>
+        </primitive>
+      </clone>
     </resources>
     <constraints>
       <rsc_location id="not-on-cluster1" rsc="dummy" node="cluster01" score="-INFINITY"/>
+      <rsc_location id="loc-promotable-clone" rsc="promotable-clone">
+        <rule id="loc-promotable-clone-rule" role="Master" score="10">
+          <expression attribute="#uname" id="loc-promotable-clone-expression" operation="eq" value="cluster02"/>
+        </rule>
+      </rsc_location>
     </constraints>
     <tags>
       <tag id="all-nodes">
@@ -153,6 +169,13 @@
             <lrm_rsc_op id="mysql-proxy_last_0" operation_key="mysql-proxy_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.4.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1596126852" last-run="1596126852" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
             <lrm_rsc_op id="mysql-proxy_monitor_10000" operation_key="mysql-proxy_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.4.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" last-rc-change="1596126852" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
           </lrm_resource>
+          <lrm_resource id="promotable-rsc" class="ocf" provider="pacemaker" type="Stateful">
+            <lrm_rsc_op id="promotable-rsc_last_0" operation_key="promotable-rsc_promote_0" operation="promote" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="6:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;6:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="6" rc-code="0" op-status="0" interval="0" last-rc-change="1613059546" last-run="1613059546" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="promotable-rsc_post_notify_start_0" operation_key="promotable-rsc_notify_0" operation="notify" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1613058809" last-run="1613058809" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="promotable-rsc_monitor_10000" operation_key="promotable-rsc_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="4" rc-code="0" op-status="0" interval="10000" last-rc-change="1613058809" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4"/>
+            <lrm_rsc_op id="promotable-rsc_cancel_10000" operation_key="promotable-rsc_cancel_10000" operation="cancel" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="5:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;5:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="5" rc-code="0" op-status="0" interval="10000" last-rc-change="1613059546" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4"/>
+            <lrm_rsc_op id="promotable-rsc_monitor_5000" operation_key="promotable-rsc_monitor_5000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="7:-1:8:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:8;7:-1:8:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="7" rc-code="8" op-status="0" interval="5000" last-rc-change="1613059546" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4"/>
+          </lrm_resource>
         </lrm_resources>
       </lrm>
       <transient_attributes id="2">
@@ -170,7 +193,7 @@
           </lrm_resource>
           <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
             <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="12:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;12:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="15" rc-code="0" op-status="0" interval="0" last-rc-change="1588951272" last-run="1588951272" exec-time="36" queue-time="0" op-digest="7da16842ab2328e41f737cab5e5fc89c"/>
-            <lrm_rsc_op id="Fencing_monitor_60000" operation_key="Fencing_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="13:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;13:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="19" rc-code="0" op-status="0" interval="60000" last-rc-change="1588951276" exec-time="24" queue-time="0" op-digest="f85d77708ad4ea02a9099e1e548aff0d"/>
+            <lrm_rsc_op id="Fencing_monitor_60000" operation_key="Fencing_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="20:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;20:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="cluster01" call-id="20" rc-code="0" op-status="0" interval="60000" last-rc-change="1613056690" exec-time="0" queue-time="0" op-digest="d4ee02dc1c7ce16eb0f72e06c2cc9193"/>
           </lrm_resource>
           <lrm_resource id="dummy" type="Dummy" class="ocf" provider="pacemaker">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="3:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;3:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="16" rc-code="0" op-status="0" interval="0" last-rc-change="1588951272" last-run="1588951272" exec-time="6048" queue-time="0" op-digest="aa0f9b7caf28600646551adb55bd9b95" op-force-restart=" envfile  op_sleep  passwd  state " op-restart-digest="aa0f9b7caf28600646551adb55bd9b95" op-secure-params=" passwd " op-secure-digest="aa0f9b7caf28600646551adb55bd9b95"/>
@@ -185,6 +208,11 @@
             <lrm_rsc_op id="mysql-proxy_last_0" operation_key="mysql-proxy_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.4.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1596126852" last-run="1596126852" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
             <lrm_rsc_op id="mysql-proxy_monitor_10000" operation_key="mysql-proxy_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.4.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" last-rc-change="1596126852" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
           </lrm_resource>
+          <lrm_resource id="promotable-rsc" class="ocf" provider="pacemaker" type="Stateful">
+            <lrm_rsc_op id="promotable-rsc_last_0" operation_key="promotable-rsc_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1613058809" last-run="1613058809" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="promotable-rsc_post_notify_start_0" operation_key="promotable-rsc_notify_0" operation="notify" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1613058809" last-run="1613058809" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="promotable-rsc_monitor_10000" operation_key="promotable-rsc_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="4" rc-code="0" op-status="0" interval="10000" last-rc-change="1613058809" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4"/>
+          </lrm_resource>
         </lrm_resources>
       </lrm>
       <transient_attributes id="1">
diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp
index dd20116..c223b7f 100644
--- a/cts/cli/regression.crm_mon.exp
+++ b/cts/cli/regression.crm_mon.exp
@@ -5,7 +5,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
@@ -20,6 +20,9 @@ Active Resources:
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
+  * Clone Set: promotable-clone [promotable-rsc] (promotable):
+    * Masters: [ cluster02 ]
+    * Slaves: [ cluster01 ]
 =#=#=#= End test: Basic text output - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output
 =#=#=#= Begin test: XML output =#=#=#=
@@ -30,12 +33,12 @@ Active Resources:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
@@ -112,6 +115,17 @@ Active Resources:
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
+    <clone id="promotable-clone" multi_state="true" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Master" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster02" id="2" cached="true"/>
+      </resource>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Slave" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster01" id="1" cached="true"/>
+      </resource>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+    </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
@@ -142,6 +156,12 @@ Active Resources:
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
+      <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
+        <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="5" task="cancel" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="6" task="promote" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="7" task="monitor" rc="8" rc_text="master" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
+      </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
@@ -150,7 +170,7 @@ Active Resources:
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
-        <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/>
+        <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/>
@@ -159,6 +179,10 @@ Active Resources:
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
+      <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
+        <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
+      </resource_history>
     </node>
   </node_history>
   <bans>
@@ -175,7 +199,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
@@ -187,6 +211,9 @@ Active Resources:
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
+  * Clone Set: promotable-clone [promotable-rsc] (promotable):
+    * Masters: [ cluster02 ]
+    * Slaves: [ cluster01 ]
 =#=#=#= End test: Basic text output without node section - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output without node section
 =#=#=#= Begin test: XML output without the node section =#=#=#=
@@ -197,7 +224,7 @@ Active Resources:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <resources>
@@ -272,6 +299,17 @@ Active Resources:
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
+    <clone id="promotable-clone" multi_state="true" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Master" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster02" id="2" cached="true"/>
+      </resource>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Slave" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster01" id="1" cached="true"/>
+      </resource>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+    </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
@@ -302,6 +340,12 @@ Active Resources:
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
+      <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
+        <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="5" task="cancel" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="6" task="promote" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="7" task="monitor" rc="8" rc_text="master" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
+      </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
@@ -310,7 +354,7 @@ Active Resources:
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
-        <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/>
+        <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/>
@@ -319,6 +363,10 @@ Active Resources:
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
+      <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
+        <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
+      </resource_history>
     </node>
   </node_history>
   <bans>
@@ -340,7 +388,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
@@ -355,6 +403,9 @@ Active Resources:
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
+  * Clone Set: promotable-clone [promotable-rsc] (promotable):
+    * Masters: [ cluster02 ]
+    * Slaves: [ cluster01 ]
 
 Node Attributes:
   * Node: cluster01:
@@ -378,18 +429,26 @@ Operations:
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (4) monitor: interval="10000ms"
+      * (5) cancel: interval="10000ms"
+      * (6) promote
+      * (7) monitor: interval="5000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
-      * (19) monitor: interval="60000ms"
+      * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (2) start
+      * (4) monitor: interval="10000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
@@ -402,7 +461,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 (1) cluster02 (2) ]
@@ -427,6 +486,12 @@ Active Resources:
       * mysql-proxy	(lsb:mysql-proxy):	 Stopped
     * Resource Group: mysql-group:4:
       * mysql-proxy	(lsb:mysql-proxy):	 Stopped
+  * Clone Set: promotable-clone [promotable-rsc] (promotable):
+    * promotable-rsc	(ocf::pacemaker:Stateful):	 Master cluster02
+    * promotable-rsc	(ocf::pacemaker:Stateful):	 Slave cluster01
+    * promotable-rsc	(ocf::pacemaker:Stateful):	 Stopped
+    * promotable-rsc	(ocf::pacemaker:Stateful):	 Stopped
+    * promotable-rsc	(ocf::pacemaker:Stateful):	 Stopped
 
 Node Attributes:
   * Node: cluster01 (1):
@@ -450,18 +515,26 @@ Operations:
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (4) monitor: interval="10000ms"
+      * (5) cancel: interval="10000ms"
+      * (6) promote
+      * (7) monitor: interval="5000ms"
   * Node: cluster01 (1):
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
-      * (19) monitor: interval="60000ms"
+      * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (2) start
+      * (4) monitor: interval="10000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01 (1)
@@ -474,7 +547,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
@@ -489,6 +562,9 @@ Active Resources:
     * 1/1	(ocf::heartbeat:IPaddr):	Active cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
+  * Clone Set: promotable-clone [promotable-rsc] (promotable):
+    * Masters: [ cluster02 ]
+    * Slaves: [ cluster01 ]
 
 Node Attributes:
   * Node: cluster01:
@@ -512,18 +588,26 @@ Operations:
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (4) monitor: interval="10000ms"
+      * (5) cancel: interval="10000ms"
+      * (6) promote
+      * (7) monitor: interval="5000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
-      * (19) monitor: interval="60000ms"
+      * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (2) start
+      * (4) monitor: interval="10000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
@@ -536,7 +620,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01: online:
@@ -544,6 +628,7 @@ Node List:
       * ping	(ocf::pacemaker:ping):	 Started
       * Fencing	(stonith:fence_xvm):	 Started
       * mysql-proxy	(lsb:mysql-proxy):	 Started
+      * promotable-rsc	(ocf::pacemaker:Stateful):	 Slave
   * Node cluster02: online:
     * Resources:
       * ping	(ocf::pacemaker:ping):	 Started
@@ -551,6 +636,7 @@ Node List:
       * Public-IP	(ocf::heartbeat:IPaddr):	 Started
       * Email	(lsb:exim):	 Started
       * mysql-proxy	(lsb:mysql-proxy):	 Started
+      * promotable-rsc	(ocf::pacemaker:Stateful):	 Master
   * GuestNode httpd-bundle-0@: OFFLINE:
     * Resources:
   * GuestNode httpd-bundle-1@: OFFLINE:
@@ -580,18 +666,26 @@ Operations:
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (4) monitor: interval="10000ms"
+      * (5) cancel: interval="10000ms"
+      * (6) promote
+      * (7) monitor: interval="5000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
-      * (19) monitor: interval="60000ms"
+      * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (2) start
+      * (4) monitor: interval="10000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
@@ -604,12 +698,13 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01: online:
     * Resources:
       * 1	(lsb:mysql-proxy):	Active 
+      * 1	(ocf::pacemaker:Stateful):	Active 
       * 1	(ocf::pacemaker:ping):	Active 
       * 1	(stonith:fence_xvm):	Active 
   * Node cluster02: online:
@@ -618,6 +713,7 @@ Node List:
       * 1	(lsb:mysql-proxy):	Active 
       * 1	(ocf::heartbeat:IPaddr):	Active 
       * 1	(ocf::pacemaker:Dummy):	Active 
+      * 1	(ocf::pacemaker:Stateful):	Active 
       * 1	(ocf::pacemaker:ping):	Active 
 
 Node Attributes:
@@ -642,18 +738,26 @@ Operations:
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (4) monitor: interval="10000ms"
+      * (5) cancel: interval="10000ms"
+      * (6) promote
+      * (7) monitor: interval="5000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
-      * (19) monitor: interval="60000ms"
+      * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (2) start
+      * (4) monitor: interval="10000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
@@ -667,11 +771,11 @@ Negative Location Constraints:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member">
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member">
       <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
@@ -681,8 +785,11 @@ Negative Location Constraints:
       <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Slave" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster01" id="1" cached="true"/>
+      </resource>
     </node>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member">
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member">
       <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
@@ -698,6 +805,9 @@ Negative Location Constraints:
       <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Master" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster02" id="2" cached="true"/>
+      </resource>
     </node>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
@@ -753,6 +863,17 @@ Negative Location Constraints:
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
+    <clone id="promotable-clone" multi_state="true" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Master" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster02" id="2" cached="true"/>
+      </resource>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Slave" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster01" id="1" cached="true"/>
+      </resource>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+    </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
@@ -783,6 +904,12 @@ Negative Location Constraints:
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
+      <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
+        <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="5" task="cancel" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="6" task="promote" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="7" task="monitor" rc="8" rc_text="master" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
+      </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
@@ -791,7 +918,7 @@ Negative Location Constraints:
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
-        <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/>
+        <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/>
@@ -800,6 +927,10 @@ Negative Location Constraints:
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
+      <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
+        <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
+      </resource_history>
     </node>
   </node_history>
   <bans>
@@ -816,7 +947,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 ]
@@ -827,6 +958,8 @@ Active Resources:
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 ]
+  * Clone Set: promotable-clone [promotable-rsc] (promotable):
+    * Slaves: [ cluster01 ]
 
 Node Attributes:
   * Node: cluster01:
@@ -840,12 +973,15 @@ Operations:
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
-      * (19) monitor: interval="60000ms"
+      * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (2) start
+      * (4) monitor: interval="10000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
@@ -859,11 +995,11 @@ Negative Location Constraints:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
@@ -918,6 +1054,14 @@ Negative Location Constraints:
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
+    <clone id="promotable-clone" multi_state="true" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Slave" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster01" id="1" cached="true"/>
+      </resource>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+    </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
@@ -933,7 +1077,7 @@ Negative Location Constraints:
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
-        <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/>
+        <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/>
@@ -942,6 +1086,10 @@ Negative Location Constraints:
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
+      <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
+        <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
+      </resource_history>
     </node>
   </node_history>
   <bans>
@@ -958,7 +1106,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster02 ]
@@ -972,6 +1120,8 @@ Active Resources:
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster02 ]
+  * Clone Set: promotable-clone [promotable-rsc] (promotable):
+    * Masters: [ cluster02 ]
 
 Node Attributes:
   * Node: cluster02:
@@ -992,6 +1142,11 @@ Operations:
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (4) monitor: interval="10000ms"
+      * (5) cancel: interval="10000ms"
+      * (6) promote
+      * (7) monitor: interval="5000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
@@ -1005,11 +1160,11 @@ Negative Location Constraints:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
@@ -1072,6 +1227,14 @@ Negative Location Constraints:
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
+    <clone id="promotable-clone" multi_state="true" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Master" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster02" id="2" cached="true"/>
+      </resource>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+    </clone>
   </resources>
   <node_attributes>
     <node name="cluster02">
@@ -1098,6 +1261,12 @@ Negative Location Constraints:
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
+      <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
+        <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="5" task="cancel" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="6" task="promote" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="7" task="monitor" rc="8" rc_text="master" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
+      </resource_history>
     </node>
   </node_history>
   <bans>
@@ -1114,7 +1283,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
@@ -1133,7 +1302,7 @@ Operations:
   * Node: cluster01:
     * Fencing: migration-threshold=1000000:
       * (15) start
-      * (19) monitor: interval="60000ms"
+      * (20) monitor: interval="60000ms"
 =#=#=#= End test: Complete text output filtered by resource tag - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by resource tag
 =#=#=#= Begin test: XML output filtered by resource tag =#=#=#=
@@ -1144,12 +1313,12 @@ Operations:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
@@ -1172,7 +1341,7 @@ Operations:
     <node name="cluster01">
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
-        <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/>
+        <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
@@ -1187,7 +1356,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Active Resources:
   * No active resources
@@ -1201,7 +1370,7 @@ Active Resources:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes/>
@@ -1249,7 +1418,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
@@ -1273,6 +1442,9 @@ Full List of Resources:
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
+  * Clone Set: promotable-clone [promotable-rsc] (promotable):
+    * Masters: [ cluster02 ]
+    * Slaves: [ cluster01 ]
 =#=#=#= End test: Basic text output with inactive resources - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by node =#=#=#=
@@ -1282,7 +1454,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster02 ]
@@ -1305,6 +1477,8 @@ Full List of Resources:
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster02 ]
+  * Clone Set: promotable-clone [promotable-rsc] (promotable):
+    * Masters: [ cluster02 ]
 =#=#=#= End test: Basic text output with inactive resources, filtered by node - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources, filtered by node
 =#=#=#= Begin test: Complete text output filtered by primitive resource =#=#=#=
@@ -1314,7 +1488,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
@@ -1333,7 +1507,7 @@ Operations:
   * Node: cluster01:
     * Fencing: migration-threshold=1000000:
       * (15) start
-      * (19) monitor: interval="60000ms"
+      * (20) monitor: interval="60000ms"
 =#=#=#= End test: Complete text output filtered by primitive resource - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by primitive resource
 =#=#=#= Begin test: XML output filtered by primitive resource =#=#=#=
@@ -1344,12 +1518,12 @@ Operations:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
@@ -1372,7 +1546,7 @@ Operations:
     <node name="cluster01">
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
-        <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/>
+        <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
@@ -1387,7 +1561,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
@@ -1420,12 +1594,12 @@ Operations:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
@@ -1470,7 +1644,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
@@ -1500,12 +1674,12 @@ Operations:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
@@ -1544,7 +1718,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
@@ -1579,12 +1753,12 @@ Operations:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
@@ -1633,7 +1807,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
@@ -1668,12 +1842,12 @@ Operations:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
@@ -1722,7 +1896,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 (1) cluster02 (2) ]
@@ -1757,12 +1931,12 @@ Operations:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
@@ -1808,7 +1982,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
@@ -1825,12 +1999,12 @@ Active Resources:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
@@ -1857,7 +2031,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
@@ -1877,7 +2051,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
@@ -1897,12 +2071,12 @@ Full List of Resources:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
@@ -1950,7 +2124,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
@@ -1969,12 +2143,12 @@ Full List of Resources:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
@@ -2007,7 +2181,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
@@ -2026,12 +2200,12 @@ Full List of Resources:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
@@ -2064,7 +2238,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
@@ -2083,12 +2257,12 @@ Full List of Resources:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
@@ -2121,7 +2295,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
@@ -2144,12 +2318,12 @@ Full List of Resources:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
@@ -2188,7 +2362,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 (1) cluster02 (2) ]
@@ -2232,12 +2406,12 @@ Operations:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
@@ -2279,7 +2453,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 (1) cluster02 (2) ]
@@ -2323,12 +2497,12 @@ Operations:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
@@ -2390,7 +2564,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 (1) cluster02 (2) ]
@@ -2426,12 +2600,12 @@ Operations:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
@@ -2479,7 +2653,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 (1) cluster02 (2) ]
@@ -2523,12 +2697,12 @@ Operations:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
@@ -2590,7 +2764,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 (1) cluster02 (2) ]
@@ -2626,12 +2800,12 @@ Operations:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
@@ -3083,7 +3257,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
               *** Resource management is DISABLED ***
   The cluster will not attempt to start, stop or recover services
@@ -3114,5 +3288,8 @@ Full List of Resources:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02 (unmanaged)
     * Resource Group: mysql-group:1 (unmanaged):
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01 (unmanaged)
+  * Clone Set: promotable-clone [promotable-rsc] (promotable) (unmanaged):
+    * promotable-rsc	(ocf::pacemaker:Stateful):	 Master cluster02 (unmanaged)
+    * promotable-rsc	(ocf::pacemaker:Stateful):	 Slave cluster01 (unmanaged)
 =#=#=#= End test: Text output of all resources with maintenance-mode enabled - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of all resources with maintenance-mode enabled
diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp
index 1afe596..708c340 100644
--- a/cts/cli/regression.tools.exp
+++ b/cts/cli/regression.tools.exp
@@ -4077,3 +4077,21 @@ Resources colocated with clone:
 5
 =#=#=#= End test: List guest,remote nodes - OK (0) =#=#=#=
 * Passed: crmadmin       - List guest,remote nodes
+=#=#=#= Begin test: List a promotable clone resource =#=#=#=
+resource promotable-clone is running on: cluster02
+resource promotable-clone is running on: cluster01
+=#=#=#= End test: List a promotable clone resource - OK (0) =#=#=#=
+* Passed: crm_resource   - List a promotable clone resource
+=#=#=#= Begin test: List the primitive of a promotable clone resource =#=#=#=
+resource promotable-rsc is running on: cluster02 Master
+resource promotable-rsc is running on: cluster01 Master
+=#=#=#= End test: List the primitive of a promotable clone resource - OK (0) =#=#=#=
+* Passed: crm_resource   - List the primitive of a promotable clone resource
+=#=#=#= Begin test: List a single instance of a promotable clone resource =#=#=#=
+resource promotable-rsc:0 is running on: cluster02 Master
+=#=#=#= End test: List a single instance of a promotable clone resource - OK (0) =#=#=#=
+* Passed: crm_resource   - List a single instance of a promotable clone resource
+=#=#=#= Begin test: List another instance of a promotable clone resource =#=#=#=
+resource promotable-rsc:1 is running on: cluster01
+=#=#=#= End test: List another instance of a promotable clone resource - OK (0) =#=#=#=
+* Passed: crm_resource   - List another instance of a promotable clone resource
diff --git a/cts/cts-cli.in b/cts/cts-cli.in
index 8e2dbe5..6f7eb80 100755
--- a/cts/cts-cli.in
+++ b/cts/cts-cli.in
@@ -831,6 +831,26 @@ function test_tools() {
     test_assert $CRM_EX_OK 0
 
     unset CIB_file
+
+    export CIB_file="$test_home/cli/crm_mon.xml"
+
+    desc="List a promotable clone resource"
+    cmd="crm_resource --locate -r promotable-clone"
+    test_assert $CRM_EX_OK 0
+
+    desc="List the primitive of a promotable clone resource"
+    cmd="crm_resource --locate -r promotable-rsc"
+    test_assert $CRM_EX_OK 0
+
+    desc="List a single instance of a promotable clone resource"
+    cmd="crm_resource --locate -r promotable-rsc:0"
+    test_assert $CRM_EX_OK 0
+
+    desc="List another instance of a promotable clone resource"
+    cmd="crm_resource --locate -r promotable-rsc:1"
+    test_assert $CRM_EX_OK 0
+
+    unset CIB_file
 }
 
 INVALID_PERIODS=(
-- 
1.8.3.1


From d1bb0758726c09fd78efbc30c7eb46559e9c10e2 Mon Sep 17 00:00:00 2001
From: Chris Lumens <clumens@redhat.com>
Date: Thu, 11 Feb 2021 15:09:54 -0500
Subject: [PATCH 3/3] Fix: Correct output of "crm_resource --locate" in case of
 clones.

For non-clone resources, the rsc parameter passed to
resource_search_list_* is accurate - it is the resource object for the
name given on the command line.  For clone resources, this parameter is
incorrect.  It will be a single instance of the clone resource, no
matter which instance might have been asked for on the command line.

This typically doesn't matter, but results in incorrect output when
promotable clones are searched for.  For promotable clones, the "Master"
text may not appear for any of the instances.  This is because the
single instance passed in as the rsc parameter might not be the master,
but each iteration through the loop will use that same parameter.

The fix is to change cli_resource_search to return a list of
node/promoted pairs so we already have all the information we need.
Printing is then a simple matter of just walking that list.

The referenced bug has a much better explanation of the cause of the
problem.

See: rhbz#1925681
---
 cts/cli/regression.tools.exp |  4 ++--
 tools/crm_resource.c         |  3 ++-
 tools/crm_resource.h         |  7 +++++-
 tools/crm_resource_print.c   | 23 +++++++-------------
 tools/crm_resource_runtime.c | 51 +++++++++++++++++++++++++++++++-------------
 5 files changed, 54 insertions(+), 34 deletions(-)

diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp
index 708c340..b3f16fa 100644
--- a/cts/cli/regression.tools.exp
+++ b/cts/cli/regression.tools.exp
@@ -4078,13 +4078,13 @@ Resources colocated with clone:
 =#=#=#= End test: List guest,remote nodes - OK (0) =#=#=#=
 * Passed: crmadmin       - List guest,remote nodes
 =#=#=#= Begin test: List a promotable clone resource =#=#=#=
-resource promotable-clone is running on: cluster02
 resource promotable-clone is running on: cluster01
+resource promotable-clone is running on: cluster02 Master
 =#=#=#= End test: List a promotable clone resource - OK (0) =#=#=#=
 * Passed: crm_resource   - List a promotable clone resource
 =#=#=#= Begin test: List the primitive of a promotable clone resource =#=#=#=
+resource promotable-rsc is running on: cluster01
 resource promotable-rsc is running on: cluster02 Master
-resource promotable-rsc is running on: cluster01 Master
 =#=#=#= End test: List the primitive of a promotable clone resource - OK (0) =#=#=#=
 * Passed: crm_resource   - List the primitive of a promotable clone resource
 =#=#=#= Begin test: List a single instance of a promotable clone resource =#=#=#=
diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index 78b2246..7d2f0f6 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -1874,7 +1874,8 @@ main(int argc, char **argv)
 
         case cmd_locate: {
             GListPtr nodes = cli_resource_search(out, rsc, options.rsc_id, data_set);
-            rc = out->message(out, "resource-search-list", nodes, rsc, options.rsc_id);
+            rc = out->message(out, "resource-search-list", nodes, options.rsc_id);
+            g_list_free_full(nodes, free);
             break;
         }
 
diff --git a/tools/crm_resource.h b/tools/crm_resource.h
index 5bfadb7..777490a 100644
--- a/tools/crm_resource.h
+++ b/tools/crm_resource.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2004-2020 the Pacemaker project contributors
+ * Copyright 2004-2021 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
@@ -23,6 +23,11 @@
 #include <crm/pengine/internal.h>
 #include <pacemaker-internal.h>
 
+typedef struct node_info_s {
+    const char *node_name;
+    bool promoted;
+} node_info_t;
+
 enum resource_check_flags {
     rsc_remain_stopped  = (1 << 0),
     rsc_unpromotable    = (1 << 1),
diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c
index 398fef0..053f806 100644
--- a/tools/crm_resource_print.c
+++ b/tools/crm_resource_print.c
@@ -276,12 +276,11 @@ resource_check_list_xml(pcmk__output_t *out, va_list args) {
     return rc;
 }
 
-PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "pe_resource_t *", "gchar *")
+PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "gchar *")
 static int
 resource_search_list_default(pcmk__output_t *out, va_list args)
 {
     GList *nodes = va_arg(args, GList *);
-    pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     gchar *requested_name = va_arg(args, gchar *);
 
     bool printed = false;
@@ -293,7 +292,7 @@ resource_search_list_default(pcmk__output_t *out, va_list args)
     }
 
     for (GList *lpc = nodes; lpc != NULL; lpc = lpc->next) {
-        pe_node_t *node = (pe_node_t *) lpc->data;
+        node_info_t *ni = (node_info_t *) lpc->data;
 
         if (!printed) {
             out->begin_list(out, NULL, NULL, "Nodes");
@@ -302,15 +301,10 @@ resource_search_list_default(pcmk__output_t *out, va_list args)
         }
 
         if (out->is_quiet(out)) {
-            out->list_item(out, "node", "%s", node->details->uname);
+            out->list_item(out, "node", "%s", ni->node_name);
         } else {
-            const char *state = "";
-
-            if (!pe_rsc_is_clone(rsc) && rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER) {
-                state = " Master";
-            }
             out->list_item(out, "node", "resource %s is running on: %s%s",
-                           requested_name, node->details->uname, state);
+                           requested_name, ni->node_name, ni->promoted ? " Master" : "");
         }
     }
 
@@ -321,12 +315,11 @@ resource_search_list_default(pcmk__output_t *out, va_list args)
     return rc;
 }
 
-PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "pe_resource_t *", "gchar *")
+PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "gchar *")
 static int
 resource_search_list_xml(pcmk__output_t *out, va_list args)
 {
     GList *nodes = va_arg(args, GList *);
-    pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     gchar *requested_name = va_arg(args, gchar *);
 
     pcmk__output_xml_create_parent(out, "nodes",
@@ -334,10 +327,10 @@ resource_search_list_xml(pcmk__output_t *out, va_list args)
                                    NULL);
 
     for (GList *lpc = nodes; lpc != NULL; lpc = lpc->next) {
-        pe_node_t *node = (pe_node_t *) lpc->data;
-        xmlNodePtr sub_node = pcmk__output_create_xml_text_node(out, "node", node->details->uname);
+        node_info_t *ni = (node_info_t *) lpc->data;
+        xmlNodePtr sub_node = pcmk__output_create_xml_text_node(out, "node", ni->node_name);
 
-        if (!pe_rsc_is_clone(rsc) && rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER) {
+        if (ni->promoted) {
             crm_xml_add(sub_node, "state", "promoted");
         }
     }
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index adfdfba..1769042 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -41,20 +41,37 @@ cli_check_resource(pe_resource_t *rsc, char *role_s, char *managed)
     return rc;
 }
 
+static GListPtr
+build_node_info_list(pe_resource_t *rsc)
+{
+    GListPtr retval = NULL;
+
+    for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) {
+        pe_resource_t *child = (pe_resource_t *) iter->data;
+
+        for (GListPtr iter2 = child->running_on; iter2 != NULL; iter2 = iter2->next) {
+            pe_node_t *node = (pe_node_t *) iter2->data;
+            node_info_t *ni = calloc(1, sizeof(node_info_t));
+            ni->node_name = node->details->uname;
+            ni->promoted = pcmk_is_set(rsc->flags, pe_rsc_promotable) &&
+                           child->fns->state(child, TRUE) == RSC_ROLE_MASTER;
+
+            retval = g_list_prepend(retval, ni);
+        }
+    }
+
+    return retval;
+}
+
 GListPtr
 cli_resource_search(pcmk__output_t *out, pe_resource_t *rsc, const char *requested_name,
                     pe_working_set_t *data_set)
 {
-    GListPtr found = NULL;
+    GListPtr retval = NULL;
     pe_resource_t *parent = uber_parent(rsc);
 
     if (pe_rsc_is_clone(rsc)) {
-        for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) {
-            GListPtr extra = ((pe_resource_t *) iter->data)->running_on;
-            if (extra != NULL) {
-                found = g_list_concat(found, extra);
-            }
-        }
+        retval = build_node_info_list(rsc);
 
     /* The anonymous clone children's common ID is supplied */
     } else if (pe_rsc_is_clone(parent)
@@ -63,18 +80,20 @@ cli_resource_search(pcmk__output_t *out, pe_resource_t *rsc, const char *request
                && pcmk__str_eq(requested_name, rsc->clone_name, pcmk__str_casei)
                && !pcmk__str_eq(requested_name, rsc->id, pcmk__str_casei)) {
 
-        for (GListPtr iter = parent->children; iter; iter = iter->next) {
-            GListPtr extra = ((pe_resource_t *) iter->data)->running_on;
-            if (extra != NULL) {
-                found = g_list_concat(found, extra);
-            }
-        }
+        retval = build_node_info_list(parent);
 
     } else if (rsc->running_on != NULL) {
-        found = g_list_concat(found, rsc->running_on);
+        for (GListPtr iter = rsc->running_on; iter != NULL; iter = iter->next) {
+            pe_node_t *node = (pe_node_t *) iter->data;
+            node_info_t *ni = calloc(1, sizeof(node_info_t));
+            ni->node_name = node->details->uname;
+            ni->promoted = rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER;
+
+            retval = g_list_prepend(retval, ni);
+        }
     }
 
-    return found;
+    return retval;
 }
 
 #define XPATH_MAX 1024
@@ -1788,6 +1807,8 @@ cli_resource_execute(pcmk__output_t *out, pe_resource_t *rsc,
                          "the force option");
                 return CRM_EX_UNSAFE;
             }
+
+            g_list_free_full(nodes, free);
         }
 
     } else {
-- 
1.8.3.1