From 82ae2f1b652c361dadacf25dece42a43340776ee Mon Sep 17 00:00:00 2001
From: Chris Lumens <clumens@redhat.com>
Date: Thu, 11 Feb 2021 09:57:21 -0500
Subject: [PATCH 1/3] Low: tools: Rename the result of cli_resource_search.
The result of cli_resource_search is a list of nodes, not a list of
resources.  Change the variable name appropriately.
---
 tools/crm_resource.c         | 4 ++--
 tools/crm_resource_runtime.c | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index 564600e..78b2246 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -1873,8 +1873,8 @@ main(int argc, char **argv)
             break;
 
         case cmd_locate: {
-            GListPtr resources = cli_resource_search(out, rsc, options.rsc_id, data_set);
-            rc = out->message(out, "resource-search-list", resources, rsc, options.rsc_id);
+            GListPtr nodes = cli_resource_search(out, rsc, options.rsc_id, data_set);
+            rc = out->message(out, "resource-search-list", nodes, rsc, options.rsc_id);
             break;
         }
 
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index b6e4df1..adfdfba 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -1780,8 +1780,8 @@ cli_resource_execute(pcmk__output_t *out, pe_resource_t *rsc,
         action = rsc_action+6;
 
         if(pe_rsc_is_clone(rsc)) {
-            GListPtr rscs = cli_resource_search(out, rsc, requested_name, data_set);
-            if(rscs != NULL && force == FALSE) {
+            GListPtr nodes = cli_resource_search(out, rsc, requested_name, data_set);
+            if(nodes != NULL && force == FALSE) {
                 out->err(out, "It is not safe to %s %s here: the cluster claims it is already active",
                          action, rsc->id);
                 out->err(out, "Try setting target-role=Stopped first or specifying "
-- 
1.8.3.1


From e8b320aaaabdd60b7ac851e5b70a2a1b3c2180a3 Mon Sep 17 00:00:00 2001
From: Chris Lumens <clumens@redhat.com>
Date: Thu, 11 Feb 2021 11:07:07 -0500
Subject: [PATCH 2/3] Test: cts: Add a test for a promotable clone resource.
Note that for the moment, the crm_resource output in
regression.tools.exp is incorrect.  There's a bug in that tool, but I
wanted to get a test case working before fixing it.
---
 cts/cli/crm_mon.xml            |  32 +++-
 cts/cli/regression.crm_mon.exp | 401 +++++++++++++++++++++++++++++------------
 cts/cli/regression.tools.exp   |  18 ++
 cts/cts-cli.in                 |  20 ++
 4 files changed, 357 insertions(+), 114 deletions(-)
diff --git a/cts/cli/crm_mon.xml b/cts/cli/crm_mon.xml
index d8d5d35..f0f14fd 100644
--- a/cts/cli/crm_mon.xml
+++ b/cts/cli/crm_mon.xml
@@ -1,4 +1,4 @@
-<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.3" epoch="1" num_updates="1" admin_epoch="1" cib-last-written="Tue May  5 12:04:36 2020" update-origin="cluster01" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="2">
+<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.3" epoch="1" num_updates="135" admin_epoch="1" cib-last-written="Tue May  5 12:04:36 2020" update-origin="cluster01" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="2">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
@@ -99,9 +99,25 @@
           </primitive>
         </group>
       </clone>
+      <clone id="promotable-clone">
+        <meta_attributes id="promotable-clone-meta_attributes">
+          <nvpair id="promotable-clone-meta_attributes-promotable" name="promotable" value="true"/>
+        </meta_attributes>
+        <primitive id="promotable-rsc" class="ocf" provider="pacemaker" type="Stateful">
+          <operations id="promotable-rsc-operations">
+            <op id="promotable-rsc-monitor-master-5" name="monitor" interval="5" role="Master"/>
+            <op id="promotable-rsc-monitor-slave-10" name="monitor" interval="10" role="Slave"/>
+          </operations>
+        </primitive>
+      </clone>
     </resources>
     <constraints>
       <rsc_location id="not-on-cluster1" rsc="dummy" node="cluster01" score="-INFINITY"/>
+      <rsc_location id="loc-promotable-clone" rsc="promotable-clone">
+        <rule id="loc-promotable-clone-rule" role="Master" score="10">
+          <expression attribute="#uname" id="loc-promotable-clone-expression" operation="eq" value="cluster02"/>
+        </rule>
+      </rsc_location>
     </constraints>
     <tags>
       <tag id="all-nodes">
@@ -153,6 +169,13 @@
             <lrm_rsc_op id="mysql-proxy_last_0" operation_key="mysql-proxy_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.4.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1596126852" last-run="1596126852" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
             <lrm_rsc_op id="mysql-proxy_monitor_10000" operation_key="mysql-proxy_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.4.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" last-rc-change="1596126852" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
           </lrm_resource>
+          <lrm_resource id="promotable-rsc" class="ocf" provider="pacemaker" type="Stateful">
+            <lrm_rsc_op id="promotable-rsc_last_0" operation_key="promotable-rsc_promote_0" operation="promote" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="6:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;6:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="6" rc-code="0" op-status="0" interval="0" last-rc-change="1613059546" last-run="1613059546" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="promotable-rsc_post_notify_start_0" operation_key="promotable-rsc_notify_0" operation="notify" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1613058809" last-run="1613058809" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="promotable-rsc_monitor_10000" operation_key="promotable-rsc_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="4" rc-code="0" op-status="0" interval="10000" last-rc-change="1613058809" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4"/>
+            <lrm_rsc_op id="promotable-rsc_cancel_10000" operation_key="promotable-rsc_cancel_10000" operation="cancel" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="5:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;5:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="5" rc-code="0" op-status="0" interval="10000" last-rc-change="1613059546" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4"/>
+            <lrm_rsc_op id="promotable-rsc_monitor_5000" operation_key="promotable-rsc_monitor_5000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="7:-1:8:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:8;7:-1:8:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="7" rc-code="8" op-status="0" interval="5000" last-rc-change="1613059546" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4"/>
+          </lrm_resource>
         </lrm_resources>
       </lrm>
       <transient_attributes id="2">
@@ -170,7 +193,7 @@
           </lrm_resource>
           <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
             <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="12:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;12:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="15" rc-code="0" op-status="0" interval="0" last-rc-change="1588951272" last-run="1588951272" exec-time="36" queue-time="0" op-digest="7da16842ab2328e41f737cab5e5fc89c"/>
-            <lrm_rsc_op id="Fencing_monitor_60000" operation_key="Fencing_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="13:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;13:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="19" rc-code="0" op-status="0" interval="60000" last-rc-change="1588951276" exec-time="24" queue-time="0" op-digest="f85d77708ad4ea02a9099e1e548aff0d"/>
+            <lrm_rsc_op id="Fencing_monitor_60000" operation_key="Fencing_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="20:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;20:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="cluster01" call-id="20" rc-code="0" op-status="0" interval="60000" last-rc-change="1613056690" exec-time="0" queue-time="0" op-digest="d4ee02dc1c7ce16eb0f72e06c2cc9193"/>
           </lrm_resource>
           <lrm_resource id="dummy" type="Dummy" class="ocf" provider="pacemaker">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="3:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;3:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="16" rc-code="0" op-status="0" interval="0" last-rc-change="1588951272" last-run="1588951272" exec-time="6048" queue-time="0" op-digest="aa0f9b7caf28600646551adb55bd9b95" op-force-restart=" envfile  op_sleep  passwd  state " op-restart-digest="aa0f9b7caf28600646551adb55bd9b95" op-secure-params=" passwd " op-secure-digest="aa0f9b7caf28600646551adb55bd9b95"/>
@@ -185,6 +208,11 @@
             <lrm_rsc_op id="mysql-proxy_last_0" operation_key="mysql-proxy_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.4.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1596126852" last-run="1596126852" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
             <lrm_rsc_op id="mysql-proxy_monitor_10000" operation_key="mysql-proxy_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.4.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" last-rc-change="1596126852" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
           </lrm_resource>
+          <lrm_resource id="promotable-rsc" class="ocf" provider="pacemaker" type="Stateful">
+            <lrm_rsc_op id="promotable-rsc_last_0" operation_key="promotable-rsc_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1613058809" last-run="1613058809" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="promotable-rsc_post_notify_start_0" operation_key="promotable-rsc_notify_0" operation="notify" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1613058809" last-run="1613058809" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="promotable-rsc_monitor_10000" operation_key="promotable-rsc_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="4" rc-code="0" op-status="0" interval="10000" last-rc-change="1613058809" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4"/>
+          </lrm_resource>
         </lrm_resources>
       </lrm>
       <transient_attributes id="1">
diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp
index dd20116..c223b7f 100644
--- a/cts/cli/regression.crm_mon.exp
+++ b/cts/cli/regression.crm_mon.exp
@@ -5,7 +5,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
@@ -20,6 +20,9 @@ Active Resources:
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
+  * Clone Set: promotable-clone [promotable-rsc] (promotable):
+    * Masters: [ cluster02 ]
+    * Slaves: [ cluster01 ]
 =#=#=#= End test: Basic text output - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output
 =#=#=#= Begin test: XML output =#=#=#=
@@ -30,12 +33,12 @@ Active Resources:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
@@ -112,6 +115,17 @@ Active Resources:
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
+    <clone id="promotable-clone" multi_state="true" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Master" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster02" id="2" cached="true"/>
+      </resource>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Slave" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster01" id="1" cached="true"/>
+      </resource>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+    </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
@@ -142,6 +156,12 @@ Active Resources:
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
+      <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
+        <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="5" task="cancel" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="6" task="promote" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="7" task="monitor" rc="8" rc_text="master" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
+      </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
@@ -150,7 +170,7 @@ Active Resources:
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
-        <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/>
+        <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/>
@@ -159,6 +179,10 @@ Active Resources:
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
+      <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
+        <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
+      </resource_history>
     </node>
   </node_history>
   <bans>
@@ -175,7 +199,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
@@ -187,6 +211,9 @@ Active Resources:
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
+  * Clone Set: promotable-clone [promotable-rsc] (promotable):
+    * Masters: [ cluster02 ]
+    * Slaves: [ cluster01 ]
 =#=#=#= End test: Basic text output without node section - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output without node section
 =#=#=#= Begin test: XML output without the node section =#=#=#=
@@ -197,7 +224,7 @@ Active Resources:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <resources>
@@ -272,6 +299,17 @@ Active Resources:
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
+    <clone id="promotable-clone" multi_state="true" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Master" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster02" id="2" cached="true"/>
+      </resource>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Slave" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster01" id="1" cached="true"/>
+      </resource>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+    </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
@@ -302,6 +340,12 @@ Active Resources:
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
+      <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
+        <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="5" task="cancel" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="6" task="promote" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="7" task="monitor" rc="8" rc_text="master" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
+      </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
@@ -310,7 +354,7 @@ Active Resources:
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
-        <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/>
+        <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/>
@@ -319,6 +363,10 @@ Active Resources:
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
+      <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
+        <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
+      </resource_history>
     </node>
   </node_history>
   <bans>
@@ -340,7 +388,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
@@ -355,6 +403,9 @@ Active Resources:
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
+  * Clone Set: promotable-clone [promotable-rsc] (promotable):
+    * Masters: [ cluster02 ]
+    * Slaves: [ cluster01 ]
 
 Node Attributes:
   * Node: cluster01:
@@ -378,18 +429,26 @@ Operations:
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (4) monitor: interval="10000ms"
+      * (5) cancel: interval="10000ms"
+      * (6) promote
+      * (7) monitor: interval="5000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
-      * (19) monitor: interval="60000ms"
+      * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (2) start
+      * (4) monitor: interval="10000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
@@ -402,7 +461,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 (1) cluster02 (2) ]
@@ -427,6 +486,12 @@ Active Resources:
       * mysql-proxy	(lsb:mysql-proxy):	 Stopped
     * Resource Group: mysql-group:4:
       * mysql-proxy	(lsb:mysql-proxy):	 Stopped
+  * Clone Set: promotable-clone [promotable-rsc] (promotable):
+    * promotable-rsc	(ocf::pacemaker:Stateful):	 Master cluster02
+    * promotable-rsc	(ocf::pacemaker:Stateful):	 Slave cluster01
+    * promotable-rsc	(ocf::pacemaker:Stateful):	 Stopped
+    * promotable-rsc	(ocf::pacemaker:Stateful):	 Stopped
+    * promotable-rsc	(ocf::pacemaker:Stateful):	 Stopped
 
 Node Attributes:
   * Node: cluster01 (1):
@@ -450,18 +515,26 @@ Operations:
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (4) monitor: interval="10000ms"
+      * (5) cancel: interval="10000ms"
+      * (6) promote
+      * (7) monitor: interval="5000ms"
   * Node: cluster01 (1):
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
-      * (19) monitor: interval="60000ms"
+      * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (2) start
+      * (4) monitor: interval="10000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01 (1)
@@ -474,7 +547,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
@@ -489,6 +562,9 @@ Active Resources:
     * 1/1	(ocf::heartbeat:IPaddr):	Active cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
+  * Clone Set: promotable-clone [promotable-rsc] (promotable):
+    * Masters: [ cluster02 ]
+    * Slaves: [ cluster01 ]
 
 Node Attributes:
   * Node: cluster01:
@@ -512,18 +588,26 @@ Operations:
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (4) monitor: interval="10000ms"
+      * (5) cancel: interval="10000ms"
+      * (6) promote
+      * (7) monitor: interval="5000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
-      * (19) monitor: interval="60000ms"
+      * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (2) start
+      * (4) monitor: interval="10000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
@@ -536,7 +620,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01: online:
@@ -544,6 +628,7 @@ Node List:
       * ping	(ocf::pacemaker:ping):	 Started
       * Fencing	(stonith:fence_xvm):	 Started
       * mysql-proxy	(lsb:mysql-proxy):	 Started
+      * promotable-rsc	(ocf::pacemaker:Stateful):	 Slave
   * Node cluster02: online:
     * Resources:
       * ping	(ocf::pacemaker:ping):	 Started
@@ -551,6 +636,7 @@ Node List:
       * Public-IP	(ocf::heartbeat:IPaddr):	 Started
       * Email	(lsb:exim):	 Started
       * mysql-proxy	(lsb:mysql-proxy):	 Started
+      * promotable-rsc	(ocf::pacemaker:Stateful):	 Master
   * GuestNode httpd-bundle-0@: OFFLINE:
     * Resources:
   * GuestNode httpd-bundle-1@: OFFLINE:
@@ -580,18 +666,26 @@ Operations:
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (4) monitor: interval="10000ms"
+      * (5) cancel: interval="10000ms"
+      * (6) promote
+      * (7) monitor: interval="5000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
-      * (19) monitor: interval="60000ms"
+      * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (2) start
+      * (4) monitor: interval="10000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
@@ -604,12 +698,13 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01: online:
     * Resources:
       * 1	(lsb:mysql-proxy):	Active 
+      * 1	(ocf::pacemaker:Stateful):	Active 
       * 1	(ocf::pacemaker:ping):	Active 
       * 1	(stonith:fence_xvm):	Active 
   * Node cluster02: online:
@@ -618,6 +713,7 @@ Node List:
       * 1	(lsb:mysql-proxy):	Active 
       * 1	(ocf::heartbeat:IPaddr):	Active 
       * 1	(ocf::pacemaker:Dummy):	Active 
+      * 1	(ocf::pacemaker:Stateful):	Active 
       * 1	(ocf::pacemaker:ping):	Active 
 
 Node Attributes:
@@ -642,18 +738,26 @@ Operations:
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (4) monitor: interval="10000ms"
+      * (5) cancel: interval="10000ms"
+      * (6) promote
+      * (7) monitor: interval="5000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
-      * (19) monitor: interval="60000ms"
+      * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (2) start
+      * (4) monitor: interval="10000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
@@ -667,11 +771,11 @@ Negative Location Constraints:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member">
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member">
       <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
@@ -681,8 +785,11 @@ Negative Location Constraints:
       <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Slave" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster01" id="1" cached="true"/>
+      </resource>
     </node>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member">
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member">
       <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
@@ -698,6 +805,9 @@ Negative Location Constraints:
       <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Master" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster02" id="2" cached="true"/>
+      </resource>
     </node>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
@@ -753,6 +863,17 @@ Negative Location Constraints:
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
+    <clone id="promotable-clone" multi_state="true" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Master" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster02" id="2" cached="true"/>
+      </resource>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Slave" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster01" id="1" cached="true"/>
+      </resource>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+    </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
@@ -783,6 +904,12 @@ Negative Location Constraints:
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
+      <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
+        <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="5" task="cancel" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="6" task="promote" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="7" task="monitor" rc="8" rc_text="master" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
+      </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
@@ -791,7 +918,7 @@ Negative Location Constraints:
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
-        <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/>
+        <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/>
@@ -800,6 +927,10 @@ Negative Location Constraints:
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
+      <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
+        <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
+      </resource_history>
     </node>
   </node_history>
   <bans>
@@ -816,7 +947,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 ]
@@ -827,6 +958,8 @@ Active Resources:
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 ]
+  * Clone Set: promotable-clone [promotable-rsc] (promotable):
+    * Slaves: [ cluster01 ]
 
 Node Attributes:
   * Node: cluster01:
@@ -840,12 +973,15 @@ Operations:
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
-      * (19) monitor: interval="60000ms"
+      * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (2) start
+      * (4) monitor: interval="10000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
@@ -859,11 +995,11 @@ Negative Location Constraints:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
@@ -918,6 +1054,14 @@ Negative Location Constraints:
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
+    <clone id="promotable-clone" multi_state="true" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Slave" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster01" id="1" cached="true"/>
+      </resource>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+    </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
@@ -933,7 +1077,7 @@ Negative Location Constraints:
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
-        <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/>
+        <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/>
@@ -942,6 +1086,10 @@ Negative Location Constraints:
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
+      <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
+        <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
+        <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
+      </resource_history>
     </node>
   </node_history>
   <bans>
@@ -958,7 +1106,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster02 ]
@@ -972,6 +1120,8 @@ Active Resources:
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster02 ]
+  * Clone Set: promotable-clone [promotable-rsc] (promotable):
+    * Masters: [ cluster02 ]
 
 Node Attributes:
   * Node: cluster02:
@@ -992,6 +1142,11 @@ Operations:
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
+    * promotable-rsc: migration-threshold=1000000:
+      * (4) monitor: interval="10000ms"
+      * (5) cancel: interval="10000ms"
+      * (6) promote
+      * (7) monitor: interval="5000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
@@ -1005,11 +1160,11 @@ Negative Location Constraints:
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
-    <resources_configured number="27" disabled="4" blocked="0"/>
+    <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
@@ -1072,6 +1227,14 @@ Negative Location Constraints:
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
+    <clone id="promotable-clone" multi_state="true" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Master" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster02" id="2" cached="true"/>
+      </resource>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="promotable-rsc" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+    </clone>
   </resources>
   <node_attributes>
     <node name="cluster02">
@@ -1098,6 +1261,12 @@ Negative Location Constraints:
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
c563b9
+      <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
c563b9
+        <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
c563b9
+        <operation_history call="5" task="cancel" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
c563b9
+        <operation_history call="6" task="promote" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
c563b9
+        <operation_history call="7" task="monitor" rc="8" rc_text="master" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
c563b9
+      </resource_history>
c563b9
     </node>
c563b9
   </node_history>
c563b9
   <bans>
c563b9
@@ -1114,7 +1283,7 @@ Cluster Summary:
c563b9
   * Last updated:
c563b9
   * Last change:
c563b9
   * 5 nodes configured
c563b9
-  * 27 resource instances configured (4 DISABLED)
c563b9
+  * 32 resource instances configured (4 DISABLED)
c563b9
 
c563b9
 Node List:
c563b9
   * Online: [ cluster01 cluster02 ]
c563b9
@@ -1133,7 +1302,7 @@ Operations:
c563b9
   * Node: cluster01:
c563b9
     * Fencing: migration-threshold=1000000:
c563b9
       * (15) start
c563b9
-      * (19) monitor: interval="60000ms"
c563b9
+      * (20) monitor: interval="60000ms"
c563b9
 =#=#=#= End test: Complete text output filtered by resource tag - OK (0) =#=#=#=
c563b9
 * Passed: crm_mon        - Complete text output filtered by resource tag
c563b9
 =#=#=#= Begin test: XML output filtered by resource tag =#=#=#=
c563b9
@@ -1144,12 +1313,12 @@ Operations:
c563b9
     <last_update time=""/>
c563b9
     <last_change time=""/>
c563b9
     <nodes_configured number="5"/>
c563b9
-    <resources_configured number="27" disabled="4" blocked="0"/>
c563b9
+    <resources_configured number="32" disabled="4" blocked="0"/>
c563b9
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
c563b9
   </summary>
c563b9
   <nodes>
c563b9
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
c563b9
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
c563b9
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
c563b9
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
c563b9
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
c563b9
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
c563b9
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
c563b9
@@ -1172,7 +1341,7 @@ Operations:
c563b9
     <node name="cluster01">
c563b9
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
c563b9
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
c563b9
-        <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/>
c563b9
+        <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
c563b9
       </resource_history>
c563b9
     </node>
c563b9
   </node_history>
c563b9
@@ -1187,7 +1356,7 @@ Cluster Summary:
c563b9
   * Last updated:
c563b9
   * Last change:
c563b9
   * 5 nodes configured
c563b9
-  * 27 resource instances configured (4 DISABLED)
c563b9
+  * 32 resource instances configured (4 DISABLED)
c563b9
 
c563b9
 Active Resources:
c563b9
   * No active resources
c563b9
@@ -1201,7 +1370,7 @@ Active Resources:
c563b9
     <last_update time=""/>
c563b9
     <last_change time=""/>
c563b9
     <nodes_configured number="5"/>
c563b9
-    <resources_configured number="27" disabled="4" blocked="0"/>
c563b9
+    <resources_configured number="32" disabled="4" blocked="0"/>
c563b9
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
c563b9
   </summary>
c563b9
   <nodes/>
c563b9
@@ -1249,7 +1418,7 @@ Cluster Summary:
c563b9
   * Last updated:
c563b9
   * Last change:
c563b9
   * 5 nodes configured
c563b9
-  * 27 resource instances configured (4 DISABLED)
c563b9
+  * 32 resource instances configured (4 DISABLED)
c563b9
 
c563b9
 Node List:
c563b9
   * Online: [ cluster01 cluster02 ]
c563b9
@@ -1273,6 +1442,9 @@ Full List of Resources:
c563b9
     * Email	(lsb:exim):	 Started cluster02
c563b9
   * Clone Set: mysql-clone-group [mysql-group]:
c563b9
     * Started: [ cluster01 cluster02 ]
c563b9
+  * Clone Set: promotable-clone [promotable-rsc] (promotable):
c563b9
+    * Masters: [ cluster02 ]
c563b9
+    * Slaves: [ cluster01 ]
c563b9
 =#=#=#= End test: Basic text output with inactive resources - OK (0) =#=#=#=
c563b9
 * Passed: crm_mon        - Basic text output with inactive resources
c563b9
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by node =#=#=#=
c563b9
@@ -1282,7 +1454,7 @@ Cluster Summary:
c563b9
   * Last updated:
c563b9
   * Last change:
c563b9
   * 5 nodes configured
c563b9
-  * 27 resource instances configured (4 DISABLED)
c563b9
+  * 32 resource instances configured (4 DISABLED)
c563b9
 
c563b9
 Node List:
c563b9
   * Online: [ cluster02 ]
c563b9
@@ -1305,6 +1477,8 @@ Full List of Resources:
c563b9
     * Email	(lsb:exim):	 Started cluster02
c563b9
   * Clone Set: mysql-clone-group [mysql-group]:
c563b9
     * Started: [ cluster02 ]
c563b9
+  * Clone Set: promotable-clone [promotable-rsc] (promotable):
c563b9
+    * Masters: [ cluster02 ]
c563b9
 =#=#=#= End test: Basic text output with inactive resources, filtered by node - OK (0) =#=#=#=
c563b9
 * Passed: crm_mon        - Basic text output with inactive resources, filtered by node
c563b9
 =#=#=#= Begin test: Complete text output filtered by primitive resource =#=#=#=
c563b9
@@ -1314,7 +1488,7 @@ Cluster Summary:
c563b9
   * Last updated:
c563b9
   * Last change:
c563b9
   * 5 nodes configured
c563b9
-  * 27 resource instances configured (4 DISABLED)
c563b9
+  * 32 resource instances configured (4 DISABLED)
c563b9
 
c563b9
 Node List:
c563b9
   * Online: [ cluster01 cluster02 ]
c563b9
@@ -1333,7 +1507,7 @@ Operations:
c563b9
   * Node: cluster01:
c563b9
     * Fencing: migration-threshold=1000000:
c563b9
       * (15) start
c563b9
-      * (19) monitor: interval="60000ms"
c563b9
+      * (20) monitor: interval="60000ms"
c563b9
 =#=#=#= End test: Complete text output filtered by primitive resource - OK (0) =#=#=#=
c563b9
 * Passed: crm_mon        - Complete text output filtered by primitive resource
c563b9
 =#=#=#= Begin test: XML output filtered by primitive resource =#=#=#=
c563b9
@@ -1344,12 +1518,12 @@ Operations:
c563b9
     <last_update time=""/>
c563b9
     <last_change time=""/>
c563b9
     <nodes_configured number="5"/>
c563b9
-    <resources_configured number="27" disabled="4" blocked="0"/>
c563b9
+    <resources_configured number="32" disabled="4" blocked="0"/>
c563b9
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
c563b9
   </summary>
c563b9
   <nodes>
c563b9
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
c563b9
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
c563b9
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
c563b9
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
c563b9
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
c563b9
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
c563b9
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
c563b9
@@ -1372,7 +1546,7 @@ Operations:
c563b9
     <node name="cluster01">
c563b9
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
c563b9
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
c563b9
-        <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/>
c563b9
+        <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
c563b9
       </resource_history>
c563b9
     </node>
c563b9
   </node_history>
c563b9
@@ -1387,7 +1561,7 @@ Cluster Summary:
c563b9
   * Last updated:
c563b9
   * Last change:
c563b9
   * 5 nodes configured
c563b9
-  * 27 resource instances configured (4 DISABLED)
c563b9
+  * 32 resource instances configured (4 DISABLED)
c563b9
 
c563b9
 Node List:
c563b9
   * Online: [ cluster01 cluster02 ]
c563b9
@@ -1420,12 +1594,12 @@ Operations:
c563b9
     <last_update time=""/>
c563b9
     <last_change time=""/>
c563b9
     <nodes_configured number="5"/>
c563b9
-    <resources_configured number="27" disabled="4" blocked="0"/>
c563b9
+    <resources_configured number="32" disabled="4" blocked="0"/>
c563b9
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
c563b9
   </summary>
c563b9
   <nodes>
c563b9
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
c563b9
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
c563b9
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
c563b9
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
c563b9
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
c563b9
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
c563b9
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
c563b9
@@ -1470,7 +1644,7 @@ Cluster Summary:
c563b9
   * Last updated:
c563b9
   * Last change:
c563b9
   * 5 nodes configured
c563b9
-  * 27 resource instances configured (4 DISABLED)
c563b9
+  * 32 resource instances configured (4 DISABLED)
c563b9
 
c563b9
 Node List:
c563b9
   * Online: [ cluster01 cluster02 ]
c563b9
@@ -1500,12 +1674,12 @@ Operations:
c563b9
     <last_update time=""/>
c563b9
     <last_change time=""/>
c563b9
     <nodes_configured number="5"/>
c563b9
-    <resources_configured number="27" disabled="4" blocked="0"/>
c563b9
+    <resources_configured number="32" disabled="4" blocked="0"/>
c563b9
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
c563b9
   </summary>
c563b9
   <nodes>
c563b9
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
c563b9
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
c563b9
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
c563b9
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
c563b9
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
c563b9
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
c563b9
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
c563b9
@@ -1544,7 +1718,7 @@ Cluster Summary:
c563b9
   * Last updated:
c563b9
   * Last change:
c563b9
   * 5 nodes configured
c563b9
-  * 27 resource instances configured (4 DISABLED)
c563b9
+  * 32 resource instances configured (4 DISABLED)
c563b9
 
c563b9
 Node List:
c563b9
   * Online: [ cluster01 cluster02 ]
c563b9
@@ -1579,12 +1753,12 @@ Operations:
c563b9
     <last_update time=""/>
c563b9
     <last_change time=""/>
c563b9
     <nodes_configured number="5"/>
c563b9
-    <resources_configured number="27" disabled="4" blocked="0"/>
c563b9
+    <resources_configured number="32" disabled="4" blocked="0"/>
c563b9
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
c563b9
   </summary>
c563b9
   <nodes>
c563b9
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
c563b9
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
c563b9
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
c563b9
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
c563b9
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
c563b9
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
c563b9
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
c563b9
@@ -1633,7 +1807,7 @@ Cluster Summary:
c563b9
   * Last updated:
c563b9
   * Last change:
c563b9
   * 5 nodes configured
c563b9
-  * 27 resource instances configured (4 DISABLED)
c563b9
+  * 32 resource instances configured (4 DISABLED)
c563b9
 
c563b9
 Node List:
c563b9
   * Online: [ cluster01 cluster02 ]
c563b9
@@ -1668,12 +1842,12 @@ Operations:
c563b9
     <last_update time=""/>
c563b9
     <last_change time=""/>
c563b9
     <nodes_configured number="5"/>
c563b9
-    <resources_configured number="27" disabled="4" blocked="0"/>
c563b9
+    <resources_configured number="32" disabled="4" blocked="0"/>
c563b9
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
c563b9
   </summary>
c563b9
   <nodes>
c563b9
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
c563b9
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
c563b9
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
c563b9
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
c563b9
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
c563b9
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
c563b9
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
c563b9
@@ -1722,7 +1896,7 @@ Cluster Summary:
c563b9
   * Last updated:
c563b9
   * Last change:
c563b9
   * 5 nodes configured
c563b9
-  * 27 resource instances configured (4 DISABLED)
c563b9
+  * 32 resource instances configured (4 DISABLED)
c563b9
 
c563b9
 Node List:
c563b9
   * Online: [ cluster01 (1) cluster02 (2) ]
c563b9
@@ -1757,12 +1931,12 @@ Operations:
c563b9
     <last_update time=""/>
c563b9
     <last_change time=""/>
c563b9
     <nodes_configured number="5"/>
c563b9
-    <resources_configured number="27" disabled="4" blocked="0"/>
c563b9
+    <resources_configured number="32" disabled="4" blocked="0"/>
c563b9
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
c563b9
   </summary>
c563b9
   <nodes>
c563b9
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
c563b9
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
c563b9
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
c563b9
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
c563b9
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
c563b9
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
c563b9
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
c563b9
@@ -1808,7 +1982,7 @@ Cluster Summary:
c563b9
   * Last updated:
c563b9
   * Last change:
c563b9
   * 5 nodes configured
c563b9
-  * 27 resource instances configured (4 DISABLED)
c563b9
+  * 32 resource instances configured (4 DISABLED)
c563b9
 
c563b9
 Node List:
c563b9
   * Online: [ cluster01 cluster02 ]
c563b9
@@ -1825,12 +1999,12 @@ Active Resources:
c563b9
     <last_update time=""/>
c563b9
     <last_change time=""/>
c563b9
     <nodes_configured number="5"/>
c563b9
-    <resources_configured number="27" disabled="4" blocked="0"/>
c563b9
+    <resources_configured number="32" disabled="4" blocked="0"/>
c563b9
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
c563b9
   </summary>
c563b9
   <nodes>
c563b9
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
c563b9
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
c563b9
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
c563b9
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
c563b9
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
c563b9
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
c563b9
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
c563b9
@@ -1857,7 +2031,7 @@ Cluster Summary:
c563b9
   * Last updated:
c563b9
   * Last change:
c563b9
   * 5 nodes configured
c563b9
-  * 27 resource instances configured (4 DISABLED)
c563b9
+  * 32 resource instances configured (4 DISABLED)
c563b9
 
c563b9
 Node List:
c563b9
   * Online: [ cluster01 cluster02 ]
c563b9
@@ -1877,7 +2051,7 @@ Cluster Summary:
c563b9
   * Last updated:
c563b9
   * Last change:
c563b9
   * 5 nodes configured
c563b9
-  * 27 resource instances configured (4 DISABLED)
c563b9
+  * 32 resource instances configured (4 DISABLED)
c563b9
 
c563b9
 Node List:
c563b9
   * Online: [ cluster01 cluster02 ]
c563b9
@@ -1897,12 +2071,12 @@ Full List of Resources:
c563b9
     <last_update time=""/>
c563b9
     <last_change time=""/>
c563b9
     <nodes_configured number="5"/>
c563b9
-    <resources_configured number="27" disabled="4" blocked="0"/>
c563b9
+    <resources_configured number="32" disabled="4" blocked="0"/>
c563b9
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
c563b9
   </summary>
c563b9
   <nodes>
c563b9
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
c563b9
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
c563b9
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
c563b9
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
c563b9
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
c563b9
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
c563b9
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
c563b9
@@ -1950,7 +2124,7 @@ Cluster Summary:
c563b9
   * Last updated:
c563b9
   * Last change:
c563b9
   * 5 nodes configured
c563b9
-  * 27 resource instances configured (4 DISABLED)
c563b9
+  * 32 resource instances configured (4 DISABLED)
c563b9
 
c563b9
 Node List:
c563b9
   * Online: [ cluster01 cluster02 ]
c563b9
@@ -1969,12 +2143,12 @@ Full List of Resources:
c563b9
     <last_update time=""/>
c563b9
     <last_change time=""/>
c563b9
     <nodes_configured number="5"/>
c563b9
-    <resources_configured number="27" disabled="4" blocked="0"/>
c563b9
+    <resources_configured number="32" disabled="4" blocked="0"/>
c563b9
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
c563b9
   </summary>
c563b9
   <nodes>
c563b9
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
c563b9
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
c563b9
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
c563b9
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
c563b9
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
c563b9
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
c563b9
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
c563b9
@@ -2007,7 +2181,7 @@ Cluster Summary:
c563b9
   * Last updated:
c563b9
   * Last change:
c563b9
   * 5 nodes configured
c563b9
-  * 27 resource instances configured (4 DISABLED)
c563b9
+  * 32 resource instances configured (4 DISABLED)
c563b9
 
c563b9
 Node List:
c563b9
   * Online: [ cluster01 cluster02 ]
c563b9
@@ -2026,12 +2200,12 @@ Full List of Resources:
c563b9
     <last_update time=""/>
c563b9
     <last_change time=""/>
c563b9
     <nodes_configured number="5"/>
c563b9
-    <resources_configured number="27" disabled="4" blocked="0"/>
c563b9
+    <resources_configured number="32" disabled="4" blocked="0"/>
c563b9
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
c563b9
   </summary>
c563b9
   <nodes>
c563b9
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
c563b9
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
c563b9
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
c563b9
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
c563b9
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
c563b9
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
c563b9
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
c563b9
@@ -2064,7 +2238,7 @@ Cluster Summary:
c563b9
   * Last updated:
c563b9
   * Last change:
c563b9
   * 5 nodes configured
c563b9
-  * 27 resource instances configured (4 DISABLED)
c563b9
+  * 32 resource instances configured (4 DISABLED)
c563b9
 
c563b9
 Node List:
c563b9
   * Online: [ cluster01 cluster02 ]
c563b9
@@ -2083,12 +2257,12 @@ Full List of Resources:
c563b9
     <last_update time=""/>
c563b9
     <last_change time=""/>
c563b9
     <nodes_configured number="5"/>
c563b9
-    <resources_configured number="27" disabled="4" blocked="0"/>
c563b9
+    <resources_configured number="32" disabled="4" blocked="0"/>
c563b9
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
c563b9
   </summary>
c563b9
   <nodes>
c563b9
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
c563b9
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
c563b9
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
c563b9
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
c563b9
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
c563b9
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
c563b9
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
c563b9
@@ -2121,7 +2295,7 @@ Cluster Summary:
c563b9
   * Last updated:
c563b9
   * Last change:
c563b9
   * 5 nodes configured
c563b9
-  * 27 resource instances configured (4 DISABLED)
c563b9
+  * 32 resource instances configured (4 DISABLED)
c563b9
 
c563b9
 Node List:
c563b9
   * Online: [ cluster01 cluster02 ]
c563b9
@@ -2144,12 +2318,12 @@ Full List of Resources:
c563b9
     <last_update time=""/>
c563b9
     <last_change time=""/>
c563b9
     <nodes_configured number="5"/>
c563b9
-    <resources_configured number="27" disabled="4" blocked="0"/>
c563b9
+    <resources_configured number="32" disabled="4" blocked="0"/>
c563b9
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
c563b9
   </summary>
c563b9
   <nodes>
c563b9
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
c563b9
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
c563b9
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
c563b9
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
c563b9
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
c563b9
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
c563b9
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
c563b9
@@ -2188,7 +2362,7 @@ Cluster Summary:
c563b9
   * Last updated:
c563b9
   * Last change:
c563b9
   * 5 nodes configured
c563b9
-  * 27 resource instances configured (4 DISABLED)
c563b9
+  * 32 resource instances configured (4 DISABLED)
c563b9
 
c563b9
 Node List:
c563b9
   * Online: [ cluster01 (1) cluster02 (2) ]
c563b9
@@ -2232,12 +2406,12 @@ Operations:
c563b9
     <last_update time=""/>
c563b9
     <last_change time=""/>
c563b9
     <nodes_configured number="5"/>
c563b9
-    <resources_configured number="27" disabled="4" blocked="0"/>
c563b9
+    <resources_configured number="32" disabled="4" blocked="0"/>
c563b9
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
c563b9
   </summary>
c563b9
   <nodes>
c563b9
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
c563b9
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
c563b9
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
c563b9
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
c563b9
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
c563b9
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
c563b9
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
c563b9
@@ -2279,7 +2453,7 @@ Cluster Summary:
c563b9
   * Last updated:
c563b9
   * Last change:
c563b9
   * 5 nodes configured
c563b9
-  * 27 resource instances configured (4 DISABLED)
c563b9
+  * 32 resource instances configured (4 DISABLED)
c563b9
 
c563b9
 Node List:
c563b9
   * Online: [ cluster01 (1) cluster02 (2) ]
c563b9
@@ -2323,12 +2497,12 @@ Operations:
c563b9
     <last_update time=""/>
c563b9
     <last_change time=""/>
c563b9
     <nodes_configured number="5"/>
c563b9
-    <resources_configured number="27" disabled="4" blocked="0"/>
c563b9
+    <resources_configured number="32" disabled="4" blocked="0"/>
c563b9
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
c563b9
   </summary>
c563b9
   <nodes>
c563b9
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
c563b9
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
c563b9
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
c563b9
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
c563b9
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
c563b9
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
c563b9
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
c563b9
@@ -2390,7 +2564,7 @@ Cluster Summary:
c563b9
   * Last updated:
c563b9
   * Last change:
c563b9
   * 5 nodes configured
c563b9
-  * 27 resource instances configured (4 DISABLED)
c563b9
+  * 32 resource instances configured (4 DISABLED)
c563b9
 
c563b9
 Node List:
c563b9
   * Online: [ cluster01 (1) cluster02 (2) ]
c563b9
@@ -2426,12 +2600,12 @@ Operations:
c563b9
     <last_update time=""/>
c563b9
     <last_change time=""/>
c563b9
     <nodes_configured number="5"/>
c563b9
-    <resources_configured number="27" disabled="4" blocked="0"/>
c563b9
+    <resources_configured number="32" disabled="4" blocked="0"/>
c563b9
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
c563b9
   </summary>
c563b9
   <nodes>
c563b9
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
c563b9
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
c563b9
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
c563b9
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
c563b9
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
c563b9
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
c563b9
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
c563b9
@@ -2479,7 +2653,7 @@ Cluster Summary:
c563b9
   * Last updated:
c563b9
   * Last change:
c563b9
   * 5 nodes configured
c563b9
-  * 27 resource instances configured (4 DISABLED)
c563b9
+  * 32 resource instances configured (4 DISABLED)
c563b9
 
c563b9
 Node List:
c563b9
   * Online: [ cluster01 (1) cluster02 (2) ]
c563b9
@@ -2523,12 +2697,12 @@ Operations:
c563b9
     <last_update time=""/>
c563b9
     <last_change time=""/>
c563b9
     <nodes_configured number="5"/>
c563b9
-    <resources_configured number="27" disabled="4" blocked="0"/>
c563b9
+    <resources_configured number="32" disabled="4" blocked="0"/>
c563b9
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
c563b9
   </summary>
c563b9
   <nodes>
c563b9
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
c563b9
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
c563b9
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
c563b9
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
c563b9
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
c563b9
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
c563b9
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
c563b9
@@ -2590,7 +2764,7 @@ Cluster Summary:
c563b9
   * Last updated:
c563b9
   * Last change:
c563b9
   * 5 nodes configured
c563b9
-  * 27 resource instances configured (4 DISABLED)
c563b9
+  * 32 resource instances configured (4 DISABLED)
c563b9
 
c563b9
 Node List:
c563b9
   * Online: [ cluster01 (1) cluster02 (2) ]
c563b9
@@ -2626,12 +2800,12 @@ Operations:
c563b9
     <last_update time=""/>
c563b9
     <last_change time=""/>
c563b9
     <nodes_configured number="5"/>
c563b9
-    <resources_configured number="27" disabled="4" blocked="0"/>
c563b9
+    <resources_configured number="32" disabled="4" blocked="0"/>
c563b9
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
c563b9
   </summary>
c563b9
   <nodes>
c563b9
-    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
c563b9
-    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
c563b9
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="4" type="member"/>
c563b9
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="6" type="member"/>
c563b9
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
c563b9
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
c563b9
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
@@ -3083,7 +3257,7 @@ Cluster Summary:
   * Last updated:
   * Last change:
   * 5 nodes configured
-  * 27 resource instances configured (4 DISABLED)
+  * 32 resource instances configured (4 DISABLED)
 
               *** Resource management is DISABLED ***
   The cluster will not attempt to start, stop or recover services
@@ -3114,5 +3288,8 @@ Full List of Resources:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02 (unmanaged)
     * Resource Group: mysql-group:1 (unmanaged):
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01 (unmanaged)
+  * Clone Set: promotable-clone [promotable-rsc] (promotable) (unmanaged):
+    * promotable-rsc	(ocf::pacemaker:Stateful):	 Master cluster02 (unmanaged)
+    * promotable-rsc	(ocf::pacemaker:Stateful):	 Slave cluster01 (unmanaged)
 =#=#=#= End test: Text output of all resources with maintenance-mode enabled - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of all resources with maintenance-mode enabled
diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp
index 1afe596..708c340 100644
--- a/cts/cli/regression.tools.exp
+++ b/cts/cli/regression.tools.exp
@@ -4077,3 +4077,21 @@ Resources colocated with clone:
 5
 =#=#=#= End test: List guest,remote nodes - OK (0) =#=#=#=
 * Passed: crmadmin       - List guest,remote nodes
+=#=#=#= Begin test: List a promotable clone resource =#=#=#=
+resource promotable-clone is running on: cluster02
+resource promotable-clone is running on: cluster01
+=#=#=#= End test: List a promotable clone resource - OK (0) =#=#=#=
+* Passed: crm_resource   - List a promotable clone resource
+=#=#=#= Begin test: List the primitive of a promotable clone resource =#=#=#=
+resource promotable-rsc is running on: cluster02 Master
+resource promotable-rsc is running on: cluster01 Master
+=#=#=#= End test: List the primitive of a promotable clone resource - OK (0) =#=#=#=
+* Passed: crm_resource   - List the primitive of a promotable clone resource
+=#=#=#= Begin test: List a single instance of a promotable clone resource =#=#=#=
+resource promotable-rsc:0 is running on: cluster02 Master
+=#=#=#= End test: List a single instance of a promotable clone resource - OK (0) =#=#=#=
+* Passed: crm_resource   - List a single instance of a promotable clone resource
+=#=#=#= Begin test: List another instance of a promotable clone resource =#=#=#=
+resource promotable-rsc:1 is running on: cluster01
+=#=#=#= End test: List another instance of a promotable clone resource - OK (0) =#=#=#=
+* Passed: crm_resource   - List another instance of a promotable clone resource
diff --git a/cts/cts-cli.in b/cts/cts-cli.in
index 8e2dbe5..6f7eb80 100755
--- a/cts/cts-cli.in
+++ b/cts/cts-cli.in
@@ -831,6 +831,26 @@ function test_tools() {
     test_assert $CRM_EX_OK 0
 
     unset CIB_file
+
+    export CIB_file="$test_home/cli/crm_mon.xml"
+
+    desc="List a promotable clone resource"
+    cmd="crm_resource --locate -r promotable-clone"
+    test_assert $CRM_EX_OK 0
+
+    desc="List the primitive of a promotable clone resource"
+    cmd="crm_resource --locate -r promotable-rsc"
+    test_assert $CRM_EX_OK 0
+
+    desc="List a single instance of a promotable clone resource"
+    cmd="crm_resource --locate -r promotable-rsc:0"
+    test_assert $CRM_EX_OK 0
+
+    desc="List another instance of a promotable clone resource"
+    cmd="crm_resource --locate -r promotable-rsc:1"
+    test_assert $CRM_EX_OK 0
+
+    unset CIB_file
 }
 
 INVALID_PERIODS=(
-- 
1.8.3.1


From d1bb0758726c09fd78efbc30c7eb46559e9c10e2 Mon Sep 17 00:00:00 2001
From: Chris Lumens <clumens@redhat.com>
Date: Thu, 11 Feb 2021 15:09:54 -0500
Subject: [PATCH 3/3] Fix: Correct output of "crm_resource --locate" in case of
 clones.

For non-clone resources, the rsc parameter passed to
resource_search_list_* is accurate - it is the resource object for the
name given on the command line.  For clone resources, this parameter is
incorrect: it is a single instance of the clone, no matter which
instance was asked for on the command line.

This typically doesn't matter, but it results in incorrect output when
promotable clones are searched for: the "Master" text may appear for
all of the instances or for none of them, because every iteration
through the print loop consults that same rsc parameter instead of the
instance actually being printed.

The fix is to change cli_resource_search to return a list of
node/promoted pairs so we already have all the information we need.
Printing is then a simple matter of walking that list.

The referenced bug has a much better explanation of the cause of the
problem.

See: rhbz#1925681
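
As a rough illustration only (this sketch is not part of the patch
itself), a caller now consumes the search result along these lines;
the names out, rsc, rsc_id and data_set stand in for the corresponding
locals in tools/crm_resource.c:

    GListPtr nodes = cli_resource_search(out, rsc, rsc_id, data_set);

    for (GListPtr iter = nodes; iter != NULL; iter = iter->next) {
        node_info_t *ni = (node_info_t *) iter->data;

        /* Promotion state now travels with each entry, so clone
         * instances no longer all inherit the state of whichever
         * single instance happened to be passed in. */
        out->list_item(out, "node", "resource %s is running on: %s%s",
                       rsc_id, ni->node_name,
                       ni->promoted ? " Master" : "");
    }

    /* Entries are calloc'd node_info_t structs owned by the caller. */
    g_list_free_full(nodes, free);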
---
 cts/cli/regression.tools.exp |  4 ++--
 tools/crm_resource.c         |  3 ++-
 tools/crm_resource.h         |  7 +++++-
 tools/crm_resource_print.c   | 23 +++++++-------------
 tools/crm_resource_runtime.c | 51 +++++++++++++++++++++++++++++++-------------
 5 files changed, 54 insertions(+), 34 deletions(-)

diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp
index 708c340..b3f16fa 100644
--- a/cts/cli/regression.tools.exp
+++ b/cts/cli/regression.tools.exp
@@ -4078,13 +4078,13 @@ Resources colocated with clone:
 =#=#=#= End test: List guest,remote nodes - OK (0) =#=#=#=
 * Passed: crmadmin       - List guest,remote nodes
 =#=#=#= Begin test: List a promotable clone resource =#=#=#=
-resource promotable-clone is running on: cluster02
 resource promotable-clone is running on: cluster01
+resource promotable-clone is running on: cluster02 Master
 =#=#=#= End test: List a promotable clone resource - OK (0) =#=#=#=
 * Passed: crm_resource   - List a promotable clone resource
 =#=#=#= Begin test: List the primitive of a promotable clone resource =#=#=#=
+resource promotable-rsc is running on: cluster01
 resource promotable-rsc is running on: cluster02 Master
-resource promotable-rsc is running on: cluster01 Master
 =#=#=#= End test: List the primitive of a promotable clone resource - OK (0) =#=#=#=
 * Passed: crm_resource   - List the primitive of a promotable clone resource
 =#=#=#= Begin test: List a single instance of a promotable clone resource =#=#=#=
diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index 78b2246..7d2f0f6 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -1874,7 +1874,8 @@ main(int argc, char **argv)
 
         case cmd_locate: {
             GListPtr nodes = cli_resource_search(out, rsc, options.rsc_id, data_set);
-            rc = out->message(out, "resource-search-list", nodes, rsc, options.rsc_id);
+            rc = out->message(out, "resource-search-list", nodes, options.rsc_id);
+            g_list_free_full(nodes, free);
             break;
         }
 
diff --git a/tools/crm_resource.h b/tools/crm_resource.h
index 5bfadb7..777490a 100644
--- a/tools/crm_resource.h
+++ b/tools/crm_resource.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2004-2020 the Pacemaker project contributors
+ * Copyright 2004-2021 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
@@ -23,6 +23,11 @@
 #include <crm/pengine/internal.h>
 #include <pacemaker-internal.h>
 
+typedef struct node_info_s {
+    const char *node_name;
+    bool promoted;
+} node_info_t;
+
 enum resource_check_flags {
     rsc_remain_stopped  = (1 << 0),
     rsc_unpromotable    = (1 << 1),
diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c
index 398fef0..053f806 100644
--- a/tools/crm_resource_print.c
+++ b/tools/crm_resource_print.c
@@ -276,12 +276,11 @@ resource_check_list_xml(pcmk__output_t *out, va_list args) {
     return rc;
 }
 
-PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "pe_resource_t *", "gchar *")
+PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "gchar *")
 static int
 resource_search_list_default(pcmk__output_t *out, va_list args)
 {
     GList *nodes = va_arg(args, GList *);
-    pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     gchar *requested_name = va_arg(args, gchar *);
 
     bool printed = false;
@@ -293,7 +292,7 @@ resource_search_list_default(pcmk__output_t *out, va_list args)
     }
 
     for (GList *lpc = nodes; lpc != NULL; lpc = lpc->next) {
-        pe_node_t *node = (pe_node_t *) lpc->data;
+        node_info_t *ni = (node_info_t *) lpc->data;
 
         if (!printed) {
             out->begin_list(out, NULL, NULL, "Nodes");
@@ -302,15 +301,10 @@ resource_search_list_default(pcmk__output_t *out, va_list args)
         }
 
         if (out->is_quiet(out)) {
-            out->list_item(out, "node", "%s", node->details->uname);
+            out->list_item(out, "node", "%s", ni->node_name);
         } else {
-            const char *state = "";
-
-            if (!pe_rsc_is_clone(rsc) && rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER) {
-                state = " Master";
-            }
             out->list_item(out, "node", "resource %s is running on: %s%s",
-                           requested_name, node->details->uname, state);
+                           requested_name, ni->node_name, ni->promoted ? " Master" : "");
         }
     }
 
@@ -321,12 +315,11 @@ resource_search_list_default(pcmk__output_t *out, va_list args)
     return rc;
 }
 
-PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "pe_resource_t *", "gchar *")
+PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "gchar *")
 static int
 resource_search_list_xml(pcmk__output_t *out, va_list args)
 {
     GList *nodes = va_arg(args, GList *);
-    pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     gchar *requested_name = va_arg(args, gchar *);
 
     pcmk__output_xml_create_parent(out, "nodes",
@@ -334,10 +327,10 @@ resource_search_list_xml(pcmk__output_t *out, va_list args)
                                    NULL);
 
     for (GList *lpc = nodes; lpc != NULL; lpc = lpc->next) {
-        pe_node_t *node = (pe_node_t *) lpc->data;
-        xmlNodePtr sub_node = pcmk__output_create_xml_text_node(out, "node", node->details->uname);
+        node_info_t *ni = (node_info_t *) lpc->data;
+        xmlNodePtr sub_node = pcmk__output_create_xml_text_node(out, "node", ni->node_name);
 
-        if (!pe_rsc_is_clone(rsc) && rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER) {
+        if (ni->promoted) {
             crm_xml_add(sub_node, "state", "promoted");
         }
     }
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index adfdfba..1769042 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -41,20 +41,37 @@ cli_check_resource(pe_resource_t *rsc, char *role_s, char *managed)
     return rc;
 }
 
+static GListPtr
+build_node_info_list(pe_resource_t *rsc)
+{
+    GListPtr retval = NULL;
+
+    for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) {
+        pe_resource_t *child = (pe_resource_t *) iter->data;
+
+        for (GListPtr iter2 = child->running_on; iter2 != NULL; iter2 = iter2->next) {
+            pe_node_t *node = (pe_node_t *) iter2->data;
+            node_info_t *ni = calloc(1, sizeof(node_info_t));
+            ni->node_name = node->details->uname;
+            ni->promoted = pcmk_is_set(rsc->flags, pe_rsc_promotable) &&
+                           child->fns->state(child, TRUE) == RSC_ROLE_MASTER;
+
+            retval = g_list_prepend(retval, ni);
+        }
+    }
+
+    return retval;
+}
+
 GListPtr
 cli_resource_search(pcmk__output_t *out, pe_resource_t *rsc, const char *requested_name,
                     pe_working_set_t *data_set)
 {
-    GListPtr found = NULL;
+    GListPtr retval = NULL;
     pe_resource_t *parent = uber_parent(rsc);
 
     if (pe_rsc_is_clone(rsc)) {
-        for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) {
-            GListPtr extra = ((pe_resource_t *) iter->data)->running_on;
-            if (extra != NULL) {
-                found = g_list_concat(found, extra);
-            }
-        }
+        retval = build_node_info_list(rsc);
 
     /* The anonymous clone children's common ID is supplied */
     } else if (pe_rsc_is_clone(parent)
@@ -63,18 +80,20 @@ cli_resource_search(pcmk__output_t *out, pe_resource_t *rsc, const char *request
                && pcmk__str_eq(requested_name, rsc->clone_name, pcmk__str_casei)
                && !pcmk__str_eq(requested_name, rsc->id, pcmk__str_casei)) {
 
-        for (GListPtr iter = parent->children; iter; iter = iter->next) {
-            GListPtr extra = ((pe_resource_t *) iter->data)->running_on;
-            if (extra != NULL) {
-                found = g_list_concat(found, extra);
-            }
-        }
+        retval = build_node_info_list(parent);
 
     } else if (rsc->running_on != NULL) {
-        found = g_list_concat(found, rsc->running_on);
+        for (GListPtr iter = rsc->running_on; iter != NULL; iter = iter->next) {
+            pe_node_t *node = (pe_node_t *) iter->data;
+            node_info_t *ni = calloc(1, sizeof(node_info_t));
+            ni->node_name = node->details->uname;
+            ni->promoted = rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER;
+
+            retval = g_list_prepend(retval, ni);
+        }
     }
 
-    return found;
+    return retval;
 }
 
 #define XPATH_MAX 1024
@@ -1788,6 +1807,8 @@ cli_resource_execute(pcmk__output_t *out, pe_resource_t *rsc,
                          "the force option");
                 return CRM_EX_UNSAFE;
             }
+
+            g_list_free_full(nodes, free);
         }
 
     } else {
-- 
1.8.3.1
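
With the series applied, the new regression cases can also be exercised
by hand against the sample CIB the tests use, for example (paths
relative to a Pacemaker checkout; illustrative, not part of the patch):

    CIB_file=cts/cli/crm_mon.xml crm_resource --locate -r promotable-rsc

Only the promoted instance should now be reported with " Master", as in
the updated regression.tools.exp above.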