From 4a5dcc5210160f7d167bc68142635c1b5a6d4af2 Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Fri, 22 Apr 2022 10:47:29 -0500
Subject: [PATCH 1/3] Fix: scheduler: make multiple-active="stop_unexpected"
 actually work

The previous implementation covered the scenario in the regression test and not
much else. It also unnecessarily added an expected_node member to the native
variant data, when the resource's allocated_to is already sufficient to
determine the expected node.
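
In short, the expected-node check now reduces to the following (a condensed
sketch of the new is_expected_node() from the hunk below, with explanatory
comments added):

    static bool
    is_expected_node(const pe_resource_t *rsc, const pe_node_t *node)
    {
        /* The node is "expected" only if we are stopping unexpected instances
         * while scheduling restart actions, the resource will remain active,
         * and this is the node the resource has been allocated to
         */
        return pcmk_all_flags_set(rsc->flags,
                                  pe_rsc_stop_unexpected|pe_rsc_restarting)
               && (rsc->next_role > RSC_ROLE_STOPPED)
               && (rsc->allocated_to != NULL) && (node != NULL)
               && (rsc->allocated_to->details == node->details);
    }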
---
 lib/pacemaker/pcmk_sched_native.c | 45 +++++++++++++++----------------
 lib/pengine/unpack.c              |  1 -
 2 files changed, 22 insertions(+), 24 deletions(-)

diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c
index c0224849f..a1a51721e 100644
--- a/lib/pacemaker/pcmk_sched_native.c
+++ b/lib/pacemaker/pcmk_sched_native.c
@@ -1250,7 +1250,7 @@ native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set)
     gboolean need_stop = FALSE;
     bool need_promote = FALSE;
     gboolean is_moving = FALSE;
-    gboolean allow_migrate = pcmk_is_set(rsc->flags, pe_rsc_allow_migrate)? TRUE : FALSE;
+    gboolean allow_migrate = FALSE;
 
     GList *gIter = NULL;
     unsigned int num_all_active = 0;
@@ -1259,9 +1259,8 @@ native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set)
     enum rsc_role_e role = RSC_ROLE_UNKNOWN;
     enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;
 
-    native_variant_data_t *native_data = NULL;
-
-    get_native_variant_data(native_data, rsc);
+    CRM_ASSERT(rsc != NULL);
+    allow_migrate = pcmk_is_set(rsc->flags, pe_rsc_allow_migrate)? TRUE : FALSE;
 
     chosen = rsc->allocated_to;
     next_role = rsc->next_role;
@@ -1338,8 +1337,16 @@ native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set)
             crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ#Resource_is_Too_Active for more information");
         }
 
-        if (rsc->recovery_type == recovery_stop_start) {
-            need_stop = TRUE;
+        switch (rsc->recovery_type) {
+            case recovery_stop_start:
+                need_stop = TRUE;
+                break;
+            case recovery_stop_unexpected:
+                need_stop = TRUE; // StopRsc() will skip expected node
+                pe__set_resource_flags(rsc, pe_rsc_stop_unexpected);
+                break;
+            default:
+                break;
         }
 
         /* If by chance a partial migration is in process, but the migration
@@ -1350,7 +1357,6 @@ native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set)
     }
 
     if (!multiply_active) {
-        native_data->expected_node = NULL;
         pe__clear_resource_flags(rsc, pe_rsc_stop_unexpected);
     }
 
@@ -2020,14 +2026,11 @@ native_expand(pe_resource_t * rsc, pe_working_set_t * data_set)
 static bool
 is_expected_node(const pe_resource_t *rsc, const pe_node_t *node)
 {
-    native_variant_data_t *native_data = NULL;
-
-    get_native_variant_data(native_data, rsc);
     return pcmk_all_flags_set(rsc->flags,
                               pe_rsc_stop_unexpected|pe_rsc_restarting)
            && (rsc->next_role > RSC_ROLE_STOPPED)
-           && (native_data->expected_node != NULL) && (node != NULL)
-           && (native_data->expected_node->details == node->details);
+           && (rsc->allocated_to != NULL) && (node != NULL)
+           && (rsc->allocated_to->details == node->details);
 }
 
 gboolean
@@ -2076,17 +2079,13 @@ StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set
 
         if(rsc->allocated_to == NULL) {
             pe_action_set_reason(stop, "node availability", TRUE);
-        } else if (pcmk_is_set(rsc->flags, pe_rsc_restarting)) {
-            native_variant_data_t *native_data = NULL;
-
-            get_native_variant_data(native_data, rsc);
-            if (native_data->expected_node != NULL) {
-                /* We are stopping a multiply active resource on a node that is
-                 * not its expected node, and we are still scheduling restart
-                 * actions, so the stop is for being multiply active.
-                 */
-                pe_action_set_reason(stop, "being multiply active", TRUE);
-            }
+        } else if (pcmk_all_flags_set(rsc->flags, pe_rsc_restarting
+                                                  |pe_rsc_stop_unexpected)) {
+            /* We are stopping a multiply active resource on a node that is
+             * not its expected node, and we are still scheduling restart
+             * actions, so the stop is for being multiply active.
+             */
+            pe_action_set_reason(stop, "being multiply active", TRUE);
         }
 
         if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index 17dea0d7a..426022013 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -3945,7 +3945,6 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
     }
 
 done:
-    pe__update_expected_node(rsc, node, status, rc, target_rc);
     pe_rsc_trace(rsc, "Resource %s after %s: role=%s, next=%s",
                  rsc->id, task, role2text(rsc->role),
                  role2text(rsc->next_role));
-- 
2.27.0


From 703d3a09bce389afb4e095e1ac7af29eb5edd189 Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Fri, 22 Apr 2022 14:02:34 -0500
Subject: [PATCH 2/3] Test: scheduler: add a second regression test for
 multiple-active=stop_unexpected

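The new test verifies that when a primitive with multiple-active=stop_unexpected
is found active on two nodes, only the unexpectedly active instance is stopped
(here, on rhel8-3), while the instance on the allocated node (rhel8-4) is left
running, with its restart scheduled as a pseudo-action and its recurring
monitor rescheduled.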
---
 cts/cts-scheduler.in                          |   3 +-
 cts/scheduler/dot/stop-unexpected-2.dot       |   7 +
 cts/scheduler/exp/stop-unexpected-2.exp       |  36 ++++
 cts/scheduler/scores/stop-unexpected-2.scores |  21 ++
 .../summary/stop-unexpected-2.summary         |  29 +++
 cts/scheduler/xml/stop-unexpected-2.xml       | 204 ++++++++++++++++++
 6 files changed, 299 insertions(+), 1 deletion(-)
 create mode 100644 cts/scheduler/dot/stop-unexpected-2.dot
 create mode 100644 cts/scheduler/exp/stop-unexpected-2.exp
 create mode 100644 cts/scheduler/scores/stop-unexpected-2.scores
 create mode 100644 cts/scheduler/summary/stop-unexpected-2.summary
 create mode 100644 cts/scheduler/xml/stop-unexpected-2.xml

diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index 8c04687da..7fc76cce4 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -273,8 +273,9 @@ TESTS = [
         [ "rec-rsc-6", "Resource Recover - multiple - restart" ],
         [ "rec-rsc-7", "Resource Recover - multiple - stop" ],
         [ "rec-rsc-8", "Resource Recover - multiple - block" ],
-        [ "stop-unexpected", "Resource Recover - multiple - stop unexpected" ],
         [ "rec-rsc-9", "Resource Recover - group/group" ],
+        [ "stop-unexpected", "Recover multiply active group with stop_unexpected" ],
+        [ "stop-unexpected-2", "Resource multiply active primitve with stop_unexpected" ],
         [ "monitor-recovery", "on-fail=block + resource recovery detected by recurring monitor" ],
         [ "stop-failure-no-quorum", "Stop failure without quorum" ],
         [ "stop-failure-no-fencing", "Stop failure without fencing available" ],
diff --git a/cts/scheduler/dot/stop-unexpected-2.dot b/cts/scheduler/dot/stop-unexpected-2.dot
new file mode 100644
index 000000000..cdaebf551
--- /dev/null
+++ b/cts/scheduler/dot/stop-unexpected-2.dot
@@ -0,0 +1,7 @@
+ digraph "g" {
+"test_monitor_10000 rhel8-4" [ style=bold color="green" fontcolor="black"]
+"test_start_0 rhel8-4" -> "test_monitor_10000 rhel8-4" [ style = bold]
+"test_start_0 rhel8-4" [ style=bold color="green" fontcolor="orange"]
+"test_stop_0 rhel8-3" -> "test_start_0 rhel8-4" [ style = bold]
+"test_stop_0 rhel8-3" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/exp/stop-unexpected-2.exp b/cts/scheduler/exp/stop-unexpected-2.exp
new file mode 100644
index 000000000..258053c08
--- /dev/null
+++ b/cts/scheduler/exp/stop-unexpected-2.exp
@@ -0,0 +1,36 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="1"  transition_id="0">
+  <synapse id="0">
+    <action_set>
+      <rsc_op id="10" operation="monitor" operation_key="test_monitor_10000" on_node="rhel8-4" on_node_uuid="4">
+        <primitive id="test" class="ocf" provider="pacemaker" type="Dummy"/>
+        <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="rhel8-4" CRM_meta_on_node_uuid="4" CRM_meta_timeout="20000" />
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="9" operation="start" operation_key="test_start_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="1">
+    <action_set>
+      <pseudo_event id="9" operation="start" operation_key="test_start_0">
+        <attributes CRM_meta_name="start" CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="8" operation="stop" operation_key="test_stop_0" on_node="rhel8-3" on_node_uuid="3"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="2">
+    <action_set>
+      <rsc_op id="8" operation="stop" operation_key="test_stop_0" on_node="rhel8-3" on_node_uuid="3">
+        <primitive id="test" class="ocf" provider="pacemaker" type="Dummy"/>
+        <attributes CRM_meta_name="stop" CRM_meta_on_node="rhel8-3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" />
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+</transition_graph>
diff --git a/cts/scheduler/scores/stop-unexpected-2.scores b/cts/scheduler/scores/stop-unexpected-2.scores
new file mode 100644
index 000000000..0eb549f5e
--- /dev/null
+++ b/cts/scheduler/scores/stop-unexpected-2.scores
@@ -0,0 +1,21 @@
+
+pcmk__native_allocate: Fencing allocation score on rhel8-1: 0
+pcmk__native_allocate: Fencing allocation score on rhel8-2: 0
+pcmk__native_allocate: Fencing allocation score on rhel8-3: 0
+pcmk__native_allocate: Fencing allocation score on rhel8-4: 0
+pcmk__native_allocate: Fencing allocation score on rhel8-5: 0
+pcmk__native_allocate: FencingFail allocation score on rhel8-1: 0
+pcmk__native_allocate: FencingFail allocation score on rhel8-2: 0
+pcmk__native_allocate: FencingFail allocation score on rhel8-3: 0
+pcmk__native_allocate: FencingFail allocation score on rhel8-4: 0
+pcmk__native_allocate: FencingFail allocation score on rhel8-5: 0
+pcmk__native_allocate: FencingPass allocation score on rhel8-1: 0
+pcmk__native_allocate: FencingPass allocation score on rhel8-2: 0
+pcmk__native_allocate: FencingPass allocation score on rhel8-3: 0
+pcmk__native_allocate: FencingPass allocation score on rhel8-4: 0
+pcmk__native_allocate: FencingPass allocation score on rhel8-5: 0
+pcmk__native_allocate: test allocation score on rhel8-1: 0
+pcmk__native_allocate: test allocation score on rhel8-2: 0
+pcmk__native_allocate: test allocation score on rhel8-3: 0
+pcmk__native_allocate: test allocation score on rhel8-4: 0
+pcmk__native_allocate: test allocation score on rhel8-5: 0
diff --git a/cts/scheduler/summary/stop-unexpected-2.summary b/cts/scheduler/summary/stop-unexpected-2.summary
new file mode 100644
index 000000000..d6b0c15dc
--- /dev/null
+++ b/cts/scheduler/summary/stop-unexpected-2.summary
@@ -0,0 +1,29 @@
+Using the original execution date of: 2022-04-22 14:15:37Z
+Current cluster status:
+  * Node List:
+    * Online: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 rhel8-5 ]
+
+  * Full List of Resources:
+    * Fencing	(stonith:fence_xvm):	 Started rhel8-1
+    * FencingPass	(stonith:fence_dummy):	 Started rhel8-2
+    * FencingFail	(stonith:fence_dummy):	 Started rhel8-3
+    * test	(ocf:pacemaker:Dummy):	 Started [ rhel8-4 rhel8-3 ]
+
+Transition Summary:
+  * Restart    test    ( rhel8-4 )
+
+Executing Cluster Transition:
+  * Resource action: test            stop on rhel8-3
+  * Pseudo action:   test_start_0
+  * Resource action: test            monitor=10000 on rhel8-4
+Using the original execution date of: 2022-04-22 14:15:37Z
+
+Revised Cluster Status:
+  * Node List:
+    * Online: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 rhel8-5 ]
+
+  * Full List of Resources:
+    * Fencing	(stonith:fence_xvm):	 Started rhel8-1
+    * FencingPass	(stonith:fence_dummy):	 Started rhel8-2
+    * FencingFail	(stonith:fence_dummy):	 Started rhel8-3
+    * test	(ocf:pacemaker:Dummy):	 Started rhel8-4
diff --git a/cts/scheduler/xml/stop-unexpected-2.xml b/cts/scheduler/xml/stop-unexpected-2.xml
new file mode 100644
index 000000000..e103629e9
--- /dev/null
+++ b/cts/scheduler/xml/stop-unexpected-2.xml
@@ -0,0 +1,204 @@
+<cib crm_feature_set="3.13.0" validate-with="pacemaker-3.7" epoch="59" num_updates="14" admin_epoch="0" cib-last-written="Fri Apr 22 09:15:36 2022" update-origin="rhel8-1" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="2" execution-date="1650636937">
+  <configuration>
+    <crm_config>
+      <cluster_property_set id="cib-bootstrap-options">
+        <nvpair id="cts-stonith-enabled" name="stonith-enabled" value="1"/>
+        <nvpair id="cts-start-failure-is-fatal" name="start-failure-is-fatal" value="false"/>
+        <nvpair id="cts-pe-input-series-max" name="pe-input-series-max" value="5000"/>
+        <nvpair id="cts-shutdown-escalation" name="shutdown-escalation" value="5min"/>
+        <nvpair id="cts-batch-limit" name="batch-limit" value="10"/>
+        <nvpair id="cts-dc-deadtime" name="dc-deadtime" value="5s"/>
+        <nvpair id="cts-no-quorum-policy" name="no-quorum-policy" value="stop"/>
+        <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+        <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.2-4.el8_6.1-ada5c3b36e2"/>
+        <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+        <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="rhel8-lab"/>
+        <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1650636936"/>
+      </cluster_property_set>
+    </crm_config>
+    <nodes>
+      <node id="1" uname="rhel8-1"/>
+      <node id="3" uname="rhel8-3"/>
+      <node id="4" uname="rhel8-4"/>
+      <node id="5" uname="rhel8-5"/>
+      <node id="2" uname="rhel8-2"/>
+    </nodes>
+    <resources>
+      <primitive class="stonith" id="Fencing" type="fence_xvm">
+        <meta_attributes id="Fencing-meta">
+          <nvpair id="Fencing-migration-threshold" name="migration-threshold" value="5"/>
+        </meta_attributes>
+        <instance_attributes id="Fencing-params">
+          <nvpair id="Fencing-pcmk_host_map" name="pcmk_host_map" value="remote-rhel8-1:rhel8-1;remote-rhel8-2:rhel8-2;remote-rhel8-3:rhel8-3;remote-rhel8-4:rhel8-4;remote-rhel8-5:rhel8-5;"/>
+          <nvpair id="Fencing-key_file" name="key_file" value="/etc/pacemaker/fence_xvm.key"/>
+          <nvpair id="Fencing-multicast_address" name="multicast_address" value="239.255.100.100"/>
+          <nvpair id="Fencing-pcmk_host_list" name="pcmk_host_list" value="rhel8-1 remote-rhel8-1 rhel8-2 remote-rhel8-2 rhel8-3 remote-rhel8-3 rhel8-4 remote-rhel8-4 rhel8-5 remote-rhel8-5"/>
+        </instance_attributes>
+        <operations>
+          <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+          <op id="Fencing-stop-0" interval="0" name="stop" timeout="60s"/>
+          <op id="Fencing-start-0" interval="0" name="start" timeout="60s"/>
+        </operations>
+      </primitive>
+      <primitive class="stonith" id="FencingPass" type="fence_dummy">
+        <instance_attributes id="FencingPass-params">
+          <nvpair id="FencingPass-pcmk_host_list" name="pcmk_host_list" value="rhel8-4 remote-rhel8-4 rhel8-5 remote-rhel8-5"/>
+          <nvpair id="FencingPass-random_sleep_range" name="random_sleep_range" value="30"/>
+          <nvpair id="FencingPass-mode" name="mode" value="pass"/>
+        </instance_attributes>
+      </primitive>
+      <primitive class="stonith" id="FencingFail" type="fence_dummy">
+        <instance_attributes id="FencingFail-params">
+          <nvpair id="FencingFail-pcmk_host_list" name="pcmk_host_list" value="rhel8-2 remote-rhel8-2"/>
+          <nvpair id="FencingFail-random_sleep_range" name="random_sleep_range" value="30"/>
+          <nvpair id="FencingFail-mode" name="mode" value="fail"/>
+        </instance_attributes>
+      </primitive>
+      <primitive class="ocf" id="test" provider="pacemaker" type="Dummy">
+        <meta_attributes id="test-meta_attributes">
+          <nvpair id="test-meta_attributes-multiple-active" name="multiple-active" value="stop_unexpected"/>
+        </meta_attributes>
+        <operations>
+          <op id="test-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+          <op id="test-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+          <op id="test-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+          <op id="test-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+          <op id="test-reload-agent-interval-0s" interval="0s" name="reload-agent" timeout="20s"/>
+          <op id="test-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+          <op id="test-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+        </operations>
+      </primitive>
+    </resources>
+    <constraints/>
+    <fencing-topology>
+      <fencing-level devices="FencingFail" id="cts-rhel8-2.1" index="1" target="rhel8-2"/>
+      <fencing-level devices="Fencing" id="cts-rhel8-2.2" index="2" target="rhel8-2"/>
+      <fencing-level devices="FencingFail" id="cts-remote-rhel8-2.1" index="1" target="remote-rhel8-2"/>
+      <fencing-level devices="Fencing" id="cts-remote-rhel8-2.2" index="2" target="remote-rhel8-2"/>
+      <fencing-level devices="FencingPass,Fencing" id="cts-rhel8-4.1" index="1" target="rhel8-4"/>
+      <fencing-level devices="FencingPass,Fencing" id="cts-remote-rhel8-4.1" index="1" target="remote-rhel8-4"/>
+      <fencing-level devices="FencingPass,Fencing" id="cts-rhel8-5.1" index="1" target="rhel8-5"/>
+      <fencing-level devices="FencingPass,Fencing" id="cts-remote-rhel8-5.1" index="1" target="remote-rhel8-5"/>
+    </fencing-topology>
+    <op_defaults>
+      <meta_attributes id="cts-op_defaults-meta">
+        <nvpair id="cts-op_defaults-timeout" name="timeout" value="90s"/>
+      </meta_attributes>
+    </op_defaults>
+    <alerts>
+      <alert id="alert-1" path="/var/lib/pacemaker/notify.sh">
+        <recipient id="alert-1-recipient-1" value="/run/crm/alert.log"/>
+      </alert>
+    </alerts>
+  </configuration>
+  <status>
+    <node_state id="4" uname="rhel8-4" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+      <transient_attributes id="4">
+        <instance_attributes id="status-4"/>
+      </transient_attributes>
+      <lrm id="4">
+        <lrm_resources>
+          <lrm_resource id="FencingPass" type="fence_dummy" class="stonith">
+            <lrm_rsc_op id="FencingPass_last_0" operation_key="FencingPass_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="47:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;47:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-4" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="0" queue-time="0" op-digest="bdca24cab6ded2b426c6b31df675bf0b"/>
+          </lrm_resource>
+          <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="46:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;46:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-4" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="9" queue-time="0" op-digest="bf974d77f2d4d33e434be1f89e362a52"/>
+          </lrm_resource>
+          <lrm_resource id="FencingFail" type="fence_dummy" class="stonith">
+            <lrm_rsc_op id="FencingFail_last_0" operation_key="FencingFail_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="48:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;48:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-4" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="0" queue-time="0" op-digest="fe7e3f8acdd3228efda2766a0eea7ba5"/>
+          </lrm_resource>
+          <lrm_resource id="test" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="test_last_0" operation_key="test_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.13.0" transition-key="5:46:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:0;5:46:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-4" call-id="130" rc-code="0" op-status="0" interval="0" last-rc-change="1650636936" exec-time="25" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="test_last_failure_0" operation_key="test_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.13.0" transition-key="5:46:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:0;5:46:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-4" call-id="130" rc-code="0" op-status="0" interval="0" last-rc-change="1650636936" exec-time="25" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+    <node_state id="5" uname="rhel8-5" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+      <transient_attributes id="5">
+        <instance_attributes id="status-5"/>
+      </transient_attributes>
+      <lrm id="5">
+        <lrm_resources>
+          <lrm_resource id="FencingPass" type="fence_dummy" class="stonith">
+            <lrm_rsc_op id="FencingPass_last_0" operation_key="FencingPass_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="62:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;62:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-5" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="0" queue-time="0" op-digest="bdca24cab6ded2b426c6b31df675bf0b"/>
+          </lrm_resource>
+          <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="61:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;61:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-5" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="6" queue-time="0" op-digest="bf974d77f2d4d33e434be1f89e362a52"/>
+          </lrm_resource>
+          <lrm_resource id="FencingFail" type="fence_dummy" class="stonith">
+            <lrm_rsc_op id="FencingFail_last_0" operation_key="FencingFail_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="63:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;63:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-5" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="0" queue-time="0" op-digest="fe7e3f8acdd3228efda2766a0eea7ba5"/>
+          </lrm_resource>
+          <lrm_resource id="test" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="test_last_0" operation_key="test_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.13.0" transition-key="6:46:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;6:46:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-5" call-id="113" rc-code="7" op-status="0" interval="0" last-rc-change="1650636936" exec-time="25" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+    <node_state id="1" uname="rhel8-1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+      <transient_attributes id="1">
+        <instance_attributes id="status-1"/>
+      </transient_attributes>
+      <lrm id="1">
+        <lrm_resources>
+          <lrm_resource id="FencingPass" type="fence_dummy" class="stonith">
+            <lrm_rsc_op id="FencingPass_last_0" operation_key="FencingPass_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="2:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;2:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-1" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="0" queue-time="0" op-digest="bdca24cab6ded2b426c6b31df675bf0b"/>
+          </lrm_resource>
+          <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="76:0:0:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:0;76:0:0:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-1" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1650636745" exec-time="36" queue-time="0" op-digest="bf974d77f2d4d33e434be1f89e362a52"/>
+            <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="77:0:0:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:0;77:0:0:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-1" call-id="16" rc-code="0" op-status="0" interval="120000" last-rc-change="1650636745" exec-time="36" queue-time="0" op-digest="24c9c9364f847dcb857d6fb4e1b4d3c8"/>
+          </lrm_resource>
+          <lrm_resource id="FencingFail" type="fence_dummy" class="stonith">
+            <lrm_rsc_op id="FencingFail_last_0" operation_key="FencingFail_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="3:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;3:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-1" call-id="21" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="0" queue-time="0" op-digest="fe7e3f8acdd3228efda2766a0eea7ba5"/>
+          </lrm_resource>
+          <lrm_resource id="test" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="test_last_0" operation_key="test_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.13.0" transition-key="2:46:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;2:46:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-1" call-id="121" rc-code="7" op-status="0" interval="0" last-rc-change="1650636936" exec-time="62" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+    <node_state id="2" uname="rhel8-2" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+      <transient_attributes id="2">
+        <instance_attributes id="status-2"/>
+      </transient_attributes>
+      <lrm id="2">
+        <lrm_resources>
+          <lrm_resource id="FencingPass" type="fence_dummy" class="stonith">
+            <lrm_rsc_op id="FencingPass_last_0" operation_key="FencingPass_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="78:0:0:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:0;78:0:0:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-2" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1650636745" exec-time="21041" queue-time="0" op-digest="bdca24cab6ded2b426c6b31df675bf0b"/>
+          </lrm_resource>
+          <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="16:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;16:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-2" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="2" queue-time="0" op-digest="bf974d77f2d4d33e434be1f89e362a52"/>
+          </lrm_resource>
+          <lrm_resource id="FencingFail" type="fence_dummy" class="stonith">
+            <lrm_rsc_op id="FencingFail_last_0" operation_key="FencingFail_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="18:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;18:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-2" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="0" queue-time="0" op-digest="fe7e3f8acdd3228efda2766a0eea7ba5"/>
+          </lrm_resource>
+          <lrm_resource id="test" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="test_last_0" operation_key="test_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.13.0" transition-key="3:46:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;3:46:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-2" call-id="109" rc-code="7" op-status="0" interval="0" last-rc-change="1650636936" exec-time="37" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+    <node_state id="3" uname="rhel8-3" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+      <transient_attributes id="3">
+        <instance_attributes id="status-3"/>
+      </transient_attributes>
+      <lrm id="3">
+        <lrm_resources>
+          <lrm_resource id="FencingPass" type="fence_dummy" class="stonith">
+            <lrm_rsc_op id="FencingPass_last_0" operation_key="FencingPass_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="32:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;32:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-3" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="0" queue-time="0" op-digest="bdca24cab6ded2b426c6b31df675bf0b"/>
+          </lrm_resource>
+          <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="31:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;31:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-3" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="2" queue-time="0" op-digest="bf974d77f2d4d33e434be1f89e362a52"/>
+          </lrm_resource>
+          <lrm_resource id="FencingFail" type="fence_dummy" class="stonith">
+            <lrm_rsc_op id="FencingFail_last_0" operation_key="FencingFail_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="79:0:0:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:0;79:0:0:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-3" call-id="26" rc-code="0" op-status="0" interval="0" last-rc-change="1650636745" exec-time="1044" queue-time="0" op-digest="fe7e3f8acdd3228efda2766a0eea7ba5"/>
+          </lrm_resource>
+          <lrm_resource id="test" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="test_last_0" operation_key="test_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.13.0" transition-key="4:46:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:0;4:46:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-3" call-id="114" rc-code="0" op-status="0" interval="0" last-rc-change="1650636936" exec-time="24" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="test_last_failure_0" operation_key="test_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.13.0" transition-key="4:46:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:0;4:46:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-3" call-id="114" rc-code="0" op-status="0" interval="0" last-rc-change="1650636936" exec-time="24" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+  </status>
+</cib>
-- 
2.27.0


From 60d8bb01ba73dfd1cb25c6764ee2b923dcfc4e8c Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Fri, 22 Apr 2022 14:09:43 -0500
Subject: [PATCH 3/3] Revert "Refactor: scheduler: add expected node to
 primitive variant data"

This reverts commit 871e2201d92520039df45062afc9120fd1fb0f30.
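
With the previous commit deriving the expected node from the resource's
allocated_to, the expected_node member and pe__update_expected_node() no
longer have any users.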
---
 include/crm/pengine/internal.h |  4 ----
 lib/pengine/native.c           | 38 ----------------------------------
 lib/pengine/variant.h          |  8 ++-----
 3 files changed, 2 insertions(+), 48 deletions(-)

diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
index a2e4b5bf7..fe9a23b7e 100644
--- a/include/crm/pengine/internal.h
+++ b/include/crm/pengine/internal.h
@@ -580,8 +580,4 @@ xmlNode *pe__failed_probe_for_rsc(pe_resource_t *rsc, const char *name);
 
 const char *pe__clone_child_id(pe_resource_t *rsc);
 
-void pe__update_expected_node(pe_resource_t *rsc, pe_node_t *node,
-                              int execution_status, int exit_status,
-                              int expected_exit_status);
-
 #endif
diff --git a/lib/pengine/native.c b/lib/pengine/native.c
index 591d1c6f5..fa7dc8960 100644
--- a/lib/pengine/native.c
+++ b/lib/pengine/native.c
@@ -1376,41 +1376,3 @@ pe__native_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_paren
 
     return TRUE;
 }
-
-/*!
- * \internal
- * \brief Set a resource's expected node if appropriate for a history result
- *
- * \param[in] rsc               Resource to set expected node for
- * \param[in] node              Node to set as expected node
- * \param[in] execution_status  History entry's execution status
- * \param[in] exit_status       History entry's actual exit status
- * \param[in] expected_status   History entry's expected exit status
- */
-void
-pe__update_expected_node(pe_resource_t *rsc, pe_node_t *node,
-                         int execution_status, int exit_status,
-                         int expected_exit_status)
-{
-    native_variant_data_t *native_data = NULL;
-
-    get_native_variant_data(native_data, rsc);
-
-    if ((rsc->recovery_type == recovery_stop_unexpected)
-        && (rsc->role > RSC_ROLE_STOPPED)
-        && (execution_status == PCMK_EXEC_DONE)
-        && (exit_status == expected_exit_status)) {
-        // Resource is active and was expected on this node
-        pe_rsc_trace(rsc, "Found expected node %s for %s",
-                     node->details->uname, rsc->id);
-        native_data->expected_node = node;
-        pe__set_resource_flags(rsc, pe_rsc_stop_unexpected);
-
-    } else if ((native_data->expected_node != NULL)
-               && (native_data->expected_node->details == node->details)) {
-        // Resource is not cleanly active here
-        pe_rsc_trace(rsc, "Clearing expected node for %s", rsc->id);
-        native_data->expected_node = NULL;
-        pe__clear_resource_flags(rsc, pe_rsc_stop_unexpected);
-    }
-}
diff --git a/lib/pengine/variant.h b/lib/pengine/variant.h
index d8fefa9d6..cabfbe81f 100644
--- a/lib/pengine/variant.h
+++ b/lib/pengine/variant.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2021 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
@@ -139,11 +139,7 @@ typedef struct group_variant_data_s {
 #  elif VARIANT_NATIVE
 
 typedef struct native_variant_data_s {
-    /* If the resource is multiply active, and has multiple-active set to
-     * stop_unexpected, this will be set to the node where the resource was
-     * found active by an operation with a expected result.
-     */
-    pe_node_t *expected_node;
+    int dummy;
 } native_variant_data_t;
 
 #    define get_native_variant_data(data, rsc)				\
-- 
2.27.0