diff --git a/SOURCES/024-stop_unexpected.patch b/SOURCES/024-stop_unexpected.patch
new file mode 100644
index 0000000..0fcf75b
--- /dev/null
+++ b/SOURCES/024-stop_unexpected.patch
@@ -0,0 +1,806 @@
+From 767b5552ab49850204692c2c990dfb41d37589f3 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Mon, 28 Mar 2022 18:11:52 -0500
+Subject: [PATCH 1/9] Refactor: libpacemaker: drop unnecessary argument from
+ "rsc-action" message
+
+9875cab129 moved the setting of the "moving" variable from LogActions() to a
+new "rsc-action" message, but continued to pass the variable unnecessarily
+
+Also simplify how it's set
+---
+ lib/pacemaker/pcmk_output.c         | 10 ++++------
+ lib/pacemaker/pcmk_sched_native.c   |  4 +---
+ 2 files changed, 5 insertions(+), 9 deletions(-)
+
+diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c
+index d864c8bd2..56963a93f 100644
+--- a/lib/pacemaker/pcmk_output.c
++++ b/lib/pacemaker/pcmk_output.c
+@@ -873,19 +873,18 @@ digests_xml(pcmk__output_t *out, va_list args)
+         }                                                               \
+     } while(0)
+ 
+-PCMK__OUTPUT_ARGS("rsc-action", "pe_resource_t *", "pe_node_t *", "pe_node_t *",
+-                  "gboolean")
++PCMK__OUTPUT_ARGS("rsc-action", "pe_resource_t *", "pe_node_t *", "pe_node_t *")
+ static int
+ rsc_action_default(pcmk__output_t *out, va_list args)
+ {
+     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+     pe_node_t *current = va_arg(args, pe_node_t *);
+     pe_node_t *next = va_arg(args, pe_node_t *);
+-    gboolean moving = va_arg(args, gboolean);
+ 
+     GList *possible_matches = NULL;
+     char *key = NULL;
+     int rc = pcmk_rc_no_output;
++    bool moving = false;
+ 
+     pe_node_t *start_node = NULL;
+     pe_action_t *start = NULL;
+@@ -901,9 +900,8 @@ rsc_action_default(pcmk__output_t *out, va_list args)
+         return rc;
+     }
+ 
+-    if (current != NULL && next != NULL && !pcmk__str_eq(current->details->id, next->details->id, pcmk__str_casei)) {
+-        moving = TRUE;
+-    }
++    moving = (current != NULL) && (next != NULL)
++             && (current->details != next->details);
+ 
+     possible_matches = pe__resource_actions(rsc, next, RSC_START, FALSE);
+     if (possible_matches) {
+diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c
+index a3d646775..41631da3d 100644
+--- a/lib/pacemaker/pcmk_sched_native.c
++++ b/lib/pacemaker/pcmk_sched_native.c
+@@ -2037,8 +2037,6 @@ LogActions(pe_resource_t * rsc, pe_working_set_t * data_set)
+     pe_node_t *next = NULL;
+     pe_node_t *current = NULL;
+ 
+-    gboolean moving = FALSE;
+-
+     if(rsc->variant == pe_container) {
+         pcmk__bundle_log_actions(rsc, data_set);
+         return;
+@@ -2066,7 +2064,7 @@ LogActions(pe_resource_t * rsc, pe_working_set_t * data_set)
+         return;
+     }
+ 
+-    out->message(out, "rsc-action", rsc, current, next, moving);
++    out->message(out, "rsc-action", rsc, current, next);
+ }
+ 
+ gboolean
+-- 
+2.27.0
+
+
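+The refactor above also replaces a case-insensitive comparison of node ids
+with a pointer comparison of the nodes' details objects. A minimal standalone
+sketch of that idea, with hypothetical stand-in types rather than the real
+pe_node_t, assuming (as in Pacemaker) that wrappers for the same cluster node
+share one details object:
+
+    #include <stdbool.h>
+    #include <stddef.h>
+
+    struct node_details { const char *id; };        /* simplified stand-in */
+    struct node { struct node_details *details; };  /* per-use wrapper     */
+
+    /* Wrappers for the same node share one details object, so pointer
+     * equality is enough to detect a move between nodes.
+     */
+    static bool
+    is_moving(const struct node *current, const struct node *next)
+    {
+        return (current != NULL) && (next != NULL)
+               && (current->details != next->details);
+    }
+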
+From 870fb19715618c4ceab9ed4ae13a99658440b662 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Tue, 22 Mar 2022 15:22:23 -0500
+Subject: [PATCH 2/9] Refactor: scheduler: functionize scheduling restart
+ actions
+
+native_create_actions() is already overlarge, and more needs to be added to it
+---
+ lib/pacemaker/pcmk_sched_native.c | 85 ++++++++++++++++++++-----------
+ 1 file changed, 54 insertions(+), 31 deletions(-)
+
+diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c
+index 808e97540..b8a1c1e1a 100644
+--- a/lib/pacemaker/pcmk_sched_native.c
++++ b/lib/pacemaker/pcmk_sched_native.c
+@@ -1185,6 +1185,58 @@ handle_migration_actions(pe_resource_t * rsc, pe_node_t *current, pe_node_t *cho
+     }
+ }
+ 
++/*!
++ * \internal
++ * \brief Schedule actions to bring resource down and back to current role
++ *
++ * \param[in] rsc           Resource to restart
++ * \param[in] current       Node that resource should be brought down on
++ * \param[in] chosen        Node that resource should be brought up on
++ * \param[in] need_stop     Whether the resource must be stopped
++ * \param[in] need_promote  Whether the resource must be promoted
++ *
++ * \return Role that resource would have after scheduled actions are taken
++ */
++static void
++schedule_restart_actions(pe_resource_t *rsc, pe_node_t *current,
++                         pe_node_t *chosen, bool need_stop, bool need_promote)
++{
++    enum rsc_role_e role = rsc->role;
++    enum rsc_role_e next_role;
++
++    // Bring resource down to a stop on its current node
++    while (role != RSC_ROLE_STOPPED) {
++        next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
++        pe_rsc_trace(rsc, "Creating %s action to take %s down from %s to %s",
++                     (need_stop? "required" : "optional"), rsc->id,
++                     role2text(role), role2text(next_role));
++        if (!rsc_action_matrix[role][next_role](rsc, current, !need_stop,
++                                                rsc->cluster)) {
++            break;
++        }
++        role = next_role;
++    }
++
++    // Bring resource up to its next role on its next node
++    while ((rsc->role <= rsc->next_role) && (role != rsc->role)
++           && !pcmk_is_set(rsc->flags, pe_rsc_block)) {
++        bool required = need_stop;
++
++        next_role = rsc_state_matrix[role][rsc->role];
++        if ((next_role == RSC_ROLE_PROMOTED) && need_promote) {
++            required = true;
++        }
++        pe_rsc_trace(rsc, "Creating %s action to take %s up from %s to %s",
++                     (required? "required" : "optional"), rsc->id,
++                     role2text(role), role2text(next_role));
++        if (!rsc_action_matrix[role][next_role](rsc, chosen, !required,
++                                                rsc->cluster)) {
++            break;
++        }
++        role = next_role;
++    }
++}
++
+ void
+ native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set)
+ {
+@@ -1332,39 +1384,10 @@ native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set)
+     /* Create any additional actions required when bringing resource down and
+      * back up to same level.
+      */
+-    role = rsc->role;
+-    while (role != RSC_ROLE_STOPPED) {
+-        next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
+-        pe_rsc_trace(rsc, "Creating %s action to take %s down from %s to %s",
+-                     (need_stop? "required" : "optional"), rsc->id,
+-                     role2text(role), role2text(next_role));
+-        if (rsc_action_matrix[role][next_role] (rsc, current, !need_stop, data_set) == FALSE) {
+-            break;
+-        }
+-        role = next_role;
+-    }
+-
+-
+-    while ((rsc->role <= rsc->next_role) && (role != rsc->role)
+-           && !pcmk_is_set(rsc->flags, pe_rsc_block)) {
+-        bool required = need_stop;
+-
+-        next_role = rsc_state_matrix[role][rsc->role];
+-        if ((next_role == RSC_ROLE_PROMOTED) && need_promote) {
+-            required = true;
+-        }
+-        pe_rsc_trace(rsc, "Creating %s action to take %s up from %s to %s",
+-                     (required? "required" : "optional"), rsc->id,
+-                     role2text(role), role2text(next_role));
+-        if (rsc_action_matrix[role][next_role](rsc, chosen, !required,
+-                                               data_set) == FALSE) {
+-            break;
+-        }
+-        role = next_role;
+-    }
+-    role = rsc->role;
++    schedule_restart_actions(rsc, current, chosen, need_stop, need_promote);
+ 
+     /* Required steps from this role to the next */
++    role = rsc->role;
+     while (role != rsc->next_role) {
+         next_role = rsc_state_matrix[role][rsc->next_role];
+         pe_rsc_trace(rsc, "Creating action to take %s from %s to %s (ending at %s)",
+-- 
+2.27.0
+
+
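+The loops factored out above walk a role transition matrix one step at a
+time: rsc_state_matrix[from][to] yields the next intermediate role on the way
+from "from" to "to", and each step schedules exactly one action. A compilable
+toy version of that walk, with a made-up three-role ladder standing in for
+Pacemaker's real matrix:
+
+    #include <stdio.h>
+
+    enum role { STOPPED, STARTED, PROMOTED, N_ROLES };
+
+    static const char *role_text[N_ROLES] = { "Stopped", "Started", "Promoted" };
+
+    /* state_matrix[from][to]: next intermediate role from "from" toward "to" */
+    static const enum role state_matrix[N_ROLES][N_ROLES] = {
+        /* to:          STOPPED  STARTED  PROMOTED */
+        /* STOPPED  */ { STOPPED, STARTED, STARTED  },
+        /* STARTED  */ { STOPPED, STARTED, PROMOTED },
+        /* PROMOTED */ { STARTED, STARTED, PROMOTED },
+    };
+
+    int main(void)
+    {
+        enum role role = PROMOTED;
+
+        /* mirror the first loop in schedule_restart_actions(): bring the
+         * resource down one scheduled action at a time
+         */
+        while (role != STOPPED) {
+            enum role next = state_matrix[role][STOPPED];
+            printf("schedule %s -> %s\n", role_text[role], role_text[next]);
+            role = next;
+        }
+        return 0;
+    }
+
+Because one action is created per iteration (here, demote and then stop), the
+real code can simply break out of the loop as soon as an action cannot be
+created.
+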
+From 736d4d8f5e432acf12e577d137e9165904c71b3b Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Mon, 28 Mar 2022 17:42:26 -0500
+Subject: [PATCH 3/9] Log: scheduler: improve trace messages when creating
+ actions
+
+---
+ lib/pacemaker/pcmk_sched_native.c | 22 ++++++++++++++++------
+ 1 file changed, 16 insertions(+), 6 deletions(-)
+
+diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c
+index b8a1c1e1a..8b651ebd2 100644
+--- a/lib/pacemaker/pcmk_sched_native.c
++++ b/lib/pacemaker/pcmk_sched_native.c
+@@ -1997,7 +1997,6 @@ StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set
+     GList *gIter = NULL;
+ 
+     CRM_ASSERT(rsc);
+-    pe_rsc_trace(rsc, "%s", rsc->id);
+ 
+     for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
+         pe_node_t *current = (pe_node_t *) gIter->data;
+@@ -2005,16 +2004,23 @@ StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set
+ 
+         if (rsc->partial_migration_target) {
+             if (rsc->partial_migration_target->details == current->details) {
+-                pe_rsc_trace(rsc, "Filtered %s -> %s %s", current->details->uname,
+-                             next->details->uname, rsc->id);
++                pe_rsc_trace(rsc,
++                             "Skipping stop of %s on %s "
++                             "because migration to %s in progress",
++                             rsc->id, current->details->uname,
++                             next->details->uname);
+                 continue;
+             } else {
+-                pe_rsc_trace(rsc, "Forced on %s %s", current->details->uname, rsc->id);
++                pe_rsc_trace(rsc,
++                             "Forcing stop of %s on %s "
++                             "because migration target changed",
++                             rsc->id, current->details->uname);
+                 optional = FALSE;
+             }
+         }
+ 
+-        pe_rsc_trace(rsc, "%s on %s", rsc->id, current->details->uname);
++        pe_rsc_trace(rsc, "Scheduling stop of %s on %s",
++                     rsc->id, current->details->uname);
+         stop = stop_action(rsc, current, optional);
+ 
+         if(rsc->allocated_to == NULL) {
+@@ -2048,7 +2054,11 @@ StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_se
+     pe_action_t *start = NULL;
+ 
+     CRM_ASSERT(rsc);
+-    pe_rsc_trace(rsc, "%s on %s %d %d", rsc->id, next ? next->details->uname : "N/A", optional, next ? next->weight : 0);
++
++    pe_rsc_trace(rsc, "Scheduling %s start of %s on %s (weight=%d)",
++                 (optional? "optional" : "required"), rsc->id,
++                 ((next == NULL)? "N/A" : next->details->uname),
++                 ((next == NULL)? 0 : next->weight));
+     start = start_action(rsc, next, TRUE);
+ 
+     pcmk__order_vs_unfence(rsc, next, start, pe_order_implies_then, data_set);
+-- 
+2.27.0
+
+
+From 6f987234d5246ed50f4fe2db90e5edb6a23e877d Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Tue, 1 Mar 2022 16:42:06 -0600
+Subject: [PATCH 4/9] Log: scheduler: log a warning if invalid value is given
+ for multiple-active
+
+---
+ lib/pengine/complex.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c
+index e82af2aae..f2caef831 100644
+--- a/lib/pengine/complex.c
++++ b/lib/pengine/complex.c
+@@ -694,7 +694,12 @@ common_unpack(xmlNode * xml_obj, pe_resource_t ** rsc,
+         (*rsc)->recovery_type = recovery_block;
+         pe_rsc_trace((*rsc), "\tMultiple running resource recovery: block");
+ 
+-    } else {
++    } else { // "stop_start"
++        if (!pcmk__str_eq(value, "stop_start",
++                          pcmk__str_casei|pcmk__str_null_matches)) {
++            pe_warn("%s is not a valid value for " XML_RSC_ATTR_MULTIPLE
++                    ", using default of \"stop_start\"", value);
++        }
+         (*rsc)->recovery_type = recovery_stop_start;
+         pe_rsc_trace((*rsc), "\tMultiple running resource recovery: stop/start");
+     }
+-- 
+2.27.0
+
+
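+The warning above follows a validate-with-fallback pattern: an unrecognized
+value is reported once and replaced by the documented default. A standalone
+sketch of the same pattern, using a hypothetical normalize_multiple_active()
+helper rather than the real parser in common_unpack():
+
+    #include <stddef.h>
+    #include <stdio.h>
+    #include <strings.h>
+
+    static const char *
+    normalize_multiple_active(const char *value)
+    {
+        static const char *known[] = { "stop_start", "stop_only", "block" };
+
+        if (value == NULL) {
+            return "stop_start";            /* unset: silent default */
+        }
+        for (size_t i = 0; i < sizeof(known) / sizeof(known[0]); i++) {
+            if (strcasecmp(value, known[i]) == 0) {
+                return known[i];            /* matched case-insensitively */
+            }
+        }
+        fprintf(stderr, "warning: %s is not a valid value for "
+                "multiple-active, using default of \"stop_start\"\n", value);
+        return "stop_start";
+    }
+
+    int main(void)
+    {
+        printf("%s\n", normalize_multiple_active("bogus"));
+        return 0;
+    }
+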
+From 50456c3e229a6021ca0ba7346af41cd234abcc16 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Tue, 1 Mar 2022 16:49:31 -0600
+Subject: [PATCH 5/9] API: libpe_status: add recovery_stop_unexpected to enum
+ rsc_recovery_type
+
+The behavior is not implemented as of this commit
+---
+ include/crm/pengine/common.h | 14 ++++++++++++--
+ lib/pengine/complex.c        |  5 +++++
+ lib/pengine/native.c         |  7 +++++--
+ 3 files changed, 22 insertions(+), 4 deletions(-)
+
+diff --git a/include/crm/pengine/common.h b/include/crm/pengine/common.h
+index efe89a171..9b9f38f3b 100644
+--- a/include/crm/pengine/common.h
++++ b/include/crm/pengine/common.h
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2004-2021 the Pacemaker project contributors
++ * Copyright 2004-2022 the Pacemaker project contributors
+  *
+  * The version control history for this file may have further details.
+  *
+@@ -78,7 +78,8 @@ enum action_tasks {
+ enum rsc_recovery_type {
+     recovery_stop_start,
+     recovery_stop_only,
+-    recovery_block
++    recovery_block,
++    recovery_stop_unexpected,
+ };
+ 
+ enum rsc_start_requirement {
+@@ -143,6 +144,13 @@ const char *fail2text(enum action_fail_response fail);
+ const char *pe_pref(GHashTable * options, const char *name);
+ void calculate_active_ops(GList * sorted_op_list, int *start_index, int *stop_index);
+ 
++/*!
++ * \brief Get readable description of a recovery type
++ *
++ * \param[in] type  Recovery type
++ *
++ * \return Static string describing \p type
++ */
+ static inline const char *
+ recovery2text(enum rsc_recovery_type type)
+ {
+@@ -153,6 +161,8 @@ recovery2text(enum rsc_recovery_type type)
+             return "attempting recovery";
+         case recovery_block:
+             return "waiting for an administrator";
++        case recovery_stop_unexpected:
++            return "stopping unexpected instances";
+     }
+     return "Unknown";
+ }
+diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c
+index f2caef831..fc9028e81 100644
+--- a/lib/pengine/complex.c
++++ b/lib/pengine/complex.c
+@@ -694,6 +694,11 @@ common_unpack(xmlNode * xml_obj, pe_resource_t ** rsc,
+         (*rsc)->recovery_type = recovery_block;
+         pe_rsc_trace((*rsc), "\tMultiple running resource recovery: block");
+ 
++    } else if (pcmk__str_eq(value, "stop_unexpected", pcmk__str_casei)) {
++        (*rsc)->recovery_type = recovery_stop_unexpected;
++        pe_rsc_trace((*rsc), "\tMultiple running resource recovery: "
++                             "stop unexpected instances");
++
+     } else { // "stop_start"
+         if (!pcmk__str_eq(value, "stop_start",
+                           pcmk__str_casei|pcmk__str_null_matches)) {
+diff --git a/lib/pengine/native.c b/lib/pengine/native.c
+index e16e54bae..fa7dc8960 100644
+--- a/lib/pengine/native.c
++++ b/lib/pengine/native.c
+@@ -149,8 +149,6 @@ native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * dat
+                     }
+                 }
+                 break;
+-            case recovery_stop_start:
+-                break;
+             case recovery_block:
+                 pe__clear_resource_flags(rsc, pe_rsc_managed);
+                 pe__set_resource_flags(rsc, pe_rsc_block);
+@@ -171,6 +169,11 @@ native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * dat
+                     }
+                 }
+                 break;
++            default: // recovery_stop_start, recovery_stop_unexpected
++                /* The scheduler will do the right thing because the relevant
++                 * variables and flags are set when unpacking the history.
++                 */
++                break;
+         }
+         crm_debug("%s is active on multiple nodes including %s: %s",
+                   rsc->id, node->details->uname,
+-- 
+2.27.0
+
+
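+One detail worth noting in recovery2text() above: the new enumerator gets its
+own case rather than being folded into a default label, so compilers that
+warn about unhandled enum values (e.g. GCC's -Wswitch) will flag any future
+rsc_recovery_type addition that forgets a description. A toy illustration of
+the pattern:
+
+    #include <stdio.h>
+
+    enum recovery_type { STOP_START, STOP_ONLY, BLOCK, STOP_UNEXPECTED };
+
+    static const char *
+    recovery_text(enum recovery_type type)
+    {
+        switch (type) {   /* no default: -Wswitch catches missing cases */
+            case STOP_START:      return "shutting it down";
+            case STOP_ONLY:       return "attempting recovery";
+            case BLOCK:           return "waiting for an administrator";
+            case STOP_UNEXPECTED: return "stopping unexpected instances";
+        }
+        return "Unknown";   /* reached only for out-of-range values */
+    }
+
+    int main(void)
+    {
+        printf("%s\n", recovery_text(STOP_UNEXPECTED));
+        return 0;
+    }
+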
+From 5e994f0633b27e7a53701e0954466739c8f1acf7 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Wed, 30 Mar 2022 16:26:19 -0500
+Subject: [PATCH 6/9] API: libpe_status: add pe_rsc_stop_unexpected flag
+
+---
+ include/crm/pengine/pe_types.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/include/crm/pengine/pe_types.h b/include/crm/pengine/pe_types.h
+index e3ecaa823..7d5394bff 100644
+--- a/include/crm/pengine/pe_types.h
++++ b/include/crm/pengine/pe_types.h
+@@ -277,6 +277,7 @@ struct pe_node_s {
+ 
+ #  define pe_rsc_starting                   0x00100000ULL
+ #  define pe_rsc_stopping                   0x00200000ULL
++#  define pe_rsc_stop_unexpected            0x00400000ULL
+ #  define pe_rsc_allow_migrate              0x00800000ULL
+ 
+ #  define pe_rsc_failure_ignored            0x01000000ULL
+-- 
+2.27.0
+
+
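+The new flag takes the previously unused 0x00400000 bit between
+pe_rsc_stopping and pe_rsc_allow_migrate. A minimal sketch of how such ULL
+bit flags are set and tested, with simplified stand-ins for
+pe__set_resource_flags() and pcmk_is_set():
+
+    #include <stdbool.h>
+    #include <stdint.h>
+    #include <stdio.h>
+
+    #define RSC_STOPPING        0x00200000ULL
+    #define RSC_STOP_UNEXPECTED 0x00400000ULL  /* the bit added above */
+
+    static bool all_set(uint64_t flags, uint64_t mask)
+    {
+        return (flags & mask) == mask;
+    }
+
+    int main(void)
+    {
+        uint64_t flags = 0;
+
+        flags |= RSC_STOP_UNEXPECTED;
+        printf("stop_unexpected: %s\n",
+               all_set(flags, RSC_STOP_UNEXPECTED) ? "set" : "clear");
+        return 0;
+    }
+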
+From c1acf05be853d99c17761759b8c961f2ec4a55c2 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 31 Mar 2022 09:56:34 -0500
+Subject: [PATCH 7/9] API: libpe_status: add pe_rsc_restarting flag
+
+This is used to indicate that any actions currently being scheduled are part of
+the resource's restart actions (i.e. we are in schedule_restart_actions()).
+---
+ include/crm/pengine/pe_types.h    | 1 +
+ lib/pacemaker/pcmk_sched_native.c | 4 ++++
+ 2 files changed, 5 insertions(+)
+
+diff --git a/include/crm/pengine/pe_types.h b/include/crm/pengine/pe_types.h
+index 7d5394bff..77d28e900 100644
+--- a/include/crm/pengine/pe_types.h
++++ b/include/crm/pengine/pe_types.h
+@@ -265,6 +265,7 @@ struct pe_node_s {
+ #  define pe_rsc_provisional                0x00000100ULL
+ #  define pe_rsc_allocating                 0x00000200ULL
+ #  define pe_rsc_merging                    0x00000400ULL
++#  define pe_rsc_restarting                 0x00000800ULL
+ 
+ #  define pe_rsc_stop                       0x00001000ULL
+ #  define pe_rsc_reload                     0x00002000ULL
+diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c
+index 8b651ebd2..8002938b5 100644
+--- a/lib/pacemaker/pcmk_sched_native.c
++++ b/lib/pacemaker/pcmk_sched_native.c
+@@ -1204,6 +1204,8 @@ schedule_restart_actions(pe_resource_t *rsc, pe_node_t *current,
+     enum rsc_role_e role = rsc->role;
+     enum rsc_role_e next_role;
+ 
++    pe__set_resource_flags(rsc, pe_rsc_restarting);
++
+     // Bring resource down to a stop on its current node
+     while (role != RSC_ROLE_STOPPED) {
+         next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
+@@ -1235,6 +1237,8 @@ schedule_restart_actions(pe_resource_t *rsc, pe_node_t *current,
+         }
+         role = next_role;
+     }
++
++    pe__clear_resource_flags(rsc, pe_rsc_restarting);
+ }
+ 
+ void
+-- 
+2.27.0
+
+
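+pe_rsc_restarting acts as a scoped marker: set on entry to
+schedule_restart_actions() and cleared on exit, it lets helpers further down
+the call chain distinguish restart scheduling from other action creation. A
+compilable sketch of that scoped-flag pattern, with hypothetical types:
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    #define RSC_RESTARTING 0x00000800ULL
+
+    struct resource { uint64_t flags; };
+
+    static void create_one_action(struct resource *rsc)
+    {
+        /* helpers can branch on the marker while it is in scope */
+        printf("restarting? %s\n",
+               (rsc->flags & RSC_RESTARTING) ? "yes" : "no");
+    }
+
+    static void schedule_restart_actions(struct resource *rsc)
+    {
+        rsc->flags |= RSC_RESTARTING;    /* entering restart scheduling */
+        create_one_action(rsc);
+        rsc->flags &= ~RSC_RESTARTING;   /* leaving it again */
+    }
+
+    int main(void)
+    {
+        struct resource rsc = { 0 };
+
+        schedule_restart_actions(&rsc);  /* prints "restarting? yes" */
+        create_one_action(&rsc);         /* prints "restarting? no"  */
+        return 0;
+    }
+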
+From 871e2201d92520039df45062afc9120fd1fb0f30 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Tue, 1 Mar 2022 17:46:39 -0600
+Subject: [PATCH 8/9] Refactor: scheduler: add expected node to primitive
+ variant data
+
+Nothing uses it yet
+---
+ include/crm/pengine/internal.h |  4 ++++
+ lib/pengine/native.c           | 38 ++++++++++++++++++++++++++++++++++
+ lib/pengine/variant.h          |  8 +++++--
+ 3 files changed, 48 insertions(+), 2 deletions(-)
+
+diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
+index f949684b4..f69e6bcce 100644
+--- a/include/crm/pengine/internal.h
++++ b/include/crm/pengine/internal.h
+@@ -579,4 +579,8 @@ xmlNode *pe__failed_probe_for_rsc(pe_resource_t *rsc, const char *name);
+ 
+ const char *pe__clone_child_id(pe_resource_t *rsc);
+ 
++void pe__update_expected_node(pe_resource_t *rsc, pe_node_t *node,
++                              int execution_status, int exit_status,
++                              int expected_exit_status);
++
+ #endif
+diff --git a/lib/pengine/native.c b/lib/pengine/native.c
+index fa7dc8960..591d1c6f5 100644
+--- a/lib/pengine/native.c
++++ b/lib/pengine/native.c
+@@ -1376,3 +1376,41 @@ pe__native_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_paren
+ 
+     return TRUE;
+ }
++
++/*!
++ * \internal
++ * \brief Set a resource's expected node if appropriate for a history result
++ *
++ * \param[in] rsc                   Resource to set expected node for
++ * \param[in] node                  Node to set as expected node
++ * \param[in] execution_status      History entry's execution status
++ * \param[in] exit_status           History entry's actual exit status
++ * \param[in] expected_exit_status  History entry's expected exit status
++ */
++void
++pe__update_expected_node(pe_resource_t *rsc, pe_node_t *node,
++                         int execution_status, int exit_status,
++                         int expected_exit_status)
++{
++    native_variant_data_t *native_data = NULL;
++
++    get_native_variant_data(native_data, rsc);
++
++    if ((rsc->recovery_type == recovery_stop_unexpected)
++        && (rsc->role > RSC_ROLE_STOPPED)
++        && (execution_status == PCMK_EXEC_DONE)
++        && (exit_status == expected_exit_status)) {
++        // Resource is active and was expected on this node
++        pe_rsc_trace(rsc, "Found expected node %s for %s",
++                     node->details->uname, rsc->id);
++        native_data->expected_node = node;
++        pe__set_resource_flags(rsc, pe_rsc_stop_unexpected);
++
++    } else if ((native_data->expected_node != NULL)
++               && (native_data->expected_node->details == node->details)) {
++        // Resource is not cleanly active here
++        pe_rsc_trace(rsc, "Clearing expected node for %s", rsc->id);
++        native_data->expected_node = NULL;
++        pe__clear_resource_flags(rsc, pe_rsc_stop_unexpected);
++    }
++}
+diff --git a/lib/pengine/variant.h b/lib/pengine/variant.h
+index cabfbe81f..d8fefa9d6 100644
+--- a/lib/pengine/variant.h
++++ b/lib/pengine/variant.h
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2004-2021 the Pacemaker project contributors
++ * Copyright 2004-2022 the Pacemaker project contributors
+  *
+  * The version control history for this file may have further details.
+  *
+@@ -139,7 +139,11 @@ typedef struct group_variant_data_s {
+ #  elif VARIANT_NATIVE
+ 
+ typedef struct native_variant_data_s {
+-    int dummy;
++    /* If the resource is multiply active, and has multiple-active set to
++     * stop_unexpected, this will be set to the node where the resource was
++     * found active by an operation with a expected result.
++     */
++    pe_node_t *expected_node;
+ } native_variant_data_t;
+ 
+ #    define get_native_variant_data(data, rsc)				\
+-- 
+2.27.0
+
+
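+pe__update_expected_node() behaves like a latch: a clean result with the
+expected status records the reporting node, and any later entry that
+contradicts the recorded node clears it again. A standalone sketch of the
+latch, with hypothetical types in place of pe_resource_t and pe_node_t:
+
+    #include <stdbool.h>
+    #include <stddef.h>
+
+    struct node { const char *name; };
+
+    struct resource {
+        bool stop_unexpected;   /* multiple-active=stop_unexpected */
+        bool active;            /* role above Stopped              */
+        const struct node *expected_node;
+    };
+
+    static void
+    update_expected_node(struct resource *rsc, const struct node *node,
+                         bool op_done, bool rc_as_expected)
+    {
+        if (rsc->stop_unexpected && rsc->active && op_done
+            && rc_as_expected) {
+            rsc->expected_node = node;   /* cleanly active here */
+
+        } else if (rsc->expected_node == node) {
+            rsc->expected_node = NULL;   /* not cleanly active here anymore */
+        }
+    }
+
+    int main(void)
+    {
+        struct node n2 = { "node2" };
+        struct resource rsc = { true, true, NULL };
+
+        update_expected_node(&rsc, &n2, true, true);   /* latches node2   */
+        update_expected_node(&rsc, &n2, true, false);  /* clears it again */
+        return (rsc.expected_node == NULL) ? 0 : 1;
+    }
+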
+From 0e4e17e972f1c3663389f18d8f8c527bd819b3c5 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Thu, 7 Apr 2022 10:20:00 -0500
+Subject: [PATCH 9/9] Feature: scheduler: implement
+ multiple-active=stop_unexpected
+
+The default multiple-active policy of restarting the resource on all nodes
+requires no special handling, because at least one of the locations will have
+an unexpected rc, causing the resource to be marked as failed and restarted,
+and StopRsc() creates stops on all nodes running the resource.
+
+The new stop_unexpected behavior relies on most of the same handling, but
+the action creation functions need to skip the node where the resource had the
+expected result. For that, we set the new expected_node in the primitive
+variant data when unpacking a successful result, to be checked by those
+functions.
+
+Note that this still schedules a start for the resource, which is a pseudo-op
+for the resource itself, but (properly) causes any dependent resources to be
+restarted.
+
+Fixes T23
+---
+ lib/pacemaker/pcmk_output.c       | 10 ++++
+ lib/pacemaker/pcmk_sched_native.c | 94 ++++++++++++++++++++++++++++++-
+ lib/pengine/unpack.c              |  1 +
+ 3 files changed, 103 insertions(+), 2 deletions(-)
+
+diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c
+index 56963a93f..9a522a3e5 100644
+--- a/lib/pacemaker/pcmk_output.c
++++ b/lib/pacemaker/pcmk_output.c
+@@ -918,6 +918,16 @@ rsc_action_default(pcmk__output_t *out, va_list args)
+     if (possible_matches) {
+         stop = possible_matches->data;
+         g_list_free(possible_matches);
++    } else if (pcmk_is_set(rsc->flags, pe_rsc_stop_unexpected)) {
++        /* The resource is multiply active with multiple-active set to
++         * stop_unexpected, and not stopping on its current node, but it should
++         * be stopping elsewhere.
++         */
++        possible_matches = pe__resource_actions(rsc, NULL, RSC_STOP, FALSE);
++        if (possible_matches != NULL) {
++            stop = possible_matches->data;
++            g_list_free(possible_matches);
++        }
+     }
+ 
+     possible_matches = pe__resource_actions(rsc, next, RSC_PROMOTE, FALSE);
+diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c
+index 8002938b5..c0224849f 100644
+--- a/lib/pacemaker/pcmk_sched_native.c
++++ b/lib/pacemaker/pcmk_sched_native.c
+@@ -1259,7 +1259,10 @@ native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set)
+     enum rsc_role_e role = RSC_ROLE_UNKNOWN;
+     enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;
+ 
+-    CRM_ASSERT(rsc);
++    native_variant_data_t *native_data = NULL;
++
++    get_native_variant_data(native_data, rsc);
++
+     chosen = rsc->allocated_to;
+     next_role = rsc->next_role;
+     if (next_role == RSC_ROLE_UNKNOWN) {
+@@ -1323,6 +1326,7 @@ native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set)
+                        "(will stop on both nodes)",
+                        rsc->id, rsc->partial_migration_source->details->uname,
+                        rsc->partial_migration_target->details->uname);
++            multiply_active = false;
+ 
+         } else {
+             const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
+@@ -1345,6 +1349,11 @@ native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set)
+         allow_migrate = FALSE;
+     }
+ 
++    if (!multiply_active) {
++        native_data->expected_node = NULL;
++        pe__clear_resource_flags(rsc, pe_rsc_stop_unexpected);
++    }
++
+     if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
+         pe_rsc_trace(rsc, "Creating start action for %s to represent already pending start",
+                      rsc->id);
+@@ -1995,6 +2004,32 @@ native_expand(pe_resource_t * rsc, pe_working_set_t * data_set)
+     out->message(out, "rsc-action", rsc, current, next);
+ }
+ 
++/*!
++ * \internal
++ * \brief Check whether a node is a multiply active resource's expected node
++ *
++ * \param[in] rsc   Resource to check
++ * \param[in] node  Node to check
++ *
++ * \return true if \p rsc is multiply active with multiple-active set to
++ *         stop_unexpected, and \p node is the node where it will remain active
++ * \note This assumes that the resource's next role cannot be changed to stopped
++ *       after this is called, which should be reasonable if status has already
++ *       been unpacked and resources have been assigned to nodes.
++ */
++static bool
++is_expected_node(const pe_resource_t *rsc, const pe_node_t *node)
++{
++    native_variant_data_t *native_data = NULL;
++
++    get_native_variant_data(native_data, rsc);
++    return pcmk_all_flags_set(rsc->flags,
++                              pe_rsc_stop_unexpected|pe_rsc_restarting)
++           && (rsc->next_role > RSC_ROLE_STOPPED)
++           && (native_data->expected_node != NULL) && (node != NULL)
++           && (native_data->expected_node->details == node->details);
++}
++
+ gboolean
+ StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
+ {
+@@ -2006,6 +2041,18 @@ StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set
+         pe_node_t *current = (pe_node_t *) gIter->data;
+         pe_action_t *stop;
+ 
++        if (is_expected_node(rsc, current)) {
++            /* We are scheduling restart actions for a multiply active resource
++             * with multiple-active=stop_unexpected, and this is where it should
++             * not be stopped.
++             */
++            pe_rsc_trace(rsc,
++                         "Skipping stop of multiply active resource %s "
++                         "on expected node %s",
++                         rsc->id, current->details->uname);
++            continue;
++        }
++
+         if (rsc->partial_migration_target) {
+             if (rsc->partial_migration_target->details == current->details) {
+                 pe_rsc_trace(rsc,
+@@ -2029,6 +2076,17 @@ StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set
+ 
+         if(rsc->allocated_to == NULL) {
+             pe_action_set_reason(stop, "node availability", TRUE);
++        } else if (pcmk_is_set(rsc->flags, pe_rsc_restarting)) {
++            native_variant_data_t *native_data = NULL;
++
++            get_native_variant_data(native_data, rsc);
++            if (native_data->expected_node != NULL) {
++                /* We are stopping a multiply active resource on a node that is
++                 * not its expected node, and we are still scheduling restart
++                 * actions, so the stop is for being multiply active.
++                 */
++                pe_action_set_reason(stop, "being multiply active", TRUE);
++            }
+         }
+ 
+         if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+@@ -2071,6 +2129,16 @@ StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_se
+         pe__clear_action_flags(start, pe_action_optional);
+     }
+ 
++    if (is_expected_node(rsc, next)) {
++        /* This could be a problem if the start becomes necessary for other
++         * reasons later.
++         */
++        pe_rsc_trace(rsc,
++                     "Start of multiply active resouce %s "
++                     "on expected node %s will be a pseudo-action",
++                     rsc->id, next->details->uname);
++        pe__set_action_flags(start, pe_action_pseudo);
++    }
+ 
+     return TRUE;
+ }
+@@ -2084,6 +2152,7 @@ PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_
+ 
+     CRM_ASSERT(rsc);
+     CRM_CHECK(next != NULL, return FALSE);
++
+     pe_rsc_trace(rsc, "%s on %s", rsc->id, next->details->uname);
+ 
+     action_list = pe__resource_actions(rsc, next, RSC_START, TRUE);
+@@ -2098,7 +2167,19 @@ PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_
+     g_list_free(action_list);
+ 
+     if (runnable) {
+-        promote_action(rsc, next, optional);
++        pe_action_t *promote = promote_action(rsc, next, optional);
++
++        if (is_expected_node(rsc, next)) {
++            /* This could be a problem if the promote becomes necessary for
++             * other reasons later.
++             */
++            pe_rsc_trace(rsc,
++                         "Promotion of multiply active resouce %s "
++                         "on expected node %s will be a pseudo-action",
++                         rsc->id, next->details->uname);
++            pe__set_action_flags(promote, pe_action_pseudo);
++        }
++
+         return TRUE;
+     }
+ 
+@@ -2122,6 +2203,15 @@ DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_s
+     GList *gIter = NULL;
+ 
+     CRM_ASSERT(rsc);
++
++    if (is_expected_node(rsc, next)) {
++        pe_rsc_trace(rsc,
++                     "Skipping demote of multiply active resource %s "
++                     "on expected node %s",
++                     rsc->id, next->details->uname);
++        return TRUE;
++    }
++
+     pe_rsc_trace(rsc, "%s", rsc->id);
+ 
+     /* CRM_CHECK(rsc->next_role == RSC_ROLE_UNPROMOTED, return FALSE); */
+diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
+index d218f523f..edaa9de48 100644
+--- a/lib/pengine/unpack.c
++++ b/lib/pengine/unpack.c
+@@ -3974,6 +3974,7 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
+     }
+ 
+ done:
++    pe__update_expected_node(rsc, node, status, rc, target_rc);
+     pe_rsc_trace(rsc, "Resource %s after %s: role=%s, next=%s",
+                  rsc->id, task, role2text(rsc->role),
+                  role2text(rsc->next_role));
+-- 
+2.27.0
+
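+The whole stop_unexpected implementation hinges on one predicate:
+is_expected_node() is true only while restart actions are being scheduled for
+a multiply active resource that will remain active, and only on the node that
+reported the expected result. A condensed, compilable sketch of that guard
+with hypothetical types:
+
+    #include <stdbool.h>
+    #include <stddef.h>
+
+    struct node { const char *name; };
+
+    struct resource {
+        bool stop_unexpected;   /* pe_rsc_stop_unexpected set */
+        bool restarting;        /* pe_rsc_restarting set      */
+        bool stays_active;      /* next role above Stopped    */
+        const struct node *expected_node;
+    };
+
+    static bool
+    is_expected_node(const struct resource *rsc, const struct node *node)
+    {
+        return rsc->stop_unexpected && rsc->restarting && rsc->stays_active
+               && (rsc->expected_node != NULL)
+               && (rsc->expected_node == node);
+    }
+
+    int main(void)
+    {
+        struct node n2 = { "node2" };
+        struct resource rsc = { true, true, true, &n2 };
+
+        return is_expected_node(&rsc, &n2) ? 0 : 1;
+    }
+
+Where the guard holds, the stop and demote are skipped outright, while the
+start and promote are still created but flagged pe_action_pseudo, so ordering
+constraints (and therefore dependent resources) behave as if a full restart
+happened on that node.
+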
diff --git a/SOURCES/025-stop_unexpected-test.patch b/SOURCES/025-stop_unexpected-test.patch
new file mode 100644
index 0000000..65b74dc
--- /dev/null
+++ b/SOURCES/025-stop_unexpected-test.patch
@@ -0,0 +1,495 @@
+From 8a0a16c8ed72c74d656664694ebe36b76ff22498 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Wed, 30 Mar 2022 17:14:33 -0500
+Subject: [PATCH] Test: cts-scheduler: add test for
+ multiple-active=stop_unexpected
+
+---
+ cts/cts-scheduler.in                          |   1 +
+ cts/scheduler/dot/stop-unexpected.dot         |  40 ++++
+ cts/scheduler/exp/stop-unexpected.exp         | 201 ++++++++++++++++++
+ cts/scheduler/scores/stop-unexpected.scores   |  17 ++
+ cts/scheduler/summary/stop-unexpected.summary |  41 ++++
+ cts/scheduler/xml/stop-unexpected.xml         | 131 ++++++++++++
+ 6 files changed, 431 insertions(+)
+ create mode 100644 cts/scheduler/dot/stop-unexpected.dot
+ create mode 100644 cts/scheduler/exp/stop-unexpected.exp
+ create mode 100644 cts/scheduler/scores/stop-unexpected.scores
+ create mode 100644 cts/scheduler/summary/stop-unexpected.summary
+ create mode 100644 cts/scheduler/xml/stop-unexpected.xml
+
+diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
+index 3a8aeaca5..8c04687da 100644
+--- a/cts/cts-scheduler.in
++++ b/cts/cts-scheduler.in
+@@ -273,6 +273,7 @@ TESTS = [
+         [ "rec-rsc-6", "Resource Recover - multiple - restart" ],
+         [ "rec-rsc-7", "Resource Recover - multiple - stop" ],
+         [ "rec-rsc-8", "Resource Recover - multiple - block" ],
++        [ "stop-unexpected", "Resource Recover - multiple - stop unexpected" ],
+         [ "rec-rsc-9", "Resource Recover - group/group" ],
+         [ "monitor-recovery", "on-fail=block + resource recovery detected by recurring monitor" ],
+         [ "stop-failure-no-quorum", "Stop failure without quorum" ],
+diff --git a/cts/scheduler/dot/stop-unexpected.dot b/cts/scheduler/dot/stop-unexpected.dot
+new file mode 100644
+index 000000000..0f67eec54
+--- /dev/null
++++ b/cts/scheduler/dot/stop-unexpected.dot
+@@ -0,0 +1,40 @@
++ digraph "g" {
++"dgroup_running_0" [ style=bold color="green" fontcolor="orange"]
++"dgroup_start_0" -> "dgroup_running_0" [ style = bold]
++"dgroup_start_0" -> "dummy2_start_0 node2" [ style = bold]
++"dgroup_start_0" -> "dummy3_start_0 node2" [ style = bold]
++"dgroup_start_0" -> "dummy_start_0 node2" [ style = bold]
++"dgroup_start_0" [ style=bold color="green" fontcolor="orange"]
++"dgroup_stop_0" -> "dgroup_stopped_0" [ style = bold]
++"dgroup_stop_0" -> "dummy2_stop_0 node2" [ style = bold]
++"dgroup_stop_0" -> "dummy3_stop_0 node2" [ style = bold]
++"dgroup_stop_0" -> "dummy_stop_0 node3" [ style = bold]
++"dgroup_stop_0" [ style=bold color="green" fontcolor="orange"]
++"dgroup_stopped_0" -> "dgroup_start_0" [ style = bold]
++"dgroup_stopped_0" [ style=bold color="green" fontcolor="orange"]
++"dummy2_monitor_10000 node2" [ style=bold color="green" fontcolor="black"]
++"dummy2_start_0 node2" -> "dgroup_running_0" [ style = bold]
++"dummy2_start_0 node2" -> "dummy2_monitor_10000 node2" [ style = bold]
++"dummy2_start_0 node2" -> "dummy3_start_0 node2" [ style = bold]
++"dummy2_start_0 node2" [ style=bold color="green" fontcolor="black"]
++"dummy2_stop_0 node2" -> "dgroup_stopped_0" [ style = bold]
++"dummy2_stop_0 node2" -> "dummy2_start_0 node2" [ style = bold]
++"dummy2_stop_0 node2" -> "dummy_stop_0 node3" [ style = bold]
++"dummy2_stop_0 node2" [ style=bold color="green" fontcolor="black"]
++"dummy3_monitor_10000 node2" [ style=bold color="green" fontcolor="black"]
++"dummy3_start_0 node2" -> "dgroup_running_0" [ style = bold]
++"dummy3_start_0 node2" -> "dummy3_monitor_10000 node2" [ style = bold]
++"dummy3_start_0 node2" [ style=bold color="green" fontcolor="black"]
++"dummy3_stop_0 node2" -> "dgroup_stopped_0" [ style = bold]
++"dummy3_stop_0 node2" -> "dummy2_stop_0 node2" [ style = bold]
++"dummy3_stop_0 node2" -> "dummy3_start_0 node2" [ style = bold]
++"dummy3_stop_0 node2" [ style=bold color="green" fontcolor="black"]
++"dummy_monitor_10000 node2" [ style=bold color="green" fontcolor="black"]
++"dummy_start_0 node2" -> "dgroup_running_0" [ style = bold]
++"dummy_start_0 node2" -> "dummy2_start_0 node2" [ style = bold]
++"dummy_start_0 node2" -> "dummy_monitor_10000 node2" [ style = bold]
++"dummy_start_0 node2" [ style=bold color="green" fontcolor="orange"]
++"dummy_stop_0 node3" -> "dgroup_stopped_0" [ style = bold]
++"dummy_stop_0 node3" -> "dummy_start_0 node2" [ style = bold]
++"dummy_stop_0 node3" [ style=bold color="green" fontcolor="black"]
++}
+diff --git a/cts/scheduler/exp/stop-unexpected.exp b/cts/scheduler/exp/stop-unexpected.exp
+new file mode 100644
+index 000000000..1f94532f7
+--- /dev/null
++++ b/cts/scheduler/exp/stop-unexpected.exp
+@@ -0,0 +1,201 @@
++<transition_graph cluster-delay="60s" stonith-timeout="90" failed-stop-offset="INFINITY" failed-start-offset="INFINITY"  transition_id="0">
++  <synapse id="0">
++    <action_set>
++      <pseudo_event id="15" operation="stopped" operation_key="dgroup_stopped_0">
++        <attributes CRM_meta_record_pending="false" CRM_meta_timeout="20000" />
++      </pseudo_event>
++    </action_set>
++    <inputs>
++      <trigger>
++        <rsc_op id="4" operation="stop" operation_key="dummy_stop_0" on_node="node3" on_node_uuid="node3"/>
++      </trigger>
++      <trigger>
++        <rsc_op id="8" operation="stop" operation_key="dummy2_stop_0" on_node="node2" on_node_uuid="node2"/>
++      </trigger>
++      <trigger>
++        <rsc_op id="10" operation="stop" operation_key="dummy3_stop_0" on_node="node2" on_node_uuid="node2"/>
++      </trigger>
++      <trigger>
++        <pseudo_event id="14" operation="stop" operation_key="dgroup_stop_0"/>
++      </trigger>
++    </inputs>
++  </synapse>
++  <synapse id="1">
++    <action_set>
++      <pseudo_event id="14" operation="stop" operation_key="dgroup_stop_0">
++        <attributes CRM_meta_record_pending="false" CRM_meta_timeout="20000" />
++      </pseudo_event>
++    </action_set>
++    <inputs/>
++  </synapse>
++  <synapse id="2">
++    <action_set>
++      <pseudo_event id="13" operation="running" operation_key="dgroup_running_0">
++        <attributes CRM_meta_record_pending="false" CRM_meta_timeout="20000" />
++      </pseudo_event>
++    </action_set>
++    <inputs>
++      <trigger>
++        <pseudo_event id="7" operation="start" operation_key="dummy_start_0"/>
++      </trigger>
++      <trigger>
++        <rsc_op id="9" operation="start" operation_key="dummy2_start_0" on_node="node2" on_node_uuid="node2"/>
++      </trigger>
++      <trigger>
++        <rsc_op id="11" operation="start" operation_key="dummy3_start_0" on_node="node2" on_node_uuid="node2"/>
++      </trigger>
++      <trigger>
++        <pseudo_event id="12" operation="start" operation_key="dgroup_start_0"/>
++      </trigger>
++    </inputs>
++  </synapse>
++  <synapse id="3">
++    <action_set>
++      <pseudo_event id="12" operation="start" operation_key="dgroup_start_0">
++        <attributes CRM_meta_record_pending="false" CRM_meta_timeout="20000" />
++      </pseudo_event>
++    </action_set>
++    <inputs>
++      <trigger>
++        <pseudo_event id="15" operation="stopped" operation_key="dgroup_stopped_0"/>
++      </trigger>
++    </inputs>
++  </synapse>
++  <synapse id="4">
++    <action_set>
++      <pseudo_event id="7" operation="start" operation_key="dummy_start_0">
++        <attributes CRM_meta_name="start" CRM_meta_record_pending="false" CRM_meta_timeout="300000" />
++      </pseudo_event>
++    </action_set>
++    <inputs>
++      <trigger>
++        <rsc_op id="4" operation="stop" operation_key="dummy_stop_0" on_node="node3" on_node_uuid="node3"/>
++      </trigger>
++      <trigger>
++        <pseudo_event id="12" operation="start" operation_key="dgroup_start_0"/>
++      </trigger>
++    </inputs>
++  </synapse>
++  <synapse id="5">
++    <action_set>
++      <rsc_op id="4" operation="stop" operation_key="dummy_stop_0" on_node="node3" on_node_uuid="node3">
++        <primitive id="dummy" class="ocf" provider="heartbeat" type="DummyTimeout"/>
++        <attributes CRM_meta_name="stop" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="node3" CRM_meta_record_pending="false" CRM_meta_timeout="20000" />
++      </rsc_op>
++    </action_set>
++    <inputs>
++      <trigger>
++        <rsc_op id="8" operation="stop" operation_key="dummy2_stop_0" on_node="node2" on_node_uuid="node2"/>
++      </trigger>
++      <trigger>
++        <pseudo_event id="14" operation="stop" operation_key="dgroup_stop_0"/>
++      </trigger>
++    </inputs>
++  </synapse>
++  <synapse id="6">
++    <action_set>
++      <rsc_op id="2" operation="monitor" operation_key="dummy_monitor_10000" on_node="node2" on_node_uuid="node2">
++        <primitive id="dummy" class="ocf" provider="heartbeat" type="DummyTimeout"/>
++        <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="node2" CRM_meta_record_pending="false" CRM_meta_timeout="20000" />
++      </rsc_op>
++    </action_set>
++    <inputs>
++      <trigger>
++        <pseudo_event id="7" operation="start" operation_key="dummy_start_0"/>
++      </trigger>
++    </inputs>
++  </synapse>
++  <synapse id="7">
++    <action_set>
++      <rsc_op id="9" operation="start" operation_key="dummy2_start_0" on_node="node2" on_node_uuid="node2">
++        <primitive id="dummy2" class="ocf" provider="heartbeat" type="Dummy"/>
++        <attributes CRM_meta_name="start" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="node2" CRM_meta_record_pending="false" CRM_meta_timeout="20000" />
++      </rsc_op>
++    </action_set>
++    <inputs>
++      <trigger>
++        <pseudo_event id="7" operation="start" operation_key="dummy_start_0"/>
++      </trigger>
++      <trigger>
++        <rsc_op id="8" operation="stop" operation_key="dummy2_stop_0" on_node="node2" on_node_uuid="node2"/>
++      </trigger>
++      <trigger>
++        <pseudo_event id="12" operation="start" operation_key="dgroup_start_0"/>
++      </trigger>
++    </inputs>
++  </synapse>
++  <synapse id="8">
++    <action_set>
++      <rsc_op id="8" operation="stop" operation_key="dummy2_stop_0" on_node="node2" on_node_uuid="node2">
++        <primitive id="dummy2" class="ocf" provider="heartbeat" type="Dummy"/>
++        <attributes CRM_meta_name="stop" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="node2" CRM_meta_record_pending="false" CRM_meta_timeout="20000" />
++      </rsc_op>
++    </action_set>
++    <inputs>
++      <trigger>
++        <rsc_op id="10" operation="stop" operation_key="dummy3_stop_0" on_node="node2" on_node_uuid="node2"/>
++      </trigger>
++      <trigger>
++        <pseudo_event id="14" operation="stop" operation_key="dgroup_stop_0"/>
++      </trigger>
++    </inputs>
++  </synapse>
++  <synapse id="9">
++    <action_set>
++      <rsc_op id="3" operation="monitor" operation_key="dummy2_monitor_10000" on_node="node2" on_node_uuid="node2">
++        <primitive id="dummy2" class="ocf" provider="heartbeat" type="Dummy"/>
++        <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="node2" CRM_meta_record_pending="false" CRM_meta_timeout="20000" />
++      </rsc_op>
++    </action_set>
++    <inputs>
++      <trigger>
++        <rsc_op id="9" operation="start" operation_key="dummy2_start_0" on_node="node2" on_node_uuid="node2"/>
++      </trigger>
++    </inputs>
++  </synapse>
++  <synapse id="10">
++    <action_set>
++      <rsc_op id="11" operation="start" operation_key="dummy3_start_0" on_node="node2" on_node_uuid="node2">
++        <primitive id="dummy3" class="ocf" provider="heartbeat" type="Dummy"/>
++        <attributes CRM_meta_name="start" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="node2" CRM_meta_record_pending="false" CRM_meta_timeout="20000" />
++      </rsc_op>
++    </action_set>
++    <inputs>
++      <trigger>
++        <rsc_op id="9" operation="start" operation_key="dummy2_start_0" on_node="node2" on_node_uuid="node2"/>
++      </trigger>
++      <trigger>
++        <rsc_op id="10" operation="stop" operation_key="dummy3_stop_0" on_node="node2" on_node_uuid="node2"/>
++      </trigger>
++      <trigger>
++        <pseudo_event id="12" operation="start" operation_key="dgroup_start_0"/>
++      </trigger>
++    </inputs>
++  </synapse>
++  <synapse id="11">
++    <action_set>
++      <rsc_op id="10" operation="stop" operation_key="dummy3_stop_0" on_node="node2" on_node_uuid="node2">
++        <primitive id="dummy3" class="ocf" provider="heartbeat" type="Dummy"/>
++        <attributes CRM_meta_name="stop" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="node2" CRM_meta_record_pending="false" CRM_meta_timeout="20000" />
++      </rsc_op>
++    </action_set>
++    <inputs>
++      <trigger>
++        <pseudo_event id="14" operation="stop" operation_key="dgroup_stop_0"/>
++      </trigger>
++    </inputs>
++  </synapse>
++  <synapse id="12">
++    <action_set>
++      <rsc_op id="1" operation="monitor" operation_key="dummy3_monitor_10000" on_node="node2" on_node_uuid="node2">
++        <primitive id="dummy3" class="ocf" provider="heartbeat" type="Dummy"/>
++        <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="node2" CRM_meta_record_pending="false" CRM_meta_timeout="20000" />
++      </rsc_op>
++    </action_set>
++    <inputs>
++      <trigger>
++        <rsc_op id="11" operation="start" operation_key="dummy3_start_0" on_node="node2" on_node_uuid="node2"/>
++      </trigger>
++    </inputs>
++  </synapse>
++</transition_graph>
+diff --git a/cts/scheduler/scores/stop-unexpected.scores b/cts/scheduler/scores/stop-unexpected.scores
+new file mode 100644
+index 000000000..68f98e837
+--- /dev/null
++++ b/cts/scheduler/scores/stop-unexpected.scores
+@@ -0,0 +1,17 @@
++
++pcmk__group_allocate: dgroup allocation score on node2: 0
++pcmk__group_allocate: dgroup allocation score on node3: 0
++pcmk__group_allocate: dummy allocation score on node2: 0
++pcmk__group_allocate: dummy allocation score on node3: 0
++pcmk__group_allocate: dummy2 allocation score on node2: 100
++pcmk__group_allocate: dummy2 allocation score on node3: 0
++pcmk__group_allocate: dummy3 allocation score on node2: 100
++pcmk__group_allocate: dummy3 allocation score on node3: 0
++pcmk__native_allocate: dummy allocation score on node2: 200
++pcmk__native_allocate: dummy allocation score on node3: 0
++pcmk__native_allocate: dummy2 allocation score on node2: 200
++pcmk__native_allocate: dummy2 allocation score on node3: -INFINITY
++pcmk__native_allocate: dummy3 allocation score on node2: 100
++pcmk__native_allocate: dummy3 allocation score on node3: -INFINITY
++pcmk__native_allocate: st-sbd allocation score on node2: 100
++pcmk__native_allocate: st-sbd allocation score on node3: 0
+diff --git a/cts/scheduler/summary/stop-unexpected.summary b/cts/scheduler/summary/stop-unexpected.summary
+new file mode 100644
+index 000000000..7c7fc68b6
+--- /dev/null
++++ b/cts/scheduler/summary/stop-unexpected.summary
+@@ -0,0 +1,41 @@
++Current cluster status:
++  * Node List:
++    * Online: [ node2 node3 ]
++
++  * Full List of Resources:
++    * st-sbd	(stonith:external/sbd):	 Started node2
++    * Resource Group: dgroup:
++      * dummy	(ocf:heartbeat:DummyTimeout):	 FAILED [ node2 node3 ]
++      * dummy2	(ocf:heartbeat:Dummy):	 Started node2
++      * dummy3	(ocf:heartbeat:Dummy):	 Started node2
++
++Transition Summary:
++  * Recover    dummy      ( node2 )  due to being multiply active
++  * Restart    dummy2     ( node2 )  due to required dummy start
++  * Restart    dummy3     ( node2 )  due to required dummy2 start
++
++Executing Cluster Transition:
++  * Pseudo action:   dgroup_stop_0
++  * Resource action: dummy3          stop on node2
++  * Resource action: dummy2          stop on node2
++  * Resource action: dummy           stop on node3
++  * Pseudo action:   dgroup_stopped_0
++  * Pseudo action:   dgroup_start_0
++  * Pseudo action:   dummy_start_0
++  * Resource action: dummy           monitor=10000 on node2
++  * Resource action: dummy2          start on node2
++  * Resource action: dummy2          monitor=10000 on node2
++  * Resource action: dummy3          start on node2
++  * Resource action: dummy3          monitor=10000 on node2
++  * Pseudo action:   dgroup_running_0
++
++Revised Cluster Status:
++  * Node List:
++    * Online: [ node2 node3 ]
++
++  * Full List of Resources:
++    * st-sbd	(stonith:external/sbd):	 Started node2
++    * Resource Group: dgroup:
++      * dummy	(ocf:heartbeat:DummyTimeout):	 Started node2
++      * dummy2	(ocf:heartbeat:Dummy):	 Started node2
++      * dummy3	(ocf:heartbeat:Dummy):	 Started node2
+diff --git a/cts/scheduler/xml/stop-unexpected.xml b/cts/scheduler/xml/stop-unexpected.xml
+new file mode 100644
+index 000000000..6e61aeba3
+--- /dev/null
++++ b/cts/scheduler/xml/stop-unexpected.xml
+@@ -0,0 +1,131 @@
++<cib epoch="631" num_updates="25" admin_epoch="0" validate-with="pacemaker-3.0" crm_feature_set="3.0.8" have-quorum="1" cib-last-written="Thu Aug 20 11:44:27 2015" update-origin="node2" update-client="cibadmin" update-user="root" dc-uuid="node2">
++  <configuration>
++    <crm_config>
++      <cluster_property_set id="cib-bootstrap-options">
++        <nvpair name="dc-version" value="1.1.11-3ca8c3b" id="cib-bootstrap-options-dc-version"/>
++        <nvpair name="cluster-infrastructure" value="corosync" id="cib-bootstrap-options-cluster-infrastructure"/>
++        <nvpair name="node-action-limit" value="2" id="cib-bootstrap-options-node-action-limit"/>
++        <nvpair name="no-quorum-policy" value="ignore" id="cib-bootstrap-options-no-quorum-policy"/>
++        <nvpair name="stonith-enabled" value="false" id="cib-bootstrap-options-stonith-enabled"/>
++        <nvpair name="stonith-timeout" value="90" id="cib-bootstrap-options-stonith-timeout"/>
++        <nvpair name="last-lrm-refresh" value="1439556204" id="cib-bootstrap-options-last-lrm-refresh"/>
++      </cluster_property_set>
++    </crm_config>
++    <nodes>
++      <node uname="node2" id="node2">
++        <instance_attributes id="nodes-node2">
++          <nvpair id="nodes-node2-standby" name="standby" value="off"/>
++        </instance_attributes>
++      </node>
++      <node id="node3" uname="node3">
++        <instance_attributes id="nodes-node3">
++          <nvpair id="nodes-node3-standby" name="standby" value="off"/>
++        </instance_attributes>
++      </node>
++    </nodes>
++    <resources>
++      <primitive id="st-sbd" class="stonith" type="external/sbd"/>
++      <group id="dgroup">
++        <meta_attributes id="dgroup-meta_attributes">
++          <nvpair name="multiple-active" value="stop_unexpected" id="dgroup-meta_attributes-multiple-active"/>
++        </meta_attributes>
++        <primitive id="dummy" class="ocf" provider="heartbeat" type="DummyTimeout">
++          <operations>
++            <op name="monitor" interval="10s" timeout="20" id="dummy-monitor-10s"/>
++            <op name="start" timeout="300s" interval="0" id="dummy-start-0"/>
++            <op name="stop" timeout="20s" interval="0" id="dummy-stop-0"/>
++          </operations>
++          <meta_attributes id="dummy-meta_attributes">
++            <nvpair name="target-role" value="Started" id="dummy-meta_attributes-target-role"/>
++          </meta_attributes>
++        </primitive>
++        <primitive id="dummy2" class="ocf" provider="heartbeat" type="Dummy">
++          <operations>
++            <op name="monitor" interval="10s" timeout="20" id="dummy2-monitor-10s"/>
++            <op name="start" timeout="20s" interval="0" id="dummy2-start-0"/>
++            <op name="stop" timeout="20s" interval="0" id="dummy2-stop-0"/>
++          </operations>
++          <meta_attributes id="dummy2-meta_attributes">
++            <nvpair name="target-role" value="Started" id="dummy2-meta_attributes-target-role"/>
++          </meta_attributes>
++        </primitive>
++        <primitive id="dummy3" class="ocf" provider="heartbeat" type="Dummy">
++          <operations>
++            <op name="monitor" interval="10s" timeout="20" id="dummy3-monitor-10s"/>
++            <op name="start" timeout="20s" interval="0" id="dummy3-start-0"/>
++            <op name="stop" timeout="20s" interval="0" id="dummy3-stop-0"/>
++          </operations>
++          <meta_attributes id="dummy3-meta_attributes">
++            <nvpair name="target-role" value="Started" id="dummy3-meta_attributes-target-role"/>
++          </meta_attributes>
++        </primitive>
++      </group>
++    </resources>
++    <constraints/>
++    <op_defaults>
++      <meta_attributes id="op_defaults-options">
++        <nvpair id="op_defaults-options-record-pending" name="record-pending" value="false"/>
++      </meta_attributes>
++    </op_defaults>
++    <rsc_defaults>
++      <meta_attributes id="rsc_defaults-options">
++        <nvpair name="resource-stickiness" value="100" id="rsc_defaults-options-resource-stickiness"/>
++        <nvpair name="migration-threshold" value="100" id="rsc_defaults-options-migration-threshold"/>
++      </meta_attributes>
++    </rsc_defaults>
++  </configuration>
++  <status>
++    <node_state id="node2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
++      <transient_attributes id="node2">
++        <instance_attributes id="status-node2">
++          <nvpair id="status-node2-shutdown" name="shutdown" value="0"/>
++          <nvpair id="status-node2-probe_complete" name="probe_complete" value="true"/>
++        </instance_attributes>
++      </transient_attributes>
++      <lrm id="node2">
++        <lrm_resources>
++          <lrm_resource id="dummy3" type="Dummy" class="ocf" provider="heartbeat">
++            <lrm_rsc_op id="dummy3_last_0" operation_key="dummy3_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.8" transition-key="13:1:0:b78eae54-472e-4e90-a3c5-ec4b25a6d8cf" transition-magic="0:0;13:1:0:b78eae54-472e-4e90-a3c5-ec4b25a6d8cf" call-id="24" rc-code="0" op-status="0" interval="0" last-run="1440063239" last-rc-change="1440063239" exec-time="6" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
++            <lrm_rsc_op id="dummy3_monitor_10000" operation_key="dummy3_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.8" transition-key="14:1:0:b78eae54-472e-4e90-a3c5-ec4b25a6d8cf" transition-magic="0:0;14:1:0:b78eae54-472e-4e90-a3c5-ec4b25a6d8cf" call-id="25" rc-code="0" op-status="0" interval="10000" last-rc-change="1440063239" exec-time="5" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
++          </lrm_resource>
++          <lrm_resource id="st-sbd" type="external/sbd" class="stonith">
++            <lrm_rsc_op id="st-sbd_last_0" operation_key="st-sbd_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.8" transition-key="10:6:0:b78eae54-472e-4e90-a3c5-ec4b25a6d8cf" transition-magic="0:0;10:6:0:b78eae54-472e-4e90-a3c5-ec4b25a6d8cf" call-id="27" rc-code="0" op-status="0" interval="0" last-run="1440064019" last-rc-change="1440064019" exec-time="1213" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
++          </lrm_resource>
++          <lrm_resource id="dummy" type="DummyTimeout" class="ocf" provider="heartbeat">
++            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.8" transition-key="9:1:0:b78eae54-472e-4e90-a3c5-ec4b25a6d8cf" transition-magic="0:0;9:1:0:b78eae54-472e-4e90-a3c5-ec4b25a6d8cf" call-id="20" rc-code="0" op-status="0" interval="0" last-run="1440063237" last-rc-change="1440063237" exec-time="1009" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
++            <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.8" transition-key="10:1:0:b78eae54-472e-4e90-a3c5-ec4b25a6d8cf" transition-magic="0:0;10:1:0:b78eae54-472e-4e90-a3c5-ec4b25a6d8cf" call-id="21" rc-code="0" op-status="0" interval="10000" last-rc-change="1440063238" exec-time="1010" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
++          </lrm_resource>
++          <lrm_resource id="dummy2" type="Dummy" class="ocf" provider="heartbeat">
++            <lrm_rsc_op id="dummy2_last_0" operation_key="dummy2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.8" transition-key="11:1:0:b78eae54-472e-4e90-a3c5-ec4b25a6d8cf" transition-magic="0:0;11:1:0:b78eae54-472e-4e90-a3c5-ec4b25a6d8cf" call-id="22" rc-code="0" op-status="0" interval="0" last-run="1440063239" last-rc-change="1440063239" exec-time="5" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
++            <lrm_rsc_op id="dummy2_monitor_10000" operation_key="dummy2_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.8" transition-key="12:1:0:b78eae54-472e-4e90-a3c5-ec4b25a6d8cf" transition-magic="0:0;12:1:0:b78eae54-472e-4e90-a3c5-ec4b25a6d8cf" call-id="23" rc-code="0" op-status="0" interval="10000" last-rc-change="1440063239" exec-time="5" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
++          </lrm_resource>
++        </lrm_resources>
++      </lrm>
++    </node_state>
++    <node_state id="node3" uname="node3" crmd="online" crm-debug-origin="do_update_resource" in_ccm="true" join="member" expected="member">
++      <transient_attributes id="node3">
++        <instance_attributes id="status-node3">
++          <nvpair id="status-node3-shutdown" name="shutdown" value="0"/>
++          <nvpair id="status-node3-probe_complete" name="probe_complete" value="true"/>
++        </instance_attributes>
++      </transient_attributes>
++      <lrm id="node3">
++        <lrm_resources>
++          <lrm_resource id="dummy3" type="Dummy" class="ocf" provider="heartbeat">
++            <lrm_rsc_op id="dummy3_last_0" operation_key="dummy3_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.8" transition-key="11:2:7:b78eae54-472e-4e90-a3c5-ec4b25a6d8cf" transition-magic="0:7;11:2:7:b78eae54-472e-4e90-a3c5-ec4b25a6d8cf" call-id="18" rc-code="7" op-status="0" interval="0" last-run="1440063820" last-rc-change="1440063820" exec-time="7" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
++          </lrm_resource>
++          <lrm_resource id="st-sbd" type="external/sbd" class="stonith">
++            <lrm_rsc_op id="st-sbd_last_0" operation_key="st-sbd_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.0.8" transition-key="9:6:0:b78eae54-472e-4e90-a3c5-ec4b25a6d8cf" transition-magic="0:0;9:6:0:b78eae54-472e-4e90-a3c5-ec4b25a6d8cf" call-id="21" rc-code="0" op-status="0" interval="0" last-run="1440064019" last-rc-change="1440064019" exec-time="1" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
++          </lrm_resource>
++          <lrm_resource id="dummy" type="DummyTimeout" class="ocf" provider="heartbeat">
++            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.8" transition-key="5:0:0:a5e85e43-f35a-4f75-8e15-f0ddc8d81812" transition-magic="0:7;5:0:0:a5e85e43-f35a-4f75-8e15-f0ddc8d81812" call-id="20" rc-code="7" op-status="0" interval="0" last-run="1440063984" last-rc-change="1440063984" exec-time="179014" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
++            <lrm_rsc_op id="dummy_last_failure_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.8" transition-key="5:0:0:a5e85e43-f35a-4f75-8e15-f0ddc8d81812" transition-magic="0:7;5:0:0:a5e85e43-f35a-4f75-8e15-f0ddc8d81812" call-id="20" rc-code="7" op-status="0" interval="0" last-run="1440063984" last-rc-change="1440063984" exec-time="179014" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
++          </lrm_resource>
++          <lrm_resource id="dummy2" type="Dummy" class="ocf" provider="heartbeat">
++            <lrm_rsc_op id="dummy2_last_0" operation_key="dummy2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.8" transition-key="10:2:7:b78eae54-472e-4e90-a3c5-ec4b25a6d8cf" transition-magic="0:7;10:2:7:b78eae54-472e-4e90-a3c5-ec4b25a6d8cf" call-id="14" rc-code="7" op-status="0" interval="0" last-run="1440063820" last-rc-change="1440063820" exec-time="11" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
++          </lrm_resource>
++        </lrm_resources>
++      </lrm>
++    </node_state>
++  </status>
++</cib>
+-- 
+2.27.0
+
diff --git a/SOURCES/026-stop_unexpected-fix.patch b/SOURCES/026-stop_unexpected-fix.patch
new file mode 100644
index 0000000..69dd95d
--- /dev/null
+++ b/SOURCES/026-stop_unexpected-fix.patch
@@ -0,0 +1,589 @@
+From 4a5dcc5210160f7d167bc68142635c1b5a6d4af2 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Fri, 22 Apr 2022 10:47:29 -0500
+Subject: [PATCH 1/3] Fix: scheduler: make multiple-active="stop_unexpected"
+ actually work
+
+The previous implementation covered the scenario in the regression test and not
+much else. It unnecessarily added an expected_node member to the native variant
+data, when the resource's allocated_to is sufficient to know the expected node.
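+
+As an illustrative sketch (not part of this change), the new
+is_expected_node() check below condenses to a comparison against the
+allocated node, with no extra variant-data state:
+
+    /* A node is the "expected" node for a multiply active resource only
+     * while restart actions are still being scheduled, the resource is
+     * not stopping altogether, and the node is where the scheduler has
+     * allocated the resource.
+     */
+    bool expected = pcmk_all_flags_set(rsc->flags,
+                                       pe_rsc_stop_unexpected|pe_rsc_restarting)
+                    && (rsc->next_role > RSC_ROLE_STOPPED)
+                    && (rsc->allocated_to != NULL) && (node != NULL)
+                    && (rsc->allocated_to->details == node->details);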
+---
+ lib/pacemaker/pcmk_sched_native.c | 45 +++++++++++++++----------------
+ lib/pengine/unpack.c              |  1 -
+ 2 files changed, 22 insertions(+), 24 deletions(-)
+
+diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c
+index c0224849f..a1a51721e 100644
+--- a/lib/pacemaker/pcmk_sched_native.c
++++ b/lib/pacemaker/pcmk_sched_native.c
+@@ -1250,7 +1250,7 @@ native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set)
+     gboolean need_stop = FALSE;
+     bool need_promote = FALSE;
+     gboolean is_moving = FALSE;
+-    gboolean allow_migrate = pcmk_is_set(rsc->flags, pe_rsc_allow_migrate)? TRUE : FALSE;
++    gboolean allow_migrate = FALSE;
+ 
+     GList *gIter = NULL;
+     unsigned int num_all_active = 0;
+@@ -1259,9 +1259,8 @@ native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set)
+     enum rsc_role_e role = RSC_ROLE_UNKNOWN;
+     enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;
+ 
+-    native_variant_data_t *native_data = NULL;
+-
+-    get_native_variant_data(native_data, rsc);
++    CRM_ASSERT(rsc != NULL);
++    allow_migrate = pcmk_is_set(rsc->flags, pe_rsc_allow_migrate)? TRUE : FALSE;
+ 
+     chosen = rsc->allocated_to;
+     next_role = rsc->next_role;
+@@ -1338,8 +1337,16 @@ native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set)
+             crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ#Resource_is_Too_Active for more information");
+         }
+ 
+-        if (rsc->recovery_type == recovery_stop_start) {
+-            need_stop = TRUE;
++        switch (rsc->recovery_type) {
++            case recovery_stop_start:
++                need_stop = TRUE;
++                break;
++            case recovery_stop_unexpected:
++                need_stop = TRUE; // StopRsc() will skip expected node
++                pe__set_resource_flags(rsc, pe_rsc_stop_unexpected);
++                break;
++            default:
++                break;
+         }
+ 
+         /* If by chance a partial migration is in process, but the migration
+@@ -1350,7 +1357,6 @@ native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set)
+     }
+ 
+     if (!multiply_active) {
+-        native_data->expected_node = NULL;
+         pe__clear_resource_flags(rsc, pe_rsc_stop_unexpected);
+     }
+ 
+@@ -2020,14 +2026,11 @@ native_expand(pe_resource_t * rsc, pe_working_set_t * data_set)
+ static bool
+ is_expected_node(const pe_resource_t *rsc, const pe_node_t *node)
+ {
+-    native_variant_data_t *native_data = NULL;
+-
+-    get_native_variant_data(native_data, rsc);
+     return pcmk_all_flags_set(rsc->flags,
+                               pe_rsc_stop_unexpected|pe_rsc_restarting)
+            && (rsc->next_role > RSC_ROLE_STOPPED)
+-           && (native_data->expected_node != NULL) && (node != NULL)
+-           && (native_data->expected_node->details == node->details);
++           && (rsc->allocated_to != NULL) && (node != NULL)
++           && (rsc->allocated_to->details == node->details);
+ }
+ 
+ gboolean
+@@ -2076,17 +2079,13 @@ StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set
+ 
+         if(rsc->allocated_to == NULL) {
+             pe_action_set_reason(stop, "node availability", TRUE);
+-        } else if (pcmk_is_set(rsc->flags, pe_rsc_restarting)) {
+-            native_variant_data_t *native_data = NULL;
+-
+-            get_native_variant_data(native_data, rsc);
+-            if (native_data->expected_node != NULL) {
+-                /* We are stopping a multiply active resource on a node that is
+-                 * not its expected node, and we are still scheduling restart
+-                 * actions, so the stop is for being multiply active.
+-                 */
+-                pe_action_set_reason(stop, "being multiply active", TRUE);
+-            }
++        } else if (pcmk_all_flags_set(rsc->flags, pe_rsc_restarting
++                                                  |pe_rsc_stop_unexpected)) {
++            /* We are stopping a multiply active resource on a node that is
++             * not its expected node, and we are still scheduling restart
++             * actions, so the stop is for being multiply active.
++             */
++            pe_action_set_reason(stop, "being multiply active", TRUE);
+         }
+ 
+         if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
+index 17dea0d7a..426022013 100644
+--- a/lib/pengine/unpack.c
++++ b/lib/pengine/unpack.c
+@@ -3945,7 +3945,6 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
+     }
+ 
+ done:
+-    pe__update_expected_node(rsc, node, status, rc, target_rc);
+     pe_rsc_trace(rsc, "Resource %s after %s: role=%s, next=%s",
+                  rsc->id, task, role2text(rsc->role),
+                  role2text(rsc->next_role));
+-- 
+2.27.0
+
+
+From 703d3a09bce389afb4e095e1ac7af29eb5edd189 Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Fri, 22 Apr 2022 14:02:34 -0500
+Subject: [PATCH 2/3] Test: scheduler: add a second regression test for
+ multiple-active=stop_unexpected
+
+---
+ cts/cts-scheduler.in                          |   3 +-
+ cts/scheduler/dot/stop-unexpected-2.dot       |   7 +
+ cts/scheduler/exp/stop-unexpected-2.exp       |  36 ++++
+ cts/scheduler/scores/stop-unexpected-2.scores |  21 ++
+ .../summary/stop-unexpected-2.summary         |  29 +++
+ cts/scheduler/xml/stop-unexpected-2.xml       | 204 ++++++++++++++++++
+ 6 files changed, 299 insertions(+), 1 deletion(-)
+ create mode 100644 cts/scheduler/dot/stop-unexpected-2.dot
+ create mode 100644 cts/scheduler/exp/stop-unexpected-2.exp
+ create mode 100644 cts/scheduler/scores/stop-unexpected-2.scores
+ create mode 100644 cts/scheduler/summary/stop-unexpected-2.summary
+ create mode 100644 cts/scheduler/xml/stop-unexpected-2.xml
+
+diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
+index 8c04687da..7fc76cce4 100644
+--- a/cts/cts-scheduler.in
++++ b/cts/cts-scheduler.in
+@@ -273,8 +273,9 @@ TESTS = [
+         [ "rec-rsc-6", "Resource Recover - multiple - restart" ],
+         [ "rec-rsc-7", "Resource Recover - multiple - stop" ],
+         [ "rec-rsc-8", "Resource Recover - multiple - block" ],
+-        [ "stop-unexpected", "Resource Recover - multiple - stop unexpected" ],
+         [ "rec-rsc-9", "Resource Recover - group/group" ],
++        [ "stop-unexpected", "Recover multiply active group with stop_unexpected" ],
++        [ "stop-unexpected-2", "Resource multiply active primitve with stop_unexpected" ],
+         [ "monitor-recovery", "on-fail=block + resource recovery detected by recurring monitor" ],
+         [ "stop-failure-no-quorum", "Stop failure without quorum" ],
+         [ "stop-failure-no-fencing", "Stop failure without fencing available" ],
+diff --git a/cts/scheduler/dot/stop-unexpected-2.dot b/cts/scheduler/dot/stop-unexpected-2.dot
+new file mode 100644
+index 000000000..cdaebf551
+--- /dev/null
++++ b/cts/scheduler/dot/stop-unexpected-2.dot
+@@ -0,0 +1,7 @@
++ digraph "g" {
++"test_monitor_10000 rhel8-4" [ style=bold color="green" fontcolor="black"]
++"test_start_0 rhel8-4" -> "test_monitor_10000 rhel8-4" [ style = bold]
++"test_start_0 rhel8-4" [ style=bold color="green" fontcolor="orange"]
++"test_stop_0 rhel8-3" -> "test_start_0 rhel8-4" [ style = bold]
++"test_stop_0 rhel8-3" [ style=bold color="green" fontcolor="black"]
++}
+diff --git a/cts/scheduler/exp/stop-unexpected-2.exp b/cts/scheduler/exp/stop-unexpected-2.exp
+new file mode 100644
+index 000000000..258053c08
+--- /dev/null
++++ b/cts/scheduler/exp/stop-unexpected-2.exp
+@@ -0,0 +1,36 @@
++<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="1"  transition_id="0">
++  <synapse id="0">
++    <action_set>
++      <rsc_op id="10" operation="monitor" operation_key="test_monitor_10000" on_node="rhel8-4" on_node_uuid="4">
++        <primitive id="test" class="ocf" provider="pacemaker" type="Dummy"/>
++        <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="rhel8-4" CRM_meta_on_node_uuid="4" CRM_meta_timeout="20000" />
++      </rsc_op>
++    </action_set>
++    <inputs>
++      <trigger>
++        <pseudo_event id="9" operation="start" operation_key="test_start_0"/>
++      </trigger>
++    </inputs>
++  </synapse>
++  <synapse id="1">
++    <action_set>
++      <pseudo_event id="9" operation="start" operation_key="test_start_0">
++        <attributes CRM_meta_name="start" CRM_meta_timeout="20000" />
++      </pseudo_event>
++    </action_set>
++    <inputs>
++      <trigger>
++        <rsc_op id="8" operation="stop" operation_key="test_stop_0" on_node="rhel8-3" on_node_uuid="3"/>
++      </trigger>
++    </inputs>
++  </synapse>
++  <synapse id="2">
++    <action_set>
++      <rsc_op id="8" operation="stop" operation_key="test_stop_0" on_node="rhel8-3" on_node_uuid="3">
++        <primitive id="test" class="ocf" provider="pacemaker" type="Dummy"/>
++        <attributes CRM_meta_name="stop" CRM_meta_on_node="rhel8-3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" />
++      </rsc_op>
++    </action_set>
++    <inputs/>
++  </synapse>
++</transition_graph>
+diff --git a/cts/scheduler/scores/stop-unexpected-2.scores b/cts/scheduler/scores/stop-unexpected-2.scores
+new file mode 100644
+index 000000000..0eb549f5e
+--- /dev/null
++++ b/cts/scheduler/scores/stop-unexpected-2.scores
+@@ -0,0 +1,21 @@
++
++pcmk__native_allocate: Fencing allocation score on rhel8-1: 0
++pcmk__native_allocate: Fencing allocation score on rhel8-2: 0
++pcmk__native_allocate: Fencing allocation score on rhel8-3: 0
++pcmk__native_allocate: Fencing allocation score on rhel8-4: 0
++pcmk__native_allocate: Fencing allocation score on rhel8-5: 0
++pcmk__native_allocate: FencingFail allocation score on rhel8-1: 0
++pcmk__native_allocate: FencingFail allocation score on rhel8-2: 0
++pcmk__native_allocate: FencingFail allocation score on rhel8-3: 0
++pcmk__native_allocate: FencingFail allocation score on rhel8-4: 0
++pcmk__native_allocate: FencingFail allocation score on rhel8-5: 0
++pcmk__native_allocate: FencingPass allocation score on rhel8-1: 0
++pcmk__native_allocate: FencingPass allocation score on rhel8-2: 0
++pcmk__native_allocate: FencingPass allocation score on rhel8-3: 0
++pcmk__native_allocate: FencingPass allocation score on rhel8-4: 0
++pcmk__native_allocate: FencingPass allocation score on rhel8-5: 0
++pcmk__native_allocate: test allocation score on rhel8-1: 0
++pcmk__native_allocate: test allocation score on rhel8-2: 0
++pcmk__native_allocate: test allocation score on rhel8-3: 0
++pcmk__native_allocate: test allocation score on rhel8-4: 0
++pcmk__native_allocate: test allocation score on rhel8-5: 0
+diff --git a/cts/scheduler/summary/stop-unexpected-2.summary b/cts/scheduler/summary/stop-unexpected-2.summary
+new file mode 100644
+index 000000000..d6b0c15dc
+--- /dev/null
++++ b/cts/scheduler/summary/stop-unexpected-2.summary
+@@ -0,0 +1,29 @@
++Using the original execution date of: 2022-04-22 14:15:37Z
++Current cluster status:
++  * Node List:
++    * Online: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 rhel8-5 ]
++
++  * Full List of Resources:
++    * Fencing	(stonith:fence_xvm):	 Started rhel8-1
++    * FencingPass	(stonith:fence_dummy):	 Started rhel8-2
++    * FencingFail	(stonith:fence_dummy):	 Started rhel8-3
++    * test	(ocf:pacemaker:Dummy):	 Started [ rhel8-4 rhel8-3 ]
++
++Transition Summary:
++  * Restart    test    ( rhel8-4 )
++
++Executing Cluster Transition:
++  * Resource action: test            stop on rhel8-3
++  * Pseudo action:   test_start_0
++  * Resource action: test            monitor=10000 on rhel8-4
++Using the original execution date of: 2022-04-22 14:15:37Z
++
++Revised Cluster Status:
++  * Node List:
++    * Online: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 rhel8-5 ]
++
++  * Full List of Resources:
++    * Fencing	(stonith:fence_xvm):	 Started rhel8-1
++    * FencingPass	(stonith:fence_dummy):	 Started rhel8-2
++    * FencingFail	(stonith:fence_dummy):	 Started rhel8-3
++    * test	(ocf:pacemaker:Dummy):	 Started rhel8-4
+diff --git a/cts/scheduler/xml/stop-unexpected-2.xml b/cts/scheduler/xml/stop-unexpected-2.xml
+new file mode 100644
+index 000000000..e103629e9
+--- /dev/null
++++ b/cts/scheduler/xml/stop-unexpected-2.xml
+@@ -0,0 +1,204 @@
++<cib crm_feature_set="3.13.0" validate-with="pacemaker-3.7" epoch="59" num_updates="14" admin_epoch="0" cib-last-written="Fri Apr 22 09:15:36 2022" update-origin="rhel8-1" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="2" execution-date="1650636937">
++  <configuration>
++    <crm_config>
++      <cluster_property_set id="cib-bootstrap-options">
++        <nvpair id="cts-stonith-enabled" name="stonith-enabled" value="1"/>
++        <nvpair id="cts-start-failure-is-fatal" name="start-failure-is-fatal" value="false"/>
++        <nvpair id="cts-pe-input-series-max" name="pe-input-series-max" value="5000"/>
++        <nvpair id="cts-shutdown-escalation" name="shutdown-escalation" value="5min"/>
++        <nvpair id="cts-batch-limit" name="batch-limit" value="10"/>
++        <nvpair id="cts-dc-deadtime" name="dc-deadtime" value="5s"/>
++        <nvpair id="cts-no-quorum-policy" name="no-quorum-policy" value="stop"/>
++        <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
++        <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.2-4.el8_6.1-ada5c3b36e2"/>
++        <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
++        <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="rhel8-lab"/>
++        <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1650636936"/>
++      </cluster_property_set>
++    </crm_config>
++    <nodes>
++      <node id="1" uname="rhel8-1"/>
++      <node id="3" uname="rhel8-3"/>
++      <node id="4" uname="rhel8-4"/>
++      <node id="5" uname="rhel8-5"/>
++      <node id="2" uname="rhel8-2"/>
++    </nodes>
++    <resources>
++      <primitive class="stonith" id="Fencing" type="fence_xvm">
++        <meta_attributes id="Fencing-meta">
++          <nvpair id="Fencing-migration-threshold" name="migration-threshold" value="5"/>
++        </meta_attributes>
++        <instance_attributes id="Fencing-params">
++          <nvpair id="Fencing-pcmk_host_map" name="pcmk_host_map" value="remote-rhel8-1:rhel8-1;remote-rhel8-2:rhel8-2;remote-rhel8-3:rhel8-3;remote-rhel8-4:rhel8-4;remote-rhel8-5:rhel8-5;"/>
++          <nvpair id="Fencing-key_file" name="key_file" value="/etc/pacemaker/fence_xvm.key"/>
++          <nvpair id="Fencing-multicast_address" name="multicast_address" value="239.255.100.100"/>
++          <nvpair id="Fencing-pcmk_host_list" name="pcmk_host_list" value="rhel8-1 remote-rhel8-1 rhel8-2 remote-rhel8-2 rhel8-3 remote-rhel8-3 rhel8-4 remote-rhel8-4 rhel8-5 remote-rhel8-5"/>
++        </instance_attributes>
++        <operations>
++          <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
++          <op id="Fencing-stop-0" interval="0" name="stop" timeout="60s"/>
++          <op id="Fencing-start-0" interval="0" name="start" timeout="60s"/>
++        </operations>
++      </primitive>
++      <primitive class="stonith" id="FencingPass" type="fence_dummy">
++        <instance_attributes id="FencingPass-params">
++          <nvpair id="FencingPass-pcmk_host_list" name="pcmk_host_list" value="rhel8-4 remote-rhel8-4 rhel8-5 remote-rhel8-5"/>
++          <nvpair id="FencingPass-random_sleep_range" name="random_sleep_range" value="30"/>
++          <nvpair id="FencingPass-mode" name="mode" value="pass"/>
++        </instance_attributes>
++      </primitive>
++      <primitive class="stonith" id="FencingFail" type="fence_dummy">
++        <instance_attributes id="FencingFail-params">
++          <nvpair id="FencingFail-pcmk_host_list" name="pcmk_host_list" value="rhel8-2 remote-rhel8-2"/>
++          <nvpair id="FencingFail-random_sleep_range" name="random_sleep_range" value="30"/>
++          <nvpair id="FencingFail-mode" name="mode" value="fail"/>
++        </instance_attributes>
++      </primitive>
++      <primitive class="ocf" id="test" provider="pacemaker" type="Dummy">
++        <meta_attributes id="test-meta_attributes">
++          <nvpair id="test-meta_attributes-multiple-active" name="multiple-active" value="stop_unexpected"/>
++        </meta_attributes>
++        <operations>
++          <op id="test-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
++          <op id="test-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
++          <op id="test-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
++          <op id="test-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
++          <op id="test-reload-agent-interval-0s" interval="0s" name="reload-agent" timeout="20s"/>
++          <op id="test-start-interval-0s" interval="0s" name="start" timeout="20s"/>
++          <op id="test-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
++        </operations>
++      </primitive>
++    </resources>
++    <constraints/>
++    <fencing-topology>
++      <fencing-level devices="FencingFail" id="cts-rhel8-2.1" index="1" target="rhel8-2"/>
++      <fencing-level devices="Fencing" id="cts-rhel8-2.2" index="2" target="rhel8-2"/>
++      <fencing-level devices="FencingFail" id="cts-remote-rhel8-2.1" index="1" target="remote-rhel8-2"/>
++      <fencing-level devices="Fencing" id="cts-remote-rhel8-2.2" index="2" target="remote-rhel8-2"/>
++      <fencing-level devices="FencingPass,Fencing" id="cts-rhel8-4.1" index="1" target="rhel8-4"/>
++      <fencing-level devices="FencingPass,Fencing" id="cts-remote-rhel8-4.1" index="1" target="remote-rhel8-4"/>
++      <fencing-level devices="FencingPass,Fencing" id="cts-rhel8-5.1" index="1" target="rhel8-5"/>
++      <fencing-level devices="FencingPass,Fencing" id="cts-remote-rhel8-5.1" index="1" target="remote-rhel8-5"/>
++    </fencing-topology>
++    <op_defaults>
++      <meta_attributes id="cts-op_defaults-meta">
++        <nvpair id="cts-op_defaults-timeout" name="timeout" value="90s"/>
++      </meta_attributes>
++    </op_defaults>
++    <alerts>
++      <alert id="alert-1" path="/var/lib/pacemaker/notify.sh">
++        <recipient id="alert-1-recipient-1" value="/run/crm/alert.log"/>
++      </alert>
++    </alerts>
++  </configuration>
++  <status>
++    <node_state id="4" uname="rhel8-4" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
++      <transient_attributes id="4">
++        <instance_attributes id="status-4"/>
++      </transient_attributes>
++      <lrm id="4">
++        <lrm_resources>
++          <lrm_resource id="FencingPass" type="fence_dummy" class="stonith">
++            <lrm_rsc_op id="FencingPass_last_0" operation_key="FencingPass_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="47:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;47:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-4" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="0" queue-time="0" op-digest="bdca24cab6ded2b426c6b31df675bf0b"/>
++          </lrm_resource>
++          <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
++            <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="46:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;46:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-4" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="9" queue-time="0" op-digest="bf974d77f2d4d33e434be1f89e362a52"/>
++          </lrm_resource>
++          <lrm_resource id="FencingFail" type="fence_dummy" class="stonith">
++            <lrm_rsc_op id="FencingFail_last_0" operation_key="FencingFail_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="48:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;48:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-4" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="0" queue-time="0" op-digest="fe7e3f8acdd3228efda2766a0eea7ba5"/>
++          </lrm_resource>
++          <lrm_resource id="test" type="Dummy" class="ocf" provider="pacemaker">
++            <lrm_rsc_op id="test_last_0" operation_key="test_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.13.0" transition-key="5:46:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:0;5:46:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-4" call-id="130" rc-code="0" op-status="0" interval="0" last-rc-change="1650636936" exec-time="25" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
++            <lrm_rsc_op id="test_last_failure_0" operation_key="test_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.13.0" transition-key="5:46:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:0;5:46:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-4" call-id="130" rc-code="0" op-status="0" interval="0" last-rc-change="1650636936" exec-time="25" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
++          </lrm_resource>
++        </lrm_resources>
++      </lrm>
++    </node_state>
++    <node_state id="5" uname="rhel8-5" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
++      <transient_attributes id="5">
++        <instance_attributes id="status-5"/>
++      </transient_attributes>
++      <lrm id="5">
++        <lrm_resources>
++          <lrm_resource id="FencingPass" type="fence_dummy" class="stonith">
++            <lrm_rsc_op id="FencingPass_last_0" operation_key="FencingPass_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="62:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;62:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-5" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="0" queue-time="0" op-digest="bdca24cab6ded2b426c6b31df675bf0b"/>
++          </lrm_resource>
++          <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
++            <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="61:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;61:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-5" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="6" queue-time="0" op-digest="bf974d77f2d4d33e434be1f89e362a52"/>
++          </lrm_resource>
++          <lrm_resource id="FencingFail" type="fence_dummy" class="stonith">
++            <lrm_rsc_op id="FencingFail_last_0" operation_key="FencingFail_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="63:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;63:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-5" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="0" queue-time="0" op-digest="fe7e3f8acdd3228efda2766a0eea7ba5"/>
++          </lrm_resource>
++          <lrm_resource id="test" type="Dummy" class="ocf" provider="pacemaker">
++            <lrm_rsc_op id="test_last_0" operation_key="test_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.13.0" transition-key="6:46:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;6:46:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-5" call-id="113" rc-code="7" op-status="0" interval="0" last-rc-change="1650636936" exec-time="25" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
++          </lrm_resource>
++        </lrm_resources>
++      </lrm>
++    </node_state>
++    <node_state id="1" uname="rhel8-1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
++      <transient_attributes id="1">
++        <instance_attributes id="status-1"/>
++      </transient_attributes>
++      <lrm id="1">
++        <lrm_resources>
++          <lrm_resource id="FencingPass" type="fence_dummy" class="stonith">
++            <lrm_rsc_op id="FencingPass_last_0" operation_key="FencingPass_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="2:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;2:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-1" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="0" queue-time="0" op-digest="bdca24cab6ded2b426c6b31df675bf0b"/>
++          </lrm_resource>
++          <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
++            <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="76:0:0:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:0;76:0:0:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-1" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1650636745" exec-time="36" queue-time="0" op-digest="bf974d77f2d4d33e434be1f89e362a52"/>
++            <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="77:0:0:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:0;77:0:0:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-1" call-id="16" rc-code="0" op-status="0" interval="120000" last-rc-change="1650636745" exec-time="36" queue-time="0" op-digest="24c9c9364f847dcb857d6fb4e1b4d3c8"/>
++          </lrm_resource>
++          <lrm_resource id="FencingFail" type="fence_dummy" class="stonith">
++            <lrm_rsc_op id="FencingFail_last_0" operation_key="FencingFail_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="3:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;3:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-1" call-id="21" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="0" queue-time="0" op-digest="fe7e3f8acdd3228efda2766a0eea7ba5"/>
++          </lrm_resource>
++          <lrm_resource id="test" type="Dummy" class="ocf" provider="pacemaker">
++            <lrm_rsc_op id="test_last_0" operation_key="test_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.13.0" transition-key="2:46:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;2:46:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-1" call-id="121" rc-code="7" op-status="0" interval="0" last-rc-change="1650636936" exec-time="62" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
++          </lrm_resource>
++        </lrm_resources>
++      </lrm>
++    </node_state>
++    <node_state id="2" uname="rhel8-2" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
++      <transient_attributes id="2">
++        <instance_attributes id="status-2"/>
++      </transient_attributes>
++      <lrm id="2">
++        <lrm_resources>
++          <lrm_resource id="FencingPass" type="fence_dummy" class="stonith">
++            <lrm_rsc_op id="FencingPass_last_0" operation_key="FencingPass_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="78:0:0:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:0;78:0:0:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-2" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1650636745" exec-time="21041" queue-time="0" op-digest="bdca24cab6ded2b426c6b31df675bf0b"/>
++          </lrm_resource>
++          <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
++            <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="16:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;16:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-2" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="2" queue-time="0" op-digest="bf974d77f2d4d33e434be1f89e362a52"/>
++          </lrm_resource>
++          <lrm_resource id="FencingFail" type="fence_dummy" class="stonith">
++            <lrm_rsc_op id="FencingFail_last_0" operation_key="FencingFail_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="18:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;18:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-2" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="0" queue-time="0" op-digest="fe7e3f8acdd3228efda2766a0eea7ba5"/>
++          </lrm_resource>
++          <lrm_resource id="test" type="Dummy" class="ocf" provider="pacemaker">
++            <lrm_rsc_op id="test_last_0" operation_key="test_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.13.0" transition-key="3:46:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;3:46:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-2" call-id="109" rc-code="7" op-status="0" interval="0" last-rc-change="1650636936" exec-time="37" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
++          </lrm_resource>
++        </lrm_resources>
++      </lrm>
++    </node_state>
++    <node_state id="3" uname="rhel8-3" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
++      <transient_attributes id="3">
++        <instance_attributes id="status-3"/>
++      </transient_attributes>
++      <lrm id="3">
++        <lrm_resources>
++          <lrm_resource id="FencingPass" type="fence_dummy" class="stonith">
++            <lrm_rsc_op id="FencingPass_last_0" operation_key="FencingPass_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="32:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;32:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-3" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="0" queue-time="0" op-digest="bdca24cab6ded2b426c6b31df675bf0b"/>
++          </lrm_resource>
++          <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
++            <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="31:0:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:7;31:0:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-3" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1650636745" exec-time="2" queue-time="0" op-digest="bf974d77f2d4d33e434be1f89e362a52"/>
++          </lrm_resource>
++          <lrm_resource id="FencingFail" type="fence_dummy" class="stonith">
++            <lrm_rsc_op id="FencingFail_last_0" operation_key="FencingFail_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.13.0" transition-key="79:0:0:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:0;79:0:0:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-3" call-id="26" rc-code="0" op-status="0" interval="0" last-rc-change="1650636745" exec-time="1044" queue-time="0" op-digest="fe7e3f8acdd3228efda2766a0eea7ba5"/>
++          </lrm_resource>
++          <lrm_resource id="test" type="Dummy" class="ocf" provider="pacemaker">
++            <lrm_rsc_op id="test_last_0" operation_key="test_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.13.0" transition-key="4:46:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:0;4:46:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-3" call-id="114" rc-code="0" op-status="0" interval="0" last-rc-change="1650636936" exec-time="24" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
++            <lrm_rsc_op id="test_last_failure_0" operation_key="test_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.13.0" transition-key="4:46:7:181f4609-25b9-4673-b760-5de7a7f55635" transition-magic="0:0;4:46:7:181f4609-25b9-4673-b760-5de7a7f55635" exit-reason="" on_node="rhel8-3" call-id="114" rc-code="0" op-status="0" interval="0" last-rc-change="1650636936" exec-time="24" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
++          </lrm_resource>
++        </lrm_resources>
++      </lrm>
++    </node_state>
++  </status>
++</cib>
+-- 
+2.27.0
+
+
+From 60d8bb01ba73dfd1cb25c6764ee2b923dcfc4e8c Mon Sep 17 00:00:00 2001
+From: Ken Gaillot <kgaillot@redhat.com>
+Date: Fri, 22 Apr 2022 14:09:43 -0500
+Subject: [PATCH 3/3] Revert "Refactor: scheduler: add expected node to
+ primitive variant data"
+
+This reverts commit 871e2201d92520039df45062afc9120fd1fb0f30.
+---
+ include/crm/pengine/internal.h |  4 ----
+ lib/pengine/native.c           | 38 ----------------------------------
+ lib/pengine/variant.h          |  8 ++-----
+ 3 files changed, 2 insertions(+), 48 deletions(-)
+
+diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
+index a2e4b5bf7..fe9a23b7e 100644
+--- a/include/crm/pengine/internal.h
++++ b/include/crm/pengine/internal.h
+@@ -580,8 +580,4 @@ xmlNode *pe__failed_probe_for_rsc(pe_resource_t *rsc, const char *name);
+ 
+ const char *pe__clone_child_id(pe_resource_t *rsc);
+ 
+-void pe__update_expected_node(pe_resource_t *rsc, pe_node_t *node,
+-                              int execution_status, int exit_status,
+-                              int expected_exit_status);
+-
+ #endif
+diff --git a/lib/pengine/native.c b/lib/pengine/native.c
+index 591d1c6f5..fa7dc8960 100644
+--- a/lib/pengine/native.c
++++ b/lib/pengine/native.c
+@@ -1376,41 +1376,3 @@ pe__native_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_paren
+ 
+     return TRUE;
+ }
+-
+-/*!
+- * \internal
+- * \brief Set a resource's expected node if appropriate for a history result
+- *
+- * \param[in] rsc               Resource to set expected node for
+- * \param[in] node              Node to set as expected node
+- * \param[in] execution_status  History entry's execution status
+- * \param[in] exit_status       History entry's actual exit status
+- * \param[in] expected_status   History entry's expected exit status
+- */
+-void
+-pe__update_expected_node(pe_resource_t *rsc, pe_node_t *node,
+-                         int execution_status, int exit_status,
+-                         int expected_exit_status)
+-{
+-    native_variant_data_t *native_data = NULL;
+-
+-    get_native_variant_data(native_data, rsc);
+-
+-    if ((rsc->recovery_type == recovery_stop_unexpected)
+-        && (rsc->role > RSC_ROLE_STOPPED)
+-        && (execution_status == PCMK_EXEC_DONE)
+-        && (exit_status == expected_exit_status)) {
+-        // Resource is active and was expected on this node
+-        pe_rsc_trace(rsc, "Found expected node %s for %s",
+-                     node->details->uname, rsc->id);
+-        native_data->expected_node = node;
+-        pe__set_resource_flags(rsc, pe_rsc_stop_unexpected);
+-
+-    } else if ((native_data->expected_node != NULL)
+-               && (native_data->expected_node->details == node->details)) {
+-        // Resource is not cleanly active here
+-        pe_rsc_trace(rsc, "Clearing expected node for %s", rsc->id);
+-        native_data->expected_node = NULL;
+-        pe__clear_resource_flags(rsc, pe_rsc_stop_unexpected);
+-    }
+-}
+diff --git a/lib/pengine/variant.h b/lib/pengine/variant.h
+index d8fefa9d6..cabfbe81f 100644
+--- a/lib/pengine/variant.h
++++ b/lib/pengine/variant.h
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2004-2022 the Pacemaker project contributors
++ * Copyright 2004-2021 the Pacemaker project contributors
+  *
+  * The version control history for this file may have further details.
+  *
+@@ -139,11 +139,7 @@ typedef struct group_variant_data_s {
+ #  elif VARIANT_NATIVE
+ 
+ typedef struct native_variant_data_s {
+-    /* If the resource is multiply active, and has multiple-active set to
+-     * stop_unexpected, this will be set to the node where the resource was
+-     * found active by an operation with a expected result.
+-     */
+-    pe_node_t *expected_node;
++    int dummy;
+ } native_variant_data_t;
+ 
+ #    define get_native_variant_data(data, rsc)				\
+-- 
+2.27.0
+
diff --git a/SPECS/pacemaker.spec b/SPECS/pacemaker.spec
index d0b0903..5033db5 100644
--- a/SPECS/pacemaker.spec
+++ b/SPECS/pacemaker.spec
@@ -242,7 +242,7 @@
 Name:          pacemaker
 Summary:       Scalable High-Availability cluster resource manager
 Version:       %{pcmkversion}
-Release:       %{pcmk_release}%{?dist}
+Release:       %{pcmk_release}%{?dist}.2
 %if %{defined _unitdir}
 License:       GPLv2+ and LGPLv2+
 %else
@@ -286,6 +286,9 @@ Patch20:       020-systemd-unit.patch
 Patch21:       021-failure-messages.patch
 Patch22:       022-memory-leak.patch
 Patch23:       023-regression.patch
+Patch24:       024-stop_unexpected.patch
+Patch25:       025-stop_unexpected-test.patch
+Patch26:       026-stop_unexpected-fix.patch
 
 # downstream-only commits
 #Patch1xx:      1xx-xxxx.patch
@@ -983,6 +986,14 @@ exit 0
 %license %{nagios_name}-%{nagios_hash}/COPYING
 
 %changelog
+* Fri Apr 22 2022 Ken Gaillot <kgaillot@redhat.com> - 2.1.2-4.2
+- Fix issue with "stop_unexpected" value for "multiple-active" meta-attribute
+- Resolves: rhbz2062848
+
+* Fri Apr 8 2022 Ken Gaillot <kgaillot@redhat.com> - 2.1.2-4.1
+- Support "stop_unexpected" value for "multiple-active" meta-attribute
+- Resolves: rhbz2062848
+
 * Wed Jan 26 2022 Ken Gaillot <kgaillot@redhat.com> - 2.1.2-4
 - Fix regression in down event detection that affects remote nodes
 - Resolves: rhbz2046446