From b37391fef92548f31822f9df2a9b5fa2a61b4514 Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Wed, 23 Jun 2021 15:17:54 -0500
Subject: [PATCH] Fix: CTS: handle longer Corosync token timeouts

Previously, startall() would call cluster_stable() immediately after detecting
the "controller successfully started" message. If the Corosync token timeout is
small enough, this will be fine. However with a token timeout of more than
about 1 second, the controllers will not have formed a membership by this
point, causing cluster_stable() to think there are multiple partitions, and
wait for a DC to be elected in each one, when really they will unite into a
single partition in a short time, and only elect a single DC.

Now, startall() waits until seeing that each node is a cluster member before
calling cluster_stable().
---
 cts/lab/CTS.py.in   | 3 ++-
 cts/lab/patterns.py | 2 ++
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/cts/lab/CTS.py.in b/cts/lab/CTS.py.in
index abcb9d285..d9924437b 100644
--- a/cts/lab/CTS.py.in
+++ b/cts/lab/CTS.py.in
@@ -628,9 +628,10 @@ class ClusterManager(UserDict):
         watchpats = [ ]
         watchpats.append(self.templates["Pat:DC_IDLE"])
         for node in nodelist:
-            watchpats.append(self.templates["Pat:Local_started"] % node)
             watchpats.append(self.templates["Pat:InfraUp"] % node)
             watchpats.append(self.templates["Pat:PacemakerUp"] % node)
+            watchpats.append(self.templates["Pat:Local_started"] % node)
+            watchpats.append(self.templates["Pat:They_up"] % (nodelist[0], node))
 
         #   Start all the nodes - at about the same time...
         watch = LogWatcher(self.Env["LogFileName"], watchpats, "fast-start", self.Env["DeadTime"]+10, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"])
diff --git a/cts/lab/patterns.py b/cts/lab/patterns.py
index e21a016ff..400fd3dc8 100644
--- a/cts/lab/patterns.py
+++ b/cts/lab/patterns.py
@@ -61,6 +61,7 @@ class BasePatterns(object):
             "Pat:We_stopped"    : "%s\W.*OVERRIDE THIS PATTERN",
             "Pat:They_stopped"  : "%s\W.*LOST:.* %s ",
             "Pat:They_dead"     : "node %s.*: is dead",
+            "Pat:They_up"       : "%s %s\W.*OVERRIDE THIS PATTERN",
             "Pat:TransitionComplete" : "Transition status: Complete: complete",
 
             "Pat:Fencing_start"   : r"Requesting peer fencing .* targeting %s",
@@ -130,6 +131,7 @@ class crm_corosync(BasePatterns):
             "Pat:We_stopped"   : "%s\W.*Unloading all Corosync service engines",
             "Pat:They_stopped" : "%s\W.*pacemaker-controld.*Node %s(\[|\s).*state is now lost",
             "Pat:They_dead"    : "pacemaker-controld.*Node %s(\[|\s).*state is now lost",
+            "Pat:They_up"      : "\W%s\W.*pacemaker-controld.*Node %s state is now member",
 
             "Pat:ChildExit"    : r"\[[0-9]+\] exited with status [0-9]+ \(",
             # "with signal 9" == pcmk_child_exit(), "$" == check_active_before_startup_processes()
-- 
2.27.0