diff --git a/SOURCES/openvswitch-2.15.0.patch b/SOURCES/openvswitch-2.15.0.patch index 4627784..11993e1 100644 --- a/SOURCES/openvswitch-2.15.0.patch +++ b/SOURCES/openvswitch-2.15.0.patch @@ -21917,6 +21917,79 @@ index f0cac8e0fa..7f5561f827 100644 return NULL; } +diff --git a/lib/reconnect.c b/lib/reconnect.c +index a929ddfd2d..89a0bcaf95 100644 +--- a/lib/reconnect.c ++++ b/lib/reconnect.c +@@ -75,7 +75,8 @@ struct reconnect { + + static void reconnect_transition__(struct reconnect *, long long int now, + enum state state); +-static long long int reconnect_deadline__(const struct reconnect *); ++static long long int reconnect_deadline__(const struct reconnect *, ++ long long int now); + static bool reconnect_may_retry(struct reconnect *); + + static const char * +@@ -539,7 +540,7 @@ reconnect_transition__(struct reconnect *fsm, long long int now, + } + + static long long int +-reconnect_deadline__(const struct reconnect *fsm) ++reconnect_deadline__(const struct reconnect *fsm, long long int now) + { + ovs_assert(fsm->state_entered != LLONG_MIN); + switch (fsm->state) { +@@ -557,8 +558,18 @@ reconnect_deadline__(const struct reconnect *fsm) + if (fsm->probe_interval) { + long long int base = MAX(fsm->last_activity, fsm->state_entered); + long long int expiration = base + fsm->probe_interval; +- if (fsm->last_receive_attempt >= expiration) { ++ if (now < expiration || fsm->last_receive_attempt >= expiration) { ++ /* We still have time before the expiration or the time has ++ * already passed and there was no activity. In the first case ++ * we need to wait for the expiration, in the second - we're ++ * already past the deadline. */ + return expiration; ++ } else { ++ /* Time has already passed, but we didn't attempt to receive ++ * anything. We need to wake up and try to receive even if ++ * nothing is pending, so we can update the expiration time or ++ * transition to a different state. 
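++                 * Returning 'now + 1' instead of 'now' keeps the deadline
++                 * in the near future, so the caller polls again in 1 ms
++                 * rather than treating the deadline as already expired.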
*/ ++ return now + 1; + } + } + return LLONG_MAX; +@@ -566,8 +577,10 @@ reconnect_deadline__(const struct reconnect *fsm) + case S_IDLE: + if (fsm->probe_interval) { + long long int expiration = fsm->state_entered + fsm->probe_interval; +- if (fsm->last_receive_attempt >= expiration) { ++ if (now < expiration || fsm->last_receive_attempt >= expiration) { + return expiration; ++ } else { ++ return now + 1; + } + } + return LLONG_MAX; +@@ -618,7 +631,7 @@ reconnect_deadline__(const struct reconnect *fsm) + enum reconnect_action + reconnect_run(struct reconnect *fsm, long long int now) + { +- if (now >= reconnect_deadline__(fsm)) { ++ if (now >= reconnect_deadline__(fsm, now)) { + switch (fsm->state) { + case S_VOID: + return 0; +@@ -671,7 +684,7 @@ reconnect_wait(struct reconnect *fsm, long long int now) + int + reconnect_timeout(struct reconnect *fsm, long long int now) + { +- long long int deadline = reconnect_deadline__(fsm); ++ long long int deadline = reconnect_deadline__(fsm, now); + if (deadline != LLONG_MAX) { + long long int remaining = deadline - now; + return MAX(0, MIN(INT_MAX, remaining)); diff --git a/lib/stopwatch.c b/lib/stopwatch.c index f5602163bc..1c71df1a12 100644 --- a/lib/stopwatch.c @@ -23327,10 +23400,38 @@ index 9042658fa8..e019631e9a 100644 } diff --git a/ovsdb/raft.c b/ovsdb/raft.c -index ea91d1fdba..8fa872494e 100644 +index ea91d1fdba..a11144edcd 100644 --- a/ovsdb/raft.c +++ b/ovsdb/raft.c -@@ -940,6 +940,34 @@ raft_reset_ping_timer(struct raft *raft) +@@ -74,6 +74,7 @@ enum raft_failure_test { + FT_CRASH_BEFORE_SEND_EXEC_REQ, + FT_CRASH_AFTER_SEND_EXEC_REQ, + FT_CRASH_AFTER_RECV_APPEND_REQ_UPDATE, ++ FT_CRASH_BEFORE_SEND_SNAPSHOT_REP, + FT_DELAY_ELECTION, + FT_DONT_SEND_VOTE_REQUEST + }; +@@ -376,12 +377,19 @@ static bool raft_handle_write_error(struct raft *, struct ovsdb_error *); + static void raft_run_reconfigure(struct raft *); + + static void raft_set_leader(struct raft *, const struct uuid *sid); ++ + static struct raft_server * + raft_find_server(const struct raft *raft, const struct uuid *sid) + { + return raft_server_find(&raft->servers, sid); + } + ++static struct raft_server * ++raft_find_new_server(struct raft *raft, const struct uuid *uuid) ++{ ++ return raft_server_find(&raft->add_servers, uuid); ++} ++ + static char * + raft_make_address_passive(const char *address_) + { +@@ -940,6 +948,34 @@ raft_reset_ping_timer(struct raft *raft) raft->ping_timeout = time_msec() + raft->election_timer / 3; } @@ -23365,7 +23466,7 @@ index ea91d1fdba..8fa872494e 100644 static void raft_add_conn(struct raft *raft, struct jsonrpc_session *js, const struct uuid *sid, bool incoming) -@@ -954,7 +982,7 @@ raft_add_conn(struct raft *raft, struct jsonrpc_session *js, +@@ -954,7 +990,7 @@ raft_add_conn(struct raft *raft, struct jsonrpc_session *js, &conn->sid); conn->incoming = incoming; conn->js_seqno = jsonrpc_session_get_seqno(conn->js); @@ -23374,7 +23475,48 @@ index ea91d1fdba..8fa872494e 100644 jsonrpc_session_set_backlog_threshold(js, raft->conn_backlog_max_n_msgs, raft->conn_backlog_max_n_bytes); } -@@ -2804,6 +2832,7 @@ raft_update_commit_index(struct raft *raft, uint64_t new_commit_index) +@@ -1774,6 +1810,8 @@ raft_open_conn(struct raft *raft, const char *address, const struct uuid *sid) + static void + raft_conn_close(struct raft_conn *conn) + { ++ VLOG_DBG("closing connection to server %s (%s)", ++ conn->nickname, jsonrpc_session_get_name(conn->js)); + jsonrpc_session_close(conn->js); + ovs_list_remove(&conn->list_node); + free(conn->nickname); +@@ -1864,16 
+1902,30 @@ raft_run(struct raft *raft) + } + + /* Close unneeded sessions. */ ++ struct raft_server *server; + struct raft_conn *next; + LIST_FOR_EACH_SAFE (conn, next, list_node, &raft->conns) { + if (!raft_conn_should_stay_open(raft, conn)) { ++ server = raft_find_new_server(raft, &conn->sid); ++ if (server) { ++ /* We only have one incoming connection from joining servers, ++ * so if it's closed, we need to destroy the record about the ++ * server. This way the process can be started over on the ++ * next join request. */ ++ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5); ++ VLOG_INFO_RL(&rl, "cluster "CID_FMT": server %s (%s) " ++ "disconnected while joining", ++ CID_ARGS(&raft->cid), ++ server->nickname, server->address); ++ hmap_remove(&raft->add_servers, &server->hmap_node); ++ raft_server_destroy(server); ++ } + raft->n_disconnections++; + raft_conn_close(conn); + } + } + + /* Open needed sessions. */ +- struct raft_server *server; + HMAP_FOR_EACH (server, hmap_node, &raft->servers) { + raft_open_conn(raft, server->address, &server->sid); + } +@@ -2804,6 +2856,7 @@ raft_update_commit_index(struct raft *raft, uint64_t new_commit_index) raft->election_timer, e->election_timer); raft->election_timer = e->election_timer; raft->election_timer_new = 0; @@ -23382,7 +23524,7 @@ index ea91d1fdba..8fa872494e 100644 } if (e->servers) { /* raft_run_reconfigure() can write a new Raft entry, which can -@@ -2820,6 +2849,7 @@ raft_update_commit_index(struct raft *raft, uint64_t new_commit_index) +@@ -2820,6 +2873,7 @@ raft_update_commit_index(struct raft *raft, uint64_t new_commit_index) VLOG_INFO("Election timer changed from %"PRIu64" to %"PRIu64, raft->election_timer, e->election_timer); raft->election_timer = e->election_timer; @@ -23390,7 +23532,31 @@ index ea91d1fdba..8fa872494e 100644 } } /* Check if any pending command can be completed, and complete it. -@@ -4122,9 +4152,24 @@ raft_may_snapshot(const struct raft *raft) +@@ -3258,12 +3312,6 @@ raft_find_peer(struct raft *raft, const struct uuid *uuid) + return s && !uuid_equals(&raft->sid, &s->sid) ? s : NULL; + } + +-static struct raft_server * +-raft_find_new_server(struct raft *raft, const struct uuid *uuid) +-{ +- return raft_server_find(&raft->add_servers, uuid); +-} +- + /* Figure 3.1: "If there exists an N such that N > commitIndex, a + * majority of matchIndex[i] >= N, and log[N].term == currentTerm, set + * commitIndex = N (sections 3.5 and 3.6)." */ +@@ -4038,6 +4086,10 @@ static void + raft_handle_install_snapshot_request( + struct raft *raft, const struct raft_install_snapshot_request *rq) + { ++ if (failure_test == FT_CRASH_BEFORE_SEND_SNAPSHOT_REP) { ++ ovs_fatal(0, "Raft test: crash before sending install_snapshot_reply"); ++ } ++ + if (raft_handle_install_snapshot_request__(raft, rq)) { + union raft_rpc rpy = { + .install_snapshot_reply = { +@@ -4122,9 +4174,24 @@ raft_may_snapshot(const struct raft *raft) && !raft->leaving && !raft->left && !raft->failed @@ -23415,7 +23581,7 @@ index ea91d1fdba..8fa872494e 100644 /* Replaces the log for 'raft', up to the last log entry read, by * 'new_snapshot_data'. Returns NULL if successful, otherwise an error that * the caller must eventually free. -@@ -4468,6 +4513,8 @@ raft_unixctl_status(struct unixctl_conn *conn, +@@ -4468,6 +4535,8 @@ raft_unixctl_status(struct unixctl_conn *conn, : raft->leaving ? "leaving cluster" : raft->left ? "left cluster" : raft->failed ? 
"failed" @@ -23424,6 +23590,15 @@ index ea91d1fdba..8fa872494e 100644 : "cluster member"); if (raft->joining) { ds_put_format(&s, "Remotes for joining:"); +@@ -4814,6 +4883,8 @@ raft_unixctl_failure_test(struct unixctl_conn *conn OVS_UNUSED, + failure_test = FT_CRASH_AFTER_SEND_EXEC_REQ; + } else if (!strcmp(test, "crash-after-receiving-append-request-update")) { + failure_test = FT_CRASH_AFTER_RECV_APPEND_REQ_UPDATE; ++ } else if (!strcmp(test, "crash-before-sending-install-snapshot-reply")) { ++ failure_test = FT_CRASH_BEFORE_SEND_SNAPSHOT_REP; + } else if (!strcmp(test, "delay-election")) { + failure_test = FT_DELAY_ELECTION; + struct raft *raft; diff --git a/ovsdb/raft.h b/ovsdb/raft.h index 99d5307e54..59902fe825 100644 --- a/ovsdb/raft.h @@ -23759,6 +23934,118 @@ index bf32f8c87c..d5127268aa 100644 + def get_num_of_remotes(self): return len(self.remotes) +diff --git a/python/ovs/reconnect.py b/python/ovs/reconnect.py +index c4c6c87e9f..6b0d023ae3 100644 +--- a/python/ovs/reconnect.py ++++ b/python/ovs/reconnect.py +@@ -44,7 +44,7 @@ class Reconnect(object): + is_connected = False + + @staticmethod +- def deadline(fsm): ++ def deadline(fsm, now): + return None + + @staticmethod +@@ -56,7 +56,7 @@ class Reconnect(object): + is_connected = False + + @staticmethod +- def deadline(fsm): ++ def deadline(fsm, now): + return None + + @staticmethod +@@ -68,7 +68,7 @@ class Reconnect(object): + is_connected = False + + @staticmethod +- def deadline(fsm): ++ def deadline(fsm, now): + return fsm.state_entered + fsm.backoff + + @staticmethod +@@ -80,7 +80,7 @@ class Reconnect(object): + is_connected = False + + @staticmethod +- def deadline(fsm): ++ def deadline(fsm, now): + return fsm.state_entered + max(1000, fsm.backoff) + + @staticmethod +@@ -92,13 +92,24 @@ class Reconnect(object): + is_connected = True + + @staticmethod +- def deadline(fsm): ++ def deadline(fsm, now): + if fsm.probe_interval: + base = max(fsm.last_activity, fsm.state_entered) + expiration = base + fsm.probe_interval +- if (fsm.last_receive_attempt is None or ++ if (now < expiration or ++ fsm.last_receive_attempt is None or + fsm.last_receive_attempt >= expiration): ++ # We still have time before the expiration or the time has ++ # already passed and there was no activity. In the first ++ # case we need to wait for the expiration, in the second - ++ # we're already past the deadline. */ + return expiration ++ else: ++ # Time has already passed, but we didn't attempt to receive ++ # anything. We need to wake up and try to receive even if ++ # nothing is pending, so we can update the expiration time ++ # or transition to a different state. 
++ return now + 1 + return None + + @staticmethod +@@ -114,12 +125,15 @@ class Reconnect(object): + is_connected = True + + @staticmethod +- def deadline(fsm): ++ def deadline(fsm, now): + if fsm.probe_interval: + expiration = fsm.state_entered + fsm.probe_interval +- if (fsm.last_receive_attempt is None or ++ if (now < expiration or ++ fsm.last_receive_attempt is None or + fsm.last_receive_attempt >= expiration): + return expiration ++ else: ++ return now + 1 + return None + + @staticmethod +@@ -134,7 +148,7 @@ class Reconnect(object): + is_connected = False + + @staticmethod +- def deadline(fsm): ++ def deadline(fsm, now): + return fsm.state_entered + + @staticmethod +@@ -545,7 +559,7 @@ class Reconnect(object): + returned if the "probe interval" is nonzero--see + self.set_probe_interval()).""" + +- deadline = self.state.deadline(self) ++ deadline = self.state.deadline(self, now) + if deadline is not None and now >= deadline: + return self.state.run(self, now) + else: +@@ -562,7 +576,7 @@ class Reconnect(object): + """Returns the number of milliseconds after which self.run() should be + called if nothing else notable happens in the meantime, or None if this + is currently unnecessary.""" +- deadline = self.state.deadline(self) ++ deadline = self.state.deadline(self, now) + if deadline is not None: + remaining = deadline - now + return max(0, remaining) diff --git a/python/ovstest/rpcserver.py b/python/ovstest/rpcserver.py index c4aab70207..05b6b1be20 100644 --- a/python/ovstest/rpcserver.py @@ -24652,7 +24939,7 @@ index 8d777a0275..5e3b26aea8 100644 AT_KEYWORDS([ovsdb client positive]) diff --git a/tests/ovsdb-cluster.at b/tests/ovsdb-cluster.at -index 92aa427093..cf43e9cf86 100644 +index 92aa427093..9114ea1d13 100644 --- a/tests/ovsdb-cluster.at +++ b/tests/ovsdb-cluster.at @@ -128,7 +128,7 @@ ovsdb_test_cluster_disconnect () { @@ -24664,6 +24951,68 @@ index 92aa427093..cf43e9cf86 100644 # Start collecting raft_is_connected logs for $target before shutting down # any servers. +@@ -390,6 +390,61 @@ done + + AT_CLEANUP + ++AT_BANNER([OVSDB - cluster failure while joining]) ++AT_SETUP([OVSDB cluster - follower crash while joining]) ++AT_KEYWORDS([ovsdb server negative unix cluster join]) ++ ++n=3 ++schema_name=`ovsdb-tool schema-name $abs_srcdir/idltest.ovsschema` ++ordinal_schema > schema ++AT_CHECK([ovsdb-tool '-vPATTERN:console:%c|%p|%m' create-cluster s1.db dnl ++ $abs_srcdir/idltest.ovsschema unix:s1.raft], [0], [], [stderr]) ++cid=`ovsdb-tool db-cid s1.db` ++schema_name=`ovsdb-tool schema-name $abs_srcdir/idltest.ovsschema` ++for i in `seq 2 $n`; do ++ AT_CHECK([ovsdb-tool join-cluster s$i.db $schema_name unix:s$i.raft unix:s1.raft]) ++done ++ ++on_exit 'kill `cat *.pid`' ++ ++dnl Starting followers first, so we can configure them to crash on join. ++for j in `seq $n`; do ++ i=$(($n + 1 - $j)) ++ AT_CHECK([ovsdb-server -v -vconsole:off -vsyslog:off dnl ++ --detach --no-chdir --log-file=s$i.log dnl ++ --pidfile=s$i.pid --unixctl=s$i dnl ++ --remote=punix:s$i.ovsdb s$i.db]) ++ if test $i != 1; then ++ OVS_WAIT_UNTIL([ovs-appctl -t "`pwd`"/s$i dnl ++ cluster/failure-test crash-before-sending-install-snapshot-reply dnl ++ | grep -q "engaged"]) ++ fi ++done ++ ++dnl Make sure that followers really crashed. ++for i in `seq 2 $n`; do ++ OVS_WAIT_WHILE([test -s s$i.pid]) ++done ++ ++dnl Bring them back. 
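++dnl The leader dropped the interrupted join requests when the connections
++dnl closed, so the restarted servers can start the join process over.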
++for i in `seq 2 $n`; do ++ AT_CHECK([ovsdb-server -v -vconsole:off -vsyslog:off dnl ++ --detach --no-chdir --log-file=s$i.log dnl ++ --pidfile=s$i.pid --unixctl=s$i dnl ++ --remote=punix:s$i.ovsdb s$i.db]) ++done ++ ++dnl Make sure that all servers joined the cluster. ++for i in `seq $n`; do ++ AT_CHECK([ovsdb_client_wait unix:s$i.ovsdb $schema_name connected]) ++done ++ ++for i in `seq $n`; do ++ OVS_APP_EXIT_AND_WAIT_BY_TARGET([`pwd`/s$i], [s$i.pid]) ++done ++ ++AT_CLEANUP ++ + + + OVS_START_SHELL_HELPERS diff --git a/tests/ovsdb-idl.at b/tests/ovsdb-idl.at index 4b4791a7da..dd7b0df755 100644 --- a/tests/ovsdb-idl.at @@ -25893,6 +26242,194 @@ index 4b4791a7da..dd7b0df755 100644 +[], +[], +reconnect.*waiting .* seconds before reconnect) +diff --git a/tests/reconnect.at b/tests/reconnect.at +index 0f74709f5a..5bca84351c 100644 +--- a/tests/reconnect.at ++++ b/tests/reconnect.at +@@ -39,8 +39,19 @@ run + connected + + # Try timeout without noting that we tried to receive. +-# (This does nothing since we never timeout in this case.) ++# Timeout should be scheduled to the next probe interval. + timeout ++run ++ ++# Once we reached the timeout, it should not expire until the receive actually ++# attempted. However, we still need to wake up as soon as possible in order to ++# have a chance to mark the receive attempt even if nothing was received. ++timeout ++run ++ ++# Short time advance past the original probe interval, but not expired still. ++timeout ++run + + # Now disable the receive-attempted feature and timeout again. + receive-attempted LLONG_MAX +@@ -67,18 +78,37 @@ connected + last connected 0 ms ago, connected 0 ms total + + # Try timeout without noting that we tried to receive. +-# (This does nothing since we never timeout in this case.) +-timeout +- no timeout +- +-# Now disable the receive-attempted feature and timeout again. +-receive-attempted LLONG_MAX ++# Timeout should be scheduled to the next probe interval. + timeout + advance 5000 ms + + ### t=6000 ### + in ACTIVE for 5000 ms (0 ms backoff) + run ++ ++# Once we reached the timeout, it should not expire until the receive actually ++# attempted. However, we still need to wake up as soon as possible in order to ++# have a chance to mark the receive attempt even if nothing was received. ++timeout ++ advance 1 ms ++ ++### t=6001 ### ++ in ACTIVE for 5001 ms (0 ms backoff) ++run ++ ++# Short time advance past the original probe interval, but not expired still. ++timeout ++ advance 1 ms ++ ++### t=6002 ### ++ in ACTIVE for 5002 ms (0 ms backoff) ++run ++ ++# Now disable the receive-attempted feature and timeout again. ++receive-attempted LLONG_MAX ++timeout ++ advance 0 ms ++run + should send probe + in IDLE for 0 ms (0 ms backoff) + +@@ -86,7 +116,7 @@ run + timeout + advance 5000 ms + +-### t=11000 ### ++### t=11002 ### + in IDLE for 5000 ms (0 ms backoff) + run + should disconnect +@@ -94,7 +124,7 @@ disconnected + in BACKOFF for 0 ms (1000 ms backoff) + 1 successful connections out of 1 attempts, seqno 2 + disconnected +- disconnected at 11000 ms (0 ms ago) ++ disconnected at 11002 ms (0 ms ago) + ]) + + ###################################################################### +@@ -111,8 +141,19 @@ run + connected + + # Try timeout without noting that we tried to receive. +-# (This does nothing since we never timeout in this case.) ++# Timeout should be scheduled to the next probe interval. ++timeout ++run ++ ++# Once we reached the timeout, it should not expire until the receive actually ++# attempted. 
However, we still need to wake up as soon as possible in order to ++# have a chance to mark the receive attempt even if nothing was received. ++timeout ++run ++ ++# Short time advance past the original probe interval, but not expired still. + timeout ++run + + # Now disable the receive-attempted feature and timeout again. + receive-attempted LLONG_MAX +@@ -148,18 +189,37 @@ connected + last connected 0 ms ago, connected 0 ms total + + # Try timeout without noting that we tried to receive. +-# (This does nothing since we never timeout in this case.) +-timeout +- no timeout +- +-# Now disable the receive-attempted feature and timeout again. +-receive-attempted LLONG_MAX ++# Timeout should be scheduled to the next probe interval. + timeout + advance 5000 ms + + ### t=6500 ### + in ACTIVE for 5000 ms (0 ms backoff) + run ++ ++# Once we reached the timeout, it should not expire until the receive actually ++# attempted. However, we still need to wake up as soon as possible in order to ++# have a chance to mark the receive attempt even if nothing was received. ++timeout ++ advance 1 ms ++ ++### t=6501 ### ++ in ACTIVE for 5001 ms (0 ms backoff) ++run ++ ++# Short time advance past the original probe interval, but not expired still. ++timeout ++ advance 1 ms ++ ++### t=6502 ### ++ in ACTIVE for 5002 ms (0 ms backoff) ++run ++ ++# Now disable the receive-attempted feature and timeout again. ++receive-attempted LLONG_MAX ++timeout ++ advance 0 ms ++run + should send probe + in IDLE for 0 ms (0 ms backoff) + +@@ -167,7 +227,7 @@ run + timeout + advance 5000 ms + +-### t=11500 ### ++### t=11502 ### + in IDLE for 5000 ms (0 ms backoff) + run + should disconnect +@@ -175,7 +235,7 @@ disconnected + in BACKOFF for 0 ms (1000 ms backoff) + 1 successful connections out of 1 attempts, seqno 2 + disconnected +- disconnected at 11500 ms (0 ms ago) ++ disconnected at 11502 ms (0 ms ago) + ]) + + ###################################################################### +@@ -1271,14 +1331,14 @@ activity + created 1000, last activity 3000, last connected 2000 + + # Connection times out. +-timeout +- no timeout +-receive-attempted LLONG_MAX + timeout + advance 5000 ms + + ### t=8000 ### + in ACTIVE for 6000 ms (1000 ms backoff) ++receive-attempted LLONG_MAX ++timeout ++ advance 0 ms + run + should send probe + in IDLE for 0 ms (1000 ms backoff) diff --git a/tests/system-kmod-macros.at b/tests/system-kmod-macros.at index 15628a7c6f..86d633ac4f 100644 --- a/tests/system-kmod-macros.at diff --git a/SPECS/openvswitch2.15.spec b/SPECS/openvswitch2.15.spec index ea80730..f357d94 100644 --- a/SPECS/openvswitch2.15.spec +++ b/SPECS/openvswitch2.15.spec @@ -57,7 +57,7 @@ Summary: Open vSwitch Group: System Environment/Daemons daemon/database/utilities URL: http://www.openvswitch.org/ Version: 2.15.0 -Release: 76%{?dist} +Release: 77%{?dist} # Nearly all of openvswitch is ASL 2.0. The bugtool is LGPLv2+, and the # lib/sflow*.[ch] files are SISSL @@ -702,6 +702,13 @@ exit 0 %endif %changelog +* Sat Feb 26 2022 Open vSwitch CI - 2.15.0-77 +- Merging upstream branch-2.15 [RH git: 50c394233c] + Commit list: + b6007add57 ovsdb: raft: Fix inability to join the cluster after interrupted attempt. (#2033514) + dce48bb003 reconnect: Fix broken inactivity probe if there is no other reason to wake up. + + * Thu Feb 24 2022 Open vSwitch CI - 2.15.0-76 - Merging upstream branch-2.15 [RH git: 9265101135] Commit list: