diff --git a/SOURCES/openvswitch-3.1.0.patch b/SOURCES/openvswitch-3.1.0.patch
index 5dc986e..83e6067 100644
--- a/SOURCES/openvswitch-3.1.0.patch
+++ b/SOURCES/openvswitch-3.1.0.patch
@@ -1236,6 +1236,428 @@ index 4e15167ab..fa7973ac7 100644
  void ofproto_set_forward_bpdu(struct ofproto *, bool forward_bpdu);
  void ofproto_set_mac_table_config(struct ofproto *, unsigned idle_time,
                                    size_t max_entries);
+diff --git a/ovsdb/log.c b/ovsdb/log.c
+index e42f00246..fff7c6ba1 100644
+--- a/ovsdb/log.c
++++ b/ovsdb/log.c
+@@ -552,6 +552,23 @@ ovsdb_log_truncate(struct ovsdb_log *file)
+     return error;
+ }
+ 
++/* Removes all the data from the log by moving the current offset to zero and
++ * truncating the file to zero bytes.  After this operation the file is empty
++ * and in a write state. */
++struct ovsdb_error * OVS_WARN_UNUSED_RESULT
++ovsdb_log_reset(struct ovsdb_log *file)
++{
++    ovsdb_error_destroy(file->error);
++    file->offset = file->prev_offset = 0;
++    file->error = ovsdb_log_truncate(file);
++    if (file->error) {
++        file->state = OVSDB_LOG_WRITE_ERROR;
++        return ovsdb_error_clone(file->error);
++    }
++    file->state = OVSDB_LOG_WRITE;
++    return NULL;
++}
++
+ /* Composes a log record for 'json' by filling 'header' with a header line and
+  * 'data' with a data line (each ending with a new-line).  To write the record
+  * to a file, write 'header' followed by 'data'.
+diff --git a/ovsdb/log.h b/ovsdb/log.h
+index 90714ea13..63e5681a0 100644
+--- a/ovsdb/log.h
++++ b/ovsdb/log.h
+@@ -66,6 +66,9 @@ struct ovsdb_error *ovsdb_log_read(struct ovsdb_log *, struct json **)
+     OVS_WARN_UNUSED_RESULT;
+ void ovsdb_log_unread(struct ovsdb_log *);
+ 
++struct ovsdb_error *ovsdb_log_reset(struct ovsdb_log *)
++    OVS_WARN_UNUSED_RESULT;
++
+ void ovsdb_log_compose_record(const struct json *, const char *magic,
+                               struct ds *header, struct ds *data);
+ 
+diff --git a/ovsdb/ovsdb-server.c b/ovsdb/ovsdb-server.c
+index 33ca4910d..cb4671d51 100644
+--- a/ovsdb/ovsdb-server.c
++++ b/ovsdb/ovsdb-server.c
+@@ -573,8 +573,9 @@ close_db(struct server_config *config, struct db *db, char *comment)
+     }
+ }
+ 
+-static void
+-update_schema(struct ovsdb *db, const struct ovsdb_schema *schema, void *aux)
++static struct ovsdb_error * OVS_WARN_UNUSED_RESULT
++update_schema(struct ovsdb *db, const struct ovsdb_schema *schema,
++              bool conversion_with_no_data, void *aux)
+ {
+     struct server_config *config = aux;
+ 
+@@ -586,13 +587,27 @@ update_schema(struct ovsdb *db, const struct ovsdb_schema *schema, void *aux)
+             : xasprintf("database %s connected to storage", db->name)));
+     }
+ 
+-    ovsdb_replace(db, ovsdb_create(ovsdb_schema_clone(schema), NULL));
++    if (db->schema && conversion_with_no_data) {
++        struct ovsdb *new_db = NULL;
++        struct ovsdb_error *error;
++
++        error = ovsdb_convert(db, schema, &new_db);
++        if (error) {
++            /* Should never happen, because conversion should have been
++             * checked before writing the schema to the storage. */
++            return error;
++        }
++        ovsdb_replace(db, new_db);
++    } else {
++        ovsdb_replace(db, ovsdb_create(ovsdb_schema_clone(schema), NULL));
++    }
+ 
+     /* Force update to schema in _Server database. */
+     struct db *dbp = shash_find_data(config->all_dbs, db->name);
+     if (dbp) {
+         dbp->row_uuid = UUID_ZERO;
+     }
++    return NULL;
+ }
+ 
+ static struct ovsdb_error * OVS_WARN_UNUSED_RESULT
+@@ -600,23 +615,30 @@ parse_txn(struct server_config *config, struct db *db,
+           const struct ovsdb_schema *schema, const struct json *txn_json,
+           const struct uuid *txnid)
+ {
++    struct ovsdb_error *error = NULL;
++    struct ovsdb_txn *txn = NULL;
++
+     if (schema) {
+-        /* We're replacing the schema (and the data).  Destroy the database
+-         * (first grabbing its storage), then replace it with the new schema.
+-         * The transaction must also include the replacement data.
++        /* We're replacing the schema (and the data).  If the transaction
++         * includes replacement data, destroy the database (first grabbing its
++         * storage), then replace it with the new schema.  If not, it's a
++         * conversion without data specified.  In this case, convert the
++         * current database to the new schema instead.
+          *
+          * Only clustered database schema changes and snapshot installs
+          * go through this path.
+          */
+-        ovs_assert(txn_json);
+         ovs_assert(ovsdb_storage_is_clustered(db->db->storage));
+ 
+-        struct ovsdb_error *error = ovsdb_schema_check_for_ephemeral_columns(
+-            schema);
++        error = ovsdb_schema_check_for_ephemeral_columns(schema);
++        if (error) {
++            return error;
++        }
++
++        error = update_schema(db->db, schema, txn_json == NULL, config);
+         if (error) {
+             return error;
+         }
+-        update_schema(db->db, schema, config);
+     }
+ 
+     if (txn_json) {
+@@ -624,24 +646,25 @@ parse_txn(struct server_config *config, struct db *db,
+             return ovsdb_error(NULL, "%s: data without schema", db->filename);
+         }
+ 
+-        struct ovsdb_txn *txn;
+-        struct ovsdb_error *error;
+-
+         error = ovsdb_file_txn_from_json(db->db, txn_json, false, &txn);
+-        if (!error) {
+-            ovsdb_txn_set_txnid(txnid, txn);
+-            log_and_free_error(ovsdb_txn_replay_commit(txn));
+-        }
+-        if (!error && !uuid_is_zero(txnid)) {
+-            db->db->prereq = *txnid;
+-        }
+         if (error) {
+             ovsdb_storage_unread(db->db->storage);
+             return error;
+         }
++    } else if (schema) {
++        /* We just performed a conversion without data.  Transaction history
++         * was destroyed.  Commit a dummy transaction to set the txnid. */
++        txn = ovsdb_txn_create(db->db);
+     }
+ 
+-    return NULL;
++    if (txn) {
++        ovsdb_txn_set_txnid(txnid, txn);
++        error = ovsdb_txn_replay_commit(txn);
++        if (!error && !uuid_is_zero(txnid)) {
++            db->db->prereq = *txnid;
++        }
++    }
++    return error;
+ }
+ 
+ static void
+diff --git a/ovsdb/ovsdb-tool.c b/ovsdb/ovsdb-tool.c
+index 60f353197..e26536532 100644
+--- a/ovsdb/ovsdb-tool.c
++++ b/ovsdb/ovsdb-tool.c
+@@ -1006,7 +1006,8 @@ raft_header_to_standalone_log(const struct raft_header *h,
+ }
+ 
+ static void
+-raft_record_to_standalone_log(const struct raft_record *r,
++raft_record_to_standalone_log(const char *db_file_name,
++                              const struct raft_record *r,
+                               struct ovsdb_log *db_log_data)
+ {
+     if (r->type == RAFT_REC_ENTRY) {
+@@ -1018,7 +1019,40 @@ raft_record_to_standalone_log(const struct raft_record *r,
+         if (pa->n != 2) {
+             ovs_fatal(0, "Incorrect raft record array length");
+         }
++
++        struct json *schema_json = pa->elems[0];
+         struct json *data_json = pa->elems[1];
++
++        if (schema_json->type != JSON_NULL) {
++            /* This is a database conversion record.  Reset the log and
++             * write the new schema. */
++            struct ovsdb_schema *schema;
++
++            check_ovsdb_error(ovsdb_schema_from_json(schema_json, &schema));
++
++            if (data_json->type == JSON_NULL) {
++                /* We have a conversion request with no data.  There is no
++                 * other way than to read back what we have and convert. */
++                struct ovsdb *old_db, *new_db;
++
++                check_ovsdb_error(ovsdb_log_commit_block(db_log_data));
++
++                old_db = ovsdb_file_read(db_file_name, false);
++                check_ovsdb_error(ovsdb_convert(old_db, schema, &new_db));
++                ovsdb_destroy(old_db);
++
++                pa->elems[1] = ovsdb_to_txn_json(
++                                    new_db, "converted by ovsdb-tool", true);
++                ovsdb_destroy(new_db);
++
++                json_destroy(data_json);
++                data_json = pa->elems[1];
++            }
++
++            ovsdb_schema_destroy(schema);
++            check_ovsdb_error(ovsdb_log_reset(db_log_data));
++            check_ovsdb_error(ovsdb_log_write(db_log_data, schema_json));
++        }
+         if (data_json->type != JSON_NULL) {
+             check_ovsdb_error(ovsdb_log_write(db_log_data, data_json));
+         }
+@@ -1636,7 +1670,8 @@ do_compare_versions(struct ovs_cmdl_context *ctx)
+ }
+ 
+ static void
+-do_convert_to_standalone(struct ovsdb_log *log, struct ovsdb_log *db_log_data)
++do_convert_to_standalone(const char *db_file_name,
++                         struct ovsdb_log *log, struct ovsdb_log *db_log_data)
+ {
+     for (unsigned int i = 0; ; i++) {
+         struct json *json;
+@@ -1653,7 +1688,7 @@ do_convert_to_standalone(struct ovsdb_log *log, struct ovsdb_log *db_log_data)
+         } else {
+             struct raft_record r;
+             check_ovsdb_error(raft_record_from_json(&r, json));
+-            raft_record_to_standalone_log(&r, db_log_data);
++            raft_record_to_standalone_log(db_file_name, &r, db_log_data);
+             raft_record_uninit(&r);
+         }
+         json_destroy(json);
+@@ -1676,7 +1711,7 @@ do_cluster_standalone(struct ovs_cmdl_context *ctx)
+     if (strcmp(ovsdb_log_get_magic(log), RAFT_MAGIC) != 0) {
+         ovs_fatal(0, "Database is not clustered db.\n");
+     }
+-    do_convert_to_standalone(log, db_log_data);
++    do_convert_to_standalone(db_file_name, log, db_log_data);
+     check_ovsdb_error(ovsdb_log_commit_block(db_log_data));
+     ovsdb_log_close(db_log_data);
+     ovsdb_log_close(log);
+diff --git a/ovsdb/relay.c b/ovsdb/relay.c
+index 9ff6ed8f3..94ffe01e5 100644
+--- a/ovsdb/relay.c
++++ b/ovsdb/relay.c
+@@ -301,6 +301,8 @@ static void
+ ovsdb_relay_parse_update(struct relay_ctx *ctx,
+                          const struct ovsdb_cs_update_event *update)
+ {
++    struct ovsdb_error *error = NULL;
++
+     if (!ctx->db) {
+         return;
+     }
+@@ -308,15 +310,27 @@ ovsdb_relay_parse_update(struct relay_ctx *ctx,
+     if (update->monitor_reply && ctx->new_schema) {
+         /* There was a schema change.  Updating a database with a new schema
+          * before processing monitor reply with the new data. */
+-        ctx->schema_change_cb(ctx->db, ctx->new_schema,
+-                              ctx->schema_change_aux);
++        error = ctx->schema_change_cb(ctx->db, ctx->new_schema, false,
++                                      ctx->schema_change_aux);
++        if (error) {
++            /* Should never happen, but handle this case anyway. */
++            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
++            char *s = ovsdb_error_to_string_free(error);
++
++            VLOG_ERR_RL(&rl, "%s", s);
++            free(s);
++
++            ovsdb_cs_flag_inconsistency(ctx->cs);
++            return;
++        }
+         ovsdb_schema_destroy(ctx->new_schema);
+         ctx->new_schema = NULL;
+     }
+ 
+     struct ovsdb_cs_db_update *du;
+-    struct ovsdb_error *error = ovsdb_cs_parse_db_update(update->table_updates,
+-                                                         update->version, &du);
++
++    error = ovsdb_cs_parse_db_update(update->table_updates,
++                                     update->version, &du);
+     if (!error) {
+         if (update->clear) {
+             error = ovsdb_relay_clear(ctx->db);
+diff --git a/ovsdb/relay.h b/ovsdb/relay.h
+index 390ea70c8..2d66b5e5f 100644
+--- a/ovsdb/relay.h
++++ b/ovsdb/relay.h
+@@ -23,8 +23,11 @@ struct json;
+ struct ovsdb;
+ struct ovsdb_schema;
+ 
+-typedef void (*schema_change_callback)(struct ovsdb *,
+-                                       const struct ovsdb_schema *, void *aux);
++typedef struct ovsdb_error *(*schema_change_callback)(
++                                       struct ovsdb *,
++                                       const struct ovsdb_schema *,
++                                       bool conversion_with_no_data,
++                                       void *aux);
+ 
+ void ovsdb_relay_add_db(struct ovsdb *, const char *remote,
+                         schema_change_callback schema_change_cb,
+diff --git a/ovsdb/storage.c b/ovsdb/storage.c
+index e8f95ce64..6c395106c 100644
+--- a/ovsdb/storage.c
++++ b/ovsdb/storage.c
+@@ -623,7 +623,7 @@ ovsdb_storage_store_snapshot(struct ovsdb_storage *storage,
+ 
+ struct ovsdb_write * OVS_WARN_UNUSED_RESULT
+ ovsdb_storage_write_schema_change(struct ovsdb_storage *storage,
+-                                  const struct json *schema,
++                                  const struct ovsdb_schema *schema,
+                                   const struct json *data,
+                                   const struct uuid *prereq,
+                                   struct uuid *resultp)
+@@ -633,13 +633,23 @@ ovsdb_storage_write_schema_change(struct ovsdb_storage *storage,
+     if (storage->error) {
+         w->error = ovsdb_error_clone(storage->error);
+     } else if (storage->raft) {
+-        struct json *txn_json = json_array_create_2(json_clone(schema),
+-                                                    json_clone(data));
+-        w->command = raft_command_execute(storage->raft, txn_json,
+-                                          prereq, &result);
+-        json_destroy(txn_json);
++        /* Clustered storage doesn't support ephemeral columns. */
++        w->error = ovsdb_schema_check_for_ephemeral_columns(schema);
++        if (!w->error) {
++            struct json *schema_json, *txn_json;
++
++            schema_json = ovsdb_schema_to_json(schema);
++            txn_json = json_array_create_2(schema_json, json_clone(data));
++            w->command = raft_command_execute(storage->raft, txn_json,
++                                              prereq, &result);
++            json_destroy(txn_json);
++        }
+     } else if (storage->log) {
+-        w->error = ovsdb_storage_store_snapshot__(storage, schema, data, 0);
++        struct json *schema_json = ovsdb_schema_to_json(schema);
++
++        w->error = ovsdb_storage_store_snapshot__(storage, schema_json,
++                                                  data, 0);
++        json_destroy(schema_json);
+     } else {
+         /* When 'error' and 'command' are both null, it indicates that the
+          * command is complete.  This is fine since this unbacked storage drops
+diff --git a/ovsdb/storage.h b/ovsdb/storage.h
+index a1fdaa564..05f40ce93 100644
+--- a/ovsdb/storage.h
++++ b/ovsdb/storage.h
+@@ -85,7 +85,7 @@ struct ovsdb_error *ovsdb_storage_store_snapshot(struct ovsdb_storage *storage,
+ 
+ struct ovsdb_write *ovsdb_storage_write_schema_change(
+     struct ovsdb_storage *,
+-    const struct json *schema, const struct json *data,
++    const struct ovsdb_schema *, const struct json *data,
+     const struct uuid *prereq, struct uuid *result)
+     OVS_WARN_UNUSED_RESULT;
+ 
+diff --git a/ovsdb/transaction.c b/ovsdb/transaction.c
+index 03541af85..f01de2a34 100644
+--- a/ovsdb/transaction.c
++++ b/ovsdb/transaction.c
+@@ -1251,7 +1251,7 @@ ovsdb_txn_precheck_prereq(const struct ovsdb *db)
+ 
+ struct ovsdb_txn_progress *
+ ovsdb_txn_propose_schema_change(struct ovsdb *db,
+-                                const struct json *schema,
++                                const struct ovsdb_schema *schema,
+                                 const struct json *data)
+ {
+     struct ovsdb_txn_progress *progress = xzalloc(sizeof *progress);
+diff --git a/ovsdb/transaction.h b/ovsdb/transaction.h
+index 6b5bb7f24..9991f34d2 100644
+--- a/ovsdb/transaction.h
++++ b/ovsdb/transaction.h
+@@ -21,6 +21,7 @@
+ 
+ struct json;
+ struct ovsdb;
++struct ovsdb_schema;
+ struct ovsdb_table;
+ struct uuid;
+ 
+@@ -41,7 +42,7 @@ struct ovsdb_error *ovsdb_txn_propose_commit_block(struct ovsdb_txn *,
+ void ovsdb_txn_complete(struct ovsdb_txn *);
+ 
+ struct ovsdb_txn_progress *ovsdb_txn_propose_schema_change(
+-    struct ovsdb *, const struct json *schema, const struct json *data);
++    struct ovsdb *, const struct ovsdb_schema *, const struct json *data);
+ 
+ bool ovsdb_txn_progress_is_complete(const struct ovsdb_txn_progress *);
+ const struct ovsdb_error *ovsdb_txn_progress_get_error(
+diff --git a/ovsdb/trigger.c b/ovsdb/trigger.c
+index 01bb80e28..3c93ae580 100644
+--- a/ovsdb/trigger.c
++++ b/ovsdb/trigger.c
+@@ -274,8 +274,8 @@ ovsdb_trigger_try(struct ovsdb_trigger *t, long long int now)
+             if (!error) {
+                 error = ovsdb_convert(t->db, new_schema, &newdb);
+             }
+-            ovsdb_schema_destroy(new_schema);
+             if (error) {
++                ovsdb_schema_destroy(new_schema);
+                 trigger_convert_error(t, error);
+                 return false;
+             }
+@@ -286,7 +286,8 @@ ovsdb_trigger_try(struct ovsdb_trigger *t, long long int now)
+ 
+             /* Propose the change. */
+             t->progress = ovsdb_txn_propose_schema_change(
+-                t->db, new_schema_json, txn_json);
++                t->db, new_schema, txn_json);
++            ovsdb_schema_destroy(new_schema);
+             json_destroy(txn_json);
+             t->reply = jsonrpc_create_reply(json_object_create(),
+                                             t->request->id);
 diff --git a/tests/classifier.at b/tests/classifier.at
 index f652b5983..de2705653 100644
 --- a/tests/classifier.at
@@ -1639,6 +2061,90 @@ index a92156f00..a368bff6e 100644
  AT_CHECK([RUN_OVS_VSCTL([remove netflow `cat netflow-uuid` targets '"1.2.3.4:567"'])],
    [1], [], [ovs-vsctl: "remove" operation would put 0 values in column targets of table NetFlow but the minimum number is 1
  ])
+diff --git a/tests/ovsdb-tool.at b/tests/ovsdb-tool.at
+index 12ad6fb3f..5496ccda7 100644
+--- a/tests/ovsdb-tool.at
++++ b/tests/ovsdb-tool.at
+@@ -465,6 +465,7 @@ AT_SETUP([ovsdb-tool convert-to-standalone])
+ AT_KEYWORDS([ovsdb file positive])
+ ordinal_schema > schema
+ AT_CHECK([ovsdb-tool create-cluster db schema unix:s1.raft], [0], [stdout], [ignore])
++on_exit 'kill `cat ovsdb-server.pid`'
+ AT_CHECK([ovsdb-server --detach --no-chdir --pidfile --remote=punix:socket --log-file db >/dev/null 2>&1])
+ for txn in m4_foreach([txn], [[[["ordinals",
+       {"op": "insert",
+@@ -498,3 +499,71 @@ OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+ # Make sure both standalone and cluster db data matches.
+ AT_CHECK([diff standalonedump clusterdump])
+ AT_CLEANUP
++
++AT_SETUP([ovsdb-tool convert-to-standalone after schema conversion])
++AT_KEYWORDS([ovsdb file positive])
++ordinal_schema > schema
++AT_CHECK([ovsdb-tool create-cluster db schema unix:s1.raft], [0], [stdout], [ignore])
++on_exit 'kill `cat ovsdb-server.pid`'
++AT_CHECK([ovsdb-server --detach --no-chdir --pidfile --remote=punix:socket dnl
++                       --log-file db >/dev/null 2>&1])
++for txn in m4_foreach([txn], [[[["ordinals",
++      {"op": "insert",
++       "table": "ordinals",
++       "row": {"number": 0, "name": "zero"}},
++      {"op": "insert",
++       "table": "ordinals",
++       "row": {"number": 1, "name": "one"}},
++      {"op": "insert",
++       "table": "ordinals",
++       "row": {"number": 2, "name": "two"}}]]]], ['txn' ]); do
++  AT_CHECK([ovsdb-client transact unix:socket "$txn"], [0], [ignore], [ignore])
++done
++
++dnl Change the schema.
++AT_CHECK([sed 's/5\.1\.3/5.1.4/' < schema > schema2])
++AT_CHECK([sed -i'back' -e '/.*"number":.*/a \
++           "is_seven": {"type": "boolean"},
++           ' schema2])
++
++dnl Convert the database.
++AT_CHECK([ovsdb-client convert unix:socket schema2])
++
++dnl Add a new row with a new column.
++AT_CHECK([ovsdb-client transact unix:socket dnl
++  '[["ordinals",
++     {"op": "insert",
++      "table": "ordinals",
++      "row": {"number": 7, "name": "seven", "is_seven": true}
++     }]]'], [0], [ignore], [ignore])
++
++AT_CHECK([ovsdb-client dump unix:socket > clusterdump])
++
++AT_CHECK([uuidfilt clusterdump], [0], [dnl
++ordinals table
++_uuid                                is_seven name  number
++------------------------------------ -------- ----- ------
++<0> false    one   1
++<1> false    two   2
++<2> false    zero  0
++<3> true     seven 7
++])
++
++OVS_APP_EXIT_AND_WAIT([ovsdb-server])
++
++dnl Convert to standalone database from clustered database.
++AT_CHECK([ovsdb-tool cluster-to-standalone db1 db])
++
++dnl Check it's a standalone db.
++AT_CHECK([ovsdb-tool db-is-standalone db1])
++
++dnl Dump the standalone db data.
++AT_CHECK([ovsdb-server -vconsole:off -vfile -vvlog:off --detach --no-chdir dnl
++                       --pidfile --log-file --remote=punix:db.sock db1])
++AT_CHECK([ovsdb_client_wait ordinals connected])
++AT_CHECK([ovsdb-client dump > standalonedump])
++OVS_APP_EXIT_AND_WAIT([ovsdb-server])
++
++dnl Make sure both standalone and cluster db data matches.
++AT_CHECK([diff standalonedump clusterdump])
++AT_CLEANUP
 diff --git a/tests/packet-type-aware.at b/tests/packet-type-aware.at
 index 3b5c66fe5..d63528e69 100644
 --- a/tests/packet-type-aware.at
diff --git a/SPECS/openvswitch3.1.spec b/SPECS/openvswitch3.1.spec
index 290f200..ff06c2a 100644
--- a/SPECS/openvswitch3.1.spec
+++ b/SPECS/openvswitch3.1.spec
@@ -57,7 +57,7 @@ Summary: Open vSwitch
 Group: System Environment/Daemons daemon/database/utilities
 URL: http://www.openvswitch.org/
 Version: 3.1.0
-Release: 17%{?dist}
+Release: 18%{?dist}
 
 # Nearly all of openvswitch is ASL 2.0.  The bugtool is LGPLv2+, and the
 # lib/sflow*.[ch] files are SISSL
@@ -751,6 +751,14 @@ exit 0
 %endif
 
 %changelog
+* Mon Apr 24 2023 Open vSwitch CI <ovs-ci@redhat.com> - 3.1.0-18
+- Merging upstream branch-3.1 [RH git: 131742fa49]
+    Commit list:
+    9529e9aa96 ovsdb: Allow conversion records with no data in a clustered storage.
+    7006bb112b ovsdb: Check for ephemeral columns before writing a new schema.
+    27678b3a19 ovsdb-tool: Fix cluster-to-standalone for DB conversion records.
+
+
 * Tue Apr 11 2023 Open vSwitch CI <ovs-ci@redhat.com> - 3.1.0-17
 - Merging upstream branch-3.1 [RH git: 6064fd8c48]
     Commit list: