From 088baea86780021290cc9dbb3e313723d7040cbd Mon Sep 17 00:00:00 2001
Message-Id: <088baea86780021290cc9dbb3e313723d7040cbd@dist-git>
From: "Michael R. Hines" <mrhines@us.ibm.com>
Date: Tue, 23 Sep 2014 15:47:58 +0200
Subject: [PATCH] qemu: Memory pre-pinning support for RDMA migration

RDMA live migration requires registering memory with the hardware, so
QEMU offers a new 'capability' to pre-register / mlock() the guest
memory before the migration begins for higher RDMA performance. This
capability is disabled by default, which means QEMU will register the
memory with the hardware on demand.

This patch exposes this capability with the following example usage:

virsh migrate --live --rdma-pin-all --migrateuri rdma://hostname domain qemu+ssh://hostname/system
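
For applications driving the C API directly, the same request could look
roughly like the sketch below. This is illustrative only: the function
name, error handling and the "rdma://hostname" URI (taken from the virsh
example above) are hypothetical; only the VIR_MIGRATE_RDMA_PIN_ALL flag
is introduced by this patch.

    /* Minimal sketch, assuming a client built against headers that
     * define VIR_MIGRATE_RDMA_PIN_ALL. */
    #include <stdio.h>
    #include <libvirt/libvirt.h>

    static int
    migrate_with_rdma_pinning(virDomainPtr dom, virConnectPtr dstconn)
    {
        unsigned long flags = VIR_MIGRATE_LIVE | VIR_MIGRATE_RDMA_PIN_ALL;
        virDomainPtr ddom;

        /* "rdma://hostname" mirrors the --migrateuri value used in the
         * virsh example above. */
        ddom = virDomainMigrate(dom, dstconn, flags, NULL,
                                "rdma://hostname", 0);
        if (!ddom) {
            fprintf(stderr, "RDMA migration failed\n");
            return -1;
        }
        virDomainFree(ddom);
        return 0;
    }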

https://bugzilla.redhat.com/show_bug.cgi?id=1013055

Signed-off-by: Michael R. Hines <mrhines@us.ibm.com>
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
(cherry picked from commit 9cc1586d2b5b6d4d396ebfa0479ae441e0b917ea)
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
 include/libvirt/libvirt.h.in |  1 +
 src/qemu/qemu_migration.c    | 49 ++++++++++++++++++++++++++++++++++++++++++++
 src/qemu/qemu_migration.h    |  3 ++-
 tools/virsh-domain.c         |  7 +++++++
 4 files changed, 59 insertions(+), 1 deletion(-)

diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in
index b07797e..0b40727 100644
--- a/include/libvirt/libvirt.h.in
+++ b/include/libvirt/libvirt.h.in
@@ -1224,6 +1224,7 @@ typedef enum {
     VIR_MIGRATE_COMPRESSED        = (1 << 11), /* compress data during migration */
     VIR_MIGRATE_ABORT_ON_ERROR    = (1 << 12), /* abort migration on I/O errors happened during migration */
     VIR_MIGRATE_AUTO_CONVERGE     = (1 << 13), /* force convergence */
+    VIR_MIGRATE_RDMA_PIN_ALL      = (1 << 14), /* RDMA memory pinning */
 } virDomainMigrateFlags;
 
 
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 3433ec9..fda7cda 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -1874,6 +1874,46 @@ qemuMigrationSetAutoConverge(virQEMUDriverPtr driver,
 
 
 static int
+qemuMigrationSetPinAll(virQEMUDriverPtr driver,
+                       virDomainObjPtr vm,
+                       qemuDomainAsyncJob job)
+{
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+    int ret;
+
+    if (qemuDomainObjEnterMonitorAsync(driver, vm, job) < 0)
+        return -1;
+
+    ret = qemuMonitorGetMigrationCapability(
+                priv->mon,
+                QEMU_MONITOR_MIGRATION_CAPS_RDMA_PIN_ALL);
+
+    if (ret < 0) {
+        goto cleanup;
+    } else if (ret == 0) {
+        if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
+            virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                           _("rdma pinning migration is not supported by "
+                             "target QEMU binary"));
+        } else {
+            virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                           _("rdma pinning migration is not supported by "
+                             "source QEMU binary"));
+        }
+        ret = -1;
+        goto cleanup;
+    }
+
+    ret = qemuMonitorSetMigrationCapability(
+                priv->mon,
+                QEMU_MONITOR_MIGRATION_CAPS_RDMA_PIN_ALL);
+
+ cleanup:
+    qemuDomainObjExitMonitor(driver, vm);
+    return ret;
+}
+
+static int
 qemuMigrationWaitForSpice(virQEMUDriverPtr driver,
                           virDomainObjPtr vm)
 {
@@ -2709,6 +2749,10 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
         goto stop;
     }
 
+    if (flags & VIR_MIGRATE_RDMA_PIN_ALL &&
+        qemuMigrationSetPinAll(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+        goto stop;
+
     if (mig->lockState) {
         VIR_DEBUG("Received lockstate %s", mig->lockState);
         VIR_FREE(priv->lockState);
@@ -3532,6 +3576,11 @@ qemuMigrationRun(virQEMUDriverPtr driver,
                                      QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
         goto cleanup;
 
+    if (flags & VIR_MIGRATE_RDMA_PIN_ALL &&
+        qemuMigrationSetPinAll(driver, vm,
+                               QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+        goto cleanup;
+
     if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                        QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
         goto cleanup;
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
index 3fa68dc..e7a90c3 100644
--- a/src/qemu/qemu_migration.h
+++ b/src/qemu/qemu_migration.h
@@ -40,7 +40,8 @@
      VIR_MIGRATE_OFFLINE |                      \
      VIR_MIGRATE_COMPRESSED |                   \
      VIR_MIGRATE_ABORT_ON_ERROR |               \
-     VIR_MIGRATE_AUTO_CONVERGE)
+     VIR_MIGRATE_AUTO_CONVERGE |                \
+     VIR_MIGRATE_RDMA_PIN_ALL)
 
 /* All supported migration parameters and their types. */
 # define QEMU_MIGRATION_PARAMETERS                              \
diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c
index 683d92e..23395e5 100644
--- a/tools/virsh-domain.c
+++ b/tools/virsh-domain.c
@@ -8999,6 +8999,10 @@ static const vshCmdOptDef opts_migrate[] = {
      .type = VSH_OT_BOOL,
      .help = N_("force convergence during live migration")
     },
+    {.name = "rdma-pin-all",
+     .type = VSH_OT_BOOL,
+     .help = N_("support memory pinning during RDMA live migration")
+    },
     {.name = "abort-on-error",
      .type = VSH_OT_BOOL,
      .help = N_("abort on soft errors during migration")
@@ -9147,6 +9151,9 @@ doMigrate(void *opaque)
     if (vshCommandOptBool(cmd, "auto-converge"))
         flags |= VIR_MIGRATE_AUTO_CONVERGE;
 
+    if (vshCommandOptBool(cmd, "rdma-pin-all"))
+        flags |= VIR_MIGRATE_RDMA_PIN_ALL;
+
     if (vshCommandOptBool(cmd, "offline")) {
         flags |= VIR_MIGRATE_OFFLINE;
     }
-- 
2.1.1