From ed2165ed57e2e207d08e3202e0f93201a1f1183b Mon Sep 17 00:00:00 2001
Message-Id: <ed2165ed57e2e207d08e3202e0f93201a1f1183b@dist-git>
From: Peter Krempa <pkrempa@redhat.com>
Date: Mon, 8 Sep 2014 13:24:17 +0200
Subject: [PATCH] qemu: snapshot: Fix job handling when creating snapshots

https://bugzilla.redhat.com/show_bug.cgi?id=1134154

Creating snapshots modifies the domain state. Currently we wouldn't
enter the job for certain operations although they would modify the
state. Refactor job handling so that everything is covered by an async
job.
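
In rough outline, the resulting control flow in qemuDomainSnapshotCreateXML
becomes the following (a simplified sketch based on this patch, with error
handling and most branches omitted):

    /* take one async job for the whole snapshot operation */
    if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SNAPSHOT) < 0)
        goto cleanup;

    /* 'savevm' blocks the monitor, so forbid nested jobs for now; the
     * external snapshot path relaxes the mask again where that is safe */
    qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);

    /* ... the internal/external snapshot helpers run here and pass
     * QEMU_ASYNC_JOB_SNAPSHOT to qemuProcessStopCPUs(),
     * qemuDomainObjEnterMonitorAsync() and qemuProcessStartCPUs() ... */

 endjob:
    /* write or roll back snapshot metadata, then finish the job */
    if (!qemuDomainObjEndAsyncJob(driver, vm))
        vm = NULL;  /* transient domain quit while the lock was down */

 cleanup:
    if (vm)
        virObjectUnlock(vm);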

(cherry picked from commit b3d2a42e80aaee1f322fc9beb98b6ed541574ab3)

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
src/qemu/qemu_driver.c | 173 +++++++++++++++++++++----------------------------
1 file changed, 75 insertions(+), 98 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 74d0477..6a79f28 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -12363,32 +12363,22 @@ qemuDomainSnapshotCreateInactiveExternal(virQEMUDriverPtr driver,
static int
qemuDomainSnapshotCreateActiveInternal(virConnectPtr conn,
virQEMUDriverPtr driver,
- virDomainObjPtr *vmptr,
+ virDomainObjPtr vm,
virDomainSnapshotObjPtr snap,
unsigned int flags)
{
- virDomainObjPtr vm = *vmptr;
qemuDomainObjPrivatePtr priv = vm->privateData;
virObjectEventPtr event = NULL;
bool resume = false;
int ret = -1;

- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
- return -1;
-
- if (!virDomainObjIsActive(vm)) {
- virReportError(VIR_ERR_OPERATION_INVALID,
- "%s", _("domain is not running"));
- goto endjob;
- }
-
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
/* savevm monitor command pauses the domain emitting an event which
* confuses libvirt since it's not notified when qemu resumes the
* domain. Thus we stop and start CPUs ourselves.
*/
if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE,
- QEMU_ASYNC_JOB_NONE) < 0)
+ QEMU_ASYNC_JOB_SNAPSHOT) < 0)
goto cleanup;

resume = true;
@@ -12399,7 +12389,12 @@ qemuDomainSnapshotCreateActiveInternal(virConnectPtr conn,
}
}

- qemuDomainObjEnterMonitor(driver, vm);
+ if (qemuDomainObjEnterMonitorAsync(driver, vm,
+ QEMU_ASYNC_JOB_SNAPSHOT) < 0) {
+ resume = false;
+ goto cleanup;
+ }
+
ret = qemuMonitorCreateSnapshot(priv->mon, snap->def->name);
qemuDomainObjExitMonitor(driver, vm);
if (ret < 0)
@@ -12410,18 +12405,14 @@ qemuDomainSnapshotCreateActiveInternal(virConnectPtr conn,
VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT);
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT, 0);
virDomainAuditStop(vm, "from-snapshot");
- /* We already filtered the _HALT flag for persistent domains
- * only, so this end job never drops the last reference. */
- ignore_value(qemuDomainObjEndJob(driver, vm));
resume = false;
- vm = NULL;
}

cleanup:
if (resume && virDomainObjIsActive(vm) &&
qemuProcessStartCPUs(driver, vm, conn,
VIR_DOMAIN_RUNNING_UNPAUSED,
- QEMU_ASYNC_JOB_NONE) < 0) {
+ QEMU_ASYNC_JOB_SNAPSHOT) < 0) {
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_SUSPENDED,
VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR);
@@ -12431,14 +12422,6 @@ qemuDomainSnapshotCreateActiveInternal(virConnectPtr conn,
}
}

- endjob:
- if (vm && !qemuDomainObjEndJob(driver, vm)) {
- /* Only possible if a transient vm quit while our locks were down,
- * in which case we don't want to save snapshot metadata. */
- *vmptr = NULL;
- ret = -1;
- }
-
if (event)
qemuDomainEventQueue(driver, event);

@@ -13140,13 +13123,13 @@ qemuDomainSnapshotCreateDiskActive(virQEMUDriverPtr driver,
static int
qemuDomainSnapshotCreateActiveExternal(virConnectPtr conn,
virQEMUDriverPtr driver,
- virDomainObjPtr *vmptr,
+ virDomainObjPtr vm,
virDomainSnapshotObjPtr snap,
unsigned int flags)
{
+ virObjectEventPtr event;
bool resume = false;
int ret = -1;
- virDomainObjPtr vm = *vmptr;
qemuDomainObjPrivatePtr priv = vm->privateData;
char *xml = NULL;
bool memory = snap->def->memory == VIR_DOMAIN_SNAPSHOT_LOCATION_EXTERNAL;
@@ -13158,9 +13141,6 @@ qemuDomainSnapshotCreateActiveExternal(virConnectPtr conn,
virQEMUDriverConfigPtr cfg = NULL;
int compressed = QEMU_SAVE_FORMAT_RAW;

- if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SNAPSHOT) < 0)
- goto cleanup;
-
/* If quiesce was requested, then issue a freeze command, and a
* counterpart thaw command when it is actually sent to agent.
* The command will fail if the guest is paused or the guest agent
@@ -13171,7 +13151,7 @@ qemuDomainSnapshotCreateActiveExternal(virConnectPtr conn,
/* the helper reported the error */
if (freeze == -2)
thaw = -1; /* the command is sent but agent failed */
- goto endjob;
+ goto cleanup;
}
thaw = 1;
}
@@ -13198,12 +13178,12 @@ qemuDomainSnapshotCreateActiveExternal(virConnectPtr conn,
(!memory && atomic && !transaction)) {
if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SNAPSHOT,
QEMU_ASYNC_JOB_SNAPSHOT) < 0)
- goto endjob;
+ goto cleanup;

if (!virDomainObjIsActive(vm)) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("guest unexpectedly quit"));
- goto endjob;
+ goto cleanup;
}
}
}
@@ -13212,7 +13192,7 @@ qemuDomainSnapshotCreateActiveExternal(virConnectPtr conn,
if (memory) {
/* check if migration is possible */
if (!qemuMigrationIsAllowed(driver, vm, vm->def, false, false))
- goto endjob;
+ goto cleanup;

/* allow the migration job to be cancelled or the domain to be paused */
qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK |
@@ -13226,24 +13206,24 @@ qemuDomainSnapshotCreateActiveExternal(virConnectPtr conn,
virReportError(VIR_ERR_OPERATION_FAILED, "%s",
_("Invalid snapshot image format specified "
"in configuration file"));
- goto endjob;
+ goto cleanup;
}

if (!qemuCompressProgramAvailable(compressed)) {
virReportError(VIR_ERR_OPERATION_FAILED, "%s",
_("Compression program for image format "
"in configuration file isn't available"));
- goto endjob;
+ goto cleanup;
}
}

if (!(xml = qemuDomainDefFormatLive(driver, vm->def, true, true)))
- goto endjob;
+ goto cleanup;

if ((ret = qemuDomainSaveMemory(driver, vm, snap->def->file,
xml, compressed, resume, 0,
QEMU_ASYNC_JOB_SNAPSHOT)) < 0)
- goto endjob;
+ goto cleanup;

/* the memory image was created, remove it on errors */
memory_unlink = true;
@@ -13261,30 +13241,22 @@ qemuDomainSnapshotCreateActiveExternal(virConnectPtr conn,
*/
if ((ret = qemuDomainSnapshotCreateDiskActive(driver, vm, snap, flags,
QEMU_ASYNC_JOB_SNAPSHOT)) < 0)
- goto endjob;
+ goto cleanup;

/* the snapshot is complete now */
if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_HALT) {
- virObjectEventPtr event;
-
event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT);
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT, 0);
virDomainAuditStop(vm, "from-snapshot");
- /* We already filtered the _HALT flag for persistent domains
- * only, so this end job never drops the last reference. */
- ignore_value(qemuDomainObjEndAsyncJob(driver, vm));
resume = false;
thaw = 0;
- vm = NULL;
if (event)
qemuDomainEventQueue(driver, event);
} else if (memory && pmsuspended) {
/* qemu 1.3 is unable to save a domain in pm-suspended (S3)
* state; so we must emit an event stating that it was
* converted to paused. */
- virObjectEventPtr event;
-
virDomainObjSetState(vm, VIR_DOMAIN_PAUSED,
VIR_DOMAIN_PAUSED_FROM_SNAPSHOT);
event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_SUSPENDED,
@@ -13295,12 +13267,11 @@ qemuDomainSnapshotCreateActiveExternal(virConnectPtr conn,

ret = 0;

- endjob:
- if (resume && vm && virDomainObjIsActive(vm) &&
+ cleanup:
+ if (resume && virDomainObjIsActive(vm) &&
qemuProcessStartCPUs(driver, vm, conn,
VIR_DOMAIN_RUNNING_UNPAUSED,
QEMU_ASYNC_JOB_SNAPSHOT) < 0) {
- virObjectEventPtr event = NULL;
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_SUSPENDED,
VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR);
@@ -13314,21 +13285,14 @@ qemuDomainSnapshotCreateActiveExternal(virConnectPtr conn,
ret = -1;
goto cleanup;
}
- if (vm && thaw != 0 &&
+
+ if (thaw != 0 &&
qemuDomainSnapshotFSThaw(driver, vm, thaw > 0) < 0) {
/* helper reported the error, if it was needed */
if (thaw > 0)
ret = -1;
}
- if (vm && !qemuDomainObjEndAsyncJob(driver, vm)) {
- /* Only possible if a transient vm quit while our locks were down,
- * in which case we don't want to save snapshot metadata.
- */
- *vmptr = NULL;
- ret = -1;
- }

- cleanup:
VIR_FREE(xml);
virObjectUnref(cfg);
if (memory_unlink && ret < 0)
@@ -13474,10 +13438,19 @@ qemuDomainSnapshotCreateXML(virDomainPtr domain,
goto cleanup;
}

+ /* We are going to modify the domain below. Internal snapshots would use
+ * a regular job, so we need to set the job mask to disallow query as
+ * 'savevm' blocks the monitor. External snapshot will then modify the
+ * job mask appropriately. */
+ if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SNAPSHOT) < 0)
+ goto cleanup;
+
+ qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
+
if (redefine) {
if (virDomainSnapshotRedefinePrep(domain, vm, &def, &snap,
&update_current, flags) < 0)
- goto cleanup;
+ goto endjob;
} else {
/* Easiest way to clone inactive portion of vm->def is via
* conversion in and back out of xml. */
@@ -13485,7 +13458,7 @@ qemuDomainSnapshotCreateXML(virDomainPtr domain,
!(def->dom = virDomainDefParseString(xml, caps, driver->xmlopt,
QEMU_EXPECTED_VIRT_TYPES,
VIR_DOMAIN_XML_INACTIVE)))
- goto cleanup;
+ goto endjob;

if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY) {
align_location = VIR_DOMAIN_SNAPSHOT_LOCATION_EXTERNAL;
@@ -13507,7 +13480,7 @@ qemuDomainSnapshotCreateXML(virDomainPtr domain,
virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
_("internal snapshot of a running VM "
"must include the memory state"));
- goto cleanup;
+ goto endjob;
}

def->memory = (def->state == VIR_DOMAIN_SHUTOFF ?
@@ -13517,12 +13490,12 @@ qemuDomainSnapshotCreateXML(virDomainPtr domain,
if (virDomainSnapshotAlignDisks(def, align_location,
align_match) < 0 ||
qemuDomainSnapshotPrepare(conn, vm, def, &flags) < 0)
- goto cleanup;
+ goto endjob;
}

if (!snap) {
if (!(snap = virDomainSnapshotAssignDef(vm->snapshots, def)))
- goto cleanup;
+ goto endjob;

def = NULL;
}
@@ -13532,12 +13505,12 @@ qemuDomainSnapshotCreateXML(virDomainPtr domain,
if (vm->current_snapshot) {
if (!redefine &&
VIR_STRDUP(snap->def->parent, vm->current_snapshot->def->name) < 0)
- goto cleanup;
+ goto endjob;
if (update_current) {
vm->current_snapshot->def->current = false;
if (qemuDomainSnapshotWriteMetadata(vm, vm->current_snapshot,
cfg->snapshotDir) < 0)
- goto cleanup;
+ goto endjob;
vm->current_snapshot = NULL;
}
}
@@ -13552,13 +13525,13 @@ qemuDomainSnapshotCreateXML(virDomainPtr domain,
snap->def->memory == VIR_DOMAIN_SNAPSHOT_LOCATION_EXTERNAL) {
/* external checkpoint or disk snapshot */
if (qemuDomainSnapshotCreateActiveExternal(domain->conn, driver,
- &vm, snap, flags) < 0)
- goto cleanup;
+ vm, snap, flags) < 0)
+ goto endjob;
} else {
/* internal checkpoint */
if (qemuDomainSnapshotCreateActiveInternal(domain->conn, driver,
- &vm, snap, flags) < 0)
- goto cleanup;
+ vm, snap, flags) < 0)
+ goto endjob;
}
} else {
/* inactive; qemuDomainSnapshotPrepare guaranteed that we
@@ -13569,10 +13542,10 @@ qemuDomainSnapshotCreateXML(virDomainPtr domain,

if (qemuDomainSnapshotCreateInactiveExternal(driver, vm, snap,
reuse) < 0)
- goto cleanup;
+ goto endjob;
} else {
if (qemuDomainSnapshotCreateInactiveInternal(driver, vm, snap) < 0)
- goto cleanup;
+ goto endjob;
}
}

@@ -13582,34 +13555,38 @@ qemuDomainSnapshotCreateXML(virDomainPtr domain,
*/
snapshot = virGetDomainSnapshot(domain, snap->def->name);

- cleanup:
- if (vm) {
- if (snapshot && !(flags & VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA)) {
- if (qemuDomainSnapshotWriteMetadata(vm, snap,
- cfg->snapshotDir) < 0) {
- /* if writing of metadata fails, error out rather than trying
- * to silently carry on without completing the snapshot */
- virDomainSnapshotFree(snapshot);
- snapshot = NULL;
- virReportError(VIR_ERR_INTERNAL_ERROR,
- _("unable to save metadata for snapshot %s"),
- snap->def->name);
- virDomainSnapshotObjListRemove(vm->snapshots, snap);
- } else {
- if (update_current)
- vm->current_snapshot = snap;
- other = virDomainSnapshotFindByName(vm->snapshots,
- snap->def->parent);
- snap->parent = other;
- other->nchildren++;
- snap->sibling = other->first_child;
- other->first_child = snap;
- }
- } else if (snap) {
+ endjob:
+ if (snapshot && !(flags & VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA)) {
+ if (qemuDomainSnapshotWriteMetadata(vm, snap,
+ cfg->snapshotDir) < 0) {
+ /* if writing of metadata fails, error out rather than trying
+ * to silently carry on without completing the snapshot */
+ virDomainSnapshotFree(snapshot);
+ snapshot = NULL;
+ virReportError(VIR_ERR_INTERNAL_ERROR,
+ _("unable to save metadata for snapshot %s"),
+ snap->def->name);
virDomainSnapshotObjListRemove(vm->snapshots, snap);
+ } else {
+ if (update_current)
+ vm->current_snapshot = snap;
+ other = virDomainSnapshotFindByName(vm->snapshots,
+ snap->def->parent);
+ snap->parent = other;
+ other->nchildren++;
+ snap->sibling = other->first_child;
+ other->first_child = snap;
}
- virObjectUnlock(vm);
+ } else if (snap) {
+ virDomainSnapshotObjListRemove(vm->snapshots, snap);
}
+
+ if (!qemuDomainObjEndAsyncJob(driver, vm))
+ vm = NULL;
+
+ cleanup:
+ if (vm)
+ virObjectUnlock(vm);
virDomainSnapshotDefFree(def);
VIR_FREE(xml);
virObjectUnref(caps);
--
2.1.0
