From a0cfb6daa757761c0397dc0ceb211b64891a1983 Mon Sep 17 00:00:00 2001
Message-Id: <a0cfb6daa757761c0397dc0ceb211b64891a1983@dist-git>
From: Peter Krempa <pkrempa@redhat.com>
Date: Thu, 2 Apr 2015 11:27:59 +0200
Subject: [PATCH] qemu: blockjob: Synchronously update backing chain in XML on
 ABORT/PIVOT

https://bugzilla.redhat.com/show_bug.cgi?id=1208021

When the synchronous pivot option is selected, libvirt would not update
the backing chain until the job was exited. Some applications then
received invalid data as their job serialized first.

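To illustrate the caller's view (a hypothetical sketch against the
public libvirt API; the connection URI, domain name and disk name are
invented and error handling is elided), a flag-less abort/pivot is
meant to be synchronous, so the XML fetched right after it returns must
already describe the new chain:

    #include <stdio.h>
    #include <stdlib.h>
    #include <libvirt/libvirt.h>

    int main(void)
    {
        virConnectPtr conn = virConnectOpen("qemu:///system");
        virDomainPtr dom = virDomainLookupByName(conn, "example-vm");

        /* No VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC flag: the call must not
         * return before the backing chain in the XML is updated. */
        virDomainBlockJobAbort(dom, "vda", VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT);

        /* Dump the live XML right away. */
        char *xml = virDomainGetXMLDesc(dom, 0);
        puts(xml);

        free(xml);
        virDomainFree(dom);
        virConnectClose(conn);
        return 0;
    }

Before this fix the dump could still show the old backing chain,
because the XML was only updated later from the event worker.
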
This patch removes the polling used to wait for ABORT/PIVOT job
completion and replaces it with a condition variable. If a synchronous
operation is requested, the XML update is executed in the job of the
caller of the synchronous request. Otherwise the monitor event callback
uses a separate worker to update the backing chain with a new job.

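In outline the new scheme is the classic condition-variable handshake
(a minimal standalone sketch using plain pthreads, which libvirt's
virCond/virMutex wrap; the names mirror the new _virDomainDiskDef
fields, but the program is illustrative only):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* vm lock */
    static pthread_cond_t blockJobSyncCond = PTHREAD_COND_INITIALIZER;
    static int blockJobStatus = -1;   /* -1: no event delivered yet */
    static bool blockJobSync = true;  /* a synchronous caller is waiting */

    /* Monitor event handler side: store the result, wake the waiter. */
    static void *eventHandler(void *opaque)
    {
        sleep(1); /* the block job finishes some time later */
        pthread_mutex_lock(&lock);
        blockJobStatus = 0; /* e.g. VIR_DOMAIN_BLOCK_JOB_COMPLETED */
        pthread_cond_signal(&blockJobSyncCond);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t thr;
        pthread_create(&thr, NULL, eventHandler, NULL);

        /* Synchronous API side: instead of polling every 50ms, sleep
         * on the condition until the handler posts the status. */
        pthread_mutex_lock(&lock);
        while (blockJobStatus == -1 && blockJobSync)
            pthread_cond_wait(&blockJobSyncCond, &lock);
        blockJobSync = false;
        pthread_mutex_unlock(&lock);

        printf("block job finished with status %d\n", blockJobStatus);
        pthread_join(thr, NULL);
        return 0;
    }

The loop re-checks the predicate after each wakeup, so spurious wakeups
are harmless; clearing blockJobSync afterwards tells the event handler
that nobody is waiting any more.
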
This is a regression since 1a92c719101e5bfa6fe2b78006ad04c7f075ea28
(downstream commit 12fdae1ebb74296a4db3b191f16dfda757024b8f)

When the ABORT job is finished synchronously you get the following call
stack:
 #0 qemuBlockJobEventProcess
 #1 qemuDomainBlockJobImpl
 #2 qemuDomainBlockJobAbort
 #3 virDomainBlockJobAbort

Previously, or when using the _ASYNC flag, you'd get:
 #0 qemuBlockJobEventProcess
 #1 processBlockJobEvent
 #2 qemuProcessEventHandler
 #3 virThreadPoolWorker

(cherry picked from commit 630ee5ac6cf4e3be3f3e986897a289865dd2604b)

Conflicts:
	src/qemu/qemu_driver.c - context: The deleted hunk that was
	polling for the block job state was not yet converted to the new
	locking scheme downstream.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
 src/conf/domain_conf.c  | 16 +++++++++++++++-
 src/conf/domain_conf.h  |  6 ++++++
 src/qemu/qemu_driver.c  | 45 +++++++++++++++++++--------------------------
 src/qemu/qemu_process.c | 38 +++++++++++++++++++++++++-------------
 4 files changed, 65 insertions(+), 40 deletions(-)

diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index 9bfffd0..c7059d2 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -1238,9 +1238,22 @@ virDomainDiskDefNew(void)
 
     if (VIR_ALLOC(ret) < 0)
         return NULL;
+
     if (VIR_ALLOC(ret->src) < 0)
-        VIR_FREE(ret);
+        goto error;
+
+    if (virCondInit(&ret->blockJobSyncCond) < 0) {
+        virReportSystemError(errno, "%s", _("Failed to initialize condition"));
+        goto error;
+    }
+
     return ret;
+
+ error:
+    virStorageSourceFree(ret->src);
+    VIR_FREE(ret);
+
+    return NULL;
 }
 
 
@@ -1258,6 +1271,7 @@ virDomainDiskDefFree(virDomainDiskDefPtr def)
     VIR_FREE(def->vendor);
     VIR_FREE(def->product);
     virDomainDeviceInfoClear(&def->info);
+    virCondDestroy(&def->blockJobSyncCond);
 
     VIR_FREE(def);
 }
diff --git a/src/conf/domain_conf.h b/src/conf/domain_conf.h
index 654c27d..d672294 100644
--- a/src/conf/domain_conf.h
+++ b/src/conf/domain_conf.h
@@ -644,6 +644,12 @@ struct _virDomainDiskDef {
     int mirrorState; /* enum virDomainDiskMirrorState */
     int mirrorJob; /* virDomainBlockJobType */
 
+    /* for some synchronous block jobs, we need to notify the owner */
+    virCond blockJobSyncCond;
+    int blockJobType; /* type of the block job from the event */
+    int blockJobStatus; /* status of the finished block job */
+    bool blockJobSync; /* the block job needs synchronized termination */
+
     struct {
         unsigned int cylinders;
         unsigned int heads;
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 490650e..5f7fedc 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -15693,6 +15693,12 @@ qemuDomainBlockJobImpl(virDomainObjPtr vm,
         goto endjob;
 
     if (mode == BLOCK_JOB_ABORT) {
+        if (async && !(flags & VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC)) {
+            /* prepare state for event delivery */
+            disk->blockJobStatus = -1;
+            disk->blockJobSync = true;
+        }
+
         if ((flags & VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT) &&
             !(async && disk->mirror)) {
             virReportError(VIR_ERR_OPERATION_INVALID,
@@ -15802,36 +15808,23 @@ qemuDomainBlockJobImpl(virDomainObjPtr vm,
                                                 status);
         event2 = virDomainEventBlockJob2NewFromObj(vm, disk->dst, type,
                                                    status);
-    } else if (!(flags & VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC)) {
+    } else if (disk->blockJobSync) {
         /* XXX If the event reports failure, we should reflect
          * that back into the return status of this API call. */
-        while (1) {
-            /* Poll every 50ms */
-            static struct timespec ts = { .tv_sec = 0,
-                                          .tv_nsec = 50 * 1000 * 1000ull };
-            virDomainBlockJobInfo dummy;
-
-            qemuDomainObjEnterMonitor(driver, vm);
-            ret = qemuMonitorBlockJob(priv->mon, device, NULL, NULL, 0,
-                                      &dummy, BLOCK_JOB_INFO, async);
-            qemuDomainObjExitMonitor(driver, vm);
-
-            if (ret <= 0)
-                break;
-
-            virObjectUnlock(vm);
-
-            nanosleep(&ts, NULL);
-
-            virObjectLock(vm);
-
-            if (!virDomainObjIsActive(vm)) {
-                virReportError(VIR_ERR_OPERATION_INVALID, "%s",
-                               _("domain is not running"));
-                ret = -1;
-                break;
+        while (disk->blockJobStatus == -1 && disk->blockJobSync) {
+            if (virCondWait(&disk->blockJobSyncCond, &vm->parent.lock) < 0) {
+                virReportSystemError(errno, "%s",
+                                     _("Unable to wait on block job sync "
+                                       "condition"));
+                disk->blockJobSync = false;
+                goto endjob;
             }
         }
+
+        qemuBlockJobEventProcess(driver, vm, disk,
+                                 disk->blockJobType,
+                                 disk->blockJobStatus);
+        disk->blockJobSync = false;
     }
 }
 
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 83a59a1..49b5df4 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -1025,28 +1025,40 @@ qemuProcessHandleBlockJob(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
 {
     virQEMUDriverPtr driver = opaque;
     struct qemuProcessEvent *processEvent = NULL;
-    char *data;
+    virDomainDiskDefPtr disk;
+    char *data = NULL;
 
     virObjectLock(vm);
 
     VIR_DEBUG("Block job for device %s (domain: %p,%s) type %d status %d",
               diskAlias, vm, vm->def->name, type, status);
 
-    if (VIR_ALLOC(processEvent) < 0)
+    if (!(disk = qemuProcessFindDomainDiskByAlias(vm, diskAlias)))
         goto error;
 
-    processEvent->eventType = QEMU_PROCESS_EVENT_BLOCK_JOB;
-    if (VIR_STRDUP(data, diskAlias) < 0)
-        goto error;
-    processEvent->data = data;
-    processEvent->vm = vm;
-    processEvent->action = type;
-    processEvent->status = status;
+    if (disk->blockJobSync) {
+        disk->blockJobType = type;
+        disk->blockJobStatus = status;
+        /* We have a SYNC API waiting for this event, dispatch it back */
+        virCondSignal(&disk->blockJobSyncCond);
+    } else {
+        /* there is no waiting SYNC API, dispatch the update to a thread */
+        if (VIR_ALLOC(processEvent) < 0)
+            goto error;
 
-    virObjectRef(vm);
-    if (virThreadPoolSendJob(driver->workerPool, 0, processEvent) < 0) {
-        ignore_value(virObjectUnref(vm));
-        goto error;
+        processEvent->eventType = QEMU_PROCESS_EVENT_BLOCK_JOB;
+        if (VIR_STRDUP(data, diskAlias) < 0)
+            goto error;
+        processEvent->data = data;
+        processEvent->vm = vm;
+        processEvent->action = type;
+        processEvent->status = status;
+
+        virObjectRef(vm);
+        if (virThreadPoolSendJob(driver->workerPool, 0, processEvent) < 0) {
+            ignore_value(virObjectUnref(vm));
+            goto error;
+        }
     }
 
  cleanup:
-- 
2.3.5