diff --git a/SOURCES/libvirt-qemu-Extract-internals-of-processBlockJobEvent-into-a-helper.patch b/SOURCES/libvirt-qemu-Extract-internals-of-processBlockJobEvent-into-a-helper.patch
new file mode 100644
index 0000000..9d7e5df
--- /dev/null
+++ b/SOURCES/libvirt-qemu-Extract-internals-of-processBlockJobEvent-into-a-helper.patch
@@ -0,0 +1,260 @@
+From 9eace35bd8fd465a36132f3b66b662fff0cb92e5 Mon Sep 17 00:00:00 2001
+Message-Id: <9eace35bd8fd465a36132f3b66b662fff0cb92e5@dist-git>
+From: Peter Krempa <pkrempa@redhat.com>
+Date: Thu, 2 Apr 2015 11:27:58 +0200
+Subject: [PATCH] qemu: Extract internals of processBlockJobEvent into a helper
+
+https://bugzilla.redhat.com/show_bug.cgi?id=1208021
+
+Later on I'll be adding a condition that will allow synchronising a
+SYNC block job abort. The approach will require this code to be called
+from two different places, so it has to be extracted into a helper.
+
+(cherry picked from commit 0c4474df4e10d27e27dbcda80b1f9cc14f4bdd8a)
+
+Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
+---
+ src/qemu/qemu_driver.c | 198 +++++++++++++++++++++++++------------------------
+ 1 file changed, 103 insertions(+), 95 deletions(-)
+
+diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
+index 0939223..490650e 100644
+--- a/src/qemu/qemu_driver.c
++++ b/src/qemu/qemu_driver.c
+@@ -4402,116 +4402,101 @@ processSerialChangedEvent(virQEMUDriverPtr driver,
+ 
+ 
+ static void
+-processBlockJobEvent(virQEMUDriverPtr driver,
+-                     virDomainObjPtr vm,
+-                     char *diskAlias,
+-                     int type,
+-                     int status)
++qemuBlockJobEventProcess(virQEMUDriverPtr driver,
++                         virDomainObjPtr vm,
++                         virDomainDiskDefPtr disk,
++                         int type,
++                         int status)
+ {
+     virObjectEventPtr event = NULL;
+     virObjectEventPtr event2 = NULL;
+     const char *path;
+-    virDomainDiskDefPtr disk;
+     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
+     virDomainDiskDefPtr persistDisk = NULL;
+     bool save = false;
+ 
+-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+-        goto cleanup;
++    /* Have to generate two variants of the event for old vs. new
++     * client callbacks */
++    if (type == VIR_DOMAIN_BLOCK_JOB_TYPE_COMMIT &&
++        disk->mirrorJob == VIR_DOMAIN_BLOCK_JOB_TYPE_ACTIVE_COMMIT)
++        type = disk->mirrorJob;
++    path = virDomainDiskGetSource(disk);
++    event = virDomainEventBlockJobNewFromObj(vm, path, type, status);
++    event2 = virDomainEventBlockJob2NewFromObj(vm, disk->dst, type, status);
+ 
+-    if (!virDomainObjIsActive(vm)) {
+-        VIR_DEBUG("Domain is not running");
+-        goto endjob;
+-    }
++    /* If we completed a block pull or commit, then update the XML
++     * to match.  */
++    switch ((virConnectDomainEventBlockJobStatus) status) {
++    case VIR_DOMAIN_BLOCK_JOB_COMPLETED:
++        if (disk->mirrorState == VIR_DOMAIN_DISK_MIRROR_STATE_PIVOT) {
++            if (vm->newDef) {
++                int indx = virDomainDiskIndexByName(vm->newDef, disk->dst, false);
++                virStorageSourcePtr copy = NULL;
+ 
+-    disk = qemuProcessFindDomainDiskByAlias(vm, diskAlias);
+-
+-    if (disk) {
+-        /* Have to generate two variants of the event for old vs. new
+-         * client callbacks */
+-        if (type == VIR_DOMAIN_BLOCK_JOB_TYPE_COMMIT &&
+-            disk->mirrorJob == VIR_DOMAIN_BLOCK_JOB_TYPE_ACTIVE_COMMIT)
+-            type = disk->mirrorJob;
+-        path = virDomainDiskGetSource(disk);
+-        event = virDomainEventBlockJobNewFromObj(vm, path, type, status);
+-        event2 = virDomainEventBlockJob2NewFromObj(vm, disk->dst, type,
+-                                                   status);
+-
+-        /* If we completed a block pull or commit, then update the XML
+-         * to match.  */
+-        switch ((virConnectDomainEventBlockJobStatus) status) {
+-        case VIR_DOMAIN_BLOCK_JOB_COMPLETED:
+-            if (disk->mirrorState == VIR_DOMAIN_DISK_MIRROR_STATE_PIVOT) {
+-                if (vm->newDef) {
+-                    int indx = virDomainDiskIndexByName(vm->newDef, disk->dst,
+-                                                        false);
+-                    virStorageSourcePtr copy = NULL;
+-
+-                    if (indx >= 0) {
+-                        persistDisk = vm->newDef->disks[indx];
+-                        copy = virStorageSourceCopy(disk->mirror, false);
+-                        if (virStorageSourceInitChainElement(copy,
+-                                                             persistDisk->src,
+-                                                             true) < 0) {
+-                            VIR_WARN("Unable to update persistent definition "
+-                                     "on vm %s after block job",
+-                                     vm->def->name);
+-                            virStorageSourceFree(copy);
+-                            copy = NULL;
+-                            persistDisk = NULL;
+-                        }
+-                    }
+-                    if (copy) {
+-                        virStorageSourceFree(persistDisk->src);
+-                        persistDisk->src = copy;
++                if (indx >= 0) {
++                    persistDisk = vm->newDef->disks[indx];
++                    copy = virStorageSourceCopy(disk->mirror, false);
++                    if (virStorageSourceInitChainElement(copy,
++                                                         persistDisk->src,
++                                                         true) < 0) {
++                        VIR_WARN("Unable to update persistent definition "
++                                 "on vm %s after block job",
++                                 vm->def->name);
++                        virStorageSourceFree(copy);
++                        copy = NULL;
++                        persistDisk = NULL;
+                     }
+                 }
+-
+-                /* XXX We want to revoke security labels and disk
+-                 * lease, as well as audit that revocation, before
+-                 * dropping the original source.  But it gets tricky
+-                 * if both source and mirror share common backing
+-                 * files (we want to only revoke the non-shared
+-                 * portion of the chain); so for now, we leak the
+-                 * access to the original.  */
+-                virStorageSourceFree(disk->src);
+-                disk->src = disk->mirror;
+-            } else {
+-                virStorageSourceFree(disk->mirror);
++                if (copy) {
++                    virStorageSourceFree(persistDisk->src);
++                    persistDisk->src = copy;
++                }
+             }
+ 
+-            /* Recompute the cached backing chain to match our
+-             * updates.  Better would be storing the chain ourselves
+-             * rather than reprobing, but we haven't quite completed
+-             * that conversion to use our XML tracking. */
+-            disk->mirror = NULL;
+-            save = disk->mirrorState != VIR_DOMAIN_DISK_MIRROR_STATE_NONE;
+-            disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_NONE;
+-            disk->mirrorJob = VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN;
+-            ignore_value(qemuDomainDetermineDiskChain(driver, vm, disk,
+-                                                      true, true));
+-            disk->blockjob = false;
+-            break;
+-
+-        case VIR_DOMAIN_BLOCK_JOB_READY:
+-            disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_READY;
+-            save = true;
+-            break;
+-
+-        case VIR_DOMAIN_BLOCK_JOB_FAILED:
+-        case VIR_DOMAIN_BLOCK_JOB_CANCELED:
++            /* XXX We want to revoke security labels and disk
++             * lease, as well as audit that revocation, before
++             * dropping the original source.  But it gets tricky
++             * if both source and mirror share common backing
++             * files (we want to only revoke the non-shared
++             * portion of the chain); so for now, we leak the
++             * access to the original.  */
++            virStorageSourceFree(disk->src);
++            disk->src = disk->mirror;
++        } else {
+             virStorageSourceFree(disk->mirror);
+-            disk->mirror = NULL;
+-            disk->mirrorState = status == VIR_DOMAIN_BLOCK_JOB_FAILED ?
+-                VIR_DOMAIN_DISK_MIRROR_STATE_ABORT : VIR_DOMAIN_DISK_MIRROR_STATE_NONE;
+-            disk->mirrorJob = VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN;
+-            save = true;
+-            disk->blockjob = false;
+-            break;
+-
+-        case VIR_DOMAIN_BLOCK_JOB_LAST:
+-            break;
+         }
++
++        /* Recompute the cached backing chain to match our
++         * updates.  Better would be storing the chain ourselves
++         * rather than reprobing, but we haven't quite completed
++         * that conversion to use our XML tracking. */
++        disk->mirror = NULL;
++        save = disk->mirrorState != VIR_DOMAIN_DISK_MIRROR_STATE_NONE;
++        disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_NONE;
++        disk->mirrorJob = VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN;
++        ignore_value(qemuDomainDetermineDiskChain(driver, vm, disk,
++                                                  true, true));
++        disk->blockjob = false;
++        break;
++
++    case VIR_DOMAIN_BLOCK_JOB_READY:
++        disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_READY;
++        save = true;
++        break;
++
++    case VIR_DOMAIN_BLOCK_JOB_FAILED:
++    case VIR_DOMAIN_BLOCK_JOB_CANCELED:
++        virStorageSourceFree(disk->mirror);
++        disk->mirror = NULL;
++        disk->mirrorState = status == VIR_DOMAIN_BLOCK_JOB_FAILED ?
++            VIR_DOMAIN_DISK_MIRROR_STATE_ABORT : VIR_DOMAIN_DISK_MIRROR_STATE_NONE;
++        disk->mirrorJob = VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN;
++        save = true;
++        disk->blockjob = false;
++        break;
++
++    case VIR_DOMAIN_BLOCK_JOB_LAST:
++        break;
+     }
+ 
+     if (save) {
+@@ -4523,13 +4508,36 @@ processBlockJobEvent(virQEMUDriverPtr driver,
+             VIR_WARN("Unable to update persistent definition on vm %s "
+                      "after block job", vm->def->name);
+     }
+-    virObjectUnref(cfg);
+ 
+     if (event)
+         qemuDomainEventQueue(driver, event);
+     if (event2)
+         qemuDomainEventQueue(driver, event2);
+ 
++    virObjectUnref(cfg);
++}
++
++
++static void
++processBlockJobEvent(virQEMUDriverPtr driver,
++                     virDomainObjPtr vm,
++                     char *diskAlias,
++                     int type,
++                     int status)
++{
++    virDomainDiskDefPtr disk;
++
++    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
++        goto cleanup;
++
++    if (!virDomainObjIsActive(vm)) {
++        VIR_DEBUG("Domain is not running");
++        goto endjob;
++    }
++
++    if ((disk = qemuProcessFindDomainDiskByAlias(vm, diskAlias)))
++        qemuBlockJobEventProcess(driver, vm, disk, type, status);
++
+  endjob:
+     ignore_value(qemuDomainObjEndJob(driver, vm));
+  cleanup:
+-- 
+2.3.5
+
diff --git a/SOURCES/libvirt-qemu-blockjob-Synchronously-update-backing-chain-in-XML-on-ABORT-PIVOT.patch b/SOURCES/libvirt-qemu-blockjob-Synchronously-update-backing-chain-in-XML-on-ABORT-PIVOT.patch
new file mode 100644
index 0000000..d7b4f55
--- /dev/null
+++ b/SOURCES/libvirt-qemu-blockjob-Synchronously-update-backing-chain-in-XML-on-ABORT-PIVOT.patch
@@ -0,0 +1,231 @@
+From a0cfb6daa757761c0397dc0ceb211b64891a1983 Mon Sep 17 00:00:00 2001
+Message-Id: <a0cfb6daa757761c0397dc0ceb211b64891a1983@dist-git>
+From: Peter Krempa <pkrempa@redhat.com>
+Date: Thu, 2 Apr 2015 11:27:59 +0200
+Subject: [PATCH] qemu: blockjob: Synchronously update backing chain in XML on
+ ABORT/PIVOT
+
+https://bugzilla.redhat.com/show_bug.cgi?id=1208021
+
+When the synchronous pivot option is selected, libvirt would not update
+the backing chain until the job was exited. Some applications then
+received invalid data because their job serialized first.
+
+This patch removes the polling used to wait for ABORT/PIVOT job
+completion and replaces it with a condition. If a synchronous operation
+is requested, the XML update is executed in the job of the caller of
+the synchronous request. Otherwise, the monitor event callback uses a
+separate worker to update the backing chain with a new job.
+
+This is a regression since 1a92c719101e5bfa6fe2b78006ad04c7f075ea28
+(downstream commit 12fdae1ebb74296a4db3b191f16dfda757024b8f)
+
+When the ABORT job is finished synchronously, you get the following
+call stack:
+ #0  qemuBlockJobEventProcess
+ #1  qemuDomainBlockJobImpl
+ #2  qemuDomainBlockJobAbort
+ #3  virDomainBlockJobAbort
+
+Whereas previously, or when using the _ASYNC flag, you'd get:
+ #0  qemuBlockJobEventProcess
+ #1  processBlockJobEvent
+ #2  qemuProcessEventHandler
+ #3  virThreadPoolWorker
+
+(cherry picked from commit 630ee5ac6cf4e3be3f3e986897a289865dd2604b)
+
+Conflicts:
+	src/qemu/qemu_driver.c - context: The deleted hunk that was
+        polling for the block job state was not yet converted to the new
+        locking scheme downstream.
+
+Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
+---
+ src/conf/domain_conf.c  | 16 +++++++++++++++-
+ src/conf/domain_conf.h  |  6 ++++++
+ src/qemu/qemu_driver.c  | 45 +++++++++++++++++++--------------------------
+ src/qemu/qemu_process.c | 38 +++++++++++++++++++++++++-------------
+ 4 files changed, 65 insertions(+), 40 deletions(-)
+
+diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
+index 9bfffd0..c7059d2 100644
+--- a/src/conf/domain_conf.c
++++ b/src/conf/domain_conf.c
+@@ -1238,9 +1238,22 @@ virDomainDiskDefNew(void)
+ 
+     if (VIR_ALLOC(ret) < 0)
+         return NULL;
++
+     if (VIR_ALLOC(ret->src) < 0)
+-        VIR_FREE(ret);
++        goto error;
++
++    if (virCondInit(&ret->blockJobSyncCond) < 0) {
++        virReportSystemError(errno, "%s", _("Failed to initialize condition"));
++        goto error;
++    }
++
+     return ret;
++
++ error:
++    virStorageSourceFree(ret->src);
++    VIR_FREE(ret);
++
++    return NULL;
+ }
+ 
+ 
+@@ -1258,6 +1271,7 @@ virDomainDiskDefFree(virDomainDiskDefPtr def)
+     VIR_FREE(def->vendor);
+     VIR_FREE(def->product);
+     virDomainDeviceInfoClear(&def->info);
++    virCondDestroy(&def->blockJobSyncCond);
+ 
+     VIR_FREE(def);
+ }
+diff --git a/src/conf/domain_conf.h b/src/conf/domain_conf.h
+index 654c27d..d672294 100644
+--- a/src/conf/domain_conf.h
++++ b/src/conf/domain_conf.h
+@@ -644,6 +644,12 @@ struct _virDomainDiskDef {
+     int mirrorState; /* enum virDomainDiskMirrorState */
+     int mirrorJob; /* virDomainBlockJobType */
+ 
++    /* for some synchronous block jobs, we need to notify the owner */
++    virCond blockJobSyncCond;
++    int blockJobType;   /* type of the block job from the event */
++    int blockJobStatus; /* status of the finished block job */
++    bool blockJobSync; /* the block job needs synchronized termination */
++
+     struct {
+         unsigned int cylinders;
+         unsigned int heads;
+diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
+index 490650e..5f7fedc 100644
+--- a/src/qemu/qemu_driver.c
++++ b/src/qemu/qemu_driver.c
+@@ -15693,6 +15693,12 @@ qemuDomainBlockJobImpl(virDomainObjPtr vm,
+         goto endjob;
+ 
+     if (mode == BLOCK_JOB_ABORT) {
++        if (async && !(flags & VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC)) {
++            /* prepare state for event delivery */
++            disk->blockJobStatus = -1;
++            disk->blockJobSync = true;
++        }
++
+         if ((flags & VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT) &&
+             !(async && disk->mirror)) {
+             virReportError(VIR_ERR_OPERATION_INVALID,
+@@ -15802,36 +15808,23 @@ qemuDomainBlockJobImpl(virDomainObjPtr vm,
+                                                      status);
+             event2 = virDomainEventBlockJob2NewFromObj(vm, disk->dst, type,
+                                                        status);
+-        } else if (!(flags & VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC)) {
++        } else if (disk->blockJobSync) {
+             /* XXX If the event reports failure, we should reflect
+              * that back into the return status of this API call.  */
+-            while (1) {
+-                /* Poll every 50ms */
+-                static struct timespec ts = { .tv_sec = 0,
+-                                              .tv_nsec = 50 * 1000 * 1000ull };
+-                virDomainBlockJobInfo dummy;
+-
+-                qemuDomainObjEnterMonitor(driver, vm);
+-                ret = qemuMonitorBlockJob(priv->mon, device, NULL, NULL, 0,
+-                                          &dummy, BLOCK_JOB_INFO, async);
+-                qemuDomainObjExitMonitor(driver, vm);
+-
+-                if (ret <= 0)
+-                    break;
+-
+-                virObjectUnlock(vm);
+-
+-                nanosleep(&ts, NULL);
+-
+-                virObjectLock(vm);
+-
+-                if (!virDomainObjIsActive(vm)) {
+-                    virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+-                                   _("domain is not running"));
+-                    ret = -1;
+-                    break;
++            while (disk->blockJobStatus == -1 && disk->blockJobSync) {
++                if (virCondWait(&disk->blockJobSyncCond, &vm->parent.lock) < 0) {
++                    virReportSystemError(errno, "%s",
++                                         _("Unable to wait on block job sync "
++                                           "condition"));
++                    disk->blockJobSync = false;
++                    goto endjob;
+                 }
+             }
++
++            qemuBlockJobEventProcess(driver, vm, disk,
++                                     disk->blockJobType,
++                                     disk->blockJobStatus);
++            disk->blockJobSync = false;
+         }
+     }
+ 
+diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
+index 83a59a1..49b5df4 100644
+--- a/src/qemu/qemu_process.c
++++ b/src/qemu/qemu_process.c
+@@ -1025,28 +1025,40 @@ qemuProcessHandleBlockJob(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
+ {
+     virQEMUDriverPtr driver = opaque;
+     struct qemuProcessEvent *processEvent = NULL;
+-    char *data;
++    virDomainDiskDefPtr disk;
++    char *data = NULL;
+ 
+     virObjectLock(vm);
+ 
+     VIR_DEBUG("Block job for device %s (domain: %p,%s) type %d status %d",
+               diskAlias, vm, vm->def->name, type, status);
+ 
+-    if (VIR_ALLOC(processEvent) < 0)
++    if (!(disk = qemuProcessFindDomainDiskByAlias(vm, diskAlias)))
+         goto error;
+ 
+-    processEvent->eventType = QEMU_PROCESS_EVENT_BLOCK_JOB;
+-    if (VIR_STRDUP(data, diskAlias) < 0)
+-        goto error;
+-    processEvent->data = data;
+-    processEvent->vm = vm;
+-    processEvent->action = type;
+-    processEvent->status = status;
++    if (disk->blockJobSync) {
++        disk->blockJobType = type;
++        disk->blockJobStatus = status;
++        /* We have a SYNC API waiting for this event, dispatch it back */
++        virCondSignal(&disk->blockJobSyncCond);
++    } else {
++        /* there is no waiting SYNC API, dispatch the update to a thread */
++        if (VIR_ALLOC(processEvent) < 0)
++            goto error;
+ 
+-    virObjectRef(vm);
+-    if (virThreadPoolSendJob(driver->workerPool, 0, processEvent) < 0) {
+-        ignore_value(virObjectUnref(vm));
+-        goto error;
++        processEvent->eventType = QEMU_PROCESS_EVENT_BLOCK_JOB;
++        if (VIR_STRDUP(data, diskAlias) < 0)
++            goto error;
++        processEvent->data = data;
++        processEvent->vm = vm;
++        processEvent->action = type;
++        processEvent->status = status;
++
++        virObjectRef(vm);
++        if (virThreadPoolSendJob(driver->workerPool, 0, processEvent) < 0) {
++            ignore_value(virObjectUnref(vm));
++            goto error;
++        }
+     }
+ 
+  cleanup:
+-- 
+2.3.5
+
diff --git a/SOURCES/libvirt-qemu-processBlockJob-Don-t-unlock-vm-twice.patch b/SOURCES/libvirt-qemu-processBlockJob-Don-t-unlock-vm-twice.patch
new file mode 100644
index 0000000..5d52d87
--- /dev/null
+++ b/SOURCES/libvirt-qemu-processBlockJob-Don-t-unlock-vm-twice.patch
@@ -0,0 +1,35 @@
+From 6d0a3a1730efdc0b0c179e5bee64749ef5eed967 Mon Sep 17 00:00:00 2001
+Message-Id: <6d0a3a1730efdc0b0c179e5bee64749ef5eed967@dist-git>
+From: Peter Krempa <pkrempa@redhat.com>
+Date: Thu, 2 Apr 2015 11:27:57 +0200
+Subject: [PATCH] qemu: processBlockJob: Don't unlock @vm twice
+
+https://bugzilla.redhat.com/show_bug.cgi?id=1208021
+
+Commit 1a92c719 (known as 12fdae1ebb74 downstream) moved the code that
+handles block job events into a different function that is executed in
+a separate thread. The caller of processBlockJob handles locking and
+unlocking of @vm, so we should not do it in the function itself.
+
+(cherry picked from commit 6b6c4ab8a6d2096bd5f50d2ae9b0a929fbaaf076)
+
+Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
+---
+ src/qemu/qemu_driver.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
+index 4293817..0939223 100644
+--- a/src/qemu/qemu_driver.c
++++ b/src/qemu/qemu_driver.c
+@@ -4523,7 +4523,6 @@ processBlockJobEvent(virQEMUDriverPtr driver,
+             VIR_WARN("Unable to update persistent definition on vm %s "
+                      "after block job", vm->def->name);
+     }
+-    virObjectUnlock(vm);
+     virObjectUnref(cfg);
+ 
+     if (event)
+-- 
+2.3.5
+
diff --git a/SPECS/libvirt.spec b/SPECS/libvirt.spec
index 1a70713..8977943 100644
--- a/SPECS/libvirt.spec
+++ b/SPECS/libvirt.spec
@@ -367,7 +367,7 @@
 Summary: Library providing a simple virtualization API
 Name: libvirt
 Version: 1.2.8
-Release: 16%{?dist}.2%{?extra_release}
+Release: 16%{?dist}.3%{?extra_release}
 License: LGPLv2+
 Group: Development/Libraries
 BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
@@ -726,6 +726,9 @@ Patch345: libvirt-qemu-event-Don-t-fiddle-with-disk-backing-trees-without-a-job.
 Patch346: libvirt-qemu-Disallow-concurrent-block-jobs-on-a-single-disk.patch
 Patch347: libvirt-qemu-block-commit-Mark-disk-in-block-jobs-only-on-successful-command.patch
 Patch348: libvirt-qemu-read-backing-chain-names-from-qemu.patch
+Patch349: libvirt-qemu-processBlockJob-Don-t-unlock-vm-twice.patch
+Patch350: libvirt-qemu-Extract-internals-of-processBlockJobEvent-into-a-helper.patch
+Patch351: libvirt-qemu-blockjob-Synchronously-update-backing-chain-in-XML-on-ABORT-PIVOT.patch
 
 
 %if %{with_libvirtd}
@@ -2594,6 +2597,11 @@ exit 0
 %doc examples/systemtap
 
 %changelog
+* Thu Apr  2 2015 Jiri Denemark <jdenemar@redhat.com> - 1.2.8-16.el7_1.3
+- qemu: processBlockJob: Don't unlock @vm twice (rhbz#1208021)
+- qemu: Extract internals of processBlockJobEvent into a helper (rhbz#1208021)
+- qemu: blockjob: Synchronously update backing chain in XML on ABORT/PIVOT (rhbz#1208021)
+
 * Wed Mar 18 2015 Jiri Denemark <jdenemar@redhat.com> - 1.2.8-16.el7_1.2
 - util: storagefile: Don't crash on gluster URIs without path (rhbz#1198720)
 - qemuProcessHandleBlockJob: Set disk->mirrorState more often (rhbz#1202719)