From 46e0a49611099c3c3bc56dd1b6bcb44d11ecad71 Mon Sep 17 00:00:00 2001
Message-Id: <46e0a49611099c3c3bc56dd1b6bcb44d11ecad71.1389183249.git.jdenemar@redhat.com>
From: Jiri Denemark <jdenemar@redhat.com>
Date: Thu, 19 Dec 2013 22:10:04 +0100
Subject: [PATCH] qemu: Do not access stale data in virDomainBlockStats

CVE-2013-6458
https://bugzilla.redhat.com/show_bug.cgi?id=1043069

When virDomainDetachDeviceFlags is called concurrently with
virDomainBlockStats, libvirtd may crash because qemuDomainBlockStats
finds a disk in vm->def before getting a job on a domain and uses the
disk pointer after getting the job. However, the domain is unlocked
while waiting on a job condition and thus the data behind the disk
pointer may disappear. This happens when thread 1 runs
virDomainDetachDeviceFlags and enters the monitor to actually remove
the disk. Then another thread starts running virDomainBlockStats, finds
the disk in vm->def, and while it is waiting on the job condition
(owned by the first thread), the first thread finishes the disk
removal. When the second thread gets the job, the memory pointed to by
the disk pointer is already gone.

That said, every API that is going to begin a job should do so before
fetching data from vm->def.

(cherry picked from commit db86da5ca2109e4006c286a09b6c75bfe10676ad)
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
 src/qemu/qemu_driver.c | 17 ++++++-----------
 1 file changed, 6 insertions(+), 11 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 8539b40..43c072e 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -9302,34 +9302,29 @@ qemuDomainBlockStats(virDomainPtr dom,
     if (virDomainBlockStatsEnsureACL(dom->conn, vm->def) < 0)
         goto cleanup;
 
+    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+        goto cleanup;
+
     if (!virDomainObjIsActive(vm)) {
         virReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
-        goto cleanup;
+        goto endjob;
     }
 
     if ((idx = virDomainDiskIndexByName(vm->def, path, false)) < 0) {
         virReportError(VIR_ERR_INVALID_ARG,
                        _("invalid path: %s"), path);
-        goto cleanup;
+        goto endjob;
     }
     disk = vm->def->disks[idx];
 
     if (!disk->info.alias) {
         virReportError(VIR_ERR_INTERNAL_ERROR,
                        _("missing disk device alias name for %s"), disk->dst);
-        goto cleanup;
+        goto endjob;
     }
 
     priv = vm->privateData;
-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
-        goto cleanup;
-
-    if (!virDomainObjIsActive(vm)) {
-        virReportError(VIR_ERR_OPERATION_INVALID,
-                       "%s", _("domain is not running"));
-        goto endjob;
-    }
 
     qemuDomainObjEnterMonitor(driver, vm);
     ret = qemuMonitorGetBlockStatsInfo(priv->mon,
-- 
1.8.5.2
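
For readers unfamiliar with libvirt's job model, the following is a minimal,
self-contained sketch of the ordering rule the commit message describes:
acquire the job before looking anything up in the domain definition, because
the domain lock is dropped while waiting on the job condition. The
fake_domain, begin_job, end_job and fake_disk names and the pthread-based
"job condition" are hypothetical simplifications for illustration only, not
libvirt APIs.

/* Build (assumption): cc -pthread job_order_sketch.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_disk {
    char alias[32];
    unsigned long long rd_bytes;
};

struct fake_domain {
    pthread_mutex_t lock;       /* protects everything below */
    pthread_cond_t job_cond;    /* signalled when the job is released */
    bool job_owned;             /* only one API call may hold the job */
    struct fake_disk *disk;     /* may be freed by a detach operation */
};

static struct fake_domain dom = {
    .lock = PTHREAD_MUTEX_INITIALIZER,
    .job_cond = PTHREAD_COND_INITIALIZER,
};

/* Take the job; the domain lock is released while waiting on the
 * condition, so any pointer into the domain taken before this call
 * may be stale once it returns. */
static void begin_job(struct fake_domain *d)
{
    while (d->job_owned)
        pthread_cond_wait(&d->job_cond, &d->lock);
    d->job_owned = true;
}

static void end_job(struct fake_domain *d)
{
    d->job_owned = false;
    pthread_cond_signal(&d->job_cond);
}

/* Safe ordering, mirroring the patch: take the job first, then look
 * the disk up in the (re-validated) domain state. */
static void block_stats(struct fake_domain *d)
{
    pthread_mutex_lock(&d->lock);
    begin_job(d);

    if (d->disk)                /* lookup happens only after the job */
        printf("rd_bytes for %s: %llu\n",
               d->disk->alias, d->disk->rd_bytes);
    else
        printf("disk already detached\n");

    end_job(d);
    pthread_mutex_unlock(&d->lock);
}

/* Detach path: holds the job while freeing the disk, just like the
 * device-removal thread in the crash scenario. */
static void *detach_disk(void *opaque)
{
    struct fake_domain *d = opaque;

    pthread_mutex_lock(&d->lock);
    begin_job(d);
    free(d->disk);
    d->disk = NULL;
    end_job(d);
    pthread_mutex_unlock(&d->lock);
    return NULL;
}

int main(void)
{
    pthread_t detacher;

    dom.disk = calloc(1, sizeof(*dom.disk));
    if (!dom.disk)
        return 1;
    strcpy(dom.disk->alias, "virtio-disk0");
    dom.disk->rd_bytes = 4096;

    pthread_create(&detacher, NULL, detach_disk, &dom);
    block_stats(&dom);          /* never dereferences freed memory */
    pthread_join(detacher, NULL);
    return 0;
}

Because the lookup in block_stats happens only after begin_job returns, the
disk pointer is validated under the same job that the detach path must hold
before it can free the disk, which is exactly the property the reordered
qemuDomainBlockStats relies on.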