From 46e0a49611099c3c3bc56dd1b6bcb44d11ecad71 Mon Sep 17 00:00:00 2001
Message-Id: <46e0a49611099c3c3bc56dd1b6bcb44d11ecad71.1389183249.git.jdenemar@redhat.com>
From: Jiri Denemark <jdenemar@redhat.com>
Date: Thu, 19 Dec 2013 22:10:04 +0100
Subject: [PATCH] qemu: Do not access stale data in virDomainBlockStats

CVE-2013-6458
https://bugzilla.redhat.com/show_bug.cgi?id=1043069

When virDomainDetachDeviceFlags is called concurrently to
virDomainBlockStats, libvirtd may crash because qemuDomainBlockStats
finds a disk in vm->def before getting a job on a domain and uses the
disk pointer after getting the job. However, the domain is unlocked
while waiting on a job condition and thus the data behind the disk
pointer may disappear. This happens when thread 1 runs
virDomainDetachDeviceFlags and enters the monitor to actually remove
the disk. Then another thread starts running virDomainBlockStats, finds
the disk in vm->def, and while it's waiting on the job condition (owned
by the first thread), the first thread finishes the disk removal. When
the second thread gets the job, the memory pointed to by the disk
pointer is already gone.

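The interleaving that triggers the crash looks roughly like this:

  Thread 1 (virDomainDetachDeviceFlags)   Thread 2 (virDomainBlockStats)
    begin job                               look up disk in vm->def
    enter monitor (vm unlocked)             wait on the job condition
    remove the disk, free its data          ...
    end job                                 get job, dereference the
                                            now-freed disk pointer
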
Therefore, every API that is going to begin a job should do that before
fetching data from vm->def.

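In code, the rule amounts to the following ordering (a minimal sketch;
use_disk() stands in for whatever the API does with the pointer and is
not a real libvirt function):

    /* Broken: the pointer into vm->def is fetched before the job. */
    disk = vm->def->disks[idx];
    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
        goto cleanup;            /* vm is unlocked while waiting; the
                                  * disk may be freed in the meantime */
    use_disk(disk);              /* possible use-after-free */

    /* Fixed: take the job first, then consult vm->def. */
    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
        goto cleanup;
    if (!virDomainObjIsActive(vm))
        goto endjob;             /* re-check state under the job */
    disk = vm->def->disks[idx];  /* stable until the job is ended */
    use_disk(disk);
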
(cherry picked from commit db86da5ca2109e4006c286a09b6c75bfe10676ad)
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
 src/qemu/qemu_driver.c | 17 ++++++-----------
 1 file changed, 6 insertions(+), 11 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 8539b40..43c072e 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -9302,34 +9302,29 @@ qemuDomainBlockStats(virDomainPtr dom,
     if (virDomainBlockStatsEnsureACL(dom->conn, vm->def) < 0)
         goto cleanup;
 
+    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+        goto cleanup;
+
     if (!virDomainObjIsActive(vm)) {
         virReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
-        goto cleanup;
+        goto endjob;
     }
 
     if ((idx = virDomainDiskIndexByName(vm->def, path, false)) < 0) {
         virReportError(VIR_ERR_INVALID_ARG,
                        _("invalid path: %s"), path);
-        goto cleanup;
+        goto endjob;
     }
     disk = vm->def->disks[idx];
 
     if (!disk->info.alias) {
         virReportError(VIR_ERR_INTERNAL_ERROR,
                        _("missing disk device alias name for %s"), disk->dst);
-        goto cleanup;
+        goto endjob;
     }
 
     priv = vm->privateData;
-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
-        goto cleanup;
-
-    if (!virDomainObjIsActive(vm)) {
-        virReportError(VIR_ERR_OPERATION_INVALID,
-                       "%s", _("domain is not running"));
-        goto endjob;
-    }
 
     qemuDomainObjEnterMonitor(driver, vm);
     ret = qemuMonitorGetBlockStatsInfo(priv->mon,
-- 
1.8.5.2