|
|
9119d9 |
From 4e2265af50edd9b7772750a534310f87ba76e071 Mon Sep 17 00:00:00 2001
|
|
|
9119d9 |
Message-Id: <4e2265af50edd9b7772750a534310f87ba76e071@dist-git>
|
|
|
9119d9 |
From: John Ferlan <jferlan@redhat.com>
|
|
|
9119d9 |
Date: Thu, 18 Sep 2014 09:29:54 -0400
|
|
|
9119d9 |
Subject: [PATCH] qemu: Allow pinning specific IOThreads to a CPU
|
|
|
9119d9 |
|
|
|
9119d9 |
https://bugzilla.redhat.com/show_bug.cgi?id=1101574
|
|
|
9119d9 |
|
|
|
9119d9 |
Modify qemuProcessStart() in order to allow setting affinity to
|
|
|
9119d9 |
specific CPUs for IOThreads. The process followed is similar to
|
|
|
9119d9 |
that for the vCPUs.
|
|
|
9119d9 |
|
|
|
9119d9 |
This involves adding a function to fetch the IOThread IDs via
|
|
|
9119d9 |
qemuMonitorGetIOThreads() and adding them to the iothreadpids[] list.
|
|
|
9119d9 |
Then making sure all the cgroup data has been properly set up and
|
|
|
9119d9 |
finally assigning affinity.
|
|
|
9119d9 |
|
|
|
9119d9 |
(cherry picked from commit 9bef96ec502a01abde055233a8ab428a6824a4c0)
|
|
|
9119d9 |
|
|
|
9119d9 |
NOTE: Requires commit id 938fb12fad6d15c9fdb73f998c4e0ec1e278721f in order
|
|
|
9119d9 |
to build (next patch)
|
|
|
9119d9 |
|
|
|
9119d9 |
Signed-off-by: John Ferlan <jferlan@redhat.com>
|
|
|
9119d9 |
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
|
|
|
9119d9 |
---
|
|
|
9119d9 |
src/qemu/qemu_driver.c | 8 ++++
|
|
|
9119d9 |
src/qemu/qemu_process.c | 98 +++++++++++++++++++++++++++++++++++++++++++++++++
|
|
|
9119d9 |
2 files changed, 106 insertions(+)
|
|
|
9119d9 |
|
|
|
9119d9 |
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
|
|
|
9119d9 |
index 0a5605a..a1379c2 100644
|
|
|
9119d9 |
--- a/src/qemu/qemu_driver.c
|
|
|
9119d9 |
+++ b/src/qemu/qemu_driver.c
|
|
|
9119d9 |
@@ -8766,6 +8766,14 @@ qemuDomainSetNumaParamsLive(virDomainObjPtr vm,
|
|
|
9119d9 |
virCgroupSetCpusetMems(priv->cgroup, nodeset_str) < 0)
|
|
|
9119d9 |
goto cleanup;
|
|
|
9119d9 |
|
|
|
9119d9 |
+ for (i = 0; i < priv->niothreadpids; i++) {
|
|
|
9119d9 |
+ if (virCgroupNewIOThread(priv->cgroup, i, false, &cgroup_temp) < 0 ||
|
|
|
9119d9 |
+ virCgroupSetCpusetMems(cgroup_temp, nodeset_str) < 0)
|
|
|
9119d9 |
+ goto cleanup;
|
|
|
9119d9 |
+ virCgroupFree(&cgroup_temp);
|
|
|
9119d9 |
+ }
|
|
|
9119d9 |
+
|
|
|
9119d9 |
+
|
|
|
9119d9 |
ret = 0;
|
|
|
9119d9 |
cleanup:
|
|
|
9119d9 |
VIR_FREE(nodeset_str);
|
|
|
9119d9 |
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
|
|
|
9119d9 |
index bf1f0de..ed73ae5 100644
|
|
|
9119d9 |
--- a/src/qemu/qemu_process.c
|
|
|
9119d9 |
+++ b/src/qemu/qemu_process.c
|
|
|
9119d9 |
@@ -2093,6 +2093,51 @@ qemuProcessDetectVcpuPIDs(virQEMUDriverPtr driver,
|
|
|
9119d9 |
}
|
|
|
9119d9 |
|
|
|
9119d9 |
|
|
|
9119d9 |
+static int
|
|
|
9119d9 |
+qemuProcessDetectIOThreadPIDs(virQEMUDriverPtr driver,
|
|
|
9119d9 |
+ virDomainObjPtr vm,
|
|
|
9119d9 |
+ int asyncJob)
|
|
|
9119d9 |
+{
|
|
|
9119d9 |
+ qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
9119d9 |
+ qemuMonitorIOThreadsInfoPtr *iothreads = NULL;
|
|
|
9119d9 |
+ int niothreads = 0;
|
|
|
9119d9 |
+ int ret = -1;
|
|
|
9119d9 |
+ size_t i;
|
|
|
9119d9 |
+
|
|
|
9119d9 |
+ /* Get the list of IOThreads from qemu */
|
|
|
9119d9 |
+ if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
9119d9 |
+ goto cleanup;
|
|
|
9119d9 |
+ niothreads = qemuMonitorGetIOThreads(priv->mon, &iothreads);
|
|
|
9119d9 |
+ qemuDomainObjExitMonitor(driver, vm);
|
|
|
9119d9 |
+ if (niothreads <= 0)
|
|
|
9119d9 |
+ goto cleanup;
|
|
|
9119d9 |
+
|
|
|
9119d9 |
+ if (niothreads != vm->def->iothreads) {
|
|
|
9119d9 |
+ virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
9119d9 |
+ _("got wrong number of IOThread pids from QEMU monitor. "
|
|
|
9119d9 |
+ "got %d, wanted %d"),
|
|
|
9119d9 |
+ niothreads, vm->def->iothreads);
|
|
|
9119d9 |
+ goto cleanup;
|
|
|
9119d9 |
+ }
|
|
|
9119d9 |
+
|
|
|
9119d9 |
+ if (VIR_ALLOC_N(priv->iothreadpids, niothreads) < 0)
|
|
|
9119d9 |
+ goto cleanup;
|
|
|
9119d9 |
+ priv->niothreadpids = niothreads;
|
|
|
9119d9 |
+
|
|
|
9119d9 |
+ for (i = 0; i < priv->niothreadpids; i++)
|
|
|
9119d9 |
+ priv->iothreadpids[i] = iothreads[i]->thread_id;
|
|
|
9119d9 |
+
|
|
|
9119d9 |
+ ret = 0;
|
|
|
9119d9 |
+
|
|
|
9119d9 |
+ cleanup:
|
|
|
9119d9 |
+ if (iothreads) {
|
|
|
9119d9 |
+ for (i = 0; i < niothreads; i++)
|
|
|
9119d9 |
+ qemuMonitorIOThreadsInfoFree(iothreads[i]);
|
|
|
9119d9 |
+ VIR_FREE(iothreads);
|
|
|
9119d9 |
+ }
|
|
|
9119d9 |
+ return ret;
|
|
|
9119d9 |
+}
|
|
|
9119d9 |
+
|
|
|
9119d9 |
/* Helper to prepare cpumap for affinity setting, convert
|
|
|
9119d9 |
* NUMA nodeset into cpuset if @nodemask is not NULL, otherwise
|
|
|
9119d9 |
* just return a new allocated bitmap.
|
|
|
9119d9 |
@@ -2285,6 +2330,41 @@ qemuProcessSetEmulatorAffinity(virDomainObjPtr vm)
|
|
|
9119d9 |
return ret;
|
|
|
9119d9 |
}
|
|
|
9119d9 |
|
|
|
9119d9 |
+/* Set CPU affinities for IOThreads threads. */
|
|
|
9119d9 |
+static int
|
|
|
9119d9 |
+qemuProcessSetIOThreadsAffinity(virDomainObjPtr vm)
|
|
|
9119d9 |
+{
|
|
|
9119d9 |
+ qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
9119d9 |
+ virDomainDefPtr def = vm->def;
|
|
|
9119d9 |
+ virDomainVcpuPinDefPtr pininfo;
|
|
|
9119d9 |
+ size_t i;
|
|
|
9119d9 |
+ int ret = -1;
|
|
|
9119d9 |
+
|
|
|
9119d9 |
+ if (!def->cputune.niothreadspin)
|
|
|
9119d9 |
+ return 0;
|
|
|
9119d9 |
+
|
|
|
9119d9 |
+ if (priv->iothreadpids == NULL) {
|
|
|
9119d9 |
+ virReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
9119d9 |
+ "%s", _("IOThread affinity is not supported"));
|
|
|
9119d9 |
+ return -1;
|
|
|
9119d9 |
+ }
|
|
|
9119d9 |
+
|
|
|
9119d9 |
+ for (i = 0; i < def->iothreads; i++) {
|
|
|
9119d9 |
+ /* set affinity only for existing vcpus */
|
|
|
9119d9 |
+ if (!(pininfo = virDomainVcpuPinFindByVcpu(def->cputune.iothreadspin,
|
|
|
9119d9 |
+ def->cputune.niothreadspin,
|
|
|
9119d9 |
+ i+1)))
|
|
|
9119d9 |
+ continue;
|
|
|
9119d9 |
+
|
|
|
9119d9 |
+ if (virProcessSetAffinity(priv->iothreadpids[i], pininfo->cpumask) < 0)
|
|
|
9119d9 |
+ goto cleanup;
|
|
|
9119d9 |
+ }
|
|
|
9119d9 |
+ ret = 0;
|
|
|
9119d9 |
+
|
|
|
9119d9 |
+ cleanup:
|
|
|
9119d9 |
+ return ret;
|
|
|
9119d9 |
+}
|
|
|
9119d9 |
+
|
|
|
9119d9 |
static int
|
|
|
9119d9 |
qemuProcessInitPasswords(virConnectPtr conn,
|
|
|
9119d9 |
virQEMUDriverPtr driver,
|
|
|
9119d9 |
@@ -4411,6 +4491,10 @@ int qemuProcessStart(virConnectPtr conn,
|
|
|
9119d9 |
if (qemuProcessDetectVcpuPIDs(driver, vm, asyncJob) < 0)
|
|
|
9119d9 |
goto cleanup;
|
|
|
9119d9 |
|
|
|
9119d9 |
+ VIR_DEBUG("Detecting IOThread PIDs");
|
|
|
9119d9 |
+ if (qemuProcessDetectIOThreadPIDs(driver, vm, asyncJob) < 0)
|
|
|
9119d9 |
+ goto cleanup;
|
|
|
9119d9 |
+
|
|
|
9119d9 |
VIR_DEBUG("Setting cgroup for each VCPU (if required)");
|
|
|
9119d9 |
if (qemuSetupCgroupForVcpu(vm) < 0)
|
|
|
9119d9 |
goto cleanup;
|
|
|
9119d9 |
@@ -4419,6 +4503,10 @@ int qemuProcessStart(virConnectPtr conn,
|
|
|
9119d9 |
if (qemuSetupCgroupForEmulator(driver, vm, nodemask) < 0)
|
|
|
9119d9 |
goto cleanup;
|
|
|
9119d9 |
|
|
|
9119d9 |
+ VIR_DEBUG("Setting cgroup for each IOThread (if required)");
|
|
|
9119d9 |
+ if (qemuSetupCgroupForIOThreads(vm) < 0)
|
|
|
9119d9 |
+ goto cleanup;
|
|
|
9119d9 |
+
|
|
|
9119d9 |
VIR_DEBUG("Setting VCPU affinities");
|
|
|
9119d9 |
if (qemuProcessSetVcpuAffinities(vm) < 0)
|
|
|
9119d9 |
goto cleanup;
|
|
|
9119d9 |
@@ -4427,6 +4515,10 @@ int qemuProcessStart(virConnectPtr conn,
|
|
|
9119d9 |
if (qemuProcessSetEmulatorAffinity(vm) < 0)
|
|
|
9119d9 |
goto cleanup;
|
|
|
9119d9 |
|
|
|
9119d9 |
+ VIR_DEBUG("Setting affinity of IOThread threads");
|
|
|
9119d9 |
+ if (qemuProcessSetIOThreadsAffinity(vm) < 0)
|
|
|
9119d9 |
+ goto cleanup;
|
|
|
9119d9 |
+
|
|
|
9119d9 |
VIR_DEBUG("Setting any required VM passwords");
|
|
|
9119d9 |
if (qemuProcessInitPasswords(conn, driver, vm, asyncJob) < 0)
|
|
|
9119d9 |
goto cleanup;
|
|
|
9119d9 |
@@ -4842,6 +4934,8 @@ void qemuProcessStop(virQEMUDriverPtr driver,
|
|
|
9119d9 |
virDomainObjSetState(vm, VIR_DOMAIN_SHUTOFF, reason);
|
|
|
9119d9 |
VIR_FREE(priv->vcpupids);
|
|
|
9119d9 |
priv->nvcpupids = 0;
|
|
|
9119d9 |
+ VIR_FREE(priv->iothreadpids);
|
|
|
9119d9 |
+ priv->niothreadpids = 0;
|
|
|
9119d9 |
virObjectUnref(priv->qemuCaps);
|
|
|
9119d9 |
priv->qemuCaps = NULL;
|
|
|
9119d9 |
VIR_FREE(priv->pidfile);
|
|
|
9119d9 |
@@ -5034,6 +5128,10 @@ int qemuProcessAttach(virConnectPtr conn ATTRIBUTE_UNUSED,
|
|
|
9119d9 |
if (qemuProcessDetectVcpuPIDs(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
|
|
|
9119d9 |
goto error;
|
|
|
9119d9 |
|
|
|
9119d9 |
+ VIR_DEBUG("Detecting IOThread PIDs");
|
|
|
9119d9 |
+ if (qemuProcessDetectIOThreadPIDs(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
|
|
|
9119d9 |
+ goto error;
|
|
|
9119d9 |
+
|
|
|
9119d9 |
/* If we have -device, then addresses are assigned explicitly.
|
|
|
9119d9 |
* If not, then we have to detect dynamic ones here */
|
|
|
9119d9 |
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
|
|
|
9119d9 |
--
|
|
|
9119d9 |
2.1.0
|
|
|
9119d9 |
|