From 341e7b3d6b2e1d1bc9c11aa59ebac376f847da87 Mon Sep 17 00:00:00 2001
Message-Id: <341e7b3d6b2e1d1bc9c11aa59ebac376f847da87@dist-git>
From: Martin Kletzander <mkletzan@redhat.com>
Date: Wed, 24 Aug 2016 16:10:54 -0400
Subject: [PATCH] qemu: Add qemuProcessSetupPid() and use it in
 qemuProcessSetupIOThread()

https://bugzilla.redhat.com/show_bug.cgi?id=1097930
https://bugzilla.redhat.com/show_bug.cgi?id=1224341

Setting up cgroups and other things for all kinds of threads (the
emulator thread, vCPU threads, I/O threads) was copy-pasted every time
a new thread type was added.  Over time, each of those functions
changed in slightly different ways.  So create one function that does
all of that setup and start using it, starting with the I/O thread
setup.  That will shave off some duplicated code and maybe fix some
bugs as well.

Signed-off-by: Martin Kletzander <mkletzan@redhat.com>
(cherry picked from commit 71e419bbeb7d7bf6f9be0c2516c320cfb7492ef3)
---
 src/qemu/qemu_process.c | 200 +++++++++++++++++++++++++++---------------------
 1 file changed, 114 insertions(+), 86 deletions(-)

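To see where this consolidation is headed, here is a sketch of how the
vCPU setup path could be expressed through the new helper, mirroring the
I/O thread conversion in the diff below.  This is not part of the patch
(it sits after the '---' separator, which git-am ignores when applying);
qemuProcessSetupVcpu(), qemuDomainGetVcpuPid() and virDomainDefGetVcpu()
are assumed from the surrounding libvirt code of this period:

/* Hypothetical follow-up, not in this patch: vCPU setup reduced to a
 * single call into the new helper, the same way the I/O thread path
 * below is reduced. */
static int
qemuProcessSetupVcpu(virDomainObjPtr vm,
                     unsigned int vcpuid)
{
    /* Assumed helpers: qemuDomainGetVcpuPid() returns the vCPU
     * thread's PID, virDomainDefGetVcpu() its definition. */
    pid_t vcpupid = qemuDomainGetVcpuPid(vm, vcpuid);
    virDomainVcpuDefPtr vcpu = virDomainDefGetVcpu(vm->def, vcpuid);

    /* One call now covers cgroup placement, cpuset, period/quota,
     * affinity and scheduler settings for the thread. */
    return qemuProcessSetupPid(vm, vcpupid, VIR_CGROUP_THREAD_VCPU,
                               vcpuid, vcpu->cpumask,
                               vm->def->cputune.period,
                               vm->def->cputune.quota,
                               &vcpu->sched);
}
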
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 41e401b..64b1ac9 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -2304,6 +2304,114 @@ qemuProcessSetLinkStates(virQEMUDriverPtr driver,
 }
 
 
+/**
+ * qemuProcessSetupPid:
+ *
+ * This function sets resource properties (affinity, cgroups,
+ * scheduler) for any PID associated with a domain.  It should be used
+ * to set up emulator PIDs as well as vCPU and I/O thread PIDs to
+ * ensure they are all handled the same way.
+ *
+ * Returns 0 on success, -1 on error.
+ */
+static int
+qemuProcessSetupPid(virDomainObjPtr vm,
+                    pid_t pid,
+                    virCgroupThreadName nameval,
+                    int id,
+                    virBitmapPtr cpumask,
+                    unsigned long long period,
+                    long long quota,
+                    virDomainThreadSchedParamPtr sched)
+{
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+    virDomainNumatuneMemMode mem_mode;
+    virCgroupPtr cgroup = NULL;
+    virBitmapPtr use_cpumask;
+    char *mem_mask = NULL;
+    int ret = -1;
+
+    if ((period || quota) &&
+        !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU)) {
+        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+                       _("cgroup cpu is required for scheduler tuning"));
+        goto cleanup;
+    }
+
+    /* Infer which cpumask shall be used. */
+    if (cpumask)
+        use_cpumask = cpumask;
+    else if (vm->def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO)
+        use_cpumask = priv->autoCpuset;
+    else
+        use_cpumask = vm->def->cpumask;
+
+    /*
+     * If CPU cgroup controller is not initialized here, then we need
+     * neither period nor quota settings.  And if CPUSET controller is
+     * not initialized either, then there's nothing to do anyway.
+     */
+    if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU) ||
+        virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) {
+
+        if (virDomainNumatuneGetMode(vm->def->numa, -1, &mem_mode) == 0 &&
+            mem_mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT &&
+            virDomainNumatuneMaybeFormatNodeset(vm->def->numa,
+                                                priv->autoNodeset,
+                                                &mem_mask, -1) < 0)
+            goto cleanup;
+
+        if (virCgroupNewThread(priv->cgroup, nameval, id, true, &cgroup) < 0)
+            goto cleanup;
+
+        if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) {
+            if (use_cpumask &&
+                qemuSetupCgroupCpusetCpus(cgroup, use_cpumask) < 0)
+                goto cleanup;
+
+            /*
+             * Don't setup cpuset.mems for the emulator, they need to
+             * be set up after initialization in order for kvm
+             * allocations to succeed.
+             */
+            if (nameval != VIR_CGROUP_THREAD_EMULATOR &&
+                mem_mask && virCgroupSetCpusetMems(cgroup, mem_mask) < 0)
+                goto cleanup;
+
+        }
+
+        if ((period || quota) &&
+            qemuSetupCgroupVcpuBW(cgroup, period, quota) < 0)
+            goto cleanup;
+
+        /* Move the thread to the sub dir */
+        if (virCgroupAddTask(cgroup, pid) < 0)
+            goto cleanup;
+
+    }
+
+    /* Setup legacy affinity. */
+    if (use_cpumask && virProcessSetAffinity(pid, use_cpumask) < 0)
+        goto cleanup;
+
+    /* Set scheduler type and priority. */
+    if (sched &&
+        virProcessSetScheduler(pid, sched->policy, sched->priority) < 0)
+        goto cleanup;
+
+    ret = 0;
+ cleanup:
+    VIR_FREE(mem_mask);
+    if (cgroup) {
+        if (ret < 0)
+            virCgroupRemove(cgroup);
+        virCgroupFree(&cgroup);
+    }
+
+    return ret;
+}
+
+
 static int
 qemuProcessSetupEmulator(virDomainObjPtr vm)
 {
@@ -4704,98 +4812,18 @@ qemuProcessSetupVcpus(virDomainObjPtr vm)
 }
 
 
-/**
- * qemuProcessSetupIOThread:
- * @vm: domain object
- * @iothread: iothread data structure to set the data for
- *
- * This function sets resource properities (affinity, cgroups, scheduler) for a
- * IOThread. This function expects that the IOThread is online and the IOThread
- * pids were correctly detected at the point when it's called.
- *
- * Returns 0 on success, -1 on error.
- */
 int
 qemuProcessSetupIOThread(virDomainObjPtr vm,
                          virDomainIOThreadIDDefPtr iothread)
 {
-    qemuDomainObjPrivatePtr priv = vm->privateData;
-    unsigned long long period = vm->def->cputune.period;
-    long long quota = vm->def->cputune.quota;
-    virDomainNumatuneMemMode mem_mode;
-    char *mem_mask = NULL;
-    virCgroupPtr cgroup_iothread = NULL;
-    virBitmapPtr cpumask = NULL;
-    int ret = -1;
 
-    if ((period || quota) &&
-        !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU)) {
-        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
-                       _("cgroup cpu is required for scheduler tuning"));
-        return -1;
-    }
-
-    if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU) ||
-        virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) {
-        if (virDomainNumatuneGetMode(vm->def->numa, -1, &mem_mode) == 0 &&
-            mem_mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT &&
-            virDomainNumatuneMaybeFormatNodeset(vm->def->numa,
-                                                priv->autoNodeset,
-                                                &mem_mask, -1) < 0)
-            goto cleanup;
-
-        if (virCgroupNewThread(priv->cgroup, VIR_CGROUP_THREAD_IOTHREAD,
+    return qemuProcessSetupPid(vm, iothread->thread_id,
+                               VIR_CGROUP_THREAD_IOTHREAD,
                                iothread->iothread_id,
-                               true, &cgroup_iothread) < 0)
-            goto cleanup;
-    }
-
-    if (iothread->cpumask)
-        cpumask = iothread->cpumask;
-    else if (vm->def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO)
-        cpumask = priv->autoCpuset;
-    else
-        cpumask = vm->def->cpumask;
-
-    if (period || quota) {
-        if (qemuSetupCgroupVcpuBW(cgroup_iothread, period, quota) < 0)
-            goto cleanup;
-    }
-
-    if (cgroup_iothread) {
-        if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) {
-            if (mem_mask &&
-                virCgroupSetCpusetMems(cgroup_iothread, mem_mask) < 0)
-                goto cleanup;
-
-            if (cpumask &&
-                qemuSetupCgroupCpusetCpus(cgroup_iothread, cpumask) < 0)
-                goto cleanup;
-        }
-
-        if (virCgroupAddTask(cgroup_iothread, iothread->thread_id) < 0)
-            goto cleanup;
-    }
-
-    if (cpumask && virProcessSetAffinity(iothread->thread_id, cpumask) < 0)
-        goto cleanup;
-
-    if (iothread->sched.policy != VIR_PROC_POLICY_NONE &&
-        virProcessSetScheduler(iothread->thread_id, iothread->sched.policy,
-                               iothread->sched.priority) < 0)
-        goto cleanup;
-
-    ret = 0;
-
- cleanup:
-    if (cgroup_iothread) {
-        if (ret < 0)
-            virCgroupRemove(cgroup_iothread);
-        virCgroupFree(&cgroup_iothread);
-    }
-
-    VIR_FREE(mem_mask);
-    return ret;
+                               iothread->cpumask,
+                               vm->def->cputune.period,
+                               vm->def->cputune.quota,
+                               &iothread->sched);
 }
 
 
-- 
2.10.0
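
As a usage note: the converted qemuProcessSetupIOThread() is invoked
once per configured I/O thread during domain startup.  A minimal sketch
of that driving loop, assuming the niothreadids/iothreadids fields of
the domain definition (the real caller in qemu_process.c may differ in
details):

/* Sketch of the caller: iterate every configured I/O thread and apply
 * the unified PID setup to each.  Field names are assumed from the
 * domain definition of this libvirt era. */
static int
qemuProcessSetupIOThreads(virDomainObjPtr vm)
{
    size_t i;

    for (i = 0; i < vm->def->niothreadids; i++) {
        virDomainIOThreadIDDefPtr info = vm->def->iothreadids[i];

        if (qemuProcessSetupIOThread(vm, info) < 0)
            return -1;
    }

    return 0;
}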