From 1666ea1986c2fdce3bc27aa96ae6bf3a632dbc99 Mon Sep 17 00:00:00 2001
From: Nigel Croxon <ncroxon@redhat.com>
Date: Wed, 31 Jul 2013 15:12:17 +0200
Subject: Introduce async_run_on_cpu

RH-Author: Nigel Croxon <ncroxon@redhat.com>
Message-id: <1375283539-18714-2-git-send-email-ncroxon@redhat.com>
Patchwork-id: 52874
O-Subject: [RHEL7 PATCH 1/3] Introduce async_run_on_cpu
Bugzilla: 985958
RH-Acked-by: Orit Wasserman <owasserm@redhat.com>
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>
RH-Acked-by: Laszlo Ersek <lersek@redhat.com>

Bugzilla: 985958 - Throttle-down guest to help with live migration convergence (backport to RHEL7.0)
https://bugzilla.redhat.com/show_bug.cgi?id=985958

Backported from the following upstream commit:

commit 3c02270db980007424d797506301826310ce2db4
Author: Chegu Vinod <chegu_vinod@hp.com>
Date:   Mon Jun 24 03:49:41 2013 -0600

    Introduce async_run_on_cpu()

    Introduce an asynchronous version of run_on_cpu(), i.e. the caller
    does not have to block until the callback routine finishes execution
    on the target vcpu.

    Signed-off-by: Chegu Vinod <chegu_vinod@hp.com>
    Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
    Signed-off-by: Juan Quintela <quintela@redhat.com>
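
For reference only, not part of the patch: a minimal caller sketch of the new
API. The names mig_throttle_step() and schedule_throttle() are illustrative
assumptions, not functions added by this series.

    static void mig_throttle_step(void *data)
    {
        /* Runs later in the context of the target vCPU thread;
         * @data is whatever was passed to async_run_on_cpu(). */
    }

    static void schedule_throttle(CPUState *cpu)
    {
        /*
         * Unlike run_on_cpu(), this returns immediately: the work item
         * is heap-allocated with free = true, so flush_queued_work()
         * releases it after the callback has run on the target vCPU.
         */
        async_run_on_cpu(cpu, mig_throttle_step, NULL);
    }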

diff --git a/cpus.c b/cpus.c
index c232265..8cd4eab 100644
--- a/cpus.c
+++ b/cpus.c
@@ -653,6 +653,7 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
 
     wi.func = func;
     wi.data = data;
+    wi.free = false;
     if (cpu->queued_work_first == NULL) {
         cpu->queued_work_first = &wi;
     } else {
@@ -671,6 +672,31 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     }
 }
 
+void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
+{
+    struct qemu_work_item *wi;
+
+    if (qemu_cpu_is_self(cpu)) {
+        func(data);
+        return;
+    }
+
+    wi = g_malloc0(sizeof(struct qemu_work_item));
+    wi->func = func;
+    wi->data = data;
+    wi->free = true;
+    if (cpu->queued_work_first == NULL) {
+        cpu->queued_work_first = wi;
+    } else {
+        cpu->queued_work_last->next = wi;
+    }
+    cpu->queued_work_last = wi;
+    wi->next = NULL;
+    wi->done = false;
+
+    qemu_cpu_kick(cpu);
+}
+
 static void flush_queued_work(CPUState *cpu)
 {
     struct qemu_work_item *wi;
@@ -683,6 +709,9 @@ static void flush_queued_work(CPUState *cpu)
         cpu->queued_work_first = wi->next;
         wi->func(wi->data);
         wi->done = true;
+        if (wi->free) {
+            g_free(wi);
+        }
     }
     cpu->queued_work_last = NULL;
     qemu_cond_broadcast(&qemu_work_cond);
diff --git a/include/qemu-common.h b/include/qemu-common.h
index 3b1ca8e..73c6419 100644
--- a/include/qemu-common.h
+++ b/include/qemu-common.h
@@ -288,6 +288,7 @@ struct qemu_work_item {
     void (*func)(void *data);
     void *data;
     int done;
+    bool free;
 };
 
 #ifdef CONFIG_USER_ONLY
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 7cd9442..46465e9 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -265,6 +265,16 @@ bool cpu_is_stopped(CPUState *cpu);
 void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
 
 /**
+ * async_run_on_cpu:
+ * @cpu: The vCPU to run on.
+ * @func: The function to be executed.
+ * @data: Data to pass to the function.
+ *
+ * Schedules the function @func for execution on the vCPU @cpu asynchronously.
+ */
+void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
+
+/**
  * qemu_for_each_cpu:
  * @func: The function to be executed.
  * @data: Data to pass to the function.