From 28cf1b55f346a9f56e84fa57921f5a28a99cd59b Mon Sep 17 00:00:00 2001
From: Jing Liu <jing2.liu@intel.com>
Date: Wed, 16 Feb 2022 22:04:32 -0800
Subject: [PATCH 10/24] x86: add support for KVM_CAP_XSAVE2 and AMX state
 migration

RH-Author: Paul Lai <plai@redhat.com>
RH-MergeRequest: 176: Enable KVM AMX support
RH-Commit: [10/13] d584f455ba1ecd8a4a87f3470e6aac24ba9a1f5a
RH-Bugzilla: 1916415
RH-Acked-by: Cornelia Huck <cohuck@redhat.com>
RH-Acked-by: Igor Mammedov <imammedo@redhat.com>
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>

When dynamic xfeatures (e.g. AMX) are used by the guest, the xsave
area can be larger than 4KB. KVM_GET_XSAVE2 and KVM_SET_XSAVE under
KVM_CAP_XSAVE2 work with an xsave buffer larger than 4KB. Always use
the new ioctls under KVM_CAP_XSAVE2 when KVM supports it.

Signed-off-by: Jing Liu <jing2.liu@intel.com>
Signed-off-by: Zeng Guang <guang.zeng@intel.com>
Signed-off-by: Wei Wang <wei.w.wang@intel.com>
Signed-off-by: Yang Zhong <yang.zhong@intel.com>
Message-Id: <20220217060434.52460-7-yang.zhong@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit e56dd3c70abb31893c61ac834109fa7a38841330)
Signed-off-by: Paul Lai <plai@redhat.com>
---
 target/i386/cpu.h          |  4 ++++
 target/i386/kvm/kvm.c      | 42 ++++++++++++++++++++++++--------------
 target/i386/xsave_helper.c | 28 +++++++++++++++++++++++++
 3 files changed, 59 insertions(+), 15 deletions(-)

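Note (illustrative only, not part of the upstream commit; git-am ignores
text between the diffstat and the first hunk): the hunks below go through
QEMU's kvm_check_extension() and kvm_vcpu_ioctl() wrappers. As a rough
sketch of the underlying kernel interface, a userspace VMM could size and
read the extended xsave state roughly as follows. The helper name
read_vcpu_xsave and the bare vm_fd/vcpu_fd handling are assumptions made
up for the example, error handling is minimal, and kernel headers new
enough to define KVM_GET_XSAVE2 are required.

#include <linux/kvm.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

static void *read_vcpu_xsave(int vm_fd, int vcpu_fd, size_t *len)
{
    /*
     * For KVM_CAP_XSAVE2, KVM_CHECK_EXTENSION returns the xsave buffer
     * size needed for the guest's enabled xfeatures (0 if unsupported).
     */
    int xsave2_size = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE2);
    size_t buf_len = sizeof(struct kvm_xsave);        /* legacy 4KB layout */
    void *buf;

    if (xsave2_size > 0) {
        buf_len = ((size_t)xsave2_size + 4095) & ~(size_t)4095; /* 4KB align */
    }

    buf = aligned_alloc(4096, buf_len);
    if (!buf) {
        return NULL;
    }
    memset(buf, 0, buf_len);

    /*
     * Prefer KVM_GET_XSAVE2 whenever the capability is present; it is the
     * variant that may fill a buffer larger than struct kvm_xsave.
     */
    if (ioctl(vcpu_fd, xsave2_size > 0 ? KVM_GET_XSAVE2 : KVM_GET_XSAVE,
              buf) < 0) {
        free(buf);
        return NULL;
    }

    *len = buf_len;
    return buf;
}

QEMU does the equivalent via kvm_init_xsave() and the ioctl type selection
in kvm_get_xsave(), as added by the hunks below.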
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index f2bdef9c26..14a3501b87 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1522,6 +1522,10 @@ typedef struct CPUX86State {
     uint64_t opmask_regs[NB_OPMASK_REGS];
     YMMReg zmmh_regs[CPU_NB_REGS];
     ZMMReg hi16_zmm_regs[CPU_NB_REGS];
+#ifdef TARGET_X86_64
+    uint8_t xtilecfg[64];
+    uint8_t xtiledata[8192];
+#endif
 
     /* sysenter registers */
     uint32_t sysenter_cs;
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index a64a79d870..d3d476df27 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -123,6 +123,7 @@ static uint32_t num_architectural_pmu_gp_counters;
 static uint32_t num_architectural_pmu_fixed_counters;
 
 static int has_xsave;
+static int has_xsave2;
 static int has_xcrs;
 static int has_pit_state2;
 static int has_exception_payload;
@@ -1585,6 +1586,26 @@ static Error *invtsc_mig_blocker;
 
 #define KVM_MAX_CPUID_ENTRIES  100
 
+static void kvm_init_xsave(CPUX86State *env)
+{
+    if (has_xsave2) {
+        env->xsave_buf_len = QEMU_ALIGN_UP(has_xsave2, 4096);
+    } else if (has_xsave) {
+        env->xsave_buf_len = sizeof(struct kvm_xsave);
+    } else {
+        return;
+    }
+
+    env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len);
+    memset(env->xsave_buf, 0, env->xsave_buf_len);
+    /*
+     * The allocated storage must be large enough for all of the
+     * possible XSAVE state components.
+     */
+    assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX) <=
+           env->xsave_buf_len);
+}
+
 int kvm_arch_init_vcpu(CPUState *cs)
 {
     struct {
@@ -1614,6 +1635,8 @@ int kvm_arch_init_vcpu(CPUState *cs)
 
     cpuid_i = 0;
 
+    has_xsave2 = kvm_check_extension(cs->kvm_state, KVM_CAP_XSAVE2);
+
     r = kvm_arch_set_tsc_khz(cs);
     if (r < 0) {
         return r;
@@ -2003,19 +2026,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
     if (r) {
         goto fail;
     }
-
-    if (has_xsave) {
-        env->xsave_buf_len = sizeof(struct kvm_xsave);
-        env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len);
-        memset(env->xsave_buf, 0, env->xsave_buf_len);
-
-        /*
-         * The allocated storage must be large enough for all of the
-         * possible XSAVE state components.
-         */
-        assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX)
-               <= env->xsave_buf_len);
-    }
+    kvm_init_xsave(env);
 
     max_nested_state_len = kvm_max_nested_state_length();
     if (max_nested_state_len > 0) {
@@ -3263,13 +3274,14 @@ static int kvm_get_xsave(X86CPU *cpu)
 {
     CPUX86State *env = &cpu->env;
     void *xsave = env->xsave_buf;
-    int ret;
+    int type, ret;
 
     if (!has_xsave) {
         return kvm_get_fpu(cpu);
     }
 
-    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
+    type = has_xsave2 ? KVM_GET_XSAVE2 : KVM_GET_XSAVE;
+    ret = kvm_vcpu_ioctl(CPU(cpu), type, xsave);
     if (ret < 0) {
         return ret;
     }
diff --git a/target/i386/xsave_helper.c b/target/i386/xsave_helper.c
index ac61a96344..996e9f3bfe 100644
--- a/target/i386/xsave_helper.c
+++ b/target/i386/xsave_helper.c
@@ -126,6 +126,20 @@ void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen)
 
         memcpy(pkru, &env->pkru, sizeof(env->pkru));
     }
+
+    e = &x86_ext_save_areas[XSTATE_XTILE_CFG_BIT];
+    if (e->size && e->offset) {
+        XSaveXTILECFG *tilecfg = buf + e->offset;
+
+        memcpy(tilecfg, &env->xtilecfg, sizeof(env->xtilecfg));
+    }
+
+    e = &x86_ext_save_areas[XSTATE_XTILE_DATA_BIT];
+    if (e->size && e->offset && buflen >= e->size + e->offset) {
+        XSaveXTILEDATA *tiledata = buf + e->offset;
+
+        memcpy(tiledata, &env->xtiledata, sizeof(env->xtiledata));
+    }
 #endif
 }
 
@@ -247,5 +261,19 @@ void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen)
         pkru = buf + e->offset;
         memcpy(&env->pkru, pkru, sizeof(env->pkru));
     }
+
+    e = &x86_ext_save_areas[XSTATE_XTILE_CFG_BIT];
+    if (e->size && e->offset) {
+        const XSaveXTILECFG *tilecfg = buf + e->offset;
+
+        memcpy(&env->xtilecfg, tilecfg, sizeof(env->xtilecfg));
+    }
+
+    e = &x86_ext_save_areas[XSTATE_XTILE_DATA_BIT];
+    if (e->size && e->offset && buflen >= e->size + e->offset) {
+        const XSaveXTILEDATA *tiledata = buf + e->offset;
+
+        memcpy(&env->xtiledata, tiledata, sizeof(env->xtiledata));
+    }
 #endif
 }
-- 
2.35.3