thebeanogamer / rpms / qemu-kvm

Forked from rpms/qemu-kvm 5 months ago
Clone
0727d3
From 50840e01d05a466a1dfbc219e49233834e5d7ed0 Mon Sep 17 00:00:00 2001
0727d3
From: Yang Zhong <yang.zhong@intel.com>
0727d3
Date: Wed, 16 Feb 2022 22:04:29 -0800
0727d3
Subject: [PATCH 07/24] x86: Grant AMX permission for guest
0727d3
0727d3
RH-Author: Paul Lai <plai@redhat.com>
0727d3
RH-MergeRequest: 176: Enable KVM AMX support
0727d3
RH-Commit: [7/13] 437578191f61139ca710cc7045ab38eb0d05eae2
0727d3
RH-Bugzilla: 1916415
0727d3
RH-Acked-by: Cornelia Huck <cohuck@redhat.com>
0727d3
RH-Acked-by: Igor Mammedov <imammedo@redhat.com>
0727d3
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>
0727d3
0727d3
Kernel allocates 4K xstate buffer by default. For XSAVE features
0727d3
which require large state component (e.g. AMX), Linux kernel
0727d3
dynamically expands the xstate buffer only after the process has
0727d3
acquired the necessary permissions. Those are called dynamically-
0727d3
enabled XSAVE features (or dynamic xfeatures).
0727d3
0727d3
There are separate permissions for native tasks and guests.
0727d3
0727d3
Qemu should request the guest permissions for dynamic xfeatures
0727d3
which will be exposed to the guest. This only needs to be done
0727d3
once before the first vcpu is created.
0727d3
0727d3
KVM implemented one new ARCH_GET_XCOMP_SUPP system attribute API to
0727d3
get host side supported_xcr0 and Qemu can decide if it can request
0727d3
dynamically enabled XSAVE features permission.
0727d3
https://lore.kernel.org/all/20220126152210.3044876-1-pbonzini@redhat.com/
0727d3
0727d3
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
0727d3
Signed-off-by: Yang Zhong <yang.zhong@intel.com>
0727d3
Signed-off-by: Jing Liu <jing2.liu@intel.com>
0727d3
Message-Id: <20220217060434.52460-4-yang.zhong@intel.com>
0727d3
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
0727d3
(cherry picked from commit 19db68ca68a78fa033a21d419036b6e416554564)
0727d3
Signed-off-by: Paul Lai <plai@redhat.com>
0727d3
---
0727d3
 target/i386/cpu.c          |  7 +++++
0727d3
 target/i386/cpu.h          |  4 +++
0727d3
 target/i386/kvm/kvm-cpu.c  | 12 ++++----
0727d3
 target/i386/kvm/kvm.c      | 57 ++++++++++++++++++++++++++++++++++++++
0727d3
 target/i386/kvm/kvm_i386.h |  1 +
0727d3
 5 files changed, 75 insertions(+), 6 deletions(-)
0727d3
0727d3
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
0727d3
index 0453c27c9d..c19b51ea32 100644
0727d3
--- a/target/i386/cpu.c
0727d3
+++ b/target/i386/cpu.c
0727d3
@@ -6027,6 +6027,7 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu)
0727d3
     CPUX86State *env = &cpu->env;
0727d3
     int i;
0727d3
     uint64_t mask;
0727d3
+    static bool request_perm;
0727d3
 
0727d3
     if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
0727d3
         env->features[FEAT_XSAVE_COMP_LO] = 0;
0727d3
@@ -6042,6 +6043,12 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu)
0727d3
         }
0727d3
     }
0727d3
 
0727d3
+    /* Only request permission for first vcpu */
0727d3
+    if (kvm_enabled() && !request_perm) {
0727d3
+        kvm_request_xsave_components(cpu, mask);
0727d3
+        request_perm = true;
0727d3
+    }
0727d3
+
0727d3
     env->features[FEAT_XSAVE_COMP_LO] = mask;
0727d3
     env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
0727d3
 }
0727d3
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
0727d3
index e1dd8b9555..58676390e6 100644
0727d3
--- a/target/i386/cpu.h
0727d3
+++ b/target/i386/cpu.h
0727d3
@@ -549,6 +549,10 @@ typedef enum X86Seg {
0727d3
 #define XSTATE_ZMM_Hi256_MASK           (1ULL << XSTATE_ZMM_Hi256_BIT)
0727d3
 #define XSTATE_Hi16_ZMM_MASK            (1ULL << XSTATE_Hi16_ZMM_BIT)
0727d3
 #define XSTATE_PKRU_MASK                (1ULL << XSTATE_PKRU_BIT)
0727d3
+#define XSTATE_XTILE_CFG_MASK           (1ULL << XSTATE_XTILE_CFG_BIT)
0727d3
+#define XSTATE_XTILE_DATA_MASK          (1ULL << XSTATE_XTILE_DATA_BIT)
0727d3
+
0727d3
+#define XSTATE_DYNAMIC_MASK             (XSTATE_XTILE_DATA_MASK)
0727d3
 
0727d3
 #define ESA_FEATURE_ALIGN64_BIT         1
0727d3
 
0727d3
diff --git a/target/i386/kvm/kvm-cpu.c b/target/i386/kvm/kvm-cpu.c
0727d3
index 86ef7b2712..bdc967c484 100644
0727d3
--- a/target/i386/kvm/kvm-cpu.c
0727d3
+++ b/target/i386/kvm/kvm-cpu.c
0727d3
@@ -84,7 +84,7 @@ static void kvm_cpu_max_instance_init(X86CPU *cpu)
0727d3
 static void kvm_cpu_xsave_init(void)
0727d3
 {
0727d3
     static bool first = true;
0727d3
-    KVMState *s = kvm_state;
0727d3
+    uint32_t eax, ebx, ecx, edx;
0727d3
     int i;
0727d3
 
0727d3
     if (!first) {
0727d3
@@ -100,11 +100,11 @@ static void kvm_cpu_xsave_init(void)
0727d3
         ExtSaveArea *esa = &x86_ext_save_areas[i];
0727d3
 
0727d3
         if (esa->size) {
0727d3
-            int sz = kvm_arch_get_supported_cpuid(s, 0xd, i, R_EAX);
0727d3
-            if (sz != 0) {
0727d3
-                assert(esa->size == sz);
0727d3
-                esa->offset = kvm_arch_get_supported_cpuid(s, 0xd, i, R_EBX);
0727d3
-                esa->ecx = kvm_arch_get_supported_cpuid(s, 0xd, i, R_ECX);
0727d3
+            host_cpuid(0xd, i, &eax, &ebx, &ecx, &edx);
0727d3
+            if (eax != 0) {
0727d3
+                assert(esa->size == eax);
0727d3
+                esa->offset = ebx;
0727d3
+                esa->ecx = ecx;
0727d3
             }
0727d3
         }
0727d3
     }
0727d3
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
0727d3
index a668f521ac..b5d98c4361 100644
0727d3
--- a/target/i386/kvm/kvm.c
0727d3
+++ b/target/i386/kvm/kvm.c
0727d3
@@ -17,6 +17,7 @@
0727d3
 #include "qapi/error.h"
0727d3
 #include <sys/ioctl.h>
0727d3
 #include <sys/utsname.h>
0727d3
+#include <sys/syscall.h>
0727d3
 
0727d3
 #include <linux/kvm.h>
0727d3
 #include "standard-headers/asm-x86/kvm_para.h"
0727d3
@@ -347,6 +348,7 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
0727d3
     struct kvm_cpuid2 *cpuid;
0727d3
     uint32_t ret = 0;
0727d3
     uint32_t cpuid_1_edx;
0727d3
+    uint64_t bitmask;
0727d3
 
0727d3
     cpuid = get_supported_cpuid(s);
0727d3
 
0727d3
@@ -404,6 +406,25 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
0727d3
         if (!has_msr_arch_capabs) {
0727d3
             ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES;
0727d3
         }
0727d3
+    } else if (function == 0xd && index == 0 &&
0727d3
+               (reg == R_EAX || reg == R_EDX)) {
0727d3
+        struct kvm_device_attr attr = {
0727d3
+            .group = 0,
0727d3
+            .attr = KVM_X86_XCOMP_GUEST_SUPP,
0727d3
+            .addr = (unsigned long) &bitmask
0727d3
+        };
0727d3
+
0727d3
+        bool sys_attr = kvm_check_extension(s, KVM_CAP_SYS_ATTRIBUTES);
0727d3
+        if (!sys_attr) {
0727d3
+            warn_report("cannot get sys attribute capabilities %d", sys_attr);
0727d3
+        }
0727d3
+
0727d3
+        int rc = kvm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr);
0727d3
+        if (rc == -1 && (errno == ENXIO || errno == EINVAL)) {
0727d3
+            warn_report("KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) "
0727d3
+                        "error: %d", rc);
0727d3
+        }
0727d3
+        ret = (reg == R_EAX) ? bitmask : bitmask >> 32;
0727d3
     } else if (function == 0x80000001 && reg == R_ECX) {
0727d3
         /*
0727d3
          * It's safe to enable TOPOEXT even if it's not returned by
0727d3
@@ -5054,3 +5075,39 @@ bool kvm_arch_cpu_check_are_resettable(void)
0727d3
 {
0727d3
     return !sev_es_enabled();
0727d3
 }
0727d3
+
0727d3
+#define ARCH_REQ_XCOMP_GUEST_PERM       0x1025
0727d3
+
0727d3
+void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask)
0727d3
+{
0727d3
+    KVMState *s = kvm_state;
0727d3
+    uint64_t supported;
0727d3
+
0727d3
+    mask &= XSTATE_DYNAMIC_MASK;
0727d3
+    if (!mask) {
0727d3
+        return;
0727d3
+    }
0727d3
+    /*
0727d3
+     * Just ignore bits that are not in CPUID[EAX=0xD,ECX=0].
0727d3
+     * ARCH_REQ_XCOMP_GUEST_PERM would fail, and QEMU has warned
0727d3
+     * about them already because they are not supported features.
0727d3
+     */
0727d3
+    supported = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
0727d3
+    supported |= (uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32;
0727d3
+    mask &= supported;
0727d3
+
0727d3
+    while (mask) {
0727d3
+        int bit = ctz64(mask);
0727d3
+        int rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit);
0727d3
+        if (rc) {
0727d3
+            /*
0727d3
+             * Older kernel version (<5.17) do not support
0727d3
+             * ARCH_REQ_XCOMP_GUEST_PERM, but also do not return
0727d3
+             * any dynamic feature from kvm_arch_get_supported_cpuid.
0727d3
+             */
0727d3
+            warn_report("prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure "
0727d3
+                        "for feature bit %d", bit);
0727d3
+        }
0727d3
+        mask &= ~BIT_ULL(bit);
0727d3
+    }
0727d3
+}
0727d3
diff --git a/target/i386/kvm/kvm_i386.h b/target/i386/kvm/kvm_i386.h
0727d3
index a978509d50..4124912c20 100644
0727d3
--- a/target/i386/kvm/kvm_i386.h
0727d3
+++ b/target/i386/kvm/kvm_i386.h
0727d3
@@ -52,5 +52,6 @@ bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp);
0727d3
 uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address);
0727d3
 
0727d3
 bool kvm_enable_sgx_provisioning(KVMState *s);
0727d3
+void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask);
0727d3
 
0727d3
 #endif
0727d3
-- 
0727d3
2.35.3
0727d3