SOURCES/kvm-x86-kvm-Add-MTRR-support-for-kvm_get-put_msrs.patch

From 7a7ee1756f309f4f69ed9929b2d7a652f9a879b2 Mon Sep 17 00:00:00 2001
From: Alex Williamson <alex.williamson@redhat.com>
Date: Fri, 10 Apr 2015 16:45:53 +0200
Subject: [PATCH 12/14] x86: kvm: Add MTRR support for kvm_get|put_msrs()

Message-id: <20150410164553.16166.97985.stgit@gimli.home>
Patchwork-id: 64798
O-Subject: [RHEL7.2 qemu-kvm PATCH 2/3] x86: kvm: Add MTRR support for kvm_get|put_msrs()
Bugzilla: 1210510
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
RH-Acked-by: Laszlo Ersek <lersek@redhat.com>
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>

Upstream: d1ae67f626c5ed5729e1d8212834291b409d26df

The MTRR state in KVM currently runs completely independent of the
QEMU state in CPUX86State.mtrr_*.  This means that on migration, the
target loses MTRR state from the source.  Generally that's ok though
because KVM ignores it and maps everything as write-back anyway.  The
exception to this rule is when we have an assigned device and an IOMMU
that doesn't promote NoSnoop transactions from that device to be cache
coherent.  In that case KVM trusts the guest mapping of memory as
configured in the MTRR.

This patch updates kvm_get|put_msrs() so that we retrieve the actual
vCPU MTRR settings and therefore keep CPUX86State synchronized for
migration.  kvm_put_msrs() is also used on vCPU reset and therefore
allows future modifications of MTRR state at reset to be realized.

Note that the entries array used by both functions was already
slightly undersized for holding every possible MSR, so this patch
increases it beyond the 28 new entries necessary for MTRR state.

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Reviewed-by: Laszlo Ersek <lersek@redhat.com>
Cc: qemu-stable@nongnu.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 target-i386/cpu.h |   2 ++
 target-i386/kvm.c | 101 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 101 insertions(+), 2 deletions(-)

diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 763fbf2..e9edd3d 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -332,6 +332,8 @@
 #define MSR_MTRRphysBase(reg)           (0x200 + 2 * (reg))
 #define MSR_MTRRphysMask(reg)           (0x200 + 2 * (reg) + 1)
 
+#define MSR_MTRRphysIndex(addr)         ((((addr) & ~1u) - 0x200) / 2)
+
 #define MSR_MTRRfix64K_00000            0x250
 #define MSR_MTRRfix16K_80000            0x258
 #define MSR_MTRRfix16K_A0000            0x259
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 65362ac..97ae345 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -73,6 +73,7 @@ static int lm_capable_kernel;
 static bool has_msr_hv_hypercall;
 static bool has_msr_hv_vapic;
 static bool has_msr_hv_tsc;
+static bool has_msr_mtrr;
 
 static bool has_msr_architectural_pmu;
 static uint32_t num_architectural_pmu_counters;
@@ -711,6 +712,10 @@ int kvm_arch_init_vcpu(CPUState *cs)
         env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
     }
 
+    if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
+        has_msr_mtrr = true;
+    }
+
     return 0;
 }
 
@@ -1117,7 +1122,7 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
     CPUX86State *env = &cpu->env;
     struct {
         struct kvm_msrs info;
-        struct kvm_msr_entry entries[100];
+        struct kvm_msr_entry entries[150];
     } msr_data;
     struct kvm_msr_entry *msrs = msr_data.entries;
     int n = 0, i;
@@ -1219,6 +1224,37 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
             kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_REFERENCE_TSC,
                               env->msr_hv_tsc);
         }
+        if (has_msr_mtrr) {
+            kvm_msr_entry_set(&msrs[n++], MSR_MTRRdefType, env->mtrr_deftype);
+            kvm_msr_entry_set(&msrs[n++],
+                              MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
+            kvm_msr_entry_set(&msrs[n++],
+                              MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
+            kvm_msr_entry_set(&msrs[n++],
+                              MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
+            kvm_msr_entry_set(&msrs[n++],
+                              MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
+            kvm_msr_entry_set(&msrs[n++],
+                              MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
+            kvm_msr_entry_set(&msrs[n++],
+                              MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
+            kvm_msr_entry_set(&msrs[n++],
+                              MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
+            kvm_msr_entry_set(&msrs[n++],
+                              MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
+            kvm_msr_entry_set(&msrs[n++],
+                              MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
+            kvm_msr_entry_set(&msrs[n++],
+                              MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
+            kvm_msr_entry_set(&msrs[n++],
+                              MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
+            for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
+                kvm_msr_entry_set(&msrs[n++],
+                                  MSR_MTRRphysBase(i), env->mtrr_var[i].base);
+                kvm_msr_entry_set(&msrs[n++],
+                                  MSR_MTRRphysMask(i), env->mtrr_var[i].mask);
+            }
+        }
     }
     if (env->mcg_cap) {
         int i;
@@ -1418,7 +1454,7 @@ static int kvm_get_msrs(X86CPU *cpu)
     CPUX86State *env = &cpu->env;
     struct {
         struct kvm_msrs info;
-        struct kvm_msr_entry entries[100];
+        struct kvm_msr_entry entries[150];
     } msr_data;
     struct kvm_msr_entry *msrs = msr_data.entries;
     int ret, i, n;
@@ -1500,6 +1536,24 @@ static int kvm_get_msrs(X86CPU *cpu)
     if (has_msr_hv_tsc) {
         msrs[n++].index = HV_X64_MSR_REFERENCE_TSC;
     }
+    if (has_msr_mtrr) {
+        msrs[n++].index = MSR_MTRRdefType;
+        msrs[n++].index = MSR_MTRRfix64K_00000;
+        msrs[n++].index = MSR_MTRRfix16K_80000;
+        msrs[n++].index = MSR_MTRRfix16K_A0000;
+        msrs[n++].index = MSR_MTRRfix4K_C0000;
+        msrs[n++].index = MSR_MTRRfix4K_C8000;
+        msrs[n++].index = MSR_MTRRfix4K_D0000;
+        msrs[n++].index = MSR_MTRRfix4K_D8000;
+        msrs[n++].index = MSR_MTRRfix4K_E0000;
+        msrs[n++].index = MSR_MTRRfix4K_E8000;
+        msrs[n++].index = MSR_MTRRfix4K_F0000;
+        msrs[n++].index = MSR_MTRRfix4K_F8000;
+        for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
+            msrs[n++].index = MSR_MTRRphysBase(i);
+            msrs[n++].index = MSR_MTRRphysMask(i);
+        }
+    }
 
     msr_data.info.nmsrs = n;
     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
@@ -1614,6 +1668,49 @@ static int kvm_get_msrs(X86CPU *cpu)
         case HV_X64_MSR_REFERENCE_TSC:
             env->msr_hv_tsc = msrs[i].data;
             break;
+        case MSR_MTRRdefType:
+            env->mtrr_deftype = msrs[i].data;
+            break;
+        case MSR_MTRRfix64K_00000:
+            env->mtrr_fixed[0] = msrs[i].data;
+            break;
+        case MSR_MTRRfix16K_80000:
+            env->mtrr_fixed[1] = msrs[i].data;
+            break;
+        case MSR_MTRRfix16K_A0000:
+            env->mtrr_fixed[2] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_C0000:
+            env->mtrr_fixed[3] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_C8000:
+            env->mtrr_fixed[4] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_D0000:
+            env->mtrr_fixed[5] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_D8000:
+            env->mtrr_fixed[6] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_E0000:
+            env->mtrr_fixed[7] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_E8000:
+            env->mtrr_fixed[8] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_F0000:
+            env->mtrr_fixed[9] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_F8000:
+            env->mtrr_fixed[10] = msrs[i].data;
+            break;
+        case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
+            if (index & 1) {
+                env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data;
+            } else {
+                env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
+            }
+            break;
         }
     }
 
-- 
1.8.3.1
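
As a standalone illustration (not part of the patch itself), the sketch below
copies the MSR_MTRRphysBase/Mask/Index macros from the cpu.h hunk above and
assumes MSR_MTRRcap_VCNT is 8, the value QEMU's cpu.h uses for the number of
variable-range MTRRs. It walks the same MSR window the new case range in
kvm_get_msrs() covers and also checks the 28-entry count quoted in the commit
message.

#include <stdio.h>

/* Copied from the target-i386/cpu.h hunk above. */
#define MSR_MTRRphysBase(reg)           (0x200 + 2 * (reg))
#define MSR_MTRRphysMask(reg)           (0x200 + 2 * (reg) + 1)
#define MSR_MTRRphysIndex(addr)         ((((addr) & ~1u) - 0x200) / 2)

/* Assumption: eight variable-range MTRRs, matching QEMU's MSR_MTRRcap_VCNT. */
#define MSR_MTRRcap_VCNT                8

int main(void)
{
    unsigned int addr;

    /* 11 fixed-range MSRs + MTRRdefType + a base/mask pair per variable range. */
    printf("MTRR MSRs to synchronize: %d\n", 11 + 1 + 2 * MSR_MTRRcap_VCNT);

    /*
     * Even MSR numbers in the variable-range window are PhysBase registers,
     * odd ones are PhysMask, and MSR_MTRRphysIndex() recovers the mtrr_var[]
     * slot for either one; this is the same decode the (index & 1) test
     * performs in kvm_get_msrs().
     */
    for (addr = MSR_MTRRphysBase(0);
         addr <= MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1); addr++) {
        printf("MSR 0x%03x -> mtrr_var[%u].%s\n", addr,
               MSR_MTRRphysIndex(addr), (addr & 1) ? "mask" : "base");
    }

    return 0;
}

Compiling and running this with any C compiler prints the 28-entry total and
the mapping table for MSRs 0x200 through 0x20f.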