From 7a7ee1756f309f4f69ed9929b2d7a652f9a879b2 Mon Sep 17 00:00:00 2001
From: Alex Williamson <alex.williamson@redhat.com>
Date: Fri, 10 Apr 2015 16:45:53 +0200
Subject: [PATCH 12/14] x86: kvm: Add MTRR support for kvm_get|put_msrs()

Message-id: <20150410164553.16166.97985.stgit@gimli.home>
Patchwork-id: 64798
O-Subject: [RHEL7.2 qemu-kvm PATCH 2/3] x86: kvm: Add MTRR support for kvm_get|put_msrs()
Bugzilla: 1210510
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
RH-Acked-by: Laszlo Ersek <lersek@redhat.com>
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>

Upstream: d1ae67f626c5ed5729e1d8212834291b409d26df

The MTRR state in KVM currently runs completely independent of the
QEMU state in CPUX86State.mtrr_*. This means that on migration, the
target loses MTRR state from the source. Generally that's ok though
because KVM ignores it and maps everything as write-back anyway. The
exception to this rule is when we have an assigned device and an IOMMU
that doesn't promote NoSnoop transactions from that device to be cache
coherent. In that case KVM trusts the guest mapping of memory as
configured in the MTRR.

This patch updates kvm_get|put_msrs() so that we retrieve the actual
vCPU MTRR settings and therefore keep CPUX86State synchronized for
migration. kvm_put_msrs() is also used on vCPU reset and therefore
allows future modifications of MTRR state at reset to be realized.

Note that the entries array used by both functions was already
slightly undersized for holding every possible MSR, so this patch
increases it beyond the 28 new entries necessary for MTRR state.

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Reviewed-by: Laszlo Ersek <lersek@redhat.com>
Cc: qemu-stable@nongnu.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 target-i386/cpu.h |   2 ++
 target-i386/kvm.c | 101 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 101 insertions(+), 2 deletions(-)

diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 763fbf2..e9edd3d 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -332,6 +332,8 @@
 #define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg))
 #define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1)
 
+#define MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2)
+
 #define MSR_MTRRfix64K_00000 0x250
 #define MSR_MTRRfix16K_80000 0x258
 #define MSR_MTRRfix16K_A0000 0x259
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 65362ac..97ae345 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -73,6 +73,7 @@ static int lm_capable_kernel;
 static bool has_msr_hv_hypercall;
 static bool has_msr_hv_vapic;
 static bool has_msr_hv_tsc;
+static bool has_msr_mtrr;
 
 static bool has_msr_architectural_pmu;
 static uint32_t num_architectural_pmu_counters;
@@ -711,6 +712,10 @@ int kvm_arch_init_vcpu(CPUState *cs)
         env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
     }
 
+    if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
+        has_msr_mtrr = true;
+    }
+
     return 0;
 }
 
@@ -1117,7 +1122,7 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
     CPUX86State *env = &cpu->env;
     struct {
         struct kvm_msrs info;
-        struct kvm_msr_entry entries[100];
+        struct kvm_msr_entry entries[150];
     } msr_data;
     struct kvm_msr_entry *msrs = msr_data.entries;
     int n = 0, i;
@@ -1219,6 +1224,37 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
             kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_REFERENCE_TSC,
                               env->msr_hv_tsc);
         }
+        if (has_msr_mtrr) {
+            kvm_msr_entry_set(&msrs[n++], MSR_MTRRdefType, env->mtrr_deftype);
+            kvm_msr_entry_set(&msrs[n++],
+                              MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
+            kvm_msr_entry_set(&msrs[n++],
+                              MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
+            kvm_msr_entry_set(&msrs[n++],
+                              MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
+            kvm_msr_entry_set(&msrs[n++],
+                              MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
+            kvm_msr_entry_set(&msrs[n++],
+                              MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
+            kvm_msr_entry_set(&msrs[n++],
+                              MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
+            kvm_msr_entry_set(&msrs[n++],
+                              MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
+            kvm_msr_entry_set(&msrs[n++],
+                              MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
+            kvm_msr_entry_set(&msrs[n++],
+                              MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
+            kvm_msr_entry_set(&msrs[n++],
+                              MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
+            kvm_msr_entry_set(&msrs[n++],
+                              MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
+            for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
+                kvm_msr_entry_set(&msrs[n++],
+                                  MSR_MTRRphysBase(i), env->mtrr_var[i].base);
+                kvm_msr_entry_set(&msrs[n++],
+                                  MSR_MTRRphysMask(i), env->mtrr_var[i].mask);
+            }
+        }
     }
     if (env->mcg_cap) {
         int i;
@@ -1418,7 +1454,7 @@ static int kvm_get_msrs(X86CPU *cpu)
     CPUX86State *env = &cpu->env;
     struct {
         struct kvm_msrs info;
-        struct kvm_msr_entry entries[100];
+        struct kvm_msr_entry entries[150];
     } msr_data;
     struct kvm_msr_entry *msrs = msr_data.entries;
     int ret, i, n;
@@ -1500,6 +1536,24 @@ static int kvm_get_msrs(X86CPU *cpu)
     if (has_msr_hv_tsc) {
         msrs[n++].index = HV_X64_MSR_REFERENCE_TSC;
     }
+    if (has_msr_mtrr) {
+        msrs[n++].index = MSR_MTRRdefType;
+        msrs[n++].index = MSR_MTRRfix64K_00000;
+        msrs[n++].index = MSR_MTRRfix16K_80000;
+        msrs[n++].index = MSR_MTRRfix16K_A0000;
+        msrs[n++].index = MSR_MTRRfix4K_C0000;
+        msrs[n++].index = MSR_MTRRfix4K_C8000;
+        msrs[n++].index = MSR_MTRRfix4K_D0000;
+        msrs[n++].index = MSR_MTRRfix4K_D8000;
+        msrs[n++].index = MSR_MTRRfix4K_E0000;
+        msrs[n++].index = MSR_MTRRfix4K_E8000;
+        msrs[n++].index = MSR_MTRRfix4K_F0000;
+        msrs[n++].index = MSR_MTRRfix4K_F8000;
+        for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
+            msrs[n++].index = MSR_MTRRphysBase(i);
+            msrs[n++].index = MSR_MTRRphysMask(i);
+        }
+    }
 
     msr_data.info.nmsrs = n;
     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
@@ -1614,6 +1668,49 @@ static int kvm_get_msrs(X86CPU *cpu)
         case HV_X64_MSR_REFERENCE_TSC:
             env->msr_hv_tsc = msrs[i].data;
             break;
+        case MSR_MTRRdefType:
+            env->mtrr_deftype = msrs[i].data;
+            break;
+        case MSR_MTRRfix64K_00000:
+            env->mtrr_fixed[0] = msrs[i].data;
+            break;
+        case MSR_MTRRfix16K_80000:
+            env->mtrr_fixed[1] = msrs[i].data;
+            break;
+        case MSR_MTRRfix16K_A0000:
+            env->mtrr_fixed[2] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_C0000:
+            env->mtrr_fixed[3] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_C8000:
+            env->mtrr_fixed[4] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_D0000:
+            env->mtrr_fixed[5] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_D8000:
+            env->mtrr_fixed[6] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_E0000:
+            env->mtrr_fixed[7] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_E8000:
+            env->mtrr_fixed[8] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_F0000:
+            env->mtrr_fixed[9] = msrs[i].data;
+            break;
+        case MSR_MTRRfix4K_F8000:
+            env->mtrr_fixed[10] = msrs[i].data;
+            break;
+        case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
+            if (index & 1) {
+                env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data;
+            } else {
+                env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
+            }
+            break;
         }
     }
 
-- 
1.8.3.1