@@ -0,0 +1,258 @@
+From 5b7a08e0ea5bb51f886f2f5e2373de2bf5981dd2 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 28 Nov 2019 20:58:05 +0100
+Subject: [PATCH 1/2] arm64: KVM: Invoke compute_layout() before alternatives
+ are applied
+
+compute_layout() is invoked as part of an alternative fixup under
+stop_machine(). This function invokes get_random_long() which acquires a
+sleeping lock on -RT which can not be acquired in this context.
+
+Rename compute_layout() to kvm_compute_layout() and invoke it before
+stop_machine() applies the alternatives. Add a __init prefix to
+kvm_compute_layout() because the caller has it, too (and so the code can be
+discarded after boot).
+
+Reviewed-by: James Morse <james.morse@arm.com>
+Acked-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+---
+ arch/arm64/include/asm/kvm_mmu.h | 1 +
+ arch/arm64/kernel/smp.c          | 4 ++++
+ arch/arm64/kvm/va_layout.c       | 8 +-------
+ 3 files changed, 6 insertions(+), 7 deletions(-)
+
+diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
+index befe37d..53d846f 100644
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+
+ 
+ void kvm_update_va_mask(struct alt_instr *alt,
+ 			__le32 *origptr, __le32 *updptr, int nr_inst);
++void kvm_compute_layout(void);
+ 
+ static inline unsigned long __kern_hyp_va(unsigned long v)
+ {
+diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
+index dc9fe87..02d41ea 100644
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+
+ #include <linux/of.h>
+ #include <linux/irq_work.h>
+ #include <linux/kexec.h>
++#include <linux/kvm_host.h>
+ 
+ #include <asm/alternative.h>
+ #include <asm/atomic.h>
+
+ #include <asm/cputype.h>
+ #include <asm/cpu_ops.h>
+ #include <asm/daifflags.h>
++#include <asm/kvm_mmu.h>
+ #include <asm/mmu_context.h>
+ #include <asm/numa.h>
+ #include <asm/pgtable.h>
+@@ -408,6 +410,8 @@ static void __init hyp_mode_check(void)
+ 			   "CPU: CPUs started in inconsistent modes");
+ 	else
+ 		pr_info("CPU: All CPU(s) started at EL1\n");
++	if (IS_ENABLED(CONFIG_KVM_ARM_HOST))
++		kvm_compute_layout();
+ }
+ 
+ void __init smp_cpus_done(unsigned int max_cpus)
+diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
+index 2cf7d4b..dab1fea 100644
+--- a/arch/arm64/kvm/va_layout.c
++++ b/arch/arm64/kvm/va_layout.c
+
+ static u64 tag_val;
+ static u64 va_mask;
+ 
+-static void compute_layout(void)
++__init void kvm_compute_layout(void)
+ {
+ 	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
+ 	u64 hyp_va_msb;
+@@ -110,9 +110,6 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
+ 
+ 	BUG_ON(nr_inst != 5);
+ 
+-	if (!has_vhe() && !va_mask)
+-		compute_layout();
+-
+ 	for (i = 0; i < nr_inst; i++) {
+ 		u32 rd, rn, insn, oinsn;
+ 
+@@ -156,9 +153,6 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
+ 		return;
+ 	}
+ 
+-	if (!va_mask)
+-		compute_layout();
+-
+ 	/*
+ 	 * Compute HYP VA by using the same computation as kern_hyp_va()
+ 	 */
+-- 
+1.8.3.1
+
+
+From d2d093ca412b7fd66acd745d00e3ebd4764fe127 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Sat, 28 Dec 2019 11:57:14 +0000
+Subject: [PATCH 2/2] arm64: kvm: Fix IDMAP overlap with HYP VA
+
+Booting 5.4 on LX2160A reveals that KVM is non-functional:
+
+kvm: Limiting the IPA size due to kernel Virtual Address limit
+kvm [1]: IPA Size Limit: 43bits
+kvm [1]: IDMAP intersecting with HYP VA, unable to continue
+kvm [1]: error initializing Hyp mode: -22
+
+Debugging shows:
+
+kvm [1]: IDMAP page: 81a26000
+kvm [1]: HYP VA range: 0:22ffffffff
+
+as RAM is located at:
+
+80000000-fbdfffff : System RAM
+2080000000-237fffffff : System RAM
+
+Comparing this with the same kernel on Armada 8040 shows:
+
+kvm: Limiting the IPA size due to kernel Virtual Address limit
+kvm [1]: IPA Size Limit: 43bits
+kvm [1]: IDMAP page: 2a26000
+kvm [1]: HYP VA range: 4800000000:493fffffff
+...
+kvm [1]: Hyp mode initialized successfully
+
+which indicates that hyp_va_msb is set, and is always set to the
+opposite value of the idmap page to avoid the overlap. This does not
+happen with the LX2160A.
+
+Further debugging shows vabits_actual = 39, kva_msb = 38 on LX2160A and
+kva_msb = 33 on Armada 8040. Looking at the bit layout of the HYP VA,
+there is still one bit available for hyp_va_msb. Set this bit
+appropriately. This allows KVM to be functional on the LX2160A, but
+without any HYP VA randomisation:
+
+kvm: Limiting the IPA size due to kernel Virtual Address limit
+kvm [1]: IPA Size Limit: 43bits
+kvm [1]: IDMAP page: 81a24000
+kvm [1]: HYP VA range: 4000000000:62ffffffff
+...
+kvm [1]: Hyp mode initialized successfully
+
+Fixes: ed57cac83e05 ("arm64: KVM: Introduce EL2 VA randomisation")
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+[maz: small additional cleanups, preserved case where the tag
+ is legitimately 0 and we can just use the mask, Fixes tag]
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/E1ilAiY-0000MA-RG@rmk-PC.armlinux.org.uk
+---
+ arch/arm64/kvm/va_layout.c | 56 +++++++++++++++++++++-------------------------
+ 1 file changed, 25 insertions(+), 31 deletions(-)
+
+diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
+index dab1fea..a4f48c1 100644
+--- a/arch/arm64/kvm/va_layout.c
++++ b/arch/arm64/kvm/va_layout.c
+
+ #include <asm/kvm_mmu.h>
+ 
+ /*
+- * The LSB of the random hyp VA tag or 0 if no randomization is used.
++ * The LSB of the HYP VA tag
+  */
+ static u8 tag_lsb;
+ /*
+- * The random hyp VA tag value with the region bit if hyp randomization is used
++ * The HYP VA tag value with the region bit
+  */
+ static u64 tag_val;
+ static u64 va_mask;
+ 
++/*
++ * We want to generate a hyp VA with the following format (with V ==
++ * vabits_actual):
++ *
++ *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
++ *  ---------------------------------------------------------
++ * | 0000000 | hyp_va_msb |    random tag  |  kern linear VA |
++ *           |--------- tag_val -----------|----- va_mask ---|
++ *
++ * which does not conflict with the idmap regions.
++ */
+ __init void kvm_compute_layout(void)
+ {
+ 	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
+ 	u64 hyp_va_msb;
+-	int kva_msb;
+ 
+ 	/* Where is my RAM region? */
+ 	hyp_va_msb = idmap_addr & BIT(vabits_actual - 1);
+ 	hyp_va_msb ^= BIT(vabits_actual - 1);
+ 
+-	kva_msb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
++	tag_lsb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
+ 			(u64)(high_memory - 1));
+ 
+-	if (kva_msb == (vabits_actual - 1)) {
+-		/*
+-		 * No space in the address, let's compute the mask so
+-		 * that it covers (vabits_actual - 1) bits, and the region
+-		 * bit. The tag stays set to zero.
+-		 */
+-		va_mask = BIT(vabits_actual - 1) - 1;
+-		va_mask |= hyp_va_msb;
+-	} else {
+-		/*
+-		 * We do have some free bits to insert a random tag.
+-		 * Hyp VAs are now created from kernel linear map VAs
+-		 * using the following formula (with V == vabits_actual):
+-		 *
+-		 *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
+-		 *  ---------------------------------------------------------
+-		 * | 0000000 | hyp_va_msb |    random tag  |  kern linear VA |
+-		 */
+-		tag_lsb = kva_msb;
+-		va_mask = GENMASK_ULL(tag_lsb - 1, 0);
+-		tag_val = get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
+-		tag_val |= hyp_va_msb;
+-		tag_val >>= tag_lsb;
++	va_mask = GENMASK_ULL(tag_lsb - 1, 0);
++	tag_val = hyp_va_msb;
++
++	if (tag_lsb != (vabits_actual - 1)) {
++		/* We have some free bits to insert a random tag. */
++		tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
+ 	}
++	tag_val >>= tag_lsb;
+ }
+ 
+ static u32 compute_instruction(int n, u32 rd, u32 rn)
+@@ -117,11 +111,11 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
+ 	 * VHE doesn't need any address translation, let's NOP
+ 	 * everything.
+ 	 *
+-	 * Alternatively, if we don't have any spare bits in
+-	 * the address, NOP everything after masking that
+-	 * kernel VA.
++	 * Alternatively, if the tag is zero (because the layout
++	 * dictates it and we don't have any spare bits in the
++	 * address), NOP everything after masking the kernel VA.
+ 	 */
+-	if (has_vhe() || (!tag_lsb && i > 0)) {
++	if (has_vhe() || (!tag_val && i > 0)) {
+ 		updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
+ 		continue;
+ 	}
+-- 
+1.8.3.1
+
@@ -866,6 +866,9 @@ Patch601: alsa-5.6.patch
 # This is already in 5.5 rhbz 1794369
 Patch603: 0001-e1000e-Add-support-for-Comet-Lake.patch
 
+#KVM fix
+Patch700: 0001-arm64-kvm-Fix-IDMAP-overlap-with-HYP-VA.patch
+
 #CentOS
 Patch9999: 0001-Fix-mt7615.patch
 # END OF PATCH DEFINITIONS
@@ -2909,3 +2912,4 @@ fi
 - Import headers and tools from Fedora
 - Fix BR foor CentOS 7
 - aarch64 mmap fixes
+- aarch64 kvm fixes