/[packages]/updates/8/kernel/current/SOURCES/arm-spectre-bhb-workaround.patch
ViewVC logotype

Contents of /updates/8/kernel/current/SOURCES/arm-spectre-bhb-workaround.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1789982 - (show annotations) (download)
Tue Mar 8 23:11:59 2022 UTC (2 years, 1 month ago) by tmb
File size: 11183 byte(s)
add current -stable queue
1 From foo@baz Tue Mar 8 08:32:37 PM CET 2022
2 From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
3 Date: Thu, 10 Feb 2022 16:05:45 +0000
4 Subject: ARM: Spectre-BHB workaround
5
6 From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
7
8 commit b9baf5c8c5c356757f4f9d8180b5e9d234065bc3 upstream.
9
10 Workaround the Spectre BHB issues for Cortex-A15, Cortex-A57,
11 Cortex-A72, Cortex-A73 and Cortex-A75. We also include Brahma B15 as
12 well to be safe, which is affected by Spectre V2 in the same ways as
13 Cortex-A15.
14
15 Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
16 Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
17 [changes due to lack of SYSTEM_FREEING_INITMEM - gregkh]
18 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
19 ---
20 arch/arm/include/asm/assembler.h | 10 ++++
21 arch/arm/include/asm/spectre.h | 4 +
22 arch/arm/include/asm/vmlinux.lds.h | 18 +++++++-
23 arch/arm/kernel/entry-armv.S | 79 ++++++++++++++++++++++++++++++++++---
24 arch/arm/kernel/entry-common.S | 24 +++++++++++
25 arch/arm/kernel/spectre.c | 4 +
26 arch/arm/kernel/traps.c | 38 +++++++++++++++++
27 arch/arm/mm/Kconfig | 10 ++++
28 arch/arm/mm/proc-v7-bugs.c | 76 +++++++++++++++++++++++++++++++++++
29 9 files changed, 254 insertions(+), 9 deletions(-)
30
31 --- a/arch/arm/include/asm/assembler.h
32 +++ b/arch/arm/include/asm/assembler.h
33 @@ -107,6 +107,16 @@
34 .endm
35 #endif
36
37 +#if __LINUX_ARM_ARCH__ < 7
38 + .macro dsb, args
39 + mcr p15, 0, r0, c7, c10, 4
40 + .endm
41 +
42 + .macro isb, args
43 + mcr p15, 0, r0, c7, c5, 4
44 + .endm
45 +#endif
46 +
47 .macro asm_trace_hardirqs_off, save=1
48 #if defined(CONFIG_TRACE_IRQFLAGS)
49 .if \save
50 --- a/arch/arm/include/asm/spectre.h
51 +++ b/arch/arm/include/asm/spectre.h
52 @@ -14,6 +14,7 @@ enum {
53 __SPECTRE_V2_METHOD_ICIALLU,
54 __SPECTRE_V2_METHOD_SMC,
55 __SPECTRE_V2_METHOD_HVC,
56 + __SPECTRE_V2_METHOD_LOOP8,
57 };
58
59 enum {
60 @@ -21,8 +22,11 @@ enum {
61 SPECTRE_V2_METHOD_ICIALLU = BIT(__SPECTRE_V2_METHOD_ICIALLU),
62 SPECTRE_V2_METHOD_SMC = BIT(__SPECTRE_V2_METHOD_SMC),
63 SPECTRE_V2_METHOD_HVC = BIT(__SPECTRE_V2_METHOD_HVC),
64 + SPECTRE_V2_METHOD_LOOP8 = BIT(__SPECTRE_V2_METHOD_LOOP8),
65 };
66
67 void spectre_v2_update_state(unsigned int state, unsigned int methods);
68
69 +int spectre_bhb_update_vectors(unsigned int method);
70 +
71 #endif
72 --- a/arch/arm/include/asm/vmlinux.lds.h
73 +++ b/arch/arm/include/asm/vmlinux.lds.h
74 @@ -116,11 +116,23 @@
75 */
76 #define ARM_VECTORS \
77 __vectors_lma = .; \
78 - .vectors 0xffff0000 : AT(__vectors_start) { \
79 - *(.vectors) \
80 + OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) { \
81 + .vectors { \
82 + *(.vectors) \
83 + } \
84 + .vectors.bhb.loop8 { \
85 + *(.vectors.bhb.loop8) \
86 + } \
87 + .vectors.bhb.bpiall { \
88 + *(.vectors.bhb.bpiall) \
89 + } \
90 } \
91 ARM_LMA(__vectors, .vectors); \
92 - . = __vectors_lma + SIZEOF(.vectors); \
93 + ARM_LMA(__vectors_bhb_loop8, .vectors.bhb.loop8); \
94 + ARM_LMA(__vectors_bhb_bpiall, .vectors.bhb.bpiall); \
95 + . = __vectors_lma + SIZEOF(.vectors) + \
96 + SIZEOF(.vectors.bhb.loop8) + \
97 + SIZEOF(.vectors.bhb.bpiall); \
98 \
99 __stubs_lma = .; \
100 .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) { \
101 --- a/arch/arm/kernel/entry-armv.S
102 +++ b/arch/arm/kernel/entry-armv.S
103 @@ -1000,12 +1000,11 @@ vector_\name:
104 sub lr, lr, #\correction
105 .endif
106
107 - @
108 - @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
109 - @ (parent CPSR)
110 - @
111 + @ Save r0, lr_<exception> (parent PC)
112 stmia sp, {r0, lr} @ save r0, lr
113 - mrs lr, spsr
114 +
115 + @ Save spsr_<exception> (parent CPSR)
116 +2: mrs lr, spsr
117 str lr, [sp, #8] @ save spsr
118
119 @
120 @@ -1026,6 +1025,44 @@ vector_\name:
121 movs pc, lr @ branch to handler in SVC mode
122 ENDPROC(vector_\name)
123
124 +#ifdef CONFIG_HARDEN_BRANCH_HISTORY
125 + .subsection 1
126 + .align 5
127 +vector_bhb_loop8_\name:
128 + .if \correction
129 + sub lr, lr, #\correction
130 + .endif
131 +
132 + @ Save r0, lr_<exception> (parent PC)
133 + stmia sp, {r0, lr}
134 +
135 + @ bhb workaround
136 + mov r0, #8
137 +1: b . + 4
138 + subs r0, r0, #1
139 + bne 1b
140 + dsb
141 + isb
142 + b 2b
143 +ENDPROC(vector_bhb_loop8_\name)
144 +
145 +vector_bhb_bpiall_\name:
146 + .if \correction
147 + sub lr, lr, #\correction
148 + .endif
149 +
150 + @ Save r0, lr_<exception> (parent PC)
151 + stmia sp, {r0, lr}
152 +
153 + @ bhb workaround
154 + mcr p15, 0, r0, c7, c5, 6 @ BPIALL
155 + @ isb not needed due to "movs pc, lr" in the vector stub
156 + @ which gives a "context synchronisation".
157 + b 2b
158 +ENDPROC(vector_bhb_bpiall_\name)
159 + .previous
160 +#endif
161 +
162 .align 2
163 @ handler addresses follow this label
164 1:
165 @@ -1034,6 +1071,10 @@ ENDPROC(vector_\name)
166 .section .stubs, "ax", %progbits
167 @ This must be the first word
168 .word vector_swi
169 +#ifdef CONFIG_HARDEN_BRANCH_HISTORY
170 + .word vector_bhb_loop8_swi
171 + .word vector_bhb_bpiall_swi
172 +#endif
173
174 vector_rst:
175 ARM( swi SYS_ERROR0 )
176 @@ -1148,8 +1189,10 @@ vector_addrexcptn:
177 * FIQ "NMI" handler
178 *-----------------------------------------------------------------------------
179 * Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86
180 - * systems.
181 + * systems. This must be the last vector stub, so lets place it in its own
182 + * subsection.
183 */
184 + .subsection 2
185 vector_stub fiq, FIQ_MODE, 4
186
187 .long __fiq_usr @ 0 (USR_26 / USR_32)
188 @@ -1182,6 +1225,30 @@ vector_addrexcptn:
189 W(b) vector_irq
190 W(b) vector_fiq
191
192 +#ifdef CONFIG_HARDEN_BRANCH_HISTORY
193 + .section .vectors.bhb.loop8, "ax", %progbits
194 +.L__vectors_bhb_loop8_start:
195 + W(b) vector_rst
196 + W(b) vector_bhb_loop8_und
197 + W(ldr) pc, .L__vectors_bhb_loop8_start + 0x1004
198 + W(b) vector_bhb_loop8_pabt
199 + W(b) vector_bhb_loop8_dabt
200 + W(b) vector_addrexcptn
201 + W(b) vector_bhb_loop8_irq
202 + W(b) vector_bhb_loop8_fiq
203 +
204 + .section .vectors.bhb.bpiall, "ax", %progbits
205 +.L__vectors_bhb_bpiall_start:
206 + W(b) vector_rst
207 + W(b) vector_bhb_bpiall_und
208 + W(ldr) pc, .L__vectors_bhb_bpiall_start + 0x1008
209 + W(b) vector_bhb_bpiall_pabt
210 + W(b) vector_bhb_bpiall_dabt
211 + W(b) vector_addrexcptn
212 + W(b) vector_bhb_bpiall_irq
213 + W(b) vector_bhb_bpiall_fiq
214 +#endif
215 +
216 .data
217 .align 2
218
219 --- a/arch/arm/kernel/entry-common.S
220 +++ b/arch/arm/kernel/entry-common.S
221 @@ -154,12 +154,36 @@ ENDPROC(ret_from_fork)
222 */
223
224 .align 5
225 +#ifdef CONFIG_HARDEN_BRANCH_HISTORY
226 +ENTRY(vector_bhb_loop8_swi)
227 + sub sp, sp, #PT_REGS_SIZE
228 + stmia sp, {r0 - r12}
229 + mov r8, #8
230 +1: b 2f
231 +2: subs r8, r8, #1
232 + bne 1b
233 + dsb
234 + isb
235 + b 3f
236 +ENDPROC(vector_bhb_loop8_swi)
237 +
238 + .align 5
239 +ENTRY(vector_bhb_bpiall_swi)
240 + sub sp, sp, #PT_REGS_SIZE
241 + stmia sp, {r0 - r12}
242 + mcr p15, 0, r8, c7, c5, 6 @ BPIALL
243 + isb
244 + b 3f
245 +ENDPROC(vector_bhb_bpiall_swi)
246 +#endif
247 + .align 5
248 ENTRY(vector_swi)
249 #ifdef CONFIG_CPU_V7M
250 v7m_exception_entry
251 #else
252 sub sp, sp, #PT_REGS_SIZE
253 stmia sp, {r0 - r12} @ Calling r0 - r12
254 +3:
255 ARM( add r8, sp, #S_PC )
256 ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr
257 THUMB( mov r8, sp )
258 --- a/arch/arm/kernel/spectre.c
259 +++ b/arch/arm/kernel/spectre.c
260 @@ -45,6 +45,10 @@ ssize_t cpu_show_spectre_v2(struct devic
261 method = "Firmware call";
262 break;
263
264 + case SPECTRE_V2_METHOD_LOOP8:
265 + method = "History overwrite";
266 + break;
267 +
268 default:
269 method = "Multiple mitigations";
270 break;
271 --- a/arch/arm/kernel/traps.c
272 +++ b/arch/arm/kernel/traps.c
273 @@ -30,6 +30,7 @@
274 #include <linux/atomic.h>
275 #include <asm/cacheflush.h>
276 #include <asm/exception.h>
277 +#include <asm/spectre.h>
278 #include <asm/unistd.h>
279 #include <asm/traps.h>
280 #include <asm/ptrace.h>
281 @@ -801,6 +802,43 @@ static void flush_vectors(void *vma, siz
282 flush_icache_range(start, end);
283 }
284
285 +#ifdef CONFIG_HARDEN_BRANCH_HISTORY
286 +int spectre_bhb_update_vectors(unsigned int method)
287 +{
288 + extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[];
289 + extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[];
290 + void *vec_start, *vec_end;
291 +
292 + if (system_state > SYSTEM_SCHEDULING) {
293 + pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n",
294 + smp_processor_id());
295 + return SPECTRE_VULNERABLE;
296 + }
297 +
298 + switch (method) {
299 + case SPECTRE_V2_METHOD_LOOP8:
300 + vec_start = __vectors_bhb_loop8_start;
301 + vec_end = __vectors_bhb_loop8_end;
302 + break;
303 +
304 + case SPECTRE_V2_METHOD_BPIALL:
305 + vec_start = __vectors_bhb_bpiall_start;
306 + vec_end = __vectors_bhb_bpiall_end;
307 + break;
308 +
309 + default:
310 + pr_err("CPU%u: unknown Spectre BHB state %d\n",
311 + smp_processor_id(), method);
312 + return SPECTRE_VULNERABLE;
313 + }
314 +
315 + copy_from_lma(vectors_page, vec_start, vec_end);
316 + flush_vectors(vectors_page, 0, vec_end - vec_start);
317 +
318 + return SPECTRE_MITIGATED;
319 +}
320 +#endif
321 +
322 void __init early_trap_init(void *vectors_base)
323 {
324 extern char __stubs_start[], __stubs_end[];
325 --- a/arch/arm/mm/Kconfig
326 +++ b/arch/arm/mm/Kconfig
327 @@ -851,6 +851,16 @@ config HARDEN_BRANCH_PREDICTOR
328
329 If unsure, say Y.
330
331 +config HARDEN_BRANCH_HISTORY
332 + bool "Harden Spectre style attacks against branch history" if EXPERT
333 + depends on CPU_SPECTRE
334 + default y
335 + help
336 + Speculation attacks against some high-performance processors can
337 + make use of branch history to influence future speculation. When
338 + taking an exception, a sequence of branches overwrites the branch
339 + history, or branch history is invalidated.
340 +
341 config TLS_REG_EMUL
342 bool
343 select NEED_KUSER_HELPERS
344 --- a/arch/arm/mm/proc-v7-bugs.c
345 +++ b/arch/arm/mm/proc-v7-bugs.c
346 @@ -177,6 +177,81 @@ static void cpu_v7_spectre_v2_init(void)
347 spectre_v2_update_state(state, method);
348 }
349
350 +#ifdef CONFIG_HARDEN_BRANCH_HISTORY
351 +static int spectre_bhb_method;
352 +
353 +static const char *spectre_bhb_method_name(int method)
354 +{
355 + switch (method) {
356 + case SPECTRE_V2_METHOD_LOOP8:
357 + return "loop";
358 +
359 + case SPECTRE_V2_METHOD_BPIALL:
360 + return "BPIALL";
361 +
362 + default:
363 + return "unknown";
364 + }
365 +}
366 +
367 +static int spectre_bhb_install_workaround(int method)
368 +{
369 + if (spectre_bhb_method != method) {
370 + if (spectre_bhb_method) {
371 + pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n",
372 + smp_processor_id());
373 +
374 + return SPECTRE_VULNERABLE;
375 + }
376 +
377 + if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE)
378 + return SPECTRE_VULNERABLE;
379 +
380 + spectre_bhb_method = method;
381 + }
382 +
383 + pr_info("CPU%u: Spectre BHB: using %s workaround\n",
384 + smp_processor_id(), spectre_bhb_method_name(method));
385 +
386 + return SPECTRE_MITIGATED;
387 +}
388 +#else
389 +static int spectre_bhb_install_workaround(int method)
390 +{
391 + return SPECTRE_VULNERABLE;
392 +}
393 +#endif
394 +
395 +static void cpu_v7_spectre_bhb_init(void)
396 +{
397 + unsigned int state, method = 0;
398 +
399 + switch (read_cpuid_part()) {
400 + case ARM_CPU_PART_CORTEX_A15:
401 + case ARM_CPU_PART_BRAHMA_B15:
402 + case ARM_CPU_PART_CORTEX_A57:
403 + case ARM_CPU_PART_CORTEX_A72:
404 + state = SPECTRE_MITIGATED;
405 + method = SPECTRE_V2_METHOD_LOOP8;
406 + break;
407 +
408 + case ARM_CPU_PART_CORTEX_A73:
409 + case ARM_CPU_PART_CORTEX_A75:
410 + state = SPECTRE_MITIGATED;
411 + method = SPECTRE_V2_METHOD_BPIALL;
412 + break;
413 +
414 + default:
415 + state = SPECTRE_UNAFFECTED;
416 + break;
417 + }
418 +
419 + if (state == SPECTRE_MITIGATED)
420 + state = spectre_bhb_install_workaround(method);
421 +
422 + spectre_v2_update_state(state, method);
423 +}
424 +
425 static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
426 u32 mask, const char *msg)
427 {
428 @@ -217,4 +292,5 @@ void cpu_v7_ca15_ibe(void)
429 void cpu_v7_bugs_init(void)
430 {
431 cpu_v7_spectre_v2_init();
432 + cpu_v7_spectre_bhb_init();
433 }

  ViewVC Help
Powered by ViewVC 1.1.30