/[packages]/updates/8/kernel/current/SOURCES/arm64-entry-add-vectors-that-have-the-bhb-mitigation-sequences.patch
ViewVC logotype

Contents of /updates/8/kernel/current/SOURCES/arm64-entry-add-vectors-that-have-the-bhb-mitigation-sequences.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1789982 - (show annotations) (download)
Tue Mar 8 23:11:59 2022 UTC (2 years, 1 month ago) by tmb
File size: 7968 byte(s)
add current -stable queue
1 From foo@baz Tue Mar 8 08:47:19 PM CET 2022
2 From: James Morse <james.morse@arm.com>
3 Date: Thu, 18 Nov 2021 13:59:46 +0000
4 Subject: arm64: entry: Add vectors that have the bhb mitigation sequences
5
6 From: James Morse <james.morse@arm.com>
7
8 commit ba2689234be92024e5635d30fe744f4853ad97db upstream.
9
10 Some CPUs affected by Spectre-BHB need a sequence of branches, or a
11 firmware call to be run before any indirect branch. This needs to go
12 in the vectors. No CPU needs both.
13
14 While this can be patched in, it would run on all CPUs as there is a
15 single set of vectors. If only one part of a big/little combination is
16 affected, the unaffected CPUs have to run the mitigation too.
17
18 Create extra vectors that include the sequence. Subsequent patches will
19 allow affected CPUs to select this set of vectors. Later patches will
20 modify the loop count to match what the CPU requires.
21
22 Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
23 Signed-off-by: James Morse <james.morse@arm.com>
24 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
25 ---
26 arch/arm64/include/asm/assembler.h | 24 ++++++++++++++++
27 arch/arm64/include/asm/vectors.h | 34 +++++++++++++++++++++++
28 arch/arm64/kernel/entry.S | 53 ++++++++++++++++++++++++++++++-------
29 arch/arm64/kernel/proton-pack.c | 16 +++++++++++
30 include/linux/arm-smccc.h | 5 +++
31 5 files changed, 123 insertions(+), 9 deletions(-)
32 create mode 100644 arch/arm64/include/asm/vectors.h
33
34 --- a/arch/arm64/include/asm/assembler.h
35 +++ b/arch/arm64/include/asm/assembler.h
36 @@ -830,4 +830,28 @@ alternative_endif
37
38 #endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */
39
40 + .macro __mitigate_spectre_bhb_loop tmp
41 +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
42 + mov \tmp, #32
43 +.Lspectre_bhb_loop\@:
44 + b . + 4
45 + subs \tmp, \tmp, #1
46 + b.ne .Lspectre_bhb_loop\@
47 + sb
48 +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
49 + .endm
50 +
51 + /* Save/restores x0-x3 to the stack */
52 + .macro __mitigate_spectre_bhb_fw
53 +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
54 + stp x0, x1, [sp, #-16]!
55 + stp x2, x3, [sp, #-16]!
56 + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3
57 +alternative_cb smccc_patch_fw_mitigation_conduit
58 + nop // Patched to SMC/HVC #0
59 +alternative_cb_end
60 + ldp x2, x3, [sp], #16
61 + ldp x0, x1, [sp], #16
62 +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
63 + .endm
64 #endif /* __ASM_ASSEMBLER_H */
65 --- /dev/null
66 +++ b/arch/arm64/include/asm/vectors.h
67 @@ -0,0 +1,34 @@
68 +/* SPDX-License-Identifier: GPL-2.0-only */
69 +/*
70 + * Copyright (C) 2022 ARM Ltd.
71 + */
72 +#ifndef __ASM_VECTORS_H
73 +#define __ASM_VECTORS_H
74 +
75 +/*
76 + * Note: the order of this enum corresponds to two arrays in entry.S:
77 + * tramp_vecs and __bp_harden_el1_vectors. By default the canonical
78 + * 'full fat' vectors are used directly.
79 + */
80 +enum arm64_bp_harden_el1_vectors {
81 +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
82 + /*
83 + * Perform the BHB loop mitigation, before branching to the canonical
84 + * vectors.
85 + */
86 + EL1_VECTOR_BHB_LOOP,
87 +
88 + /*
89 + * Make the SMC call for firmware mitigation, before branching to the
90 + * canonical vectors.
91 + */
92 + EL1_VECTOR_BHB_FW,
93 +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
94 +
95 + /*
96 + * Remap the kernel before branching to the canonical vectors.
97 + */
98 + EL1_VECTOR_KPTI,
99 +};
100 +
101 +#endif /* __ASM_VECTORS_H */
102 --- a/arch/arm64/kernel/entry.S
103 +++ b/arch/arm64/kernel/entry.S
104 @@ -646,13 +646,26 @@ alternative_else_nop_endif
105 sub \dst, \dst, PAGE_SIZE
106 .endm
107
108 - .macro tramp_ventry, vector_start, regsize, kpti
109 +
110 +#define BHB_MITIGATION_NONE 0
111 +#define BHB_MITIGATION_LOOP 1
112 +#define BHB_MITIGATION_FW 2
113 +
114 + .macro tramp_ventry, vector_start, regsize, kpti, bhb
115 .align 7
116 1:
117 .if \regsize == 64
118 msr tpidrro_el0, x30 // Restored in kernel_ventry
119 .endif
120
121 + .if \bhb == BHB_MITIGATION_LOOP
122 + /*
123 + * This sequence must appear before the first indirect branch. i.e. the
124 + * ret out of tramp_ventry. It appears here because x30 is free.
125 + */
126 + __mitigate_spectre_bhb_loop x30
127 + .endif // \bhb == BHB_MITIGATION_LOOP
128 +
129 .if \kpti == 1
130 /*
131 * Defend against branch aliasing attacks by pushing a dummy
132 @@ -680,6 +693,15 @@ alternative_else_nop_endif
133 ldr x30, =vectors
134 .endif // \kpti == 1
135
136 + .if \bhb == BHB_MITIGATION_FW
137 + /*
138 + * The firmware sequence must appear before the first indirect branch.
139 + * i.e. the ret out of tramp_ventry. But it also needs the stack to be
140 + * mapped to save/restore the registers the SMC clobbers.
141 + */
142 + __mitigate_spectre_bhb_fw
143 + .endif // \bhb == BHB_MITIGATION_FW
144 +
145 add x30, x30, #(1b - \vector_start + 4)
146 ret
147 .org 1b + 128 // Did we overflow the ventry slot?
148 @@ -687,6 +709,9 @@ alternative_else_nop_endif
149
150 .macro tramp_exit, regsize = 64
151 adr x30, tramp_vectors
152 +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
153 + add x30, x30, SZ_4K
154 +#endif
155 msr vbar_el1, x30
156 ldr lr, [sp, #S_LR]
157 tramp_unmap_kernel x29
158 @@ -698,26 +723,32 @@ alternative_else_nop_endif
159 sb
160 .endm
161
162 - .macro generate_tramp_vector, kpti
163 + .macro generate_tramp_vector, kpti, bhb
164 .Lvector_start\@:
165 .space 0x400
166
167 .rept 4
168 - tramp_ventry .Lvector_start\@, 64, \kpti
169 + tramp_ventry .Lvector_start\@, 64, \kpti, \bhb
170 .endr
171 .rept 4
172 - tramp_ventry .Lvector_start\@, 32, \kpti
173 + tramp_ventry .Lvector_start\@, 32, \kpti, \bhb
174 .endr
175 .endm
176
177 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
178 /*
179 * Exception vectors trampoline.
180 + * The order must match __bp_harden_el1_vectors and the
181 + * arm64_bp_harden_el1_vectors enum.
182 */
183 .pushsection ".entry.tramp.text", "ax"
184 .align 11
185 SYM_CODE_START_NOALIGN(tramp_vectors)
186 - generate_tramp_vector kpti=1
187 +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
188 + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP
189 + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW
190 +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
191 + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE
192 SYM_CODE_END(tramp_vectors)
193
194 SYM_CODE_START(tramp_exit_native)
195 @@ -744,7 +775,7 @@ SYM_DATA_END(__entry_tramp_data_start)
196 * Exception vectors for spectre mitigations on entry from EL1 when
197 * kpti is not in use.
198 */
199 - .macro generate_el1_vector
200 + .macro generate_el1_vector, bhb
201 .Lvector_start\@:
202 kernel_ventry 1, t, 64, sync // Synchronous EL1t
203 kernel_ventry 1, t, 64, irq // IRQ EL1t
204 @@ -757,17 +788,21 @@ SYM_DATA_END(__entry_tramp_data_start)
205 kernel_ventry 1, h, 64, error // Error EL1h
206
207 .rept 4
208 - tramp_ventry .Lvector_start\@, 64, kpti=0
209 + tramp_ventry .Lvector_start\@, 64, 0, \bhb
210 .endr
211 .rept 4
212 - tramp_ventry .Lvector_start\@, 32, kpti=0
213 + tramp_ventry .Lvector_start\@, 32, 0, \bhb
214 .endr
215 .endm
216
217 +/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
218 .pushsection ".entry.text", "ax"
219 .align 11
220 SYM_CODE_START(__bp_harden_el1_vectors)
221 - generate_el1_vector
222 +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
223 + generate_el1_vector bhb=BHB_MITIGATION_LOOP
224 + generate_el1_vector bhb=BHB_MITIGATION_FW
225 +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
226 SYM_CODE_END(__bp_harden_el1_vectors)
227 .popsection
228
229 --- a/arch/arm64/kernel/proton-pack.c
230 +++ b/arch/arm64/kernel/proton-pack.c
231 @@ -770,3 +770,19 @@ int arch_prctl_spec_ctrl_get(struct task
232 return -ENODEV;
233 }
234 }
235 +
236 +/* Patched to NOP when enabled */
237 +void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
238 + __le32 *origptr,
239 + __le32 *updptr, int nr_inst)
240 +{
241 + BUG_ON(nr_inst != 1);
242 +}
243 +
244 +/* Patched to NOP when enabled */
245 +void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt,
246 + __le32 *origptr,
247 + __le32 *updptr, int nr_inst)
248 +{
249 + BUG_ON(nr_inst != 1);
250 +}
251 --- a/include/linux/arm-smccc.h
252 +++ b/include/linux/arm-smccc.h
253 @@ -92,6 +92,11 @@
254 ARM_SMCCC_SMC_32, \
255 0, 0x7fff)
256
257 +#define ARM_SMCCC_ARCH_WORKAROUND_3 \
258 + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
259 + ARM_SMCCC_SMC_32, \
260 + 0, 0x3fff)
261 +
262 #define ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID \
263 ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
264 ARM_SMCCC_SMC_32, \

  ViewVC Help
Powered by ViewVC 1.1.30