Skip to content

Commit a9a10e9

Browse files
committed
Merge tag 'x86_bugs_for_v6.19_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 CPU mitigation updates from Borislav Petkov: - Convert the tsx= cmdline parsing to use early_param() - Cleanup forward declarations gunk in bugs.c * tag 'x86_bugs_for_v6.19_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: x86/bugs: Get rid of the forward declarations x86/tsx: Get the tsx= command line parameter with early_param() x86/tsx: Make tsx_ctrl_state static
2 parents cb502f0 + e679970 commit a9a10e9

3 files changed

Lines changed: 126 additions & 174 deletions

File tree

arch/x86/kernel/cpu/bugs.c

Lines changed: 93 additions & 140 deletions
Original file line number | Diff line number | Diff line change
@@ -53,53 +53,6 @@
5353
* mitigation option.
5454
*/
5555

56-
static void __init spectre_v1_select_mitigation(void);
57-
static void __init spectre_v1_apply_mitigation(void);
58-
static void __init spectre_v2_select_mitigation(void);
59-
static void __init spectre_v2_update_mitigation(void);
60-
static void __init spectre_v2_apply_mitigation(void);
61-
static void __init retbleed_select_mitigation(void);
62-
static void __init retbleed_update_mitigation(void);
63-
static void __init retbleed_apply_mitigation(void);
64-
static void __init spectre_v2_user_select_mitigation(void);
65-
static void __init spectre_v2_user_update_mitigation(void);
66-
static void __init spectre_v2_user_apply_mitigation(void);
67-
static void __init ssb_select_mitigation(void);
68-
static void __init ssb_apply_mitigation(void);
69-
static void __init l1tf_select_mitigation(void);
70-
static void __init l1tf_apply_mitigation(void);
71-
static void __init mds_select_mitigation(void);
72-
static void __init mds_update_mitigation(void);
73-
static void __init mds_apply_mitigation(void);
74-
static void __init taa_select_mitigation(void);
75-
static void __init taa_update_mitigation(void);
76-
static void __init taa_apply_mitigation(void);
77-
static void __init mmio_select_mitigation(void);
78-
static void __init mmio_update_mitigation(void);
79-
static void __init mmio_apply_mitigation(void);
80-
static void __init rfds_select_mitigation(void);
81-
static void __init rfds_update_mitigation(void);
82-
static void __init rfds_apply_mitigation(void);
83-
static void __init srbds_select_mitigation(void);
84-
static void __init srbds_apply_mitigation(void);
85-
static void __init l1d_flush_select_mitigation(void);
86-
static void __init srso_select_mitigation(void);
87-
static void __init srso_update_mitigation(void);
88-
static void __init srso_apply_mitigation(void);
89-
static void __init gds_select_mitigation(void);
90-
static void __init gds_apply_mitigation(void);
91-
static void __init bhi_select_mitigation(void);
92-
static void __init bhi_update_mitigation(void);
93-
static void __init bhi_apply_mitigation(void);
94-
static void __init its_select_mitigation(void);
95-
static void __init its_update_mitigation(void);
96-
static void __init its_apply_mitigation(void);
97-
static void __init tsa_select_mitigation(void);
98-
static void __init tsa_apply_mitigation(void);
99-
static void __init vmscape_select_mitigation(void);
100-
static void __init vmscape_update_mitigation(void);
101-
static void __init vmscape_apply_mitigation(void);
102-
10356
/* The base value of the SPEC_CTRL MSR without task-specific bits set */
10457
u64 x86_spec_ctrl_base;
10558
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
@@ -233,99 +186,6 @@ static void __init cpu_print_attack_vectors(void)
233186
}
234187
}
235188

236-
void __init cpu_select_mitigations(void)
237-
{
238-
/*
239-
* Read the SPEC_CTRL MSR to account for reserved bits which may
240-
* have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
241-
* init code as it is not enumerated and depends on the family.
242-
*/
243-
if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
244-
rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
245-
246-
/*
247-
* Previously running kernel (kexec), may have some controls
248-
* turned ON. Clear them and let the mitigations setup below
249-
* rediscover them based on configuration.
250-
*/
251-
x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
252-
}
253-
254-
x86_arch_cap_msr = x86_read_arch_cap_msr();
255-
256-
cpu_print_attack_vectors();
257-
258-
/* Select the proper CPU mitigations before patching alternatives: */
259-
spectre_v1_select_mitigation();
260-
spectre_v2_select_mitigation();
261-
retbleed_select_mitigation();
262-
spectre_v2_user_select_mitigation();
263-
ssb_select_mitigation();
264-
l1tf_select_mitigation();
265-
mds_select_mitigation();
266-
taa_select_mitigation();
267-
mmio_select_mitigation();
268-
rfds_select_mitigation();
269-
srbds_select_mitigation();
270-
l1d_flush_select_mitigation();
271-
srso_select_mitigation();
272-
gds_select_mitigation();
273-
its_select_mitigation();
274-
bhi_select_mitigation();
275-
tsa_select_mitigation();
276-
vmscape_select_mitigation();
277-
278-
/*
279-
* After mitigations are selected, some may need to update their
280-
* choices.
281-
*/
282-
spectre_v2_update_mitigation();
283-
/*
284-
* retbleed_update_mitigation() relies on the state set by
285-
* spectre_v2_update_mitigation(); specifically it wants to know about
286-
* spectre_v2=ibrs.
287-
*/
288-
retbleed_update_mitigation();
289-
/*
290-
* its_update_mitigation() depends on spectre_v2_update_mitigation()
291-
* and retbleed_update_mitigation().
292-
*/
293-
its_update_mitigation();
294-
295-
/*
296-
* spectre_v2_user_update_mitigation() depends on
297-
* retbleed_update_mitigation(), specifically the STIBP
298-
* selection is forced for UNRET or IBPB.
299-
*/
300-
spectre_v2_user_update_mitigation();
301-
mds_update_mitigation();
302-
taa_update_mitigation();
303-
mmio_update_mitigation();
304-
rfds_update_mitigation();
305-
bhi_update_mitigation();
306-
/* srso_update_mitigation() depends on retbleed_update_mitigation(). */
307-
srso_update_mitigation();
308-
vmscape_update_mitigation();
309-
310-
spectre_v1_apply_mitigation();
311-
spectre_v2_apply_mitigation();
312-
retbleed_apply_mitigation();
313-
spectre_v2_user_apply_mitigation();
314-
ssb_apply_mitigation();
315-
l1tf_apply_mitigation();
316-
mds_apply_mitigation();
317-
taa_apply_mitigation();
318-
mmio_apply_mitigation();
319-
rfds_apply_mitigation();
320-
srbds_apply_mitigation();
321-
srso_apply_mitigation();
322-
gds_apply_mitigation();
323-
its_apply_mitigation();
324-
bhi_apply_mitigation();
325-
tsa_apply_mitigation();
326-
vmscape_apply_mitigation();
327-
}
328-
329189
/*
330190
* NOTE: This function is *only* called for SVM, since Intel uses
331191
* MSR_IA32_SPEC_CTRL for SSBD.
@@ -3371,6 +3231,99 @@ void cpu_bugs_smt_update(void)
33713231
mutex_unlock(&spec_ctrl_mutex);
33723232
}
33733233

3234+
void __init cpu_select_mitigations(void)
3235+
{
3236+
/*
3237+
* Read the SPEC_CTRL MSR to account for reserved bits which may
3238+
* have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
3239+
* init code as it is not enumerated and depends on the family.
3240+
*/
3241+
if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
3242+
rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
3243+
3244+
/*
3245+
* Previously running kernel (kexec), may have some controls
3246+
* turned ON. Clear them and let the mitigations setup below
3247+
* rediscover them based on configuration.
3248+
*/
3249+
x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
3250+
}
3251+
3252+
x86_arch_cap_msr = x86_read_arch_cap_msr();
3253+
3254+
cpu_print_attack_vectors();
3255+
3256+
/* Select the proper CPU mitigations before patching alternatives: */
3257+
spectre_v1_select_mitigation();
3258+
spectre_v2_select_mitigation();
3259+
retbleed_select_mitigation();
3260+
spectre_v2_user_select_mitigation();
3261+
ssb_select_mitigation();
3262+
l1tf_select_mitigation();
3263+
mds_select_mitigation();
3264+
taa_select_mitigation();
3265+
mmio_select_mitigation();
3266+
rfds_select_mitigation();
3267+
srbds_select_mitigation();
3268+
l1d_flush_select_mitigation();
3269+
srso_select_mitigation();
3270+
gds_select_mitigation();
3271+
its_select_mitigation();
3272+
bhi_select_mitigation();
3273+
tsa_select_mitigation();
3274+
vmscape_select_mitigation();
3275+
3276+
/*
3277+
* After mitigations are selected, some may need to update their
3278+
* choices.
3279+
*/
3280+
spectre_v2_update_mitigation();
3281+
/*
3282+
* retbleed_update_mitigation() relies on the state set by
3283+
* spectre_v2_update_mitigation(); specifically it wants to know about
3284+
* spectre_v2=ibrs.
3285+
*/
3286+
retbleed_update_mitigation();
3287+
/*
3288+
* its_update_mitigation() depends on spectre_v2_update_mitigation()
3289+
* and retbleed_update_mitigation().
3290+
*/
3291+
its_update_mitigation();
3292+
3293+
/*
3294+
* spectre_v2_user_update_mitigation() depends on
3295+
* retbleed_update_mitigation(), specifically the STIBP
3296+
* selection is forced for UNRET or IBPB.
3297+
*/
3298+
spectre_v2_user_update_mitigation();
3299+
mds_update_mitigation();
3300+
taa_update_mitigation();
3301+
mmio_update_mitigation();
3302+
rfds_update_mitigation();
3303+
bhi_update_mitigation();
3304+
/* srso_update_mitigation() depends on retbleed_update_mitigation(). */
3305+
srso_update_mitigation();
3306+
vmscape_update_mitigation();
3307+
3308+
spectre_v1_apply_mitigation();
3309+
spectre_v2_apply_mitigation();
3310+
retbleed_apply_mitigation();
3311+
spectre_v2_user_apply_mitigation();
3312+
ssb_apply_mitigation();
3313+
l1tf_apply_mitigation();
3314+
mds_apply_mitigation();
3315+
taa_apply_mitigation();
3316+
mmio_apply_mitigation();
3317+
rfds_apply_mitigation();
3318+
srbds_apply_mitigation();
3319+
srso_apply_mitigation();
3320+
gds_apply_mitigation();
3321+
its_apply_mitigation();
3322+
bhi_apply_mitigation();
3323+
tsa_apply_mitigation();
3324+
vmscape_apply_mitigation();
3325+
}
3326+
33743327
#ifdef CONFIG_SYSFS
33753328

33763329
#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

arch/x86/kernel/cpu/cpu.h

Lines changed: 0 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -42,15 +42,6 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
4242
*const __x86_cpu_dev_end[];
4343

4444
#ifdef CONFIG_CPU_SUP_INTEL
45-
enum tsx_ctrl_states {
46-
TSX_CTRL_ENABLE,
47-
TSX_CTRL_DISABLE,
48-
TSX_CTRL_RTM_ALWAYS_ABORT,
49-
TSX_CTRL_NOT_SUPPORTED,
50-
};
51-
52-
extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
53-
5445
extern void __init tsx_init(void);
5546
void tsx_ap_init(void);
5647
void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c);

arch/x86/kernel/cpu/tsx.c

Lines changed: 33 additions & 25 deletions
Original file line number | Diff line number | Diff line change
@@ -19,7 +19,17 @@
1919
#undef pr_fmt
2020
#define pr_fmt(fmt) "tsx: " fmt
2121

22-
enum tsx_ctrl_states tsx_ctrl_state __ro_after_init = TSX_CTRL_NOT_SUPPORTED;
22+
enum tsx_ctrl_states {
23+
TSX_CTRL_AUTO,
24+
TSX_CTRL_ENABLE,
25+
TSX_CTRL_DISABLE,
26+
TSX_CTRL_RTM_ALWAYS_ABORT,
27+
TSX_CTRL_NOT_SUPPORTED,
28+
};
29+
30+
static enum tsx_ctrl_states tsx_ctrl_state __ro_after_init =
31+
IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_AUTO) ? TSX_CTRL_AUTO :
32+
IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_OFF) ? TSX_CTRL_DISABLE : TSX_CTRL_ENABLE;
2333

2434
static void tsx_disable(void)
2535
{
@@ -156,11 +166,28 @@ static void tsx_dev_mode_disable(void)
156166
}
157167
}
158168

159-
void __init tsx_init(void)
169+
static int __init tsx_parse_cmdline(char *str)
160170
{
161-
char arg[5] = {};
162-
int ret;
171+
if (!str)
172+
return -EINVAL;
173+
174+
if (!strcmp(str, "on")) {
175+
tsx_ctrl_state = TSX_CTRL_ENABLE;
176+
} else if (!strcmp(str, "off")) {
177+
tsx_ctrl_state = TSX_CTRL_DISABLE;
178+
} else if (!strcmp(str, "auto")) {
179+
tsx_ctrl_state = TSX_CTRL_AUTO;
180+
} else {
181+
tsx_ctrl_state = TSX_CTRL_DISABLE;
182+
pr_err("invalid option, defaulting to off\n");
183+
}
184+
185+
return 0;
186+
}
187+
early_param("tsx", tsx_parse_cmdline);
163188

189+
void __init tsx_init(void)
190+
{
164191
tsx_dev_mode_disable();
165192

166193
/*
@@ -194,27 +221,8 @@ void __init tsx_init(void)
194221
return;
195222
}
196223

197-
ret = cmdline_find_option(boot_command_line, "tsx", arg, sizeof(arg));
198-
if (ret >= 0) {
199-
if (!strcmp(arg, "on")) {
200-
tsx_ctrl_state = TSX_CTRL_ENABLE;
201-
} else if (!strcmp(arg, "off")) {
202-
tsx_ctrl_state = TSX_CTRL_DISABLE;
203-
} else if (!strcmp(arg, "auto")) {
204-
tsx_ctrl_state = x86_get_tsx_auto_mode();
205-
} else {
206-
tsx_ctrl_state = TSX_CTRL_DISABLE;
207-
pr_err("invalid option, defaulting to off\n");
208-
}
209-
} else {
210-
/* tsx= not provided */
211-
if (IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_AUTO))
212-
tsx_ctrl_state = x86_get_tsx_auto_mode();
213-
else if (IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_OFF))
214-
tsx_ctrl_state = TSX_CTRL_DISABLE;
215-
else
216-
tsx_ctrl_state = TSX_CTRL_ENABLE;
217-
}
224+
if (tsx_ctrl_state == TSX_CTRL_AUTO)
225+
tsx_ctrl_state = x86_get_tsx_auto_mode();
218226

219227
if (tsx_ctrl_state == TSX_CTRL_DISABLE) {
220228
tsx_disable();

0 commit comments

Comments (0)