Skip to content

Commit e679970

Browse files
committed
x86/bugs: Get rid of the forward declarations
Get rid of the forward declarations of the mitigation functions by moving
their single caller below them.

No functional changes.

Suggested-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
Link: https://lore.kernel.org/r/20251105200447.GBaQut3w4dLilZrX-z@fat_crate.local
1 parent e9cc991 commit e679970

1 file changed

Lines changed: 93 additions & 140 deletions

File tree

arch/x86/kernel/cpu/bugs.c

Lines changed: 93 additions & 140 deletions
Original file line numberDiff line numberDiff line change
@@ -53,53 +53,6 @@
5353
* mitigation option.
5454
*/
5555

56-
static void __init spectre_v1_select_mitigation(void);
57-
static void __init spectre_v1_apply_mitigation(void);
58-
static void __init spectre_v2_select_mitigation(void);
59-
static void __init spectre_v2_update_mitigation(void);
60-
static void __init spectre_v2_apply_mitigation(void);
61-
static void __init retbleed_select_mitigation(void);
62-
static void __init retbleed_update_mitigation(void);
63-
static void __init retbleed_apply_mitigation(void);
64-
static void __init spectre_v2_user_select_mitigation(void);
65-
static void __init spectre_v2_user_update_mitigation(void);
66-
static void __init spectre_v2_user_apply_mitigation(void);
67-
static void __init ssb_select_mitigation(void);
68-
static void __init ssb_apply_mitigation(void);
69-
static void __init l1tf_select_mitigation(void);
70-
static void __init l1tf_apply_mitigation(void);
71-
static void __init mds_select_mitigation(void);
72-
static void __init mds_update_mitigation(void);
73-
static void __init mds_apply_mitigation(void);
74-
static void __init taa_select_mitigation(void);
75-
static void __init taa_update_mitigation(void);
76-
static void __init taa_apply_mitigation(void);
77-
static void __init mmio_select_mitigation(void);
78-
static void __init mmio_update_mitigation(void);
79-
static void __init mmio_apply_mitigation(void);
80-
static void __init rfds_select_mitigation(void);
81-
static void __init rfds_update_mitigation(void);
82-
static void __init rfds_apply_mitigation(void);
83-
static void __init srbds_select_mitigation(void);
84-
static void __init srbds_apply_mitigation(void);
85-
static void __init l1d_flush_select_mitigation(void);
86-
static void __init srso_select_mitigation(void);
87-
static void __init srso_update_mitigation(void);
88-
static void __init srso_apply_mitigation(void);
89-
static void __init gds_select_mitigation(void);
90-
static void __init gds_apply_mitigation(void);
91-
static void __init bhi_select_mitigation(void);
92-
static void __init bhi_update_mitigation(void);
93-
static void __init bhi_apply_mitigation(void);
94-
static void __init its_select_mitigation(void);
95-
static void __init its_update_mitigation(void);
96-
static void __init its_apply_mitigation(void);
97-
static void __init tsa_select_mitigation(void);
98-
static void __init tsa_apply_mitigation(void);
99-
static void __init vmscape_select_mitigation(void);
100-
static void __init vmscape_update_mitigation(void);
101-
static void __init vmscape_apply_mitigation(void);
102-
10356
/* The base value of the SPEC_CTRL MSR without task-specific bits set */
10457
u64 x86_spec_ctrl_base;
10558
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
@@ -233,99 +186,6 @@ static void __init cpu_print_attack_vectors(void)
233186
}
234187
}
235188

236-
void __init cpu_select_mitigations(void)
237-
{
238-
/*
239-
* Read the SPEC_CTRL MSR to account for reserved bits which may
240-
* have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
241-
* init code as it is not enumerated and depends on the family.
242-
*/
243-
if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
244-
rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
245-
246-
/*
247-
* Previously running kernel (kexec), may have some controls
248-
* turned ON. Clear them and let the mitigations setup below
249-
* rediscover them based on configuration.
250-
*/
251-
x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
252-
}
253-
254-
x86_arch_cap_msr = x86_read_arch_cap_msr();
255-
256-
cpu_print_attack_vectors();
257-
258-
/* Select the proper CPU mitigations before patching alternatives: */
259-
spectre_v1_select_mitigation();
260-
spectre_v2_select_mitigation();
261-
retbleed_select_mitigation();
262-
spectre_v2_user_select_mitigation();
263-
ssb_select_mitigation();
264-
l1tf_select_mitigation();
265-
mds_select_mitigation();
266-
taa_select_mitigation();
267-
mmio_select_mitigation();
268-
rfds_select_mitigation();
269-
srbds_select_mitigation();
270-
l1d_flush_select_mitigation();
271-
srso_select_mitigation();
272-
gds_select_mitigation();
273-
its_select_mitigation();
274-
bhi_select_mitigation();
275-
tsa_select_mitigation();
276-
vmscape_select_mitigation();
277-
278-
/*
279-
* After mitigations are selected, some may need to update their
280-
* choices.
281-
*/
282-
spectre_v2_update_mitigation();
283-
/*
284-
* retbleed_update_mitigation() relies on the state set by
285-
* spectre_v2_update_mitigation(); specifically it wants to know about
286-
* spectre_v2=ibrs.
287-
*/
288-
retbleed_update_mitigation();
289-
/*
290-
* its_update_mitigation() depends on spectre_v2_update_mitigation()
291-
* and retbleed_update_mitigation().
292-
*/
293-
its_update_mitigation();
294-
295-
/*
296-
* spectre_v2_user_update_mitigation() depends on
297-
* retbleed_update_mitigation(), specifically the STIBP
298-
* selection is forced for UNRET or IBPB.
299-
*/
300-
spectre_v2_user_update_mitigation();
301-
mds_update_mitigation();
302-
taa_update_mitigation();
303-
mmio_update_mitigation();
304-
rfds_update_mitigation();
305-
bhi_update_mitigation();
306-
/* srso_update_mitigation() depends on retbleed_update_mitigation(). */
307-
srso_update_mitigation();
308-
vmscape_update_mitigation();
309-
310-
spectre_v1_apply_mitigation();
311-
spectre_v2_apply_mitigation();
312-
retbleed_apply_mitigation();
313-
spectre_v2_user_apply_mitigation();
314-
ssb_apply_mitigation();
315-
l1tf_apply_mitigation();
316-
mds_apply_mitigation();
317-
taa_apply_mitigation();
318-
mmio_apply_mitigation();
319-
rfds_apply_mitigation();
320-
srbds_apply_mitigation();
321-
srso_apply_mitigation();
322-
gds_apply_mitigation();
323-
its_apply_mitigation();
324-
bhi_apply_mitigation();
325-
tsa_apply_mitigation();
326-
vmscape_apply_mitigation();
327-
}
328-
329189
/*
330190
* NOTE: This function is *only* called for SVM, since Intel uses
331191
* MSR_IA32_SPEC_CTRL for SSBD.
@@ -3376,6 +3236,99 @@ void cpu_bugs_smt_update(void)
33763236
mutex_unlock(&spec_ctrl_mutex);
33773237
}
33783238

3239+
/*
 * Entry point for boot-time CPU-bug mitigation setup.
 *
 * Runs in three strictly ordered phases over all known vulnerabilities:
 *   1) select  - each mitigation picks its preferred mode from cmdline/config;
 *   2) update  - mitigations whose choice depends on another mitigation's
 *                selection revisit their decision (ordering here matters,
 *                see the per-call comments below);
 *   3) apply   - the chosen mitigations are actually enabled, before
 *                alternatives patching relies on them.
 */
void __init cpu_select_mitigations(void)
{
	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
		rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

		/*
		 * A previously running kernel (kexec) may have left some
		 * controls enabled. Clear them so the selection below can
		 * rediscover them purely from the current configuration.
		 */
		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
	}

	x86_arch_cap_msr = x86_read_arch_cap_msr();

	cpu_print_attack_vectors();

	/* Phase 1: select the proper CPU mitigations before patching alternatives: */
	spectre_v1_select_mitigation();
	spectre_v2_select_mitigation();
	retbleed_select_mitigation();
	spectre_v2_user_select_mitigation();
	ssb_select_mitigation();
	l1tf_select_mitigation();
	mds_select_mitigation();
	taa_select_mitigation();
	mmio_select_mitigation();
	rfds_select_mitigation();
	srbds_select_mitigation();
	l1d_flush_select_mitigation();
	srso_select_mitigation();
	gds_select_mitigation();
	its_select_mitigation();
	bhi_select_mitigation();
	tsa_select_mitigation();
	vmscape_select_mitigation();

	/*
	 * Phase 2: after all mitigations have been selected, some may need
	 * to update their choices based on what the others picked.
	 */
	spectre_v2_update_mitigation();
	/*
	 * retbleed_update_mitigation() relies on the state set by
	 * spectre_v2_update_mitigation(); specifically it wants to know about
	 * spectre_v2=ibrs.
	 */
	retbleed_update_mitigation();
	/*
	 * its_update_mitigation() depends on spectre_v2_update_mitigation()
	 * and retbleed_update_mitigation().
	 */
	its_update_mitigation();

	/*
	 * spectre_v2_user_update_mitigation() depends on
	 * retbleed_update_mitigation(), specifically the STIBP
	 * selection is forced for UNRET or IBPB.
	 */
	spectre_v2_user_update_mitigation();
	mds_update_mitigation();
	taa_update_mitigation();
	mmio_update_mitigation();
	rfds_update_mitigation();
	bhi_update_mitigation();
	/* srso_update_mitigation() depends on retbleed_update_mitigation(). */
	srso_update_mitigation();
	vmscape_update_mitigation();

	/* Phase 3: enable everything that was finally chosen. */
	spectre_v1_apply_mitigation();
	spectre_v2_apply_mitigation();
	retbleed_apply_mitigation();
	spectre_v2_user_apply_mitigation();
	ssb_apply_mitigation();
	l1tf_apply_mitigation();
	mds_apply_mitigation();
	taa_apply_mitigation();
	mmio_apply_mitigation();
	rfds_apply_mitigation();
	srbds_apply_mitigation();
	srso_apply_mitigation();
	gds_apply_mitigation();
	its_apply_mitigation();
	bhi_apply_mitigation();
	tsa_apply_mitigation();
	vmscape_apply_mitigation();
}
3331+
33793332
#ifdef CONFIG_SYSFS
33803333

33813334
#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

0 commit comments

Comments
 (0)