@@ -1328,6 +1328,15 @@ static void queue_task_work(struct mce *m, char *msg, void (*func)(struct callba
 		task_work_add(current, &current->mce_kill_me, TWA_RESUME);
 }
 
+/* Handle unconfigured int18 (should never happen) */
+static noinstr void unexpected_machine_check(struct pt_regs *regs)
+{
+	instrumentation_begin();
+	pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
+	       smp_processor_id());
+	instrumentation_end();
+}
+
 /*
  * The actual machine check handler. This only handles real
  * exceptions when something got corrupted coming in through int 18.
@@ -1348,36 +1357,43 @@ static void queue_task_work(struct mce *m, char *msg, void (*func)(struct callba
  */
 noinstr void do_machine_check(struct pt_regs *regs)
 {
+	int worst = 0, order, no_way_out, kill_current_task, lmce;
 	DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
 	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
 	struct mca_config *cfg = &mca_cfg;
 	struct mce m, *final;
 	char *msg = NULL;
-	int worst = 0;
+
+	if (unlikely(mce_flags.p5))
+		return pentium_machine_check(regs);
+	else if (unlikely(mce_flags.winchip))
+		return winchip_machine_check(regs);
+	else if (unlikely(!mca_cfg.initialized))
+		return unexpected_machine_check(regs);
 
 	/*
 	 * Establish sequential order between the CPUs entering the machine
 	 * check handler.
 	 */
-	int order = -1;
+	order = -1;
 
 	/*
 	 * If no_way_out gets set, there is no safe way to recover from this
 	 * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway.
 	 */
-	int no_way_out = 0;
+	no_way_out = 0;
 
 	/*
 	 * If kill_current_task is not set, there might be a way to recover from this
 	 * error.
 	 */
-	int kill_current_task = 0;
+	kill_current_task = 0;
 
 	/*
 	 * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES
 	 * on Intel.
 	 */
-	int lmce = 1;
+	lmce = 1;
 
 	this_cpu_inc(mce_exception_count);
@@ -1855,9 +1871,11 @@ static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
 	switch (c->x86_vendor) {
 	case X86_VENDOR_INTEL:
 		intel_p5_mcheck_init(c);
+		mce_flags.p5 = 1;
 		return 1;
 	case X86_VENDOR_CENTAUR:
 		winchip_mcheck_init(c);
+		mce_flags.winchip = 1;
 		return 1;
 	default:
 		return 0;
@@ -2012,18 +2030,6 @@ bool filter_mce(struct mce *m)
 	return false;
 }
 
-/* Handle unconfigured int18 (should never happen) */
-static noinstr void unexpected_machine_check(struct pt_regs *regs)
-{
-	instrumentation_begin();
-	pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
-	       smp_processor_id());
-	instrumentation_end();
-}
-
-/* Call the installed machine check handler for this CPU setup. */
-void (*machine_check_vector)(struct pt_regs *) = unexpected_machine_check;
-
 static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
 {
 	irqentry_state_t irq_state;
@@ -2034,31 +2040,22 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
 	 * Only required when from kernel mode. See
 	 * mce_check_crashing_cpu() for details.
 	 */
-	if (machine_check_vector == do_machine_check &&
-	    mce_check_crashing_cpu())
+	if (mca_cfg.initialized && mce_check_crashing_cpu())
 		return;
 
 	irq_state = irqentry_nmi_enter(regs);
-	/*
-	 * The call targets are marked noinstr, but objtool can't figure
-	 * that out because it's an indirect call. Annotate it.
-	 */
-	instrumentation_begin();
 
-	machine_check_vector(regs);
+	do_machine_check(regs);
 
-	instrumentation_end();
 	irqentry_nmi_exit(regs, irq_state);
 }
 
 static __always_inline void exc_machine_check_user(struct pt_regs *regs)
 {
 	irqentry_enter_from_user_mode(regs);
-	instrumentation_begin();
 
-	machine_check_vector(regs);
+	do_machine_check(regs);
 
-	instrumentation_end();
 	irqentry_exit_to_user_mode(regs);
 }
 
@@ -2125,7 +2122,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
 		return;
 	}
 
-	machine_check_vector = do_machine_check;
+	mca_cfg.initialized = 1;
 
 	__mcheck_cpu_init_early(c);
 	__mcheck_cpu_init_generic();
0 commit comments