Skip to content

Commit fd2dee1

Browse files
author
Russell King (Oracle)
committed
ARM: fix branch predictor hardening
__do_user_fault() may be called with indeterminate interrupt enable state, which means we may be preemptible at this point. This causes problems when calling harden_branch_predictor(). For example, when called from a data abort, do_alignment_fault()->do_bad_area(). Move harden_branch_predictor() out of __do_user_fault() and into the calling contexts. Moving it into do_kernel_address_page_fault(), we can be sure that interrupts will be disabled here. Converting do_translation_fault() to use do_kernel_address_page_fault() rather than do_bad_area() means that we keep branch predictor handling for translation faults. Interrupts will also be disabled at this call site. do_sect_fault() needs special handling, so detect user mode accesses to kernel-addresses, and add an explicit call to branch predictor hardening. Finally, add branch predictor hardening to do_alignment() for the faulting case (user mode accessing kernel addresses) before interrupts are enabled. This should cover all cases where harden_branch_predictor() is called, ensuring that it always has interrupts disabled, and also ensuring that it is called early in each call path. Reviewed-by: Xie Yuanbin <xieyuanbin1@huawei.com> Tested-by: Xie Yuanbin <xieyuanbin1@huawei.com> Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
1 parent 7733bc7 commit fd2dee1

2 files changed

Lines changed: 31 additions & 14 deletions

File tree

arch/arm/mm/alignment.c

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,10 +19,11 @@
1919
#include <linux/init.h>
2020
#include <linux/sched/signal.h>
2121
#include <linux/uaccess.h>
22+
#include <linux/unaligned.h>
2223

2324
#include <asm/cp15.h>
2425
#include <asm/system_info.h>
25-
#include <linux/unaligned.h>
26+
#include <asm/system_misc.h>
2627
#include <asm/opcodes.h>
2728

2829
#include "fault.h"
@@ -809,6 +810,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
809810
int thumb2_32b = 0;
810811
int fault;
811812

813+
if (addr >= TASK_SIZE && user_mode(regs))
814+
harden_branch_predictor();
815+
812816
if (interrupts_enabled(regs))
813817
local_irq_enable();
814818

arch/arm/mm/fault.c

Lines changed: 26 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -198,9 +198,6 @@ __do_user_fault(unsigned long addr, unsigned int fsr, unsigned int sig,
198198
{
199199
struct task_struct *tsk = current;
200200

201-
if (addr > TASK_SIZE)
202-
harden_branch_predictor();
203-
204201
#ifdef CONFIG_DEBUG_USER
205202
if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
206203
((user_debug & UDBG_BUS) && (sig == SIGBUS))) {
@@ -269,8 +266,10 @@ do_kernel_address_page_fault(struct mm_struct *mm, unsigned long addr,
269266
/*
270267
* Fault from user mode for a kernel space address. User mode
271268
* should not be faulting in kernel space, which includes the
272-
* vector/khelper page. Send a SIGSEGV.
269+
* vector/khelper page. Handle the branch predictor hardening
270+
* while interrupts are still disabled, then send a SIGSEGV.
273271
*/
272+
harden_branch_predictor();
274273
__do_user_fault(addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
275274
} else {
276275
/*
@@ -485,16 +484,20 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
485484
* We enter here because the first level page table doesn't contain
486485
* a valid entry for the address.
487486
*
488-
* If the address is in kernel space (>= TASK_SIZE), then we are
489-
* probably faulting in the vmalloc() area.
487+
* If this is a user address (addr < TASK_SIZE), we handle this as a
488+
* normal page fault. This leaves the remainder of the function to handle
489+
* kernel address translation faults.
490490
*
491-
* If the init_task's first level page tables contains the relevant
492-
* entry, we copy the it to this task. If not, we send the process
493-
* a signal, fixup the exception, or oops the kernel.
491+
* Since user mode is not permitted to access kernel addresses, pass these
492+
* directly to do_kernel_address_page_fault() to handle.
494493
*
495-
* NOTE! We MUST NOT take any locks for this case. We may be in an
496-
* interrupt or a critical region, and should only copy the information
497-
* from the master page table, nothing more.
494+
* Otherwise, we're probably faulting in the vmalloc() area, so try to fix
495+
* that up. Note that we must not take any locks or enable interrupts in
496+
* this case.
497+
*
498+
* If vmalloc() fixup fails, that means the non-leaf page tables did not
499+
* contain an entry for this address, so handle this via
500+
* do_kernel_address_page_fault().
498501
*/
499502
#ifdef CONFIG_MMU
500503
static int __kprobes
@@ -560,7 +563,8 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
560563
return 0;
561564

562565
bad_area:
563-
do_bad_area(addr, fsr, regs);
566+
do_kernel_address_page_fault(current->mm, addr, fsr, regs);
567+
564568
return 0;
565569
}
566570
#else /* CONFIG_MMU */
@@ -580,7 +584,16 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
580584
static int
581585
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
582586
{
587+
/*
588+
* If this is a kernel address, but from user mode, then userspace
589+
* is trying bad stuff. Invoke the branch predictor handling.
590+
* Interrupts are disabled here.
591+
*/
592+
if (addr >= TASK_SIZE && user_mode(regs))
593+
harden_branch_predictor();
594+
583595
do_bad_area(addr, fsr, regs);
596+
584597
return 0;
585598
}
586599
#endif /* CONFIG_ARM_LPAE */

0 commit comments

Comments
 (0)