Skip to content

Commit 7c194d8

Browse files
hca (Heiko Carstens) authored and Vasily Gorbik committed
s390/mm,fault: remove VM_FAULT_BADMAP and VM_FAULT_BADACCESS
Remove the last two private vm_fault reasons: VM_FAULT_BADMAP and VM_FAULT_BADACCESS. In order to achieve this, add an si_code parameter to do_no_context() and its wrappers, and directly call the wrappers instead of relying on do_fault_error() handling.

Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
1 parent b61a092 commit 7c194d8

1 file changed

Lines changed: 51 additions & 85 deletions

File tree

arch/s390/mm/fault.c

Lines changed: 51 additions & 85 deletions
Original file line numberDiff line numberDiff line change
@@ -45,13 +45,6 @@
4545
#include <asm/uv.h>
4646
#include "../kernel/entry.h"
4747

48-
/*
49-
* Allocate private vm_fault_reason from top.
50-
* Please make sure it won't collide with vm_fault_reason.
51-
*/
52-
#define VM_FAULT_BADMAP ((__force vm_fault_t)0x40000000)
53-
#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x20000000)
54-
5548
enum fault_type {
5649
KERNEL_FAULT,
5750
USER_FAULT,
@@ -232,12 +225,17 @@ static void do_sigsegv(struct pt_regs *regs, int si_code)
232225
force_sig_fault(SIGSEGV, si_code, (void __user *)get_fault_address(regs));
233226
}
234227

235-
static void do_no_context(struct pt_regs *regs)
228+
static void do_no_context(struct pt_regs *regs, int si_code)
236229
{
237230
enum fault_type fault_type;
238231
unsigned long address;
239232
bool is_write;
240233

234+
if (user_mode(regs)) {
235+
if (WARN_ON_ONCE(!si_code))
236+
si_code = SEGV_MAPERR;
237+
return do_sigsegv(regs, si_code);
238+
}
241239
if (fixup_exception(regs))
242240
return;
243241
fault_type = get_fault_type(regs);
@@ -255,17 +253,17 @@ static void do_no_context(struct pt_regs *regs)
255253
die(regs, "Oops");
256254
}
257255

258-
static inline void handle_fault_error_nolock(struct pt_regs *regs)
256+
static inline void handle_fault_error_nolock(struct pt_regs *regs, int si_code)
259257
{
260-
do_no_context(regs);
258+
do_no_context(regs, si_code);
261259
}
262260

263-
static void handle_fault_error(struct pt_regs *regs)
261+
static void handle_fault_error(struct pt_regs *regs, int si_code)
264262
{
265263
struct mm_struct *mm = current->mm;
266264

267265
mmap_read_unlock(mm);
268-
handle_fault_error_nolock(regs);
266+
handle_fault_error_nolock(regs, si_code);
269267
}
270268

271269
static void do_sigbus(struct pt_regs *regs)
@@ -275,43 +273,26 @@ static void do_sigbus(struct pt_regs *regs)
275273

276274
static void do_fault_error(struct pt_regs *regs, vm_fault_t fault)
277275
{
278-
int si_code;
279-
280-
switch (fault) {
281-
case VM_FAULT_BADACCESS:
282-
case VM_FAULT_BADMAP:
283-
/* Bad memory access. Check if it is kernel or user space. */
284-
if (user_mode(regs)) {
285-
/* User mode accesses just cause a SIGSEGV */
286-
si_code = (fault == VM_FAULT_BADMAP) ?
287-
SEGV_MAPERR : SEGV_ACCERR;
288-
do_sigsegv(regs, si_code);
289-
break;
290-
}
291-
do_no_context(regs);
292-
break;
293-
default: /* fault & VM_FAULT_ERROR */
294-
if (fault & VM_FAULT_OOM) {
295-
if (!user_mode(regs))
296-
do_no_context(regs);
297-
else
298-
pagefault_out_of_memory();
299-
} else if (fault & VM_FAULT_SIGSEGV) {
300-
/* Kernel mode? Handle exceptions or die */
301-
if (!user_mode(regs))
302-
do_no_context(regs);
303-
else
304-
do_sigsegv(regs, SEGV_MAPERR);
305-
} else if (fault & VM_FAULT_SIGBUS) {
306-
/* Kernel mode? Handle exceptions or die */
307-
if (!user_mode(regs))
308-
do_no_context(regs);
309-
else
310-
do_sigbus(regs);
311-
} else {
312-
BUG();
313-
}
314-
break;
276+
/* fault & VM_FAULT_ERROR */
277+
if (fault & VM_FAULT_OOM) {
278+
if (!user_mode(regs))
279+
do_no_context(regs, 0);
280+
else
281+
pagefault_out_of_memory();
282+
} else if (fault & VM_FAULT_SIGSEGV) {
283+
/* Kernel mode? Handle exceptions or die */
284+
if (!user_mode(regs))
285+
do_no_context(regs, 0);
286+
else
287+
do_sigsegv(regs, SEGV_MAPERR);
288+
} else if (fault & VM_FAULT_SIGBUS) {
289+
/* Kernel mode? Handle exceptions or die */
290+
if (!user_mode(regs))
291+
do_no_context(regs, 0);
292+
else
293+
do_sigbus(regs);
294+
} else {
295+
BUG();
315296
}
316297
}
317298

@@ -352,11 +333,11 @@ static void do_exception(struct pt_regs *regs, int access)
352333
type = get_fault_type(regs);
353334
switch (type) {
354335
case KERNEL_FAULT:
355-
return handle_fault_error_nolock(regs);
336+
return handle_fault_error_nolock(regs, 0);
356337
case USER_FAULT:
357338
case GMAP_FAULT:
358339
if (faulthandler_disabled() || !mm)
359-
return handle_fault_error_nolock(regs);
340+
return handle_fault_error_nolock(regs, 0);
360341
break;
361342
}
362343
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
@@ -389,7 +370,7 @@ static void do_exception(struct pt_regs *regs, int access)
389370
/* Quick path to respond to signals */
390371
if (fault_signal_pending(fault, regs)) {
391372
if (!user_mode(regs))
392-
handle_fault_error_nolock(regs);
373+
handle_fault_error_nolock(regs, 0);
393374
return;
394375
}
395376
lock_mmap:
@@ -401,34 +382,30 @@ static void do_exception(struct pt_regs *regs, int access)
401382
current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
402383
current->thread.gmap_int_code = regs->int_code & 0xffff;
403384
address = __gmap_translate(gmap, address);
404-
if (address == -EFAULT) {
405-
fault = VM_FAULT_BADMAP;
406-
goto out_up;
407-
}
385+
if (address == -EFAULT)
386+
return handle_fault_error(regs, SEGV_MAPERR);
408387
if (gmap->pfault_enabled)
409388
flags |= FAULT_FLAG_RETRY_NOWAIT;
410389
}
411390
retry:
412-
fault = VM_FAULT_BADMAP;
413391
vma = find_vma(mm, address);
414392
if (!vma)
415-
goto out_up;
393+
return handle_fault_error(regs, SEGV_MAPERR);
416394
if (unlikely(vma->vm_start > address)) {
417395
if (!(vma->vm_flags & VM_GROWSDOWN))
418-
goto out_up;
396+
return handle_fault_error(regs, SEGV_MAPERR);
419397
vma = expand_stack(mm, address);
420398
if (!vma)
421-
goto out;
399+
return handle_fault_error_nolock(regs, SEGV_MAPERR);
422400
}
423-
fault = VM_FAULT_BADACCESS;
424401
if (unlikely(!(vma->vm_flags & access)))
425-
goto out_up;
402+
return handle_fault_error(regs, SEGV_ACCERR);
426403
fault = handle_mm_fault(vma, address, flags, regs);
427404
if (fault_signal_pending(fault, regs)) {
428405
if (flags & FAULT_FLAG_RETRY_NOWAIT)
429406
mmap_read_unlock(mm);
430407
if (!user_mode(regs))
431-
handle_fault_error_nolock(regs);
408+
handle_fault_error_nolock(regs, 0);
432409
return;
433410
}
434411
/* The fault is fully completed (including releasing mmap lock) */
@@ -449,7 +426,7 @@ static void do_exception(struct pt_regs *regs, int access)
449426
* mmap_lock has not been released
450427
*/
451428
current->thread.gmap_pfault = 1;
452-
return handle_fault_error(regs);
429+
return handle_fault_error(regs, 0);
453430
}
454431
flags &= ~FAULT_FLAG_RETRY_NOWAIT;
455432
flags |= FAULT_FLAG_TRIED;
@@ -460,10 +437,8 @@ static void do_exception(struct pt_regs *regs, int access)
460437
if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
461438
address = __gmap_link(gmap, current->thread.gmap_addr,
462439
address);
463-
if (address == -EFAULT) {
464-
fault = VM_FAULT_BADMAP;
465-
goto out_up;
466-
}
440+
if (address == -EFAULT)
441+
return handle_fault_error(regs, SEGV_MAPERR);
467442
if (address == -ENOMEM) {
468443
fault = VM_FAULT_OOM;
469444
goto out_up;
@@ -502,12 +477,11 @@ void do_protection_exception(struct pt_regs *regs)
502477
* Low-address protection in kernel mode means
503478
* NULL pointer write access in kernel mode.
504479
*/
505-
return do_no_context(regs);
480+
return do_no_context(regs, 0);
506481
}
507482
if (unlikely(MACHINE_HAS_NX && teid.b56)) {
508483
regs->int_parm_long = (teid.addr * PAGE_SIZE) | (regs->psw.addr & PAGE_MASK);
509-
do_fault_error(regs, VM_FAULT_BADACCESS);
510-
return;
484+
return handle_fault_error_nolock(regs, SEGV_ACCERR);
511485
}
512486
do_exception(regs, VM_WRITE);
513487
}
@@ -560,20 +534,15 @@ void do_secure_storage_access(struct pt_regs *regs)
560534
mmap_read_lock(mm);
561535
addr = __gmap_translate(gmap, addr);
562536
mmap_read_unlock(mm);
563-
if (IS_ERR_VALUE(addr)) {
564-
do_fault_error(regs, VM_FAULT_BADMAP);
565-
break;
566-
}
537+
if (IS_ERR_VALUE(addr))
538+
return handle_fault_error_nolock(regs, SEGV_MAPERR);
567539
fallthrough;
568540
case USER_FAULT:
569541
mm = current->mm;
570542
mmap_read_lock(mm);
571543
vma = find_vma(mm, addr);
572-
if (!vma) {
573-
mmap_read_unlock(mm);
574-
do_fault_error(regs, VM_FAULT_BADMAP);
575-
break;
576-
}
544+
if (!vma)
545+
return handle_fault_error(regs, SEGV_MAPERR);
577546
page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
578547
if (IS_ERR_OR_NULL(page)) {
579548
mmap_read_unlock(mm);
@@ -604,11 +573,8 @@ void do_non_secure_storage_access(struct pt_regs *regs)
604573
struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
605574
unsigned long gaddr = get_fault_address(regs);
606575

607-
if (get_fault_type(regs) != GMAP_FAULT) {
608-
do_fault_error(regs, VM_FAULT_BADMAP);
609-
WARN_ON_ONCE(1);
610-
return;
611-
}
576+
if (WARN_ON_ONCE(get_fault_type(regs) != GMAP_FAULT))
577+
return handle_fault_error_nolock(regs, SEGV_MAPERR);
612578
if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
613579
send_sig(SIGSEGV, current, 0);
614580
}

0 commit comments

Comments (0)