@@ -232,37 +232,11 @@ static inline bool is_permission_fault(unsigned int fsr)
232232 return false;
233233}
234234
235- static vm_fault_t __kprobes
236- __do_page_fault (struct mm_struct * mm , unsigned long addr , unsigned int flags ,
237- unsigned long vma_flags , struct pt_regs * regs )
238- {
239- struct vm_area_struct * vma = find_vma (mm , addr );
240- if (unlikely (!vma ))
241- return VM_FAULT_BADMAP ;
242-
243- if (unlikely (vma -> vm_start > addr )) {
244- if (!(vma -> vm_flags & VM_GROWSDOWN ))
245- return VM_FAULT_BADMAP ;
246- if (addr < FIRST_USER_ADDRESS )
247- return VM_FAULT_BADMAP ;
248- if (expand_stack (vma , addr ))
249- return VM_FAULT_BADMAP ;
250- }
251-
252- /*
253- * ok, we have a good vm_area for this memory access, check the
254- * permissions on the VMA allow for the fault which occurred.
255- */
256- if (!(vma -> vm_flags & vma_flags ))
257- return VM_FAULT_BADACCESS ;
258-
259- return handle_mm_fault (vma , addr & PAGE_MASK , flags , regs );
260- }
261-
262235static int __kprobes
263236do_page_fault (unsigned long addr , unsigned int fsr , struct pt_regs * regs )
264237{
265238 struct mm_struct * mm = current -> mm ;
239+ struct vm_area_struct * vma ;
266240 int sig , code ;
267241 vm_fault_t fault ;
268242 unsigned int flags = FAULT_FLAG_DEFAULT ;
@@ -301,31 +275,21 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
301275
302276 perf_sw_event (PERF_COUNT_SW_PAGE_FAULTS , 1 , regs , addr );
303277
304- /*
305- * As per x86, we may deadlock here. However, since the kernel only
306- * validly references user space from well defined areas of the code,
307- * we can bug out early if this is from code which shouldn't.
308- */
309- if (!mmap_read_trylock (mm )) {
310- if (!user_mode (regs ) && !search_exception_tables (regs -> ARM_pc ))
311- goto no_context ;
312278retry :
313- mmap_read_lock (mm );
314- } else {
315- /*
316- * The above down_read_trylock() might have succeeded in
317- * which case, we'll have missed the might_sleep() from
318- * down_read()
319- */
320- might_sleep ();
321- #ifdef CONFIG_DEBUG_VM
322- if (!user_mode (regs ) &&
323- !search_exception_tables (regs -> ARM_pc ))
324- goto no_context ;
325- #endif
279+ vma = lock_mm_and_find_vma (mm , addr , regs );
280+ if (unlikely (!vma )) {
281+ fault = VM_FAULT_BADMAP ;
282+ goto bad_area ;
326283 }
327284
328- fault = __do_page_fault (mm , addr , flags , vm_flags , regs );
285+ /*
286+ * ok, we have a good vm_area for this memory access, check the
287+ * permissions on the VMA allow for the fault which occurred.
288+ */
289+ if (!(vma -> vm_flags & vm_flags ))
290+ fault = VM_FAULT_BADACCESS ;
291+ else
292+ fault = handle_mm_fault (vma , addr & PAGE_MASK , flags , regs );
329293
330294 /* If we need to retry but a fatal signal is pending, handle the
331295 * signal first. We do not need to release the mmap_lock because
@@ -356,6 +320,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
356320 if (likely (!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS ))))
357321 return 0 ;
358322
323+ bad_area :
359324 /*
360325 * If we are in kernel mode at this point, we
361326 * have no context to handle this fault with.