x86: do_page_fault small unification
Copy the prefetch of mmap_sem from X86_64 and move the notify_page_fault() check (soon to be kprobe_handle_fault) out of the unlikely if() statement. This brings the X86_32 and X86_64 page-fault handlers closer to each other.

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 608566b4ed
parent f2857ce920
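In rough terms, after this patch both the 32-bit and 64-bit handlers open with the same sequence: take current->mm, prefetch mmap_sem for the down_read() that will almost certainly follow, read the faulting address from CR2, and give notify_page_fault() (kprobes) first chance before the vmalloc fast path. Below is a minimal sketch of that shared prologue, simplified from the hunks that follow rather than quoted verbatim from the kernel source:

void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	unsigned long address;
	int si_code;

	trace_hardirqs_fixup();

	tsk = current;
	mm = tsk->mm;
	prefetchw(&mm->mmap_sem);	/* warm the rwsem we are likely to down_read() */

	/* get the faulting address */
	address = read_cr2();

	si_code = SEGV_MAPERR;

	/* let kprobes claim the fault before any further processing */
	if (notify_page_fault(regs))
		return;

	/* ... kernel-address (vmalloc) handling, then the user-space path ... */
}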
@@ -295,13 +295,18 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	 */
 	trace_hardirqs_fixup();
 
+	tsk = current;
+	mm = tsk->mm;
+	prefetchw(&mm->mmap_sem);
+
 	/* get the address */
 	address = read_cr2();
 
-	tsk = current;
-
 	si_code = SEGV_MAPERR;
 
+	if (notify_page_fault(regs))
+		return;
+
 	/*
 	 * We fault-in kernel-space virtual memory on-demand. The
 	 * 'reference' page table is init_mm.pgd.
@@ -319,8 +324,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
 		    vmalloc_fault(address) >= 0)
 			return;
-		if (notify_page_fault(regs))
-			return;
 		/*
 		 * Don't take the mm semaphore here. If we fixup a prefetch
 		 * fault we could otherwise deadlock.
@@ -328,16 +331,11 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 		goto bad_area_nosemaphore;
 	}
 
-	if (notify_page_fault(regs))
-		return;
-
 	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
 	   fault has been handled. */
 	if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
 		local_irq_enable();
 
-	mm = tsk->mm;
-
 	/*
 	 * If we're in an interrupt, have no user context or are running in an
 	 * atomic region then we must not take the fault.
@@ -355,6 +355,8 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 
 	si_code = SEGV_MAPERR;
 
+	if (notify_page_fault(regs))
+		return;
 
 	/*
 	 * We fault-in kernel-space virtual memory on-demand. The
@@ -380,8 +382,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 			if (vmalloc_fault(address) >= 0)
 				return;
 		}
-		if (notify_page_fault(regs))
-			return;
 		/*
 		 * Don't take the mm semaphore here. If we fixup a prefetch
 		 * fault we could otherwise deadlock.
@@ -389,9 +389,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 		goto bad_area_nosemaphore;
 	}
 
-	if (notify_page_fault(regs))
-		return;
-
 	if (likely(regs->flags & X86_EFLAGS_IF))
 		local_irq_enable();
 