mm/core, x86/mm/pkeys: Differentiate instruction fetches
As discussed earlier, we attempt to enforce protection keys in software.

However, the code checks all faults to ensure that they are not violating
protection key permissions. It was assumed that all faults are either write
faults where we check PKRU[key].WD (write disable) or read faults where we
check the AD (access disable) bit.

But, there is a third category of faults for protection keys: instruction
faults. Instruction faults never run afoul of protection keys because they
do not affect instruction fetches.

So, plumb the PF_INSTR bit down in to the arch_vma_access_permitted()
function where we do the protection key checks.

We also add a new FAULT_FLAG_INSTRUCTION. This is because handle_mm_fault()
is not passed the architecture-specific error_code where we keep PF_INSTR,
so we need to encode the instruction-fetch information in to the
arch-generic fault flags.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave@sr71.net>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20160212210224.96928009@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
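For context on the behavior this patch accommodates, here is a minimal,
hypothetical userspace sketch (not part of this commit) showing that a
PKRU access-disable blocks data reads of a page while instruction fetches
from the same page still succeed. It assumes an x86 CPU and kernel with
protection-keys support plus glibc's pkey_alloc()/pkey_mprotect()/pkey_set()
wrappers; everything in it, including the RET-filled page, is illustrative
only.

/*
 * Hypothetical demo, not from this commit: PKRU access-disable stops
 * data loads but does not affect instruction fetches.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	void *page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE | PROT_EXEC,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (page == MAP_FAILED)
		return 1;

	/* Fill the page with x86 RET (0xc3) so calling into it just returns. */
	memset(page, 0xc3, pagesz);

	int pkey = pkey_alloc(0, 0);
	if (pkey < 0)
		return 1;	/* no pkeys support on this CPU/kernel */

	/* Tag the page with the key, then disable all data access in PKRU. */
	if (pkey_mprotect(page, pagesz, PROT_READ | PROT_WRITE | PROT_EXEC, pkey))
		return 1;
	pkey_set(pkey, PKEY_DISABLE_ACCESS);

	/* Instruction fetch: pkeys do not apply, so this call succeeds. */
	((void (*)(void))page)();
	printf("executed from an access-disabled page\n");

	/* A data load such as the following would die with SEGV_PKUERR: */
	/* volatile char c = *(char *)page; */

	pkey_set(pkey, 0);
	pkey_free(pkey);
	return 0;
}

On hardware or kernels without pkeys, pkey_alloc() fails and the sketch
exits early. The diff below is the kernel-side change that lets the
instruction fetch above bypass the protection-key check.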
commit d61172b4b6
parent 07f146f53e
@@ -149,7 +149,7 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
 }
 
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
-		bool write, bool foreign)
+		bool write, bool execute, bool foreign)
 {
 	/* by default, allow everything */
 	return true;
@@ -131,7 +131,7 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
 }
 
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
-		bool write, bool foreign)
+		bool write, bool execute, bool foreign)
 {
 	/* by default, allow everything */
 	return true;
@@ -323,8 +323,11 @@ static inline bool vma_is_foreign(struct vm_area_struct *vma)
 }
 
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
-		bool write, bool foreign)
+		bool write, bool execute, bool foreign)
 {
+	/* pkeys never affect instruction fetches */
+	if (execute)
+		return true;
 	/* allow access if the VMA is not one from this process */
 	if (foreign || vma_is_foreign(vma))
 		return true;
@@ -908,7 +908,8 @@ static inline bool bad_area_access_from_pkeys(unsigned long error_code,
 	if (error_code & PF_PK)
 		return true;
 	/* this checks permission keys on the VMA: */
-	if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), foreign))
+	if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
+				(error_code & PF_INSTR), foreign))
 		return true;
 	return false;
 }
@@ -1112,7 +1113,8 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
 	 * faults just to hit a PF_PK as soon as we fill in a
 	 * page.
 	 */
-	if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), foreign))
+	if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
+				(error_code & PF_INSTR), foreign))
 		return 1;
 
 	if (error_code & PF_WRITE) {
@@ -1267,6 +1269,8 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 
 	if (error_code & PF_WRITE)
 		flags |= FAULT_FLAG_WRITE;
+	if (error_code & PF_INSTR)
+		flags |= FAULT_FLAG_INSTRUCTION;
 
 	/*
 	 * When running in the kernel we expect faults to occur only to
@@ -27,7 +27,7 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
 }
 
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
-		bool write, bool foreign)
+		bool write, bool execute, bool foreign)
 {
 	/* by default, allow everything */
 	return true;
@@ -252,6 +252,7 @@ extern pgprot_t protection_map[16];
 #define FAULT_FLAG_TRIED	0x20	/* Second try */
 #define FAULT_FLAG_USER		0x40	/* The fault originated in userspace */
 #define FAULT_FLAG_REMOTE	0x80	/* faulting for non current tsk/mm */
+#define FAULT_FLAG_INSTRUCTION	0x100	/* The fault was during an instruction fetch */
 
 /*
  * vm_fault is filled by the the pagefault handler and passed to the vma's
mm/gup.c (11 lines changed)
@@ -449,7 +449,11 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
 		if (!(vm_flags & VM_MAYREAD))
 			return -EFAULT;
 	}
-	if (!arch_vma_access_permitted(vma, write, foreign))
+	/*
+	 * gups are always data accesses, not instruction
+	 * fetches, so execute=false here
+	 */
+	if (!arch_vma_access_permitted(vma, write, false, foreign))
 		return -EFAULT;
 	return 0;
 }
@@ -629,8 +633,11 @@ bool vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags)
 	/*
 	 * The architecture might have a hardware protection
 	 * mechanism other than read/write that can deny access.
+	 *
+	 * gup always represents data access, not instruction
+	 * fetches, so execute=false here:
 	 */
-	if (!arch_vma_access_permitted(vma, write, foreign))
+	if (!arch_vma_access_permitted(vma, write, false, foreign))
 		return false;
 
 	return true;
@@ -3380,6 +3380,7 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_t *pte;
 
 	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
+					    flags & FAULT_FLAG_INSTRUCTION,
 					    flags & FAULT_FLAG_REMOTE))
 		return VM_FAULT_SIGSEGV;
 