mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-15 08:44:14 +08:00
s390: add support for KFENCE

Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
[hca@linux.ibm.com: simplify/rework code]
Link: https://lore.kernel.org/r/20210728190254.3921642-4-hca@linux.ibm.com
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
This commit is contained in:
parent
f99e12b21b
commit
e41ba1115a
@ -138,6 +138,7 @@ config S390
|
||||
select HAVE_ARCH_JUMP_LABEL_RELATIVE
|
||||
select HAVE_ARCH_KASAN
|
||||
select HAVE_ARCH_KASAN_VMALLOC
|
||||
select HAVE_ARCH_KFENCE
|
||||
select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
|
||||
select HAVE_ARCH_SECCOMP_FILTER
|
||||
select HAVE_ARCH_SOFT_DIRTY
|
||||
|
42
arch/s390/include/asm/kfence.h
Normal file
42
arch/s390/include/asm/kfence.h
Normal file
@ -0,0 +1,42 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
|
||||
#ifndef _ASM_S390_KFENCE_H
|
||||
#define _ASM_S390_KFENCE_H
|
||||
|
||||
#include <linux/mm.h>
|
||||
#include <linux/kfence.h>
|
||||
#include <asm/set_memory.h>
|
||||
#include <asm/page.h>
|
||||
|
||||
void __kernel_map_pages(struct page *page, int numpages, int enable);
|
||||
|
||||
/*
 * Hook called by the KFENCE core when the pool is initialized.
 * Nothing to do on s390 at this point: the pool mapping is already
 * split to 4k pages earlier via kfence_split_mapping() (see the
 * comment there for why it cannot happen here), so just report success.
 */
static __always_inline bool arch_kfence_init_pool(void)
{
	return true;
}
|
||||
|
||||
/* Reduce an address to its page-aligned base (mask off the in-page offset). */
#define arch_kfence_test_address(addr) ((addr) & PAGE_MASK)
|
||||
|
||||
/*
 * Do not split kfence pool to 4k mapping with arch_kfence_init_pool(),
 * but earlier where page table allocations still happen with memblock.
 * Reason is that arch_kfence_init_pool() gets called when the system
 * is still in a limbo state - disabling and enabling bottom halves is
 * not yet allowed, but that is what our page_table_alloc() would do.
 */
static __always_inline void kfence_split_mapping(void)
{
#ifdef CONFIG_KFENCE
	unsigned long pool_pages = KFENCE_POOL_SIZE >> PAGE_SHIFT;

	/*
	 * Split the pool's mapping down to 4k pages so that individual
	 * pages can later be invalidated/validated independently by
	 * kfence_protect_page().
	 */
	set_memory_4k((unsigned long)__kfence_pool, pool_pages);
#endif
}
|
||||
|
||||
/*
 * Protect or unprotect a single kfence page: map the page out of
 * (protect == true) or back into (protect == false) the kernel page
 * tables via __kernel_map_pages().  Always succeeds on s390.
 */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	int enable = protect ? 0 : 1;

	__kernel_map_pages(virt_to_page(addr), 1, enable);
	return true;
}
|
||||
|
||||
#endif /* _ASM_S390_KFENCE_H */
|
@ -31,6 +31,7 @@
|
||||
#include <linux/kprobes.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/hugetlb.h>
|
||||
#include <linux/kfence.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/diag.h>
|
||||
#include <asm/gmap.h>
|
||||
@ -356,6 +357,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
|
||||
unsigned long address;
|
||||
unsigned int flags;
|
||||
vm_fault_t fault;
|
||||
bool is_write;
|
||||
|
||||
tsk = current;
|
||||
/*
|
||||
@ -369,6 +371,8 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
|
||||
|
||||
mm = tsk->mm;
|
||||
trans_exc_code = regs->int_parm_long;
|
||||
address = trans_exc_code & __FAIL_ADDR_MASK;
|
||||
is_write = (trans_exc_code & store_indication) == 0x400;
|
||||
|
||||
/*
|
||||
* Verify that the fault happened in user space, that
|
||||
@ -379,6 +383,8 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
|
||||
type = get_fault_type(regs);
|
||||
switch (type) {
|
||||
case KERNEL_FAULT:
|
||||
if (kfence_handle_page_fault(address, is_write, regs))
|
||||
return 0;
|
||||
goto out;
|
||||
case USER_FAULT:
|
||||
case GMAP_FAULT:
|
||||
@ -387,12 +393,11 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
|
||||
break;
|
||||
}
|
||||
|
||||
address = trans_exc_code & __FAIL_ADDR_MASK;
|
||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
|
||||
flags = FAULT_FLAG_DEFAULT;
|
||||
if (user_mode(regs))
|
||||
flags |= FAULT_FLAG_USER;
|
||||
if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
|
||||
if (access == VM_WRITE || is_write)
|
||||
flags |= FAULT_FLAG_WRITE;
|
||||
mmap_read_lock(mm);
|
||||
|
||||
|
@ -34,6 +34,7 @@
|
||||
#include <asm/processor.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/kfence.h>
|
||||
#include <asm/ptdump.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/lowcore.h>
|
||||
@ -200,7 +201,7 @@ void __init mem_init(void)
|
||||
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
|
||||
|
||||
pv_init();
|
||||
|
||||
kfence_split_mapping();
|
||||
/* Setup guest page hinting */
|
||||
cmma_init();
|
||||
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/facility.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/kfence.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/set_memory.h>
|
||||
|
||||
@ -326,7 +327,7 @@ int __set_memory(unsigned long addr, int numpages, unsigned long flags)
|
||||
return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEBUG_PAGEALLOC
|
||||
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
|
||||
|
||||
static void ipte_range(pte_t *pte, unsigned long address, int nr)
|
||||
{
|
||||
|
Loading…
Reference in New Issue
Block a user