Mirror of https://mirrors.bfsu.edu.cn/git/linux.git, synced 2024-12-24 19:45:06 +08:00
Commit 0b46e0a3ec

Replacing a 2K page table with a 4K page table while a VMA is active for the affected memory region is fundamentally broken. Rip out the page table reallocation code and replace it with a simple system control 'vm.allocate_pgste'. If the system control is set, the page tables for all processes are allocated as full 4K pages, even for processes that do not need it.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
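The change itself lives in the s390 page table allocation code, which is not shown on this page. As a minimal sketch of how such a system control is typically wired up (the identifier names, the 0..1 clamping, and the initcall below are illustrative assumptions, not quotes from the patch), an integer-backed ctl_table entry is registered under the 'vm' directory and consulted by the page table allocator:

	#include <linux/sysctl.h>

	/* Flag the page table allocator would consult (name assumed). */
	static int page_table_allocate_pgste;

	static int zero;
	static int one = 1;

	static struct ctl_table page_table_sysctl[] = {
		{
			.procname	= "allocate_pgste",
			.data		= &page_table_allocate_pgste,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_minmax,
			.extra1		= &zero,	/* clamp the value to 0..1 */
			.extra2		= &one,
		},
		{ }
	};

	static struct ctl_table page_table_sysctl_dir[] = {
		{
			.procname	= "vm",
			.maxlen		= 0,
			.mode		= 0555,
			.child		= page_table_sysctl,
		},
		{ }
	};

	static int __init page_table_register_sysctl(void)
	{
		return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
	}
	__initcall(page_table_register_sysctl);

With such an entry in place, the control appears as /proc/sys/vm/allocate_pgste and can be set with 'sysctl vm.allocate_pgste=1', after which newly created processes get full 4K page tables.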
45 lines · 1.0 KiB · C
#ifndef __MMU_H
#define __MMU_H

#include <linux/cpumask.h>
#include <linux/errno.h>

typedef struct {
	cpumask_t cpu_attach_mask;	/* CPUs the address space is attached to */
	atomic_t attach_count;		/* number of attachers, for TLB flushing */
	unsigned int flush_mm;		/* a full flush of the mm is pending */
	spinlock_t list_lock;		/* protects the two lists below */
	struct list_head pgtable_list;	/* page table fragments of this mm */
	struct list_head gmap_list;	/* guest mappings (KVM) of this mm */
	unsigned long asce_bits;	/* address-space-control-element bits */
	unsigned long asce_limit;	/* size of the address space */
	unsigned long vdso_base;	/* base address of the vdso mapping */
	/* The mmu context allocates 4K page tables. */
	unsigned int alloc_pgste:1;
	/* The mmu context uses extended page tables. */
	unsigned int has_pgste:1;
	/* The mmu context uses storage keys. */
	unsigned int use_skey:1;
} mm_context_t;

#define INIT_MM_CONTEXT(name)						    \
	.context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
	.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
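/*
 * A sketch of how INIT_MM_CONTEXT() is consumed (surrounding fields
 * elided, see mm/init-mm.c for the real definition): it expands to
 * designated initializers inside the static definition of init_mm:
 *
 *	struct mm_struct init_mm = {
 *		.mm_count	= ATOMIC_INIT(1),
 *		INIT_MM_CONTEXT(init_mm)
 *	};
 */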
static inline int tprot(unsigned long addr)
{
	int rc = -EFAULT;

	asm volatile(
		"	tprot	0(%1),0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (rc) : "a" (addr) : "cc");
	return rc;
}

#endif
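The tprot() helper wraps the TPROT instruction: 'ipm' inserts the resulting condition code into the upper bits of the 32-bit value in %0 and 'srl %0,28' shifts it down, so on success the function returns the condition code (0 means the address is accessible for both fetch and store). If TPROT itself faults, the exception table fixup branches to label 1 before 'ipm' can overwrite %0, so rc keeps its initial -EFAULT. A hypothetical caller (addr_is_writable() is not part of this file) might use it like this:

	/*
	 * Illustrative only: treat condition code 0 as "writable" and any
	 * other return value, including the -EFAULT fixup path, as not.
	 */
	static inline bool addr_is_writable(unsigned long addr)
	{
		return tprot(addr) == 0;
	}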