x86/mm: Randomize per-cpu entry area
Seth found that the CPU entry area, the piece of per-cpu data that is mapped into the userspace page tables for kPTI, is not subject to any randomization, irrespective of kASLR settings.

On x86_64, a whole P4D (512 GB) of virtual address space is reserved for this structure, which is plenty large enough to randomize things a little.

As such, use a straightforward randomization scheme that avoids duplicates to spread the existing CPUs over the available space.

[ bp: Fix le build. ]

Reported-by: Seth Jenkins <sethjenkins@google.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
This commit is contained in: parent 3f148f3318, commit 97e3d26b5e
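The scheme in the patch below is plain rejection sampling: pick a random slot, retry on a collision with any already-placed CPU. A minimal userspace sketch of the same loop (NCPUS, NSLOTS and the use of rand() are illustrative stand-ins, not from the patch; the kernel uses prandom_u32_max() and per-cpu variables):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define NCPUS   8
#define NSLOTS  64      /* must be >= NCPUS or the retry loop never ends */

static unsigned int slot_of[NCPUS];

int main(void)
{
        unsigned int i, j, slot;

        srand((unsigned int)time(NULL));

        for (i = 0; i < NCPUS; i++) {
again:
                slot = (unsigned int)rand() % NSLOTS;

                /* Reject any slot an earlier CPU already claimed. */
                for (j = 0; j < i; j++) {
                        if (slot_of[j] == slot)
                                goto again;
                }

                slot_of[i] = slot;
                printf("cpu %u -> slot %u\n", i, slot);
        }
        return 0;
}

As in init_cea_offsets() below, the expected number of retries stays tiny as long as the number of CPUs is far smaller than the number of slots.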
arch/x86/include/asm/cpu_entry_area.h:

@@ -130,10 +130,6 @@ struct cpu_entry_area {
 };
 
 #define CPU_ENTRY_AREA_SIZE        (sizeof(struct cpu_entry_area))
-#define CPU_ENTRY_AREA_ARRAY_SIZE  (CPU_ENTRY_AREA_SIZE * NR_CPUS)
-
-/* Total size includes the readonly IDT mapping page as well: */
-#define CPU_ENTRY_AREA_TOTAL_SIZE  (CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)
 
 DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
 DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
arch/x86/include/asm/pgtable_areas.h:

@@ -11,6 +11,12 @@
 
 #define CPU_ENTRY_AREA_RO_IDT_VADDR  ((void *)CPU_ENTRY_AREA_RO_IDT)
 
-#define CPU_ENTRY_AREA_MAP_SIZE  (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)
+#ifdef CONFIG_X86_32
+#define CPU_ENTRY_AREA_MAP_SIZE  (CPU_ENTRY_AREA_PER_CPU +          \
+                                  (CPU_ENTRY_AREA_SIZE * NR_CPUS) - \
+                                  CPU_ENTRY_AREA_BASE)
+#else
+#define CPU_ENTRY_AREA_MAP_SIZE  P4D_SIZE
+#endif
 
 #endif /* _ASM_X86_PGTABLE_AREAS_H */
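On 64-bit the map size is now simply the whole P4D reserved for the CPU entry area, which is where the commit message's 512 GB figure comes from (P4D_SHIFT is 39 on x86_64). A one-line sanity check of the arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned long p4d_size = 1UL << 39;     /* P4D_SIZE = 1 << P4D_SHIFT */

        printf("%lu GB\n", p4d_size >> 30);     /* prints 512 */
        return 0;
}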
arch/x86/kernel/hw_breakpoint.c:

@@ -266,7 +266,7 @@ static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
 
 	/* CPU entry area is always used for CPU entry */
 	if (within_area(addr, end, CPU_ENTRY_AREA_BASE,
-			CPU_ENTRY_AREA_TOTAL_SIZE))
+			CPU_ENTRY_AREA_MAP_SIZE))
 		return true;
 
 	/*
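within_area() is defined earlier in hw_breakpoint.c and is not part of this hunk; going by its call site it is a simple interval-overlap test. A sketch of the assumed semantics, so the hunk reads standalone:

#include <stdbool.h>

/* Assumed: true if the breakpoint range [addr, end] touches [base, base + size). */
static inline bool within_area(unsigned long addr, unsigned long end,
                               unsigned long base, unsigned long size)
{
        return end >= base && addr < (base + size);
}

Widening the second argument from CPU_ENTRY_AREA_TOTAL_SIZE to CPU_ENTRY_AREA_MAP_SIZE means hardware breakpoints are refused anywhere in the (now sparsely populated) randomization window, not just in the old contiguous per-cpu array.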
arch/x86/mm/cpu_entry_area.c:

@@ -16,16 +16,53 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage)
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
 DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
-#endif
 
-#ifdef CONFIG_X86_32
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, _cea_offset);
+
+static __always_inline unsigned int cea_offset(unsigned int cpu)
+{
+	return per_cpu(_cea_offset, cpu);
+}
+
+static __init void init_cea_offsets(void)
+{
+	unsigned int max_cea;
+	unsigned int i, j;
+
+	max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;
+
+	/* O(sodding terrible) */
+	for_each_possible_cpu(i) {
+		unsigned int cea;
+
+again:
+		cea = prandom_u32_max(max_cea);
+
+		for_each_possible_cpu(j) {
+			if (cea_offset(j) == cea)
+				goto again;
+
+			if (i == j)
+				break;
+		}
+
+		per_cpu(_cea_offset, i) = cea;
+	}
+}
+#else /* !X86_64 */
 DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);
+
+static __always_inline unsigned int cea_offset(unsigned int cpu)
+{
+	return cpu;
+}
+static inline void init_cea_offsets(void) { }
 #endif
 
 /* Is called from entry code, so must be noinstr */
 noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
 {
-	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
+	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cea_offset(cpu) * CPU_ENTRY_AREA_SIZE;
 	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
 
 	return (struct cpu_entry_area *) va;
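The max_cea computation above is the size of the randomization space: the 512 GB P4D, minus the readonly IDT page, divided by sizeof(struct cpu_entry_area). Plugging in an assumed area size of 256 KB (the real value depends on the kernel config) gives on the order of two million slots, i.e. roughly 21 bits of placement entropy:

#include <stdio.h>

int main(void)
{
        unsigned long map_size  = 1UL << 39;    /* CPU_ENTRY_AREA_MAP_SIZE = P4D_SIZE */
        unsigned long page_size = 4096;
        unsigned long cea_size  = 256UL * 1024; /* assumed sizeof(struct cpu_entry_area) */

        /* mirrors: max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE */
        printf("max_cea = %lu\n", (map_size - page_size) / cea_size);   /* 2097151 */
        return 0;
}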
@@ -211,7 +248,6 @@ static __init void setup_cpu_entry_area_ptes(void)
 
 	/* The +1 is for the readonly IDT: */
 	BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
-	BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
 	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
 
 	start = CPU_ENTRY_AREA_BASE;
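The dropped BUILD_BUG_ON() asserted that CPU_ENTRY_AREA_TOTAL_SIZE equalled CPU_ENTRY_AREA_MAP_SIZE; with CPU_ENTRY_AREA_TOTAL_SIZE removed in the first hunk, the assertion goes with it, while the 32-bit check on the line above still pins the map size exactly. For reference, a minimal C11 analogue of such a compile-time check (illustrative, not the kernel macro):

#include <assert.h>

#define BUILD_BUG_SKETCH(cond)  static_assert(!(cond), #cond)

/* Refuses to compile when the condition is true; costs nothing at runtime. */
BUILD_BUG_SKETCH(sizeof(unsigned long) < 4);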
@@ -227,6 +263,8 @@ void __init setup_cpu_entry_areas(void)
 {
 	unsigned int cpu;
 
+	init_cea_offsets();
+
 	setup_cpu_entry_area_ptes();
 
 	for_each_possible_cpu(cpu)