s390: avoid z13 cache aliasing

Avoid cache aliasing on z13 by aligning shared objects to multiples
of 512K. The virtual addresses of a page from a shared file need
to have identical bits in the range 2^12 to 2^18.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
commit 1f6b83e5e4
parent f8b2dcbd9e
Author: Martin Schwidefsky
Date: 2015-01-14 17:51:17 +01:00

5 changed files with 155 additions and 18 deletions
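For illustration only (not part of the patch; all addresses and offsets below are made up), the 2^12 to 2^18 bit range corresponds to a 512K page "color", i.e. an address mask of 0x7f000. A minimal user-space sketch of the constraint:

/*
 * Illustration of the aliasing constraint: two mappings of the same
 * shared file page must agree in address bits 12..18 (the 512K color).
 */
#include <stdio.h>

#define COLOR_MASK 0x7f000UL    /* address bits 2^12 .. 2^18 */

static unsigned long color(unsigned long addr_or_offset)
{
        return addr_or_offset & COLOR_MASK;
}

int main(void)
{
        unsigned long file_off = 0x00023000UL;     /* made-up file offset */
        unsigned long good_va  = 0x3ffff623000UL;  /* same bits 12..18 */
        unsigned long bad_va   = 0x3ffff781000UL;  /* different bits 12..18 */

        printf("good: %d  bad: %d\n",
               color(good_va) == color(file_off),
               color(bad_va) == color(file_off));
        return 0;
}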

arch/s390/include/asm/elf.h

@@ -163,8 +163,8 @@ extern unsigned int vdso_enabled;
    the loader. We need to make sure that it is out of the way of the program
    that it will "exec", and that there is sufficient room for the brk. */
-extern unsigned long randomize_et_dyn(unsigned long base);
-#define ELF_ET_DYN_BASE         (randomize_et_dyn(STACK_TOP / 3 * 2))
+extern unsigned long randomize_et_dyn(void);
+#define ELF_ET_DYN_BASE         randomize_et_dyn()
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. */
@@ -209,7 +209,9 @@ do { \
 } while (0)
 #endif /* CONFIG_COMPAT */
 
-#define STACK_RND_MASK  0x7ffUL
+extern unsigned long mmap_rnd_mask;
+
+#define STACK_RND_MASK  (mmap_rnd_mask)
 
 #define ARCH_DLINFO \
 do { \

arch/s390/include/asm/pgtable.h

@@ -1779,6 +1779,10 @@ extern int s390_enable_sie(void);
 extern int s390_enable_skey(void);
 extern void s390_reset_cmma(struct mm_struct *mm);
 
+/* s390 has a private copy of get unmapped area to deal with cache synonyms */
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
 /*
  * No page table caches to initialise
  */

arch/s390/kernel/process.c

@@ -243,13 +243,3 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
         ret = PAGE_ALIGN(mm->brk + brk_rnd());
         return (ret > mm->brk) ? ret : mm->brk;
 }
-
-unsigned long randomize_et_dyn(unsigned long base)
-{
-        unsigned long ret;
-
-        if (!(current->flags & PF_RANDOMIZE))
-                return base;
-        ret = PAGE_ALIGN(base + brk_rnd());
-        return (ret > base) ? ret : base;
-}

arch/s390/mm/init.c

@@ -71,13 +71,16 @@ static void __init setup_zero_pages(void)
                 break;
         case 0x2827:    /* zEC12 */
         case 0x2828:    /* zEC12 */
-        default:
                 order = 5;
                 break;
+        case 0x2964:    /* z13 */
+        default:
+                order = 7;
+                break;
         }
         /* Limit number of empty zero pages for small memory sizes */
-        if (order > 2 && totalram_pages <= 16384)
-                order = 2;
+        while (order > 2 && (totalram_pages >> 10) < (1UL << order))
+                order--;
 
         empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
         if (!empty_zero_page)
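The new sizing rule can be tried out in isolation; a small sketch with made-up memory sizes (illustrative only) shows how the loop shrinks the zero-page order until the zero pages take at most 1/1024 of total RAM:

#include <stdio.h>

/* Same rule as in setup_zero_pages(): shrink the order while the machine
 * has fewer than 1024 times as many pages as the zero-page area. */
static unsigned int limit_order(unsigned int order, unsigned long totalram_pages)
{
        while (order > 2 && (totalram_pages >> 10) < (1UL << order))
                order--;
        return order;
}

int main(void)
{
        /* 4K pages: 256MB = 65536 pages, 1GB = 262144 pages */
        printf("z13, 256MB RAM -> order %u (%lu KB of zero pages)\n",
               limit_order(7, 65536UL), 4UL << limit_order(7, 65536UL));
        printf("z13, 1GB RAM   -> order %u (%lu KB of zero pages)\n",
               limit_order(7, 262144UL), 4UL << limit_order(7, 262144UL));
        return 0;
}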

arch/s390/mm/mmap.c

@@ -28,8 +28,12 @@
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/compat.h>
+#include <linux/security.h>
 #include <asm/pgalloc.h>
 
+unsigned long mmap_rnd_mask;
+unsigned long mmap_align_mask;
+
 static unsigned long stack_maxrandom_size(void)
 {
         if (!(current->flags & PF_RANDOMIZE))
@@ -60,8 +64,10 @@ static unsigned long mmap_rnd(void)
 {
         if (!(current->flags & PF_RANDOMIZE))
                 return 0;
-        /* 8MB randomization for mmap_base */
-        return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
+        if (is_32bit_task())
+                return (get_random_int() & 0x7ff) << PAGE_SHIFT;
+        else
+                return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
 }
 
 static unsigned long mmap_base_legacy(void)
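A short illustration (not kernel code) of why the 64-bit branch keeps cache colors intact: mmap_rnd_mask is set to 0x3ff80 for z13 in setup_mmap_rnd() further down, so the random offset only touches address bits 19..29 and is always a multiple of 512K:

#include <stdio.h>

#define PAGE_SHIFT 12
#define COLOR_MASK 0x7f000UL    /* address bits 12..18 */

int main(void)
{
        unsigned long rnd = 0xdeadbeefUL;       /* stand-in for get_random_int() */
        unsigned long off = (rnd & 0x3ff80UL) << PAGE_SHIFT;

        printf("offset 0x%lx, color bits 0x%lx, multiple of 512K: %d\n",
               off, off & COLOR_MASK, (off & 0x7ffffUL) == 0);
        return 0;
}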
@@ -81,6 +87,106 @@ static inline unsigned long mmap_base(void)
         return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
 }
 
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+                unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+        struct mm_struct *mm = current->mm;
+        struct vm_area_struct *vma;
+        struct vm_unmapped_area_info info;
+        int do_color_align;
+
+        if (len > TASK_SIZE - mmap_min_addr)
+                return -ENOMEM;
+
+        if (flags & MAP_FIXED)
+                return addr;
+
+        if (addr) {
+                addr = PAGE_ALIGN(addr);
+                vma = find_vma(mm, addr);
+                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+                    (!vma || addr + len <= vma->vm_start))
+                        return addr;
+        }
+
+        do_color_align = 0;
+        if (filp || (flags & MAP_SHARED))
+                do_color_align = !is_32bit_task();
+
+        info.flags = 0;
+        info.length = len;
+        info.low_limit = mm->mmap_base;
+        info.high_limit = TASK_SIZE;
+        info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+        info.align_offset = pgoff << PAGE_SHIFT;
+        return vm_unmapped_area(&info);
+}
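The coloring itself is done by vm_unmapped_area(): with align_mask and align_offset set as above, the returned address agrees with the file offset in bits 12..18. A hypothetical helper (illustration only, not part of the patch) spelling out that invariant:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define MMAP_ALIGN_MASK 0x7fUL  /* z13 value from setup_mmap_rnd() below */

/* The invariant vm_unmapped_area() establishes for colored mappings. */
static bool color_matches(unsigned long addr, unsigned long pgoff)
{
        unsigned long mask = MMAP_ALIGN_MASK << PAGE_SHIFT;

        return (addr & mask) == ((pgoff << PAGE_SHIFT) & mask);
}

int main(void)
{
        /* made-up values: pgoff 0x123 -> color bits 0x23000 */
        printf("%d %d\n",
               color_matches(0x3ff80023000UL, 0x123UL),
               color_matches(0x3ff80024000UL, 0x123UL));
        return 0;
}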
+
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+                          const unsigned long len, const unsigned long pgoff,
+                          const unsigned long flags)
+{
+        struct vm_area_struct *vma;
+        struct mm_struct *mm = current->mm;
+        unsigned long addr = addr0;
+        struct vm_unmapped_area_info info;
+        int do_color_align;
+
+        /* requested length too big for entire address space */
+        if (len > TASK_SIZE - mmap_min_addr)
+                return -ENOMEM;
+
+        if (flags & MAP_FIXED)
+                return addr;
+
+        /* requesting a specific address */
+        if (addr) {
+                addr = PAGE_ALIGN(addr);
+                vma = find_vma(mm, addr);
+                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+                    (!vma || addr + len <= vma->vm_start))
+                        return addr;
+        }
+
+        do_color_align = 0;
+        if (filp || (flags & MAP_SHARED))
+                do_color_align = !is_32bit_task();
+
+        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+        info.length = len;
+        info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+        info.high_limit = mm->mmap_base;
+        info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+        info.align_offset = pgoff << PAGE_SHIFT;
+        addr = vm_unmapped_area(&info);
+
+        /*
+         * A failed mmap() very likely causes application failure,
+         * so fall back to the bottom-up function here. This scenario
+         * can happen with large stack limits and large mmap()
+         * allocations.
+         */
+        if (addr & ~PAGE_MASK) {
+                VM_BUG_ON(addr != -ENOMEM);
+                info.flags = 0;
+                info.low_limit = TASK_UNMAPPED_BASE;
+                info.high_limit = TASK_SIZE;
+                addr = vm_unmapped_area(&info);
+        }
+
+        return addr;
+}
+
+unsigned long randomize_et_dyn(void)
+{
+        unsigned long base;
+
+        base = (STACK_TOP / 3 * 2) & (~mmap_align_mask << PAGE_SHIFT);
+        return base + mmap_rnd();
+}
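A worked example of the new ELF base calculation (the STACK_TOP and random values below are made up): the base is aligned down to a 512K boundary before the 512K-granular mmap_rnd() offset is added, so the load address keeps the color bits at zero:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned long stack_top  = 0x4000000000UL;  /* example value only */
        unsigned long align_mask = 0x7fUL;          /* z13 mmap_align_mask */
        unsigned long rnd        = 0x00180000UL;    /* example mmap_rnd() result */
        unsigned long base;

        base = (stack_top / 3 * 2) & (~align_mask << PAGE_SHIFT);
        printf("base 0x%lx, final 0x%lx, 512K aligned: %d\n",
               base, base + rnd, ((base + rnd) & 0x7ffffUL) == 0);
        return 0;
}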
 
 #ifndef CONFIG_64BIT
 
 /*
@@ -177,4 +283,36 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
         }
 }
 
+static int __init setup_mmap_rnd(void)
+{
+        struct cpuid cpu_id;
+
+        get_cpu_id(&cpu_id);
+        switch (cpu_id.machine) {
+        case 0x9672:
+        case 0x2064:
+        case 0x2066:
+        case 0x2084:
+        case 0x2086:
+        case 0x2094:
+        case 0x2096:
+        case 0x2097:
+        case 0x2098:
+        case 0x2817:
+        case 0x2818:
+        case 0x2827:
+        case 0x2828:
+                mmap_rnd_mask = 0x7ffUL;
+                mmap_align_mask = 0UL;
+                break;
+        case 0x2964:    /* z13 */
+        default:
+                mmap_rnd_mask = 0x3ff80UL;
+                mmap_align_mask = 0x7fUL;
+                break;
+        }
+        return 0;
+}
+early_initcall(setup_mmap_rnd);
 #endif
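To put numbers on the two configurations chosen in setup_mmap_rnd() (an illustrative summary derived from the masks above): pre-z13 machines keep the old page-granular 8MB mmap randomization with no extra alignment, while z13 and later get 512K-aligned mappings with roughly 1GB of randomization range:

#include <stdio.h>

#define PAGE_SHIFT 12

static void describe(const char *name, unsigned long rnd_mask,
                     unsigned long align_mask)
{
        unsigned long step_kb  = (align_mask + 1) << (PAGE_SHIFT - 10);
        unsigned long range_mb = ((rnd_mask + 1) << PAGE_SHIFT) >> 20;

        printf("%-8s ~%lu MB randomization range, %lu KB alignment\n",
               name, range_mb, step_kb);
}

int main(void)
{
        describe("pre-z13", 0x7ffUL, 0UL);
        describe("z13", 0x3ff80UL, 0x7fUL);
        return 0;
}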