s390/boot: fix mem_detect extended area allocation
Allocation of the mem_detect extended area was considered neither in
commit 9641b8cc73 ("s390/ipl: read IPL report at early boot") nor in
commit b2d24b97b2 ("s390/kernel: add support for kernel address space
layout randomization (KASLR)"). As a result, the mem_detect extended
area may theoretically overlap with the IPL report or with the
randomized kernel image position. But since the mem_detect code
allocates the extended area only when more than 255 online regions are
present (and online regions should alternate with offline ones), the
overlap is not seen in practice.

To make sure the mem_detect extended area overlaps with neither the IPL
report nor the randomized kernel position, extend the usage of
"safe_addr": make the initrd handling and the mem_detect extended area
allocation code move it further right, and make KASLR take it into
consideration as well.

Fixes: 9641b8cc73 ("s390/ipl: read IPL report at early boot")
Fixes: b2d24b97b2 ("s390/kernel: add support for kernel address space layout randomization (KASLR)")
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
commit 22476f47b6
parent eb33f9eb30
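The fix boils down to a bump-allocation discipline: every early-boot
consumer claims its range at or above the current "safe_addr" lower
bound and hands the advanced bound to the next consumer, so the IPL
report, the rescued initrd, the mem_detect extended area and the KASLR
base cannot overlap. The following is a minimal userspace sketch of
that discipline, not the kernel code; all names, sizes and addresses
in it are made up for illustration:

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* claim [aligned safe_addr, +size) and return the new lower bound */
static unsigned long reserve(unsigned long safe_addr, unsigned long size,
			     const char *what)
{
	unsigned long start = ALIGN_UP(safe_addr, 8UL);

	printf("%-22s 0x%08lx - 0x%08lx\n", what, start, start + size);
	return start + size;	/* later consumers allocate above this */
}

int main(void)
{
	/* start above the decompressed kernel image (made-up address) */
	unsigned long safe_addr = 0x02000000;

	safe_addr = reserve(safe_addr, 0x1000, "ipl report");
	safe_addr = reserve(safe_addr, 0x800000, "rescued initrd");
	safe_addr = reserve(safe_addr, 0x4000, "mem_detect extended");
	/* KASLR may only pick a base at or above the final bound */
	printf("%-22s 0x%08lx\n", "kaslr floor (aligned)",
	       ALIGN_UP(safe_addr, 0x4000UL));
	return 0;
}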
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
@@ -33,10 +33,10 @@ struct vmlinux_info {
 };
 
 void startup_kernel(void);
-unsigned long detect_memory(void);
+unsigned long detect_memory(unsigned long *safe_addr);
 bool is_ipl_block_dump(void);
 void store_ipl_parmblock(void);
-unsigned long read_ipl_report(unsigned long safe_offset);
+unsigned long read_ipl_report(unsigned long safe_addr);
 void setup_boot_command_line(void);
 void parse_boot_command_line(void);
 void verify_facilities(void);
diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
@@ -174,7 +174,6 @@ unsigned long get_random_base(unsigned long safe_addr)
 {
 	unsigned long memory_limit = get_mem_detect_end();
 	unsigned long base_pos, max_pos, kernel_size;
-	unsigned long kasan_needs;
 	int i;
 
 	memory_limit = min(memory_limit, ident_map_size);
@@ -186,12 +185,7 @@ unsigned long get_random_base(unsigned long safe_addr)
 	 */
 	memory_limit -= kasan_estimate_memory_needs(memory_limit);
 
-	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size) {
-		if (safe_addr < initrd_data.start + initrd_data.size)
-			safe_addr = initrd_data.start + initrd_data.size;
-	}
 	safe_addr = ALIGN(safe_addr, THREAD_SIZE);
-
 	kernel_size = vmlinux.image_size + vmlinux.bss_size;
 	if (safe_addr + kernel_size > memory_limit)
 		return 0;
diff --git a/arch/s390/boot/mem_detect.c b/arch/s390/boot/mem_detect.c
@@ -16,29 +16,10 @@ struct mem_detect_info __bootdata(mem_detect);
 #define ENTRIES_EXTENDED_MAX \
 	(256 * (1020 / 2) * sizeof(struct mem_detect_block))
 
-/*
- * To avoid corrupting old kernel memory during dump, find lowest memory
- * chunk possible either right after the kernel end (decompressed kernel) or
- * after initrd (if it is present and there is no hole between the kernel end
- * and initrd)
- */
-static void *mem_detect_alloc_extended(void)
-{
-	unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));
-
-	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
-	    initrd_data.start < offset + ENTRIES_EXTENDED_MAX)
-		offset = ALIGN(initrd_data.start + initrd_data.size, sizeof(u64));
-
-	return (void *)offset;
-}
-
 static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
 {
 	if (n < MEM_INLINED_ENTRIES)
 		return &mem_detect.entries[n];
-	if (unlikely(!mem_detect.entries_extended))
-		mem_detect.entries_extended = mem_detect_alloc_extended();
 	return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
 }
 
@@ -147,7 +128,7 @@ static int tprot(unsigned long addr)
 	return rc;
 }
 
-static void search_mem_end(void)
+static unsigned long search_mem_end(void)
 {
 	unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
 	unsigned long offset = 0;
@@ -159,33 +140,34 @@ static void search_mem_end(void)
 		if (!tprot(pivot << 20))
 			offset = pivot;
 	}
 
-	add_mem_detect_block(0, (offset + 1) << 20);
+	return (offset + 1) << 20;
 }
 
-unsigned long detect_memory(void)
+unsigned long detect_memory(unsigned long *safe_addr)
 {
 	unsigned long max_physmem_end = 0;
 
 	sclp_early_get_memsize(&max_physmem_end);
+	mem_detect.entries_extended = (struct mem_detect_block *)ALIGN(*safe_addr, sizeof(u64));
 
 	if (!sclp_early_read_storage_info()) {
 		mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
-		return max_physmem_end;
-	}
-
-	if (!diag260()) {
+	} else if (!diag260()) {
 		mem_detect.info_source = MEM_DETECT_DIAG260;
-		return max_physmem_end ?: get_mem_detect_end();
-	}
-
-	if (max_physmem_end) {
+		max_physmem_end = max_physmem_end ?: get_mem_detect_end();
+	} else if (max_physmem_end) {
 		add_mem_detect_block(0, max_physmem_end);
 		mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
-		return max_physmem_end;
+	} else {
+		max_physmem_end = search_mem_end();
+		add_mem_detect_block(0, max_physmem_end);
+		mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
 	}
 
-	search_mem_end();
-	mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
-	return get_mem_detect_end();
+	if (mem_detect.count > MEM_INLINED_ENTRIES) {
+		*safe_addr += (mem_detect.count - MEM_INLINED_ENTRIES) *
+			      sizeof(struct mem_detect_block);
+	}
+	return max_physmem_end;
 }
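For scale, a back-of-envelope check of the constants above (a userspace
sketch, not part of the commit): assuming struct mem_detect_block is a
pair of u64s and MEM_INLINED_ENTRIES is 255 — consistent with the "255
online regions" in the commit message — the extended area tops out at
about 2 MB, and detect_memory() now advances *safe_addr only by the
entries that actually spill past the inlined ones:

#include <stdio.h>
#include <stdint.h>

/* assumed layout: one block describes one [start, end) region */
struct mem_detect_block {
	uint64_t start;
	uint64_t end;
};

#define MEM_INLINED_ENTRIES 255	/* assumed, per the commit message */
#define ENTRIES_EXTENDED_MAX \
	(256 * (1020 / 2) * sizeof(struct mem_detect_block))

int main(void)
{
	unsigned int count = 300;	/* example: 300 online regions */

	/* worst case that must not land on the IPL report or the kernel */
	printf("extended area worst case: %zu bytes\n", ENTRIES_EXTENDED_MAX);
	/* the bump applied to *safe_addr for the spilled entries only */
	printf("safe_addr bump for %u regions: %zu bytes\n", count,
	       (count - MEM_INLINED_ENTRIES) * sizeof(struct mem_detect_block));
	return 0;
}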
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
@@ -76,16 +76,17 @@ unsigned long mem_safe_offset(void)
 }
 #endif
 
-static void rescue_initrd(unsigned long addr)
+static unsigned long rescue_initrd(unsigned long safe_addr)
 {
 	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
-		return;
+		return safe_addr;
 	if (!initrd_data.start || !initrd_data.size)
-		return;
-	if (addr <= initrd_data.start)
-		return;
-	memmove((void *)addr, (void *)initrd_data.start, initrd_data.size);
-	initrd_data.start = addr;
+		return safe_addr;
+	if (initrd_data.start < safe_addr) {
+		memmove((void *)safe_addr, (void *)initrd_data.start, initrd_data.size);
+		initrd_data.start = safe_addr;
+	}
+	return initrd_data.start + initrd_data.size;
 }
 
 static void copy_bootdata(void)
@@ -275,6 +276,7 @@ static unsigned long reserve_amode31(unsigned long safe_addr)
 
 void startup_kernel(void)
 {
+	unsigned long max_physmem_end;
 	unsigned long random_lma;
 	unsigned long safe_addr;
 	unsigned long asce_limit;
@@ -294,12 +296,13 @@ void startup_kernel(void)
 	safe_addr = reserve_amode31(safe_addr);
 	safe_addr = read_ipl_report(safe_addr);
 	uv_query_info();
-	rescue_initrd(safe_addr);
+	safe_addr = rescue_initrd(safe_addr);
 	sclp_early_read_info();
 	setup_boot_command_line();
 	parse_boot_command_line();
 	sanitize_prot_virt_host();
-	setup_ident_map_size(detect_memory());
+	max_physmem_end = detect_memory(&safe_addr);
+	setup_ident_map_size(max_physmem_end);
 	setup_vmalloc_size();
 	asce_limit = setup_kernel_memory_layout();
 
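Finally, the new rescue_initrd() contract in isolation (again a
userspace sketch with stand-in types; ram[] plays the role of physical
memory): an initrd sitting below the current bound is moved up to it,
and the returned value — one byte past the possibly moved initrd —
becomes the next lower bound:

#include <stdio.h>
#include <string.h>

static struct { unsigned long start, size; } initrd_data;
static unsigned char ram[1 << 20];	/* fake physical memory */

static unsigned long rescue_initrd(unsigned long safe_addr)
{
	if (!initrd_data.start || !initrd_data.size)
		return safe_addr;
	if (initrd_data.start < safe_addr) {
		/* move the initrd out of the protected low range */
		memmove(ram + safe_addr, ram + initrd_data.start, initrd_data.size);
		initrd_data.start = safe_addr;
	}
	return initrd_data.start + initrd_data.size;
}

int main(void)
{
	initrd_data.start = 0x1000;	/* below the bound: must move */
	initrd_data.size = 0x2000;
	memset(ram + initrd_data.start, 0xab, initrd_data.size);

	unsigned long safe_addr = rescue_initrd(0x8000);
	printf("initrd now at 0x%lx, next safe_addr 0x%lx\n",
	       initrd_data.start, safe_addr);	/* 0x8000 and 0xa000 */
	return 0;
}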