
s390/vdso: always enable vdso

With the upcoming move of the svc sigreturn instruction from the
signal frame to the vdso, the vdso must always be enabled.

Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
commit d57778feb9
parent b9639b3155
Author:    Sven Schnelle, 2021-06-23 14:10:00 +02:00
Committer: Vasily Gorbik

2 changed files, 8 insertions(+), 24 deletions(-)
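Because the vdso is now mapped unconditionally for 64-bit tasks, every such
process should show a [vdso] segment. A minimal userspace sketch to verify
this (illustrative only, not part of the commit; assumes a mounted /proc):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *maps = fopen("/proc/self/maps", "r");

	if (!maps) {
		perror("fopen");
		return 1;
	}
	/* print the mapping line that the kernel labels [vdso] */
	while (fgets(line, sizeof(line), maps))
		if (strstr(line, "[vdso]"))
			fputs(line, stdout);
	fclose(maps);
	return 0;
}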

--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h

@@ -146,8 +146,6 @@ typedef s390_compat_regs compat_elf_gregset_t;
 
 #include <asm/vdso.h>
 
-extern unsigned int vdso_enabled;
-
 /*
  * This is used to ensure we don't load something for the wrong architecture.
  */
@@ -268,11 +266,10 @@ do {							\
 #define STACK_RND_MASK	MMAP_RND_MASK
 
 /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
 #define ARCH_DLINFO						\
 do {								\
-	if (vdso_enabled)					\
-		NEW_AUX_ENT(AT_SYSINFO_EHDR,			\
-			    (unsigned long)current->mm->context.vdso_base); \
+	NEW_AUX_ENT(AT_SYSINFO_EHDR,				\
+		    (unsigned long)current->mm->context.vdso_base); \
 } while (0)
 
 struct linux_binprm;
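With the vdso_enabled guard removed, ARCH_DLINFO now always puts
AT_SYSINFO_EHDR (the address of the vdso ELF header) into the process
auxiliary vector. A small sketch of how userspace consumes that entry via
glibc's getauxval() (illustrative, not part of this commit):

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* AT_SYSINFO_EHDR holds the base address of the vdso mapping */
	unsigned long vdso_base = getauxval(AT_SYSINFO_EHDR);

	if (vdso_base)
		printf("vdso ELF header at 0x%lx\n", vdso_base);
	else
		puts("no AT_SYSINFO_EHDR entry");
	return 0;
}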

--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c

@@ -37,18 +37,6 @@ enum vvar_pages {
 	VVAR_NR_PAGES,
 };
 
-unsigned int __read_mostly vdso_enabled = 1;
-
-static int __init vdso_setup(char *str)
-{
-	bool enabled;
-
-	if (!kstrtobool(str, &enabled))
-		vdso_enabled = enabled;
-	return 1;
-}
-__setup("vdso=", vdso_setup);
-
 #ifdef CONFIG_TIME_NS
 struct vdso_data *arch_get_vdso_data(void *vvar_page)
 {
@@ -176,7 +164,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	int rc;
 
 	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
-	if (!vdso_enabled || is_compat_task())
+	if (is_compat_task())
 		return 0;
 	if (mmap_write_lock_killable(mm))
 		return -EINTR;
@@ -218,10 +206,9 @@ static int __init vdso_init(void)
 	vdso_pages = (vdso64_end - vdso64_start) >> PAGE_SHIFT;
 
 	pages = kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
-	if (!pages) {
-		vdso_enabled = 0;
-		return -ENOMEM;
-	}
+	if (!pages)
+		panic("failed to allocate VDSO pages");
+
 	for (i = 0; i < vdso_pages; i++)
 		pages[i] = virt_to_page(vdso64_start + i * PAGE_SIZE);
 	pages[vdso_pages] = NULL;
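Note the changed failure mode in vdso_init(): the old soft fallback (clear
vdso_enabled and return -ENOMEM) becomes a panic(). This follows from the
commit's premise: once the sigreturn trampoline lives in the vdso, a system
without the vdso mapping could not deliver signals correctly, so failing
early at boot is safer than continuing without it.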