// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/time_namespace.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

enum vdso_abi {
	VDSO_ABI_AA64,
	VDSO_ABI_AA32,
};

struct vdso_abi_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct vdso_abi_info vdso_info[] __ro_after_init = {
	[VDSO_ABI_AA64] = {
		.name = "vdso",
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	[VDSO_ABI_AA32] = {
		.name = "vdso32",
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page.
 */
static union vdso_data_store vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

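/*
 * Called when userspace moves the [vdso] mapping with mremap(): keep the
 * cached base address in mm->context.vdso pointing at the new location.
 */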
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

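/*
 * Validate the vDSO image for the given ABI and build the list of struct
 * pages backing its text, which is later installed via the code mapping.
 */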
static int __init __vdso_init(enum vdso_abi abi)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_info[abi].vdso_pages = (
		vdso_info[abi].vdso_code_end -
		vdso_info[abi].vdso_code_start) >>
		PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);

	for (i = 0; i < vdso_info[abi].vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info[abi].cm->pages = vdso_pagelist;

	return 0;
}

#ifdef CONFIG_TIME_NS
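/*
 * The generic time namespace code needs the vdso_data backing a vvar page;
 * on arm64 the data sits at the start of that page.
 */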
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);

	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
			zap_vma_pages(vma);
#ifdef CONFIG_COMPAT_VDSO
		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
			zap_vma_pages(vma);
#endif
	}

	mmap_read_unlock(mm);
	return 0;
}
#endif

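/*
 * Fault handler for the [vvar] mapping. Pages are not mapped up front but
 * inserted by PFN on first access, so tasks inside a time namespace see
 * their namespace-specific copy of the vDSO data.
 */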
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

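/*
 * Map the vvar pages followed by the vDSO text into the given mm. The vvar
 * area is read-only data; the text is executable and, where the CPU supports
 * it, guarded with BTI.
 */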
static int __setup_additional_pages(enum vdso_abi abi,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	unsigned long gp_flags = 0;
	void *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_PFNMAP,
				       vdso_info[abi].dm);
	if (IS_ERR(ret))
		goto up_fail;

	if (system_supports_bti_kernel())
		gp_flags = VM_ARM64_BTI;

	vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|gp_flags|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_info[abi].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
enum aarch32_map {
	AA32_MAP_VECTORS, /* kuser helpers */
	AA32_MAP_SIGPAGE,
	AA32_MAP_VVAR,
	AA32_MAP_VDSO,
};

static struct page *aarch32_vectors_page __ro_after_init;
static struct page *aarch32_sig_page __ro_after_init;

static int aarch32_sigpage_mremap(const struct vm_special_mapping *sm,
				  struct vm_area_struct *new_vma)
{
	current->mm->context.sigpage = (void *)new_vma->vm_start;

	return 0;
}

static struct vm_special_mapping aarch32_vdso_maps[] = {
	[AA32_MAP_VECTORS] = {
		.name = "[vectors]", /* ABI */
		.pages = &aarch32_vectors_page,
	},
	[AA32_MAP_SIGPAGE] = {
		.name = "[sigpage]", /* ABI */
		.pages = &aarch32_sig_page,
		.mremap = aarch32_sigpage_mremap,
	},
	[AA32_MAP_VVAR] = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	[AA32_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
};

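/*
 * Allocate the page backing the AArch32 [vectors] mapping and copy the kuser
 * helpers to its end, so they appear at the fixed addresses the ABI expects.
 */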
static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_KERNEL);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vectors_page = virt_to_page((void *)vdso_page);
	return 0;
}

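/*
 * Allocate the AArch32 signal return page: fill it with an undefined
 * instruction encoding so stray jumps into it fault, then copy the sigreturn
 * trampoline to its start.
 */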
#define COMPAT_SIGPAGE_POISON_WORD 0xe7fddef1
static int aarch32_alloc_sigpage(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	__le32 poison = cpu_to_le32(COMPAT_SIGPAGE_POISON_WORD);
	void *sigpage;

	sigpage = (void *)__get_free_page(GFP_KERNEL);
	if (!sigpage)
		return -ENOMEM;

	memset32(sigpage, (__force u32)poison, PAGE_SIZE / sizeof(poison));
	memcpy(sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_sig_page = virt_to_page(sigpage);
	return 0;
}

static int __init __aarch32_alloc_vdso_pages(void)
{
	if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
		return 0;

	vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA32);
}

static int __init aarch32_alloc_vdso_pages(void)
{
	int ret;

	ret = __aarch32_alloc_vdso_pages();
	if (ret)
		return ret;

	ret = aarch32_alloc_sigpage();
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);

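/* Map the kuser helpers page at the fixed AArch32 vectors address. */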
static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}

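/*
 * Map the [sigpage] containing the signal return trampoline and remember its
 * location in mm->context.sigpage.
 */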
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
	 * set breakpoints.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.sigpage = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}

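/*
 * Install the compat mappings for a new AArch32 process: kuser helpers, the
 * compat vDSO (when enabled) and the signal return page.
 */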
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

	if (IS_ENABLED(CONFIG_COMPAT_VDSO)) {
		ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm,
					       uses_interp);
		if (ret)
			goto out;
	}

	ret = aarch32_sigreturn_setup(mm);
out:
	mmap_write_unlock(mm);
	return ret;
}
#endif /* CONFIG_COMPAT */

enum aarch64_map {
	AA64_MAP_VVAR,
	AA64_MAP_VDSO,
};

static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
	[AA64_MAP_VVAR] = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	[AA64_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
};

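/* Wire up the AArch64 special mappings and build the vDSO page list at boot. */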
static int __init vdso_init(void)
{
	vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR];
	vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA64);
}
arch_initcall(vdso_init);

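/*
 * Called by the ELF loader to map the AArch64 vDSO into a new process.
 */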
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp);
	mmap_write_unlock(mm);

	return ret;
}