linux-next/arch/csky/kernel/vdso.c
commit d8ed45c5dc ("mmap locking API: use coccinelle to convert mmap_sem rwsem call sites") by Michel Lespinasse
This change converts the existing mmap_sem rwsem calls to use the new mmap
locking API instead.

The change is generated using coccinelle with the following rule:

// spatch --sp-file mmap_lock_api.cocci --in-place --include-headers --dir .

@@
expression mm;
@@
(
-init_rwsem
+mmap_init_lock
|
-down_write
+mmap_write_lock
|
-down_write_killable
+mmap_write_lock_killable
|
-down_write_trylock
+mmap_write_trylock
|
-up_write
+mmap_write_unlock
|
-downgrade_write
+mmap_write_downgrade
|
-down_read
+mmap_read_lock
|
-down_read_killable
+mmap_read_lock_killable
|
-down_read_trylock
+mmap_read_trylock
|
-up_read
+mmap_read_unlock
)
-(&mm->mmap_sem)
+(mm)
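
In arch_setup_additional_pages() below, for example, the rule turns the two rwsem
calls into the corresponding wrappers (hunk reconstructed for illustration, not
copied from the actual diff; unrelated context abridged):

 	struct mm_struct *mm = current->mm;

-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 	...
 up_fail:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return ret;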

Signed-off-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Reviewed-by: Laurent Dufour <ldufour@linux.ibm.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ying Han <yinghan@google.com>
Link: http://lkml.kernel.org/r/20200520052908.204642-5-walken@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2020-06-09 09:39:14 -07:00


// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/unistd.h>
#include <linux/uaccess.h>

#include <asm/vdso.h>
#include <asm/cacheflush.h>

static struct page *vdso_page;

static int __init init_vdso(void)
{
	struct csky_vdso *vdso;
	int err = 0;

	vdso_page = alloc_page(GFP_KERNEL);
	if (!vdso_page)
		panic("Cannot allocate vdso");

	vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);
	if (!vdso)
		panic("Cannot map vdso");

	clear_page(vdso);

	err = setup_vdso_page(vdso->rt_signal_retcode);
	if (err)
		panic("Cannot set signal return code, err: %x.", err);

	dcache_wb_range((unsigned long)vdso, (unsigned long)vdso + 16);

	vunmap(vdso);

	return 0;
}
subsys_initcall(init_vdso);

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	int ret;
	unsigned long addr;
	struct mm_struct *mm = current->mm;

	mmap_write_lock(mm);

	addr = get_unmapped_area(NULL, STACK_TOP, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	ret = install_special_mapping(
		mm,
		addr,
		PAGE_SIZE,
		VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
		&vdso_page);
	if (ret)
		goto up_fail;

	mm->context.vdso = (void *)addr;

up_fail:
	mmap_write_unlock(mm);
	return ret;
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm == NULL)
		return NULL;

	if (vma->vm_start == (long)vma->vm_mm->context.vdso)
		return "[vdso]";
	else
		return NULL;
}