mirror of
https://github.com/edk2-porting/linux-next.git
synced 2025-01-15 09:03:59 +08:00
d8ed45c5dc
This change converts the existing mmap_sem rwsem calls to use the new mmap locking API instead. The change is generated using coccinelle with the following rule: // spatch --sp-file mmap_lock_api.cocci --in-place --include-headers --dir . @@ expression mm; @@ ( -init_rwsem +mmap_init_lock | -down_write +mmap_write_lock | -down_write_killable +mmap_write_lock_killable | -down_write_trylock +mmap_write_trylock | -up_write +mmap_write_unlock | -downgrade_write +mmap_write_downgrade | -down_read +mmap_read_lock | -down_read_killable +mmap_read_lock_killable | -down_read_trylock +mmap_read_trylock | -up_read +mmap_read_unlock ) -(&mm->mmap_sem) +(mm) Signed-off-by: Michel Lespinasse <walken@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com> Reviewed-by: Laurent Dufour <ldufour@linux.ibm.com> Reviewed-by: Vlastimil Babka <vbabka@suse.cz> Cc: Davidlohr Bueso <dbueso@suse.de> Cc: David Rientjes <rientjes@google.com> Cc: Hugh Dickins <hughd@google.com> Cc: Jason Gunthorpe <jgg@ziepe.ca> Cc: Jerome Glisse <jglisse@redhat.com> Cc: John Hubbard <jhubbard@nvidia.com> Cc: Liam Howlett <Liam.Howlett@oracle.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Ying Han <yinghan@google.com> Link: http://lkml.kernel.org/r/20200520052908.204642-5-walken@google.com Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
73 lines
1.3 KiB
C
73 lines
1.3 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/*
|
|
* Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
|
|
*/
|
|
|
|
#include <linux/slab.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/mm.h>
|
|
#include <asm/page.h>
|
|
#include <asm/elf.h>
|
|
#include <linux/init.h>
|
|
|
|
/* Non-zero when the vdso page was successfully set up at boot; cleared by
 * init_vdso() on allocation failure so later mappings are skipped. */
static unsigned int __read_mostly vdso_enabled = 1;
/* User-space address at which the vdso is mapped (top page of task_size). */
unsigned long um_vdso_addr;

/* Size of the user address space; defined by the UML arch setup code. */
extern unsigned long task_size;
/* Linker-provided bounds of the vdso image embedded in the kernel. */
extern char vdso_start[], vdso_end[];

/* Single-entry page array handed to install_special_mapping(); allocated
 * once in init_vdso() and never freed afterwards. */
static struct page **vdsop;
|
|
|
|
/*
 * Allocate the single vdso page, copy the embedded vdso image into it,
 * and record it in vdsop for later mapping into user processes.
 *
 * On any allocation failure the vdso is disabled (vdso_enabled = 0) and
 * -ENOMEM is returned; boot continues without a vdso.
 */
static int __init init_vdso(void)
{
	struct page *page = NULL;

	/* The embedded image must fit in the single page we map. */
	BUG_ON(vdso_end - vdso_start > PAGE_SIZE);

	/* Map the vdso at the very top page of the user address space. */
	um_vdso_addr = task_size - PAGE_SIZE;

	vdsop = kmalloc(sizeof(*vdsop), GFP_KERNEL);
	if (vdsop)
		page = alloc_page(GFP_KERNEL);

	if (!page) {
		/* kfree(NULL) is a no-op, so this covers both failures. */
		kfree(vdsop);
		printk(KERN_ERR "Cannot allocate vdso\n");
		vdso_enabled = 0;
		return -ENOMEM;
	}

	copy_page(page_address(page), vdso_start);
	*vdsop = page;

	return 0;
}
subsys_initcall(init_vdso);
|
|
|
|
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
|
|
{
|
|
int err;
|
|
struct mm_struct *mm = current->mm;
|
|
|
|
if (!vdso_enabled)
|
|
return 0;
|
|
|
|
if (mmap_write_lock_killable(mm))
|
|
return -EINTR;
|
|
|
|
err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
|
|
VM_READ|VM_EXEC|
|
|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
|
|
vdsop);
|
|
|
|
mmap_write_unlock(mm);
|
|
|
|
return err;
|
|
}
|