df2cc96e77
If a process monitored with userfaultfd changes its memory mappings or fork()s at the same time as the uffd monitor fills the process memory with UFFDIO_COPY, the actual creation of page table entries and copying of the data in mcopy_atomic may happen either before or after the memory mapping modifications, and there is no way for the uffd monitor to maintain a consistent view of the process memory layout.

For instance, consider fork() running in parallel with userfaultfd_copy():

 process                         |  uffd monitor
 --------------------------------+------------------------------
 fork()                          |  userfaultfd_copy()
 ...                             |  ...
    dup_mmap()                   |     down_read(mmap_sem)
    down_write(mmap_sem)         |     /* create PTEs, copy data */
       dup_uffd()                |     up_read(mmap_sem)
       copy_page_range()         |
       up_write(mmap_sem)        |
       dup_uffd_complete()       |
          /* notify monitor */   |

If userfaultfd_copy() takes the mmap_sem first, the new page(s) will be present by the time copy_page_range() is called and they will appear in the child's memory mappings. However, if fork() is the first to take the mmap_sem, the new pages won't be mapped in the child's address space.

If the pages are not present and the child tries to access them, the monitor will get a page fault notification and everything is fine. However, if the pages *are present*, the child can access them without uffd noticing, and if we copy them into the child it will see the wrong data. Since we are talking about a background copy, we need to decide whether the pages should be copied or not regardless of #PF notifications.

Since the userfaultfd monitor has no way to determine what the order was, disallow userfaultfd_copy() in parallel with the non-cooperative events. In that case we return -EAGAIN and the uffd monitor can understand that userfaultfd_copy() clashed with a non-cooperative event and take appropriate action.

Link: http://lkml.kernel.org/r/1527061324-19949-1-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Acked-by: Pavel Emelyanov <xemul@virtuozzo.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Andrei Vagin <avagin@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
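For illustration, the monitor-side consequence of this change is a retry loop around UFFDIO_COPY: when the ioctl fails with EAGAIN, the copy clashed with a non-cooperative event, and the monitor should consume the queued UFFD_EVENT_* messages before replaying the request. Below is a minimal userspace sketch of that loop; uffd_copy_page() and drain_uffd_events() are hypothetical names, not part of this patch, and the event handling itself is left to the monitor.

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

/*
 * Hypothetical hook: read the uffd and handle queued UFFD_EVENT_* messages
 * (e.g. pick up the child's new uffd from a UFFD_EVENT_FORK message).
 */
extern void drain_uffd_events(int uffd);

/*
 * Install one page of data at 'dst' in the monitored address space,
 * replaying the copy whenever it races with a non-cooperative event.
 */
static int uffd_copy_page(int uffd, unsigned long dst, unsigned long src,
			  unsigned long page_size)
{
	struct uffdio_copy copy;

	for (;;) {
		copy.dst = dst;
		copy.src = src;
		copy.len = page_size;
		copy.mode = 0;
		copy.copy = 0;

		if (ioctl(uffd, UFFDIO_COPY, &copy) == 0)
			return 0;		/* page is now present */

		if (errno != EAGAIN)
			return -1;		/* genuine failure */

		/*
		 * EAGAIN: the target's mappings changed under us (fork(),
		 * mremap(), madvise(), munmap()).  Let the monitor process
		 * the pending events so its view is consistent again, then
		 * retry the copy.
		 */
		drain_uffd_events(uffd);
	}
}

Whether simply replaying is enough depends on the event: after UFFD_EVENT_FORK, for instance, the monitor may also need to start serving the child through the file descriptor carried in the event message.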
145 lines
3.6 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/userfaultfd_k.h
 *
 * Copyright (C) 2015 Red Hat, Inc.
 *
 */

#ifndef _LINUX_USERFAULTFD_K_H
#define _LINUX_USERFAULTFD_K_H

#ifdef CONFIG_USERFAULTFD

#include <linux/userfaultfd.h> /* linux/include/uapi/linux/userfaultfd.h */

#include <linux/fcntl.h>

/*
 * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
 * new flags, since they might collide with O_* ones. We want
 * to re-use O_* flags that couldn't possibly have a meaning
 * from userfaultfd, in order to leave a free define-space for
 * shared O_* flags.
 */
#define UFFD_CLOEXEC O_CLOEXEC
#define UFFD_NONBLOCK O_NONBLOCK

#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)

extern int handle_userfault(struct vm_fault *vmf, unsigned long reason);

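/*
 * Back UFFDIO_COPY and UFFDIO_ZEROPAGE.  *mmap_changing is set by the
 * non-cooperative event handlers while the target address space is being
 * modified (e.g. during fork()); when it is true the request is refused
 * with -EAGAIN so the uffd monitor can replay it after it has seen the
 * corresponding event (see the commit message above).
 */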
extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
			    unsigned long src_start, unsigned long len,
			    bool *mmap_changing);
extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
			      unsigned long dst_start,
			      unsigned long len,
			      bool *mmap_changing);

/* mm helpers */
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
}

static inline bool userfaultfd_missing(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_MISSING;
}

static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP);
}

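/*
 * Non-cooperative event hooks, called from the fork/mremap/madvise/munmap
 * paths to notify the uffd monitor about address space changes made behind
 * its back (UFFD_EVENT_FORK, UFFD_EVENT_REMAP, UFFD_EVENT_REMOVE,
 * UFFD_EVENT_UNMAP).
 */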
extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *);
extern void dup_userfaultfd_complete(struct list_head *);

extern void mremap_userfaultfd_prep(struct vm_area_struct *,
				    struct vm_userfaultfd_ctx *);
extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *,
					unsigned long from, unsigned long to,
					unsigned long len);

extern bool userfaultfd_remove(struct vm_area_struct *vma,
			       unsigned long start,
			       unsigned long end);

extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end,
				  struct list_head *uf);
extern void userfaultfd_unmap_complete(struct mm_struct *mm,
				       struct list_head *uf);

#else /* CONFIG_USERFAULTFD */

/* mm helpers */
static inline int handle_userfault(struct vm_fault *vmf, unsigned long reason)
{
	return VM_FAULT_SIGBUS;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

static inline bool userfaultfd_missing(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
	return false;
}

static inline int dup_userfaultfd(struct vm_area_struct *vma,
				  struct list_head *l)
{
	return 0;
}

static inline void dup_userfaultfd_complete(struct list_head *l)
{
}

static inline void mremap_userfaultfd_prep(struct vm_area_struct *vma,
					   struct vm_userfaultfd_ctx *ctx)
{
}

static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx,
					       unsigned long from,
					       unsigned long to,
					       unsigned long len)
{
}

static inline bool userfaultfd_remove(struct vm_area_struct *vma,
				      unsigned long start,
				      unsigned long end)
{
	return true;
}

static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start, unsigned long end,
					 struct list_head *uf)
{
	return 0;
}

static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
					      struct list_head *uf)
{
}

#endif /* CONFIG_USERFAULTFD */

#endif /* _LINUX_USERFAULTFD_K_H */