mm/gup.c: convert to use get_user_{page|pages}_fast_only()
API __get_user_pages_fast() is renamed to get_user_pages_fast_only() to
align with pin_user_pages_fast_only(). As part of this, the write
parameter is removed; callers instead pass FOLL_WRITE to
get_user_pages_fast_only(). This does not change any existing
functionality of the API. All callers are updated to pass FOLL_WRITE.

Also introduce get_user_page_fast_only(), and use it in a few places
that hard-code nr_pages to 1.

Updated the documentation of the API.

Signed-off-by: Souptick Joarder <jrdr.linux@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Paul Mackerras <paulus@ozlabs.org> [arch/powerpc/kvm]
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Michal Suchanek <msuchanek@suse.de>
Link: http://lkml.kernel.org/r/1590396812-31277-1-git-send-email-jrdr.linux@gmail.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit dadbb612f6
parent e77132e758
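To make the new calling convention concrete, here is a minimal userspace
sketch (not part of the commit): the GUP stub and the flag values are
illustrative stand-ins, but the get_user_page_fast_only() wrapper mirrors
the inline helper this patch adds to include/linux/mm.h.

#include <stdbool.h>
#include <stdio.h>

struct page { int dummy; };          /* stand-in for the kernel's struct page */

#define FOLL_WRITE     0x01          /* illustrative values, not the kernel's */
#define FOLL_GET       0x04
#define FOLL_FAST_ONLY 0x80000

static int get_user_pages_fast_only(unsigned long start, int nr_pages,
                                    unsigned int gup_flags, struct page **pages)
{
        /* Stub standing in for the real routine, which ORs in
         * FOLL_GET | FOLL_FAST_ONLY and returns the number of pages
         * pinned (0 <= n <= nr_pages, never negative). */
        static struct page fake;
        for (int i = 0; i < nr_pages; i++)
                pages[i] = &fake;
        return nr_pages;
}

/* Single-page helper, matching the one this patch adds to mm.h. */
static inline bool get_user_page_fast_only(unsigned long addr,
                        unsigned int gup_flags, struct page **pagep)
{
        return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
}

int main(void)
{
        struct page *page;

        /* Before: __get_user_pages_fast(addr, 1, 1, &page) == 1
         * After:  write=1 becomes an explicit FOLL_WRITE, and the
         *         "== 1" check moves into the bool wrapper. */
        if (get_user_page_fast_only(0x1000, FOLL_WRITE, &page))
                printf("fast-pinned one page for write\n");
        return 0;
}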
@@ -581,7 +581,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 * We always ask for write permission since the common case
 	 * is that the page is writable.
 	 */
-	if (__get_user_pages_fast(hva, 1, 1, &page) == 1) {
+	if (get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
 		write_ok = true;
 	} else {
 		/* Call KVM generic code to do the slow-path check */
@@ -795,7 +795,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
 	 * is that the page is writable.
 	 */
 	hva = gfn_to_hva_memslot(memslot, gfn);
-	if (!kvm_ro && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
+	if (!kvm_ro && get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
 		upgrade_write = true;
 	} else {
 		unsigned long pfn;
@@ -30,11 +30,9 @@ int read_user_stack_slow(void __user *ptr, void *buf, int nb)
 	unsigned long addr = (unsigned long) ptr;
 	unsigned long offset;
 	struct page *page;
-	int nrpages;
 	void *kaddr;
 
-	nrpages = __get_user_pages_fast(addr, 1, 1, &page);
-	if (nrpages == 1) {
+	if (get_user_page_fast_only(addr, FOLL_WRITE, &page)) {
 		kaddr = page_address(page);
 
 		/* align address to page boundary */
@@ -1824,10 +1824,16 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
 /*
  * doesn't attempt to fault and will return short.
  */
-int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
-			  struct page **pages);
+int get_user_pages_fast_only(unsigned long start, int nr_pages,
+			     unsigned int gup_flags, struct page **pages);
 int pin_user_pages_fast_only(unsigned long start, int nr_pages,
 			     unsigned int gup_flags, struct page **pages);
 
+static inline bool get_user_page_fast_only(unsigned long addr,
+			unsigned int gup_flags, struct page **pagep)
+{
+	return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
+}
 /*
  * per-process(per-mm_struct) statistics.
  */
@@ -6934,12 +6934,12 @@ static u64 perf_virt_to_phys(u64 virt)
 		 * Walking the pages tables for user address.
 		 * Interrupts are disabled, so it prevents any tear down
 		 * of the page tables.
-		 * Try IRQ-safe __get_user_pages_fast first.
+		 * Try IRQ-safe get_user_page_fast_only first.
 		 * If failed, leave phys_addr as 0.
 		 */
 		if (current->mm != NULL) {
 			pagefault_disable();
-			if (__get_user_pages_fast(virt, 1, 0, &p) == 1)
+			if (get_user_page_fast_only(virt, 0, &p))
 				phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
 			pagefault_enable();
 		}
mm/gup.c (29 lines changed)
@@ -2294,7 +2294,7 @@ pte_unmap:
  * to be special.
  *
  * For a futex to be placed on a THP tail page, get_futex_key requires a
- * __get_user_pages_fast implementation that can pin pages. Thus it's still
+ * get_user_pages_fast_only implementation that can pin pages. Thus it's still
  * useful to have gup_huge_pmd even if we can't operate on ptes.
  */
 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
@@ -2699,7 +2699,7 @@ static inline void gup_pgd_range(unsigned long addr, unsigned long end,
 
 #ifndef gup_fast_permitted
 /*
- * Check if it's allowed to use __get_user_pages_fast() for the range, or
+ * Check if it's allowed to use get_user_pages_fast_only() for the range, or
  * we need to fall back to the slow version:
  */
 static bool gup_fast_permitted(unsigned long start, unsigned long end)
@@ -2811,8 +2811,14 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
 
 	return ret;
 }
-/*
+/**
+ * get_user_pages_fast_only() - pin user pages in memory
+ * @start:      starting user address
+ * @nr_pages:   number of pages from start to pin
+ * @gup_flags:  flags modifying pin behaviour
+ * @pages:      array that receives pointers to the pages pinned.
+ *              Should be at least nr_pages long.
+ *
  * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
  * the regular GUP.
  * Note a difference with get_user_pages_fast: this always returns the
@@ -2825,8 +2831,8 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
  * access can get ambiguous page results. If you call this function without
  * 'write' set, you'd better be sure that you're ok with that ambiguity.
  */
-int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
-			  struct page **pages)
+int get_user_pages_fast_only(unsigned long start, int nr_pages,
+			     unsigned int gup_flags, struct page **pages)
 {
 	int nr_pinned;
 	/*
@@ -2836,10 +2842,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	 * FOLL_FAST_ONLY is required in order to match the API description of
 	 * this routine: no fall back to regular ("slow") GUP.
 	 */
-	unsigned int gup_flags = FOLL_GET | FOLL_FAST_ONLY;
-
-	if (write)
-		gup_flags |= FOLL_WRITE;
+	gup_flags |= FOLL_GET | FOLL_FAST_ONLY;
 
 	nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
 						 pages);
@@ -2855,7 +2858,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 
 	return nr_pinned;
 }
-EXPORT_SYMBOL_GPL(__get_user_pages_fast);
+EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
 
 /**
  * get_user_pages_fast() - pin user pages in memory
@@ -2926,8 +2929,8 @@ int pin_user_pages_fast(unsigned long start, int nr_pages,
 EXPORT_SYMBOL_GPL(pin_user_pages_fast);
 
 /*
- * This is the FOLL_PIN equivalent of __get_user_pages_fast(). Behavior is the
- * same, except that this one sets FOLL_PIN instead of FOLL_GET.
+ * This is the FOLL_PIN equivalent of get_user_pages_fast_only(). Behavior
+ * is the same, except that this one sets FOLL_PIN instead of FOLL_GET.
  *
  * The API rules are the same, too: no negative values may be returned.
  */
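The kernel-doc added above specifies that get_user_pages_fast_only() never
falls back to slow GUP, so it can return fewer pages than requested, but
never a negative value. A hypothetical caller-side sketch of the resulting
pattern (kernel context assumed; the helper name and the release-and-retry
policy are illustrative, not part of this commit):

#include <linux/mm.h>

static int pin_range_or_fall_back(unsigned long start, int nr_pages,
                                  struct page **pages)
{
        /* IRQ-safe attempt: may pin fewer than nr_pages, never < 0. */
        int nr = get_user_pages_fast_only(start, nr_pages, FOLL_WRITE, pages);

        if (nr == nr_pages)
                return nr;

        /* Drop whatever the fast path did pin before retrying. */
        while (nr > 0)
                put_page(pages[--nr]);

        /* Sleeping path: allowed to fault pages in. */
        return get_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
}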
@@ -1740,7 +1740,6 @@ static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
 			    bool *writable, kvm_pfn_t *pfn)
 {
 	struct page *page[1];
-	int npages;
 
 	/*
 	 * Fast pin a writable pfn only if it is a write fault request
@@ -1750,8 +1749,7 @@ static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
 	if (!(write_fault || writable))
 		return false;
 
-	npages = __get_user_pages_fast(addr, 1, 1, page);
-	if (npages == 1) {
+	if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
 		*pfn = page_to_pfn(page[0]);
 
 		if (writable)
@@ -1791,7 +1789,7 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
 	if (unlikely(!write_fault) && writable) {
 		struct page *wpage;
 
-		if (__get_user_pages_fast(addr, 1, 1, &wpage) == 1) {
+		if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
 			*writable = true;
 			put_page(page);
 			page = wpage;
@@ -2003,7 +2001,7 @@ int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
 	if (entry < nr_pages)
 		return 0;
 
-	return __get_user_pages_fast(addr, nr_pages, 1, pages);
+	return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
 }
 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);