nommu: there is no mlock() for NOMMU, so don't provide the bits
The mlock() facility does not exist for NOMMU since all mappings are
effectively locked anyway, so we don't make the bits available when they're
not useful.

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Greg Ungerer <gerg@snapgear.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Enrik Berkhan <Enrik.Berkhan@ge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7ca43e7564
commit 33925b25d2
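
A note for readers before the diff: the change leans entirely on the kernel's
no-op page-flag macro pattern. Below is a minimal sketch of that pattern,
assuming simplified expansions rather than the kernel's exact
PAGEFLAG_FALSE/SETPAGEFLAG_NOOP macro bodies; the caller is hypothetical:

    /* When CONFIG_HAVE_MLOCKED_PAGE_BIT is unset, PAGEFLAG_FALSE(Mlocked)
     * and friends effectively provide constant stubs like these: */
    static inline int PageMlocked(struct page *page)           { return 0; }
    static inline void SetPageMlocked(struct page *page)       { }
    static inline int TestClearPageMlocked(struct page *page)  { return 0; }

    /* Generic mm code can then stay unconditional: with MLOCK_PAGES == 0
     * the condition is constant-false and the compiler drops the branch. */
    void hypothetical_caller(struct page *page)
    {
        if (MLOCK_PAGES && TestClearPageMlocked(page)) {
            /* mlock bookkeeping; never reached on NOMMU builds */
        }
    }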
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
@@ -96,6 +96,8 @@ enum pageflags {
 	PG_swapbacked,		/* Page is backed by RAM/swap */
 #ifdef CONFIG_UNEVICTABLE_LRU
 	PG_unevictable,		/* Page is "unevictable"  */
+#endif
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 	PG_mlocked,		/* Page is vma mlocked */
 #endif
 #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
@@ -234,22 +236,22 @@ PAGEFLAG_FALSE(SwapCache)
 #ifdef CONFIG_UNEVICTABLE_LRU
 PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
 	TESTCLEARFLAG(Unevictable, unevictable)
-
-#define MLOCK_PAGES 1
-PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
-	TESTSCFLAG(Mlocked, mlocked)
-
 #else
-
-#define MLOCK_PAGES 0
-PAGEFLAG_FALSE(Mlocked)
-	SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked)
-
 PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable)
 	SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable)
 	__CLEARPAGEFLAG_NOOP(Unevictable)
 #endif
 
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#define MLOCK_PAGES 1
+PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
+	TESTSCFLAG(Mlocked, mlocked)
+#else
+#define MLOCK_PAGES 0
+PAGEFLAG_FALSE(Mlocked)
+	SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked)
+#endif
+
 #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
 PAGEFLAG(Uncached, uncached)
 #else
@@ -367,9 +369,13 @@ static inline void __ClearPageTail(struct page *page)
 
 #ifdef CONFIG_UNEVICTABLE_LRU
 #define __PG_UNEVICTABLE	(1 << PG_unevictable)
-#define __PG_MLOCKED		(1 << PG_mlocked)
 #else
 #define __PG_UNEVICTABLE	0
+#endif
+
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#define __PG_MLOCKED		(1 << PG_mlocked)
+#else
 #define __PG_MLOCKED		0
 #endif
 
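
(Aside, not part of the commit: the __PG_* macros in the last hunk are the
bitmask forms of the same flags, for code that tests flags in bulk. Because
__PG_MLOCKED becomes 0 when CONFIG_HAVE_MLOCKED_PAGE_BIT is unset, the bit
simply drops out of any composed mask at compile time; the mask name below is
illustrative:)

    #define ILLUSTRATIVE_FLAGS_MASK  (__PG_UNEVICTABLE | __PG_MLOCKED)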
diff --git a/mm/Kconfig b/mm/Kconfig
@@ -214,5 +214,13 @@ config UNEVICTABLE_LRU
 	  will use one page flag and increase the code size a little,
 	  say Y unless you know what you are doing.
 
+config HAVE_MLOCK
+	bool
+	default y if MMU=y
+
+config HAVE_MLOCKED_PAGE_BIT
+	bool
+	default y if HAVE_MLOCK=y && UNEVICTABLE_LRU=y
+
 config MMU_NOTIFIER
 	bool
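
Both new Kconfig symbols are non-interactive and purely derived from MMU and
UNEVICTABLE_LRU. My reading of the defaults above, summarized as a truth
table (not text from the commit):

    /*
     *  MMU   UNEVICTABLE_LRU | HAVE_MLOCK   HAVE_MLOCKED_PAGE_BIT
     *   y          y         |     y                 y
     *   y          n         |     y                 n
     *   n         any        |     n                 n
     */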
diff --git a/mm/internal.h b/mm/internal.h
@@ -63,6 +63,7 @@ static inline unsigned long page_order(struct page *page)
 	return page_private(page);
 }
 
+#ifdef CONFIG_HAVE_MLOCK
 extern long mlock_vma_pages_range(struct vm_area_struct *vma,
 			unsigned long start, unsigned long end);
 extern void munlock_vma_pages_range(struct vm_area_struct *vma,
@@ -71,6 +72,7 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 {
 	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
 }
+#endif
 
 #ifdef CONFIG_UNEVICTABLE_LRU
 /*
@@ -90,7 +92,7 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old)
 }
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 /*
  * Called only in fault path via page_evictable() for a new page
  * to determine if it's being mapped into a LOCKED vma.
@@ -165,7 +167,7 @@ static inline void free_page_mlock(struct page *page)
 	}
 }
 
-#else /* CONFIG_UNEVICTABLE_LRU */
+#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 {
 	return 0;
@@ -175,7 +177,7 @@ static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
 static inline void free_page_mlock(struct page *page) { }
 
-#endif /* CONFIG_UNEVICTABLE_LRU */
+#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
 
 /*
  * Return the mem_map entry representing the 'offset' subpage within
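
Taken together, the mm/internal.h hunks wrap the real mlock helpers in
CONFIG_HAVE_MLOCK and retarget the guard around the stub section. The stub
side, reassembled from the context lines above (bodies abbreviated):

    #else  /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
    /* Every helper degrades to a no-op, so callers need no #ifdefs: */
    static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
    {
        return 0;
    }
    static inline void mlock_vma_page(struct page *page) { }
    static inline void mlock_migrate_page(struct page *new, struct page *old) { }
    static inline void free_page_mlock(struct page *page) { }
    #endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */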