mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-27 14:43:58 +08:00
19fc7bed25
There are some similar functions for migration target allocation. Since there is no fundamental difference, it's better to keep just one rather than keeping all variants. This patch implements base migration target allocation function. In the following patches, variants will be converted to use this function. Changes should be mechanical, but, unfortunately, there are some differences. First, some callers' nodemask is assigned to NULL since NULL nodemask will be considered as all available nodes, that is, &node_states[N_MEMORY]. Second, for hugetlb page allocation, gfp_mask is redefined as regular hugetlb allocation gfp_mask plus __GFP_THISNODE if user provided gfp_mask has it. This is because future caller of this function requires to set this node constraint. Lastly, if provided nodeid is NUMA_NO_NODE, nodeid is set up to the node where migration source lives. It helps to remove simple wrappers for setting up the nodeid. Note that PageHighmem() call in previous function is changed to open-code "is_highmem_idx()" since it provides more readability. [akpm@linux-foundation.org: tweak patch title, per Vlastimil] [akpm@linux-foundation.org: fix typo in comment] Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Acked-by: Vlastimil Babka <vbabka@suse.cz> Acked-by: Michal Hocko <mhocko@suse.com> Cc: Christoph Hellwig <hch@infradead.org> Cc: Mike Kravetz <mike.kravetz@oracle.com> Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Roman Gushchin <guro@fb.com> Link: http://lkml.kernel.org/r/1594622517-20681-6-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
202 lines
6.0 KiB
C
202 lines
6.0 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>
/*
 * Callbacks used by migrate_pages(): allocate a destination page for
 * @page, and free an unused destination page.  @private is an opaque
 * cookie passed through unchanged from the migrate_pages() caller.
 */
typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

/* Opaque to most users; defined in mm/internal.h. */
struct migration_target_control;
/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS 0
/*
 * Why a page is being migrated; also used as the index into
 * migrate_reason_names[], so the two must stay in the same order.
 */
enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CONTIG_RANGE,
	MR_TYPES		/* number of reasons; must stay last */
};

/* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
extern const char *migrate_reason_names[MR_TYPES];
#ifdef CONFIG_MIGRATION
|
|
|
|
extern void putback_movable_pages(struct list_head *l);
|
|
extern int migrate_page(struct address_space *mapping,
|
|
struct page *newpage, struct page *page,
|
|
enum migrate_mode mode);
|
|
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
|
|
unsigned long private, enum migrate_mode mode, int reason);
|
|
extern struct page *alloc_migration_target(struct page *page, unsigned long private);
|
|
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
|
|
extern void putback_movable_page(struct page *page);
|
|
|
|
extern int migrate_prep(void);
|
|
extern int migrate_prep_local(void);
|
|
extern void migrate_page_states(struct page *newpage, struct page *page);
|
|
extern void migrate_page_copy(struct page *newpage, struct page *page);
|
|
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
|
|
struct page *newpage, struct page *page);
|
|
extern int migrate_page_move_mapping(struct address_space *mapping,
|
|
struct page *newpage, struct page *page, int extra_count);
|
|
#else
|
|
|
|
static inline void putback_movable_pages(struct list_head *l) {}
|
|
static inline int migrate_pages(struct list_head *l, new_page_t new,
|
|
free_page_t free, unsigned long private, enum migrate_mode mode,
|
|
int reason)
|
|
{ return -ENOSYS; }
|
|
static inline struct page *alloc_migration_target(struct page *page,
|
|
unsigned long private)
|
|
{ return NULL; }
|
|
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
|
|
{ return -EBUSY; }
|
|
|
|
static inline int migrate_prep(void) { return -ENOSYS; }
|
|
static inline int migrate_prep_local(void) { return -ENOSYS; }
|
|
|
|
static inline void migrate_page_states(struct page *newpage, struct page *page)
|
|
{
|
|
}
|
|
|
|
static inline void migrate_page_copy(struct page *newpage,
|
|
struct page *page) {}
|
|
|
|
static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
|
|
struct page *newpage, struct page *page)
|
|
{
|
|
return -ENOSYS;
|
|
}
|
|
|
|
#endif /* CONFIG_MIGRATION */
|
|
|
|
#ifdef CONFIG_COMPACTION
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
/* !CONFIG_COMPACTION: no page is considered movable. */
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
				struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif
#ifdef CONFIG_NUMA_BALANCING
|
|
extern bool pmd_trans_migrating(pmd_t pmd);
|
|
extern int migrate_misplaced_page(struct page *page,
|
|
struct vm_area_struct *vma, int node);
|
|
#else
|
|
static inline bool pmd_trans_migrating(pmd_t pmd)
|
|
{
|
|
return false;
|
|
}
|
|
static inline int migrate_misplaced_page(struct page *page,
|
|
struct vm_area_struct *vma, int node)
|
|
{
|
|
return -EAGAIN; /* can't migrate now */
|
|
}
|
|
#endif /* CONFIG_NUMA_BALANCING */
|
|
|
|
#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
|
|
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
|
|
struct vm_area_struct *vma,
|
|
pmd_t *pmd, pmd_t entry,
|
|
unsigned long address,
|
|
struct page *page, int node);
|
|
#else
|
|
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
|
|
struct vm_area_struct *vma,
|
|
pmd_t *pmd, pmd_t entry,
|
|
unsigned long address,
|
|
struct page *page, int node)
|
|
{
|
|
return -EAGAIN;
|
|
}
|
|
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE*/
|
|
|
|
|
|
#ifdef CONFIG_MIGRATION
/*
|
|
* Watch out for PAE architecture, which has an unsigned long, and might not
|
|
* have enough bits to store all physical address and flags. So far we have
|
|
* enough room for all our flags.
|
|
*/
|
|
#define MIGRATE_PFN_VALID (1UL << 0)
|
|
#define MIGRATE_PFN_MIGRATE (1UL << 1)
|
|
#define MIGRATE_PFN_LOCKED (1UL << 2)
|
|
#define MIGRATE_PFN_WRITE (1UL << 3)
|
|
#define MIGRATE_PFN_SHIFT 6
|
|
|
|
static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
|
|
{
|
|
if (!(mpfn & MIGRATE_PFN_VALID))
|
|
return NULL;
|
|
return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
|
|
}
|
|
|
|
static inline unsigned long migrate_pfn(unsigned long pfn)
|
|
{
|
|
return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
|
|
}
|
|
|
|
/* Bit flags selecting which kinds of source pages a migrate_vma collects. */
enum migrate_vma_direction {
	MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
	MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
};
/* Arguments for the migrate_vma_setup()/pages()/finalize() API. */
struct migrate_vma {
	struct vm_area_struct	*vma;
	/*
	 * Both src and dst array must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 *
	 * The src array must not be modified by the caller after
	 * migrate_vma_setup(), and must not change the dst array after
	 * migrate_vma_pages() returns.
	 */
	unsigned long		*dst;
	unsigned long		*src;
	unsigned long		cpages;
	unsigned long		npages;
	unsigned long		start;	/* range bounds; NOTE(review): presumably page aligned — confirm with callers */
	unsigned long		end;

	/*
	 * Set to the owner value also stored in page->pgmap->owner for
	 * migrating out of device private memory. The flags also need to
	 * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
	 * The caller should always set this field when using mmu notifier
	 * callbacks to avoid device MMU invalidations for device private
	 * pages that are not being migrated.
	 */
	void			*pgmap_owner;
	unsigned long		flags;	/* mask of enum migrate_vma_direction bits */
};
int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */