mirror of https://github.com/edk2-porting/linux-next.git
666feb21a0
No allocation callback is using this argument anymore.  new_page_node
used to use this parameter to convey node_id resp. migration error up to
move_pages code (do_move_page_to_node_array).  The error status never
made it into the final status field and we have a better way to
communicate node id to the status field now.  All other allocation
callbacks simply ignored the argument so we can drop it finally.

[mhocko@suse.com: fix migration callback]
  Link: http://lkml.kernel.org/r/20180105085259.GH2801@dhcp22.suse.cz
[akpm@linux-foundation.org: fix alloc_misplaced_dst_page()]
[mhocko@kernel.org: fix build]
  Link: http://lkml.kernel.org/r/20180103091134.GB11319@dhcp22.suse.cz
Link: http://lkml.kernel.org/r/20180103082555.14592-3-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Zi Yan <zi.yan@cs.rutgers.edu>
Cc: Andrea Reale <ar@linux.vnet.ibm.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
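For illustration only, a minimal sketch of what a migration allocation callback can look like once the extra argument is gone; it mirrors the two-argument shape of alloc_migrate_target() declared in the header below. The callback name demo_alloc_dst and the alloc_page()/GFP_HIGHUSER_MOVABLE choice are assumptions for the sketch, not code taken from this commit.

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical allocation callback: only the source page and an opaque
 * "private" cookie remain; the argument that new_page_node used to carry
 * a node id or error back to move_pages is gone. */
static struct page *demo_alloc_dst(struct page *page, unsigned long private)
{
        /* Allocate any movable destination page; real callbacks typically
         * pick a node or zone based on the source page or "private". */
        return alloc_page(GFP_HIGHUSER_MOVABLE);
}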
69 lines
1.9 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PAGEISOLATION_H
#define __LINUX_PAGEISOLATION_H

#ifdef CONFIG_MEMORY_ISOLATION
static inline bool has_isolate_pageblock(struct zone *zone)
{
        return zone->nr_isolate_pageblock;
}
static inline bool is_migrate_isolate_page(struct page *page)
{
        return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
}
static inline bool is_migrate_isolate(int migratetype)
{
        return migratetype == MIGRATE_ISOLATE;
}
#else
static inline bool has_isolate_pageblock(struct zone *zone)
{
        return false;
}
static inline bool is_migrate_isolate_page(struct page *page)
{
        return false;
}
static inline bool is_migrate_isolate(int migratetype)
{
        return false;
}
#endif

bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
                         int migratetype, bool skip_hwpoisoned_pages);
void set_pageblock_migratetype(struct page *page, int migratetype);
int move_freepages_block(struct zone *zone, struct page *page,
                         int migratetype, int *num_movable);

/*
 * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
 * If the specified range includes migrate types other than MOVABLE or CMA,
 * this will fail with -EBUSY.
 *
 * To finally isolate all pages in the range, the caller has to free
 * all pages in the range.  test_pages_isolated() can be used to check
 * whether this has happened.
 */
int
start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                         unsigned migratetype, bool skip_hwpoisoned_pages);

/*
 * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
 * The target range is [start_pfn, end_pfn).
 */
int
undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                        unsigned migratetype);

/*
 * Test whether all pages in [start_pfn, end_pfn) are isolated.
 */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
                        bool skip_hwpoisoned_pages);

struct page *alloc_migrate_target(struct page *page, unsigned long private);

#endif
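The comments above the prototypes describe the intended calling sequence; here is a hedged usage sketch (not taken from the kernel tree) of how a caller such as memory hotplug or CMA might drive these helpers over a PFN range. The function name isolate_range_sketch, the choice of MIGRATE_MOVABLE, passing true for skip_hwpoisoned_pages, and the elided migrate-or-free step are all illustrative assumptions.

#include <linux/page-isolation.h>
#include <linux/mmzone.h>

/* Hypothetical caller: isolate a PFN range, verify it, then undo. */
static int isolate_range_sketch(unsigned long start_pfn, unsigned long end_pfn)
{
        int ret;

        /* Mark all pageblocks in the range MIGRATE_ISOLATE; this fails
         * with -EBUSY if the range contains pageblocks that are neither
         * MOVABLE nor CMA. */
        ret = start_isolate_page_range(start_pfn, end_pfn,
                                       MIGRATE_MOVABLE, true);
        if (ret)
                return ret;

        /* ... migrate or free every page in the range here ... */

        /* Check that nothing in the range is still allocated; a nonzero
         * return is treated as failure in this sketch. */
        ret = test_pages_isolated(start_pfn, end_pfn, true);

        /* Flip the pageblocks back to MIGRATE_MOVABLE when done. */
        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        return ret;
}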