mm, treewide: introduce NR_PAGE_ORDERS
[ Upstream commit fd37721803 ]

NR_PAGE_ORDERS defines the number of page orders supported by the page
allocator, ranging from 0 to MAX_ORDER, MAX_ORDER + 1 in total.

NR_PAGE_ORDERS assists in defining arrays of page orders and allows for
more natural iteration over them.

[kirill.shutemov@linux.intel.com: fixup for kerneldoc warning]
Link: https://lkml.kernel.org/r/20240101111512.7empzyifq7kxtzk3@box
Link: https://lkml.kernel.org/r/20231228144704.14033-1-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Stable-dep-of: b6976f323a ("drm/ttm: stop pooling cached NUMA pages v2")
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
parent 4c5eaf0cad
commit ded1ffea52
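To illustrate the pattern this change introduces, here is a minimal stand-alone C sketch (not kernel code; MAX_ORDER is hard-coded to 10 purely as an example value, whereas the kernel derives it from the build configuration). It shows how an array sized by NR_PAGE_ORDERS pairs with the natural "order < NR_PAGE_ORDERS" loop bound that replaces the old "order <= MAX_ORDER" form throughout this diff:

    #include <stdio.h>

    /* Example value only; the kernel's MAX_ORDER comes from its configuration. */
    #define MAX_ORDER      10
    /* Mirrors the definition added by this commit: one slot per order 0..MAX_ORDER. */
    #define NR_PAGE_ORDERS (MAX_ORDER + 1)

    int main(void)
    {
            /* Per-order state is sized without the off-by-one-prone "+ 1". */
            unsigned long nr_free[NR_PAGE_ORDERS] = { 0 };
            int order;

            /* Iteration uses the conventional exclusive upper bound. */
            for (order = 0; order < NR_PAGE_ORDERS; order++)
                    printf("order %2d: %lu free blocks\n", order, nr_free[order]);

            return 0;
    }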
@@ -172,7 +172,7 @@ variables.
 Offset of the free_list's member. This value is used to compute the number
 of free pages.
 
-Each zone has a free_area structure array called free_area[MAX_ORDER + 1].
+Each zone has a free_area structure array called free_area[NR_PAGE_ORDERS].
 The free_list represents a linked list of free page blocks.
 
 (list_head, next|prev)
@@ -189,8 +189,8 @@ Offsets of the vmap_area's members. They carry vmalloc-specific
 information. Makedumpfile gets the start address of the vmalloc region
 from this.
 
-(zone.free_area, MAX_ORDER + 1)
--------------------------------
+(zone.free_area, NR_PAGE_ORDERS)
+--------------------------------
 
 Free areas descriptor. User-space tools use this value to iterate the
 free_area ranges. MAX_ORDER is used by the zone buddy allocator.
@@ -16,7 +16,7 @@ struct hyp_pool {
 	 * API at EL2.
 	 */
 	hyp_spinlock_t lock;
-	struct list_head free_area[MAX_ORDER + 1];
+	struct list_head free_area[NR_PAGE_ORDERS];
 	phys_addr_t range_start;
 	phys_addr_t range_end;
 	unsigned short max_order;
@@ -897,7 +897,7 @@ void __init cheetah_ecache_flush_init(void)
 
 	/* Now allocate error trap reporting scoreboard. */
 	sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
-	for (order = 0; order <= MAX_ORDER; order++) {
+	for (order = 0; order < NR_PAGE_ORDERS; order++) {
 		if ((PAGE_SIZE << order) >= sz)
 			break;
 	}
@@ -175,7 +175,7 @@ static void ttm_device_init_pools(struct kunit *test)
 
 	if (params->pools_init_expected) {
 		for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
-			for (int j = 0; j <= MAX_ORDER; ++j) {
+			for (int j = 0; j < NR_PAGE_ORDERS; ++j) {
 				pt = pool->caching[i].orders[j];
 				KUNIT_EXPECT_PTR_EQ(test, pt.pool, pool);
 				KUNIT_EXPECT_EQ(test, pt.caching, i);
@@ -65,11 +65,11 @@ module_param(page_pool_size, ulong, 0644);
 
 static atomic_long_t allocated_pages;
 
-static struct ttm_pool_type global_write_combined[MAX_ORDER + 1];
-static struct ttm_pool_type global_uncached[MAX_ORDER + 1];
+static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
+static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];
 
-static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER + 1];
-static struct ttm_pool_type global_dma32_uncached[MAX_ORDER + 1];
+static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
+static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];
 
 static spinlock_t shrinker_lock;
 static struct list_head shrinker_list;
@@ -565,7 +565,7 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
 
 	if (use_dma_alloc || nid != NUMA_NO_NODE) {
 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-			for (j = 0; j <= MAX_ORDER; ++j)
+			for (j = 0; j < NR_PAGE_ORDERS; ++j)
 				ttm_pool_type_init(&pool->caching[i].orders[j],
 						   pool, i, j);
 	}
@@ -586,7 +586,7 @@ void ttm_pool_fini(struct ttm_pool *pool)
 
 	if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-			for (j = 0; j <= MAX_ORDER; ++j)
+			for (j = 0; j < NR_PAGE_ORDERS; ++j)
 				ttm_pool_type_fini(&pool->caching[i].orders[j]);
 	}
 
@@ -641,7 +641,7 @@ static void ttm_pool_debugfs_header(struct seq_file *m)
 	unsigned int i;
 
 	seq_puts(m, "\t ");
-	for (i = 0; i <= MAX_ORDER; ++i)
+	for (i = 0; i < NR_PAGE_ORDERS; ++i)
 		seq_printf(m, " ---%2u---", i);
 	seq_puts(m, "\n");
 }
@@ -652,7 +652,7 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
 {
 	unsigned int i;
 
-	for (i = 0; i <= MAX_ORDER; ++i)
+	for (i = 0; i < NR_PAGE_ORDERS; ++i)
 		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
 	seq_puts(m, "\n");
 }
@@ -761,7 +761,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
 	spin_lock_init(&shrinker_lock);
 	INIT_LIST_HEAD(&shrinker_list);
 
-	for (i = 0; i <= MAX_ORDER; ++i) {
+	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
 		ttm_pool_type_init(&global_write_combined[i], NULL,
 				   ttm_write_combined, i);
 		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
@@ -794,7 +794,7 @@ void ttm_pool_mgr_fini(void)
 {
 	unsigned int i;
 
-	for (i = 0; i <= MAX_ORDER; ++i) {
+	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
 		ttm_pool_type_fini(&global_write_combined[i]);
 		ttm_pool_type_fini(&global_uncached[i]);
 
@@ -74,7 +74,7 @@ struct ttm_pool {
 	bool use_dma32;
 
 	struct {
-		struct ttm_pool_type orders[MAX_ORDER + 1];
+		struct ttm_pool_type orders[NR_PAGE_ORDERS];
 	} caching[TTM_NUM_CACHING_TYPES];
 };
 
@@ -34,6 +34,8 @@
 
 #define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED(pfn, MAX_ORDER_NR_PAGES)
 
+#define NR_PAGE_ORDERS (MAX_ORDER + 1)
+
 /*
  * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
  * costly to service. That is between allocation orders which should
@@ -95,7 +97,7 @@ static inline bool migratetype_is_mergeable(int mt)
 }
 
 #define for_each_migratetype_order(order, type) \
-	for (order = 0; order <= MAX_ORDER; order++) \
+	for (order = 0; order < NR_PAGE_ORDERS; order++) \
 		for (type = 0; type < MIGRATE_TYPES; type++)
 
 extern int page_group_by_mobility_disabled;
@@ -929,7 +931,7 @@ struct zone {
 	CACHELINE_PADDING(_pad1_);
 
 	/* free areas of different sizes */
-	struct free_area free_area[MAX_ORDER + 1];
+	struct free_area free_area[NR_PAGE_ORDERS];
 
 #ifdef CONFIG_UNACCEPTED_MEMORY
 	/* Pages to be accepted. All pages on the list are MAX_ORDER */
@@ -660,7 +660,7 @@ static int __init crash_save_vmcoreinfo_init(void)
 	VMCOREINFO_OFFSET(list_head, prev);
 	VMCOREINFO_OFFSET(vmap_area, va_start);
 	VMCOREINFO_OFFSET(vmap_area, list);
-	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER + 1);
+	VMCOREINFO_LENGTH(zone.free_area, NR_PAGE_ORDERS);
 	log_buf_vmcoreinfo_setup();
 	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
 	VMCOREINFO_NUMBER(NR_FREE_PAGES);
@@ -93,7 +93,7 @@ static int __init test_pages(int *total_failures)
 	int failures = 0, num_tests = 0;
 	int i;
 
-	for (i = 0; i <= MAX_ORDER; i++)
+	for (i = 0; i < NR_PAGE_ORDERS; i++)
 		num_tests += do_alloc_pages_order(i, &failures);
 
 	REPORT_FAILURES_IN_FN();
@@ -2225,7 +2225,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
 
 	/* Direct compactor: Is a suitable page free? */
 	ret = COMPACT_NO_SUITABLE_PAGE;
-	for (order = cc->order; order <= MAX_ORDER; order++) {
+	for (order = cc->order; order < NR_PAGE_ORDERS; order++) {
 		struct free_area *area = &cc->zone->free_area[order];
 		bool can_steal;
 
@@ -96,7 +96,7 @@ void __init kmsan_init_shadow(void)
 struct metadata_page_pair {
 	struct page *shadow, *origin;
 };
-static struct metadata_page_pair held_back[MAX_ORDER + 1] __initdata;
+static struct metadata_page_pair held_back[NR_PAGE_ORDERS] __initdata;
 
 /*
  * Eager metadata allocation. When the memblock allocator is freeing pages to
@@ -1570,7 +1570,7 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 	struct page *page;
 
 	/* Find a page of the appropriate size in the preferred list */
-	for (current_order = order; current_order <= MAX_ORDER; ++current_order) {
+	for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) {
 		area = &(zone->free_area[current_order]);
 		page = get_page_from_free_area(area, migratetype);
 		if (!page)
@@ -1940,7 +1940,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 			continue;
 
 		spin_lock_irqsave(&zone->lock, flags);
-		for (order = 0; order <= MAX_ORDER; order++) {
+		for (order = 0; order < NR_PAGE_ORDERS; order++) {
 			struct free_area *area = &(zone->free_area[order]);
 
 			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
@@ -2050,8 +2050,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
 		return false;
 
 find_smallest:
-	for (current_order = order; current_order <= MAX_ORDER;
-							current_order++) {
+	for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
 		area = &(zone->free_area[current_order]);
 		fallback_mt = find_suitable_fallback(area, current_order,
 				start_migratetype, false, &can_steal);
@@ -2884,7 +2883,7 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
 		return true;
 
 	/* For a high-order request, check at least one suitable page is free */
-	for (o = order; o <= MAX_ORDER; o++) {
+	for (o = order; o < NR_PAGE_ORDERS; o++) {
 		struct free_area *area = &z->free_area[o];
 		int mt;
 
@@ -6442,7 +6441,7 @@ bool is_free_buddy_page(struct page *page)
 	unsigned long pfn = page_to_pfn(page);
 	unsigned int order;
 
-	for (order = 0; order <= MAX_ORDER; order++) {
+	for (order = 0; order < NR_PAGE_ORDERS; order++) {
 		struct page *page_head = page - (pfn & ((1 << order) - 1));
 
 		if (PageBuddy(page_head) &&
@@ -6501,7 +6500,7 @@ bool take_page_off_buddy(struct page *page)
 	bool ret = false;
 
 	spin_lock_irqsave(&zone->lock, flags);
-	for (order = 0; order <= MAX_ORDER; order++) {
+	for (order = 0; order < NR_PAGE_ORDERS; order++) {
 		struct page *page_head = page - (pfn & ((1 << order) - 1));
 		int page_order = buddy_order(page_head);
 
@@ -276,7 +276,7 @@ page_reporting_process_zone(struct page_reporting_dev_info *prdev,
 		return err;
 
 	/* Process each free list starting from lowest order/mt */
-	for (order = page_reporting_order; order <= MAX_ORDER; order++) {
+	for (order = page_reporting_order; order < NR_PAGE_ORDERS; order++) {
 		for (mt = 0; mt < MIGRATE_TYPES; mt++) {
 			/* We do not pull pages from the isolate free list */
 			if (is_migrate_isolate(mt))
@@ -355,8 +355,8 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z
 
 	for_each_populated_zone(zone) {
 		unsigned int order;
-		unsigned long nr[MAX_ORDER + 1], flags, total = 0;
-		unsigned char types[MAX_ORDER + 1];
+		unsigned long nr[NR_PAGE_ORDERS], flags, total = 0;
+		unsigned char types[NR_PAGE_ORDERS];
 
 		if (zone_idx(zone) > max_zone_idx)
 			continue;
@@ -366,7 +366,7 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z
 		printk(KERN_CONT "%s: ", zone->name);
 
 		spin_lock_irqsave(&zone->lock, flags);
-		for (order = 0; order <= MAX_ORDER; order++) {
+		for (order = 0; order < NR_PAGE_ORDERS; order++) {
 			struct free_area *area = &zone->free_area[order];
 			int type;
 
@@ -380,7 +380,7 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z
 			}
 		}
 		spin_unlock_irqrestore(&zone->lock, flags);
-		for (order = 0; order <= MAX_ORDER; order++) {
+		for (order = 0; order < NR_PAGE_ORDERS; order++) {
 			printk(KERN_CONT "%lu*%lukB ",
 			       nr[order], K(1UL) << order);
 			if (nr[order])
mm/vmstat.c (12 changed lines)
@@ -1055,7 +1055,7 @@ static void fill_contig_page_info(struct zone *zone,
 	info->free_blocks_total = 0;
 	info->free_blocks_suitable = 0;
 
-	for (order = 0; order <= MAX_ORDER; order++) {
+	for (order = 0; order < NR_PAGE_ORDERS; order++) {
 		unsigned long blocks;
 
 		/*
@@ -1471,7 +1471,7 @@ static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
 	int order;
 
 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
-	for (order = 0; order <= MAX_ORDER; ++order)
+	for (order = 0; order < NR_PAGE_ORDERS; ++order)
 		/*
 		 * Access to nr_free is lockless as nr_free is used only for
 		 * printing purposes. Use data_race to avoid KCSAN warning.
@@ -1500,7 +1500,7 @@ static void pagetypeinfo_showfree_print(struct seq_file *m,
 					pgdat->node_id,
 					zone->name,
 					migratetype_names[mtype]);
-		for (order = 0; order <= MAX_ORDER; ++order) {
+		for (order = 0; order < NR_PAGE_ORDERS; ++order) {
 			unsigned long freecount = 0;
 			struct free_area *area;
 			struct list_head *curr;
@@ -1540,7 +1540,7 @@ static void pagetypeinfo_showfree(struct seq_file *m, void *arg)
 
 	/* Print header */
 	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
-	for (order = 0; order <= MAX_ORDER; ++order)
+	for (order = 0; order < NR_PAGE_ORDERS; ++order)
 		seq_printf(m, "%6d ", order);
 	seq_putc(m, '\n');
 
@@ -2176,7 +2176,7 @@ static void unusable_show_print(struct seq_file *m,
 	seq_printf(m, "Node %d, zone %8s ",
 				pgdat->node_id,
 				zone->name);
-	for (order = 0; order <= MAX_ORDER; ++order) {
+	for (order = 0; order < NR_PAGE_ORDERS; ++order) {
 		fill_contig_page_info(zone, order, &info);
 		index = unusable_free_index(order, &info);
 		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
@@ -2228,7 +2228,7 @@ static void extfrag_show_print(struct seq_file *m,
 	seq_printf(m, "Node %d, zone %8s ",
 				pgdat->node_id,
 				zone->name);
-	for (order = 0; order <= MAX_ORDER; ++order) {
+	for (order = 0; order < NR_PAGE_ORDERS; ++order) {
 		fill_contig_page_info(zone, order, &info);
 		index = __fragmentation_index(order, &info);
 		seq_printf(m, "%2d.%03d ", index / 1000, index % 1000);