8f0f4788b1

Since all calls use folio_xchg_last_cpupid(), remove page_cpupid_xchg_last().

Link: https://lkml.kernel.org/r/20231018140806.2783514-20-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
113 lines
2.5 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/mmzone.c
 *
 * management codes for pgdats, zones and page flags
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
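/*
 * Usage sketch (illustrative): the two helpers below back the
 * for_each_online_pgdat() iterator defined in <linux/mmzone.h>, e.g.:
 *
 *	struct pglist_data *pgdat;
 *
 *	for_each_online_pgdat(pgdat)
 *		pr_info("node %d: %lu present pages\n",
 *			pgdat->node_id, pgdat->node_present_pages);
 */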
struct pglist_data *first_online_pgdat(void)
{
	return NODE_DATA(first_online_node);
}

struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
	int nid = next_online_node(pgdat->node_id);

	if (nid == MAX_NUMNODES)
		return NULL;
	return NODE_DATA(nid);
}
/*
 * next_zone - helper magic for for_each_zone()
 */
struct zone *next_zone(struct zone *zone)
{
	pg_data_t *pgdat = zone->zone_pgdat;

	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
		zone++;
	else {
		pgdat = next_online_pgdat(pgdat);
		if (pgdat)
			zone = pgdat->node_zones;
		else
			zone = NULL;
	}
	return zone;
}
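/*
 * Usage sketch (illustrative): for_each_zone() in <linux/mmzone.h> chains
 * next_zone() to walk every zone of every online node, roughly:
 *
 *	struct zone *zone;
 *
 *	for (zone = (first_online_pgdat())->node_zones; zone;
 *	     zone = next_zone(zone))
 *		if (populated_zone(zone))
 *			pr_info("zone %s\n", zone->name);
 */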
static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
{
#ifdef CONFIG_NUMA
	return node_isset(zonelist_node_idx(zref), *nodes);
#else
	return 1;
#endif /* CONFIG_NUMA */
}
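/*
 * Context sketch (illustrative): __next_zones_zonelist() is the
 * out-of-line slow path of next_zones_zonelist() in <linux/mmzone.h>,
 * whose inline fast path short-circuits the common case, roughly:
 *
 *	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
 *		return z;
 *	return __next_zones_zonelist(z, highest_zoneidx, nodes);
 */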
/* Returns the next zone at or below highest_zoneidx in a zonelist */
struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	/*
	 * Find the next suitable zone to use for the allocation.
	 * Only filter based on nodemask if it's set
	 */
	if (unlikely(nodes == NULL))
		while (zonelist_zone_idx(z) > highest_zoneidx)
			z++;
	else
		while (zonelist_zone_idx(z) > highest_zoneidx ||
				(z->zone && !zref_in_nodemask(z, nodes)))
			z++;

	return z;
}
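/*
 * Context note (assumption about callers, not stated in this file):
 * lruvec_init() runs once per lruvec, e.g. for each node's
 * pgdat->__lruvec at boot and for each per-memcg, per-node lruvec when
 * a memory cgroup is created.
 */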
void lruvec_init(struct lruvec *lruvec)
{
	enum lru_list lru;

	memset(lruvec, 0, sizeof(struct lruvec));
	spin_lock_init(&lruvec->lru_lock);

	for_each_lru(lru)
		INIT_LIST_HEAD(&lruvec->lists[lru]);
	/*
	 * The "Unevictable LRU" is imaginary: though its size is maintained,
	 * it is never scanned, and unevictable pages are not threaded on it
	 * (so that their lru fields can be reused to hold mlock_count).
	 * Poison its list head, so that any operations on it would crash.
	 */
	list_del(&lruvec->lists[LRU_UNEVICTABLE]);

	lru_gen_init_lruvec(lruvec);
}
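/*
 * Note: this is the NUMA-balancing variant that keeps last_cpupid packed
 * inside folio->flags; it swaps in the new value with a lock-free
 * cmpxchg loop and returns the previous one.
 */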
#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
	unsigned long old_flags, flags;
	int last_cpupid;

	old_flags = READ_ONCE(folio->flags);
	do {
		flags = old_flags;
		last_cpupid = (flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;

		flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
		flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
	} while (unlikely(!try_cmpxchg(&folio->flags, &old_flags, flags)));

	return last_cpupid;
}
#endif
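/*
 * Sketch (assumption, based on <linux/mm.h>): when
 * LAST_CPUPID_NOT_IN_PAGE_FLAGS is defined, i.e. the cpupid no longer
 * fits in the flags word, an inline variant stores it in a dedicated
 * field instead, roughly:
 *
 *	return xchg(&folio->_last_cpupid, cpupid & LAST_CPUPID_MASK);
 */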