mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-18 18:23:53 +08:00
1c676e0d9b
PG_idle and PG_young allow the two PTE Accessed bit users, Idle Page Tracking and the reclaim logic concurrently work while not interfering with each other. That is, when they need to clear the Accessed bit, they set PG_young to represent the previous state of the bit, respectively. And when they need to read the bit, if the bit is cleared, they further read the PG_young to know whether the other has cleared the bit meanwhile or not. For yet another user of the PTE Accessed bit, we could add another page flag, or extend the mechanism to use the flags. For the DAMON usecase, however, we don't need to do that just yet. IDLE_PAGE_TRACKING and DAMON are mutually exclusive, so there's only ever going to be one user of the current set of flags. In this commit, we split out the CONFIG options to allow for the use of PG_young and PG_idle outside of idle page tracking. In the next commit, DAMON's reference implementation of the virtual memory address space monitoring primitives will use it. [sjpark@amazon.de: set PAGE_EXTENSION for non-64BIT] Link: https://lkml.kernel.org/r/20210806095153.6444-1-sj38.park@gmail.com [akpm@linux-foundation.org: tweak Kconfig text] [sjpark@amazon.de: hide PAGE_IDLE_FLAG from users] Link: https://lkml.kernel.org/r/20210813081238.34705-1-sj38.park@gmail.com Link: https://lkml.kernel.org/r/20210716081449.22187-5-sj38.park@gmail.com Signed-off-by: SeongJae Park <sjpark@amazon.de> Reviewed-by: Shakeel Butt <shakeelb@google.com> Reviewed-by: Fernand Sieber <sieberf@amazon.com> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Amit Shah <amit@kernel.org> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Brendan Higgins <brendanhiggins@google.com> Cc: David Hildenbrand <david@redhat.com> Cc: David Rientjes <rientjes@google.com> Cc: David Woodhouse <dwmw@amazon.com> Cc: Fan Du <fan.du@intel.com> Cc: Greg Kroah-Hartman <greg@kroah.com> Cc: Greg Thelen <gthelen@google.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Joe Perches 
<joe@perches.com> Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com> Cc: Jonathan Corbet <corbet@lwn.net> Cc: Leonard Foerster <foersleo@amazon.de> Cc: Marco Elver <elver@google.com> Cc: Markus Boehme <markubo@amazon.de> Cc: Maximilian Heyne <mheyne@amazon.de> Cc: Mel Gorman <mgorman@suse.de> Cc: Minchan Kim <minchan@kernel.org> Cc: Namhyung Kim <namhyung@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Rik van Riel <riel@surriel.com> Cc: Shuah Khan <shuah@kernel.org> Cc: Steven Rostedt (VMware) <rostedt@goodmis.org> Cc: Vladimir Davydov <vdavydov.dev@gmail.com> Cc: Vlastimil Babka <vbabka@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
92 lines
1.8 KiB
C
92 lines
1.8 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef __LINUX_PAGE_EXT_H
|
|
#define __LINUX_PAGE_EXT_H
|
|
|
|
#include <linux/types.h>
|
|
#include <linux/stacktrace.h>
|
|
#include <linux/stackdepot.h>
|
|
|
|
struct pglist_data;
|
|
/*
 * A client of the page extension mechanism (e.g. page_owner) registers one
 * of these to reserve per-page storage inside struct page_ext.
 */
struct page_ext_operations {
	/* byte offset of this client's data within each page_ext entry */
	size_t offset;
	/* number of bytes this client needs per page — TODO confirm against page_ext.c */
	size_t size;
	/* return true if the feature is enabled and storage should be reserved */
	bool (*need)(void);
	/* one-time initialization hook, called after page_ext allocation */
	void (*init)(void);
};
|
|
|
|
#ifdef CONFIG_PAGE_EXTENSION
|
|
|
|
/* Bit indices into page_ext->flags. */
enum page_ext_flags {
	PAGE_EXT_OWNER,			/* page_owner is tracking this page */
	PAGE_EXT_OWNER_ALLOCATED,	/* page_owner: page is currently allocated */
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
	/*
	 * On 32-bit there is no room for PG_young/PG_idle in page->flags,
	 * so the idle-tracking bits live here instead (see CONFIG_PAGE_IDLE_FLAG,
	 * split out of IDLE_PAGE_TRACKING so DAMON can use it too).
	 */
	PAGE_EXT_YOUNG,
	PAGE_EXT_IDLE,
#endif
};
|
|
|
|
/*
 * Page Extension can be considered as an extended mem_map.
 * A page_ext page is associated with every page descriptor. The
 * page_ext helps us add more information about the page.
 * All page_ext are allocated at boot or memory hotplug event,
 * then the page_ext for pfn always exists.
 */
struct page_ext {
	unsigned long flags;	/* bitmap indexed by enum page_ext_flags */
};
|
|
|
|
/* Total per-page entry size: sizeof(struct page_ext) plus all clients' data. */
extern unsigned long page_ext_size;
/* Per-node page_ext bookkeeping setup. */
extern void pgdat_page_ext_init(struct pglist_data *pgdat);
|
|
|
|
/*
 * The memory-model decides which initializer does the real work:
 * SPARSEMEM uses page_ext_init() and the flatmem hooks are no-ops;
 * flatmem uses the two flatmem hooks and page_ext_init() is a no-op.
 */
#ifdef CONFIG_SPARSEMEM
static inline void page_ext_init_flatmem(void)
{
}
extern void page_ext_init(void);
static inline void page_ext_init_flatmem_late(void)
{
}
#else
extern void page_ext_init_flatmem(void);
extern void page_ext_init_flatmem_late(void);
static inline void page_ext_init(void)
{
}
#endif
|
|
|
|
/* Find the page_ext entry for a page, or NULL if not (yet) allocated. */
struct page_ext *lookup_page_ext(const struct page *page);

/* Redeclared here so this block is self-contained; defined in page_ext.c. */
extern unsigned long page_ext_size;

/*
 * Step to the page_ext entry of the next pfn.  Entries are laid out
 * contiguously with a runtime stride of page_ext_size bytes (clients may
 * have appended their own data after struct page_ext).
 */
static inline struct page_ext *page_ext_next(struct page_ext *curr)
{
	return (struct page_ext *)((char *)curr + page_ext_size);
}
|
|
|
|
#else /* !CONFIG_PAGE_EXTENSION */
|
|
struct page_ext;

/*
 * CONFIG_PAGE_EXTENSION is off: no page_ext storage exists, so lookups
 * always yield NULL and every initializer collapses to a no-op.
 */
static inline struct page_ext *lookup_page_ext(const struct page *page)
{
	return NULL;
}

static inline void pgdat_page_ext_init(struct pglist_data *pgdat) { }

static inline void page_ext_init(void) { }

static inline void page_ext_init_flatmem_late(void) { }

static inline void page_ext_init_flatmem(void) { }
|
|
#endif /* CONFIG_PAGE_EXTENSION */
|
|
#endif /* __LINUX_PAGE_EXT_H */
|