Mirror of https://github.com/edk2-porting/linux-next.git, synced 2024-12-28 07:04:00 +08:00
1899ad18c6
Refaults happen during transitions between workingsets as well as in-place
thrashing. Knowing the difference between the two has a range of applications,
including measuring the impact of memory shortage on the system performance,
as well as the ability to smarter balance pressure between the filesystem
cache and the swap-backed workingset.

During workingset transitions, inactive cache refaults and pushes out
established active cache. When that active cache isn't stale, however, and
also ends up refaulting, that's bonafide thrashing.

Introduce a new page flag that tells on eviction whether the page has been
active or not in its lifetime. This bit is then stored in the shadow entry,
to classify refaults as transitioning or thrashing.

How many page->flags does this leave us with on 32-bit?

	20 bits are always page flags
	21 if you have an MMU
	23 with the zone bits for DMA, Normal, HighMem, Movable
	29 with the sparsemem section bits
	30 if PAE is enabled
	31 with this patch.

So on 32-bit PAE, that leaves 1 bit for distinguishing two NUMA nodes. If
that's not enough, the system can switch to discontigmem and re-gain the
6 or 7 sparsemem section bits.

Link: http://lkml.kernel.org/r/20180828172258.3185-3-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Daniel Drake <drake@endlessm.com>
Tested-by: Suren Baghdasaryan <surenb@google.com>
Cc: Christopher Lameter <cl@linux.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Johannes Weiner <jweiner@fb.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Enderborg <peter.enderborg@sony.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vinayak Menon <vinmenon@codeaurora.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
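As a rough illustration of the mechanism described in the commit message (not the kernel's actual pack_shadow()/unpack_shadow() encoding), the stand-alone sketch below packs a hypothetical eviction counter together with a one-bit "was active" flag into a shadow-entry-like word, then recovers that bit on refault to tell a workingset transition from thrashing. All identifiers here (pack_shadow_demo, unpack_shadow_demo, SHADOW_WORKINGSET_BIT) are made up for the example.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical layout: bit 0 records whether the page had been active
 * (the role of the new page flag); the remaining bits hold an eviction
 * counter. The real kernel packs more fields (memcg id, node, etc.). */
#define SHADOW_WORKINGSET_BIT	0x1UL

static unsigned long pack_shadow_demo(unsigned long eviction, bool was_active)
{
	return (eviction << 1) | (was_active ? SHADOW_WORKINGSET_BIT : 0);
}

static void unpack_shadow_demo(unsigned long shadow,
			       unsigned long *eviction, bool *was_active)
{
	*was_active = shadow & SHADOW_WORKINGSET_BIT;
	*eviction = shadow >> 1;
}

int main(void)
{
	unsigned long shadow = pack_shadow_demo(12345, true);
	unsigned long eviction;
	bool was_active;

	unpack_shadow_demo(shadow, &eviction, &was_active);
	/* A refaulting page that had been active is thrashing; a page that
	 * never made it to the active list is a workingset transition. */
	printf("eviction=%lu, refault classified as %s\n", eviction,
	       was_active ? "thrashing" : "workingset transition");
	return 0;
}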
265 lines, 8.3 KiB, C
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/node.h>
#include <linux/mmzone.h>
#include <linux/compaction.h>
/*
 * The order of these masks is important. Matching masks will be seen
 * first and the left over flags will end up showing by themselves.
 *
 * For example, if we have GFP_KERNEL before GFP_USER we will get:
 *
 * GFP_KERNEL|GFP_HARDWALL
 *
 * Thus most bits set go first.
 */

#define __def_gfpflag_names \
	{(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
	{(unsigned long)GFP_TRANSHUGE_LIGHT, "GFP_TRANSHUGE_LIGHT"}, \
	{(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \
	{(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
	{(unsigned long)GFP_USER, "GFP_USER"}, \
	{(unsigned long)GFP_KERNEL_ACCOUNT, "GFP_KERNEL_ACCOUNT"}, \
	{(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \
	{(unsigned long)GFP_NOFS, "GFP_NOFS"}, \
	{(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \
	{(unsigned long)GFP_NOIO, "GFP_NOIO"}, \
	{(unsigned long)GFP_NOWAIT, "GFP_NOWAIT"}, \
	{(unsigned long)GFP_DMA, "GFP_DMA"}, \
	{(unsigned long)__GFP_HIGHMEM, "__GFP_HIGHMEM"}, \
	{(unsigned long)GFP_DMA32, "GFP_DMA32"}, \
	{(unsigned long)__GFP_HIGH, "__GFP_HIGH"}, \
	{(unsigned long)__GFP_ATOMIC, "__GFP_ATOMIC"}, \
	{(unsigned long)__GFP_IO, "__GFP_IO"}, \
	{(unsigned long)__GFP_FS, "__GFP_FS"}, \
	{(unsigned long)__GFP_NOWARN, "__GFP_NOWARN"}, \
	{(unsigned long)__GFP_RETRY_MAYFAIL, "__GFP_RETRY_MAYFAIL"}, \
	{(unsigned long)__GFP_NOFAIL, "__GFP_NOFAIL"}, \
	{(unsigned long)__GFP_NORETRY, "__GFP_NORETRY"}, \
	{(unsigned long)__GFP_COMP, "__GFP_COMP"}, \
	{(unsigned long)__GFP_ZERO, "__GFP_ZERO"}, \
	{(unsigned long)__GFP_NOMEMALLOC, "__GFP_NOMEMALLOC"}, \
	{(unsigned long)__GFP_MEMALLOC, "__GFP_MEMALLOC"}, \
	{(unsigned long)__GFP_HARDWALL, "__GFP_HARDWALL"}, \
	{(unsigned long)__GFP_THISNODE, "__GFP_THISNODE"}, \
	{(unsigned long)__GFP_RECLAIMABLE, "__GFP_RECLAIMABLE"}, \
	{(unsigned long)__GFP_MOVABLE, "__GFP_MOVABLE"}, \
	{(unsigned long)__GFP_ACCOUNT, "__GFP_ACCOUNT"}, \
	{(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \
	{(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \
	{(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"}, \
	{(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"}\

#define show_gfp_flags(flags) \
	(flags) ? __print_flags(flags, "|", \
	__def_gfpflag_names \
	) : "none"

#ifdef CONFIG_MMU
#define IF_HAVE_PG_MLOCK(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_MLOCK(flag,string)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
#define IF_HAVE_PG_UNCACHED(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_UNCACHED(flag,string)
#endif

#ifdef CONFIG_MEMORY_FAILURE
#define IF_HAVE_PG_HWPOISON(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_HWPOISON(flag,string)
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
#define IF_HAVE_PG_IDLE(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_IDLE(flag,string)
#endif

#define __def_pageflag_names \
	{1UL << PG_locked, "locked" }, \
	{1UL << PG_waiters, "waiters" }, \
	{1UL << PG_error, "error" }, \
	{1UL << PG_referenced, "referenced" }, \
	{1UL << PG_uptodate, "uptodate" }, \
	{1UL << PG_dirty, "dirty" }, \
	{1UL << PG_lru, "lru" }, \
	{1UL << PG_active, "active" }, \
	{1UL << PG_workingset, "workingset" }, \
	{1UL << PG_slab, "slab" }, \
	{1UL << PG_owner_priv_1, "owner_priv_1" }, \
	{1UL << PG_arch_1, "arch_1" }, \
	{1UL << PG_reserved, "reserved" }, \
	{1UL << PG_private, "private" }, \
	{1UL << PG_private_2, "private_2" }, \
	{1UL << PG_writeback, "writeback" }, \
	{1UL << PG_head, "head" }, \
	{1UL << PG_mappedtodisk, "mappedtodisk" }, \
	{1UL << PG_reclaim, "reclaim" }, \
	{1UL << PG_swapbacked, "swapbacked" }, \
	{1UL << PG_unevictable, "unevictable" } \
IF_HAVE_PG_MLOCK(PG_mlocked, "mlocked" ) \
IF_HAVE_PG_UNCACHED(PG_uncached, "uncached" ) \
IF_HAVE_PG_HWPOISON(PG_hwpoison, "hwpoison" ) \
IF_HAVE_PG_IDLE(PG_young, "young" ) \
IF_HAVE_PG_IDLE(PG_idle, "idle" )

#define show_page_flags(flags) \
	(flags) ? __print_flags(flags, "|", \
	__def_pageflag_names \
	) : "none"

#if defined(CONFIG_X86)
#define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" }
#elif defined(CONFIG_PPC)
#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" }
#elif defined(CONFIG_PARISC) || defined(CONFIG_IA64)
#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" }
#elif !defined(CONFIG_MMU)
#define __VM_ARCH_SPECIFIC_1 {VM_MAPPED_COPY, "mappedcopy" }
#else
#define __VM_ARCH_SPECIFIC_1 {VM_ARCH_1, "arch_1" }
#endif

#ifdef CONFIG_MEM_SOFT_DIRTY
#define IF_HAVE_VM_SOFTDIRTY(flag,name) {flag, name },
#else
#define IF_HAVE_VM_SOFTDIRTY(flag,name)
#endif

#define __def_vmaflag_names \
	{VM_READ, "read" }, \
	{VM_WRITE, "write" }, \
	{VM_EXEC, "exec" }, \
	{VM_SHARED, "shared" }, \
	{VM_MAYREAD, "mayread" }, \
	{VM_MAYWRITE, "maywrite" }, \
	{VM_MAYEXEC, "mayexec" }, \
	{VM_MAYSHARE, "mayshare" }, \
	{VM_GROWSDOWN, "growsdown" }, \
	{VM_UFFD_MISSING, "uffd_missing" }, \
	{VM_PFNMAP, "pfnmap" }, \
	{VM_DENYWRITE, "denywrite" }, \
	{VM_UFFD_WP, "uffd_wp" }, \
	{VM_LOCKED, "locked" }, \
	{VM_IO, "io" }, \
	{VM_SEQ_READ, "seqread" }, \
	{VM_RAND_READ, "randread" }, \
	{VM_DONTCOPY, "dontcopy" }, \
	{VM_DONTEXPAND, "dontexpand" }, \
	{VM_LOCKONFAULT, "lockonfault" }, \
	{VM_ACCOUNT, "account" }, \
	{VM_NORESERVE, "noreserve" }, \
	{VM_HUGETLB, "hugetlb" }, \
	__VM_ARCH_SPECIFIC_1 , \
	{VM_WIPEONFORK, "wipeonfork" }, \
	{VM_DONTDUMP, "dontdump" }, \
IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \
	{VM_MIXEDMAP, "mixedmap" }, \
	{VM_HUGEPAGE, "hugepage" }, \
	{VM_NOHUGEPAGE, "nohugepage" }, \
	{VM_MERGEABLE, "mergeable" } \

#define show_vma_flags(flags) \
	(flags) ? __print_flags(flags, "|", \
	__def_vmaflag_names \
	) : "none"

#ifdef CONFIG_COMPACTION
#define COMPACTION_STATUS \
	EM( COMPACT_SKIPPED, "skipped") \
	EM( COMPACT_DEFERRED, "deferred") \
	EM( COMPACT_CONTINUE, "continue") \
	EM( COMPACT_SUCCESS, "success") \
	EM( COMPACT_PARTIAL_SKIPPED, "partial_skipped") \
	EM( COMPACT_COMPLETE, "complete") \
	EM( COMPACT_NO_SUITABLE_PAGE, "no_suitable_page") \
	EM( COMPACT_NOT_SUITABLE_ZONE, "not_suitable_zone") \
	EMe(COMPACT_CONTENDED, "contended")

/* High-level compaction status feedback */
#define COMPACTION_FAILED	1
#define COMPACTION_WITHDRAWN	2
#define COMPACTION_PROGRESS	3

#define compact_result_to_feedback(result) \
({ \
	enum compact_result __result = result; \
	(compaction_failed(__result)) ? COMPACTION_FAILED : \
		(compaction_withdrawn(__result)) ? COMPACTION_WITHDRAWN : COMPACTION_PROGRESS; \
})

#define COMPACTION_FEEDBACK \
	EM(COMPACTION_FAILED, "failed") \
	EM(COMPACTION_WITHDRAWN, "withdrawn") \
	EMe(COMPACTION_PROGRESS, "progress")

#define COMPACTION_PRIORITY \
	EM(COMPACT_PRIO_SYNC_FULL, "COMPACT_PRIO_SYNC_FULL") \
	EM(COMPACT_PRIO_SYNC_LIGHT, "COMPACT_PRIO_SYNC_LIGHT") \
	EMe(COMPACT_PRIO_ASYNC, "COMPACT_PRIO_ASYNC")
#else
#define COMPACTION_STATUS
#define COMPACTION_PRIORITY
#define COMPACTION_FEEDBACK
#endif

#ifdef CONFIG_ZONE_DMA
#define IFDEF_ZONE_DMA(X) X
#else
#define IFDEF_ZONE_DMA(X)
#endif

#ifdef CONFIG_ZONE_DMA32
#define IFDEF_ZONE_DMA32(X) X
#else
#define IFDEF_ZONE_DMA32(X)
#endif

#ifdef CONFIG_HIGHMEM
#define IFDEF_ZONE_HIGHMEM(X) X
#else
#define IFDEF_ZONE_HIGHMEM(X)
#endif

#define ZONE_TYPE \
	IFDEF_ZONE_DMA( EM (ZONE_DMA, "DMA")) \
	IFDEF_ZONE_DMA32( EM (ZONE_DMA32, "DMA32")) \
	EM (ZONE_NORMAL, "Normal") \
	IFDEF_ZONE_HIGHMEM( EM (ZONE_HIGHMEM, "HighMem")) \
	EMe(ZONE_MOVABLE, "Movable")

#define LRU_NAMES \
	EM (LRU_INACTIVE_ANON, "inactive_anon") \
	EM (LRU_ACTIVE_ANON, "active_anon") \
	EM (LRU_INACTIVE_FILE, "inactive_file") \
	EM (LRU_ACTIVE_FILE, "active_file") \
	EMe(LRU_UNEVICTABLE, "unevictable")

/*
 * First define the enums in the above macros to be exported to userspace
 * via TRACE_DEFINE_ENUM().
 */
#undef EM
#undef EMe
#define EM(a, b)	TRACE_DEFINE_ENUM(a);
#define EMe(a, b)	TRACE_DEFINE_ENUM(a);

COMPACTION_STATUS
COMPACTION_PRIORITY
/* COMPACTION_FEEDBACK are defines not enums. Not needed here. */
ZONE_TYPE
LRU_NAMES

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a, b)	{a, b},
#define EMe(a, b)	{a, b}
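The EM()/EMe() pattern at the end of the header is the usual trace-event trick of writing each symbolic list once and expanding it twice: first as TRACE_DEFINE_ENUM() statements, then as {value, string} pairs that trace events can hand to __print_symbolic(). A minimal stand-alone sketch of the same double expansion follows; FRUIT_LIST, fruit_names and the printf pass are made-up illustrations, not part of this header.

#include <stdio.h>

/* A toy list written once in terms of EM()/EMe(), then expanded twice. */
#define FRUIT_LIST        \
	EM (0, "apple")   \
	EM (1, "banana")  \
	EMe(2, "cherry")

/* Pass 1: expand the list into {value, name} pairs, mirroring the
 * "#define EM(a, b) {a, b}," redefinition at the end of the header. */
#define EM(a, b)  { a, b },
#define EMe(a, b) { a, b }

static const struct {
	unsigned long value;
	const char *name;
} fruit_names[] = {
	FRUIT_LIST
};

#undef EM
#undef EMe

/* Pass 2: expand the very same list into statements, standing in for
 * the TRACE_DEFINE_ENUM() pass that exports the enum values. */
#define EM(a, b)  printf("define %-6s = %d\n", b, a);
#define EMe(a, b) printf("define %-6s = %d\n", b, a);

int main(void)
{
	FRUIT_LIST

	for (unsigned long i = 0; i < sizeof(fruit_names) / sizeof(fruit_names[0]); i++)
		printf("%lu -> %s\n", fruit_names[i].value, fruit_names[i].name);
	return 0;
}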