mirror of https://github.com/edk2-porting/linux-next.git
synced 2024-12-27 14:43:58 +08:00
90572890d2
Change the per page last fault tracking to use cpu,pid instead of nid,pid. This will allow us to try and look up the alternate task more easily. Note that even though it is the cpu that is stored in the page flags, the mpol_misplaced decision is still based on the node.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1381141781-10992-43-git-send-email-mgorman@suse.de
[ Fixed build failure on 32-bit systems. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
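A minimal sketch of that last point, for illustration only (helper names follow include/linux/mm.h after this change): even though a cpu,pid pair is what lands in page->flags, the placement check recovers a node from it:

    int last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
    if (!cpupid_pid_unset(last_cpupid) && cpupid_to_nid(last_cpupid) != thisnid)
            goto out;   /* faults came from different nodes; don't migrate on this fault */

where cpupid_to_nid() is essentially cpu_to_node(cpupid_to_cpu(cpupid)).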
95 lines
2.9 KiB
C
#ifndef PAGE_FLAGS_LAYOUT_H
#define PAGE_FLAGS_LAYOUT_H

#include <linux/numa.h>
#include <generated/bounds.h>

/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits. These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits. See gfp_zone() in include/linux/gfp.h
 */
#if MAX_NR_ZONES < 2
#define ZONES_SHIFT 0
#elif MAX_NR_ZONES <= 2
#define ZONES_SHIFT 1
#elif MAX_NR_ZONES <= 4
#define ZONES_SHIFT 2
#else
#error ZONES_SHIFT -- too many zones configured adjust calculation
#endif
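/*
 * Worked example, for illustration only: a configuration with just
 * ZONE_DMA, ZONE_NORMAL and ZONE_MOVABLE has MAX_NR_ZONES == 3, so the
 * "<= 4" branch above applies, ZONES_SHIFT is 2, and two bits of
 * page->flags are enough to encode zone indices 0..2.
 */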

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>

/* SECTION_SHIFT #bits space required to store a section # */
#define SECTIONS_SHIFT (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#endif /* CONFIG_SPARSEMEM */
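/*
 * Illustrative sketch, assuming the usual SPARSEMEM helpers in
 * include/linux/mmzone.h: a page frame number maps to its section number
 * roughly as
 *
 *	section_nr = pfn >> (SECTION_SIZE_BITS - PAGE_SHIFT);
 *
 * and because physical addresses are bounded by MAX_PHYSMEM_BITS, any
 * such section number fits in SECTIONS_SHIFT bits, which is what makes
 * it possible to stash the section in page->flags at all.
 */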

/*
 * page->flags layout:
 *
 * There are five possibilities for how page->flags get laid out. The first
 * pair is for the normal case without sparsemem. The second pair is for
 * sparsemem when there is plenty of space for node and section information.
 * The last is when there is insufficient space in page->flags and a separate
 * lookup is necessary.
 *
 * No sparsemem or sparsemem vmemmap:  |      NODE      | ZONE |             ... | FLAGS |
 *      " plus space for last_cpupid:  |      NODE      | ZONE | LAST_CPUPID ... | FLAGS |
 * classic sparse with space for node: | SECTION | NODE | ZONE |             ... | FLAGS |
 *      " plus space for last_cpupid:  | SECTION | NODE | ZONE | LAST_CPUPID ... | FLAGS |
 * classic sparse no space for node:   | SECTION |     ZONE    |             ... | FLAGS |
 */
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTIONS_WIDTH SECTIONS_SHIFT
#else
#define SECTIONS_WIDTH 0
#endif

#define ZONES_WIDTH ZONES_SHIFT

#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define NODES_WIDTH NODES_SHIFT
#else
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#error "Vmemmap: No space for nodes field in page flags"
#endif
#define NODES_WIDTH 0
#endif

#ifdef CONFIG_NUMA_BALANCING
#define LAST__PID_SHIFT 8
#define LAST__PID_MASK ((1 << LAST__PID_SHIFT)-1)

#define LAST__CPU_SHIFT NR_CPUS_BITS
#define LAST__CPU_MASK ((1 << LAST__CPU_SHIFT)-1)

#define LAST_CPUPID_SHIFT (LAST__PID_SHIFT+LAST__CPU_SHIFT)
#else
#define LAST_CPUPID_SHIFT 0
#endif
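/*
 * Illustrative sketch, assuming the cpupid helpers in include/linux/mm.h:
 * with CONFIG_NUMA_BALANCING the last faulting cpu and pid are packed
 * into a single value roughly as
 *
 *	cpupid = ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) |
 *		  (pid & LAST__PID_MASK);
 *	cpu    = (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
 *	pid    =  cpupid & LAST__PID_MASK;
 *
 * which is why LAST_CPUPID_SHIFT bits are requested from page->flags.
 */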

#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
#else
#define LAST_CPUPID_WIDTH 0
#endif
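/*
 * Illustrative sketch, assuming the offset macros in include/linux/mm.h:
 * the fields are packed downward from the top of page->flags, so the
 * widths above translate into bit offsets roughly as
 *
 *	SECTIONS_PGOFF    == BITS_PER_LONG - SECTIONS_WIDTH
 *	NODES_PGOFF       == SECTIONS_PGOFF - NODES_WIDTH
 *	ZONES_PGOFF       == NODES_PGOFF - ZONES_WIDTH
 *	LAST_CPUPID_PGOFF == ZONES_PGOFF - LAST_CPUPID_WIDTH
 *
 * and a field is read back with
 *	(page->flags >> <FIELD>_PGOFF) & ((1UL << <FIELD>_WIDTH) - 1)
 */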

/*
 * We are going to use the flags for the page to node mapping if it's in
 * there. This includes the case where there is no node, so it is implicit.
 */
#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
#define NODE_NOT_IN_PAGE_FLAGS
#endif

#if defined(CONFIG_NUMA_BALANCING) && LAST_CPUPID_WIDTH == 0
#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
#endif
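/*
 * Illustrative note, assuming the fallbacks used elsewhere in the tree:
 * with NODE_NOT_IN_PAGE_FLAGS, page_to_nid() has to be resolved through a
 * per-section lookup table instead of page->flags, and with
 * LAST_CPUPID_NOT_IN_PAGE_FLAGS the last cpupid lives in a separate
 * _last_cpupid field of struct page.
 */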
#endif /* _LINUX_PAGE_FLAGS_LAYOUT */