mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-25 21:54:06 +08:00
aecfd220b2
Using uninitialized_var() is dangerous as it papers over real bugs[1]
(or can in the future), and suppresses unrelated compiler warnings (e.g.
"unused variable"). If the compiler thinks it is uninitialized, either
simply initialize the variable or make compiler changes. As a precursor
to removing[2] this[3] macro[4], refactor code to avoid its need.
The original reason for its use here was to work around the #ifdef
being the only place the variable was used. This is better expressed
using IS_ENABLED() and a new code block where the variable can be used
unconditionally.
[1] https://lore.kernel.org/lkml/20200603174714.192027-1-glider@google.com/
[2] https://lore.kernel.org/lkml/CA+55aFw+Vbj0i=1TGqCR5vQkCzWJ0QxK6CernOU6eedsudAixw@mail.gmail.com/
[3] https://lore.kernel.org/lkml/CA+55aFwgbgqhbp1fkxvRKEpzyR5J8n1vKT1VZdz9knmPuXhOeg@mail.gmail.com/
[4] https://lore.kernel.org/lkml/CA+55aFz2500WfbKXAx8s67wrm9=yVJu65TpLgN_ybYNv0VEOKA@mail.gmail.com/
Fixes: 1e01979c8f ("x86, numa: Implement pfn -> nid mapping granularity check")
Signed-off-by: Kees Cook <keescook@chromium.org>
114 lines
3.4 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef PAGE_FLAGS_LAYOUT_H
#define PAGE_FLAGS_LAYOUT_H

#include <linux/numa.h>
#include <generated/bounds.h>

/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits. These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits. See gfp_zone() in include/linux/gfp.h
 */
/* ZONES_SHIFT: #bits needed to encode a zone number (0..MAX_NR_ZONES-1). */
#if MAX_NR_ZONES < 2
#define ZONES_SHIFT 0
#elif MAX_NR_ZONES <= 2
#define ZONES_SHIFT 1
#elif MAX_NR_ZONES <= 4
#define ZONES_SHIFT 2
#elif MAX_NR_ZONES <= 8
#define ZONES_SHIFT 3
#else
#error ZONES_SHIFT -- too many zones configured adjust calculation
#endif

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>

/* SECTION_SHIFT #bits space required to store a section # */
#define SECTIONS_SHIFT	(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#endif /* CONFIG_SPARSEMEM */

#ifndef BUILD_VDSO32_64
/*
 * page->flags layout:
 *
 * There are five possibilities for how page->flags get laid out. The first
 * pair is for the normal case without sparsemem. The second pair is for
 * sparsemem when there is plenty of space for node and section information.
 * The last is when there is insufficient space in page->flags and a separate
 * lookup is necessary.
 *
 * No sparsemem or sparsemem vmemmap: |       NODE     | ZONE |             ... | FLAGS |
 *      " plus space for last_cpupid: |       NODE     | ZONE | LAST_CPUPID ... | FLAGS |
 * classic sparse with space for node:| SECTION | NODE | ZONE |             ... | FLAGS |
 *      " plus space for last_cpupid: | SECTION | NODE | ZONE | LAST_CPUPID ... | FLAGS |
 * classic sparse no space for node:  | SECTION |     ZONE    | ... | FLAGS |
 */
/*
 * SECTIONS_WIDTH: bits of page->flags consumed by the section number.
 * Only classic (non-vmemmap) sparsemem stores the section in page->flags.
 */
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTIONS_WIDTH		SECTIONS_SHIFT
#else
#define SECTIONS_WIDTH		0
#endif

#define ZONES_WIDTH		ZONES_SHIFT

/*
 * NODES_WIDTH: bits of page->flags consumed by the node id, or 0 when the
 * node does not fit alongside the section/zone fields and page flags.
 * Vmemmap sparsemem has no section field to fall back on, so running out
 * of space there is a hard error.
 */
#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define NODES_WIDTH		NODES_SHIFT
#else
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#error "Vmemmap: No space for nodes field in page flags"
#endif
#define NODES_WIDTH		0
#endif

/*
 * LAST_CPUPID_SHIFT: bits needed for the NUMA-balancing cpu+pid tracking
 * field (8 bits of pid plus enough bits for any cpu number); 0 when NUMA
 * balancing is disabled.
 */
#ifdef CONFIG_NUMA_BALANCING
#define LAST__PID_SHIFT	8
#define LAST__PID_MASK	((1 << LAST__PID_SHIFT)-1)

#define LAST__CPU_SHIFT	NR_CPUS_BITS
#define LAST__CPU_MASK	((1 << LAST__CPU_SHIFT)-1)

#define LAST_CPUPID_SHIFT (LAST__PID_SHIFT+LAST__CPU_SHIFT)
#else
#define LAST_CPUPID_SHIFT 0
#endif

/* KASAN_TAG_WIDTH: software tag-based KASAN stores an 8-bit tag per page. */
#ifdef CONFIG_KASAN_SW_TAGS
#define KASAN_TAG_WIDTH 8
#else
#define KASAN_TAG_WIDTH 0
#endif

/*
 * LAST_CPUPID_WIDTH: the cpupid field is kept in page->flags only when it
 * fits together with every other field; otherwise it is stored externally
 * (see LAST_CPUPID_NOT_IN_PAGE_FLAGS below).
 */
#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \
	<= BITS_PER_LONG - NR_PAGEFLAGS
#define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
#else
#define LAST_CPUPID_WIDTH 0
#endif

/* Final sanity check: all selected fields must fit in one unsigned long. */
#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
	> BITS_PER_LONG - NR_PAGEFLAGS
#error "Not enough bits in page flags"
#endif

/*
 * We are going to use the flags for the page to node mapping if its in
 * there.  This includes the case where there is no node, so it is implicit.
 * Note that this #define MUST have a value so that it can be tested with
 * the IS_ENABLED() macro.
 */
#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
#define NODE_NOT_IN_PAGE_FLAGS 1
#endif

/* NUMA balancing is on but the cpupid field did not fit in page->flags. */
#if defined(CONFIG_NUMA_BALANCING) && LAST_CPUPID_WIDTH == 0
#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
#endif

#endif /* !BUILD_VDSO32_64 */

#endif /* PAGE_FLAGS_LAYOUT_H */