2
0
mirror of https://github.com/edk2-porting/linux-next.git synced 2024-12-23 20:53:53 +08:00
linux-next/mm/shuffle.h
Dan Williams 97500a4a54 mm: maintain randomization of page free lists
When freeing a page with an order >= shuffle_page_order randomly select
the front or back of the list for insertion.

While the mm tries to defragment physical pages into huge pages, this can
tend to make the page allocator more predictable over time.  Inject the
front-back randomness to preserve the initial randomness established by
shuffle_free_memory() when the kernel was booted.

The overhead of this manipulation is constrained by only being applied
to MAX_ORDER-sized pages by default.

[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/154899812788.3165233.9066631950746578517.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Robert Elliott <elliott@hpe.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2019-05-14 19:52:48 -07:00

65 lines
1.5 KiB
C

// SPDX-License-Identifier: GPL-2.0
// Copyright(c) 2018 Intel Corporation. All rights reserved.
#ifndef _MM_SHUFFLE_H
#define _MM_SHUFFLE_H
#include <linux/jump_label.h>
/*
 * SHUFFLE_ENABLE is called from the command line enabling path, or by
 * platform-firmware enabling that indicates the presence of a
 * direct-mapped memory-side-cache. SHUFFLE_FORCE_DISABLE is called from
 * the command line path and overrides any previous or future
 * SHUFFLE_ENABLE.
 */
/* Control values accepted by page_alloc_shuffle(). */
enum mm_shuffle_ctl {
	SHUFFLE_ENABLE,		/* request free-list randomization */
	SHUFFLE_FORCE_DISABLE,	/* disable, overriding any past or future SHUFFLE_ENABLE */
};
#define SHUFFLE_ORDER (MAX_ORDER-1)
#ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR
DECLARE_STATIC_KEY_FALSE(page_alloc_shuffle_key);
extern void page_alloc_shuffle(enum mm_shuffle_ctl ctl);
extern void __shuffle_free_memory(pg_data_t *pgdat);
/*
 * Randomize the free lists of every zone in @pgdat, but only when
 * shuffling has been enabled via the page_alloc_shuffle_key static
 * branch; otherwise this is a no-op.
 */
static inline void shuffle_free_memory(pg_data_t *pgdat)
{
	if (static_branch_unlikely(&page_alloc_shuffle_key))
		__shuffle_free_memory(pgdat);
}
extern void __shuffle_zone(struct zone *z);
/*
 * Randomize the free lists of zone @z, gated on the same static key as
 * shuffle_free_memory(); a no-op while shuffling is disabled.
 */
static inline void shuffle_zone(struct zone *z)
{
	if (static_branch_unlikely(&page_alloc_shuffle_key))
		__shuffle_zone(z);
}
/*
 * Report whether pages of @order get randomized front/back free-list
 * insertion. Always false while the shuffle static key is disabled;
 * otherwise true for orders of SHUFFLE_ORDER and above.
 */
static inline bool is_shuffle_order(int order)
{
	return static_branch_unlikely(&page_alloc_shuffle_key) &&
		order >= SHUFFLE_ORDER;
}
#else
/* Stub: shuffling compiled out (CONFIG_SHUFFLE_PAGE_ALLOCATOR=n). */
static inline void shuffle_free_memory(pg_data_t *pgdat)
{
}
/* Stub: shuffling compiled out (CONFIG_SHUFFLE_PAGE_ALLOCATOR=n). */
static inline void shuffle_zone(struct zone *z)
{
}
/* Stub: shuffle control requests are ignored when compiled out. */
static inline void page_alloc_shuffle(enum mm_shuffle_ctl ctl)
{
}
/* Stub: no order qualifies for randomized insertion when compiled out. */
static inline bool is_shuffle_order(int order)
{
	return false;
}
#endif
#endif /* _MM_SHUFFLE_H */