2
0
mirror of https://github.com/edk2-porting/linux-next.git synced 2024-11-28 20:44:00 +08:00

mm/shuffle: remove dynamic reconfiguration

Commit e900a918b0 ("mm: shuffle initial free memory to improve
memory-side-cache utilization") promised "autodetection of a
memory-side-cache (to be added in a follow-on patch)" over a year ago.

The original series included patches [1], however, they were dropped
during review [2] to be followed-up later.

Due to lack of platforms that publish an HMAT, autodetection is currently
not implemented.  However, manual activation is actively used [3].  Let's
simplify for now and re-add when really (ever?) needed.

[1] https://lkml.kernel.org/r/154510700291.1941238.817190985966612531.stgit@dwillia2-desk3.amr.corp.intel.com
[2] https://lkml.kernel.org/r/154690326478.676627.103843791978176914.stgit@dwillia2-desk3.amr.corp.intel.com
[3] https://lkml.kernel.org/r/CAPcyv4irwGUU2x+c6b4L=KbB1dnasNKaaZd6oSpYjL9kfsnROQ@mail.gmail.com

Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Wei Yang <richard.weiyang@linux.alibaba.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Wei Yang <richard.weiyang@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Dan Williams <dan.j.williams@intel.com>
Link: http://lkml.kernel.org/r/20200624094741.9918-4-david@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
David Hildenbrand 2020-08-06 23:25:38 -07:00 committed by Linus Torvalds
parent 93146d98ce
commit 839195352d
2 changed files with 2 additions and 43 deletions

View File

@@ -10,33 +10,11 @@
 #include "shuffle.h"
 
 DEFINE_STATIC_KEY_FALSE(page_alloc_shuffle_key);
-static unsigned long shuffle_state __ro_after_init;
-
-/*
- * Depending on the architecture, module parameter parsing may run
- * before, or after the cache detection. SHUFFLE_FORCE_DISABLE prevents,
- * or reverts the enabling of the shuffle implementation. SHUFFLE_ENABLE
- * attempts to turn on the implementation, but aborts if it finds
- * SHUFFLE_FORCE_DISABLE already set.
- */
-__meminit void page_alloc_shuffle(enum mm_shuffle_ctl ctl)
-{
-	if (ctl == SHUFFLE_FORCE_DISABLE)
-		set_bit(SHUFFLE_FORCE_DISABLE, &shuffle_state);
-
-	if (test_bit(SHUFFLE_FORCE_DISABLE, &shuffle_state)) {
-		if (test_and_clear_bit(SHUFFLE_ENABLE, &shuffle_state))
-			static_branch_disable(&page_alloc_shuffle_key);
-	} else if (ctl == SHUFFLE_ENABLE
-			&& !test_and_set_bit(SHUFFLE_ENABLE, &shuffle_state))
-		static_branch_enable(&page_alloc_shuffle_key);
-}
 
 static bool shuffle_param;
 static int shuffle_show(char *buffer, const struct kernel_param *kp)
 {
-	return sprintf(buffer, "%c\n", test_bit(SHUFFLE_ENABLE, &shuffle_state)
-			? 'Y' : 'N');
+	return sprintf(buffer, "%c\n", shuffle_param ? 'Y' : 'N');
 }
 
 static __meminit int shuffle_store(const char *val,
@@ -47,9 +25,7 @@ static __meminit int shuffle_store(const char *val,
 	if (rc < 0)
 		return rc;
 	if (shuffle_param)
-		page_alloc_shuffle(SHUFFLE_ENABLE);
-	else
-		page_alloc_shuffle(SHUFFLE_FORCE_DISABLE);
+		static_branch_enable(&page_alloc_shuffle_key);
 	return 0;
 }
 module_param_call(shuffle, shuffle_store, shuffle_show, &shuffle_param, 0400);

View File

@@ -4,23 +4,10 @@
 #define _MM_SHUFFLE_H
 
 #include <linux/jump_label.h>
 
-/*
- * SHUFFLE_ENABLE is called from the command line enabling path, or by
- * platform-firmware enabling that indicates the presence of a
- * direct-mapped memory-side-cache. SHUFFLE_FORCE_DISABLE is called from
- * the command line path and overrides any previous or future
- * SHUFFLE_ENABLE.
- */
-enum mm_shuffle_ctl {
-	SHUFFLE_ENABLE,
-	SHUFFLE_FORCE_DISABLE,
-};
-
 #define SHUFFLE_ORDER	(MAX_ORDER-1)
 
 #ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR
 DECLARE_STATIC_KEY_FALSE(page_alloc_shuffle_key);
-extern void page_alloc_shuffle(enum mm_shuffle_ctl ctl);
 extern void __shuffle_free_memory(pg_data_t *pgdat);
 extern bool shuffle_pick_tail(void);
 static inline void shuffle_free_memory(pg_data_t *pgdat)
@@ -58,10 +45,6 @@ static inline void shuffle_zone(struct zone *z)
 {
 }
 
-static inline void page_alloc_shuffle(enum mm_shuffle_ctl ctl)
-{
-}
-
 static inline bool is_shuffle_order(int order)
 {
 	return false;