mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-11 12:28:41 +08:00
mm: zswap: add zswap_never_enabled()
Add zswap_never_enabled() to skip the xarray lookup in zswap_load() if zswap was never enabled on the system. It is implemented using static branches for efficiency, as enabling zswap should be a rare event. This could shave some cycles off zswap_load() when CONFIG_ZSWAP is used but zswap is never enabled. However, the real motivation behind this patch is two-fold: - Incoming large folio swapin work will need to fall back to order-0 folios if zswap was ever enabled, because any part of the folio could be in zswap, until proper handling of large folios with zswap is added. - A warning and recovery attempt will be added in a following change in case the above was not done correctly. Zswap will fail the read if the folio is large and it was ever enabled. Expose zswap_never_enabled() in the header for the swapin work to use it later. [yosryahmed@google.com: expose zswap_never_enabled() in the header] Link: https://lkml.kernel.org/r/Zmjf0Dr8s9xSW41X@google.com Link: https://lkml.kernel.org/r/20240611024516.1375191-2-yosryahmed@google.com Signed-off-by: Yosry Ahmed <yosryahmed@google.com> Reviewed-by: Nhat Pham <nphamcs@gmail.com> Cc: Barry Song <baohua@kernel.org> Cc: Chengming Zhou <chengming.zhou@linux.dev> Cc: Chris Li <chrisl@kernel.org> Cc: David Hildenbrand <david@redhat.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
2b33a97c94
commit
2d4d2b1cfb
@ -36,6 +36,7 @@ void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg);
|
||||
void zswap_lruvec_state_init(struct lruvec *lruvec);
|
||||
void zswap_folio_swapin(struct folio *folio);
|
||||
bool zswap_is_enabled(void);
|
||||
bool zswap_never_enabled(void);
|
||||
#else
|
||||
|
||||
struct zswap_lruvec_state {};
|
||||
@ -65,6 +66,11 @@ static inline bool zswap_is_enabled(void)
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
 * CONFIG_ZSWAP=n stub: without zswap compiled in it can never have been
 * enabled, so callers (e.g. the large-folio swapin path) may always take
 * the "zswap was never on" fast path.
 */
static inline bool zswap_never_enabled(void)
{
	return false;
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* _LINUX_ZSWAP_H */
|
||||
|
10
mm/zswap.c
10
mm/zswap.c
@ -83,6 +83,7 @@ static bool zswap_pool_reached_full;
|
||||
static int zswap_setup(void);
|
||||
|
||||
/* Enable/disable zswap */
|
||||
static DEFINE_STATIC_KEY_MAYBE(CONFIG_ZSWAP_DEFAULT_ON, zswap_ever_enabled);
|
||||
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
|
||||
static int zswap_enabled_param_set(const char *,
|
||||
const struct kernel_param *);
|
||||
@ -136,6 +137,11 @@ bool zswap_is_enabled(void)
|
||||
return zswap_enabled;
|
||||
}
|
||||
|
||||
bool zswap_never_enabled(void)
|
||||
{
|
||||
return !static_branch_maybe(CONFIG_ZSWAP_DEFAULT_ON, &zswap_ever_enabled);
|
||||
}
|
||||
|
||||
/*********************************
|
||||
* data structures
|
||||
**********************************/
|
||||
@ -1557,6 +1563,9 @@ bool zswap_load(struct folio *folio)
|
||||
|
||||
VM_WARN_ON_ONCE(!folio_test_locked(folio));
|
||||
|
||||
if (zswap_never_enabled())
|
||||
return false;
|
||||
|
||||
/*
|
||||
* When reading into the swapcache, invalidate our entry. The
|
||||
* swapcache can be the authoritative owner of the page and
|
||||
@ -1735,6 +1744,7 @@ static int zswap_setup(void)
|
||||
zpool_get_type(pool->zpools[0]));
|
||||
list_add(&pool->list, &zswap_pools);
|
||||
zswap_has_pool = true;
|
||||
static_branch_enable(&zswap_ever_enabled);
|
||||
} else {
|
||||
pr_err("pool creation failed\n");
|
||||
zswap_enabled = false;
|
||||
|
Loading…
Reference in New Issue
Block a user