mm: zswap: function ordering: shrink_memcg_cb
shrink_memcg_cb() is called by the shrinker and is based on
zswap_writeback_entry(). Move it in between. Save one fwd decl.

Link: https://lkml.kernel.org/r/20240130014208.565554-21-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Nhat Pham <nphamcs@gmail.com>
Cc: Chengming Zhou <zhouchengming@bytedance.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 9986d35d4c
commit eb23ee4f96

mm/zswap.c (125 lines changed)
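For context, the move has no functional effect; it simply exploits C's define-before-use rule so the separate forward declaration can be dropped. A minimal standalone sketch of the same pattern (all names below are made up for illustration, none are from mm/zswap.c):

#include <stdio.h>

/*
 * With the callee defined ahead of its caller, no forward
 * declaration such as "static int helper(int x);" is needed.
 */
static int helper(int x)
{
        return x * 2;
}

static int caller(int x)
{
        return helper(x) + 1;
}

int main(void)
{
        printf("%d\n", caller(20)); /* prints 41 */
        return 0;
}

The commit applies the same idea: shrink_memcg_cb() now sits after zswap_writeback_entry(), which it calls, and before the shrinker functions that call it, so the declaration order follows the call graph.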
@@ -1254,7 +1254,67 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 * shrinker functions
 **********************************/
 static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
-                                       spinlock_t *lock, void *arg);
+                                       spinlock_t *lock, void *arg)
+{
+        struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
+        bool *encountered_page_in_swapcache = (bool *)arg;
+        swp_entry_t swpentry;
+        enum lru_status ret = LRU_REMOVED_RETRY;
+        int writeback_result;
+
+        /*
+         * Rotate the entry to the tail before unlocking the LRU,
+         * so that in case of an invalidation race concurrent
+         * reclaimers don't waste their time on it.
+         *
+         * If writeback succeeds, or failure is due to the entry
+         * being invalidated by the swap subsystem, the invalidation
+         * will unlink and free it.
+         *
+         * Temporary failures, where the same entry should be tried
+         * again immediately, almost never happen for this shrinker.
+         * We don't do any trylocking; -ENOMEM comes closest,
+         * but that's extremely rare and doesn't happen spuriously
+         * either. Don't bother distinguishing this case.
+         *
+         * But since they do exist in theory, the entry cannot just
+         * be unlinked, or we could leak it. Hence, rotate.
+         */
+        list_move_tail(item, &l->list);
+
+        /*
+         * Once the lru lock is dropped, the entry might get freed. The
+         * swpentry is copied to the stack, and entry isn't deref'd again
+         * until the entry is verified to still be alive in the tree.
+         */
+        swpentry = entry->swpentry;
+
+        /*
+         * It's safe to drop the lock here because we return either
+         * LRU_REMOVED_RETRY or LRU_RETRY.
+         */
+        spin_unlock(lock);
+
+        writeback_result = zswap_writeback_entry(entry, swpentry);
+
+        if (writeback_result) {
+                zswap_reject_reclaim_fail++;
+                ret = LRU_RETRY;
+
+                /*
+                 * Encountering a page already in swap cache is a sign that we are shrinking
+                 * into the warmer region. We should terminate shrinking (if we're in the dynamic
+                 * shrinker context).
+                 */
+                if (writeback_result == -EEXIST && encountered_page_in_swapcache)
+                        *encountered_page_in_swapcache = true;
+        } else {
+                zswap_written_back_pages++;
+        }
+
+        spin_lock(lock);
+        return ret;
+}
 
 static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
                 struct shrink_control *sc)
@@ -1354,69 +1414,6 @@ static void zswap_alloc_shrinker(struct zswap_pool *pool)
         pool->shrinker->seeks = DEFAULT_SEEKS;
 }
 
-static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
-                                       spinlock_t *lock, void *arg)
-{
-        struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
-        bool *encountered_page_in_swapcache = (bool *)arg;
-        swp_entry_t swpentry;
-        enum lru_status ret = LRU_REMOVED_RETRY;
-        int writeback_result;
-
-        /*
-         * Rotate the entry to the tail before unlocking the LRU,
-         * so that in case of an invalidation race concurrent
-         * reclaimers don't waste their time on it.
-         *
-         * If writeback succeeds, or failure is due to the entry
-         * being invalidated by the swap subsystem, the invalidation
-         * will unlink and free it.
-         *
-         * Temporary failures, where the same entry should be tried
-         * again immediately, almost never happen for this shrinker.
-         * We don't do any trylocking; -ENOMEM comes closest,
-         * but that's extremely rare and doesn't happen spuriously
-         * either. Don't bother distinguishing this case.
-         *
-         * But since they do exist in theory, the entry cannot just
-         * be unlinked, or we could leak it. Hence, rotate.
-         */
-        list_move_tail(item, &l->list);
-
-        /*
-         * Once the lru lock is dropped, the entry might get freed. The
-         * swpentry is copied to the stack, and entry isn't deref'd again
-         * until the entry is verified to still be alive in the tree.
-         */
-        swpentry = entry->swpentry;
-
-        /*
-         * It's safe to drop the lock here because we return either
-         * LRU_REMOVED_RETRY or LRU_RETRY.
-         */
-        spin_unlock(lock);
-
-        writeback_result = zswap_writeback_entry(entry, swpentry);
-
-        if (writeback_result) {
-                zswap_reject_reclaim_fail++;
-                ret = LRU_RETRY;
-
-                /*
-                 * Encountering a page already in swap cache is a sign that we are shrinking
-                 * into the warmer region. We should terminate shrinking (if we're in the dynamic
-                 * shrinker context).
-                 */
-                if (writeback_result == -EEXIST && encountered_page_in_swapcache)
-                        *encountered_page_in_swapcache = true;
-        } else {
-                zswap_written_back_pages++;
-        }
-
-        spin_lock(lock);
-        return ret;
-}
-
 static int shrink_memcg(struct mem_cgroup *memcg)
 {
         struct zswap_pool *pool;
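As an aside, the callback's signature matches the list_lru walk callback type, which is how "the shrinker" ends up calling it. A rough sketch of the caller side (not part of this commit; the wrapper name, the batch size, and the lru/nid/memcg parameters are hypothetical stand-ins):

#include <linux/list_lru.h>
#include <linux/memcontrol.h>

/*
 * Hypothetical wrapper, not from mm/zswap.c: drive one list_lru walk
 * for a given node/memcg with shrink_memcg_cb() as the isolate
 * callback.  The walker holds the lru lock around each callback
 * invocation; shrink_memcg_cb() may drop that lock for writeback
 * because it only returns LRU_REMOVED_RETRY or LRU_RETRY, both of
 * which make the walker restart since the lock may have been dropped.
 */
static unsigned long example_walk(struct list_lru *lru, int nid,
                                  struct mem_cgroup *memcg)
{
        unsigned long nr_to_walk = 8;   /* arbitrary batch size for this sketch */

        /* NULL cb_arg: shrink_memcg_cb() checks it before dereferencing. */
        return list_lru_walk_one(lru, nid, memcg, &shrink_memcg_cb,
                                 NULL, &nr_to_walk);
}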