commit 16e2df2a05
When drop_caches truncates the page cache in an inode, it also includes
any shadow entries for evicted pages. However, there is a preliminary
check on whether the inode has pages: if it has *only* shadow entries,
it will skip running truncation on the inode and leave it behind. Fix
the check to use mapping_empty(), such that it runs truncation on any
inode that has cache entries at all.

Link: https://lkml.kernel.org/r/20210614211904.14420-2-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reported-by: Roman Gushchin <guro@fb.com>
Acked-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
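For context, here is a minimal sketch of the change described above. The "before" form is an assumption (the earlier code presumably tested the page count, nrpages, directly); the "after" form matches the check in the file below.

        /* Before (assumed): an inode whose cache held only shadow entries
         * reported nrpages == 0 and was skipped by drop_caches. */
        if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
            (inode->i_mapping->nrpages == 0 && !need_resched())) {
                spin_unlock(&inode->i_lock);
                continue;               /* shadow-only inode left behind */
        }

        /* After: mapping_empty() is only true when there are no cache
         * entries at all, so shadow-only inodes get truncated as well. */
        if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
            (mapping_empty(inode->i_mapping) && !need_resched())) {
                spin_unlock(&inode->i_lock);
                continue;
        }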
78 lines | 1.9 KiB | C
// SPDX-License-Identifier: GPL-2.0
/*
 * Implement the manual drop-all-pagecache function
 */

#include <linux/pagemap.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/sysctl.h>
#include <linux/gfp.h>
#include "internal.h"

/* A global variable is a bit ugly, but it keeps the code simple */
int sysctl_drop_caches;

static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
        struct inode *inode, *toput_inode = NULL;

        spin_lock(&sb->s_inode_list_lock);
        list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                spin_lock(&inode->i_lock);
                /*
                 * We must skip inodes in unusual state. We may also skip
                 * inodes without pages but we deliberately won't in case
                 * we need to reschedule to avoid softlockups.
                 */
                if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
                    (mapping_empty(inode->i_mapping) && !need_resched())) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                spin_unlock(&sb->s_inode_list_lock);

                invalidate_mapping_pages(inode->i_mapping, 0, -1);
                iput(toput_inode);
                toput_inode = inode;

                cond_resched();
                spin_lock(&sb->s_inode_list_lock);
        }
        spin_unlock(&sb->s_inode_list_lock);
        iput(toput_inode);
}

int drop_caches_sysctl_handler(struct ctl_table *table, int write,
                void *buffer, size_t *length, loff_t *ppos)
{
        int ret;

        ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
        if (ret)
                return ret;
        if (write) {
                static int stfu;

                if (sysctl_drop_caches & 1) {
                        iterate_supers(drop_pagecache_sb, NULL);
                        count_vm_event(DROP_PAGECACHE);
                }
                if (sysctl_drop_caches & 2) {
                        drop_slab();
                        count_vm_event(DROP_SLAB);
                }
                if (!stfu) {
                        pr_info("%s (%d): drop_caches: %d\n",
                                current->comm, task_pid_nr(current),
                                sysctl_drop_caches);
                }
                stfu |= sysctl_drop_caches & 4;
        }
        return 0;
}
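As a usage note, not part of the file above: the sysctl handler is reached by writing to /proc/sys/vm/drop_caches (1 drops the page cache, 2 drops reclaimable slab objects, 3 drops both; bit 4 suppresses the informational log message, as the stfu logic shows). A minimal userspace sketch in C:

#include <stdio.h>

int main(void)
{
        /* Requires root; "3" asks the kernel to drop both the page
         * cache and reclaimable slab objects (dentries, inodes). */
        FILE *f = fopen("/proc/sys/vm/drop_caches", "w");

        if (!f) {
                perror("drop_caches");
                return 1;
        }
        if (fputs("3\n", f) == EOF || fclose(f) == EOF) {
                perror("drop_caches");
                return 1;
        }
        return 0;
}

In practice it is common to run sync first, since only clean (unreferenced, non-dirty) cache entries can be dropped this way.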