aabb8fdb41

To be on the safe side, it should be less fragile to exclude I_NEW inodes
from inode list scans by default (unless there is an important reason to
have them). Normally they will get excluded anyway (e.g. by a zero refcount
or writecount), but it is fragile for list walkers to have to know exactly
which parts of the inode state are set up and valid to test while I_NEW is
set. Along these lines, move the I_NEW checks upward as well (sometimes
taking I_FREEING etc. checks with them too -- this shouldn't be a problem,
should it?)

Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Jan Kara <jack@suse.cz>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
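The pattern the message describes, as a minimal sketch (2.6.30-era VFS names,
where inode_lock was still the single global inode spinlock; the walker itself
is hypothetical, not part of this commit):

/* Hypothetical inode-list walker showing the ordering this commit adopts:
 * test I_NEW (together with I_FREEING/I_WILL_FREE) before any other inode
 * state, since an I_NEW inode is not fully set up yet. */
static void walk_sb_inodes(struct super_block *sb)
{
	struct inode *inode;

	spin_lock(&inode_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/* Check first -- other fields may be uninitialized under I_NEW. */
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
			continue;
		/* Only now is it safe to test refcounts, i_mapping, etc. */
	}
	spin_unlock(&inode_lock);
}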
77 lines
1.6 KiB
C
/*
 * Implement the manual drop-all-pagecache function
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/sysctl.h>
#include <linux/gfp.h>

/* A global variable is a bit ugly, but it keeps the code simple */
int sysctl_drop_caches;

static void drop_pagecache_sb(struct super_block *sb)
{
	struct inode *inode, *toput_inode = NULL;

	spin_lock(&inode_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/* Skip inodes that are being set up or torn down; their
		 * state is not valid for a list walker to inspect. */
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
			continue;
		if (inode->i_mapping->nrpages == 0)
			continue;
		/* Pin the inode so inode_lock can be dropped, and defer
		 * the iput() of the previous inode to outside the lock. */
		__iget(inode);
		spin_unlock(&inode_lock);
		__invalidate_mapping_pages(inode->i_mapping, 0, -1, true);
		iput(toput_inode);
		toput_inode = inode;
		spin_lock(&inode_lock);
	}
	spin_unlock(&inode_lock);
	iput(toput_inode);
}

static void drop_pagecache(void)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		/* Take a passive reference so the superblock cannot go
		 * away while sb_lock is dropped. */
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		if (sb->s_root)
			drop_pagecache_sb(sb);
		up_read(&sb->s_umount);
		spin_lock(&sb_lock);
		/* If dropping the reference freed the sb, our list
		 * position is gone; rescan from the top. */
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
}

static void drop_slab(void)
{
	int nr_objects;

	/* Keep asking the shrinkers to reclaim until little remains. */
	do {
		nr_objects = shrink_slab(1000, GFP_KERNEL, 1000);
	} while (nr_objects > 10);
}

int drop_caches_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, file, buffer, length, ppos);
	if (write) {
		/* Bit 0 drops the pagecache, bit 1 drops slab caches. */
		if (sysctl_drop_caches & 1)
			drop_pagecache();
		if (sysctl_drop_caches & 2)
			drop_slab();
	}
	return 0;
}
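For context, the handler above is driven from userspace by writing to
/proc/sys/vm/drop_caches. A minimal userspace sketch (error handling mostly
elided; typically requires root):

/* Writing "3" drops both the pagecache (bit 0) and the slab caches
 * (bit 1), exercising drop_caches_sysctl_handler above. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/drop_caches", "w");

	if (!f)
		return 1;
	fputs("3\n", f);	/* 1 = pagecache, 2 = slab, 3 = both */
	fclose(f);
	return 0;
}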