mirror of https://github.com/edk2-porting/linux-next.git (synced 2024-12-27 06:34:11 +08:00)
40b1de007a
Now that we defer inode inactivation, we've decoupled the process of unlinking or closing an inode from the process of inactivating it. In theory this should lead to better throughput, since we now inactivate the queued inodes in batches instead of one at a time.

Unfortunately, one of the primary risks of this decoupling is the loss of rate-control feedback between the frontend and background threads. In other words, a rm -rf /* thread can run the system out of memory if it can queue inodes for inactivation and jump to a new CPU faster than the background threads can actually clear the deferred work. The workers can get scheduled off the CPU if they have to do IO, etc.

To solve this problem, we configure a shrinker so that it will activate the /second/ time the shrinkers are called. The custom shrinker will queue all percpu deferred inactivation workers immediately and set a flag to force frontend callers who are releasing a vfs inode to wait for the inactivation workers.

On my test VM with 560M of RAM and a 2TB filesystem, this seems to solve most of the OOMing problem when deleting 10 million inodes.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
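The throttle itself lives in fs/xfs/xfs_icache.c; the header below only declares xfs_inodegc_register_shrinker(). As a rough sketch of the mechanism the commit message describes (illustrative only and simplified, not a verbatim copy of the kernel source: the per-cpu xfs_inodegc fields, the shrinker_hits counter, and XFS_INODEGC_SHRINKER_COUNT are assumptions here), ->count_objects reports pending work whenever any per-cpu queue is non-empty, so reclaim calls back into ->scan_objects, which kicks every per-cpu worker and records a hit that frontend callers check before deciding to wait:

/*
 * Sketch of the inodegc memory-reclaim throttle. Field and constant
 * names (m_inodegc, shrinker_hits, XFS_INODEGC_SHRINKER_COUNT) are
 * assumptions for illustration, not quoted from xfs_icache.c.
 */
static unsigned long
xfs_inodegc_shrinker_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_mount	*mp = container_of(shrink, struct xfs_mount,
						   m_inodegc_shrinker);
	struct xfs_inodegc	*gc;
	int			cpu;

	/* Report a large count so that reclaim calls ->scan_objects next. */
	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list))
			return XFS_INODEGC_SHRINKER_COUNT;
	}
	return 0;
}

static unsigned long
xfs_inodegc_shrinker_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_mount	*mp = container_of(shrink, struct xfs_mount,
						   m_inodegc_shrinker);
	struct xfs_inodegc	*gc;
	int			cpu;

	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list)) {
			/*
			 * Mark this queue as throttled so that frontend
			 * callers releasing a vfs inode wait for the
			 * worker, then kick the worker right away.
			 */
			WRITE_ONCE(gc->shrinker_hits,
					READ_ONCE(gc->shrinker_hits) + 1);
			queue_work_on(cpu, mp->m_inodegc_wq, &gc->work);
		}
	}
	return SHRINK_STOP;
}

int
xfs_inodegc_register_shrinker(
	struct xfs_mount	*mp)
{
	struct shrinker		*shrink = &mp->m_inodegc_shrinker;

	shrink->count_objects = xfs_inodegc_shrinker_count;
	shrink->scan_objects = xfs_inodegc_shrinker_scan;
	shrink->seeks = 0;	/* not a slab cache; scan eagerly */
	shrink->flags = SHRINKER_NONSLAB;

	return register_shrinker(shrink);
}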
86 lines · 2.8 KiB · C
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef XFS_SYNC_H
#define XFS_SYNC_H 1

struct xfs_mount;
struct xfs_perag;

struct xfs_icwalk {
	__u32		icw_flags;
	kuid_t		icw_uid;
	kgid_t		icw_gid;
	prid_t		icw_prid;
	__u64		icw_min_file_size;
	long		icw_scan_limit;
};

/* Flags that reflect xfs_fs_eofblocks functionality. */
#define XFS_ICWALK_FLAG_SYNC		(1U << 0) /* sync/wait mode scan */
#define XFS_ICWALK_FLAG_UID		(1U << 1) /* filter by uid */
#define XFS_ICWALK_FLAG_GID		(1U << 2) /* filter by gid */
#define XFS_ICWALK_FLAG_PRID		(1U << 3) /* filter by project id */
#define XFS_ICWALK_FLAG_MINFILESIZE	(1U << 4) /* filter by min file size */

#define XFS_ICWALK_FLAGS_VALID		(XFS_ICWALK_FLAG_SYNC | \
					 XFS_ICWALK_FLAG_UID | \
					 XFS_ICWALK_FLAG_GID | \
					 XFS_ICWALK_FLAG_PRID | \
					 XFS_ICWALK_FLAG_MINFILESIZE)

/*
 * Flags for xfs_iget()
 */
#define XFS_IGET_CREATE		0x1
#define XFS_IGET_UNTRUSTED	0x2
#define XFS_IGET_DONTCACHE	0x4
#define XFS_IGET_INCORE		0x8 /* don't read from disk or reinit */

int xfs_iget(struct xfs_mount *mp, struct xfs_trans *tp, xfs_ino_t ino,
	     uint flags, uint lock_flags, xfs_inode_t **ipp);

/* recovery needs direct inode allocation capability */
struct xfs_inode * xfs_inode_alloc(struct xfs_mount *mp, xfs_ino_t ino);
void xfs_inode_free(struct xfs_inode *ip);

void xfs_reclaim_worker(struct work_struct *work);

void xfs_reclaim_inodes(struct xfs_mount *mp);
long xfs_reclaim_inodes_count(struct xfs_mount *mp);
long xfs_reclaim_inodes_nr(struct xfs_mount *mp, unsigned long nr_to_scan);

void xfs_inode_mark_reclaimable(struct xfs_inode *ip);

int xfs_blockgc_free_dquots(struct xfs_mount *mp, struct xfs_dquot *udqp,
		struct xfs_dquot *gdqp, struct xfs_dquot *pdqp,
		unsigned int iwalk_flags);
int xfs_blockgc_free_quota(struct xfs_inode *ip, unsigned int iwalk_flags);
int xfs_blockgc_free_space(struct xfs_mount *mp, struct xfs_icwalk *icm);
void xfs_blockgc_flush_all(struct xfs_mount *mp);

void xfs_inode_set_eofblocks_tag(struct xfs_inode *ip);
void xfs_inode_clear_eofblocks_tag(struct xfs_inode *ip);

void xfs_inode_set_cowblocks_tag(struct xfs_inode *ip);
void xfs_inode_clear_cowblocks_tag(struct xfs_inode *ip);

void xfs_blockgc_worker(struct work_struct *work);

int xfs_icache_inode_is_allocated(struct xfs_mount *mp, struct xfs_trans *tp,
				  xfs_ino_t ino, bool *inuse);

void xfs_blockgc_stop(struct xfs_mount *mp);
void xfs_blockgc_start(struct xfs_mount *mp);

void xfs_inodegc_worker(struct work_struct *work);
void xfs_inodegc_flush(struct xfs_mount *mp);
void xfs_inodegc_stop(struct xfs_mount *mp);
void xfs_inodegc_start(struct xfs_mount *mp);
void xfs_inodegc_cpu_dead(struct xfs_mount *mp, unsigned int cpu);
int xfs_inodegc_register_shrinker(struct xfs_mount *mp);

#endif
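For context on how the xfs_icwalk filter structure above is meant to be used, here is a hypothetical caller-side sketch (not taken from the kernel tree; the uid value and the surrounding function are invented): it asks xfs_blockgc_free_space() for a synchronous scan that frees speculative preallocations only on inodes owned by uid 1000.

	/* Hypothetical example: sync blockgc scan filtered by uid 1000. */
	struct xfs_icwalk	icw = {
		.icw_flags	= XFS_ICWALK_FLAG_SYNC | XFS_ICWALK_FLAG_UID,
		.icw_uid	= make_kuid(current_user_ns(), 1000),
	};
	int			error;

	error = xfs_blockgc_free_space(mp, &icw);

Because XFS_ICWALK_FLAG_SYNC is set, the call does not return until the scan completes; without it, the walk is advisory and the caller does not wait.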