
xfs: introduce all-mounts list for cpu hotplug notifications

The inode inactivation and CIL tracking percpu structures are
per-xfs_mount structures. That means when we get a CPU dead
notification, we then need to iterate all the per-cpu structure
instances to process them. Rather than keeping linked lists of
per-cpu structures in each subsystem, add a list of all xfs_mounts
that the generic xfs_cpu_dead() function will iterate and call into
each subsystem appropriately.

This allows us to handle both per-mount and global XFS percpu state
from xfs_cpu_dead(), and avoids the need to link subsystem
structures that can be easily found from the xfs_mount into their
own global lists.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
[djwong: expand some comments about mount list setup ordering rules]
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Dave Chinner, 2021-08-06 11:05:38 -07:00, committed by Darrick J. Wong
parent f1653c2e28
commit 0ed17f01c8
2 changed files with 41 additions and 0 deletions
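The diff below adds only the list plumbing; the loop in xfs_cpu_dead() still carries a placeholder where the per-mount callbacks will go. A minimal sketch of the kind of hook it will dispatch to, assuming the inode-inactivation percpu queues that later patches in this series introduce (the hook name, the m_inodegc and m_inodegc_wq fields, and struct xfs_inodegc are illustrative assumptions, not part of this commit):

/*
 * Hypothetical per-mount subsystem hook of the sort xfs_cpu_dead()
 * will dispatch to once the placeholder below is filled in.
 */
static void
xfs_inodegc_cpu_dead(
	struct xfs_mount	*mp,
	unsigned int		dead_cpu)
{
	struct xfs_inodegc	*gc = per_cpu_ptr(mp->m_inodegc, dead_cpu);

	/*
	 * The dead CPU can no longer drain its own queue, so push any
	 * work it had pending onto a CPU that is still online.
	 */
	if (!llist_empty(&gc->list))
		queue_work(mp->m_inodegc_wq, &gc->work);
}

With such hooks in place, the /* xfs_subsys_dead(mp, cpu); */ placeholder in the final hunk becomes one call per subsystem.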

fs/xfs/xfs_mount.h

@@ -82,6 +82,7 @@ typedef struct xfs_mount {
 	xfs_buftarg_t		*m_ddev_targp;	/* saves taking the address */
 	xfs_buftarg_t		*m_logdev_targp;/* ptr to log device */
 	xfs_buftarg_t		*m_rtdev_targp;	/* ptr to rt device */
+	struct list_head	m_mount_list;	/* global mount list */
 	/*
 	 * Optional cache of rt summary level per bitmap block with the
 	 * invariant that m_rsum_cache[bbno] <= the minimum i for which

fs/xfs/xfs_super.c

@@ -49,6 +49,28 @@ static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
 static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
 #endif
 
+#ifdef CONFIG_HOTPLUG_CPU
+static LIST_HEAD(xfs_mount_list);
+static DEFINE_SPINLOCK(xfs_mount_list_lock);
+
+static inline void xfs_mount_list_add(struct xfs_mount *mp)
+{
+	spin_lock(&xfs_mount_list_lock);
+	list_add(&mp->m_mount_list, &xfs_mount_list);
+	spin_unlock(&xfs_mount_list_lock);
+}
+
+static inline void xfs_mount_list_del(struct xfs_mount *mp)
+{
+	spin_lock(&xfs_mount_list_lock);
+	list_del(&mp->m_mount_list);
+	spin_unlock(&xfs_mount_list_lock);
+}
+#else /* !CONFIG_HOTPLUG_CPU */
+static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
+static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
+#endif
+
 enum xfs_dax_mode {
 	XFS_DAX_INODE = 0,
 	XFS_DAX_ALWAYS = 1,
@@ -1038,6 +1060,7 @@ xfs_fs_put_super(
 	xfs_freesb(mp);
 	free_percpu(mp->m_stats.xs_stats);
+	xfs_mount_list_del(mp);
 	xfs_destroy_percpu_counters(mp);
 	xfs_destroy_mount_workqueues(mp);
 	xfs_close_devices(mp);
@@ -1409,6 +1432,13 @@ xfs_fs_fill_super(
 	if (error)
 		goto out_destroy_workqueues;
 
+	/*
+	 * All percpu data structures requiring cleanup when a cpu goes offline
+	 * must be allocated before adding this @mp to the cpu-dead handler's
+	 * mount list.
+	 */
+	xfs_mount_list_add(mp);
+
 	/* Allocate stats memory before we do operations that might use it */
 	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
 	if (!mp->m_stats.xs_stats) {
@@ -1617,6 +1647,7 @@ xfs_fs_fill_super(
  out_free_stats:
 	free_percpu(mp->m_stats.xs_stats);
  out_destroy_counters:
+	xfs_mount_list_del(mp);
 	xfs_destroy_percpu_counters(mp);
  out_destroy_workqueues:
 	xfs_destroy_mount_workqueues(mp);
@@ -2116,6 +2147,15 @@ static int
 xfs_cpu_dead(
 	unsigned int		cpu)
 {
+	struct xfs_mount	*mp, *n;
+
+	spin_lock(&xfs_mount_list_lock);
+	list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
+		spin_unlock(&xfs_mount_list_lock);
+		/* xfs_subsys_dead(mp, cpu); */
+		spin_lock(&xfs_mount_list_lock);
+	}
+	spin_unlock(&xfs_mount_list_lock);
 	return 0;
 }
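Two notes on this last hunk: the list lock is dropped around the (for now commented-out) subsystem call so that per-mount processing is free to block, and list_for_each_entry_safe() is used because @mp can be removed from the list while the lock is not held. For context, xfs_cpu_dead() itself was registered with the CPU hotplug state machine by the parent commit f1653c2e28; that registration looks roughly like the sketch below (reconstructed from the parent commit, not part of this diff):

static int __init
xfs_cpu_hotplug_init(void)
{
	int	error;

	/*
	 * CPUHP_XFS_DEAD has no startup callback; xfs_cpu_dead() runs
	 * as the teardown callback after a CPU has gone offline.
	 */
	error = cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, "xfs:dead",
			NULL, xfs_cpu_dead);
	if (error < 0)
		xfs_alert(NULL,
"Failed to initialise CPU hotplug, error %d. XFS is non-functional.",
			error);
	return error;
}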