/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */
#ifndef __SUPER_DOT_H__
#define __SUPER_DOT_H__

#include <linux/fs.h>
#include <linux/dcache.h>
#include "incore.h"

/* Supported fs format version range */
#define GFS2_FS_FORMAT_MIN (1801)
#define GFS2_FS_FORMAT_MAX (1802)

extern void gfs2_lm_unmount(struct gfs2_sbd *sdp);
|
2006-01-17 00:50:04 +08:00
|
|
|
|
|
|
|
static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
|
|
|
|
{
|
|
|
|
unsigned int x;
|
|
|
|
spin_lock(&sdp->sd_jindex_spin);
|
|
|
|
x = sdp->sd_journals;
|
|
|
|
spin_unlock(&sdp->sd_jindex_spin);
|
|
|
|
return x;
|
|
|
|
}
extern void gfs2_jindex_free(struct gfs2_sbd *sdp);

extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
extern int gfs2_jdesc_check(struct gfs2_jdesc *jd);
extern int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename,
				     struct gfs2_inode **ipp);

extern int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
extern void gfs2_make_fs_ro(struct gfs2_sbd *sdp);
extern void gfs2_online_uevent(struct gfs2_sbd *sdp);
|
gfs2: Fix asynchronous thread destruction
The kernel threads are currently stopped and destroyed synchronously by
gfs2_make_fs_ro() and gfs2_put_super(), and asynchronously by
signal_our_withdraw(), with no synchronization, so the synchronous and
asynchronous contexts can race with each other.
First, when creating the kernel threads, take an extra task struct
reference so that the task struct won't go away immediately when they
terminate. This allows those kthreads to terminate immediately when
they're done rather than hanging around as zombies until they are reaped
by kthread_stop(). When kthread_stop() is called on a terminated
kthread, it will return immediately.
Second, in signal_our_withdraw(), once the SDF_JOURNAL_LIVE flag has
been cleared, wake up the logd and quotad wait queues instead of
stopping the logd and quotad kthreads. The kthreads are then expected
to terminate automatically within short time, but if they cannot, they
will not block the withdraw.
For example, if a user process and one of the kthread decide to withdraw
at the same time, only one of them will perform the actual withdraw and
the other will wait for it to be done. If the kthread ends up being the
one to wait, the withdrawing user process won't be able to stop it.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
2023-08-28 23:14:32 +08:00
|
|
|
extern void gfs2_destroy_threads(struct gfs2_sbd *sdp);
|
2009-01-06 19:52:25 +08:00
|
|
|
extern int gfs2_statfs_init(struct gfs2_sbd *sdp);
|
|
|
|
extern void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
|
|
|
|
s64 dinodes);
|
2009-06-26 04:09:51 +08:00
|
|
|
extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc,
|
|
|
|
const void *buf);
|
2020-10-21 04:58:03 +08:00
|
|
|
extern void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc,
|
|
|
|
void *buf);
|
2021-07-01 00:46:17 +08:00
|
|
|
extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh);
|
2009-09-11 21:36:44 +08:00
|
|
|
extern int gfs2_statfs_sync(struct super_block *sb, int type);
|
2014-11-14 10:42:04 +08:00
|
|
|
extern void gfs2_freeze_func(struct work_struct *work);
extern void gfs2_thaw_freeze_initiator(struct super_block *sb);

extern void free_local_statfs_inodes(struct gfs2_sbd *sdp);
extern struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
					     unsigned int index);
extern void free_sbd(struct gfs2_sbd *sdp);
extern struct file_system_type gfs2_fs_type;
extern struct file_system_type gfs2meta_fs_type;
extern const struct export_operations gfs2_export_ops;
extern const struct super_operations gfs2_super_ops;
extern const struct dentry_operations gfs2_dops;

extern const struct xattr_handler *gfs2_xattr_handlers_max[];
extern const struct xattr_handler **gfs2_xattr_handlers_min;

#endif /* __SUPER_DOT_H__ */