gfs2: Switch to a per-filesystem glock workqueue

Switch to a per-filesystem glock workqueue.  Additional workqueues are
cheap nowadays, and keeping separate workqueues allows flushing the work
of each filesystem without affecting the others.
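
For illustration, the payoff boils down to the following (a minimal sketch
with made-up names, not the gfs2 code itself; the actual changes are in the
hunks below):

    #include <linux/workqueue.h>

    /* Illustrative stand-ins, not the gfs2 structures. */
    struct my_mount {
            struct workqueue_struct *glock_wq;   /* one per mounted filesystem */
    };

    struct my_glock {
            struct my_mount *mount;              /* back-pointer to the mount */
            struct delayed_work work;
    };

    static void my_queue_glock_work(struct my_glock *gl, unsigned long delay)
    {
            /* Work always lands on the owning filesystem's queue. */
            queue_delayed_work(gl->mount->glock_wq, &gl->work, delay);
    }

    static void my_flush_glock_work(struct my_mount *m)
    {
            /* Waits only for this filesystem's glock work, not for other mounts. */
            flush_workqueue(m->glock_wq);
    }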

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Author: Andreas Gruenbacher <agruenba@redhat.com>
Date:   2024-04-11 14:13:30 +02:00
Parent: 51568ac2e9
Commit: 30e388d573

3 changed files with 18 additions and 15 deletions

fs/gfs2/glock.c

@@ -65,7 +65,6 @@ static void request_demote(struct gfs2_glock *gl, unsigned int state,
                            unsigned long delay, bool remote);
 
 static struct dentry *gfs2_root;
-static struct workqueue_struct *glock_workqueue;
 static LIST_HEAD(lru_list);
 static atomic_t lru_count = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(lru_lock);
@@ -274,7 +273,9 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
  * work queue.
  */
 static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
-        if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
+        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+        if (!queue_delayed_work(sdp->sd_glock_wq, &gl->gl_work, delay)) {
                 /*
                  * We are holding the lockref spinlock, and the work was still
                  * queued above. The queued work (glock_work_func) takes that
@@ -2252,9 +2253,10 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
         bool timed_out = false;
 
         set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
-        flush_workqueue(glock_workqueue);
+        flush_workqueue(sdp->sd_glock_wq);
         glock_hash_walk(clear_glock, sdp);
-        flush_workqueue(glock_workqueue);
+        flush_workqueue(sdp->sd_glock_wq);
+
         while (!timed_out) {
                 wait_event_timeout(sdp->sd_kill_wait,
                                    !atomic_read(&sdp->sd_glock_disposal),
@@ -2270,6 +2272,7 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
         gfs2_lm_unmount(sdp);
         gfs2_free_dead_glocks(sdp);
         glock_hash_walk(dump_glock_func, sdp);
+        destroy_workqueue(sdp->sd_glock_wq);
 }
 
 static const char *state2str(unsigned state)
@@ -2534,16 +2537,8 @@ int __init gfs2_glock_init(void)
         if (ret < 0)
                 return ret;
 
-        glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
-                                          WQ_HIGHPRI | WQ_FREEZABLE, 0);
-        if (!glock_workqueue) {
-                rhashtable_destroy(&gl_hash_table);
-                return -ENOMEM;
-        }
-
         glock_shrinker = shrinker_alloc(0, "gfs2-glock");
         if (!glock_shrinker) {
-                destroy_workqueue(glock_workqueue);
                 rhashtable_destroy(&gl_hash_table);
                 return -ENOMEM;
         }
@@ -2563,7 +2558,6 @@ void gfs2_glock_exit(void)
 {
         shrinker_free(glock_shrinker);
         rhashtable_destroy(&gl_hash_table);
-        destroy_workqueue(glock_workqueue);
 }
 
 static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
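
Taken together, the gfs2_gl_hash_clear() hunks above leave the unmount-side
ordering looking roughly as follows (a condensed view only; the disposal
timeout loop and the final debug dump are omitted here):

    /* Condensed: see the hunks above for the complete function. */
    void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
    {
            set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
            flush_workqueue(sdp->sd_glock_wq);      /* this filesystem's work only */
            glock_hash_walk(clear_glock, sdp);
            flush_workqueue(sdp->sd_glock_wq);
            /* ... wait for sdp->sd_glock_disposal to drop to zero ... */
            gfs2_lm_unmount(sdp);
            gfs2_free_dead_glocks(sdp);
            destroy_workqueue(sdp->sd_glock_wq);    /* per-filesystem queue goes away last */
    }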

fs/gfs2/incore.h

@@ -772,6 +772,7 @@ struct gfs2_sbd {
         /* Workqueue stuff */
 
+        struct workqueue_struct *sd_glock_wq;
         struct workqueue_struct *sd_delete_wq;
 
         /* Daemon stuff */
 

fs/gfs2/ops_fstype.c

@@ -1188,11 +1188,17 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
 
         snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s", sdp->sd_table_name);
 
+        error = -ENOMEM;
+        sdp->sd_glock_wq = alloc_workqueue("gfs2-glock/%s",
+                        WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 0,
+                        sdp->sd_fsname);
+        if (!sdp->sd_glock_wq)
+                goto fail_free;
+
         sdp->sd_delete_wq = alloc_workqueue("gfs2-delete/%s",
                         WQ_MEM_RECLAIM | WQ_FREEZABLE, 0, sdp->sd_fsname);
-        error = -ENOMEM;
         if (!sdp->sd_delete_wq)
-                goto fail_free;
+                goto fail_glock_wq;
 
         error = gfs2_sys_fs_add(sdp);
         if (error)
@@ -1301,6 +1307,8 @@ fail_debug:
         gfs2_sys_fs_del(sdp);
 fail_delete_wq:
         destroy_workqueue(sdp->sd_delete_wq);
+fail_glock_wq:
+        destroy_workqueue(sdp->sd_glock_wq);
 fail_free:
         free_sbd(sdp);
         sb->s_fs_info = NULL;
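
The mount-side counterpart is the usual goto-based unwind: each workqueue
allocation gets a matching cleanup label, and a later failure tears down
only what was already set up. A self-contained sketch of that shape (again
with illustrative names, not the exact gfs2_fill_super() code):

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    struct my_sb {
            const char *fsname;
            struct workqueue_struct *glock_wq;
            struct workqueue_struct *delete_wq;
    };

    static int my_alloc_workqueues(struct my_sb *sbp)
    {
            sbp->glock_wq = alloc_workqueue("my-glock/%s",
                            WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 0,
                            sbp->fsname);
            if (!sbp->glock_wq)
                    return -ENOMEM;

            sbp->delete_wq = alloc_workqueue("my-delete/%s",
                            WQ_MEM_RECLAIM | WQ_FREEZABLE, 0, sbp->fsname);
            if (!sbp->delete_wq)
                    goto fail_glock_wq;

            return 0;

    fail_glock_wq:
            /* Undo only the allocation that already succeeded. */
            destroy_workqueue(sbp->glock_wq);
            return -ENOMEM;
    }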