afs: Apply server breaks to mmap'd files in the call processor
Apply server breaks to mmap'd files that are being used from that server
from the call processor work function rather than punting it off to a
workqueue.  The work item, afs_server_init_callback(), then bumps each
individual inode off to its own work item, introducing a potentially
lengthy delay.  This reduces that delay at the cost of extending the
amount of time we delay replying to the CB.InitCallBack3 notification RPC
from the server.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
parent dfa0a44946
commit 32222f0978
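For orientation, here is a minimal before/after sketch of the dispatch change described in the message above, condensed from the diff below. It is not the exact kernel code (the fs_s_break accounting is omitted and the _old/_new function names are illustrative): previously the notification handler queued per-server work and returned; now it walks the mmap'd files synchronously, before the reply goes out.

/* Before: punt the mmap invalidation to a workqueue and reply immediately. */
static void init_callback_state_old(struct afs_server *server)
{
        rcu_read_lock();
        do {
                server->cb_s_break++;
                if (!list_empty(&server->cell->fs_open_mmaps))
                        queue_work(system_unbound_wq, &server->initcb_work);
        } while ((server = rcu_dereference(server->uuid_next)));
        rcu_read_unlock();
}

/* After: break the mmap'd files directly in the call processor, under the
 * cell's vs_lock, so the delay moves from the inode side to the RPC reply.
 */
static void init_callback_state_new(struct afs_server *server)
{
        struct afs_cell *cell = server->cell;

        down_read(&cell->vs_lock);
        do {
                server->cb_s_break++;
                if (!list_empty(&cell->fs_open_mmaps))
                        afs_server_init_callback(server);
        } while ((server = rcu_dereference(server->uuid_next)));
        up_read(&cell->vs_lock);
}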
@@ -33,9 +33,8 @@ void afs_invalidate_mmap_work(struct work_struct *work)
         unmap_mapping_pages(vnode->netfs.inode.i_mapping, 0, 0, false);
 }
 
-void afs_server_init_callback_work(struct work_struct *work)
+static void afs_server_init_callback(struct afs_server *server)
 {
-        struct afs_server *server = container_of(work, struct afs_server, initcb_work);
         struct afs_vnode *vnode;
         struct afs_cell *cell = server->cell;
 
@@ -57,15 +56,19 @@ void afs_server_init_callback_work(struct work_struct *work)
  */
 void afs_init_callback_state(struct afs_server *server)
 {
-        rcu_read_lock();
+        struct afs_cell *cell = server->cell;
+
+        down_read(&cell->vs_lock);
+
         do {
                 server->cb_s_break++;
                 atomic_inc(&server->cell->fs_s_break);
                 if (!list_empty(&server->cell->fs_open_mmaps))
-                        queue_work(system_unbound_wq, &server->initcb_work);
+                        afs_server_init_callback(server);
 
         } while ((server = rcu_dereference(server->uuid_next)));
-        rcu_read_unlock();
+
+        up_read(&cell->vs_lock);
 }
 
 /*
@@ -112,7 +115,7 @@ static struct afs_volume *afs_lookup_volume_rcu(struct afs_cell *cell,
         struct rb_node *p;
         int seq = 1;
 
-        do {
+        for (;;) {
                 /* Unfortunately, rbtree walking doesn't give reliable results
                  * under just the RCU read lock, so we have to check for
                  * changes.
@@ -133,7 +136,12 @@ static struct afs_volume *afs_lookup_volume_rcu(struct afs_cell *cell,
                         volume = NULL;
                 }
 
-        } while (need_seqretry(&cell->volume_lock, seq));
+                if (volume && afs_try_get_volume(volume, afs_volume_trace_get_callback))
+                        break;
+                if (!need_seqretry(&cell->volume_lock, seq))
+                        break;
+                seq |= 1; /* Want a lock next time */
+        }
 
         done_seqretry(&cell->volume_lock, seq);
         return volume;
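The rewritten lookup above follows the kernel's read_seqbegin_or_lock()/need_seqretry()/done_seqretry() pattern: the first pass is lockless, and if need_seqretry() reports that a writer raced with the walk, the loop retries; "seq |= 1" makes read_seqbegin_or_lock() take the seqlock for real on the next pass. Converting the do/while into a for (;;) loop lets the code take a reference on the found volume (afs_try_get_volume()) inside the critical section before deciding whether to retry. Below is a minimal, self-contained sketch of that retry pattern, using a made-up example_lock/example_value pair rather than the AFS structures:

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(example_lock);
static int example_value;

/* Lockless first pass; locked retry only if a writer interfered. */
static int example_read(void)
{
        int seq = 0;
        int val;

        do {
                read_seqbegin_or_lock(&example_lock, &seq);
                val = example_value;
        } while (need_seqretry(&example_lock, seq));
        done_seqretry(&example_lock, seq);

        return val;
}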
@@ -188,12 +196,11 @@ static void afs_break_some_callbacks(struct afs_server *server,
         afs_volid_t vid = cbb->fid.vid;
         size_t i;
 
+        rcu_read_lock();
         volume = afs_lookup_volume_rcu(server->cell, vid);
-
         /* TODO: Find all matching volumes if we couldn't match the server and
          * break them anyway.
          */
-
         for (i = *_count; i > 0; cbb++, i--) {
                 if (cbb->fid.vid == vid) {
                         _debug("- Fid { vl=%08llx n=%llu u=%u }",
@@ -207,6 +214,9 @@ static void afs_break_some_callbacks(struct afs_server *server,
                         *residue++ = *cbb;
                 }
         }
+
+        rcu_read_unlock();
+        afs_put_volume(volume, afs_volume_trace_put_callback);
 }
 
 /*
@@ -219,11 +229,6 @@ void afs_break_callbacks(struct afs_server *server, size_t count,
 
         ASSERT(server != NULL);
 
-        rcu_read_lock();
-
         while (count > 0)
                 afs_break_some_callbacks(server, callbacks, &count);
-
-        rcu_read_unlock();
-        return;
 }
@@ -161,7 +161,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
         refcount_set(&cell->ref, 1);
         atomic_set(&cell->active, 0);
         INIT_WORK(&cell->manager, afs_manage_cell_work);
-        spin_lock_init(&cell->vs_lock);
+        init_rwsem(&cell->vs_lock);
         cell->volumes = RB_ROOT;
         INIT_HLIST_HEAD(&cell->proc_volumes);
         seqlock_init(&cell->volume_lock);
@@ -414,7 +414,7 @@ struct afs_cell {
         unsigned int            debug_id;
 
         /* The volumes belonging to this cell */
-        spinlock_t              vs_lock;        /* Lock for server->volumes */
+        struct rw_semaphore     vs_lock;        /* Lock for server->volumes */
         struct rb_root          volumes;        /* Tree of volumes on this server */
         struct hlist_head       proc_volumes;   /* procfs volume list */
         seqlock_t               volume_lock;    /* For volumes */
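Turning cell->vs_lock from a spinlock into an rw_semaphore is what permits the changes around it: the callback path can now sleep while holding the lock shared (the down_read() in afs_init_callback_state() above), and because the list updates in the attach/detach hunks below are made under the write lock, the _rcu list helpers there can be replaced with plain list operations. A rough sketch of that reader/writer split, using hypothetical example_* names rather than the real AFS types:

#include <linux/rwsem.h>
#include <linux/list.h>

struct example_cell {
        struct rw_semaphore     vs_lock;        /* was spinlock_t */
        struct list_head        entries;
};

/* Writers (attach/detach) take the lock exclusively while editing the list. */
static void example_attach(struct example_cell *cell, struct list_head *e)
{
        down_write(&cell->vs_lock);
        list_add_tail(e, &cell->entries);       /* no list_add_tail_rcu() needed */
        up_write(&cell->vs_lock);
}

/* Readers may sleep under the lock, which a spinlock would not allow. */
static void example_walk(struct example_cell *cell)
{
        struct list_head *p;

        down_read(&cell->vs_lock);
        list_for_each(p, &cell->entries) {
                /* e.g. break callbacks / invalidate mappings here */
        }
        up_read(&cell->vs_lock);
}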
@@ -566,7 +566,6 @@ struct afs_server {
         struct hlist_node       addr6_link;     /* Link in net->fs_addresses6 */
         struct hlist_node       proc_link;      /* Link in net->fs_proc */
         struct list_head        volumes;        /* RCU list of afs_server_entry objects */
-        struct work_struct      initcb_work;    /* Work for CB.InitCallBackState* */
         struct afs_server       *gc_next;       /* Next server in manager's list */
         time64_t                unuse_time;     /* Time at which last unused */
         unsigned long           flags;
@@ -1041,7 +1040,6 @@ void afs_get_address_preferences(struct afs_net *net, struct afs_addr_list *alis
  * callback.c
  */
 extern void afs_invalidate_mmap_work(struct work_struct *);
-extern void afs_server_init_callback_work(struct work_struct *work);
 extern void afs_init_callback_state(struct afs_server *);
 extern void __afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
 extern void afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
@@ -218,7 +218,6 @@ static struct afs_server *afs_alloc_server(struct afs_cell *cell,
         server->uuid = *uuid;
         rwlock_init(&server->fs_lock);
         INIT_LIST_HEAD(&server->volumes);
-        INIT_WORK(&server->initcb_work, afs_server_init_callback_work);
         init_waitqueue_head(&server->probe_wq);
         INIT_LIST_HEAD(&server->probe_link);
         spin_lock_init(&server->probe_lock);
@@ -470,7 +469,6 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
         if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
                 afs_give_up_callbacks(net, server);
 
-        flush_work(&server->initcb_work);
         afs_put_server(net, server, afs_server_trace_destroy);
 }
 
@@ -136,7 +136,7 @@ void afs_attach_volume_to_servers(struct afs_volume *volume, struct afs_server_l
         struct list_head *p;
         unsigned int i;
 
-        spin_lock(&volume->cell->vs_lock);
+        down_write(&volume->cell->vs_lock);
 
         for (i = 0; i < slist->nr_servers; i++) {
                 se = &slist->servers[i];
@@ -147,11 +147,11 @@ void afs_attach_volume_to_servers(struct afs_volume *volume, struct afs_server_l
                         if (volume->vid <= pe->volume->vid)
                                 break;
                 }
-                list_add_tail_rcu(&se->slink, p);
+                list_add_tail(&se->slink, p);
         }
 
         slist->attached = true;
-        spin_unlock(&volume->cell->vs_lock);
+        up_write(&volume->cell->vs_lock);
 }
 
 /*
@@ -164,7 +164,7 @@ void afs_reattach_volume_to_servers(struct afs_volume *volume, struct afs_server
 {
         unsigned int n = 0, o = 0;
 
-        spin_lock(&volume->cell->vs_lock);
+        down_write(&volume->cell->vs_lock);
 
         while (n < new->nr_servers || o < old->nr_servers) {
                 struct afs_server_entry *pn = n < new->nr_servers ? &new->servers[n] : NULL;
@@ -174,7 +174,7 @@ void afs_reattach_volume_to_servers(struct afs_volume *volume, struct afs_server
                 int diff;
 
                 if (pn && po && pn->server == po->server) {
-                        list_replace_rcu(&po->slink, &pn->slink);
+                        list_replace(&po->slink, &pn->slink);
                         n++;
                         o++;
                         continue;
@@ -192,15 +192,15 @@ void afs_reattach_volume_to_servers(struct afs_volume *volume, struct afs_server
                                 if (volume->vid <= s->volume->vid)
                                         break;
                         }
-                        list_add_tail_rcu(&pn->slink, p);
+                        list_add_tail(&pn->slink, p);
                         n++;
                 } else {
-                        list_del_rcu(&po->slink);
+                        list_del(&po->slink);
                         o++;
                 }
         }
 
-        spin_unlock(&volume->cell->vs_lock);
+        up_write(&volume->cell->vs_lock);
 }
 
 /*
@@ -213,11 +213,11 @@ void afs_detach_volume_from_servers(struct afs_volume *volume, struct afs_server
         if (!slist->attached)
                 return;
 
-        spin_lock(&volume->cell->vs_lock);
+        down_write(&volume->cell->vs_lock);
 
         for (i = 0; i < slist->nr_servers; i++)
-                list_del_rcu(&slist->servers[i].slink);
+                list_del(&slist->servers[i].slink);
 
         slist->attached = false;
-        spin_unlock(&volume->cell->vs_lock);
+        up_write(&volume->cell->vs_lock);
 }
@@ -151,9 +151,11 @@ enum yfs_cm_operation {
         EM(afs_volume_trace_alloc,              "ALLOC         ") \
         EM(afs_volume_trace_free,               "FREE          ") \
         EM(afs_volume_trace_get_alloc_sbi,      "GET sbi-alloc ") \
+        EM(afs_volume_trace_get_callback,       "GET callback  ") \
         EM(afs_volume_trace_get_cell_insert,    "GET cell-insrt") \
         EM(afs_volume_trace_get_new_op,         "GET op-new    ") \
         EM(afs_volume_trace_get_query_alias,    "GET cell-alias") \
+        EM(afs_volume_trace_put_callback,       "PUT callback  ") \
         EM(afs_volume_trace_put_cell_dup,       "PUT cell-dup  ") \
         EM(afs_volume_trace_put_cell_root,      "PUT cell-root ") \
         EM(afs_volume_trace_put_destroy_sbi,    "PUT sbi-destry") \
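The two added EM() lines supply the strings for the new get/put trace reasons used by afs_try_get_volume() and afs_put_volume() above. In the AFS trace header, such EM()/E_() lists are expanded twice, a common ftrace idiom: once to export the enum values to userspace tooling and once to build the table consumed by __print_symbolic(). Roughly, as a simplified sketch rather than the verbatim header:

/* Pass 1: make each enum value visible to userspace trace tools. */
#undef EM
#undef E_
#define EM(a, b) TRACE_DEFINE_ENUM(a);
#define E_(a, b) TRACE_DEFINE_ENUM(a);

afs_volume_traces;

/* Pass 2: build an { enum, string } table for __print_symbolic(). */
#undef EM
#undef E_
#define EM(a, b)        { a, b },
#define E_(a, b)        { a, b }

/* ...later, in a TRACE_EVENT() print format:
 *      __print_symbolic(__entry->reason, afs_volume_traces)
 */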