NFSv4: Ensure delegation recall and byte range lock removal don't conflict
Add a mutex to the struct nfs4_state_owner to ensure that delegation recall doesn't conflict with byte range lock removal. Note that we nest the new mutex _outside_ the state manager reclaim protection (nfsi->rwsem) in order to avoid deadlocks.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
This commit is contained in:
parent 37380e4264
commit 65b62a29f7
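The commit message's central constraint is the lock ordering: wherever both locks are needed, the new per-state-owner so_delegreturn_mutex is taken outside (before) the per-inode reclaim protection nfsi->rwsem, so the unlock path and the delegation recall path cannot deadlock against each other. Below is a minimal sketch of the two paths reduced to their locking shape; the function names are invented for illustration and this is not the patch itself.

static void unlck_path_sketch(struct nfs4_state_owner *sp,
			      struct nfs_inode *nfsi)
{
	/* byte range lock removal (cf. nfs4_proc_unlck) */
	mutex_lock(&sp->so_delegreturn_mutex);	/* outer lock */
	down_read(&nfsi->rwsem);		/* inner lock */
	/* ... drop the byte range lock locally, then tell the server ... */
	up_read(&nfsi->rwsem);
	mutex_unlock(&sp->so_delegreturn_mutex);
}

static void recall_path_sketch(struct nfs4_state_owner *sp)
{
	/* delegation recall (cf. nfs_delegation_claim_opens) */
	mutex_lock(&sp->so_delegreturn_mutex);	/* same lock, same position in the order */
	/* ... reclaim opens and byte range locks held under the delegation ... */
	mutex_unlock(&sp->so_delegreturn_mutex);
}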
fs/nfs/delegation.c
@@ -71,8 +71,10 @@ static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_
 	int status = 0;
 
 	if (inode->i_flock == NULL)
-		goto out;
+		return 0;
 
+	if (inode->i_flock == NULL)
+		goto out;
 	/* Protect inode->i_flock using the file locks lock */
 	lock_flocks();
 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
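For orientation, the context lines at the end of this hunk lead into the walk over inode->i_flock that nfs_delegation_claim_locks performs under lock_flocks(). A condensed sketch of that pattern follows, using the file-locking API of that era (lock_flocks()/unlock_flocks() were later removed); the function name is made up and the per-lock recall RPC is reduced to a comment, so this is not the kernel's exact loop body.

static int claim_locks_sketch(struct nfs_open_context *ctx, struct inode *inode)
{
	struct file_lock *fl;
	int status = 0;

	if (inode->i_flock == NULL)
		return 0;

	lock_flocks();				/* protects the inode->i_flock list */
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX | FL_FLOCK)))
			continue;		/* ignore leases and other lock types */
		if (nfs_file_open_context(fl->fl_file) != ctx)
			continue;		/* only locks belonging to this open context */
		unlock_flocks();		/* cannot hold it across an RPC */
		/* ... re-establish this byte range lock on the server ... */
		lock_flocks();
	}
	unlock_flocks();
	return status;
}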
@@ -113,12 +115,15 @@ again:
 		get_nfs_open_context(ctx);
 		spin_unlock(&inode->i_lock);
 		sp = state->owner;
+		/* Block nfs4_proc_unlck */
+		mutex_lock(&sp->so_delegreturn_mutex);
 		seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
 		err = nfs4_open_delegation_recall(ctx, state, stateid);
 		if (!err)
 			err = nfs_delegation_claim_locks(ctx, state);
 		if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
 			err = -EAGAIN;
+		mutex_unlock(&sp->so_delegreturn_mutex);
 		put_nfs_open_context(ctx);
 		if (err != 0)
 			return err;
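Two mechanisms cooperate in this hunk: the new mutex serializes the recall against nfs4_proc_unlck, while the read side of so_reclaim_seqcount detects that the state manager's reclaim ran concurrently and converts the result into -EAGAIN so the recall is retried. A minimal sketch of that seqcount retry pattern, with the actual recall work elided and a made-up function name:

static int recall_retry_sketch(struct nfs4_state_owner *sp)
{
	unsigned int seq;
	int err = 0;

	mutex_lock(&sp->so_delegreturn_mutex);
	seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
	/* ... recall the delegation: reclaim opens, then byte range locks ... */
	if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
		err = -EAGAIN;	/* a reclaim raced with us; the caller retries */
	mutex_unlock(&sp->so_delegreturn_mutex);
	return err;
}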
fs/nfs/nfs4_fs.h
@@ -93,6 +93,7 @@ struct nfs4_state_owner {
 	struct list_head	so_states;
 	struct nfs_seqid_counter so_seqid;
 	seqcount_t		so_reclaim_seqcount;
+	struct mutex		so_delegreturn_mutex;
 };
 
 enum {
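After this hunk the state owner carries the recovery-related members next to each other. A trimmed sketch of the struct, keeping only the fields visible in the hunk and annotating the role each plays here (surrounding members and exact alignment omitted; the sketch name is not the kernel's):

struct nfs4_state_owner_sketch {
	struct list_head	so_states;		/* open states owned by this owner */
	struct nfs_seqid_counter so_seqid;		/* seqid counter for this open owner */
	seqcount_t		so_reclaim_seqcount;	/* bumped by state-manager reclaim */
	struct mutex		so_delegreturn_mutex;	/* recall vs. byte range unlock exclusion */
};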
fs/nfs/nfs4proc.c
@@ -4485,7 +4485,9 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
 
 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
 {
-	struct nfs_inode *nfsi = NFS_I(state->inode);
+	struct inode *inode = state->inode;
+	struct nfs4_state_owner *sp = state->owner;
+	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_seqid *seqid;
 	struct nfs4_lock_state *lsp;
 	struct rpc_task *task;
@@ -4495,12 +4497,17 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
 	status = nfs4_set_lock_state(state, request);
 	/* Unlock _before_ we do the RPC call */
 	request->fl_flags |= FL_EXISTS;
+	/* Exclude nfs_delegation_claim_locks() */
+	mutex_lock(&sp->so_delegreturn_mutex);
+	/* Exclude nfs4_reclaim_open_stateid() - note nesting! */
 	down_read(&nfsi->rwsem);
 	if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
 		up_read(&nfsi->rwsem);
+		mutex_unlock(&sp->so_delegreturn_mutex);
 		goto out;
 	}
 	up_read(&nfsi->rwsem);
+	mutex_unlock(&sp->so_delegreturn_mutex);
 	if (status != 0)
 		goto out;
 	/* Is this a delegated lock? */
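Worth noting in this hunk is that both exits from the critical section release in the reverse of the acquisition order, including the early exit taken when do_vfs_lock() reports -ENOENT (with FL_EXISTS set, that means the lock was not actually held locally). A reduced sketch of that shape, with invented names and the VFS call replaced by a flag:

static int unlck_exit_sketch(struct nfs4_state_owner *sp,
			     struct nfs_inode *nfsi,
			     bool lock_already_gone)
{
	mutex_lock(&sp->so_delegreturn_mutex);	/* exclude delegation recall */
	down_read(&nfsi->rwsem);		/* exclude open-state reclaim */
	if (lock_already_gone) {
		up_read(&nfsi->rwsem);
		mutex_unlock(&sp->so_delegreturn_mutex);
		return 0;			/* nothing left to unlock locally */
	}
	up_read(&nfsi->rwsem);
	mutex_unlock(&sp->so_delegreturn_mutex);
	/* ... continue and send the unlock RPC to the server ... */
	return 0;
}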
fs/nfs/nfs4state.c
@@ -519,6 +519,7 @@ nfs4_alloc_state_owner(struct nfs_server *server,
 	atomic_set(&sp->so_count, 1);
 	INIT_LIST_HEAD(&sp->so_lru);
 	seqcount_init(&sp->so_reclaim_seqcount);
+	mutex_init(&sp->so_delegreturn_mutex);
 	return sp;
 }
 