smb: client: refresh referral without acquiring refpath_lock

Avoid refreshing the DFS referral with refpath_lock acquired, as the I/O
could block for a while due to a potentially disconnected or slow DFS
root server, leaving other threads - ones that use the same @server but
don't require a DFS root server - unable to make any progress.

Cc: stable@vger.kernel.org # 6.4+
Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
Author: Paulo Alcantara, 2024-04-01 22:44:07 -03:00 (committed by Steve French)
Commit: 0a05ad21d7 (parent: 062a7f0ff4)
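For readers skimming the diff below: the fix narrows the refpath_lock critical section to a quick copy of server->leaf_fullpath, then performs the slow referral refresh on the copy with the mutex released. A minimal sketch of that pattern follows; it is illustrative only, and do_slow_referral_refresh() is a hypothetical stand-in for the cache lookup and referral update that the real __refresh_ses_referral() performs:

/*
 * Illustrative sketch of the locking pattern introduced by this change.
 * do_slow_referral_refresh() is a placeholder, not a cifs function.
 */
static void refresh_referral_sketch(struct TCP_Server_Info *server)
{
	char *path = NULL;

	/* Hold refpath_lock only long enough to snapshot the shared path. */
	mutex_lock(&server->refpath_lock);
	if (server->leaf_fullpath)
		path = kstrdup(server->leaf_fullpath + 1, GFP_ATOMIC);
	mutex_unlock(&server->refpath_lock);

	if (!path)
		return;

	/*
	 * The potentially blocking I/O to the DFS root server now runs
	 * without refpath_lock held, so other threads sharing @server
	 * can still make progress.
	 */
	do_slow_referral_refresh(path);

	kfree(path);
}

The same shape appears twice in the patch: the copy-then-unlock block is added inside __refresh_ses_referral(), and the old locked call sites in refresh_tcon() and dfs_cache_refresh() are removed.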

@@ -1172,8 +1172,8 @@ static bool is_ses_good(struct cifs_ses *ses)
 	return ret;
 }
 
-/* Refresh dfs referral of tcon and mark it for reconnect if needed */
-static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_refresh)
+/* Refresh dfs referral of @ses and mark it for reconnect if needed */
+static void __refresh_ses_referral(struct cifs_ses *ses, bool force_refresh)
 {
 	struct TCP_Server_Info *server = ses->server;
 	DFS_CACHE_TGT_LIST(old_tl);
@@ -1181,10 +1181,21 @@ static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_ref
 	bool needs_refresh = false;
 	struct cache_entry *ce;
 	unsigned int xid;
+	char *path = NULL;
 	int rc = 0;
 
 	xid = get_xid();
 
+	mutex_lock(&server->refpath_lock);
+	if (server->leaf_fullpath) {
+		path = kstrdup(server->leaf_fullpath + 1, GFP_ATOMIC);
+		if (!path)
+			rc = -ENOMEM;
+	}
+	mutex_unlock(&server->refpath_lock);
+	if (!path)
+		goto out;
+
 	down_read(&htable_rw_lock);
 	ce = lookup_cache_entry(path);
 	needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
@@ -1218,19 +1229,17 @@ out:
 	free_xid(xid);
 	dfs_cache_free_tgts(&old_tl);
 	dfs_cache_free_tgts(&new_tl);
-	return rc;
+	kfree(path);
 }
 
-static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
+static inline void refresh_ses_referral(struct cifs_ses *ses)
 {
-	struct TCP_Server_Info *server = tcon->ses->server;
-	struct cifs_ses *ses = tcon->ses;
+	__refresh_ses_referral(ses, false);
+}
 
-	mutex_lock(&server->refpath_lock);
-	if (server->leaf_fullpath)
-		__refresh_tcon(server->leaf_fullpath + 1, ses, force_refresh);
-	mutex_unlock(&server->refpath_lock);
-	return 0;
+static inline void force_refresh_ses_referral(struct cifs_ses *ses)
+{
+	__refresh_ses_referral(ses, true);
 }
 
 /**
@@ -1271,25 +1280,20 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
 	 */
 	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
 
-	return refresh_tcon(tcon, true);
+	force_refresh_ses_referral(tcon->ses);
+	return 0;
 }
 
 /* Refresh all DFS referrals related to DFS tcon */
 void dfs_cache_refresh(struct work_struct *work)
 {
-	struct TCP_Server_Info *server;
 	struct cifs_tcon *tcon;
 	struct cifs_ses *ses;
 
 	tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
 
-	for (ses = tcon->ses; ses; ses = ses->dfs_root_ses) {
-		server = ses->server;
-		mutex_lock(&server->refpath_lock);
-		if (server->leaf_fullpath)
-			__refresh_tcon(server->leaf_fullpath + 1, ses, false);
-		mutex_unlock(&server->refpath_lock);
-	}
+	for (ses = tcon->ses; ses; ses = ses->dfs_root_ses)
+		refresh_ses_referral(ses);
 
 	queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
 			   atomic_read(&dfs_cache_ttl) * HZ);
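As a side note, dfs_cache_refresh() at the end of the last hunk is a self-rearming delayed work item: it resolves its tcon via container_of() and re-queues itself with the cache TTL. A generic sketch of that shape, with my_ctx, refresh_work and ttl_secs as placeholder names rather than cifs symbols:

#include <linux/workqueue.h>

/* Generic self-rearming delayed work, mirroring dfs_cache_refresh(). */
struct my_ctx {
	struct delayed_work refresh_work;
	unsigned int ttl_secs;
};

static void refresh_worker(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx,
					  refresh_work.work);

	/* ... do the periodic refresh for ctx ... */

	/* Re-queue so the refresh repeats every ttl_secs seconds. */
	queue_delayed_work(system_wq, &ctx->refresh_work,
			   ctx->ttl_secs * HZ);
}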