sunrpc: fix up the special handling of sv_nrpools == 1

Only pooled services take a reference to the svc_pool_map. The sunrpc
code has always used the sv_nrpools value to detect whether the service
is pooled.

The problem there is that nfsd is a pooled service, but when it's
running in "global" pool_mode, it doesn't take a reference to the pool
map because it has a sv_nrpools value of 1. This means that we have
two separate codepaths for starting the server, depending on whether
it's pooled or not.

Fix this by adding a new flag to the svc_serv that indicates whether
the serv is pooled. With this we can have the nfsd service
unconditionally take a reference, regardless of pool_mode.
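
In rough terms, the teardown logic changes from being keyed off the pool
count to being keyed off the new flag (a simplified before/after sketch,
not the literal hunks below):

	/* Before: only servs with more than one pool held a map reference,
	 * so an nfsd in "global" mode (sv_nrpools == 1) never took one. */
	if (serv->sv_nrpools > 1)
		svc_pool_map_put(serv->sv_nrpools);

	/* After: anything created through svc_create_pooled() is marked
	 * sv_is_pooled and always holds (and later drops) a reference. */
	if (serv->sv_is_pooled)
		svc_pool_map_put();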

Note that this is a behavior change for
/sys/module/sunrpc/parameters/pool_mode. Usually this file does not
allow you to change the pool-mode while there are nfsd threads running,
but if the pool-mode is "global" it's allowed. My assumption is that
this is a bug, since it probably should never have worked this way.

This patch changes the behavior such that you get back EBUSY even
when nfsd is running in global mode. I think this is more reasonable
behavior, and given that most people set this today using the module
parameter, it's doubtful anyone will notice.
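
For illustration, the pool_mode parameter setter refuses changes while
anything holds a reference on the pool map, roughly along these lines
(a simplified sketch of param_set_pool_mode() in net/sunrpc/svc.c, not
the exact upstream code; the real handler also accepts "auto" and
differs in detail):

	static int param_set_pool_mode(const char *val, const struct kernel_param *kp)
	{
		struct svc_pool_map *m = &svc_pool_map;
		int err = 0;

		mutex_lock(&svc_pool_map_mutex);

		/* With this patch, an nfsd running in "global" mode also
		 * holds a reference, so m->count is non-zero and the write
		 * fails with EBUSY while nfsd is up. */
		if (m->count) {
			err = -EBUSY;
			goto out;
		}

		if (sysfs_streq(val, "global"))
			m->mode = SVC_POOL_GLOBAL;
		else if (sysfs_streq(val, "percpu"))
			m->mode = SVC_POOL_PERCPU;
		else if (sysfs_streq(val, "pernode"))
			m->mode = SVC_POOL_PERNODE;
		else
			err = -EINVAL;
	out:
		mutex_unlock(&svc_pool_map_mutex);
		return err;
	}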

Signed-off-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
commit 8e0c8d2395
parent 3a6adfcae8
Author: Jeff Layton <jlayton@kernel.org>
Date:   2024-06-13 14:34:30 -04:00
Committed by: Chuck Lever <chuck.lever@oracle.com>
2 changed files with 8 additions and 19 deletions

--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h

@@ -85,6 +85,7 @@ struct svc_serv {
 	char *			sv_name;	/* service name */
 
 	unsigned int		sv_nrpools;	/* number of thread pools */
+	bool			sv_is_pooled;	/* is this a pooled service? */
 	struct svc_pool *	sv_pools;	/* array of thread pools */
 	int			(*sv_threadfn)(void *data);

--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c

@@ -250,10 +250,8 @@ svc_pool_map_get(void)
 	int npools = -1;
 
 	mutex_lock(&svc_pool_map_mutex);
-
 	if (m->count++) {
 		mutex_unlock(&svc_pool_map_mutex);
-		WARN_ON_ONCE(m->npools <= 1);
 		return m->npools;
 	}
@@ -275,32 +273,21 @@ svc_pool_map_get(void)
 		m->mode = SVC_POOL_GLOBAL;
 	}
 	m->npools = npools;
-
-	if (npools == 1)
-		/* service is unpooled, so doesn't hold a reference */
-		m->count--;
-
 	mutex_unlock(&svc_pool_map_mutex);
 	return npools;
 }
 
 /*
- * Drop a reference to the global map of cpus to pools, if
- * pools were in use, i.e. if npools > 1.
+ * Drop a reference to the global map of cpus to pools.
  * When the last reference is dropped, the map data is
- * freed; this allows the sysadmin to change the pool
- * mode using the pool_mode module option without
- * rebooting or re-loading sunrpc.ko.
+ * freed; this allows the sysadmin to change the pool.
  */
 static void
-svc_pool_map_put(int npools)
+svc_pool_map_put(void)
 {
 	struct svc_pool_map *m = &svc_pool_map;
 
-	if (npools <= 1)
-		return;
 	mutex_lock(&svc_pool_map_mutex);
-
 	if (!--m->count) {
 		kfree(m->to_pool);
 		m->to_pool = NULL;
@@ -308,7 +295,6 @@ svc_pool_map_put(int npools)
 		m->pool_to = NULL;
 		m->npools = 0;
 	}
-
 	mutex_unlock(&svc_pool_map_mutex);
 }
@@ -553,9 +539,10 @@ struct svc_serv *svc_create_pooled(struct svc_program *prog,
 	serv = __svc_create(prog, stats, bufsize, npools, threadfn);
 	if (!serv)
 		goto out_err;
+	serv->sv_is_pooled = true;
 	return serv;
 out_err:
-	svc_pool_map_put(npools);
+	svc_pool_map_put();
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(svc_create_pooled);
@@ -585,7 +572,8 @@ svc_destroy(struct svc_serv **servp)
 	cache_clean_deferred(serv);
 
-	svc_pool_map_put(serv->sv_nrpools);
+	if (serv->sv_is_pooled)
+		svc_pool_map_put();
 
 	for (i = 0; i < serv->sv_nrpools; i++) {
 		struct svc_pool *pool = &serv->sv_pools[i];