SUNRPC: Replace pool stats with per-CPU variables

Eliminate the use of bus-locked operations in svc_xprt_enqueue(),
which is a hot path. Replace them with per-cpu variables to reduce
cross-CPU memory bus traffic.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Author: Chuck Lever
Date:   2023-01-10 10:32:00 -05:00
parent 65ba3d2425
commit ccf08bed6e
3 changed files with 26 additions and 19 deletions
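
For context, the change below follows the usual percpu_counter lifecycle: initialize each counter once per pool, increment it locklessly on the hot path, and fold the per-CPU contributions into a single total only on the rarely used read side. The following sketch is illustrative only; the example_pool names are hypothetical and not part of this commit.

/* Illustrative sketch of the percpu_counter pattern adopted by this patch.
 * Hypothetical names; not taken from the SUNRPC code.
 */
#include <linux/percpu_counter.h>
#include <linux/types.h>
#include <linux/gfp.h>

struct example_pool {
	struct percpu_counter	queued;		/* hypothetical statistic */
};

static int example_pool_setup(struct example_pool *p)
{
	/* Allocates the per-CPU storage; can fail, so check the result. */
	return percpu_counter_init(&p->queued, 0, GFP_KERNEL);
}

static void example_pool_count_event(struct example_pool *p)
{
	/* Hot path: bumps only this CPU's local count; no bus-locked RMW
	 * on a shared cacheline in the common case.
	 */
	percpu_counter_inc(&p->queued);
}

static s64 example_pool_read_total(struct example_pool *p)
{
	/* Slow path: walks every CPU's contribution and clamps at zero. */
	return percpu_counter_sum_positive(&p->queued);
}

static void example_pool_teardown(struct example_pool *p)
{
	percpu_counter_destroy(&p->queued);
}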

include/linux/sunrpc/svc.h

@@ -21,14 +21,6 @@
 #include <linux/mm.h>
 #include <linux/pagevec.h>
 
-/* statistics for svc_pool structures */
-struct svc_pool_stats {
-	atomic_long_t	packets;
-	unsigned long	sockets_queued;
-	atomic_long_t	threads_woken;
-	atomic_long_t	threads_timedout;
-};
-
 /*
  *
  * RPC service thread pool.
@@ -45,7 +37,12 @@ struct svc_pool {
 	struct list_head	sp_sockets;	/* pending sockets */
 	unsigned int		sp_nrthreads;	/* # of threads in pool */
 	struct list_head	sp_all_threads;	/* all server threads */
-	struct svc_pool_stats	sp_stats;	/* statistics on pool operation */
+
+	/* statistics on pool operation */
+	struct percpu_counter	sp_sockets_queued;
+	struct percpu_counter	sp_threads_woken;
+	struct percpu_counter	sp_threads_timedout;
+
 #define	SP_TASK_PENDING		(0)		/* still work to do even if no
 						 * xprt is queued. */
 #define SP_CONGESTED		(1)

net/sunrpc/svc.c

@@ -512,6 +512,10 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
 		INIT_LIST_HEAD(&pool->sp_sockets);
 		INIT_LIST_HEAD(&pool->sp_all_threads);
 		spin_lock_init(&pool->sp_lock);
+
+		percpu_counter_init(&pool->sp_sockets_queued, 0, GFP_KERNEL);
+		percpu_counter_init(&pool->sp_threads_woken, 0, GFP_KERNEL);
+		percpu_counter_init(&pool->sp_threads_timedout, 0, GFP_KERNEL);
 	}
 
 	return serv;
@@ -565,6 +569,7 @@ void
 svc_destroy(struct kref *ref)
 {
 	struct svc_serv *serv = container_of(ref, struct svc_serv, sv_refcnt);
+	unsigned int i;
 
 	dprintk("svc: svc_destroy(%s)\n", serv->sv_program->pg_name);
 	timer_shutdown_sync(&serv->sv_temptimer);
@@ -580,6 +585,13 @@ svc_destroy(struct kref *ref)
 	svc_pool_map_put(serv->sv_nrpools);
 
+	for (i = 0; i < serv->sv_nrpools; i++) {
+		struct svc_pool *pool = &serv->sv_pools[i];
+
+		percpu_counter_destroy(&pool->sp_sockets_queued);
+		percpu_counter_destroy(&pool->sp_threads_woken);
+		percpu_counter_destroy(&pool->sp_threads_timedout);
+	}
 	kfree(serv->sv_pools);
 	kfree(serv);
 }

net/sunrpc/svc_xprt.c

@@ -462,11 +462,9 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 	pool = svc_pool_for_cpu(xprt->xpt_server);
 
-	atomic_long_inc(&pool->sp_stats.packets);
+	percpu_counter_inc(&pool->sp_sockets_queued);
 	spin_lock_bh(&pool->sp_lock);
 	list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
-	pool->sp_stats.sockets_queued++;
 	spin_unlock_bh(&pool->sp_lock);
 
 	/* find a thread for this xprt */
@@ -474,7 +472,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
 		if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
 			continue;
-		atomic_long_inc(&pool->sp_stats.threads_woken);
+		percpu_counter_inc(&pool->sp_threads_woken);
 		rqstp->rq_qtime = ktime_get();
 		wake_up_process(rqstp->rq_task);
 		goto out_unlock;
@@ -769,7 +767,7 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 		goto out_found;
 
 	if (!time_left)
-		atomic_long_inc(&pool->sp_stats.threads_timedout);
+		percpu_counter_inc(&pool->sp_threads_timedout);
 
 	if (signalled() || kthread_should_stop())
 		return ERR_PTR(-EINTR);
@@ -1440,12 +1438,12 @@ static int svc_pool_stats_show(struct seq_file *m, void *p)
 		return 0;
 	}
 
-	seq_printf(m, "%u %lu %lu %lu %lu\n",
+	seq_printf(m, "%u %llu %llu %llu %llu\n",
 		pool->sp_id,
-		(unsigned long)atomic_long_read(&pool->sp_stats.packets),
-		pool->sp_stats.sockets_queued,
-		(unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
-		(unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));
+		percpu_counter_sum_positive(&pool->sp_sockets_queued),
+		percpu_counter_sum_positive(&pool->sp_sockets_queued),
+		percpu_counter_sum_positive(&pool->sp_threads_woken),
+		percpu_counter_sum_positive(&pool->sp_threads_timedout));
 
 	return 0;
 }