// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/sched/mm.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>

#include "sunrpc.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache *rpc_task_slabp __read_mostly;
static struct kmem_cache *rpc_buffer_slabp __read_mostly;
static mempool_t *rpc_task_mempool __read_mostly;
static mempool_t *rpc_buffer_mempool __read_mostly;

static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(struct work_struct *);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue __read_mostly;
struct workqueue_struct *xprtiod_workqueue __read_mostly;
EXPORT_SYMBOL_GPL(xprtiod_workqueue);
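
/*
 * GFP flags for RPC allocations. When called from a workqueue worker
 * (e.g. rpciod), avoid retrying forever or warning on failure so that
 * the caller can back off and retry later.
 */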
gfp_t rpc_task_gfp_mask(void)
{
	if (current->flags & PF_WQ_WORKER)
		return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
	return GFP_KERNEL;
}
EXPORT_SYMBOL_GPL(rpc_task_gfp_mask);
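
/*
 * Atomically record the first fatal error for a task. Returns true if
 * @rpc_status was stored, false if another error was already set.
 */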
bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status)
{
	if (cmpxchg(&task->tk_rpc_status, 0, rpc_status) == 0)
		return true;
	return false;
}
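
/*
 * Return the number of jiffies until the task times out, or 0 if no
 * timeout is pending (or it has already expired).
 */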
unsigned long
rpc_task_timeout(const struct rpc_task *task)
{
	unsigned long timeout = READ_ONCE(task->tk_timeout);

	if (timeout != 0) {
		unsigned long now = jiffies;
		if (time_before(now, timeout))
			return timeout - now;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_task_timeout);

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock held in order to avoid races within
 * __rpc_queue_timer_fn().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (list_empty(&task->u.tk_wait.timer_list))
		return;
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		cancel_delayed_work(&queue->timer_list.dwork);
}
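
/*
 * (Re)arm the queue's delayed work so that it fires at @expires,
 * converting the absolute expiry time into a relative delay.
 */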
static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	unsigned long now = jiffies;
	queue->timer_list.expires = expires;
	if (time_before_eq(expires, now))
		expires = 0;
	else
		expires -= now;
	mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
}

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
		unsigned long timeout)
{
	task->tk_timeout = timeout;
	if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
		rpc_set_queue_timer(queue, timeout);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}
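
/*
 * Switch the queue to a new priority level and reset the batch counter
 * that bounds how many tasks are serviced from this level before the
 * queue is rotated.
 */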
static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	if (queue->priority != priority) {
		queue->priority = priority;
		queue->nr = 1U << priority;
	}
}

static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
}

/*
 * Add a request to a queue list
 */
static void
__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
{
	struct rpc_task *t;

	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.links,
					&t->u.tk_wait.links);
			/* Cache the queue head in task->u.tk_wait.list */
			task->u.tk_wait.list.next = q;
			task->u.tk_wait.list.prev = NULL;
			return;
		}
	}
	INIT_LIST_HEAD(&task->u.tk_wait.links);
	list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Remove request from a queue list
 */
static void
__rpc_list_dequeue_task(struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	if (task->u.tk_wait.list.prev == NULL) {
		list_del(&task->u.tk_wait.links);
		return;
	}
	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_first_entry(&task->u.tk_wait.links,
				struct rpc_task,
				u.tk_wait.links);
		/* Assume __rpc_list_enqueue_task() cached the queue head */
		q = t->u.tk_wait.list.next;
		list_add_tail(&t->u.tk_wait.list, q);
		list_del(&task->u.tk_wait.links);
	}
	list_del(&task->u.tk_wait.list);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	if (unlikely(queue_priority > queue->maxpriority))
		queue_priority = queue->maxpriority;
	__rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
}

/*
 * Add new request to wait queue.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task, queue_priority);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
	smp_wmb();
	rpc_set_queued(task);
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	__rpc_list_dequeue_task(task);
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	else
		list_del(&task->u.tk_wait.list);
	queue->qlen--;
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	queue->timer_list.expires = 0;
	INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
	INIT_LIST_HEAD(&queue->timer_list.list);
	rpc_assign_waitqueue_name(queue, qname);
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	cancel_delayed_work_sync(&queue->timer_list.dwork);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
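
/*
 * Bit-wait callback used with out_of_line_wait_on_bit(): sleep until
 * woken, returning -ERESTARTSYS if a fatal signal is pending.
 */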
static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
	schedule();
	if (signal_pending_state(mode, current))
		return -ERESTARTSYS;
	return 0;
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	/* Might be a task carrying a reverse-direction operation */
	if (!clnt) {
		static atomic_t rpc_pid;

		task->tk_pid = atomic_inc_return(&rpc_pid);
		return;
	}

	task->tk_pid = atomic_inc_return(&clnt->cl_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif
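
/*
 * Mark a task as active, assigning it a debugging id and emitting the
 * rpc_task_begin tracepoint. The RPC_TASK_ACTIVE bit is what
 * rpc_wait_for_completion_task() sleeps on.
 */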
static void rpc_set_active(struct rpc_task *task)
{
	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	trace_rpc_task_begin(task, NULL);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
	void *m = &task->tk_runstate;
	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
	unsigned long flags;
	int ret;

	trace_rpc_task_complete(task, NULL);

	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	ret = atomic_dec_and_test(&task->tk_count);
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);
	return ret;
}

/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int rpc_wait_for_completion_task(struct rpc_task *task)
{
	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			rpc_wait_bit_killable, TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
}
EXPORT_SYMBOL_GPL(rpc_wait_for_completion_task);

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 * the RPC_TASK_RUNNING flag.
 */
static void rpc_make_runnable(struct workqueue_struct *wq,
		struct rpc_task *task)
{
	bool need_wakeup = !rpc_test_and_set_running(task);

	rpc_clear_queued(task);
	if (!need_wakeup)
		return;
	if (RPC_IS_ASYNC(task)) {
		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		queue_work(wq, &task->u.tk_work);
	} else {
		smp_mb__after_atomic();
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
	}
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	trace_rpc_task_sleep(task, q);

	__rpc_add_wait_queue(q, task, queue_priority);
}

static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
		return;
	__rpc_do_sleep_on_priority(q, task, queue_priority);
}

static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout,
		unsigned char queue_priority)
{
	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
		return;
	if (time_is_after_jiffies(timeout)) {
		__rpc_do_sleep_on_priority(q, task, queue_priority);
		__rpc_add_timer(q, task, timeout);
	} else
		task->tk_status = -ETIMEDOUT;
}

static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
{
	if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
		task->tk_callback = action;
}
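
/*
 * Sanity check before queueing: an inactive task must never be put to
 * sleep. If that happens, fail the task with -EIO, release it and tell
 * the caller not to queue it.
 */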
static bool rpc_sleep_check_activated(struct rpc_task *task)
{
	/* We shouldn't ever put an inactive task to sleep */
	if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return false;
	}
	return true;
}

void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action, unsigned long timeout)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	WARN_ON_ONCE(task->tk_timeout != 0);
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority(q, task, task->tk_priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);

void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout, int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);

void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
		int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	WARN_ON_ONCE(task->tk_timeout != 0);
	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority(q, task, priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);

/**
 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 * @wq: workqueue on which to run task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	trace_rpc_task_wakeup(task, queue);

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(wq, task);
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static struct rpc_task *
rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue, struct rpc_task *task,
		bool (*action)(struct rpc_task *, void *), void *data)
{
	if (RPC_IS_QUEUED(task)) {
		smp_rmb();
		if (task->tk_waitqueue == queue) {
			if (action == NULL || action(task, data)) {
				__rpc_do_wake_up_task_on_wq(wq, queue, task);
				return task;
			}
		}
	}
	return NULL;
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
			task, NULL, NULL);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
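
/*
 * Wake-up action callback: record the supplied status on the task and
 * let the wake-up proceed.
 */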
static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
{
	task->tk_status = *(int *)status;
	return true;
}

static void
rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
			task, rpc_task_action_set_status, &status);
}

/**
 * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
 * @queue: pointer to rpc_wait_queue
 * @task: pointer to rpc_task
 * @status: integer error value
 *
 * If @task is queued on @queue, then it is woken up, and @task->tk_status is
 * set to the value of @status.
 */
void
rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock(&queue->lock);
	rpc_wake_up_task_queue_set_status_locked(queue, task, status);
	spin_unlock(&queue->lock);
}

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service the privileged queue.
	 */
	q = &queue->tasks[RPC_NR_PRIORITY - 1];
	if (queue->maxpriority > RPC_PRIORITY_PRIVILEGED && !list_empty(q)) {
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		goto out;
	}

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q) && queue->nr) {
		queue->nr--;
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		goto out;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
out:
	return task;
}

static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
	if (RPC_IS_PRIORITY(queue))
		return __rpc_find_next_queued_priority(queue);
	if (!list_empty(&queue->tasks[0]))
		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
	return NULL;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	struct rpc_task *task = NULL;

	spin_lock(&queue->lock);
	task = __rpc_find_next_queued(queue);
	if (task != NULL)
		task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
				task, func, data);
	spin_unlock(&queue->lock);

	return task;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);

static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
{
	return true;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up_locked - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 */
static void rpc_wake_up_locked(struct rpc_wait_queue *queue)
{
	struct rpc_task *task;

	for (;;) {
		task = __rpc_find_next_queued(queue);
		if (task == NULL)
			break;
		rpc_wake_up_task_queue_locked(queue, task);
	}
}

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	spin_lock(&queue->lock);
	rpc_wake_up_locked(queue);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status_locked - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 */
static void rpc_wake_up_status_locked(struct rpc_wait_queue *queue, int status)
{
	struct rpc_task *task;

	for (;;) {
		task = __rpc_find_next_queued(queue);
		if (task == NULL)
			break;
		rpc_wake_up_task_queue_set_status_locked(queue, task, status);
	}
}

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	spin_lock(&queue->lock);
	rpc_wake_up_status_locked(queue, status);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);
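
/*
 * Per-queue timer worker: time out every task whose deadline has passed
 * (waking it with -ETIMEDOUT) and rearm the timer for the earliest
 * remaining expiry.
 */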
static void __rpc_queue_timer_fn(struct work_struct *work)
{
	struct rpc_wait_queue *queue = container_of(work,
			struct rpc_wait_queue,
			timer_list.dwork.work);
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->tk_timeout;
		if (time_after_eq(now, timeo)) {
			trace_rpc_task_timeout(task, task->tk_action);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}
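
/*
 * Callback for rpc_delay(): clear the -ETIMEDOUT status that marks the
 * end of the delay so that the task continues normally.
 */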
static void __rpc_atrun(struct rpc_task *task)
{
	if (task->tk_status == -ETIMEDOUT)
		task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
}
EXPORT_SYMBOL_GPL(rpc_delay);

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

static void
rpc_init_task_statistics(struct rpc_task *task)
{
	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;

	/* starting timestamp */
	task->tk_start = ktime_get();
}

static void
rpc_reset_task_statistics(struct rpc_task *task)
{
	task->tk_timeouts = 0;
	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
	rpc_init_task_statistics(task);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	trace_rpc_task_end(task, task->tk_action);
	task->tk_action = NULL;
	if (task->tk_ops->rpc_count_stats)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	if (task->tk_ops->rpc_call_done != NULL) {
		trace_rpc_task_call_done(task, task->tk_ops->rpc_call_done);
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
			rpc_reset_task_statistics(task);
		}
	}
}
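
/*
 * Signal a task: latch -ERESTARTSYS as its fatal RPC status, set
 * RPC_TASK_SIGNALLED and wake the task if it is currently sleeping on a
 * wait queue.
 */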
void rpc_signal_task(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;

	if (!RPC_IS_ACTIVATED(task))
		return;

	if (!rpc_task_set_rpc_status(task, -ERESTARTSYS))
		return;
	trace_rpc_task_signalled(task, task->tk_action);
	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
	smp_mb__after_atomic();
	queue = READ_ONCE(task->tk_waitqueue);
	if (queue)
		rpc_wake_up_queued_task(queue, task);
}
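
/*
 * Try to cancel a task by latching @error as its fatal RPC status and
 * waking it if it is queued. A task that already carries an error is
 * left alone.
 */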
void rpc_task_try_cancel(struct rpc_task *task, int error)
{
	struct rpc_wait_queue *queue;

	if (!rpc_task_set_rpc_status(task, error))
		return;
	queue = READ_ONCE(task->tk_waitqueue);
	if (queue)
		rpc_wake_up_queued_task(queue, task);
}

void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
	rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}
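
/*
 * A task that is driving a swap-over-NFS transport and currently holds
 * the transport send lock must be allowed to dip into memory reserves
 * (PF_MEMALLOC) so that writeout can make forward progress.
 */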
static bool xprt_needs_memalloc(struct rpc_xprt *xprt, struct rpc_task *tk)
|
|
|
|
{
|
|
|
|
if (!xprt)
|
|
|
|
return false;
|
|
|
|
if (!atomic_read(&xprt->swapper))
|
|
|
|
return false;
|
|
|
|
return test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == tk;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* This is the RPC `scheduler' (or rather, the finite state machine).
|
|
|
|
*/
|
2007-02-04 05:38:41 +08:00
|
|
|
static void __rpc_execute(struct rpc_task *task)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2009-03-11 08:33:16 +08:00
|
|
|
struct rpc_wait_queue *queue;
|
|
|
|
int task_is_async = RPC_IS_ASYNC(task);
|
|
|
|
int status = 0;
|
2022-03-07 07:41:44 +08:00
|
|
|
unsigned long pflags = current->flags;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-10-23 22:43:47 +08:00
|
|
|
WARN_ON_ONCE(RPC_IS_QUEUED(task));
|
|
|
|
if (RPC_IS_QUEUED(task))
|
|
|
|
return;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-06-23 01:16:19 +08:00
|
|
|
for (;;) {
|
2011-07-07 07:58:23 +08:00
|
|
|
void (*do_action)(struct rpc_task *);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
2018-01-04 04:38:49 +08:00
|
|
|
* Perform the next FSM step or a pending callback.
|
|
|
|
*
|
|
|
|
* tk_action may be NULL if the task has been killed.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2018-01-04 04:38:49 +08:00
|
|
|
do_action = task->tk_action;
|
2022-10-06 03:57:35 +08:00
|
|
|
/* Tasks with an RPC error status should exit */
|
2023-05-11 00:28:00 +08:00
|
|
|
if (do_action && do_action != rpc_exit_task &&
|
2022-10-06 03:57:35 +08:00
|
|
|
(status = READ_ONCE(task->tk_rpc_status)) != 0) {
|
|
|
|
task->tk_status = status;
|
2023-05-11 00:28:00 +08:00
|
|
|
do_action = rpc_exit_task;
|
2022-10-06 03:57:35 +08:00
|
|
|
}
|
|
|
|
/* Callbacks override all actions */
|
2018-01-04 04:38:49 +08:00
|
|
|
if (task->tk_callback) {
|
|
|
|
do_action = task->tk_callback;
|
|
|
|
task->tk_callback = NULL;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2018-01-04 04:38:49 +08:00
|
|
|
if (!do_action)
|
|
|
|
break;
|
2022-03-07 07:41:44 +08:00
|
|
|
if (RPC_IS_SWAPPER(task) ||
|
|
|
|
xprt_needs_memalloc(task->tk_xprt, task))
|
|
|
|
current->flags |= PF_MEMALLOC;
|
|
|
|
|
2018-03-16 22:33:44 +08:00
|
|
|
trace_rpc_task_run_action(task, do_action);
|
2011-07-07 07:58:23 +08:00
|
|
|
do_action(task);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Lockless check for whether task is sleeping or not.
|
|
|
|
*/
|
2021-07-12 21:57:08 +08:00
|
|
|
if (!RPC_IS_QUEUED(task)) {
|
|
|
|
cond_resched();
|
2005-04-17 06:20:36 +08:00
|
|
|
continue;
|
2021-07-12 21:57:08 +08:00
|
|
|
}
|
2019-04-08 01:58:44 +08:00
|
|
|
|
2009-03-11 08:33:16 +08:00
|
|
|
/*
|
|
|
|
* The queue->lock protects against races with
|
|
|
|
* rpc_make_runnable().
|
|
|
|
*
|
|
|
|
* Note that once we clear RPC_TASK_RUNNING on an asynchronous
|
|
|
|
* rpc_task, rpc_make_runnable() can assign it to a
|
|
|
|
* different workqueue. We therefore cannot assume that the
|
|
|
|
* rpc_task pointer may still be dereferenced.
|
|
|
|
*/
|
|
|
|
queue = task->tk_waitqueue;
|
2019-05-02 23:23:12 +08:00
|
|
|
spin_lock(&queue->lock);
|
2009-03-11 08:33:16 +08:00
|
|
|
if (!RPC_IS_QUEUED(task)) {
|
2019-05-02 23:23:12 +08:00
|
|
|
spin_unlock(&queue->lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
continue;
|
|
|
|
}
|
2022-10-06 03:57:35 +08:00
|
|
|
/* Wake up any task that has an exit status */
|
|
|
|
if (READ_ONCE(task->tk_rpc_status) != 0) {
|
|
|
|
rpc_wake_up_task_queue_locked(queue, task);
|
|
|
|
spin_unlock(&queue->lock);
|
|
|
|
continue;
|
|
|
|
}
|
2009-03-11 08:33:16 +08:00
|
|
|
rpc_clear_running(task);
|
2019-05-02 23:23:12 +08:00
|
|
|
spin_unlock(&queue->lock);
|
2009-03-11 08:33:16 +08:00
|
|
|
if (task_is_async)
|
2022-03-07 07:41:44 +08:00
|
|
|
goto out;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* sync task: sleep here */
|
2020-07-09 04:10:45 +08:00
|
|
|
trace_rpc_task_sync_sleep(task, task->tk_action);
|
2005-06-23 01:16:21 +08:00
|
|
|
status = out_of_line_wait_on_bit(&task->tk_runstate,
|
2007-12-07 05:24:39 +08:00
|
|
|
RPC_TASK_QUEUED, rpc_wait_bit_killable,
|
freezer,sched: Rewrite core freezer logic
Rewrite the core freezer to behave better wrt thawing and be simpler
in general.
By replacing PF_FROZEN with TASK_FROZEN, a special block state, it is
ensured frozen tasks stay frozen until thawed and don't randomly wake
up early, as is currently possible.
As such, it does away with PF_FROZEN and PF_FREEZER_SKIP, freeing up
two PF_flags (yay!).
Specifically; the current scheme works a little like:
freezer_do_not_count();
schedule();
freezer_count();
And either the task is blocked, or it lands in try_to_freezer()
through freezer_count(). Now, when it is blocked, the freezer
considers it frozen and continues.
However, on thawing, once pm_freezing is cleared, freezer_count()
stops working, and any random/spurious wakeup will let a task run
before its time.
That is, thawing tries to thaw things in explicit order; kernel
threads and workqueues before doing bringing SMP back before userspace
etc.. However due to the above mentioned races it is entirely possible
for userspace tasks to thaw (by accident) before SMP is back.
This can be a fatal problem in asymmetric ISA architectures (eg ARMv9)
where the userspace task requires a special CPU to run.
As said; replace this with a special task state TASK_FROZEN and add
the following state transitions:
TASK_FREEZABLE -> TASK_FROZEN
__TASK_STOPPED -> TASK_FROZEN
__TASK_TRACED -> TASK_FROZEN
The new TASK_FREEZABLE can be set on any state part of TASK_NORMAL
(IOW. TASK_INTERRUPTIBLE and TASK_UNINTERRUPTIBLE) -- any such state
is already required to deal with spurious wakeups and the freezer
causes one such when thawing the task (since the original state is
lost).
The special __TASK_{STOPPED,TRACED} states *can* be restored since
their canonical state is in ->jobctl.
With this, frozen tasks need an explicit TASK_FROZEN wakeup and are
free of undue (early / spurious) wakeups.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Link: https://lore.kernel.org/r/20220822114649.055452969@infradead.org
2022-08-22 19:18:22 +08:00
|
|
|
TASK_KILLABLE|TASK_FREEZABLE);
|
2019-04-08 01:58:44 +08:00
|
|
|
if (status < 0) {
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* When a sync task receives a signal, it exits with
|
|
|
|
* -ERESTARTSYS. In order to catch any callbacks that
|
|
|
|
* clean up after sleeping on some queue, we don't
|
|
|
|
* break the loop here, but go around once more.
|
|
|
|
*/
|
2022-10-06 03:57:35 +08:00
|
|
|
rpc_signal_task(task);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2020-07-09 04:10:45 +08:00
|
|
|
trace_rpc_task_sync_wake(task, task->tk_action);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Release all resources associated with the task */
|
|
|
|
rpc_release_task(task);
|
2022-03-07 07:41:44 +08:00
|
|
|
out:
|
|
|
|
current_restore_flags(pflags, PF_MEMALLOC);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* User-visible entry point to the scheduler.
|
|
|
|
*
|
|
|
|
* This may be called recursively if e.g. an async NFS task updates
|
|
|
|
* the attributes and finds that dirty pages must be flushed.
|
|
|
|
* NOTE: Upon exit of this function the task is guaranteed to be
|
|
|
|
* released. In particular note that tk_release() will have
|
|
|
|
* been called, so your task memory may have been freed.
|
|
|
|
*/
|
2007-02-04 05:38:41 +08:00
|
|
|
void rpc_execute(struct rpc_task *task)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2013-05-21 11:00:18 +08:00
|
|
|
bool is_async = RPC_IS_ASYNC(task);
|
|
|
|
|
2006-01-03 16:55:06 +08:00
|
|
|
rpc_set_active(task);
|
2016-05-28 00:59:33 +08:00
|
|
|
rpc_make_runnable(rpciod_workqueue, task);
|
2021-03-03 21:47:16 +08:00
|
|
|
if (!is_async) {
|
|
|
|
unsigned int pflags = memalloc_nofs_save();
|
2010-08-01 02:29:08 +08:00
|
|
|
__rpc_execute(task);
|
2021-03-03 21:47:16 +08:00
|
|
|
memalloc_nofs_restore(pflags);
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2006-11-22 22:55:48 +08:00
|
|
|
static void rpc_async_schedule(struct work_struct *work)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2019-02-18 23:02:29 +08:00
|
|
|
unsigned int pflags = memalloc_nofs_save();
|
|
|
|
|
2006-11-22 22:55:48 +08:00
|
|
|
__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
|
2019-02-18 23:02:29 +08:00
|
|
|
memalloc_nofs_restore(pflags);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2006-01-03 16:55:49 +08:00
|
|
|
/**
|
2016-09-15 22:55:20 +08:00
|
|
|
* rpc_malloc - allocate RPC buffer resources
|
|
|
|
* @task: RPC task
|
|
|
|
*
|
|
|
|
* A single memory region is allocated, which is split between the
|
|
|
|
* RPC call and RPC reply that this task is being used for. When
|
|
|
|
* this RPC is retired, the memory is released by calling rpc_free.
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
2007-03-30 04:47:58 +08:00
|
|
|
* To prevent rpciod from hanging, this allocator never sleeps,
|
2016-09-15 22:55:20 +08:00
|
|
|
* returning -ENOMEM and suppressing warning if the request cannot
|
|
|
|
* be serviced immediately. The caller can arrange to sleep in a
|
|
|
|
* way that is safe for rpciod.
|
2007-03-30 04:47:58 +08:00
|
|
|
*
|
|
|
|
* Most requests are 'small' (under 2KiB) and can be serviced from a
|
|
|
|
* mempool, ensuring that NFS reads and writes can always proceed,
|
|
|
|
* and that there is good locality of reference for these buffers.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2016-09-15 22:55:20 +08:00
|
|
|
int rpc_malloc(struct rpc_task *task)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2016-09-15 22:55:20 +08:00
|
|
|
struct rpc_rqst *rqst = task->tk_rqstp;
|
|
|
|
size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
|
2007-05-09 06:23:28 +08:00
|
|
|
struct rpc_buffer *buf;
|
2022-03-15 10:02:22 +08:00
|
|
|
gfp_t gfp = rpc_task_gfp_mask();
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-05-09 06:23:28 +08:00
|
|
|
size += sizeof(struct rpc_buffer);
|
2022-03-15 10:02:22 +08:00
|
|
|
if (size <= RPC_BUFFER_MAXSIZE) {
|
|
|
|
buf = kmem_cache_alloc(rpc_buffer_slabp, gfp);
|
|
|
|
/* Reach for the mempool if dynamic allocation fails */
|
|
|
|
if (!buf && RPC_IS_ASYNC(task))
|
|
|
|
buf = mempool_alloc(rpc_buffer_mempool, GFP_NOWAIT);
|
|
|
|
} else
|
2007-03-30 04:47:58 +08:00
|
|
|
buf = kmalloc(size, gfp);
|
2007-05-09 14:30:11 +08:00
|
|
|
|
|
|
|
if (!buf)
|
2016-09-15 22:55:20 +08:00
|
|
|
return -ENOMEM;
|
2007-05-09 14:30:11 +08:00
|
|
|
|
2007-05-09 06:23:28 +08:00
|
|
|
buf->len = size;
|
2016-09-15 22:55:20 +08:00
|
|
|
rqst->rq_buffer = buf->data;
|
2016-09-15 22:55:37 +08:00
|
|
|
rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
|
2016-09-15 22:55:20 +08:00
|
|
|
return 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2007-09-11 01:45:36 +08:00
|
|
|
EXPORT_SYMBOL_GPL(rpc_malloc);
|
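/*
 * Layout produced by rpc_malloc() above (the helper below is a
 * hypothetical illustration, not part of the kernel):
 *
 *   | struct rpc_buffer | call (rq_callsize) | reply (rq_rcvsize) |
 *                       ^ rq_buffer          ^ rq_rbuffer
 */
static bool example_buffer_layout_ok(const struct rpc_rqst *rqst)
{
	/* rq_rbuffer always starts rq_callsize bytes into rq_buffer */
	return rqst->rq_rbuffer ==
	       (char *)rqst->rq_buffer + rqst->rq_callsize;
}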
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-01-03 16:55:49 +08:00
|
|
|
/**
|
2016-09-15 22:55:29 +08:00
|
|
|
* rpc_free - free RPC buffer resources allocated via rpc_malloc
|
|
|
|
* @task: RPC task
|
2006-01-03 16:55:49 +08:00
|
|
|
*
|
|
|
|
*/
|
2016-09-15 22:55:29 +08:00
|
|
|
void rpc_free(struct rpc_task *task)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2016-09-15 22:55:29 +08:00
|
|
|
void *buffer = task->tk_rqstp->rq_buffer;
|
2007-05-09 06:23:28 +08:00
|
|
|
size_t size;
|
|
|
|
struct rpc_buffer *buf;
|
2006-01-03 16:55:49 +08:00
|
|
|
|
2007-05-09 06:23:28 +08:00
|
|
|
buf = container_of(buffer, struct rpc_buffer, data);
|
|
|
|
size = buf->len;
|
2007-03-30 04:47:58 +08:00
|
|
|
|
|
|
|
if (size <= RPC_BUFFER_MAXSIZE)
|
|
|
|
mempool_free(buf, rpc_buffer_mempool);
|
|
|
|
else
|
|
|
|
kfree(buf);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2007-09-11 01:45:36 +08:00
|
|
|
EXPORT_SYMBOL_GPL(rpc_free);
|
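/*
 * Sketch (assumption based on the socket transports): rpc_malloc() and
 * rpc_free() are normally wired into a transport's ops table as the
 * buf_alloc/buf_free methods rather than called directly.  The fragment
 * below is hypothetical and intentionally incomplete.
 */
static const struct rpc_xprt_ops example_xprt_ops = {
	.buf_alloc	= rpc_malloc,
	.buf_free	= rpc_free,
	/* ... remaining transport methods elided ... */
};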
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Creation and deletion of RPC task structures
|
|
|
|
*/
|
2007-10-26 06:42:55 +08:00
|
|
|
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
memset(task, 0, sizeof(*task));
|
2006-01-03 16:55:06 +08:00
|
|
|
atomic_set(&task->tk_count, 1);
|
2007-07-15 03:39:59 +08:00
|
|
|
task->tk_flags = task_setup_data->flags;
|
|
|
|
task->tk_ops = task_setup_data->callback_ops;
|
|
|
|
task->tk_calldata = task_setup_data->callback_data;
|
2007-06-15 04:40:14 +08:00
|
|
|
INIT_LIST_HEAD(&task->tk_task);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-07-15 03:40:00 +08:00
|
|
|
task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
|
|
|
|
task->tk_owner = current->tgid;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Initialize workqueue for async tasks */
|
2008-02-20 09:04:21 +08:00
|
|
|
task->tk_workqueue = task_setup_data->workqueue;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2019-07-12 04:33:12 +08:00
|
|
|
task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client,
|
|
|
|
xprt_get(task_setup_data->rpc_xprt));
|
2016-01-31 07:13:05 +08:00
|
|
|
|
2018-12-03 08:30:30 +08:00
|
|
|
task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);
|
|
|
|
|
2007-07-15 03:39:59 +08:00
|
|
|
if (task->tk_ops->rpc_call_prepare != NULL)
|
|
|
|
task->tk_action = rpc_prepare_task;
|
2006-01-03 16:55:04 +08:00
|
|
|
|
2011-12-02 03:00:15 +08:00
|
|
|
rpc_init_task_statistics(task);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
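/*
 * Sketch of the setup fields consumed by rpc_init_task() above (invented
 * helper and names; real callers reach this path via rpc_new_task() or
 * rpc_run_task() rather than calling rpc_init_task() directly).
 */
static void example_init_embedded_task(struct rpc_task *task,
				       struct rpc_clnt *clnt,
				       const struct rpc_call_ops *ops,
				       void *calldata)
{
	struct rpc_task_setup setup = {
		.task		= task,		/* caller-provided storage */
		.rpc_client	= clnt,		/* supplies the transport */
		.callback_ops	= ops,
		.callback_data	= calldata,
		.flags		= RPC_TASK_ASYNC,
		.workqueue	= rpciod_workqueue,
	};

	/* Embedded tasks never get RPC_TASK_DYNAMIC, so rpc_free_task()
	 * will not return them to the task mempool on release. */
	rpc_init_task(task, &setup);
}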
|
|
|
|
2022-03-22 05:37:01 +08:00
|
|
|
static struct rpc_task *rpc_alloc_task(void)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2022-03-22 05:37:01 +08:00
|
|
|
struct rpc_task *task;
|
|
|
|
|
|
|
|
task = kmem_cache_alloc(rpc_task_slabp, rpc_task_gfp_mask());
|
|
|
|
if (task)
|
|
|
|
return task;
|
|
|
|
return mempool_alloc(rpc_task_mempool, GFP_NOWAIT);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2007-06-10 07:49:36 +08:00
|
|
|
* Create a new task for the specified client.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2007-07-15 03:39:59 +08:00
|
|
|
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2007-10-26 06:42:53 +08:00
|
|
|
struct rpc_task *task = setup_data->task;
|
|
|
|
unsigned short flags = 0;
|
|
|
|
|
|
|
|
if (task == NULL) {
|
|
|
|
task = rpc_alloc_task();
|
2022-04-07 10:36:19 +08:00
|
|
|
if (task == NULL) {
|
|
|
|
rpc_release_calldata(setup_data->callback_ops,
|
|
|
|
setup_data->callback_data);
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
}
|
2007-10-26 06:42:53 +08:00
|
|
|
flags = RPC_TASK_DYNAMIC;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-07-15 03:39:59 +08:00
|
|
|
rpc_init_task(task, setup_data);
|
2007-10-26 06:42:53 +08:00
|
|
|
task->tk_flags |= flags;
|
2005-04-17 06:20:36 +08:00
|
|
|
return task;
|
|
|
|
}
|
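/*
 * Sketch of the calling convention for rpc_new_task() (a simplified
 * restatement of what rpc_run_task() in clnt.c does; the helper name is
 * invented and the client/message binding done by the real helper is
 * omitted): on failure the callback data has already been released, so
 * the caller only checks for an ERR_PTR.
 */
static struct rpc_task *example_new_and_run(const struct rpc_task_setup *setup)
{
	struct rpc_task *task = rpc_new_task(setup);

	if (IS_ERR(task))
		return task;	/* calldata already released by rpc_new_task() */

	/* Take a second reference so the task survives rpc_execute() and
	 * can still be returned to the caller (mirrors rpc_run_task()). */
	atomic_inc(&task->tk_count);
	rpc_execute(task);
	return task;
}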
|
|
|
|
2013-01-05 01:23:21 +08:00
|
|
|
/*
|
|
|
|
* rpc_free_task - release rpc task and perform cleanups
|
|
|
|
*
|
|
|
|
* Note that we free up the rpc_task _after_ rpc_release_calldata()
|
|
|
|
* in order to work around a workqueue dependency issue.
|
|
|
|
*
|
|
|
|
* Tejun Heo states:
|
|
|
|
* "Workqueue currently considers two work items to be the same if they're
|
|
|
|
* on the same address and won't execute them concurrently - ie. it
|
|
|
|
* makes a work item which is queued again while being executed wait
|
|
|
|
* for the previous execution to complete.
|
|
|
|
*
|
|
|
|
* If a work function frees the work item, and then waits for an event
|
|
|
|
* which should be performed by another work item and *that* work item
|
|
|
|
* recycles the freed work item, it can create a false dependency loop.
|
|
|
|
* There really is no reliable way to detect this short of verifying
|
|
|
|
* every memory free."
|
|
|
|
*
|
|
|
|
*/
|
2008-02-20 09:04:21 +08:00
|
|
|
static void rpc_free_task(struct rpc_task *task)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2013-01-05 01:23:21 +08:00
|
|
|
unsigned short tk_flags = task->tk_flags;
|
|
|
|
|
2018-12-03 08:30:30 +08:00
|
|
|
put_rpccred(task->tk_op_cred);
|
2013-01-05 01:23:21 +08:00
|
|
|
rpc_release_calldata(task->tk_ops, task->tk_calldata);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2020-07-09 04:10:45 +08:00
|
|
|
if (tk_flags & RPC_TASK_DYNAMIC)
|
2008-02-26 13:53:49 +08:00
|
|
|
mempool_free(task, rpc_task_mempool);
|
2008-02-20 09:04:21 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void rpc_async_release(struct work_struct *work)
|
|
|
|
{
|
2019-02-18 23:02:29 +08:00
|
|
|
unsigned int pflags = memalloc_nofs_save();
|
|
|
|
|
2008-02-20 09:04:21 +08:00
|
|
|
rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
|
2019-02-18 23:02:29 +08:00
|
|
|
memalloc_nofs_restore(pflags);
|
2008-02-20 09:04:21 +08:00
|
|
|
}
|
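/*
 * Sketch of the memalloc_nofs_save()/restore() bracket used by the rpciod
 * work functions in this file (hypothetical helper): allocations made
 * inside the scope are implicitly GFP_NOFS, which keeps rpciod from
 * recursing into filesystem reclaim and hence back into RPC.
 */
static void example_run_under_nofs(void (*fn)(void *data), void *data)
{
	unsigned int pflags = memalloc_nofs_save();

	fn(data);		/* allocations in here cannot enter FS reclaim */
	memalloc_nofs_restore(pflags);
}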
|
|
|
|
2011-02-22 03:05:41 +08:00
|
|
|
static void rpc_release_resources_task(struct rpc_task *task)
|
2008-02-20 09:04:21 +08:00
|
|
|
{
|
2013-01-08 03:30:46 +08:00
|
|
|
xprt_release(task);
|
2011-03-27 23:48:57 +08:00
|
|
|
if (task->tk_msg.rpc_cred) {
|
2020-02-08 08:11:12 +08:00
|
|
|
if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
|
|
|
|
put_cred(task->tk_msg.rpc_cred);
|
2011-03-27 23:48:57 +08:00
|
|
|
task->tk_msg.rpc_cred = NULL;
|
|
|
|
}
|
2010-08-01 02:29:08 +08:00
|
|
|
rpc_task_release_client(task);
|
2011-02-22 03:05:41 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void rpc_final_put_task(struct rpc_task *task,
|
|
|
|
struct workqueue_struct *q)
|
|
|
|
{
|
|
|
|
if (q != NULL) {
|
2008-02-20 09:04:21 +08:00
|
|
|
INIT_WORK(&task->u.tk_work, rpc_async_release);
|
2011-02-22 03:05:41 +08:00
|
|
|
queue_work(q, &task->u.tk_work);
|
2008-02-20 09:04:21 +08:00
|
|
|
} else
|
|
|
|
rpc_free_task(task);
|
2006-11-12 11:18:03 +08:00
|
|
|
}
|
2011-02-22 03:05:41 +08:00
|
|
|
|
|
|
|
static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
|
|
|
|
{
|
|
|
|
if (atomic_dec_and_test(&task->tk_count)) {
|
|
|
|
rpc_release_resources_task(task);
|
|
|
|
rpc_final_put_task(task, q);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void rpc_put_task(struct rpc_task *task)
|
|
|
|
{
|
|
|
|
rpc_do_put_task(task, NULL);
|
|
|
|
}
|
2007-07-15 03:39:59 +08:00
|
|
|
EXPORT_SYMBOL_GPL(rpc_put_task);
|
2006-11-12 11:18:03 +08:00
|
|
|
|
2011-02-22 03:05:41 +08:00
|
|
|
void rpc_put_task_async(struct rpc_task *task)
|
|
|
|
{
|
|
|
|
rpc_do_put_task(task, task->tk_workqueue);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(rpc_put_task_async);
|
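/*
 * Sketch of the two reference-drop paths (the helper below is invented):
 * ordinary callers use rpc_put_task(); code that may drop the last
 * reference from inside an rpciod callback uses rpc_put_task_async() so
 * the final cleanup is deferred to tk_workqueue instead of running in the
 * current work item.
 */
static void example_drop_task_ref(struct rpc_task *task, bool on_rpciod)
{
	if (on_rpciod)
		rpc_put_task_async(task);
	else
		rpc_put_task(task);
}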
|
|
|
|
2007-01-25 03:54:53 +08:00
|
|
|
static void rpc_release_task(struct rpc_task *task)
|
2006-11-12 11:18:03 +08:00
|
|
|
{
|
2012-10-23 22:43:49 +08:00
|
|
|
WARN_ON_ONCE(RPC_IS_QUEUED(task));
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2011-02-22 03:05:41 +08:00
|
|
|
rpc_release_resources_task(task);
|
2006-11-12 11:18:03 +08:00
|
|
|
|
2011-02-22 03:05:41 +08:00
|
|
|
/*
|
|
|
|
* Note: at this point we have been removed from rpc_clnt->cl_tasks,
|
|
|
|
* so it should be safe to use task->tk_count as a test for whether
|
|
|
|
* or not any other processes still hold references to our rpc_task.
|
|
|
|
*/
|
|
|
|
if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
|
|
|
|
/* Wake up anyone who may be waiting for task completion */
|
|
|
|
if (!rpc_complete_task(task))
|
|
|
|
return;
|
|
|
|
} else {
|
|
|
|
if (!atomic_dec_and_test(&task->tk_count))
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
rpc_final_put_task(task, task->tk_workqueue);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2007-07-20 04:32:20 +08:00
|
|
|
int rpciod_up(void)
|
|
|
|
{
|
|
|
|
return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
void rpciod_down(void)
|
|
|
|
{
|
|
|
|
module_put(THIS_MODULE);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2007-07-20 04:32:20 +08:00
|
|
|
 * Start up the rpciod and xprtiod workqueues.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2007-07-20 04:32:20 +08:00
|
|
|
static int rpciod_start(void)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct workqueue_struct *wq;
|
2007-06-15 05:08:36 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
 * Create the rpciod and xprtiod workqueues.
|
|
|
|
*/
|
2017-06-29 21:25:36 +08:00
|
|
|
wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
|
2016-05-27 22:39:50 +08:00
|
|
|
if (!wq)
|
|
|
|
goto out_failed;
|
2005-04-17 06:20:36 +08:00
|
|
|
rpciod_workqueue = wq;
|
2021-07-12 23:57:15 +08:00
|
|
|
wq = alloc_workqueue("xprtiod", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
|
2016-05-27 22:39:50 +08:00
|
|
|
if (!wq)
|
|
|
|
goto free_rpciod;
|
|
|
|
xprtiod_workqueue = wq;
|
|
|
|
return 1;
|
|
|
|
free_rpciod:
|
|
|
|
wq = rpciod_workqueue;
|
|
|
|
rpciod_workqueue = NULL;
|
|
|
|
destroy_workqueue(wq);
|
|
|
|
out_failed:
|
|
|
|
return 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2007-07-20 04:32:20 +08:00
|
|
|
static void rpciod_stop(void)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2007-07-20 04:32:20 +08:00
|
|
|
struct workqueue_struct *wq = NULL;
|
2007-06-15 05:08:36 +08:00
|
|
|
|
2007-07-20 04:32:20 +08:00
|
|
|
if (rpciod_workqueue == NULL)
|
|
|
|
return;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-07-20 04:32:20 +08:00
|
|
|
wq = rpciod_workqueue;
|
|
|
|
rpciod_workqueue = NULL;
|
|
|
|
destroy_workqueue(wq);
|
2016-05-27 22:39:50 +08:00
|
|
|
wq = xprtiod_workqueue;
|
|
|
|
xprtiod_workqueue = NULL;
|
|
|
|
destroy_workqueue(wq);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
rpc_destroy_mempool(void)
|
|
|
|
{
|
2007-07-20 04:32:20 +08:00
|
|
|
rpciod_stop();
|
2015-09-13 20:15:07 +08:00
|
|
|
mempool_destroy(rpc_buffer_mempool);
|
|
|
|
mempool_destroy(rpc_task_mempool);
|
|
|
|
kmem_cache_destroy(rpc_task_slabp);
|
|
|
|
kmem_cache_destroy(rpc_buffer_slabp);
|
2008-02-23 06:06:55 +08:00
|
|
|
rpc_destroy_wait_queue(&delay_queue);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
rpc_init_mempool(void)
|
|
|
|
{
|
2008-02-23 06:06:55 +08:00
|
|
|
/*
|
|
|
|
* The following is not strictly a mempool initialisation,
|
|
|
|
* but there is no harm in doing it here
|
|
|
|
*/
|
|
|
|
rpc_init_wait_queue(&delay_queue, "delayq");
|
|
|
|
if (!rpciod_start())
|
|
|
|
goto err_nomem;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
rpc_task_slabp = kmem_cache_create("rpc_tasks",
|
|
|
|
sizeof(struct rpc_task),
|
|
|
|
0, SLAB_HWCACHE_ALIGN,
|
2007-07-20 09:11:58 +08:00
|
|
|
NULL);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (!rpc_task_slabp)
|
|
|
|
goto err_nomem;
|
|
|
|
rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
|
|
|
|
RPC_BUFFER_MAXSIZE,
|
|
|
|
0, SLAB_HWCACHE_ALIGN,
|
2007-07-20 09:11:58 +08:00
|
|
|
NULL);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (!rpc_buffer_slabp)
|
|
|
|
goto err_nomem;
|
2006-03-26 17:37:50 +08:00
|
|
|
rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
|
|
|
|
rpc_task_slabp);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (!rpc_task_mempool)
|
|
|
|
goto err_nomem;
|
2006-03-26 17:37:50 +08:00
|
|
|
rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
|
|
|
|
rpc_buffer_slabp);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (!rpc_buffer_mempool)
|
|
|
|
goto err_nomem;
|
|
|
|
return 0;
|
|
|
|
err_nomem:
|
|
|
|
rpc_destroy_mempool();
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
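/*
 * Sketch of how the two routines above pair up at module init/exit time
 * (assumption: the real callers are init_sunrpc()/cleanup_sunrpc() in
 * sunrpc_syms.c; the function names below are hypothetical).
 */
static int __init example_sunrpc_init(void)
{
	int err = rpc_init_mempool();

	if (err)
		return err;	/* -ENOMEM: partial setup already undone */
	/* ... register the rest of the RPC layer here ... */
	return 0;
}

static void __exit example_sunrpc_exit(void)
{
	/* ... unregister the rest of the RPC layer here ... */
	rpc_destroy_mempool();	/* also stops the rpciod/xprtiod workqueues */
}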