sched/wait: Standardize internal naming of wait-queue heads
The wait-queue head parameters and variables are named in a
couple of ways, we have the following variants currently:

        wait_queue_head_t *q
        wait_queue_head_t *wq
        wait_queue_head_t *head

In particular the 'wq' naming is ambiguous in the sense whether it's
a wait-queue head or entry name - as entries were often named 'wait'.

( Not to mention the confusion of any readers coming over from
  workqueue-land. )

Standardize all this around a single, unambiguous parameter and
variable name:

        struct wait_queue_head *wq_head

which is easy to grep for and also rhymes nicely with the wait-queue
entry naming:

        struct wait_queue_entry *wq_entry

Also rename:

        struct __wait_queue_head => struct wait_queue_head

... and use this struct type to migrate from typedefs usage to 'struct'
usage, which is more in line with existing kernel practices.

Don't touch any external users and preserve the main wait_queue_head_t
typedef.

Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 50816c4899
commit 9d9d676f59
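
For illustration, the convention this commit settles on keeps heads and entries visibly distinct at the declaration site. A minimal sketch, assuming hypothetical my_wq/reader_wait names:

        #include <linux/wait.h>

        /* A wait-queue head and a wait-queue entry are now different structs: */
        static struct wait_queue_head  my_wq;          /* the queue itself */
        static struct wait_queue_entry reader_wait;    /* one waiter on it */

        static void example_init(void)
        {
                init_waitqueue_head(&my_wq);
                init_wait_entry(&reader_wait, 0);      /* non-exclusive waiter */
        }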
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -41,11 +41,11 @@ struct wait_bit_queue {
         struct wait_queue_entry wait;
 };
 
-struct __wait_queue_head {
+struct wait_queue_head {
         spinlock_t              lock;
         struct list_head        task_list;
 };
-typedef struct __wait_queue_head wait_queue_head_t;
+typedef struct wait_queue_head wait_queue_head_t;
 
 struct task_struct;
 
@@ -66,7 +66,7 @@ struct task_struct;
                 .task_list = { &(name).task_list, &(name).task_list } }
 
 #define DECLARE_WAIT_QUEUE_HEAD(name) \
-        wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
+        struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
 
 #define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
         { .flags = word, .bit_nr = bit, }
@@ -74,20 +74,20 @@ struct task_struct;
 #define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \
         { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
 
-extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
+extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);
 
-#define init_waitqueue_head(q) \
+#define init_waitqueue_head(wq_head) \
         do { \
                 static struct lock_class_key __key; \
 \
-                __init_waitqueue_head((q), #q, &__key); \
+                __init_waitqueue_head((wq_head), #wq_head, &__key); \
         } while (0)
 
 #ifdef CONFIG_LOCKDEP
 # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
         ({ init_waitqueue_head(&name); name; })
 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
-        wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
+        struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
 #else
 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
 #endif
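
For reference, both initialization paths above produce an equivalent head; roughly how a caller might use them (a sketch, the foo names are placeholders):

        /* Static head: declared and initialized in one step. */
        static DECLARE_WAIT_QUEUE_HEAD(foo_wq);

        /* Dynamic head, e.g. embedded in a runtime-allocated object: */
        struct foo {
                struct wait_queue_head wq_head;
        };

        static void foo_init(struct foo *f)
        {
                init_waitqueue_head(&f->wq_head);      /* gets its own lockdep key */
        }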
@@ -109,14 +109,14 @@ init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t f
 
 /**
  * waitqueue_active -- locklessly test for waiters on the queue
- * @q: the waitqueue to test for waiters
+ * @wq_head: the waitqueue to test for waiters
  *
  * returns true if the wait list is not empty
  *
  * NOTE: this function is lockless and requires care, incorrect usage _will_
  * lead to sporadic and non-obvious failure.
  *
- * Use either while holding wait_queue_head_t::lock or when used for wakeups
+ * Use either while holding wait_queue_head::lock or when used for wakeups
  * with an extra smp_mb() like:
  *
  *      CPU0 - waker                    CPU1 - waiter
@@ -137,9 +137,9 @@ init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t f
  * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
  * which (when the lock is uncontended) are of roughly equal cost.
  */
-static inline int waitqueue_active(wait_queue_head_t *q)
+static inline int waitqueue_active(struct wait_queue_head *wq_head)
 {
-        return !list_empty(&q->task_list);
+        return !list_empty(&wq_head->task_list);
 }
 
 /**
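
The waker-side discipline the comment above demands, publish the condition, issue a full barrier, then do the lockless check, looks roughly like this (a sketch; cond and foo_wq are placeholders):

        /* Waker: */
        cond = true;
        smp_mb();       /* pairs with the barrier in set_current_state() on the waiter side */
        if (waitqueue_active(&foo_wq))
                wake_up(&foo_wq);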
@@ -150,7 +150,7 @@ static inline int waitqueue_active(wait_queue_head_t *q)
  *
  * Please refer to the comment for waitqueue_active.
  */
-static inline bool wq_has_sleeper(wait_queue_head_t *wq)
+static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
 {
         /*
          * We need to be sure we are in sync with the
@@ -160,62 +160,62 @@ static inline bool wq_has_sleeper(wait_queue_head_t *wq)
          * waiting side.
          */
         smp_mb();
-        return waitqueue_active(wq);
+        return waitqueue_active(wq_head);
 }
 
-extern void add_wait_queue(wait_queue_head_t *q, struct wait_queue_entry *wq_entry);
-extern void add_wait_queue_exclusive(wait_queue_head_t *q, struct wait_queue_entry *wq_entry);
-extern void remove_wait_queue(wait_queue_head_t *q, struct wait_queue_entry *wq_entry);
+extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
+extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
+extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
 
-static inline void __add_wait_queue(wait_queue_head_t *head, struct wait_queue_entry *wq_entry)
+static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
-        list_add(&wq_entry->task_list, &head->task_list);
+        list_add(&wq_entry->task_list, &wq_head->task_list);
 }
 
 /*
  * Used for wake-one threads:
  */
 static inline void
-__add_wait_queue_exclusive(wait_queue_head_t *q, struct wait_queue_entry *wq_entry)
+__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
         wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
-        __add_wait_queue(q, wq_entry);
+        __add_wait_queue(wq_head, wq_entry);
 }
 
-static inline void __add_wait_queue_entry_tail(wait_queue_head_t *head, struct wait_queue_entry *wq_entry)
+static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
-        list_add_tail(&wq_entry->task_list, &head->task_list);
+        list_add_tail(&wq_entry->task_list, &wq_head->task_list);
 }
 
 static inline void
-__add_wait_queue_entry_tail_exclusive(wait_queue_head_t *q, struct wait_queue_entry *wq_entry)
+__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
         wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
-        __add_wait_queue_entry_tail(q, wq_entry);
+        __add_wait_queue_entry_tail(wq_head, wq_entry);
 }
 
 static inline void
-__remove_wait_queue(wait_queue_head_t *head, struct wait_queue_entry *wq_entry)
+__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
         list_del(&wq_entry->task_list);
 }
 
 typedef int wait_bit_action_f(struct wait_bit_key *, int mode);
-void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
-void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
-void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
-void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
-void __wake_up_bit(wait_queue_head_t *, void *, int);
-int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
-int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
+void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
+void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
+void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
+void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
+void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
+void __wake_up_bit(struct wait_queue_head *, void *, int);
+int __wait_on_bit(struct wait_queue_head *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
+int __wait_on_bit_lock(struct wait_queue_head *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
 void wake_up_bit(void *, int);
 void wake_up_atomic_t(atomic_t *);
 int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
 int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
 int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
 int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
-wait_queue_head_t *bit_waitqueue(void *, int);
+struct wait_queue_head *bit_waitqueue(void *, int);
 
 #define wake_up(x)                      __wake_up(x, TASK_NORMAL, 1, NULL)
 #define wake_up_nr(x, nr)               __wake_up(x, TASK_NORMAL, nr, NULL)
@@ -970,10 +970,10 @@ do { \
 /*
  * Waitqueues which are removed from the waitqueue_head at wakeup time
  */
-void prepare_to_wait(wait_queue_head_t *q, struct wait_queue_entry *wq_entry, int state);
-void prepare_to_wait_exclusive(wait_queue_head_t *q, struct wait_queue_entry *wq_entry, int state);
-long prepare_to_wait_event(wait_queue_head_t *q, struct wait_queue_entry *wq_entry, int state);
-void finish_wait(wait_queue_head_t *q, struct wait_queue_entry *wq_entry);
+void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
+void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
+long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
+void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
 long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
 int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
 int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
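
Taken together, the prepare_to_wait()/finish_wait() API declared above supports the canonical open-coded wait loop; a sketch under the new naming (foo_wq and cond are hypothetical):

        static int wait_for_cond(void)
        {
                DEFINE_WAIT(wait);      /* a struct wait_queue_entry on the stack */

                for (;;) {
                        prepare_to_wait(&foo_wq, &wait, TASK_INTERRUPTIBLE);
                        if (cond)
                                break;
                        if (signal_pending(current))
                                break;
                        schedule();
                }
                finish_wait(&foo_wq, &wait);

                return cond ? 0 : -ERESTARTSYS;
        }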
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -12,44 +12,44 @@
 #include <linux/hash.h>
 #include <linux/kthread.h>
 
-void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)
+void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
 {
-        spin_lock_init(&q->lock);
-        lockdep_set_class_and_name(&q->lock, key, name);
-        INIT_LIST_HEAD(&q->task_list);
+        spin_lock_init(&wq_head->lock);
+        lockdep_set_class_and_name(&wq_head->lock, key, name);
+        INIT_LIST_HEAD(&wq_head->task_list);
 }
 
 EXPORT_SYMBOL(__init_waitqueue_head);
 
-void add_wait_queue(wait_queue_head_t *q, struct wait_queue_entry *wq_entry)
+void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
         unsigned long flags;
 
         wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
-        spin_lock_irqsave(&q->lock, flags);
-        __add_wait_queue_entry_tail(q, wq_entry);
-        spin_unlock_irqrestore(&q->lock, flags);
+        spin_lock_irqsave(&wq_head->lock, flags);
+        __add_wait_queue_entry_tail(wq_head, wq_entry);
+        spin_unlock_irqrestore(&wq_head->lock, flags);
 }
 EXPORT_SYMBOL(add_wait_queue);
 
-void add_wait_queue_exclusive(wait_queue_head_t *q, struct wait_queue_entry *wq_entry)
+void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
         unsigned long flags;
 
         wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
-        spin_lock_irqsave(&q->lock, flags);
-        __add_wait_queue_entry_tail(q, wq_entry);
-        spin_unlock_irqrestore(&q->lock, flags);
+        spin_lock_irqsave(&wq_head->lock, flags);
+        __add_wait_queue_entry_tail(wq_head, wq_entry);
+        spin_unlock_irqrestore(&wq_head->lock, flags);
 }
 EXPORT_SYMBOL(add_wait_queue_exclusive);
 
-void remove_wait_queue(wait_queue_head_t *q, struct wait_queue_entry *wq_entry)
+void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
         unsigned long flags;
 
-        spin_lock_irqsave(&q->lock, flags);
-        __remove_wait_queue(q, wq_entry);
-        spin_unlock_irqrestore(&q->lock, flags);
+        spin_lock_irqsave(&wq_head->lock, flags);
+        __remove_wait_queue(wq_head, wq_entry);
+        spin_unlock_irqrestore(&wq_head->lock, flags);
 }
 EXPORT_SYMBOL(remove_wait_queue);
 
@@ -63,12 +63,12 @@ EXPORT_SYMBOL(remove_wait_queue);
  * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+static void __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
                         int nr_exclusive, int wake_flags, void *key)
 {
         wait_queue_entry_t *curr, *next;
 
-        list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
+        list_for_each_entry_safe(curr, next, &wq_head->task_list, task_list) {
                 unsigned flags = curr->flags;
 
                 if (curr->func(curr, mode, wake_flags, key) &&
@@ -79,7 +79,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
 
 /**
  * __wake_up - wake up threads blocked on a waitqueue.
- * @q: the waitqueue
+ * @wq_head: the waitqueue
  * @mode: which threads
  * @nr_exclusive: how many wake-one or wake-many threads to wake up
  * @key: is directly passed to the wakeup function
@@ -87,35 +87,35 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
  * It may be assumed that this function implies a write memory barrier before
  * changing the task state if and only if any tasks are woken up.
  */
-void __wake_up(wait_queue_head_t *q, unsigned int mode,
+void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
                         int nr_exclusive, void *key)
 {
         unsigned long flags;
 
-        spin_lock_irqsave(&q->lock, flags);
-        __wake_up_common(q, mode, nr_exclusive, 0, key);
-        spin_unlock_irqrestore(&q->lock, flags);
+        spin_lock_irqsave(&wq_head->lock, flags);
+        __wake_up_common(wq_head, mode, nr_exclusive, 0, key);
+        spin_unlock_irqrestore(&wq_head->lock, flags);
 }
 EXPORT_SYMBOL(__wake_up);
 
 /*
  * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
  */
-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
+void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
 {
-        __wake_up_common(q, mode, nr, 0, NULL);
+        __wake_up_common(wq_head, mode, nr, 0, NULL);
 }
 EXPORT_SYMBOL_GPL(__wake_up_locked);
 
-void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
+void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
 {
-        __wake_up_common(q, mode, 1, 0, key);
+        __wake_up_common(wq_head, mode, 1, 0, key);
 }
 EXPORT_SYMBOL_GPL(__wake_up_locked_key);
 
 /**
  * __wake_up_sync_key - wake up threads blocked on a waitqueue.
- * @q: the waitqueue
+ * @wq_head: the waitqueue
  * @mode: which threads
  * @nr_exclusive: how many wake-one or wake-many threads to wake up
  * @key: opaque value to be passed to wakeup targets
@@ -130,30 +130,30 @@ EXPORT_SYMBOL_GPL(__wake_up_locked_key);
  * It may be assumed that this function implies a write memory barrier before
  * changing the task state if and only if any tasks are woken up.
  */
-void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
+void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
                         int nr_exclusive, void *key)
 {
         unsigned long flags;
         int wake_flags = 1; /* XXX WF_SYNC */
 
-        if (unlikely(!q))
+        if (unlikely(!wq_head))
                 return;
 
         if (unlikely(nr_exclusive != 1))
                 wake_flags = 0;
 
-        spin_lock_irqsave(&q->lock, flags);
-        __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
-        spin_unlock_irqrestore(&q->lock, flags);
+        spin_lock_irqsave(&wq_head->lock, flags);
+        __wake_up_common(wq_head, mode, nr_exclusive, wake_flags, key);
+        spin_unlock_irqrestore(&wq_head->lock, flags);
 }
 EXPORT_SYMBOL_GPL(__wake_up_sync_key);
 
 /*
  * __wake_up_sync - see __wake_up_sync_key()
  */
-void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
 {
-        __wake_up_sync_key(q, mode, nr_exclusive, NULL);
+        __wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
 }
 EXPORT_SYMBOL_GPL(__wake_up_sync);      /* For internal use only */
 
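
On the wakeup side, callers normally reach the functions above through the wake_up*() macros from <linux/wait.h>; for example (illustrative, foo_wq hypothetical):

        wake_up(&foo_wq);               /* wake one exclusive waiter plus all non-exclusive ones */
        wake_up_all(&foo_wq);           /* wake everything, ignoring WQ_FLAG_EXCLUSIVE */
        wake_up_locked(&foo_wq);        /* variant for callers already holding foo_wq.lock */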
@@ -170,30 +170,30 @@ EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
  * loads to move into the critical region).
  */
 void
-prepare_to_wait(wait_queue_head_t *q, struct wait_queue_entry *wq_entry, int state)
+prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
 {
         unsigned long flags;
 
         wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
-        spin_lock_irqsave(&q->lock, flags);
+        spin_lock_irqsave(&wq_head->lock, flags);
         if (list_empty(&wq_entry->task_list))
-                __add_wait_queue(q, wq_entry);
+                __add_wait_queue(wq_head, wq_entry);
         set_current_state(state);
-        spin_unlock_irqrestore(&q->lock, flags);
+        spin_unlock_irqrestore(&wq_head->lock, flags);
 }
 EXPORT_SYMBOL(prepare_to_wait);
 
 void
-prepare_to_wait_exclusive(wait_queue_head_t *q, struct wait_queue_entry *wq_entry, int state)
+prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
 {
         unsigned long flags;
 
         wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
-        spin_lock_irqsave(&q->lock, flags);
+        spin_lock_irqsave(&wq_head->lock, flags);
         if (list_empty(&wq_entry->task_list))
-                __add_wait_queue_entry_tail(q, wq_entry);
+                __add_wait_queue_entry_tail(wq_head, wq_entry);
         set_current_state(state);
-        spin_unlock_irqrestore(&q->lock, flags);
+        spin_unlock_irqrestore(&wq_head->lock, flags);
 }
 EXPORT_SYMBOL(prepare_to_wait_exclusive);
 
@@ -206,12 +206,12 @@ void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
 }
 EXPORT_SYMBOL(init_wait_entry);
 
-long prepare_to_wait_event(wait_queue_head_t *q, struct wait_queue_entry *wq_entry, int state)
+long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
 {
         unsigned long flags;
         long ret = 0;
 
-        spin_lock_irqsave(&q->lock, flags);
+        spin_lock_irqsave(&wq_head->lock, flags);
         if (unlikely(signal_pending_state(state, current))) {
                 /*
                  * Exclusive waiter must not fail if it was selected by wakeup,
@@ -219,7 +219,7 @@ long prepare_to_wait_event(wait_queue_head_t *q, struct wait_queue_entry *wq_ent
                  *
                  * The caller will recheck the condition and return success if
                  * we were already woken up, we can not miss the event because
-                 * wakeup locks/unlocks the same q->lock.
+                 * wakeup locks/unlocks the same wq_head->lock.
                  *
                  * But we need to ensure that set-condition + wakeup after that
                  * can't see us, it should wake up another exclusive waiter if
@@ -230,13 +230,13 @@ long prepare_to_wait_event(wait_queue_head_t *q, struct wait_queue_entry *wq_ent
         } else {
                 if (list_empty(&wq_entry->task_list)) {
                         if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
-                                __add_wait_queue_entry_tail(q, wq_entry);
+                                __add_wait_queue_entry_tail(wq_head, wq_entry);
                         else
-                                __add_wait_queue(q, wq_entry);
+                                __add_wait_queue(wq_head, wq_entry);
                 }
                 set_current_state(state);
         }
-        spin_unlock_irqrestore(&q->lock, flags);
+        spin_unlock_irqrestore(&wq_head->lock, flags);
 
         return ret;
 }
@@ -283,14 +283,14 @@ EXPORT_SYMBOL(do_wait_intr_irq);
 
 /**
  * finish_wait - clean up after waiting in a queue
- * @q: waitqueue waited on
+ * @wq_head: waitqueue waited on
  * @wq_entry: wait descriptor
  *
  * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
  */
-void finish_wait(wait_queue_head_t *q, struct wait_queue_entry *wq_entry)
+void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
         unsigned long flags;
 
@@ -309,9 +309,9 @@ void finish_wait(wait_queue_head_t *q, struct wait_queue_entry *wq_entry)
          * the list).
          */
         if (!list_empty_careful(&wq_entry->task_list)) {
-                spin_lock_irqsave(&q->lock, flags);
+                spin_lock_irqsave(&wq_head->lock, flags);
                 list_del_init(&wq_entry->task_list);
-                spin_unlock_irqrestore(&q->lock, flags);
+                spin_unlock_irqrestore(&wq_head->lock, flags);
         }
 }
 EXPORT_SYMBOL(finish_wait);
@@ -334,7 +334,7 @@ static inline bool is_kthread_should_stop(void)
 /*
  * DEFINE_WAIT_FUNC(wait, woken_wake_func);
  *
- * add_wait_queue(&wq, &wait);
+ * add_wait_queue(&wq_head, &wait);
  * for (;;) {
  *     if (condition)
  *         break;
@@ -348,7 +348,7 @@ static inline bool is_kthread_should_stop(void)
  *     smp_mb() // B                           smp_wmb(); // C
  *     wq_entry->flags |= WQ_FLAG_WOKEN;
  * }
- * remove_wait_queue(&wq, &wait);
+ * remove_wait_queue(&wq_head, &wait);
  *
  */
 long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
@@ -412,17 +412,17 @@ EXPORT_SYMBOL(wake_bit_function);
  * permitted return codes. Nonzero return codes halt waiting and return.
  */
 int __sched
-__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
+__wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue *q,
               wait_bit_action_f *action, unsigned mode)
 {
         int ret = 0;
 
         do {
-                prepare_to_wait(wq, &q->wait, mode);
+                prepare_to_wait(wq_head, &q->wait, mode);
                 if (test_bit(q->key.bit_nr, q->key.flags))
                         ret = (*action)(&q->key, mode);
         } while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
-        finish_wait(wq, &q->wait);
+        finish_wait(wq_head, &q->wait);
         return ret;
 }
 EXPORT_SYMBOL(__wait_on_bit);
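
The action callback supplies the actual sleep; the stock bit_wait() action is essentially the following (a simplified sketch of an existing helper, not a new API):

        static __sched int example_bit_wait(struct wait_bit_key *word, int mode)
        {
                schedule();
                if (signal_pending_state(mode, current))
                        return -EINTR;

                return 0;
        }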
@@ -430,10 +430,10 @@ EXPORT_SYMBOL(__wait_on_bit);
 int __sched out_of_line_wait_on_bit(void *word, int bit,
                                     wait_bit_action_f *action, unsigned mode)
 {
-        wait_queue_head_t *wq = bit_waitqueue(word, bit);
+        struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
         DEFINE_WAIT_BIT(wait, word, bit);
 
-        return __wait_on_bit(wq, &wait, action, mode);
+        return __wait_on_bit(wq_head, &wait, action, mode);
 }
 EXPORT_SYMBOL(out_of_line_wait_on_bit);
 
@@ -441,36 +441,36 @@ int __sched out_of_line_wait_on_bit_timeout(
         void *word, int bit, wait_bit_action_f *action,
         unsigned mode, unsigned long timeout)
 {
-        wait_queue_head_t *wq = bit_waitqueue(word, bit);
+        struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
         DEFINE_WAIT_BIT(wait, word, bit);
 
         wait.key.timeout = jiffies + timeout;
-        return __wait_on_bit(wq, &wait, action, mode);
+        return __wait_on_bit(wq_head, &wait, action, mode);
 }
 EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout);
 
 int __sched
-__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
+__wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue *q,
                    wait_bit_action_f *action, unsigned mode)
 {
         int ret = 0;
 
         for (;;) {
-                prepare_to_wait_exclusive(wq, &q->wait, mode);
+                prepare_to_wait_exclusive(wq_head, &q->wait, mode);
                 if (test_bit(q->key.bit_nr, q->key.flags)) {
                         ret = action(&q->key, mode);
                         /*
                          * See the comment in prepare_to_wait_event().
-                         * finish_wait() does not necessarily takes wq->lock,
+                         * finish_wait() does not necessarily takes wwq_head->lock,
                          * but test_and_set_bit() implies mb() which pairs with
                          * smp_mb__after_atomic() before wake_up_page().
                          */
                         if (ret)
-                                finish_wait(wq, &q->wait);
+                                finish_wait(wq_head, &q->wait);
                 }
                 if (!test_and_set_bit(q->key.bit_nr, q->key.flags)) {
                         if (!ret)
-                                finish_wait(wq, &q->wait);
+                                finish_wait(wq_head, &q->wait);
                         return 0;
                 } else if (ret) {
                         return ret;
@@ -482,18 +482,18 @@ EXPORT_SYMBOL(__wait_on_bit_lock);
 int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
                                          wait_bit_action_f *action, unsigned mode)
 {
-        wait_queue_head_t *wq = bit_waitqueue(word, bit);
+        struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
         DEFINE_WAIT_BIT(wait, word, bit);
 
-        return __wait_on_bit_lock(wq, &wait, action, mode);
+        return __wait_on_bit_lock(wq_head, &wait, action, mode);
 }
 EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
 
-void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
+void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit)
 {
         struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
-        if (waitqueue_active(wq))
-                __wake_up(wq, TASK_NORMAL, 1, &key);
+        if (waitqueue_active(wq_head))
+                __wake_up(wq_head, TASK_NORMAL, 1, &key);
 }
 EXPORT_SYMBOL(__wake_up_bit);
 
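
Typical callers never touch __wake_up_bit() directly; they pair wait_on_bit() with wake_up_bit(), e.g. (a sketch, flags and MY_BIT are hypothetical):

        unsigned long flags;    /* hypothetical bit field */

        /* Waiter: sleep until MY_BIT is cleared in flags. */
        wait_on_bit(&flags, MY_BIT, TASK_UNINTERRUPTIBLE);

        /* Clearer: clear the bit, then order the store before the waitqueue check. */
        clear_bit(MY_BIT, &flags);
        smp_mb__after_atomic();
        wake_up_bit(&flags, MY_BIT);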
@@ -555,20 +555,20 @@ static int wake_atomic_t_function(struct wait_queue_entry *wq_entry, unsigned mo
  * return codes halt waiting and return.
  */
 static __sched
-int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q,
+int __wait_on_atomic_t(struct wait_queue_head *wq_head, struct wait_bit_queue *q,
                        int (*action)(atomic_t *), unsigned mode)
 {
         atomic_t *val;
         int ret = 0;
 
         do {
-                prepare_to_wait(wq, &q->wait, mode);
+                prepare_to_wait(wq_head, &q->wait, mode);
                 val = q->key.flags;
                 if (atomic_read(val) == 0)
                         break;
                 ret = (*action)(val);
         } while (!ret && atomic_read(val) != 0);
-        finish_wait(wq, &q->wait);
+        finish_wait(wq_head, &q->wait);
         return ret;
 }
 
@@ -586,10 +586,10 @@ int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q,
 __sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
                                          unsigned mode)
 {
-        wait_queue_head_t *wq = atomic_t_waitqueue(p);
+        struct wait_queue_head *wq_head = atomic_t_waitqueue(p);
         DEFINE_WAIT_ATOMIC_T(wait, p);
 
-        return __wait_on_atomic_t(wq, &wait, action, mode);
+        return __wait_on_atomic_t(wq_head, &wait, action, mode);
 }
 EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
 
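
Similarly, the atomic_t flavor above backs wait_on_atomic_t()/wake_up_atomic_t(), where the caller provides the action that actually sleeps (a sketch under the pre-4.15 API; obj->refs and the action are hypothetical):

        static int example_atomic_wait(atomic_t *v)
        {
                schedule();
                return 0;
        }

        /* Waiter: block until obj->refs drops to zero. */
        wait_on_atomic_t(&obj->refs, example_atomic_wait, TASK_UNINTERRUPTIBLE);

        /* Release side: wake the atomic_t waiters once the count hits zero. */
        if (atomic_dec_and_test(&obj->refs))
                wake_up_atomic_t(&obj->refs);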