#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H

#define WNOHANG		0x00000001
#define WUNTRACED	0x00000002
#define WSTOPPED	WUNTRACED
#define WEXITED		0x00000004
#define WCONTINUED	0x00000008
#define WNOWAIT		0x01000000	/* Don't reap, just poll status.  */

#define __WNOTHREAD	0x20000000	/* Don't wait on children of other threads in this group */
#define __WALL		0x40000000	/* Wait on all children, regardless of type */
#define __WCLONE	0x80000000	/* Wait only on non-SIGCHLD children */

/* First argument to waitid: */
#define P_ALL		0
#define P_PID		1
#define P_PGID		2
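
/*
 * Usage sketch (illustrative, not part of this header): the W* and P_*
 * constants above are the flag and id-type arguments of waitid()/waitpid().
 * A userspace caller might poll for an exited child without reaping it,
 * roughly like this (hypothetical example, needs <sys/wait.h>):
 *
 *	siginfo_t info = { 0 };
 *	int err = waitid(P_ALL, 0, &info, WEXITED | WNOHANG | WNOWAIT);
 *
 *	if (!err && info.si_pid != 0)
 *		handle_child(info.si_pid);	(child stays waitable later
 *						 because of WNOWAIT)
 *
 * handle_child() is a made-up callback used only for the example.
 */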

#ifdef __KERNEL__

#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/current.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

struct __wait_queue {
	unsigned int flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	void *private;
	wait_queue_func_t func;
	struct list_head task_list;
};

struct wait_bit_key {
	void *flags;
	int bit_nr;
};

struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};

struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

extern void init_waitqueue_head(wait_queue_head_t *q);

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->private = p;
	q->func = default_wake_function;
}

static inline void init_waitqueue_func_entry(wait_queue_t *q,
					wait_queue_func_t func)
{
	q->flags = 0;
	q->private = NULL;
	q->func = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}
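
/*
 * Usage sketch (illustrative, not part of this header): waitqueue_active()
 * is typically used to skip the wake-up path when nobody is waiting, e.g.:
 *
 *	if (waitqueue_active(&wq))
 *		wake_up(&wq);
 *
 * Note that this check is racy unless the caller orders its condition
 * update against the waiter's prepare_to_wait(); when in doubt, an
 * unconditional wake_up() is the safe choice.
 */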

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
						wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void __remove_wait_queue(wait_queue_head_t *head,
							wait_queue_t *old)
{
	list_del(&old->task_list);
}

void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
			int nr_exclusive, int sync, void *key);
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
			void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m)				\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)				\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)			\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
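
/*
 * Usage sketch (illustrative, not part of this header): the *_poll() macros
 * pass an event mask as the wakeup key, so key-aware waiters such as epoll
 * can filter out wakeups they do not care about.  A driver that just became
 * readable might do (my_queue is a made-up wait queue for the example):
 *
 *	wake_up_interruptible_poll(&my_queue, POLLIN | POLLRDNORM);
 *
 * Callers that do not care about keyed wakeups can keep using plain
 * wake_up_interruptible(), which passes a NULL key.
 */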

#define __wait_event(wq, condition)					\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		schedule();						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)
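
/*
 * Usage sketch (illustrative, not part of this header): a minimal
 * producer/consumer pairing of wait_event() and wake_up().  The names
 * my_wq and my_done are made up for the example:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_done;
 *
 *	consumer:
 *		wait_event(my_wq, my_done != 0);
 *
 *	producer:
 *		my_done = 1;
 *		wake_up(&my_wq);
 *
 * The condition write must happen before the wake_up(), exactly as the
 * comment above requires.
 */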

#define __wait_event_timeout(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		ret = schedule_timeout(ret);				\
		if (!ret)						\
			break;						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, and the remaining
 * jiffies if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_timeout(wq, condition, __ret);		\
	__ret;								\
})
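
/*
 * Usage sketch (illustrative, not part of this header): waiting for the
 * hypothetical my_done flag for up to one second and distinguishing the
 * timeout case from success:
 *
 *	long left = wait_event_timeout(my_wq, my_done != 0,
 *				       msecs_to_jiffies(1000));
 *
 *	if (!left)
 *		return -ETIMEDOUT;	(timeout elapsed, condition false)
 *
 * A non-zero return is the number of jiffies remaining when the condition
 * became true; msecs_to_jiffies() comes from <linux/jiffies.h>.
 */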

#define __wait_event_interruptible(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible(wq, condition, __ret);	\
	__ret;								\
})
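
/*
 * Usage sketch (illustrative, not part of this header): the interruptible
 * variant can be cut short by a signal, so its return value must be checked
 * and usually propagated so the syscall can be restarted:
 *
 *	int err = wait_event_interruptible(my_wq, my_done != 0);
 *
 *	if (err)
 *		return err;	(-ERESTARTSYS: a signal arrived first)
 *
 * my_wq/my_done are the same made-up names used in the earlier sketches.
 */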

#define __wait_event_interruptible_timeout(wq, condition, ret)		\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			ret = schedule_timeout(ret);			\
			if (!ret)					\
				break;					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies otherwise
 * if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret;								\
})
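
/*
 * Usage sketch (illustrative, not part of this header): this variant has a
 * three-way result that callers need to disentangle:
 *
 *	long ret = wait_event_interruptible_timeout(my_wq, my_done != 0,
 *						    msecs_to_jiffies(500));
 *
 *	if (ret == 0)
 *		the timeout elapsed and the condition is still false;
 *	else if (ret < 0)
 *		a signal interrupted the wait (-ERESTARTSYS);
 *	else
 *		the condition became true with ret jiffies to spare;
 */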

#define __wait_event_interruptible_exclusive(wq, condition, ret)	\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait_exclusive(&wq, &__wait,			\
					TASK_INTERRUPTIBLE);		\
		if (condition) {					\
			finish_wait(&wq, &__wait);			\
			break;						\
		}							\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		abort_exclusive_wait(&wq, &__wait,			\
				TASK_INTERRUPTIBLE, NULL);		\
		break;							\
	}								\
} while (0)

#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible_exclusive(wq, condition, __ret);\
	__ret;								\
})

#define __wait_event_killable(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_KILLABLE);		\
		if (condition)						\
			break;						\
		if (!fatal_signal_pending(current)) {			\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_killable(wq, condition, __ret);		\
	__ret;								\
})
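
/*
 * Usage sketch (illustrative, not part of this header): wait_event_killable()
 * behaves like the interruptible variant, but only fatal signals (those that
 * will kill the task anyway) abort the wait, which suits long filesystem or
 * I/O waits:
 *
 *	int err = wait_event_killable(my_wq, my_done != 0);
 *
 *	if (err)
 *		return err;	(fatal signal pending, -ERESTARTSYS)
 */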

/*
 * Must be called with the spinlock in the wait_queue_head_t held.
 */
static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
						   wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

/*
 * Must be called with the spinlock in the wait_queue_head_t held.
 */
static inline void remove_wait_queue_locked(wait_queue_head_t *q,
					    wait_queue_t *wait)
{
	__remove_wait_queue(q, wait);
}

/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy.  DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces.
 */
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q,
			     signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
					   signed long timeout);

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
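
/*
 * Usage sketch (illustrative, not part of this header): the wait_event*()
 * macros above expand to this canonical open-coded loop, which drivers can
 * also write by hand when the condition check needs extra work:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
 *		if (my_done)
 *			break;
 *		if (signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 *
 * autoremove_wake_function() (used by DEFINE_WAIT) removes the entry from
 * the queue on wake-up, and finish_wait() handles the case where no wake-up
 * ever happened.
 */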

#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}

#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
	} while (0)

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 */
static inline int wait_on_bit(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}

/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set a bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit_lock() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 */
static inline int wait_on_bit_lock(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}
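
/*
 * Usage sketch (illustrative, not part of this header): a made-up flags word
 * with bit 0 acting as a busy flag.  One thread waits for the bit, the owner
 * clears it and wakes the hashed bit waitqueue:
 *
 *	static unsigned long my_flags;		(bit 0 = busy)
 *
 *	waiter:
 *		wait_on_bit(&my_flags, 0, my_wait_action, TASK_UNINTERRUPTIBLE);
 *
 *	owner, when done:
 *		clear_bit(0, &my_flags);
 *		smp_mb__after_clear_bit();
 *		wake_up_bit(&my_flags, 0);
 *
 * my_wait_action() is a hypothetical @action callback; it typically just
 * calls schedule() and returns 0.
 */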

#endif /* __KERNEL__ */

#endif