Mirror of https://github.com/edk2-porting/linux-next.git (synced 2024-12-18 18:23:53 +08:00)

Commit 790ba4566c: net/core/stream.c
tcp: set SOCK_NOSPACE under memory pressure

Under tcp memory pressure, calling epoll_wait() in edge triggered mode
after -EAGAIN can result in an indefinite hang in epoll_wait(), even when
there is sufficient memory available to continue making progress. The
problem is that when __sk_mem_schedule() returns 0 under memory pressure,
we do not set the SOCK_NOSPACE flag in the tcp write paths (tcp_sendmsg()
or do_tcp_sendpages()). Then, since SOCK_NOSPACE is used to trigger
wakeups when incoming acks create sufficient new space in the write
queue, all outstanding packets are acked, but we never wake up with the
EPOLLOUT that we are expecting from epoll_wait().

This issue is currently limited to epoll() when used in edge trigger
mode, since tcp_poll() does in fact currently set SOCK_NOSPACE. This is
sufficient for poll()/select() and epoll() in level trigger mode.
However, in edge trigger mode, epoll() is relying on the write path to
set SOCK_NOSPACE. EPOLL(7) says that in edge-trigger mode we can only
call epoll_wait() after read/write return -EAGAIN. Thus, in the case of
the socket write, we are relying on the fact that tcp_sendmsg()/network
write paths are going to issue a wakeup for us at some point in the
future when we get -EAGAIN.

Normally, epoll() edge trigger works fine when we've exceeded the
sk->sndbuf, because in that case we do set SOCK_NOSPACE. However, when we
return -EAGAIN from the write path because we are over the tcp memory
limits, and not because we are over the sndbuf, we are never going to get
another wakeup.

I can reproduce this issue more readily with SO_SNDBUF, since
__sk_mem_schedule() will then return 0 (failure) sooner:

1) create socket and set SO_SNDBUF to N
2) add socket as edge trigger
3) write to socket and block in epoll on -EAGAIN
4) cause tcp mem pressure via: echo "<small val>" > net.ipv4.tcp_mem

The fix here is simply to set SOCK_NOSPACE in sk_stream_wait_memory()
when the socket is non-blocking. Note that SOCK_NOSPACE, in addition to
waking up outstanding waiters, is also used to expand the size of the
sk->sndbuf. However, we will not expand it by setting it in this case,
because tcp_should_expand_sndbuf() ensures that no expansion occurs when
we are under tcp memory pressure.

Note that we could still hang if sk->sk_wmem_queued is 0 when we get the
-EAGAIN. In this case the SOCK_NOSPACE bit will not help, since we are
waiting for an event that will never happen. I believe that this case is
harder to hit (and I did not hit it in my testing), in that over the tcp
'soft' memory limits we continue to guarantee a minimum write buffer
size. Perhaps we could return -ENOSPC in this case, or maybe we could
simply issue a wakeup, such that we keep retrying the write. Note that
this case is not specific to epoll() ET, but rather would affect blocking
sockets as well.

So I view this patch as bringing epoll() edge-trigger into sync with the
current poll()/select()/epoll() level trigger and blocking sockets
behavior.

Signed-off-by: Jason Baron <jbaron@akamai.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
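A userspace sketch of the four reproduction steps above (an illustration, not
part of the patch: the listener address 127.0.0.1:5000, the 4 KiB buffer, and
the omitted error handling are assumptions, and step 4 happens out of band via
sysctl while the program is parked in epoll_wait()):

        /* Sketch of the reproducer: assumes a TCP listener on
         * 127.0.0.1:5000 and that net.ipv4.tcp_mem is shrunk out of
         * band (step 4) while we are blocked in epoll_wait(). */
        #include <arpa/inet.h>
        #include <errno.h>
        #include <fcntl.h>
        #include <netinet/in.h>
        #include <sys/epoll.h>
        #include <sys/socket.h>
        #include <unistd.h>

        int main(void)
        {
                char buf[4096] = { 0 };
                int sndbuf = 4096;                        /* step 1: small SO_SNDBUF */
                int fd = socket(AF_INET, SOCK_STREAM, 0);
                int epfd = epoll_create1(0);
                struct sockaddr_in addr = { .sin_family = AF_INET,
                                            .sin_port = htons(5000) };
                struct epoll_event ev = { .events = EPOLLOUT | EPOLLET };

                inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);
                setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf, sizeof(sndbuf));
                connect(fd, (struct sockaddr *)&addr, sizeof(addr));
                fcntl(fd, F_SETFL, O_NONBLOCK);

                ev.data.fd = fd;
                epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);  /* step 2: edge trigger */

                while (write(fd, buf, sizeof(buf)) > 0)   /* step 3: fill sndbuf */
                        ;
                if (errno == EAGAIN)                      /* step 4, then wait: */
                        epoll_wait(epfd, &ev, 1, -1);     /* hangs before the fix */
                return 0;
        }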
213 lines · 5.2 KiB · C
/*
 *     SUCS NET3:
 *
 *     Generic stream handling routines. These are generic for most
 *     protocols. Even IP. Tonight 8-).
 *     This is used because TCP, LLC (others too) layer all have mostly
 *     identical sendmsg() and recvmsg() code.
 *     So we (will) share it here.
 *
 *     Authors:        Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *                     (from old tcp.c code)
 *                     Alan Cox <alan@lxorguk.ukuu.org.uk> (Borrowed comments 8-))
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/signal.h>
#include <linux/tcp.h>
#include <linux/wait.h>
#include <net/sock.h>

/**
 * sk_stream_write_space - stream socket write_space callback.
 * @sk: socket
 *
 * FIXME: write proper description
 */
void sk_stream_write_space(struct sock *sk)
{
        struct socket *sock = sk->sk_socket;
        struct socket_wq *wq;

        if (sk_stream_is_writeable(sk) && sock) {
                clear_bit(SOCK_NOSPACE, &sock->flags);

                rcu_read_lock();
                wq = rcu_dereference(sk->sk_wq);
                if (wq_has_sleeper(wq))
                        wake_up_interruptible_poll(&wq->wait, POLLOUT |
                                                POLLWRNORM | POLLWRBAND);
                if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
                        sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT);
                rcu_read_unlock();
        }
}
EXPORT_SYMBOL(sk_stream_write_space);
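For context (not part of this file): incoming ACKs that free write-queue space
end up invoking sk->sk_write_space(), and TCP installs this routine as that
callback when the socket is initialized. A minimal sketch of the wiring,
assuming the tcp_init_sock() setup path:

        /* tcp_init_sock() (abridged): ACK processing later calls
         * sk->sk_write_space(), i.e. sk_stream_write_space() above. */
        sk->sk_write_space = sk_stream_write_space;
        sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);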
/**
 * sk_stream_wait_connect - Wait for a socket to get into the connected state
 * @sk: sock to wait on
 * @timeo_p: for how long to wait
 *
 * Must be called with the socket locked.
 */
int sk_stream_wait_connect(struct sock *sk, long *timeo_p)
{
        struct task_struct *tsk = current;
        DEFINE_WAIT(wait);
        int done;

        do {
                int err = sock_error(sk);
                if (err)
                        return err;
                if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV))
                        return -EPIPE;
                if (!*timeo_p)
                        return -EAGAIN;
                if (signal_pending(tsk))
                        return sock_intr_errno(*timeo_p);

                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                sk->sk_write_pending++;
                done = sk_wait_event(sk, timeo_p,
                                     !sk->sk_err &&
                                     !((1 << sk->sk_state) &
                                       ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)));
                finish_wait(sk_sleep(sk), &wait);
                sk->sk_write_pending--;
        } while (!done);
        return 0;
}
EXPORT_SYMBOL(sk_stream_wait_connect);
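Callers use this to park a sender until the handshake completes. A minimal
sketch of the typical caller pattern, loosely following tcp_sendmsg()
(simplified, surrounding code elided):

        /* Wait for the connection unless the socket is already sendable
         * (ESTABLISHED or CLOSE_WAIT); bail out on error or timeout. */
        timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
        if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
                err = sk_stream_wait_connect(sk, &timeo);
                if (err != 0)
                        goto do_error;
        }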
/**
 * sk_stream_closing - Return 1 if we still have things to send in our buffers.
 * @sk: socket to verify
 */
static inline int sk_stream_closing(struct sock *sk)
{
        return (1 << sk->sk_state) &
               (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK);
}

void sk_stream_wait_close(struct sock *sk, long timeout)
{
        if (timeout) {
                DEFINE_WAIT(wait);

                do {
                        prepare_to_wait(sk_sleep(sk), &wait,
                                        TASK_INTERRUPTIBLE);
                        if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk)))
                                break;
                } while (!signal_pending(current) && timeout);

                finish_wait(sk_sleep(sk), &wait);
        }
}
EXPORT_SYMBOL(sk_stream_wait_close);
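tcp_close() is the canonical caller: after sending its FIN it lingers here
until the send queue drains, the timeout expires, or a signal arrives. A
minimal sketch of that call site, assuming the tcp_close() shutdown path
(simplified):

        /* tcp_close(sk, timeout) (abridged): send our FIN if the state
         * machine allows it, then wait for in-flight data to drain. */
        if (tcp_close_state(sk))
                tcp_send_fin(sk);
        sk_stream_wait_close(sk, timeout);      /* timeout from SO_LINGER */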
/**
 * sk_stream_wait_memory - Wait for more memory for a socket
 * @sk: socket to wait for memory
 * @timeo_p: for how long
 */
int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
{
        int err = 0;
        long vm_wait = 0;
        long current_timeo = *timeo_p;
        bool noblock = (*timeo_p ? false : true);
        DEFINE_WAIT(wait);

        if (sk_stream_memory_free(sk))
                current_timeo = vm_wait = (prandom_u32() % (HZ / 5)) + 2;

        while (1) {
                set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

                if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                        goto do_error;
                if (!*timeo_p) {
                        if (noblock)
                                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                        goto do_nonblock;
                }
                if (signal_pending(current))
                        goto do_interrupted;
                clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
                if (sk_stream_memory_free(sk) && !vm_wait)
                        break;

                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                sk->sk_write_pending++;
                sk_wait_event(sk, &current_timeo, sk->sk_err ||
                                                  (sk->sk_shutdown & SEND_SHUTDOWN) ||
                                                  (sk_stream_memory_free(sk) &&
                                                  !vm_wait));
                sk->sk_write_pending--;

                if (vm_wait) {
                        vm_wait -= current_timeo;
                        current_timeo = *timeo_p;
                        if (current_timeo != MAX_SCHEDULE_TIMEOUT &&
                            (current_timeo -= vm_wait) < 0)
                                current_timeo = 0;
                        vm_wait = 0;
                }
                *timeo_p = current_timeo;
        }
out:
        finish_wait(sk_sleep(sk), &wait);
        return err;

do_error:
        err = -EPIPE;
        goto out;
do_nonblock:
        err = -EAGAIN;
        goto out;
do_interrupted:
        err = sock_intr_errno(*timeo_p);
        goto out;
}
EXPORT_SYMBOL(sk_stream_wait_memory);
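This function is where the fix described in the commit message lands: on the
non-blocking path (*timeo_p == 0), SOCK_NOSPACE is now set before returning
-EAGAIN, so a later ACK that frees space runs sk_stream_write_space() above
and edge-triggered epoll finally sees its EPOLLOUT. A minimal sketch of the
typical caller pattern, loosely following tcp_sendmsg() (simplified, labels
abridged):

        /* If neither the sndbuf nor the tcp memory accounting admits more
         * data, wait; for a non-blocking socket (timeo == 0) this now sets
         * SOCK_NOSPACE and fails with -EAGAIN instead of sleeping. */
        if (!sk_stream_memory_free(sk))
                goto wait_for_sndbuf;
        /* ... copy user data into the write queue ... */
wait_for_sndbuf:
        set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
        err = sk_stream_wait_memory(sk, &timeo);
        if (err != 0)
                goto do_error;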
int sk_stream_error(struct sock *sk, int flags, int err)
{
        if (err == -EPIPE)
                err = sock_error(sk) ? : -EPIPE;
        if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
                send_sig(SIGPIPE, current, 0);
        return err;
}
EXPORT_SYMBOL(sk_stream_error);
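Stream send paths funnel their error exits through this helper so that a
broken pipe raises SIGPIPE unless the caller passed MSG_NOSIGNAL. A minimal
sketch of the usual error-path shape (tcp_sendmsg()-style, simplified):

        /* Common tail of a sendmsg() implementation. */
out_err:
        err = sk_stream_error(sk, flags, err);
        release_sock(sk);
        return err;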
void sk_stream_kill_queues(struct sock *sk)
{
        /* First the read buffer. */
        __skb_queue_purge(&sk->sk_receive_queue);

        /* Next, the error queue. */
        __skb_queue_purge(&sk->sk_error_queue);

        /* Next, the write queue. */
        WARN_ON(!skb_queue_empty(&sk->sk_write_queue));

        /* Account for returned memory. */
        sk_mem_reclaim(sk);

        WARN_ON(sk->sk_wmem_queued);
        WARN_ON(sk->sk_forward_alloc);

        /* It is _impossible_ for the backlog to contain anything
         * when we get here. All user references to this socket
         * have gone away; only the net layer can touch it.
         */
}
EXPORT_SYMBOL(sk_stream_kill_queues);
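For context: this teardown runs once the last user reference to the socket is
gone. A minimal sketch of the destroy path that reaches it, assuming the
inet_csk_destroy_sock() flow (simplified):

        /* inet_csk_destroy_sock() (abridged): protocol-specific cleanup
         * runs first, then the generic stream queue purge above. */
        sk->sk_prot->destroy(sk);       /* e.g. tcp_v4_destroy_sock() */
        sk_stream_kill_queues(sk);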