sock: optimise sock_def_write_space barriers

Now we have a separate path for sock_def_write_space() and can go one
step further. When it's called from sock_wfree() we know that there is a
preceding atomic for putting down ->sk_wmem_alloc. We can use it to
replace smp_mb() with a less expensive smp_mb__after_atomic(). It also
removes an extra RCU read lock/unlock as a small bonus.
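
For context, the full barrier in the old path comes from skwq_has_sleeper(),
which issues smp_mb() before waitqueue_active(); the new path instead reuses
the atomic read-modify-write already performed in sock_wfree(). A simplified
sketch of the change (illustrative, not the literal patch):

	/* old: sock_def_write_space() pays for a standalone full barrier
	 * inside skwq_has_sleeper()
	 */
	smp_mb();
	if (waitqueue_active(&wq->wait))
		wake_up_interruptible_sync_poll(...);

	/* new: refcount_sub_and_test() in sock_wfree() is an atomic RMW,
	 * so a barrier that pairs with the preceding atomic is enough
	 */
	free = refcount_sub_and_test(len, &sk->sk_wmem_alloc);
	smp_mb__after_atomic();
	if (wq && waitqueue_active(&wq->wait))
		wake_up_interruptible_sync_poll(...);

On architectures such as x86, where atomic RMW instructions already imply a
full barrier, smp_mb__after_atomic() emits no instructions at all.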

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 0a8afd9f02
parent 052ada0968
Author: Pavel Begunkov <asml.silence@gmail.com>
Date: 2022-04-28 11:58:19 +01:00
Committer: David S. Miller

@@ -146,6 +146,7 @@
 static DEFINE_MUTEX(proto_list_mutex);
 static LIST_HEAD(proto_list);
 
+static void sock_def_write_space_wfree(struct sock *sk);
 static void sock_def_write_space(struct sock *sk);
 
 /**
@@ -2333,7 +2334,7 @@ void sock_wfree(struct sk_buff *skb)
 	    sk->sk_write_space == sock_def_write_space) {
 		rcu_read_lock();
 		free = refcount_sub_and_test(len, &sk->sk_wmem_alloc);
-		sock_def_write_space(sk);
+		sock_def_write_space_wfree(sk);
 		rcu_read_unlock();
 		if (unlikely(free))
 			__sk_free(sk);
@@ -3218,6 +3219,29 @@ static void sock_def_write_space(struct sock *sk)
 	rcu_read_unlock();
 }
 
+/* An optimised version of sock_def_write_space(), should only be called
+ * for SOCK_RCU_FREE sockets under RCU read section and after putting
+ * ->sk_wmem_alloc.
+ */
+static void sock_def_write_space_wfree(struct sock *sk)
+{
+	/* Do not wake up a writer until he can make "significant"
+	 * progress. --DaveM
+	 */
+	if (sock_writeable(sk)) {
+		struct socket_wq *wq = rcu_dereference(sk->sk_wq);
+
+		/* rely on refcount_sub from sock_wfree() */
+		smp_mb__after_atomic();
+		if (wq && waitqueue_active(&wq->wait))
+			wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
+						EPOLLWRNORM | EPOLLWRBAND);
+
+		/* Should agree with poll, otherwise some programs break */
+		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+	}
+}
+
 static void sock_def_destruct(struct sock *sk)
 {
 }
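
Why a full barrier is needed here at all: waitqueue_active() is an unlocked
check, so the waker must publish its update to the wakeup condition before
looking for sleepers, while the sleeper queues itself before re-checking the
condition. A simplified sketch of the pairing (illustrative only; the wq
NULL check and error handling are omitted):

	/* waiter, e.g. a task blocking in poll() or write() */
	prepare_to_wait(&wq->wait, &wait, TASK_INTERRUPTIBLE);	/* implies a full barrier */
	if (!sock_writeable(sk))
		schedule();

	/* waker, i.e. sock_wfree() -> sock_def_write_space_wfree() */
	refcount_sub_and_test(len, &sk->sk_wmem_alloc);	/* socket may become writeable */
	smp_mb__after_atomic();	/* order the sub before the sleeper check */
	if (waitqueue_active(&wq->wait))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT);

If either barrier were missing, the waiter could observe the socket as not
yet writeable while the waker observes an empty wait queue, and the wakeup
would be lost.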