net: memcg: late association of sock to memcg
If a TCP socket is allocated in IRQ context, or cloned in IRQ context from a socket that is unassociated (i.e. not associated with a memcg), it will remain unassociated for its whole life. Almost half of the TCP sockets created on a system are created in IRQ context, so the memory used by such sockets is never accounted to a memcg.

The issue is more widespread in cgroup v1, where network memory accounting is opt-in, but it can also happen in cgroup v2 if the source socket for the cloning was created in the root memcg.

To fix the issue, perform the association of the socket at accept() time, in process context, and then force-charge the memory buffers the socket has already used and reserved.

Signed-off-by: Shakeel Butt <shakeelb@google.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
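To see where the unassociated case comes from, here is a hedged sketch of the passive-open path (call chain paraphrased from the TCP receive code of this era, not quoted from the commit):

/* Passive connections are created while the incoming handshake is
 * handled in softirq context, roughly:
 *
 *   tcp_v4_rcv()                          // softirq
 *     -> tcp_v4_syn_recv_sock()
 *       -> tcp_create_openreq_child()
 *         -> inet_csk_clone_lock()
 *           -> sk_clone_lock()
 *             -> mem_cgroup_sk_alloc()    // in_interrupt() is true here,
 *                                         // so sk->sk_memcg stays NULL
 *
 * Nothing later revisits the association, so the socket's memory is
 * never charged. Deferring the association to accept() guarantees
 * process context, at the cost of back-charging what the socket has
 * consumed in the meantime.
 */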
This commit is contained in:
parent e876ecc67d
commit d752a49865
mm/memcontrol.c
@@ -6682,20 +6682,6 @@ void mem_cgroup_sk_alloc(struct sock *sk)
 	if (!mem_cgroup_sockets_enabled)
 		return;
 
-	/*
-	 * Socket cloning can throw us here with sk_memcg already
-	 * filled. It won't however, necessarily happen from
-	 * process context. So the test for root memcg given
-	 * the current task's memcg won't help us in this case.
-	 *
-	 * Respecting the original socket's memcg is a better
-	 * decision in this case.
-	 */
-	if (sk->sk_memcg) {
-		css_get(&sk->sk_memcg->css);
-		return;
-	}
-
 	/* Do not associate the sock with unrelated interrupted task's memcg. */
 	if (in_interrupt())
 		return;
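For orientation, a hedged sketch of how mem_cgroup_sk_alloc() reads after this hunk. Only the deletion above is from the commit; the tail of the function is paraphrased from the kernel source of this era and may differ in detail:

void mem_cgroup_sk_alloc(struct sock *sk)
{
	struct mem_cgroup *memcg;

	if (!mem_cgroup_sockets_enabled)
		return;

	/* Do not associate the sock with unrelated interrupted task's memcg. */
	if (in_interrupt())
		return;

	/* Tail paraphrased, not part of this hunk: associate the socket
	 * with the current task's memcg, taking a css reference.
	 */
	rcu_read_lock();
	memcg = mem_cgroup_from_task(current);
	if (memcg == root_mem_cgroup)
		goto out;
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
		goto out;
	if (css_tryget(&memcg->css))
		sk->sk_memcg = memcg;
out:
	rcu_read_unlock();
}

With the cloning branch gone, the function only makes sense from process context, which the accept()-time caller added below guarantees.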
net/core/sock.c
@@ -1830,7 +1830,10 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		atomic_set(&newsk->sk_zckey, 0);
 
 		sock_reset_flag(newsk, SOCK_DONE);
-		mem_cgroup_sk_alloc(newsk);
+
+		/* sk->sk_memcg will be populated at accept() time */
+		newsk->sk_memcg = NULL;
+
 		cgroup_sk_alloc(&newsk->sk_cgrp_data);
 
 		rcu_read_lock();
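A hedged note on why the pointer is reset rather than the mem_cgroup_sk_alloc() call simply dropped; the reasoning is inferred from the shape of sk_clone_lock(), not stated in the commit:

/* sk_clone_lock() starts from sock_copy(newsk, sk), a field-for-field
 * copy of the parent socket. Without this explicit reset,
 * newsk->sk_memcg would alias the parent's memcg pointer while holding
 * no css reference of its own, and mem_cgroup_sk_free() would later
 * css_put() a reference that was never taken.
 */
newsk->sk_memcg = NULL;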
net/ipv4/inet_connection_sock.c
@@ -482,6 +482,26 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
 		}
 		spin_unlock_bh(&queue->fastopenq.lock);
 	}
+
+	if (mem_cgroup_sockets_enabled) {
+		int amt;
+
+		/* atomically get the memory usage, set and charge the
+		 * sk->sk_memcg.
+		 */
+		lock_sock(newsk);
+
+		/* The sk has not been accepted yet, no need to look at
+		 * sk->sk_wmem_queued.
+		 */
+		amt = sk_mem_pages(newsk->sk_forward_alloc +
+				   atomic_read(&newsk->sk_rmem_alloc));
+		mem_cgroup_sk_alloc(newsk);
+		if (newsk->sk_memcg && amt)
+			mem_cgroup_charge_skmem(newsk->sk_memcg, amt);
+
+		release_sock(newsk);
+	}
 out:
 	release_sock(sk);
 	if (req)