mm: memcontrol: generalize the socket accounting jump label
The unified hierarchy memory controller is going to use this jump label
as well to control the networking callbacks. Move it to the memory
controller code and give it a more generic name.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent baac50bbc3
commit 80e95fe0fd
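For context, the mechanism being renamed is the kernel's jump-label (static key) pattern: the memcg socket-accounting branches stay patched out as no-ops until the first cgroup actually enables accounting. A minimal sketch of that pattern with the static_key API of this era follows; it is illustrative only, and the helper function names are made up, not part of the patch.

#include <linux/jump_label.h>

/* The key starts disabled, so the guarded branch costs (almost) nothing. */
struct static_key memcg_sockets_enabled_key = STATIC_KEY_INIT_FALSE;

static void account_socket_memory(void)
{
	/*
	 * static_key_false() compiles to a straight-line no-op and is only
	 * patched into a jump once the key is enabled at runtime.
	 */
	if (static_key_false(&memcg_sockets_enabled_key)) {
		/* per-memcg socket accounting would run here */
	}
}

static void enable_socket_accounting(void)
{
	/* Called when the first memcg turns socket accounting on. */
	static_key_slow_inc(&memcg_sockets_enabled_key);
}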
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -681,11 +681,14 @@ void sock_release_memcg(struct sock *sk);
 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
+extern struct static_key memcg_sockets_enabled_key;
+#define mem_cgroup_sockets_enabled static_key_false(&memcg_sockets_enabled_key)
 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 {
 	return memcg->tcp_mem.memory_pressure;
 }
 #else
+#define mem_cgroup_sockets_enabled 0
 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 {
 	return false;
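As a usage note (not part of this diff), networking callers are expected to gate on the new macro before touching the charge/uncharge functions declared above. A hedged sketch, assuming the sk->sk_memcg pointer introduced by the parent commit baac50bbc3:

/* Sketch of a caller; sk->sk_memcg comes from the parent commit. */
static bool charge_sk_memory(struct sock *sk, unsigned int nr_pages)
{
	/* The whole path is skipped while the static key is disabled. */
	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
		return mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages);
	return true;	/* nothing to account against */
}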
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1079,13 +1079,6 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
 #define sk_refcnt_debug_release(sk) do { } while (0)
 #endif /* SOCK_REFCNT_DEBUG */
 
-#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_NET)
-extern struct static_key memcg_socket_limit_enabled;
-#define mem_cgroup_sockets_enabled static_key_false(&memcg_socket_limit_enabled)
-#else
-#define mem_cgroup_sockets_enabled 0
-#endif
-
 static inline bool sk_stream_memory_free(const struct sock *sk)
 {
 	if (sk->sk_wmem_queued >= sk->sk_sndbuf)
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -291,6 +291,9 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
 /* Writing them here to avoid exposing memcg's inner layout */
 #if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
 
+struct static_key memcg_sockets_enabled_key;
+EXPORT_SYMBOL(memcg_sockets_enabled_key);
+
 void sock_update_memcg(struct sock *sk)
 {
 	struct mem_cgroup *memcg;
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -202,11 +202,6 @@ EXPORT_SYMBOL(sk_net_capable);
 static struct lock_class_key af_family_keys[AF_MAX];
 static struct lock_class_key af_family_slock_keys[AF_MAX];
 
-#if defined(CONFIG_MEMCG_KMEM)
-struct static_key memcg_socket_limit_enabled;
-EXPORT_SYMBOL(memcg_socket_limit_enabled);
-#endif
-
 /*
  * Make lock validator output more readable. (we pre-construct these
  * strings build-time, so that runtime initialization of socket
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -34,7 +34,7 @@ void tcp_destroy_cgroup(struct mem_cgroup *memcg)
 		return;
 
 	if (memcg->tcp_mem.active)
-		static_key_slow_dec(&memcg_socket_limit_enabled);
+		static_key_slow_dec(&memcg_sockets_enabled_key);
 }
 
 static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages)
@@ -65,7 +65,7 @@ static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages)
 		 * because when this value change, the code to process it is not
 		 * patched in yet.
 		 */
-		static_key_slow_inc(&memcg_socket_limit_enabled);
+		static_key_slow_inc(&memcg_sockets_enabled_key);
 		memcg->tcp_mem.active = true;
 	}
 
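The last two hunks only rename the key inside the existing enable/disable pairing in the TCP memcg code. A condensed sketch of that lifecycle, with abridged bodies and illustrative function names rather than the full kernel code:

/* Abridged lifecycle of the key, per the two hunks above. */
static void enable_tcp_accounting(struct mem_cgroup *memcg)
{
	if (!memcg->tcp_mem.active) {
		/*
		 * Patch the accounting branches in before the memcg is
		 * marked active, so nobody observes active == true while
		 * the guarded code is still compiled out.
		 */
		static_key_slow_inc(&memcg_sockets_enabled_key);
		memcg->tcp_mem.active = true;
	}
}

static void disable_tcp_accounting(struct mem_cgroup *memcg)
{
	/* Drop the reference taken when accounting was first enabled. */
	if (memcg->tcp_mem.active)
		static_key_slow_dec(&memcg_sockets_enabled_key);
}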