commit cfdab60bfa
Patch series "memcg: optimize charge codepath", v2.

Recently the Linux networking stack moved from a very old per-socket pre-charge caching to per-cpu caching to avoid pre-charge fragmentation and unwarranted OOMs. One impact of this change is that for network traffic workloads, the memcg charging codepath can become a bottleneck. The kernel test robot has also reported this regression[1]. This patch series tries to improve memcg charging for such workloads.

This patch series implements three optimizations:
(A) Reduce atomic ops in the page counter update path.
(B) Change the layout of struct page_counter to eliminate false sharing between usage and high.
(C) Increase the memcg charge batch to 64.

To evaluate the impact of these optimizations, on a 72 CPU machine, we ran the following workload in the root memcg and then compared it with the scenario where the workload runs in a three-level cgroup hierarchy with the top level having min and low set up appropriately.

 $ netserver -6
 # 36 instances of netperf with following params
 $ netperf -6 -H ::1 -l 60 -t TCP_SENDFILE -- -m 10K

Results (average throughput of netperf):
1. root memcg           21694.8 Mbps
2. 6.0-rc1              10482.7 Mbps (-51.6%)
3. 6.0-rc1 + (A)        14542.5 Mbps (-32.9%)
4. 6.0-rc1 + (B)        12413.7 Mbps (-42.7%)
5. 6.0-rc1 + (C)        17063.7 Mbps (-21.3%)
6. 6.0-rc1 + (A+B+C)    20120.3 Mbps (-7.2%)

With all three optimizations, the memcg overhead of this workload has been reduced from 51.6% to just 7.2%.

[1] https://lore.kernel.org/linux-mm/20220619150456.GB34471@xsang-OptiPlex-9020/

This patch (of 3):

For cgroups using low or min protections, propagate_protected_usage() was doing an atomic xchg() operation irrespective of whether the protected usage had changed. We can optimize out this atomic operation for one specific scenario: the workload is using the protection (i.e. min > 0) and the usage is above the protection (i.e. usage > min). This scenario is actually very common, where users want a part of their workload protected against external reclaim. This optimization does introduce a race when the usage is around the protection and concurrent charges and uncharges trip it over or under the protection; in such cases we might see lower effective protection, but the subsequent charge/uncharge will correct it.

To evaluate the impact of this optimization, on a 72 CPU machine, we ran the following workload in a three-level cgroup hierarchy with the top level having min and low set up appropriately, to see if this optimization is effective for the mentioned case.

 $ netserver -6
 # 36 instances of netperf with following params
 $ netperf -6 -H ::1 -l 60 -t TCP_SENDFILE -- -m 10K

Results (average throughput of netperf):
Without (6.0-rc1)       10482.7 Mbps
With patch              14542.5 Mbps (38.7% improvement)

With the patch, the throughput of this workload improved by 38.7%.

Link: https://lkml.kernel.org/r/20220825000506.239406-1-shakeelb@google.com
Link: https://lkml.kernel.org/r/20220825000506.239406-2-shakeelb@google.com
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Reported-by: kernel test robot <oliver.sang@intel.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Reviewed-by: Feng Tang <feng.tang@intel.com>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: "Michal Koutný" <mkoutny@suse.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Oliver Sang <oliver.sang@intel.com>
Cc: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
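In short, the change makes propagate_protected_usage() take a plain atomic read first and only pay for the atomic xchg() when the protected value has actually moved. A rough before/after sketch of the min-protection half (the low half is symmetric; the pre-patch fragment is reconstructed for illustration, and the complete post-patch function appears in the file below):

        /* before (6.0-rc1, reconstructed): unconditional atomic swap */
        protected = min(usage, READ_ONCE(c->min));
        old_protected = atomic_long_xchg(&c->min_usage, protected);
        delta = protected - old_protected;
        if (delta)
                atomic_long_add(delta, &c->parent->children_min_usage);

        /* after: read first, swap only when the value changed */
        protected = min(usage, READ_ONCE(c->min));
        old_protected = atomic_long_read(&c->min_usage);
        if (protected != old_protected) {
                old_protected = atomic_long_xchg(&c->min_usage, protected);
                delta = protected - old_protected;
                if (delta)
                        atomic_long_add(delta, &c->parent->children_min_usage);
        }
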
// SPDX-License-Identifier: GPL-2.0
/*
 * Lockless hierarchical page accounting & limiting
 *
 * Copyright (C) 2014 Red Hat, Inc., Johannes Weiner
 */

#include <linux/page_counter.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <asm/page.h>

static void propagate_protected_usage(struct page_counter *c,
                                      unsigned long usage)
{
        unsigned long protected, old_protected;
        long delta;

        if (!c->parent)
                return;

        protected = min(usage, READ_ONCE(c->min));
        old_protected = atomic_long_read(&c->min_usage);
        if (protected != old_protected) {
                old_protected = atomic_long_xchg(&c->min_usage, protected);
                delta = protected - old_protected;
                if (delta)
                        atomic_long_add(delta, &c->parent->children_min_usage);
        }

        protected = min(usage, READ_ONCE(c->low));
        old_protected = atomic_long_read(&c->low_usage);
        if (protected != old_protected) {
                old_protected = atomic_long_xchg(&c->low_usage, protected);
                delta = protected - old_protected;
                if (delta)
                        atomic_long_add(delta, &c->parent->children_low_usage);
        }
}

/**
 * page_counter_cancel - take pages out of the local counter
 * @counter: counter
 * @nr_pages: number of pages to cancel
 */
void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
{
        long new;

        new = atomic_long_sub_return(nr_pages, &counter->usage);
        /* More uncharges than charges? */
        if (WARN_ONCE(new < 0, "page_counter underflow: %ld nr_pages=%lu\n",
                      new, nr_pages)) {
                new = 0;
                atomic_long_set(&counter->usage, new);
        }
        propagate_protected_usage(counter, new);
}

/**
 * page_counter_charge - hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 *
 * NOTE: This does not consider any configured counter limits.
 */
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
{
        struct page_counter *c;

        for (c = counter; c; c = c->parent) {
                long new;

                new = atomic_long_add_return(nr_pages, &c->usage);
                propagate_protected_usage(c, new);
                /*
                 * This is indeed racy, but we can live with some
                 * inaccuracy in the watermark.
                 */
                if (new > READ_ONCE(c->watermark))
                        WRITE_ONCE(c->watermark, new);
        }
}
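
/*
 * Illustrative note (not part of the upstream file): unlike
 * page_counter_try_charge() below, page_counter_charge() never fails and
 * may push usage past c->max, so it is meant for charges that have
 * already been decided on and only need the hierarchy's bookkeeping
 * updated:
 *
 *      page_counter_charge(counter, nr_pages);
 */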

/**
 * page_counter_try_charge - try to hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 * @fail: points first counter to hit its limit, if any
 *
 * Returns %true on success, or %false and @fail if the counter or one
 * of its ancestors has hit its configured limit.
 */
bool page_counter_try_charge(struct page_counter *counter,
                             unsigned long nr_pages,
                             struct page_counter **fail)
{
        struct page_counter *c;

        for (c = counter; c; c = c->parent) {
                long new;
                /*
                 * Charge speculatively to avoid an expensive CAS.  If
                 * a bigger charge fails, it might falsely lock out a
                 * racing smaller charge and send it into reclaim
                 * early, but the error is limited to the difference
                 * between the two sizes, which is less than 2M/4M in
                 * case of a THP locking out a regular page charge.
                 *
                 * The atomic_long_add_return() implies a full memory
                 * barrier between incrementing the count and reading
                 * the limit.  When racing with page_counter_set_max(),
                 * we either see the new limit or the setter sees the
                 * counter has changed and retries.
                 */
                new = atomic_long_add_return(nr_pages, &c->usage);
                if (new > c->max) {
                        atomic_long_sub(nr_pages, &c->usage);
                        /*
                         * This is racy, but we can live with some
                         * inaccuracy in the failcnt which is only used
                         * to report stats.
                         */
                        data_race(c->failcnt++);
                        *fail = c;
                        goto failed;
                }
                propagate_protected_usage(c, new);
                /*
                 * Just like with failcnt, we can live with some
                 * inaccuracy in the watermark.
                 */
                if (new > READ_ONCE(c->watermark))
                        WRITE_ONCE(c->watermark, new);
        }
        return true;

failed:
        for (c = counter; c != *fail; c = c->parent)
                page_counter_cancel(c, nr_pages);

        return false;
}
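
/*
 * Illustrative caller sketch (not part of the upstream file;
 * reclaim_from() is a hypothetical helper): on failure, @fail points at
 * the counter that hit its limit, which the caller can use to decide
 * where to reclaim from before retrying:
 *
 *      struct page_counter *fail;
 *
 *      while (!page_counter_try_charge(counter, nr_pages, &fail)) {
 *              if (!reclaim_from(fail, nr_pages))
 *                      return -ENOMEM;
 *      }
 */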

/**
 * page_counter_uncharge - hierarchically uncharge pages
 * @counter: counter
 * @nr_pages: number of pages to uncharge
 */
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
{
        struct page_counter *c;

        for (c = counter; c; c = c->parent)
                page_counter_cancel(c, nr_pages);
}

/**
 * page_counter_set_max - set the maximum number of pages allowed
 * @counter: counter
 * @nr_pages: limit to set
 *
 * Returns 0 on success, -EBUSY if the current number of pages on the
 * counter already exceeds the specified limit.
 *
 * The caller must serialize invocations on the same counter.
 */
int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages)
{
        for (;;) {
                unsigned long old;
                long usage;

                /*
                 * Update the limit while making sure that it's not
                 * below the concurrently-changing counter value.
                 *
                 * The xchg implies two full memory barriers before
                 * and after, so the read-swap-read is ordered and
                 * ensures coherency with page_counter_try_charge():
                 * that function modifies the count before checking
                 * the limit, so if it sees the old limit, we see the
                 * modified counter and retry.
                 */
                usage = page_counter_read(counter);

                if (usage > nr_pages)
                        return -EBUSY;

                old = xchg(&counter->max, nr_pages);

                if (page_counter_read(counter) <= usage || nr_pages >= old)
                        return 0;

                counter->max = old;
                cond_resched();
        }
}
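
/*
 * Illustrative sketch (not part of the upstream file; new_limit_pages is
 * a placeholder): because lowering the limit below the current usage is
 * rejected, a typical writer either reclaims and retries, or propagates
 * the error:
 *
 *      if (page_counter_set_max(counter, new_limit_pages) == -EBUSY)
 *              return -EBUSY;
 */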

/**
 * page_counter_set_min - set the amount of protected memory
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages)
{
        struct page_counter *c;

        WRITE_ONCE(counter->min, nr_pages);

        for (c = counter; c; c = c->parent)
                propagate_protected_usage(c, atomic_long_read(&c->usage));
}

/**
 * page_counter_set_low - set the amount of protected memory
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages)
{
        struct page_counter *c;

        WRITE_ONCE(counter->low, nr_pages);

        for (c = counter; c; c = c->parent)
                propagate_protected_usage(c, atomic_long_read(&c->usage));
}

/**
 * page_counter_memparse - memparse() for page counter limits
 * @buf: string to parse
 * @max: string meaning maximum possible value
 * @nr_pages: returns the result in number of pages
 *
 * Returns -EINVAL, or 0 and @nr_pages on success.  @nr_pages will be
 * limited to %PAGE_COUNTER_MAX.
 */
int page_counter_memparse(const char *buf, const char *max,
                          unsigned long *nr_pages)
{
        char *end;
        u64 bytes;

        if (!strcmp(buf, max)) {
                *nr_pages = PAGE_COUNTER_MAX;
                return 0;
        }

        bytes = memparse(buf, &end);
        if (*end != '\0')
                return -EINVAL;

        *nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX);

        return 0;
}
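
/*
 * Illustrative usage (not part of the upstream file): cgroup interface
 * files pass the user-supplied string plus the keyword that means "no
 * limit", e.g. "max" for cgroup v2:
 *
 *      unsigned long nr_pages;
 *
 *      if (page_counter_memparse(buf, "max", &nr_pages))
 *              return -EINVAL;
 */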