mirror of https://mirrors.bfsu.edu.cn/git/linux.git
commit 2864f3d0f5
While doing MADV_PAGEOUT, the current code clears the PTE young bit so that vmscan will not see the young flags and the reclamation of the madvised folios can go ahead. The same effect can be achieved by simply ignoring references instead, which lets us drop the TLB flush in madvise and the rmap overhead in vmscan.

Regarding the side effect: in the original code, if a parallel thread accesses the madvised memory while another thread is doing the madvise, the folios get a chance to be re-activated by vmscan (though the window is quite small, since the PTEs are checked right after their young bits are cleared). With this patch they will still be reclaimed. However, doing PAGEOUT and accessing the same memory at the same time is a degenerate, DoS-like pattern, so this is probably not worth caring about; arguably, ignoring new accesses during that small window is even the better behaviour.

For DAMON's DAMOS_PAGEOUT, which works on physical address regions, the behaviour is kept as is, since a physical address might be mapped by multiple processes, while MADV_PAGEOUT on virtual addresses is much more aggressive about reclamation. To leave paddr's DAMOS_PAGEOUT untouched, ignore_references is simply passed as false to reclaim_pages().

The microbenchmark below shows a 6% reduction in MADV_PAGEOUT latency:

#define PGSIZE 4096
main()
{
	int i;
#define SIZE 512*1024*1024
	volatile long *p = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	for (i = 0; i < SIZE/sizeof(long); i += PGSIZE / sizeof(long))
		p[i] = 0x11;

	madvise(p, SIZE, MADV_PAGEOUT);
}

            w/o patch                        w/ patch
root@10:~# time ./a.out              root@10:~# time ./a.out
real    0m49.634s                    real    0m46.334s
user    0m0.637s                     user    0m0.648s
sys     0m47.434s                    sys     0m44.265s

Link: https://lkml.kernel.org/r/20240226005739.24350-1-21cnbao@gmail.com
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: SeongJae Park <sj@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
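For context, here is a self-contained version of the microbenchmark quoted in the commit message above. The headers, the explicit int main(), the error checks, and the MADV_PAGEOUT fallback definition are additions made for readability, not part of the original; the 512 MiB mapping, the one-write-per-page loop, and the madvise() call follow the quoted code. Run it under `time` (as in the table above) on kernels with and without the patch to reproduce the comparison.

/*
 * Self-contained sketch of the microbenchmark from the commit message.
 * Headers, error handling, and the explicit int main() are additions.
 */
#include <stdio.h>
#include <sys/mman.h>

#ifndef MADV_PAGEOUT
#define MADV_PAGEOUT 21		/* reclaim these pages (Linux 5.4+) */
#endif

#define PGSIZE 4096
#define SIZE (512UL * 1024 * 1024)

int main(void)
{
	size_t i;
	/* Map 512 MiB of private anonymous memory. */
	volatile long *p = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Write one long per page so every page is actually faulted in. */
	for (i = 0; i < SIZE / sizeof(long); i += PGSIZE / sizeof(long))
		p[i] = 0x11;

	/* Ask the kernel to reclaim the whole range. */
	if (madvise((void *)p, SIZE, MADV_PAGEOUT))
		perror("madvise");

	return 0;
}

Each run touches every page once and then pages the whole range out, so nearly all of the time is spent in the kernel; that is why the difference in the quoted results shows up almost entirely in sys time.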
351 lines
7.8 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>

#include "../internal.h"
#include "ops-common.h"

static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte)
			damon_ptep_mkold(pvmw.pte, vma, addr);
		else
			damon_pmdp_mkold(pvmw.pmd, vma, addr);
	}
	return true;
}

static void damon_pa_mkold(unsigned long paddr)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
	struct rmap_walk_control rwc = {
		.rmap_one = __damon_pa_mkold,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio)
		return;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		folio_set_idle(folio);
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		goto out;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

out:
	folio_put(folio);
}

static void __damon_pa_prepare_access_check(struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(r->sampling_addr);
}

static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(r);
	}
}

static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	bool *accessed = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	*accessed = false;
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			*accessed = pte_young(ptep_get(pvmw.pte)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			*accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
#else
			WARN_ON_ONCE(1);
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
		}
		if (*accessed) {
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	/* If accessed, stop walking */
	return *accessed == false;
}

static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
	bool accessed = false;
	struct rmap_walk_control rwc = {
		.arg = &accessed,
		.rmap_one = __damon_pa_young,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio)
		return false;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		if (folio_test_idle(folio))
			accessed = false;
		else
			accessed = true;
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		goto out;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

out:
	*folio_sz = folio_size(folio);
	folio_put(folio);
	return accessed;
}

static void __damon_pa_check_access(struct damon_region *r,
		struct damon_attrs *attrs)
{
	static unsigned long last_addr;
	static unsigned long last_folio_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (ALIGN_DOWN(last_addr, last_folio_sz) ==
			ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
		damon_update_region_access_rate(r, last_accessed, attrs);
		return;
	}

	last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
	damon_update_region_access_rate(r, last_accessed, attrs);

	last_addr = r->sampling_addr;
}

static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(r, &ctx->attrs);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}

static bool __damos_pa_filter_out(struct damos_filter *filter,
		struct folio *folio)
{
	bool matched = false;
	struct mem_cgroup *memcg;

	switch (filter->type) {
	case DAMOS_FILTER_TYPE_ANON:
		matched = folio_test_anon(folio);
		break;
	case DAMOS_FILTER_TYPE_MEMCG:
		rcu_read_lock();
		memcg = folio_memcg_check(folio);
		if (!memcg)
			matched = false;
		else
			matched = filter->memcg_id == mem_cgroup_id(memcg);
		rcu_read_unlock();
		break;
	default:
		break;
	}

	return matched == filter->matching;
}

/*
 * damos_pa_filter_out - Return true if the page should be filtered out.
 */
static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
{
	struct damos_filter *filter;

	damos_for_each_filter(filter, scheme) {
		if (__damos_pa_filter_out(filter, folio))
			return true;
	}
	return false;
}

static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
{
	unsigned long addr, applied;
	LIST_HEAD(folio_list);

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio)
			continue;

		if (damos_pa_filter_out(s, folio))
			goto put_folio;

		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (!folio_isolate_lru(folio))
			goto put_folio;
		if (folio_test_unevictable(folio))
			folio_putback_lru(folio);
		else
			list_add(&folio->lru, &folio_list);
put_folio:
		folio_put(folio);
	}
	/*
	 * Do not ignore references: a physical address may be mapped by
	 * multiple processes, so keep DAMOS_PAGEOUT's behaviour as is.
	 */
	applied = reclaim_pages(&folio_list, false);
	cond_resched();
	return applied * PAGE_SIZE;
}

static inline unsigned long damon_pa_mark_accessed_or_deactivate(
		struct damon_region *r, struct damos *s, bool mark_accessed)
{
	unsigned long addr, applied = 0;

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio)
			continue;

		if (damos_pa_filter_out(s, folio))
			goto put_folio;

		if (mark_accessed)
			folio_mark_accessed(folio);
		else
			folio_deactivate(folio);
		applied += folio_nr_pages(folio);
put_folio:
		folio_put(folio);
	}
	return applied * PAGE_SIZE;
}

static unsigned long damon_pa_mark_accessed(struct damon_region *r,
	struct damos *s)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, true);
}

static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
	struct damos *s)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, false);
}

static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pa_pageout(r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_pa_mark_accessed(r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_pa_deactivate_pages(r, scheme);
	case DAMOS_STAT:
		break;
	default:
		/* DAMOS actions that are not yet supported by 'paddr'. */
		break;
	}
	return 0;
}

static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_cold_score(context, r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_hot_score(context, r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_cold_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

static int __init damon_pa_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_PADDR,
		.init = NULL,
		.update = NULL,
		.prepare_access_checks = damon_pa_prepare_access_checks,
		.check_accesses = damon_pa_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = NULL,
		.cleanup = NULL,
		.apply_scheme = damon_pa_apply_scheme,
		.get_scheme_score = damon_pa_scheme_score,
	};

	return damon_register_ops(&ops);
};

subsys_initcall(damon_pa_initcall);