mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-12-21 10:05:00 +08:00)
c33c794828
Convert all instances of direct pte_t* dereferencing to instead use
ptep_get() helper.  This means that by default, the accesses change from a
C dereference to a READ_ONCE().  This is technically the correct thing to
do since where pgtables are modified by HW (for access/dirty) they are
volatile and therefore we should always ensure READ_ONCE() semantics.

But more importantly, by always using the helper, it can be overridden by
the architecture to fully encapsulate the contents of the pte.  Arch code
is deliberately not converted, as the arch code knows best.  It is
intended that arch code (arm64) will override the default with its own
implementation that can (e.g.) hide certain bits from the core code, or
determine young/dirty status by mixing in state from another source.

Conversion was done using Coccinelle:

----

// $ make coccicheck \
//          COCCI=ptepget.cocci \
//          SPFLAGS="--include-headers" \
//          MODE=patch

virtual patch

@ depends on patch @
pte_t *v;
@@

- *v
+ ptep_get(v)

----

Then reviewed and hand-edited to avoid multiple unnecessary calls to
ptep_get(), instead opting to store the result of a single call in a
variable, where it is correct to do so.  This aims to negate any cost of
READ_ONCE() and will benefit arch-overrides that may be more complex.

Included is a fix for an issue in an earlier version of this patch that
was pointed out by kernel test robot.  The issue arose because config
MMU=n elides definition of the ptep helper functions, including
ptep_get().  HUGETLB_PAGE=n configs still define a simple
huge_ptep_clear_flush() for linking purposes, which dereferences the
ptep.  So when both configs are disabled, this caused a build error
because ptep_get() is not defined.  Fix by continuing to do a direct
dereference when MMU=n.  This is safe because for this config the arch
code cannot be trying to virtualize the ptes because none of the ptep
helpers are defined.

Link: https://lkml.kernel.org/r/20230612151545.3317766-4-ryan.roberts@arm.com
Reported-by: kernel test robot <lkp@intel.com>
Link: https://lore.kernel.org/oe-kbuild-all/202305120142.yXsNEo6H-lkp@intel.com/
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Dave Airlie <airlied@gmail.com>
Cc: Dimitri Sivanich <dimitri.sivanich@hpe.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: SeongJae Park <sj@kernel.org>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Yu Zhao <yuzhao@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
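A minimal sketch of the pattern this series applies, for readers skimming the change (the call site below is illustrative, not taken from the patch; the variable name ptep is made up; the default helper shown follows the generic definition in include/linux/pgtable.h):

----

/* before: plain C dereference of the pte */
pte_t pte = *ptep;

/* after: read through the helper, a READ_ONCE() by default */
pte_t pte = ptep_get(ptep);

/* generic default, which an architecture can override */
static inline pte_t ptep_get(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}

----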
123 lines | 3.0 KiB | C
// SPDX-License-Identifier: GPL-2.0
/*
 * Common Primitives for Data Access Monitoring
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>

#include "ops-common.h"

/*
 * Get an online page for a pfn if it's in the LRU list.  Otherwise, returns
 * NULL.
 *
 * The body of this function is stolen from the 'page_idle_get_folio()'.  We
 * steal rather than reuse it because the code is quite simple.
 */
struct folio *damon_get_folio(unsigned long pfn)
{
	struct page *page = pfn_to_online_page(pfn);
	struct folio *folio;

	if (!page || PageTail(page))
		return NULL;

	folio = page_folio(page);
	if (!folio_test_lru(folio) || !folio_try_get(folio))
		return NULL;
	if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
		folio_put(folio);
		folio = NULL;
	}
	return folio;
}

void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr)
{
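	/* The pte is read via ptep_get() (READ_ONCE() by default) rather than a plain '*pte'. */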
	struct folio *folio = damon_get_folio(pte_pfn(ptep_get(pte)));

	if (!folio)
		return;

	if (ptep_clear_young_notify(vma, addr, pte))
		folio_set_young(folio);

	folio_set_idle(folio);
	folio_put(folio);
}

void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
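	/*
	 * Note that the pmd below is still read with a plain dereference;
	 * the ptep_get() conversion covers pte_t pointers only.
	 */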
	struct folio *folio = damon_get_folio(pmd_pfn(*pmd));

	if (!folio)
		return;

	if (pmdp_clear_young_notify(vma, addr, pmd))
		folio_set_young(folio);

	folio_set_idle(folio);
	folio_put(folio);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}

#define DAMON_MAX_SUBSCORE	(100)
#define DAMON_MAX_AGE_IN_LOG	(32)

int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
			struct damos *s)
{
	unsigned int max_nr_accesses;
	int freq_subscore;
	unsigned int age_in_sec;
	int age_in_log, age_subscore;
	unsigned int freq_weight = s->quota.weight_nr_accesses;
	unsigned int age_weight = s->quota.weight_age;
	int hotness;

	max_nr_accesses = c->attrs.aggr_interval / c->attrs.sample_interval;
	freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE / max_nr_accesses;

	age_in_sec = (unsigned long)r->age * c->attrs.aggr_interval / 1000000;
	for (age_in_log = 0; age_in_log < DAMON_MAX_AGE_IN_LOG && age_in_sec;
			age_in_log++, age_in_sec >>= 1)
		;

	/* If frequency is 0, higher age means it's colder */
	if (freq_subscore == 0)
		age_in_log *= -1;

	/*
	 * Now age_in_log is in [-DAMON_MAX_AGE_IN_LOG, DAMON_MAX_AGE_IN_LOG].
	 * Scale it to be in [0, 100] and set it as age subscore.
	 */
	age_in_log += DAMON_MAX_AGE_IN_LOG;
	age_subscore = age_in_log * DAMON_MAX_SUBSCORE /
		DAMON_MAX_AGE_IN_LOG / 2;

	hotness = (freq_weight * freq_subscore + age_weight * age_subscore);
	if (freq_weight + age_weight)
		hotness /= freq_weight + age_weight;
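
	/*
	 * Illustrative numbers (not part of the original source): with
	 * aggr_interval=100000 and sample_interval=5000 usecs,
	 * max_nr_accesses is 20, so nr_accesses=10 gives freq_subscore=50.
	 * An age_in_sec of 8 gives age_in_log=4, so age_subscore is
	 * (4 + 32) * 100 / 32 / 2 = 56.  With equal weights, hotness is
	 * (50 + 56) / 2 = 53 before the final scaling below.
	 */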
	/*
	 * Transform it to fit in [0, DAMOS_MAX_SCORE]
	 */
	hotness = hotness * DAMOS_MAX_SCORE / DAMON_MAX_SUBSCORE;

	return hotness;
}

int damon_cold_score(struct damon_ctx *c, struct damon_region *r,
			struct damos *s)
{
	int hotness = damon_hot_score(c, r, s);

	/* Return coldness of the region */
	return DAMOS_MAX_SCORE - hotness;
}