mm/memcg: Convert mem_cgroup_track_foreign_dirty_slowpath() to folio
The page was only being used for the memcg and to gather trace
information, so this is a simple conversion.  The only caller of
mem_cgroup_track_foreign_dirty() will be converted to folios in a
later patch, so doing this now makes that patch simpler.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
This commit is contained in:
parent d21bba2b7d
commit 9d8053fc7a
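Background for the hunks below (not part of the commit): the folio accessors are the canonical ones, and the page accessors resolve through page_folio(). A minimal sketch of that equivalence, assuming only page_folio() and folio_memcg() as they exist in this series; example_page_memcg() is a hypothetical name used for illustration:

/*
 * Illustrative sketch only, not from this patch: page_memcg(page)
 * and folio_memcg(page_folio(page)) return the same memcg, which is
 * why a function can convert struct page to struct folio once at its
 * boundary and pass the folio down -- exactly what this commit does.
 */
static inline struct mem_cgroup *example_page_memcg(struct page *page)
{
	struct folio *folio = page_folio(page);	/* folio of the head page */

	return folio_memcg(folio);
}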
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1599,17 +1599,18 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
 			 unsigned long *pheadroom, unsigned long *pdirty,
 			 unsigned long *pwriteback);
 
-void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
+void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
 					     struct bdi_writeback *wb);
 
 static inline void mem_cgroup_track_foreign_dirty(struct page *page,
 						  struct bdi_writeback *wb)
 {
+	struct folio *folio = page_folio(page);
 	if (mem_cgroup_disabled())
 		return;
 
-	if (unlikely(&page_memcg(page)->css != wb->memcg_css))
-		mem_cgroup_track_foreign_dirty_slowpath(page, wb);
+	if (unlikely(&folio_memcg(folio)->css != wb->memcg_css))
+		mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
 }
 
 void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
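Note how the inline wrapper keeps its struct page signature, so the single existing call site compiles unchanged; only the internal hand-off to the slowpath moves to folios. A hedged sketch of the call-site shape (example_account_dirtied() is a made-up name, not the real caller, which a later patch converts):

/*
 * Hypothetical caller, for illustration only. It still passes a
 * struct page; mem_cgroup_track_foreign_dirty() above derives the
 * folio via page_folio() and, on a memcg/writeback mismatch, calls
 * the folio-taking slowpath.
 */
static void example_account_dirtied(struct page *page,
				    struct bdi_writeback *wb)
{
	mem_cgroup_track_foreign_dirty(page, wb);
}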
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -236,9 +236,9 @@ TRACE_EVENT(inode_switch_wbs,
 
 TRACE_EVENT(track_foreign_dirty,
 
-	TP_PROTO(struct page *page, struct bdi_writeback *wb),
+	TP_PROTO(struct folio *folio, struct bdi_writeback *wb),
 
-	TP_ARGS(page, wb),
+	TP_ARGS(folio, wb),
 
 	TP_STRUCT__entry(
 		__array(char,		name, 32)
@@ -250,7 +250,7 @@ TRACE_EVENT(track_foreign_dirty,
 	),
 
 	TP_fast_assign(
-		struct address_space *mapping = page_mapping(page);
+		struct address_space *mapping = folio_mapping(folio);
 		struct inode *inode = mapping ? mapping->host : NULL;
 
 		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
@@ -258,7 +258,7 @@ TRACE_EVENT(track_foreign_dirty,
 		__entry->ino = inode ? inode->i_ino : 0;
 		__entry->memcg_id = wb->memcg_css->id;
 		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
-		__entry->page_cgroup_ino = cgroup_ino(page_memcg(page)->css.cgroup);
+		__entry->page_cgroup_ino = cgroup_ino(folio_memcg(folio)->css.cgroup);
 	),
 
 	TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
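In the trace event, folio_mapping() stands in for page_mapping(); the two agree because page_mapping() is itself defined in terms of the page's folio. A one-line sketch of that relationship (example_mapping() is hypothetical):

/*
 * Illustrative only: page_mapping(page) behaves like
 * folio_mapping(page_folio(page)), so recording the inode from the
 * folio preserves what the tracepoint reported before.
 */
static struct address_space *example_mapping(struct page *page)
{
	return folio_mapping(page_folio(page));
}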
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4516,17 +4516,17 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
  * As being wrong occasionally doesn't matter, updates and accesses to the
  * records are lockless and racy.
  */
-void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
+void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
 					     struct bdi_writeback *wb)
 {
-	struct mem_cgroup *memcg = page_memcg(page);
+	struct mem_cgroup *memcg = folio_memcg(folio);
 	struct memcg_cgwb_frn *frn;
 	u64 now = get_jiffies_64();
 	u64 oldest_at = now;
 	int oldest = -1;
 	int i;
 
-	trace_track_foreign_dirty(page, wb);
+	trace_track_foreign_dirty(folio, wb);
 
 	/*
 	 * Pick the slot to use. If there is already a slot for @wb, keep