writeback: add general function domain_dirty_avail to calculate dirty and avail of domain

Add a general function, domain_dirty_avail(), to calculate dirty and avail
for either the dirty limit or background writeback, in either the global
domain or a wb domain.

Link: https://lkml.kernel.org/r/20240514125254.142203-3-shikemeng@huaweicloud.com
Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
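
Taken together, the hunks below give this call matrix: paths that throttle
writers count pages already under writeback as dirty, while
wb_over_bg_thresh() ignores them because it is deciding whether to put more
pages under writeback:

	/* balance_dirty_pages() and cgwb_calc_thresh(): throttling view */
	domain_dirty_avail(gdtc, true);
	domain_dirty_avail(mdtc, true);

	/* wb_over_bg_thresh(): background view, writeback ignored */
	domain_dirty_avail(gdtc, false);
	domain_dirty_avail(mdtc, false);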

--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -837,6 +837,34 @@ static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
 	mdtc->avail = filepages + min(headroom, other_clean);
 }
 
+static inline bool dtc_is_global(struct dirty_throttle_control *dtc)
+{
+	return mdtc_gdtc(dtc) == NULL;
+}
+
+/*
+ * Dirty background will ignore pages being written as we're trying to
+ * decide whether to put more under writeback.
+ */
+static void domain_dirty_avail(struct dirty_throttle_control *dtc,
+			       bool include_writeback)
+{
+	if (dtc_is_global(dtc)) {
+		dtc->avail = global_dirtyable_memory();
+		dtc->dirty = global_node_page_state(NR_FILE_DIRTY);
+		if (include_writeback)
+			dtc->dirty += global_node_page_state(NR_WRITEBACK);
+	} else {
+		unsigned long filepages = 0, headroom = 0, writeback = 0;
+
+		mem_cgroup_wb_stats(dtc->wb, &filepages, &headroom, &dtc->dirty,
+				    &writeback);
+		if (include_writeback)
+			dtc->dirty += writeback;
+		mdtc_calc_avail(dtc, filepages, headroom);
+	}
+}
+
 /**
  * __wb_calc_thresh - @wb's share of dirty threshold
  * @dtc: dirty_throttle_context of interest
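
As a standalone illustration of the helper's two-axis dispatch (global vs.
memcg domain, writeback counted or not), here is a minimal userspace sketch;
the struct layout and stub counters are hypothetical stand-ins, only the
control flow mirrors the hunk above:

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for struct dirty_throttle_control: a NULL gdtc back-pointer
	 * marks the global domain, mirroring mdtc_gdtc(dtc) == NULL. */
	struct dtc {
		struct dtc *gdtc;
		unsigned long dirty;
		unsigned long avail;
	};

	/* Stubbed counters standing in for global_node_page_state() etc. */
	static const unsigned long file_dirty = 100, writeback = 40,
				   dirtyable = 1000;

	static bool dtc_is_global(const struct dtc *dtc)
	{
		return dtc->gdtc == NULL;
	}

	static void domain_dirty_avail(struct dtc *dtc, bool include_writeback)
	{
		if (dtc_is_global(dtc)) {
			dtc->avail = dirtyable;
			dtc->dirty = file_dirty;
			if (include_writeback)
				dtc->dirty += writeback;
		} else {
			/* the memcg branch would pull per-wb stats via
			 * mem_cgroup_wb_stats() and call mdtc_calc_avail() */
		}
	}

	int main(void)
	{
		struct dtc gdtc = { 0 };

		domain_dirty_avail(&gdtc, true);	/* throttling view */
		printf("dirty=%lu avail=%lu\n", gdtc.dirty, gdtc.avail); /* 140 1000 */

		domain_dirty_avail(&gdtc, false);	/* background view */
		printf("dirty=%lu avail=%lu\n", gdtc.dirty, gdtc.avail); /* 100 1000 */
		return 0;
	}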
@@ -899,16 +927,9 @@ unsigned long cgwb_calc_thresh(struct bdi_writeback *wb)
 {
 	struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };
 	struct dirty_throttle_control mdtc = { MDTC_INIT(wb, &gdtc) };
-	unsigned long filepages = 0, headroom = 0, writeback = 0;
 
-	gdtc.avail = global_dirtyable_memory();
-	gdtc.dirty = global_node_page_state(NR_FILE_DIRTY) +
-		     global_node_page_state(NR_WRITEBACK);
-
-	mem_cgroup_wb_stats(wb, &filepages, &headroom,
-			    &mdtc.dirty, &writeback);
-	mdtc.dirty += writeback;
-	mdtc_calc_avail(&mdtc, filepages, headroom);
+	domain_dirty_avail(&gdtc, true);
+	domain_dirty_avail(&mdtc, true);
 	domain_dirty_limits(&mdtc);
 
 	return __wb_calc_thresh(&mdtc, mdtc.thresh);
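
For the memcg side, domain_dirty_avail(&mdtc, true) performs what the removed
block above did (modulo taking the wb from dtc->wb rather than the function
argument):

	unsigned long filepages = 0, headroom = 0, writeback = 0;

	mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc.dirty, &writeback);
	mdtc.dirty += writeback;	/* include_writeback == true */
	mdtc_calc_avail(&mdtc, filepages, headroom);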
@@ -1719,9 +1740,8 @@ static int balance_dirty_pages(struct bdi_writeback *wb,
 		unsigned long m_bg_thresh = 0;
 
 		nr_dirty = global_node_page_state(NR_FILE_DIRTY);
-		gdtc->avail = global_dirtyable_memory();
-		gdtc->dirty = nr_dirty + global_node_page_state(NR_WRITEBACK);
 
+		domain_dirty_avail(gdtc, true);
 		domain_dirty_limits(gdtc);
 
 		if (unlikely(strictlimit)) {
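
One subtlety: domain_dirty_avail(gdtc, true) re-reads NR_FILE_DIRTY
internally rather than reusing the nr_dirty value read just above, so the two
reads can differ by a small racy margin, which the writeback code tolerates.
After the change, the call performs the equivalent of the removed
assignments:

	gdtc->avail = global_dirtyable_memory();
	gdtc->dirty = global_node_page_state(NR_FILE_DIRTY) +
		      global_node_page_state(NR_WRITEBACK);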
@@ -1737,17 +1757,11 @@ static int balance_dirty_pages(struct bdi_writeback *wb,
 		}
 
 		if (mdtc) {
-			unsigned long filepages, headroom, writeback;
-
 			/*
 			 * If @wb belongs to !root memcg, repeat the same
 			 * basic calculations for the memcg domain.
 			 */
-			mem_cgroup_wb_stats(wb, &filepages, &headroom,
-					    &mdtc->dirty, &writeback);
-			mdtc->dirty += writeback;
-			mdtc_calc_avail(mdtc, filepages, headroom);
-
+			domain_dirty_avail(mdtc, true);
 			domain_dirty_limits(mdtc);
 
 			if (unlikely(strictlimit)) {
@@ -2119,14 +2133,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
 	struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
 						     &mdtc_stor : NULL;
 
-	/*
-	 * Similar to balance_dirty_pages() but ignores pages being written
-	 * as we're trying to decide whether to put more under writeback.
-	 */
-	gdtc->avail = global_dirtyable_memory();
-	gdtc->dirty = global_node_page_state(NR_FILE_DIRTY);
+	domain_dirty_avail(gdtc, false);
 	domain_dirty_limits(gdtc);
-
 	if (gdtc->dirty > gdtc->bg_thresh)
 		return true;
 
@@ -2135,13 +2143,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
 		return true;
 
 	if (mdtc) {
-		unsigned long filepages, headroom, writeback;
-
-		mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty,
-				    &writeback);
-		mdtc_calc_avail(mdtc, filepages, headroom);
-
+		domain_dirty_avail(mdtc, false);
 		domain_dirty_limits(mdtc);	/* ditto, ignore writeback */
 
 		if (mdtc->dirty > mdtc->bg_thresh)
 			return true;
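
The include_writeback == false expansion used here still fetches the
writeback count from mem_cgroup_wb_stats() but deliberately leaves it out of
->dirty, matching the block removed above:

	unsigned long filepages = 0, headroom = 0, writeback = 0;

	mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty, &writeback);
	/* include_writeback == false: writeback not folded into ->dirty */
	mdtc_calc_avail(mdtc, filepages, headroom);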