Commit ba62d5cf authored by Kemeng Shi, committed by Andrew Morton

writeback: add general function domain_dirty_avail to calculate dirty and avail of domain

Add a general function, domain_dirty_avail(), that calculates the dirty
and avail values of a domain, for either the dirty limit or background
writeback, and for either the global domain or a wb (memcg) domain.

Link: https://lkml.kernel.org/r/20240514125254.142203-3-shikemeng@huaweicloud.com
Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 7c0c629b
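Before the diff, a minimal standalone sketch of the pattern the patch introduces: one helper fills in ->avail and ->dirty, dispatching on whether the throttle control describes the global domain or a memcg domain, with a single flag deciding whether pages under writeback count as dirty. This is an illustration only, not kernel code: everything prefixed toy_/fake_, and every constant, is invented; only the dispatch shape mirrors domain_dirty_avail() below.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the kernel's global counters; values are invented. */
static unsigned long fake_global_dirtyable(void)  { return 1000; }
static unsigned long fake_global_file_dirty(void) { return 120; }
static unsigned long fake_global_writeback(void)  { return 30; }

struct toy_dtc {
        struct toy_dtc *gdtc;           /* NULL means global domain */
        unsigned long avail;
        unsigned long dirty;
};

static bool toy_dtc_is_global(const struct toy_dtc *dtc)
{
        return dtc->gdtc == NULL;
}

/* One helper, one flag: the caller-side shape the patch creates. */
static void toy_domain_dirty_avail(struct toy_dtc *dtc, bool include_writeback)
{
        if (toy_dtc_is_global(dtc)) {
                dtc->avail = fake_global_dirtyable();
                dtc->dirty = fake_global_file_dirty();
                if (include_writeback)
                        dtc->dirty += fake_global_writeback();
        } else {
                /* the real code queries per-wb memcg stats here */
                dtc->avail = 400;
                dtc->dirty = include_writeback ? 80 : 60;
        }
}

int main(void)
{
        struct toy_dtc gdtc = { 0 };                    /* global domain */
        struct toy_dtc mdtc = { .gdtc = &gdtc };        /* memcg domain */

        toy_domain_dirty_avail(&gdtc, true);    /* dirty-limit path */
        toy_domain_dirty_avail(&mdtc, false);   /* background path */
        printf("global: avail=%lu dirty=%lu\n", gdtc.avail, gdtc.dirty);
        printf("memcg:  avail=%lu dirty=%lu\n", mdtc.avail, mdtc.dirty);
        return 0;
}

The include_writeback flag is what lets one helper replace both the dirty-limit callers and the background-writeback callers, which previously carried a duplicated "ignores pages being written" comment next to copy-pasted stat reads.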
@@ -837,6 +837,34 @@ static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
 	mdtc->avail = filepages + min(headroom, other_clean);
 }
 
+static inline bool dtc_is_global(struct dirty_throttle_control *dtc)
+{
+	return mdtc_gdtc(dtc) == NULL;
+}
+
+/*
+ * Dirty background will ignore pages being written as we're trying to
+ * decide whether to put more under writeback.
+ */
+static void domain_dirty_avail(struct dirty_throttle_control *dtc,
+			       bool include_writeback)
+{
+	if (dtc_is_global(dtc)) {
+		dtc->avail = global_dirtyable_memory();
+		dtc->dirty = global_node_page_state(NR_FILE_DIRTY);
+		if (include_writeback)
+			dtc->dirty += global_node_page_state(NR_WRITEBACK);
+	} else {
+		unsigned long filepages = 0, headroom = 0, writeback = 0;
+
+		mem_cgroup_wb_stats(dtc->wb, &filepages, &headroom, &dtc->dirty,
+				    &writeback);
+		if (include_writeback)
+			dtc->dirty += writeback;
+		mdtc_calc_avail(dtc, filepages, headroom);
+	}
+}
+
 /**
  * __wb_calc_thresh - @wb's share of dirty threshold
  * @dtc: dirty_throttle_context of interest
@@ -899,16 +927,9 @@ unsigned long cgwb_calc_thresh(struct bdi_writeback *wb)
 {
 	struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };
 	struct dirty_throttle_control mdtc = { MDTC_INIT(wb, &gdtc) };
-	unsigned long filepages = 0, headroom = 0, writeback = 0;
-
-	gdtc.avail = global_dirtyable_memory();
-	gdtc.dirty = global_node_page_state(NR_FILE_DIRTY) +
-		     global_node_page_state(NR_WRITEBACK);
 
-	mem_cgroup_wb_stats(wb, &filepages, &headroom,
-			    &mdtc.dirty, &writeback);
-	mdtc.dirty += writeback;
-	mdtc_calc_avail(&mdtc, filepages, headroom);
+	domain_dirty_avail(&gdtc, true);
+	domain_dirty_avail(&mdtc, true);
 	domain_dirty_limits(&mdtc);
 
 	return __wb_calc_thresh(&mdtc, mdtc.thresh);
@@ -1719,9 +1740,8 @@ static int balance_dirty_pages(struct bdi_writeback *wb,
 		unsigned long m_bg_thresh = 0;
 
 		nr_dirty = global_node_page_state(NR_FILE_DIRTY);
-		gdtc->avail = global_dirtyable_memory();
-		gdtc->dirty = nr_dirty + global_node_page_state(NR_WRITEBACK);
+		domain_dirty_avail(gdtc, true);
 
 		domain_dirty_limits(gdtc);
 
 		if (unlikely(strictlimit)) {
@@ -1737,17 +1757,11 @@ static int balance_dirty_pages(struct bdi_writeback *wb,
 		}
 
 		if (mdtc) {
-			unsigned long filepages, headroom, writeback;
-
 			/*
 			 * If @wb belongs to !root memcg, repeat the same
 			 * basic calculations for the memcg domain.
 			 */
-			mem_cgroup_wb_stats(wb, &filepages, &headroom,
-					    &mdtc->dirty, &writeback);
-			mdtc->dirty += writeback;
-			mdtc_calc_avail(mdtc, filepages, headroom);
+			domain_dirty_avail(mdtc, true);
 			domain_dirty_limits(mdtc);
 
 			if (unlikely(strictlimit)) {
@@ -2119,14 +2133,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
 	struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
 						     &mdtc_stor : NULL;
 
-	/*
-	 * Similar to balance_dirty_pages() but ignores pages being written
-	 * as we're trying to decide whether to put more under writeback.
-	 */
-	gdtc->avail = global_dirtyable_memory();
-	gdtc->dirty = global_node_page_state(NR_FILE_DIRTY);
-
+	domain_dirty_avail(gdtc, false);
 	domain_dirty_limits(gdtc);
 
 	if (gdtc->dirty > gdtc->bg_thresh)
 		return true;
@@ -2135,13 +2143,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
 		return true;
 
 	if (mdtc) {
-		unsigned long filepages, headroom, writeback;
-
-		mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty,
-				    &writeback);
-		mdtc_calc_avail(mdtc, filepages, headroom);
-		domain_dirty_limits(mdtc);	/* ditto, ignore writeback */
-
+		domain_dirty_avail(mdtc, false);
+		domain_dirty_limits(mdtc);
 		if (mdtc->dirty > mdtc->bg_thresh)
 			return true;