// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	Andrew Morton
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/pagevec.h>
#include <linux/timer.h>
#include <linux/sched/rt.h>
#include <linux/sched/signal.h>
#include <linux/mm_inline.h>
#include <trace/events/writeback.h>

#include "internal.h"

/*
 * Sleep at most 200ms at a time in balance_dirty_pages().
 */
#define MAX_PAUSE		max(HZ/5, 1)

/*
 * Try to keep balance_dirty_pages() call intervals higher than this many
 * pages, by raising the pause time to max_pause when the poll interval
 * would otherwise fall below it.
 */
#define DIRTY_POLL_THRESH	(128 >> (PAGE_SHIFT - 10))
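
/*
 * Worked example (illustrative; the values depend on the kernel config):
 * with HZ == 1000, MAX_PAUSE is max(1000/5, 1) == 200 jiffies == 200ms,
 * and with 4KB pages (PAGE_SHIFT == 12) DIRTY_POLL_THRESH is
 * 128 >> 2 == 32 pages, i.e. 128KB dirtied between polls.
 */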

/*
 * Estimate write bandwidth at 200ms intervals.
 */
#define BANDWIDTH_INTERVAL	max(HZ/5, 1)

#define RATELIMIT_CALC_SHIFT	10

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via writeback threads) at this percentage
 */
static int dirty_background_ratio = 10;

/*
 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
 * dirty_background_ratio * the amount of dirtyable memory
 */
static unsigned long dirty_background_bytes;

/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
static int vm_highmem_is_dirtyable;

/*
 * The generator of dirty data starts writeback at this percentage
 */
static int vm_dirty_ratio = 20;

/*
 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
 * vm_dirty_ratio * the amount of dirtyable memory
 */
static unsigned long vm_dirty_bytes;

/*
 * The interval between `kupdate'-style writebacks
 */
unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */

EXPORT_SYMBOL_GPL(dirty_writeback_interval);

/*
 * The longest time for which data is allowed to remain dirty
 */
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */

struct wb_domain global_wb_domain;

/* consolidated parameters for balance_dirty_pages() and its subroutines */
struct dirty_throttle_control {
#ifdef CONFIG_CGROUP_WRITEBACK
	struct wb_domain	*dom;
	struct dirty_throttle_control *gdtc;	/* only set in memcg dtc's */
#endif
	struct bdi_writeback	*wb;
	struct fprop_local_percpu *wb_completions;

	unsigned long		avail;		/* dirtyable */
	unsigned long		dirty;		/* file_dirty + write + nfs */
	unsigned long		thresh;		/* dirty threshold */
	unsigned long		bg_thresh;	/* dirty background threshold */

	unsigned long		wb_dirty;	/* per-wb counterparts */
	unsigned long		wb_thresh;
	unsigned long		wb_bg_thresh;

	unsigned long		pos_ratio;
};

/*
 * Length of period for aging writeout fractions of bdis. This is an
 * arbitrarily chosen number. The longer the period, the slower fractions will
 * reflect changes in current writeout rate.
 */
#define VM_COMPLETIONS_PERIOD_LEN (3*HZ)

#ifdef CONFIG_CGROUP_WRITEBACK

#define GDTC_INIT(__wb)		.wb = (__wb),				\
				.dom = &global_wb_domain,		\
				.wb_completions = &(__wb)->completions

#define GDTC_INIT_NO_WB		.dom = &global_wb_domain

#define MDTC_INIT(__wb, __gdtc)	.wb = (__wb),				\
				.dom = mem_cgroup_wb_domain(__wb),	\
				.wb_completions = &(__wb)->memcg_completions, \
				.gdtc = __gdtc

static bool mdtc_valid(struct dirty_throttle_control *dtc)
{
	return dtc->dom;
}

static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
{
	return dtc->dom;
}

static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
{
	return mdtc->gdtc;
}

static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
{
	return &wb->memcg_completions;
}

static void wb_min_max_ratio(struct bdi_writeback *wb,
			     unsigned long *minp, unsigned long *maxp)
{
	unsigned long this_bw = READ_ONCE(wb->avg_write_bandwidth);
	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
	unsigned long long min = wb->bdi->min_ratio;
	unsigned long long max = wb->bdi->max_ratio;

	/*
	 * @wb may already be clean by the time control reaches here and
	 * the total may not include its bw.
	 */
	if (this_bw < tot_bw) {
		if (min) {
			min *= this_bw;
			min = div64_ul(min, tot_bw);
		}
		if (max < 100 * BDI_RATIO_SCALE) {
			max *= this_bw;
			max = div64_ul(max, tot_bw);
		}
	}

	*minp = min;
	*maxp = max;
}
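
/*
 * Illustrative example (assumed numbers): a wb writing this_bw = 50MB/s
 * on a bdi whose writebacks total tot_bw = 200MB/s gets min and max
 * scaled by 50/200, so a bdi-wide min_ratio of 10% becomes an effective
 * 2.5% share for this wb; max is scaled the same way unless it already
 * sits at the 100% ceiling.
 */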

#else	/* CONFIG_CGROUP_WRITEBACK */

#define GDTC_INIT(__wb)		.wb = (__wb),				\
				.wb_completions = &(__wb)->completions
#define GDTC_INIT_NO_WB
#define MDTC_INIT(__wb, __gdtc)

static bool mdtc_valid(struct dirty_throttle_control *dtc)
{
	return false;
}

static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
{
	return &global_wb_domain;
}

static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
{
	return NULL;
}

static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
{
	return NULL;
}

static void wb_min_max_ratio(struct bdi_writeback *wb,
			     unsigned long *minp, unsigned long *maxp)
{
	*minp = wb->bdi->min_ratio;
	*maxp = wb->bdi->max_ratio;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

/*
 * In a memory zone, there is a certain amount of pages we consider
 * available for the page cache, which is essentially the number of
 * free and reclaimable pages, minus some zone reserves to protect
 * lowmem and the ability to uphold the zone's watermarks without
 * requiring writeback.
 *
 * This number of dirtyable pages is the base value; the user-configurable
 * dirty ratio determines, as a fraction of it, the effective number of
 * pages that are allowed to be actually dirtied. This applies per
 * individual zone, or globally by using the sum of dirtyable pages over
 * all zones.
 *
 * Because the user is allowed to specify the dirty limit globally as
 * absolute number of bytes, calculating the per-zone dirty limit can
 * require translating the configured limit into a percentage of
 * global dirtyable memory first.
 */

/**
 * node_dirtyable_memory - number of dirtyable pages in a node
 * @pgdat: the node
 *
 * Return: the node's number of pages potentially available for dirty
 * page cache. This is the base value for the per-node dirty limits.
 */
static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
{
	unsigned long nr_pages = 0;
	int z;

	for (z = 0; z < MAX_NR_ZONES; z++) {
		struct zone *zone = pgdat->node_zones + z;

		if (!populated_zone(zone))
			continue;

		nr_pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	/*
	 * Pages reserved for the kernel should not be considered
	 * dirtyable, to prevent a situation where reclaim has to
	 * clean pages in order to balance the zones.
	 */
	nr_pages -= min(nr_pages, pgdat->totalreserve_pages);

	nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE);
	nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE);

	return nr_pages;
}
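
/*
 * Illustrative arithmetic (assumed numbers): a node with 1,000,000 free
 * pages, totalreserve_pages = 20,000 and 500,000 pages on the file LRUs
 * is considered to have 1,000,000 - 20,000 + 500,000 = 1,480,000
 * dirtyable pages.
 */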

static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;
	int i;

	for_each_node_state(node, N_HIGH_MEMORY) {
		for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
			struct zone *z;
			unsigned long nr_pages;

			if (!is_highmem_idx(i))
				continue;

			z = &NODE_DATA(node)->node_zones[i];
			if (!populated_zone(z))
				continue;

			nr_pages = zone_page_state(z, NR_FREE_PAGES);
			/* watch for underflows */
			nr_pages -= min(nr_pages, high_wmark_pages(z));
			nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
			nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
			x += nr_pages;
		}
	}

	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the number of the total dirtyable memory. This can only
	 * occur in very strange VM situations but we want to make sure
	 * that this does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}

/**
 * global_dirtyable_memory - number of globally dirtyable pages
 *
 * Return: the global number of pages potentially available for dirty
 * page cache. This is the base value for the global dirty limits.
 */
static unsigned long global_dirtyable_memory(void)
{
	unsigned long x;

	x = global_zone_page_state(NR_FREE_PAGES);
	/*
	 * Pages reserved for the kernel should not be considered
	 * dirtyable, to prevent a situation where reclaim has to
	 * clean pages in order to balance the zones.
	 */
	x -= min(x, totalreserve_pages);

	x += global_node_page_state(NR_INACTIVE_FILE);
	x += global_node_page_state(NR_ACTIVE_FILE);

	if (!vm_highmem_is_dirtyable)
		x -= highmem_dirtyable_memory(x);

	return x + 1;	/* Ensure that we never return 0 */
}

/**
 * domain_dirty_limits - calculate thresh and bg_thresh for a wb_domain
 * @dtc: dirty_throttle_control of interest
 *
 * Calculate @dtc->thresh and ->bg_thresh considering
 * vm_dirty_{bytes|ratio} and dirty_background_{bytes|ratio}. The caller
 * must ensure that @dtc->avail is set before calling this function. The
 * dirty limits will be lifted by 1/4 for real-time tasks.
 */
static void domain_dirty_limits(struct dirty_throttle_control *dtc)
{
	const unsigned long available_memory = dtc->avail;
	struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
	unsigned long bytes = vm_dirty_bytes;
	unsigned long bg_bytes = dirty_background_bytes;
	/* convert ratios to per-PAGE_SIZE for higher precision */
	unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
	unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
	unsigned long thresh;
	unsigned long bg_thresh;
	struct task_struct *tsk;

	/* gdtc is !NULL iff @dtc is for memcg domain */
	if (gdtc) {
		unsigned long global_avail = gdtc->avail;

		/*
		 * The byte settings can't be applied directly to memcg
		 * domains.  Convert them to ratios by scaling against
		 * globally available memory.  As the ratios are in
		 * per-PAGE_SIZE, they can be obtained by dividing bytes by
		 * number of pages.
		 */
		if (bytes)
			ratio = min(DIV_ROUND_UP(bytes, global_avail),
				    PAGE_SIZE);
		if (bg_bytes)
			bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
				       PAGE_SIZE);
		bytes = bg_bytes = 0;
	}

	if (bytes)
		thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
	else
		thresh = (ratio * available_memory) / PAGE_SIZE;

	if (bg_bytes)
		bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
	else
		bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;

	tsk = current;
	if (rt_task(tsk)) {
		bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
		thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
	}
	/*
	 * Dirty throttling logic assumes the limits in page units fit into
	 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
	 */
	if (thresh > UINT_MAX)
		thresh = UINT_MAX;
	/* This makes sure bg_thresh is within 32-bits as well */
	if (bg_thresh >= thresh)
		bg_thresh = thresh / 2;
	dtc->thresh = thresh;
	dtc->bg_thresh = bg_thresh;

	/* we should eventually report the domain in the TP */
	if (!gdtc)
		trace_global_dirty_state(bg_thresh, thresh);
}
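
/*
 * Worked example for the per-PAGE_SIZE scaling above (illustrative,
 * assuming 4KB pages): vm_dirty_ratio = 20 gives ratio = 20 * 4096 / 100
 * = 819.  With available_memory = 1,000,000 pages, thresh =
 * 819 * 1,000,000 / 4096 ~= 199,951 pages, i.e. roughly 20% with
 * sub-percent precision preserved.
 */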

/**
 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 * @pbackground: out parameter for bg_thresh
 * @pdirty: out parameter for thresh
 *
 * Calculate bg_thresh and thresh for global_wb_domain. See
 * domain_dirty_limits() for details.
 */
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
	struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };

	gdtc.avail = global_dirtyable_memory();
	domain_dirty_limits(&gdtc);

	*pbackground = gdtc.bg_thresh;
	*pdirty = gdtc.thresh;
}
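
/*
 * Usage sketch (illustrative): a caller that needs the current global
 * thresholds simply does
 *
 *	unsigned long background_thresh, dirty_thresh;
 *
 *	global_dirty_limits(&background_thresh, &dirty_thresh);
 *
 * which is exactly how bdi_ratio_from_pages() and bdi_get_bytes() below
 * consume it.
 */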

/**
 * node_dirty_limit - maximum number of dirty pages allowed in a node
 * @pgdat: the node
 *
 * Return: the maximum number of dirty pages allowed in a node, based
 * on the node's dirtyable memory.
 */
static unsigned long node_dirty_limit(struct pglist_data *pgdat)
{
	unsigned long node_memory = node_dirtyable_memory(pgdat);
	struct task_struct *tsk = current;
	unsigned long dirty;

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
			node_memory / global_dirtyable_memory();
	else
		dirty = vm_dirty_ratio * node_memory / 100;

	if (rt_task(tsk))
		dirty += dirty / 4;

	/*
	 * Dirty throttling logic assumes the limits in page units fit into
	 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
	 */
	return min_t(unsigned long, dirty, UINT_MAX);
}
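
/*
 * Illustrative arithmetic (assumed numbers): a node with node_memory =
 * 4,000,000 dirtyable pages and vm_dirty_ratio = 20 gets a limit of
 * 800,000 pages; for a real-time task that is lifted by a quarter to
 * 1,000,000 pages.
 */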

/**
 * node_dirty_ok - tells whether a node is within its dirty limits
 * @pgdat: the node to check
 *
 * Return: %true when the dirty pages in @pgdat are within the node's
 * dirty limit, %false if the limit is exceeded.
 */
bool node_dirty_ok(struct pglist_data *pgdat)
{
	unsigned long limit = node_dirty_limit(pgdat);
	unsigned long nr_pages = 0;

	nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);
	nr_pages += node_page_state(pgdat, NR_WRITEBACK);

	return nr_pages <= limit;
}

#ifdef CONFIG_SYSCTL
static int dirty_background_ratio_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_bytes = 0;
	return ret;
}

static int dirty_background_bytes_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;
	unsigned long old_bytes = dirty_background_bytes;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write) {
		if (DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE) >
								UINT_MAX) {
			dirty_background_bytes = old_bytes;
			return -ERANGE;
		}
		dirty_background_ratio = 0;
	}
	return ret;
}

static int dirty_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
		writeback_set_ratelimit();
		vm_dirty_bytes = 0;
	}
	return ret;
}

static int dirty_bytes_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned long old_bytes = vm_dirty_bytes;
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
		if (DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) > UINT_MAX) {
			vm_dirty_bytes = old_bytes;
			return -ERANGE;
		}
		writeback_set_ratelimit();
		vm_dirty_ratio = 0;
	}
	return ret;
}
#endif

static unsigned long wp_next_time(unsigned long cur_time)
{
	cur_time += VM_COMPLETIONS_PERIOD_LEN;
	/* 0 has a special meaning (period updates are off), avoid it */
	if (!cur_time)
		return 1;
	return cur_time;
}

static void wb_domain_writeout_add(struct wb_domain *dom,
				   struct fprop_local_percpu *completions,
				   unsigned int max_prop_frac, long nr)
{
	__fprop_add_percpu_max(&dom->completions, completions,
			       max_prop_frac, nr);
	/* First event after period switching was turned off? */
	if (unlikely(!dom->period_time)) {
		/*
		 * We can race with other wb_domain_writeout_add() calls here
		 * but it does not cause any harm since the resulting time
		 * when the timer will fire and what is in
		 * writeout_period_time will be roughly the same.
		 */
		dom->period_time = wp_next_time(jiffies);
		mod_timer(&dom->period_timer, dom->period_time);
	}
}

/*
 * Increment @wb's writeout completion count and the global writeout
 * completion count. Called from __folio_end_writeback().
 */
static inline void __wb_writeout_add(struct bdi_writeback *wb, long nr)
{
	struct wb_domain *cgdom;

	wb_stat_mod(wb, WB_WRITTEN, nr);
	wb_domain_writeout_add(&global_wb_domain, &wb->completions,
			       wb->bdi->max_prop_frac, nr);

	cgdom = mem_cgroup_wb_domain(wb);
	if (cgdom)
		wb_domain_writeout_add(cgdom, wb_memcg_completions(wb),
				       wb->bdi->max_prop_frac, nr);
}

void wb_writeout_inc(struct bdi_writeback *wb)
{
	unsigned long flags;

	local_irq_save(flags);
	__wb_writeout_add(wb, 1);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(wb_writeout_inc);

/*
 * On idle system, we can be called long after we scheduled because we use
 * deferred timers so count with missed periods.
 */
static void writeout_period(struct timer_list *t)
{
	struct wb_domain *dom = from_timer(dom, t, period_timer);
	int miss_periods = (jiffies - dom->period_time) /
						 VM_COMPLETIONS_PERIOD_LEN;

	if (fprop_new_period(&dom->completions, miss_periods + 1)) {
		dom->period_time = wp_next_time(dom->period_time +
				miss_periods * VM_COMPLETIONS_PERIOD_LEN);
		mod_timer(&dom->period_timer, dom->period_time);
	} else {
		/*
		 * Aging has zeroed all fractions. Stop wasting CPU on period
		 * updates.
		 */
		dom->period_time = 0;
	}
}

int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
{
	memset(dom, 0, sizeof(*dom));

	spin_lock_init(&dom->lock);

	timer_setup(&dom->period_timer, writeout_period, TIMER_DEFERRABLE);

	dom->dirty_limit_tstamp = jiffies;

	return fprop_global_init(&dom->completions, gfp);
}

#ifdef CONFIG_CGROUP_WRITEBACK
void wb_domain_exit(struct wb_domain *dom)
{
	del_timer_sync(&dom->period_timer);
	fprop_global_destroy(&dom->completions);
}
#endif

/*
 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
 * registered backing devices, which, for obvious reasons, can not
 * exceed 100%.
 */
static unsigned int bdi_min_ratio;

static int bdi_check_pages_limit(unsigned long pages)
{
	unsigned long max_dirty_pages = global_dirtyable_memory();

	if (pages > max_dirty_pages)
		return -EINVAL;

	return 0;
}

static unsigned long bdi_ratio_from_pages(unsigned long pages)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long ratio;

	global_dirty_limits(&background_thresh, &dirty_thresh);
	ratio = div64_u64(pages * 100ULL * BDI_RATIO_SCALE, dirty_thresh);

	return ratio;
}
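
/*
 * Sketch of the arithmetic above (assumed numbers): if dirty_thresh is
 * 262,144 pages (1GB with 4KB pages) and the caller asks for 65,536
 * pages (256MB), ratio = 65536 * 100 * BDI_RATIO_SCALE / 262144 =
 * 25 * BDI_RATIO_SCALE, i.e. a 25% share in the scaled units used
 * internally.
 */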

static u64 bdi_get_bytes(unsigned int ratio)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	u64 bytes;

	global_dirty_limits(&background_thresh, &dirty_thresh);
	bytes = (dirty_thresh * PAGE_SIZE * ratio) / BDI_RATIO_SCALE / 100;

	return bytes;
}

static int __bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	unsigned int delta;
	int ret = 0;

	/* callers pass min_ratio already scaled by BDI_RATIO_SCALE */
	if (min_ratio > 100 * BDI_RATIO_SCALE)
		return -EINVAL;

	spin_lock_bh(&bdi_lock);
	if (min_ratio > bdi->max_ratio) {
		ret = -EINVAL;
	} else {
		if (min_ratio < bdi->min_ratio) {
			delta = bdi->min_ratio - min_ratio;
			bdi_min_ratio -= delta;
			bdi->min_ratio = min_ratio;
		} else {
			delta = min_ratio - bdi->min_ratio;
			if (bdi_min_ratio + delta < 100 * BDI_RATIO_SCALE) {
				bdi_min_ratio += delta;
				bdi->min_ratio = min_ratio;
			} else {
				ret = -EINVAL;
			}
		}
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}

static int __bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio)
{
	int ret = 0;

	if (max_ratio > 100 * BDI_RATIO_SCALE)
		return -EINVAL;

	spin_lock_bh(&bdi_lock);
	if (bdi->min_ratio > max_ratio) {
		ret = -EINVAL;
	} else {
		bdi->max_ratio = max_ratio;
		/* max_ratio is in BDI_RATIO_SCALE units, divide it back out */
		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) /
						(100 * BDI_RATIO_SCALE);
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}

int bdi_set_min_ratio_no_scale(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	return __bdi_set_min_ratio(bdi, min_ratio);
}

int bdi_set_max_ratio_no_scale(struct backing_dev_info *bdi, unsigned int max_ratio)
{
	return __bdi_set_max_ratio(bdi, max_ratio);
}

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	return __bdi_set_min_ratio(bdi, min_ratio * BDI_RATIO_SCALE);
}

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio)
{
	return __bdi_set_max_ratio(bdi, max_ratio * BDI_RATIO_SCALE);
}
EXPORT_SYMBOL(bdi_set_max_ratio);

u64 bdi_get_min_bytes(struct backing_dev_info *bdi)
{
	return bdi_get_bytes(bdi->min_ratio);
}

int bdi_set_min_bytes(struct backing_dev_info *bdi, u64 min_bytes)
{
	int ret;
	unsigned long pages = min_bytes >> PAGE_SHIFT;
	unsigned long min_ratio;

	ret = bdi_check_pages_limit(pages);
	if (ret)
		return ret;

	min_ratio = bdi_ratio_from_pages(pages);
	return __bdi_set_min_ratio(bdi, min_ratio);
}

u64 bdi_get_max_bytes(struct backing_dev_info *bdi)
{
	return bdi_get_bytes(bdi->max_ratio);
}

int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes)
{
	int ret;
	unsigned long pages = max_bytes >> PAGE_SHIFT;
	unsigned long max_ratio;

	ret = bdi_check_pages_limit(pages);
	if (ret)
		return ret;

	max_ratio = bdi_ratio_from_pages(pages);
	return __bdi_set_max_ratio(bdi, max_ratio);
}

int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit)
{
	if (strict_limit > 1)
		return -EINVAL;

	spin_lock_bh(&bdi_lock);
	if (strict_limit)
		bdi->capabilities |= BDI_CAP_STRICTLIMIT;
	else
		bdi->capabilities &= ~BDI_CAP_STRICTLIMIT;
	spin_unlock_bh(&bdi_lock);

	return 0;
}

static unsigned long dirty_freerun_ceiling(unsigned long thresh,
					   unsigned long bg_thresh)
{
	return (thresh + bg_thresh) / 2;
}
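
/*
 * For example (illustrative numbers): with bg_thresh at 10% and thresh
 * at 20% of dirtyable memory, the freerun ceiling sits at 15%; below it
 * tasks dirty pages without being throttled at all.
 */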

static unsigned long hard_dirty_limit(struct wb_domain *dom,
				      unsigned long thresh)
{
	return max(thresh, dom->dirty_limit);
}

/*
 * Memory which can be further allocated to a memcg domain is capped by
 * system-wide clean memory excluding the amount being used in the domain.
 */
static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
			    unsigned long filepages, unsigned long headroom)
{
	struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc);
	unsigned long clean = filepages - min(filepages, mdtc->dirty);
	unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty);
	unsigned long other_clean = global_clean - min(global_clean, clean);

	mdtc->avail = filepages + min(headroom, other_clean);
}

/**
 * __wb_calc_thresh - @wb's share of dirty throttling threshold
 * @dtc: dirty_throttle_control of interest
 *
 * Note that balance_dirty_pages() will only seriously take it as a hard limit
 * when sleeping max_pause per page is not enough to keep the dirty pages under
 * control. For example, when the device is completely stalled due to some error
 * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
 * In other normal situations, it acts more gently by throttling the tasks more
 * (rather than completely blocking them) when the wb dirty pages go high.
 *
 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
 * - starving fast devices
 * - piling up dirty pages (that will take long time to sync) on slow devices
 *
 * The wb's share of dirty limit will be adapting to its throughput and
 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 *
 * Return: @wb's dirty limit in pages. The term "dirty" in the context of
 * dirty balancing includes all PG_dirty and PG_writeback pages.
 */
static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
{
	struct wb_domain *dom = dtc_dom(dtc);
	unsigned long thresh = dtc->thresh;
	u64 wb_thresh;
	unsigned long numerator, denominator;
	unsigned long wb_min_ratio, wb_max_ratio;

	/*
	 * Calculate this BDI's share of the thresh ratio.
	 */
	fprop_fraction_percpu(&dom->completions, dtc->wb_completions,
			      &numerator, &denominator);

	wb_thresh = (thresh * (100 * BDI_RATIO_SCALE - bdi_min_ratio)) / (100 * BDI_RATIO_SCALE);
	wb_thresh *= numerator;
	wb_thresh = div64_ul(wb_thresh, denominator);

	wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio);

	wb_thresh += (thresh * wb_min_ratio) / (100 * BDI_RATIO_SCALE);
	if (wb_thresh > (thresh * wb_max_ratio) / (100 * BDI_RATIO_SCALE))
		wb_thresh = thresh * wb_max_ratio / (100 * BDI_RATIO_SCALE);

	return wb_thresh;
}

unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
{
	struct dirty_throttle_control gdtc = { GDTC_INIT(wb),
					       .thresh = thresh };
	return __wb_calc_thresh(&gdtc);
}

/*
 *                           setpoint - dirty 3
 *        f(dirty) := 1.0 + (----------------)
 *                           limit - setpoint
 *
 * it's a 3rd order polynomial subject to
 *
 * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
 * (2) f(setpoint) = 1.0 => the balance point
 * (3) f(limit)    = 0   => the hard limit
 * (4) df/dx      <= 0   => negative feedback control
 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
 *     => fast response on large errors; small oscillation near setpoint
 */
static long long pos_ratio_polynom(unsigned long setpoint,
				   unsigned long dirty,
				   unsigned long limit)
{
	long long pos_ratio;
	long x;

	/* "| 1" guards against division by zero when limit == setpoint */
	x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
		      (limit - setpoint) | 1);
	pos_ratio = x;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;

	return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
}
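
/*
 * Worked example (illustrative numbers): with setpoint = 100, limit = 200
 * and dirty = 50, x corresponds to (100 - 50) / (200 - 100) = 0.5, so
 * f = 1.0 + 0.5^3 = 1.125; with RATELIMIT_CALC_SHIFT == 10 that is
 * pos_ratio = 1152 (1.125 * 1024), gently speeding up dirtiers that are
 * below the setpoint.
 */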
9475a537485SMaxim Patlasov
9485a537485SMaxim Patlasov /*
9496c14ae1eSWu Fengguang * Dirty position control.
9506c14ae1eSWu Fengguang *
9516c14ae1eSWu Fengguang * (o) global/bdi setpoints
9526c14ae1eSWu Fengguang *
953de1fff37STejun Heo * We want the dirty pages be balanced around the global/wb setpoints.
9546c14ae1eSWu Fengguang * When the number of dirty pages is higher/lower than the setpoint, the
9556c14ae1eSWu Fengguang * dirty position control ratio (and hence task dirty ratelimit) will be
9566c14ae1eSWu Fengguang * decreased/increased to bring the dirty pages back to the setpoint.
9576c14ae1eSWu Fengguang *
9586c14ae1eSWu Fengguang * pos_ratio = 1 << RATELIMIT_CALC_SHIFT
9596c14ae1eSWu Fengguang *
9606c14ae1eSWu Fengguang * if (dirty < setpoint) scale up pos_ratio
9616c14ae1eSWu Fengguang * if (dirty > setpoint) scale down pos_ratio
9626c14ae1eSWu Fengguang *
963de1fff37STejun Heo * if (wb_dirty < wb_setpoint) scale up pos_ratio
964de1fff37STejun Heo * if (wb_dirty > wb_setpoint) scale down pos_ratio
9656c14ae1eSWu Fengguang *
9666c14ae1eSWu Fengguang * task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
9676c14ae1eSWu Fengguang *
9686c14ae1eSWu Fengguang * (o) global control line
9696c14ae1eSWu Fengguang *
9706c14ae1eSWu Fengguang * ^ pos_ratio
9716c14ae1eSWu Fengguang * |
9726c14ae1eSWu Fengguang * | |<===== global dirty control scope ======>|
97303231554SChi Wu * 2.0 * * * * * * *
9746c14ae1eSWu Fengguang * | .*
9756c14ae1eSWu Fengguang * | . *
9766c14ae1eSWu Fengguang * | . *
9776c14ae1eSWu Fengguang * | . *
9786c14ae1eSWu Fengguang * | . *
9796c14ae1eSWu Fengguang * | . *
9806c14ae1eSWu Fengguang * 1.0 ................................*
9816c14ae1eSWu Fengguang * | . . *
9826c14ae1eSWu Fengguang * | . . *
9836c14ae1eSWu Fengguang * | . . *
9846c14ae1eSWu Fengguang * | . . *
9856c14ae1eSWu Fengguang * | . . *
9866c14ae1eSWu Fengguang * 0 +------------.------------------.----------------------*------------->
9876c14ae1eSWu Fengguang * freerun^ setpoint^ limit^ dirty pages
9886c14ae1eSWu Fengguang *
989de1fff37STejun Heo * (o) wb control line
9906c14ae1eSWu Fengguang *
9916c14ae1eSWu Fengguang * ^ pos_ratio
9926c14ae1eSWu Fengguang * |
9936c14ae1eSWu Fengguang * | *
9946c14ae1eSWu Fengguang * | *
9956c14ae1eSWu Fengguang * | *
9966c14ae1eSWu Fengguang * | *
9976c14ae1eSWu Fengguang * | * |<=========== span ============>|
9986c14ae1eSWu Fengguang * 1.0 .......................*
9996c14ae1eSWu Fengguang * | . *
10006c14ae1eSWu Fengguang * | . *
10016c14ae1eSWu Fengguang * | . *
10026c14ae1eSWu Fengguang * | . *
10036c14ae1eSWu Fengguang * | . *
10046c14ae1eSWu Fengguang * | . *
10056c14ae1eSWu Fengguang * | . *
10066c14ae1eSWu Fengguang * | . *
10076c14ae1eSWu Fengguang * | . *
10086c14ae1eSWu Fengguang * | . *
10096c14ae1eSWu Fengguang * | . *
10106c14ae1eSWu Fengguang * 1/4 ...............................................* * * * * * * * * * * *
10116c14ae1eSWu Fengguang * | . .
10126c14ae1eSWu Fengguang * | . .
10136c14ae1eSWu Fengguang * | . .
10146c14ae1eSWu Fengguang * 0 +----------------------.-------------------------------.------------->
1015de1fff37STejun Heo * wb_setpoint^ x_intercept^
10166c14ae1eSWu Fengguang *
1017de1fff37STejun Heo * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
10186c14ae1eSWu Fengguang * be smoothly throttled down to normal if it starts high in situations like
10196c14ae1eSWu Fengguang * - start writing to a slow SD card and a fast disk at the same time. The SD
1020de1fff37STejun Heo * card's wb_dirty may rush to many times higher than wb_setpoint.
1021de1fff37STejun Heo * - the wb dirty thresh drops quickly due to change of JBOD workload
10226c14ae1eSWu Fengguang */
wb_position_ratio(struct dirty_throttle_control * dtc)1023daddfa3cSTejun Heo static void wb_position_ratio(struct dirty_throttle_control *dtc)
10246c14ae1eSWu Fengguang {
10252bc00aefSTejun Heo struct bdi_writeback *wb = dtc->wb;
102620792ebfSJan Kara unsigned long write_bw = READ_ONCE(wb->avg_write_bandwidth);
10272bc00aefSTejun Heo unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
1028c7981433STejun Heo unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
10292bc00aefSTejun Heo unsigned long wb_thresh = dtc->wb_thresh;
10306c14ae1eSWu Fengguang unsigned long x_intercept;
10316c14ae1eSWu Fengguang unsigned long setpoint; /* dirty pages' target balance point */
1032de1fff37STejun Heo unsigned long wb_setpoint;
10336c14ae1eSWu Fengguang unsigned long span;
10346c14ae1eSWu Fengguang long long pos_ratio; /* for scaling up/down the rate limit */
10356c14ae1eSWu Fengguang long x;
10366c14ae1eSWu Fengguang
1037daddfa3cSTejun Heo dtc->pos_ratio = 0;
1038daddfa3cSTejun Heo
10392bc00aefSTejun Heo if (unlikely(dtc->dirty >= limit))
1040daddfa3cSTejun Heo return;
10416c14ae1eSWu Fengguang
10426c14ae1eSWu Fengguang /*
10436c14ae1eSWu Fengguang * global setpoint
10446c14ae1eSWu Fengguang *
10455a537485SMaxim Patlasov * See comment for pos_ratio_polynom().
10466c14ae1eSWu Fengguang */
10476c14ae1eSWu Fengguang setpoint = (freerun + limit) / 2;
10482bc00aefSTejun Heo pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit);
10495a537485SMaxim Patlasov
10505a537485SMaxim Patlasov /*
10515a537485SMaxim Patlasov * The strictlimit feature is a tool preventing mistrusted filesystems
10525a537485SMaxim Patlasov * from growing a large number of dirty pages before throttling. For
1053de1fff37STejun Heo * such filesystems balance_dirty_pages always checks wb counters
1054de1fff37STejun Heo * against wb limits. Even if global "nr_dirty" is under "freerun".
10555a537485SMaxim Patlasov * This is especially important for fuse which sets bdi->max_ratio to
10565a537485SMaxim Patlasov * 1% by default. Without strictlimit feature, fuse writeback may
10575a537485SMaxim Patlasov * consume arbitrary amount of RAM because it is accounted in
10585a537485SMaxim Patlasov * NR_WRITEBACK_TEMP which is not involved in calculating "nr_dirty".
10595a537485SMaxim Patlasov *
1060a88a341aSTejun Heo * Here, in wb_position_ratio(), we calculate pos_ratio based on
1061de1fff37STejun Heo * two values: wb_dirty and wb_thresh. Let's consider an example:
10625a537485SMaxim Patlasov * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
10635a537485SMaxim Patlasov * limits are set by default to 10% and 20% (background and throttle).
1064de1fff37STejun Heo * Then wb_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
10650d960a38STejun Heo * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is
1066de1fff37STejun Heo * about ~6K pages (as the average of background and throttle wb
10675a537485SMaxim Patlasov * limits). The 3rd order polynomial will provide positive feedback if
1068de1fff37STejun Heo * wb_dirty is under wb_setpoint and vice versa.
10695a537485SMaxim Patlasov *
10705a537485SMaxim Patlasov * Note, that we cannot use global counters in these calculations
1071de1fff37STejun Heo * because we want to throttle process writing to a strictlimit wb
10725a537485SMaxim Patlasov * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB
10735a537485SMaxim Patlasov * in the example above).
10745a537485SMaxim Patlasov */
1075a88a341aSTejun Heo if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
1076de1fff37STejun Heo long long wb_pos_ratio;
10775a537485SMaxim Patlasov
1078daddfa3cSTejun Heo if (dtc->wb_dirty < 8) {
1079daddfa3cSTejun Heo dtc->pos_ratio = min_t(long long, pos_ratio * 2,
10805a537485SMaxim Patlasov 2 << RATELIMIT_CALC_SHIFT);
1081daddfa3cSTejun Heo return;
1082daddfa3cSTejun Heo }
10835a537485SMaxim Patlasov
10842bc00aefSTejun Heo if (dtc->wb_dirty >= wb_thresh)
1085daddfa3cSTejun Heo return;
10865a537485SMaxim Patlasov
1087970fb01aSTejun Heo wb_setpoint = dirty_freerun_ceiling(wb_thresh,
1088970fb01aSTejun Heo dtc->wb_bg_thresh);
10895a537485SMaxim Patlasov
1090de1fff37STejun Heo if (wb_setpoint == 0 || wb_setpoint == wb_thresh)
1091daddfa3cSTejun Heo return;
10925a537485SMaxim Patlasov
10932bc00aefSTejun Heo wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty,
1094de1fff37STejun Heo wb_thresh);
10955a537485SMaxim Patlasov
10965a537485SMaxim Patlasov /*
1097de1fff37STejun Heo * Typically, for strictlimit case, wb_setpoint << setpoint
1098de1fff37STejun Heo * and pos_ratio >> wb_pos_ratio. In other words, the global
10995a537485SMaxim Patlasov * state ("dirty") is not the limiting factor and we have to
1100de1fff37STejun Heo * make decision based on wb counters. But there is an
11015a537485SMaxim Patlasov * important case when global pos_ratio should get precedence:
11025a537485SMaxim Patlasov * global limits are exceeded (e.g. due to activities on other
1103de1fff37STejun Heo * wb's) while given strictlimit wb is below limit.
11045a537485SMaxim Patlasov *
1105de1fff37STejun Heo * "pos_ratio * wb_pos_ratio" would work for the case above,
11065a537485SMaxim Patlasov * but it would look unnatural for the case of all
1107de1fff37STejun Heo * activity in the system coming from a single strictlimit wb
11085a537485SMaxim Patlasov * with bdi->max_ratio == 100%.
11095a537485SMaxim Patlasov *
11105a537485SMaxim Patlasov * Note that min() below somewhat changes the dynamics of the
11115a537485SMaxim Patlasov * control system. Normally, pos_ratio value can be well over 3
1112de1fff37STejun Heo * (when globally we are at freerun and wb is well below wb
11135a537485SMaxim Patlasov * setpoint). Now the maximum pos_ratio in the same situation
11145a537485SMaxim Patlasov * is 2. We might want to tweak this if we observe the control
11155a537485SMaxim Patlasov * system is too slow to adapt.
11165a537485SMaxim Patlasov */
1117daddfa3cSTejun Heo dtc->pos_ratio = min(pos_ratio, wb_pos_ratio);
1118daddfa3cSTejun Heo return;
11195a537485SMaxim Patlasov }
11206c14ae1eSWu Fengguang
11216c14ae1eSWu Fengguang /*
11226c14ae1eSWu Fengguang * We have computed basic pos_ratio above based on global situation. If
1123de1fff37STejun Heo * the wb is over/under its share of dirty pages, we want to scale
11246c14ae1eSWu Fengguang * pos_ratio further down/up. That is done by the following mechanism.
11256c14ae1eSWu Fengguang */
11266c14ae1eSWu Fengguang
11276c14ae1eSWu Fengguang /*
1128de1fff37STejun Heo * wb setpoint
11296c14ae1eSWu Fengguang *
1130de1fff37STejun Heo * f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint)
11316c14ae1eSWu Fengguang *
1132de1fff37STejun Heo *                           x_intercept - wb_dirty
11336c14ae1eSWu Fengguang *                        := --------------------------
1134de1fff37STejun Heo *                           x_intercept - wb_setpoint
11356c14ae1eSWu Fengguang *
1136de1fff37STejun Heo * The main wb control line is a linear function subject to
11376c14ae1eSWu Fengguang *
1138de1fff37STejun Heo * (1) f(wb_setpoint) = 1.0
1139de1fff37STejun Heo * (2) k = - 1 / (8 * write_bw) (in single wb case)
1140de1fff37STejun Heo * or equally: x_intercept = wb_setpoint + 8 * write_bw
11416c14ae1eSWu Fengguang *
1142de1fff37STejun Heo * For single wb case, the dirty pages are observed to fluctuate
11436c14ae1eSWu Fengguang * regularly within the range
1144de1fff37STejun Heo * [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2]
11456c14ae1eSWu Fengguang * for various filesystems, where (2) yields a reasonable 12.5%
11466c14ae1eSWu Fengguang * fluctuation range for pos_ratio.
11476c14ae1eSWu Fengguang *
1148de1fff37STejun Heo * For JBOD case, wb_thresh (not wb_dirty!) could fluctuate up to its
11496c14ae1eSWu Fengguang * own size, so move the slope over accordingly and choose a slope that
1150de1fff37STejun Heo * yields 100% pos_ratio fluctuation on suddenly doubled wb_thresh.
11516c14ae1eSWu Fengguang */
11522bc00aefSTejun Heo if (unlikely(wb_thresh > dtc->thresh))
11532bc00aefSTejun Heo wb_thresh = dtc->thresh;
1154aed21ad2SWu Fengguang /*
1155de1fff37STejun Heo * It's very possible that wb_thresh is close to 0 not because the
1156aed21ad2SWu Fengguang * device is slow, but because it has remained inactive for a long time.
1157aed21ad2SWu Fengguang * Grant such devices a reasonably good (hopefully IO-efficient)
1158aed21ad2SWu Fengguang * threshold, so that occasional writes won't be blocked and active
1159aed21ad2SWu Fengguang * writes can ramp up the threshold quickly.
1160aed21ad2SWu Fengguang */
11612bc00aefSTejun Heo wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8);
11626c14ae1eSWu Fengguang /*
1163de1fff37STejun Heo * scale global setpoint to wb's:
1164de1fff37STejun Heo * wb_setpoint = setpoint * wb_thresh / thresh
11656c14ae1eSWu Fengguang */
1166e4bc13adSLinus Torvalds x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1);
1167de1fff37STejun Heo wb_setpoint = setpoint * (u64)x >> 16;
11686c14ae1eSWu Fengguang /*
1169de1fff37STejun Heo * Use span=(8*write_bw) in single wb case as indicated by
1170de1fff37STejun Heo * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case.
11716c14ae1eSWu Fengguang *
1172de1fff37STejun Heo *        wb_thresh                    thresh - wb_thresh
1173de1fff37STejun Heo * span = --------- * (8 * write_bw) + ------------------ * wb_thresh
11746c14ae1eSWu Fengguang *          thresh                           thresh
11756c14ae1eSWu Fengguang */
11762bc00aefSTejun Heo span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16;
1177de1fff37STejun Heo x_intercept = wb_setpoint + span;
11786c14ae1eSWu Fengguang
11792bc00aefSTejun Heo if (dtc->wb_dirty < x_intercept - span / 4) {
11802bc00aefSTejun Heo pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty),
1181e4bc13adSLinus Torvalds (x_intercept - wb_setpoint) | 1);
11826c14ae1eSWu Fengguang } else
11836c14ae1eSWu Fengguang pos_ratio /= 4;
11846c14ae1eSWu Fengguang
11858927f66cSWu Fengguang /*
1186de1fff37STejun Heo * wb reserve area, safeguard against dirty pool underrun and disk idle
11878927f66cSWu Fengguang * It may push the desired control point of global dirty pages higher
11888927f66cSWu Fengguang * than setpoint.
11898927f66cSWu Fengguang */
1190de1fff37STejun Heo x_intercept = wb_thresh / 2;
11912bc00aefSTejun Heo if (dtc->wb_dirty < x_intercept) {
11922bc00aefSTejun Heo if (dtc->wb_dirty > x_intercept / 8)
11932bc00aefSTejun Heo pos_ratio = div_u64(pos_ratio * x_intercept,
11942bc00aefSTejun Heo dtc->wb_dirty);
119550657fc4SWu Fengguang else
11968927f66cSWu Fengguang pos_ratio *= 8;
11978927f66cSWu Fengguang }
11988927f66cSWu Fengguang
1199daddfa3cSTejun Heo dtc->pos_ratio = pos_ratio;
12006c14ae1eSWu Fengguang }
12016c14ae1eSWu Fengguang
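/*
 * Illustrative userspace sketch (not kernel code, hence #if 0): evaluate
 * the main wb control line derived above with made-up numbers, using the
 * same fixed-point convention as RATELIMIT_CALC_SHIFT = 10, i.e. a
 * pos_ratio of 1024 means 1.0. All values below are assumptions chosen
 * only to show the shape of f(wb_dirty).
 */
#if 0
#include <stdio.h>

static long long demo_wb_control_line(unsigned long wb_dirty,
				      unsigned long wb_setpoint,
				      unsigned long span)
{
	unsigned long x_intercept = wb_setpoint + span;

	if (wb_dirty >= x_intercept)
		return 0;
	/* f(wb_dirty) = (x_intercept - wb_dirty) / (x_intercept - wb_setpoint) */
	return ((long long)(x_intercept - wb_dirty) << 10) / span;
}

int main(void)
{
	unsigned long span = 8 * 25600;	/* 8 * write_bw, write_bw ~100MB/s in 4k pages */

	/* at wb_setpoint: f = 1.0 -> prints 1024 */
	printf("%lld\n", demo_wb_control_line(50000, 50000, span));
	/* halfway to x_intercept: f = 0.5 -> prints 512 */
	printf("%lld\n", demo_wb_control_line(50000 + span / 2, 50000, span));
	return 0;
}
#endif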
1202a88a341aSTejun Heo static void wb_update_write_bandwidth(struct bdi_writeback *wb,
1203e98be2d5SWu Fengguang unsigned long elapsed,
1204e98be2d5SWu Fengguang unsigned long written)
1205e98be2d5SWu Fengguang {
1206e98be2d5SWu Fengguang const unsigned long period = roundup_pow_of_two(3 * HZ);
1207a88a341aSTejun Heo unsigned long avg = wb->avg_write_bandwidth;
1208a88a341aSTejun Heo unsigned long old = wb->write_bandwidth;
1209e98be2d5SWu Fengguang u64 bw;
1210e98be2d5SWu Fengguang
1211e98be2d5SWu Fengguang /*
1212e98be2d5SWu Fengguang * bw = written * HZ / elapsed
1213e98be2d5SWu Fengguang *
1214e98be2d5SWu Fengguang *                   bw * elapsed + write_bandwidth * (period - elapsed)
1215e98be2d5SWu Fengguang * write_bandwidth = ---------------------------------------------------
1216e98be2d5SWu Fengguang *                                        period
1217c72efb65STejun Heo *
1218ed2da924SChristoph Hellwig * @written may have decreased due to folio_redirty_for_writepage().
1219c72efb65STejun Heo * Avoid underflowing @bw calculation.
1220e98be2d5SWu Fengguang */
1221a88a341aSTejun Heo bw = written - min(written, wb->written_stamp);
1222e98be2d5SWu Fengguang bw *= HZ;
1223e98be2d5SWu Fengguang if (unlikely(elapsed > period)) {
12240a5d1a7fSWen Yang bw = div64_ul(bw, elapsed);
1225e98be2d5SWu Fengguang avg = bw;
1226e98be2d5SWu Fengguang goto out;
1227e98be2d5SWu Fengguang }
1228a88a341aSTejun Heo bw += (u64)wb->write_bandwidth * (period - elapsed);
1229e98be2d5SWu Fengguang bw >>= ilog2(period);
1230e98be2d5SWu Fengguang
1231e98be2d5SWu Fengguang /*
1232e98be2d5SWu Fengguang * one more level of smoothing, for filtering out sudden spikes
1233e98be2d5SWu Fengguang */
1234e98be2d5SWu Fengguang if (avg > old && old >= (unsigned long)bw)
1235e98be2d5SWu Fengguang avg -= (avg - old) >> 3;
1236e98be2d5SWu Fengguang
1237e98be2d5SWu Fengguang if (avg < old && old <= (unsigned long)bw)
1238e98be2d5SWu Fengguang avg += (old - avg) >> 3;
1239e98be2d5SWu Fengguang
1240e98be2d5SWu Fengguang out:
124195a46c65STejun Heo /* keep avg > 0 to guarantee that tot > 0 if there are dirty wbs */
124295a46c65STejun Heo avg = max(avg, 1LU);
124395a46c65STejun Heo if (wb_has_dirty_io(wb)) {
124495a46c65STejun Heo long delta = avg - wb->avg_write_bandwidth;
124595a46c65STejun Heo WARN_ON_ONCE(atomic_long_add_return(delta,
124695a46c65STejun Heo &wb->bdi->tot_write_bandwidth) <= 0);
124795a46c65STejun Heo }
1248a88a341aSTejun Heo wb->write_bandwidth = bw;
124920792ebfSJan Kara WRITE_ONCE(wb->avg_write_bandwidth, avg);
1250e98be2d5SWu Fengguang }
1251e98be2d5SWu Fengguang
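/*
 * Illustrative userspace sketch (not kernel code, hence #if 0): one step
 * of the blending formula above, with an assumed HZ of 100 and therefore
 * period = roundup_pow_of_two(3 * HZ) = 512. A burst measured at 2000
 * pages/s for 200ms only nudges a 1000 pages/s estimate up to ~1039,
 * which is the smoothing the comment describes.
 */
#if 0
#include <stdio.h>

#define DEMO_HZ		100
#define DEMO_PERIOD	512	/* roundup_pow_of_two(3 * DEMO_HZ) */

static unsigned long demo_update_bw(unsigned long old_bw,
				    unsigned long written_pages,
				    unsigned long elapsed)
{
	unsigned long long bw = (unsigned long long)written_pages * DEMO_HZ;

	if (elapsed > DEMO_PERIOD)	/* too stale: take the raw measurement */
		return bw / elapsed;
	bw += (unsigned long long)old_bw * (DEMO_PERIOD - elapsed);
	return bw / DEMO_PERIOD;	/* i.e. bw >>= ilog2(period) */
}

int main(void)
{
	/* 400 pages written in 20 ticks (200ms) = 2000 pages/s measured */
	printf("%lu\n", demo_update_bw(1000, 400, 20));	/* prints 1039 */
	return 0;
}
#endif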
12522bc00aefSTejun Heo static void update_dirty_limit(struct dirty_throttle_control *dtc)
1253c42843f2SWu Fengguang {
1254e9f07dfdSTejun Heo struct wb_domain *dom = dtc_dom(dtc);
12552bc00aefSTejun Heo unsigned long thresh = dtc->thresh;
1256dcc25ae7STejun Heo unsigned long limit = dom->dirty_limit;
1257c42843f2SWu Fengguang
1258c42843f2SWu Fengguang /*
1259c42843f2SWu Fengguang * Follow up in one step.
1260c42843f2SWu Fengguang */
1261c42843f2SWu Fengguang if (limit < thresh) {
1262c42843f2SWu Fengguang limit = thresh;
1263c42843f2SWu Fengguang goto update;
1264c42843f2SWu Fengguang }
1265c42843f2SWu Fengguang
1266c42843f2SWu Fengguang /*
1267c42843f2SWu Fengguang * Follow down slowly. Use the higher one as the target, because thresh
1268c42843f2SWu Fengguang * may drop below dirty. This is exactly the reason to introduce
1269dcc25ae7STejun Heo * dom->dirty_limit which is guaranteed to lie above the dirty pages.
1270c42843f2SWu Fengguang */
12712bc00aefSTejun Heo thresh = max(thresh, dtc->dirty);
1272c42843f2SWu Fengguang if (limit > thresh) {
1273c42843f2SWu Fengguang limit -= (limit - thresh) >> 5;
1274c42843f2SWu Fengguang goto update;
1275c42843f2SWu Fengguang }
1276c42843f2SWu Fengguang return;
1277c42843f2SWu Fengguang update:
1278dcc25ae7STejun Heo dom->dirty_limit = limit;
1279c42843f2SWu Fengguang }
1280c42843f2SWu Fengguang
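/*
 * Illustrative userspace sketch (not kernel code, hence #if 0): how fast
 * the "follow down slowly" step above converges. With a ~1/32 decay per
 * BANDWIDTH_INTERVAL (200ms), closing 99% of the gap takes roughly 150
 * steps, i.e. on the order of 30 seconds. The numbers are assumptions.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long limit = 200000, thresh = 100000;
	int steps = 0;

	while (limit - thresh > (200000 - 100000) / 100) {
		limit -= (limit - thresh) >> 5;	/* the step used above */
		steps++;
	}
	printf("%d steps, limit=%lu\n", steps, limit);
	return 0;
}
#endif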
128142dd235cSJan Kara static void domain_update_dirty_limit(struct dirty_throttle_control *dtc,
1282c42843f2SWu Fengguang unsigned long now)
1283c42843f2SWu Fengguang {
1284e9f07dfdSTejun Heo struct wb_domain *dom = dtc_dom(dtc);
1285c42843f2SWu Fengguang
1286c42843f2SWu Fengguang /*
1287c42843f2SWu Fengguang * check locklessly first to optimize away locking for the most time
1288c42843f2SWu Fengguang */
1289dcc25ae7STejun Heo if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
1290c42843f2SWu Fengguang return;
1291c42843f2SWu Fengguang
1292dcc25ae7STejun Heo spin_lock(&dom->lock);
1293dcc25ae7STejun Heo if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
12942bc00aefSTejun Heo update_dirty_limit(dtc);
1295dcc25ae7STejun Heo dom->dirty_limit_tstamp = now;
1296c42843f2SWu Fengguang }
1297dcc25ae7STejun Heo spin_unlock(&dom->lock);
1298c42843f2SWu Fengguang }
1299c42843f2SWu Fengguang
1300be3ffa27SWu Fengguang /*
1301de1fff37STejun Heo * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
1302be3ffa27SWu Fengguang *
1303de1fff37STejun Heo * Normal wb tasks will be curbed at or below it in long term.
1304be3ffa27SWu Fengguang * Obviously it should be around (write_bw / N) when there are N dd tasks.
1305be3ffa27SWu Fengguang */
13062bc00aefSTejun Heo static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
1307be3ffa27SWu Fengguang unsigned long dirtied,
1308be3ffa27SWu Fengguang unsigned long elapsed)
1309be3ffa27SWu Fengguang {
13102bc00aefSTejun Heo struct bdi_writeback *wb = dtc->wb;
13112bc00aefSTejun Heo unsigned long dirty = dtc->dirty;
13122bc00aefSTejun Heo unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
1313c7981433STejun Heo unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
13147381131cSWu Fengguang unsigned long setpoint = (freerun + limit) / 2;
1315a88a341aSTejun Heo unsigned long write_bw = wb->avg_write_bandwidth;
1316a88a341aSTejun Heo unsigned long dirty_ratelimit = wb->dirty_ratelimit;
1317be3ffa27SWu Fengguang unsigned long dirty_rate;
1318be3ffa27SWu Fengguang unsigned long task_ratelimit;
1319be3ffa27SWu Fengguang unsigned long balanced_dirty_ratelimit;
13207381131cSWu Fengguang unsigned long step;
13217381131cSWu Fengguang unsigned long x;
1322d59b1087SAndrey Ryabinin unsigned long shift;
1323be3ffa27SWu Fengguang
1324be3ffa27SWu Fengguang /*
1325be3ffa27SWu Fengguang * The dirty rate will match the writeout rate in long term, except
1326be3ffa27SWu Fengguang * when dirty pages are truncated by userspace or re-dirtied by FS.
1327be3ffa27SWu Fengguang */
1328a88a341aSTejun Heo dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed;
1329be3ffa27SWu Fengguang
1330be3ffa27SWu Fengguang /*
1331be3ffa27SWu Fengguang * task_ratelimit reflects each dd's dirty rate for the past 200ms.
1332be3ffa27SWu Fengguang */
1333be3ffa27SWu Fengguang task_ratelimit = (u64)dirty_ratelimit *
1334daddfa3cSTejun Heo dtc->pos_ratio >> RATELIMIT_CALC_SHIFT;
1335be3ffa27SWu Fengguang task_ratelimit++; /* it helps ramp up dirty_ratelimit from tiny values */
1336be3ffa27SWu Fengguang
1337be3ffa27SWu Fengguang /*
1338be3ffa27SWu Fengguang * A linear estimation of the "balanced" throttle rate. The theory is,
1339de1fff37STejun Heo * if there are N dd tasks, each throttled at task_ratelimit, the wb's
1340be3ffa27SWu Fengguang * dirty_rate will be measured to be (N * task_ratelimit). So the below
1341be3ffa27SWu Fengguang * formula will yield the balanced rate limit (write_bw / N).
1342be3ffa27SWu Fengguang *
1343be3ffa27SWu Fengguang * Note that the expanded form is not a pure rate feedback:
1344be3ffa27SWu Fengguang * rate_(i+1) = rate_(i) * (write_bw / dirty_rate) (1)
1345be3ffa27SWu Fengguang * but also takes pos_ratio into account:
1346be3ffa27SWu Fengguang * rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio (2)
1347be3ffa27SWu Fengguang *
1348be3ffa27SWu Fengguang * (1) is not realistic because pos_ratio also takes part in balancing
1349be3ffa27SWu Fengguang * the dirty rate. Consider the state
1350be3ffa27SWu Fengguang * pos_ratio = 0.5 (3)
1351be3ffa27SWu Fengguang * rate = 2 * (write_bw / N) (4)
1352be3ffa27SWu Fengguang * If (1) is used, it will get stuck in that state, because each dd will
1353be3ffa27SWu Fengguang * be throttled at
1354be3ffa27SWu Fengguang * task_ratelimit = pos_ratio * rate = (write_bw / N) (5)
1355be3ffa27SWu Fengguang * yielding
1356be3ffa27SWu Fengguang * dirty_rate = N * task_ratelimit = write_bw (6)
1357be3ffa27SWu Fengguang * put (6) into (1) we get
1358be3ffa27SWu Fengguang * rate_(i+1) = rate_(i) (7)
1359be3ffa27SWu Fengguang *
1360be3ffa27SWu Fengguang * So we end up using (2) to always keep
1361be3ffa27SWu Fengguang * rate_(i+1) ~= (write_bw / N) (8)
1362be3ffa27SWu Fengguang * regardless of the value of pos_ratio. As long as (8) is satisfied,
1363be3ffa27SWu Fengguang * pos_ratio is able to drive itself to 1.0, which is not only where
1364be3ffa27SWu Fengguang * the dirty count meets the setpoint, but also where the slope of
1365be3ffa27SWu Fengguang * pos_ratio is flattest and hence task_ratelimit fluctuates least.
1366be3ffa27SWu Fengguang */
1367be3ffa27SWu Fengguang balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
1368be3ffa27SWu Fengguang dirty_rate | 1);
1369bdaac490SWu Fengguang /*
1370bdaac490SWu Fengguang * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
1371bdaac490SWu Fengguang */
1372bdaac490SWu Fengguang if (unlikely(balanced_dirty_ratelimit > write_bw))
1373bdaac490SWu Fengguang balanced_dirty_ratelimit = write_bw;
1374be3ffa27SWu Fengguang
13757381131cSWu Fengguang /*
13767381131cSWu Fengguang * We could safely do this and return immediately:
13777381131cSWu Fengguang *
1378de1fff37STejun Heo * wb->dirty_ratelimit = balanced_dirty_ratelimit;
13797381131cSWu Fengguang *
13807381131cSWu Fengguang * However to get a more stable dirty_ratelimit, the below elaborated
1381331cbdeeSWanpeng Li * code makes use of task_ratelimit to filter out singular points and
13827381131cSWu Fengguang * limit the step size.
13837381131cSWu Fengguang *
13847381131cSWu Fengguang * The below code essentially only uses the relative value of
13857381131cSWu Fengguang *
13867381131cSWu Fengguang * task_ratelimit - dirty_ratelimit
13877381131cSWu Fengguang * = (pos_ratio - 1) * dirty_ratelimit
13887381131cSWu Fengguang *
13897381131cSWu Fengguang * which reflects the direction and size of dirty position error.
13907381131cSWu Fengguang */
13917381131cSWu Fengguang
13927381131cSWu Fengguang /*
13937381131cSWu Fengguang * dirty_ratelimit will follow balanced_dirty_ratelimit iff
13947381131cSWu Fengguang * task_ratelimit is on the same side of dirty_ratelimit, too.
13957381131cSWu Fengguang * For example, when
13967381131cSWu Fengguang * - dirty_ratelimit > balanced_dirty_ratelimit
13977381131cSWu Fengguang * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
13987381131cSWu Fengguang * lowering dirty_ratelimit will help meet both the position and rate
13997381131cSWu Fengguang * control targets. Otherwise, don't update dirty_ratelimit if it will
14007381131cSWu Fengguang * only help meet the rate target. After all, what the users ultimately
14017381131cSWu Fengguang * feel and care are stable dirty rate and small position error.
14027381131cSWu Fengguang *
14037381131cSWu Fengguang * |task_ratelimit - dirty_ratelimit| is used to limit the step size
1404331cbdeeSWanpeng Li * and filter out the singular points of balanced_dirty_ratelimit, which
14057381131cSWu Fengguang * keeps jumping around randomly and can even leap far away at times
14067381131cSWu Fengguang * due to the small 200ms estimation period of dirty_rate (we want to
14077381131cSWu Fengguang * keep that period small to reduce time lags).
14087381131cSWu Fengguang */
14097381131cSWu Fengguang step = 0;
14105a537485SMaxim Patlasov
14115a537485SMaxim Patlasov /*
1412de1fff37STejun Heo * For strictlimit case, calculations above were based on wb counters
1413a88a341aSTejun Heo * and limits (starting from pos_ratio = wb_position_ratio() and up to
14145a537485SMaxim Patlasov * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
1415de1fff37STejun Heo * Hence, to calculate "step" properly, we have to use wb_dirty as
1416de1fff37STejun Heo * "dirty" and wb_setpoint as "setpoint".
14175a537485SMaxim Patlasov *
1418de1fff37STejun Heo * We ramp up dirty_ratelimit forcibly if wb_dirty is low because
1419de1fff37STejun Heo * it's possible that wb_thresh is close to zero due to inactivity
1420970fb01aSTejun Heo * of backing device.
14215a537485SMaxim Patlasov */
1422a88a341aSTejun Heo if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
14232bc00aefSTejun Heo dirty = dtc->wb_dirty;
14242bc00aefSTejun Heo if (dtc->wb_dirty < 8)
14252bc00aefSTejun Heo setpoint = dtc->wb_dirty + 1;
14265a537485SMaxim Patlasov else
1427970fb01aSTejun Heo setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
14285a537485SMaxim Patlasov }
14295a537485SMaxim Patlasov
14307381131cSWu Fengguang if (dirty < setpoint) {
1431a88a341aSTejun Heo x = min3(wb->balanced_dirty_ratelimit,
14327c809968SMark Rustad balanced_dirty_ratelimit, task_ratelimit);
14337381131cSWu Fengguang if (dirty_ratelimit < x)
14347381131cSWu Fengguang step = x - dirty_ratelimit;
14357381131cSWu Fengguang } else {
1436a88a341aSTejun Heo x = max3(wb->balanced_dirty_ratelimit,
14377c809968SMark Rustad balanced_dirty_ratelimit, task_ratelimit);
14387381131cSWu Fengguang if (dirty_ratelimit > x)
14397381131cSWu Fengguang step = dirty_ratelimit - x;
14407381131cSWu Fengguang }
14417381131cSWu Fengguang
14427381131cSWu Fengguang /*
14437381131cSWu Fengguang * Don't pursue 100% rate matching. It's impossible since the balanced
14447381131cSWu Fengguang * rate itself is constantly fluctuating. So decrease the track speed
14457381131cSWu Fengguang * when it gets close to the target. This helps eliminate pointless tremors.
14467381131cSWu Fengguang */
1447d59b1087SAndrey Ryabinin shift = dirty_ratelimit / (2 * step + 1);
1448d59b1087SAndrey Ryabinin if (shift < BITS_PER_LONG)
1449d59b1087SAndrey Ryabinin step = DIV_ROUND_UP(step >> shift, 8);
1450d59b1087SAndrey Ryabinin else
1451d59b1087SAndrey Ryabinin step = 0;
14527381131cSWu Fengguang
14537381131cSWu Fengguang if (dirty_ratelimit < balanced_dirty_ratelimit)
14547381131cSWu Fengguang dirty_ratelimit += step;
14557381131cSWu Fengguang else
14567381131cSWu Fengguang dirty_ratelimit -= step;
14577381131cSWu Fengguang
145820792ebfSJan Kara WRITE_ONCE(wb->dirty_ratelimit, max(dirty_ratelimit, 1UL));
1459a88a341aSTejun Heo wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;
1460b48c104dSWu Fengguang
14615634cc2aSTejun Heo trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit);
1462be3ffa27SWu Fengguang }
1463be3ffa27SWu Fengguang
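/*
 * Illustrative userspace sketch (not kernel code, hence #if 0): the
 * "balanced" estimation above lands on write_bw / N no matter how far
 * off the current throttle rate is. Here 4 assumed tasks run at twice
 * their fair share, yet the estimator still returns write_bw / 4.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long write_bw = 25600;	/* pages/s, ~100MB/s at 4k */
	unsigned long long nr_tasks = 4;
	/* the current rate is wrong by 2x */
	unsigned long long task_ratelimit = 2 * write_bw / nr_tasks;
	/* each task dirties at task_ratelimit, so the measured rate is: */
	unsigned long long dirty_rate = nr_tasks * task_ratelimit;

	/* prints 6400 == write_bw / nr_tasks */
	printf("%llu\n", task_ratelimit * write_bw / dirty_rate);
	return 0;
}
#endif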
1464c2aa723aSTejun Heo static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
1465c2aa723aSTejun Heo struct dirty_throttle_control *mdtc,
14668a731799STejun Heo bool update_ratelimit)
1467e98be2d5SWu Fengguang {
1468c2aa723aSTejun Heo struct bdi_writeback *wb = gdtc->wb;
1469e98be2d5SWu Fengguang unsigned long now = jiffies;
147045a2966fSJan Kara unsigned long elapsed;
1471be3ffa27SWu Fengguang unsigned long dirtied;
1472e98be2d5SWu Fengguang unsigned long written;
1473e98be2d5SWu Fengguang
147445a2966fSJan Kara spin_lock(&wb->list_lock);
14758a731799STejun Heo
1476e98be2d5SWu Fengguang /*
147745a2966fSJan Kara * Lockless checks for elapsed time are racy, and the delayed update after
147845a2966fSJan Kara * IO completion doesn't check it at all (to make sure written pages are
147945a2966fSJan Kara * accounted reasonably quickly). Make sure elapsed >= 1 to avoid
148045a2966fSJan Kara * division errors.
1481e98be2d5SWu Fengguang */
148245a2966fSJan Kara elapsed = max(now - wb->bw_time_stamp, 1UL);
1483a88a341aSTejun Heo dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
1484a88a341aSTejun Heo written = percpu_counter_read(&wb->stat[WB_WRITTEN]);
1485e98be2d5SWu Fengguang
14868a731799STejun Heo if (update_ratelimit) {
148742dd235cSJan Kara domain_update_dirty_limit(gdtc, now);
1488c2aa723aSTejun Heo wb_update_dirty_ratelimit(gdtc, dirtied, elapsed);
1489c2aa723aSTejun Heo
1490c2aa723aSTejun Heo /*
1491c2aa723aSTejun Heo * @mdtc is always NULL if !CGROUP_WRITEBACK but the
1492c2aa723aSTejun Heo * compiler has no way to figure that out. Help it.
1493c2aa723aSTejun Heo */
1494c2aa723aSTejun Heo if (IS_ENABLED(CONFIG_CGROUP_WRITEBACK) && mdtc) {
149542dd235cSJan Kara domain_update_dirty_limit(mdtc, now);
1496c2aa723aSTejun Heo wb_update_dirty_ratelimit(mdtc, dirtied, elapsed);
1497be3ffa27SWu Fengguang }
1498c42843f2SWu Fengguang }
1499a88a341aSTejun Heo wb_update_write_bandwidth(wb, elapsed, written);
1500e98be2d5SWu Fengguang
1501a88a341aSTejun Heo wb->dirtied_stamp = dirtied;
1502a88a341aSTejun Heo wb->written_stamp = written;
150320792ebfSJan Kara WRITE_ONCE(wb->bw_time_stamp, now);
150445a2966fSJan Kara spin_unlock(&wb->list_lock);
1505e98be2d5SWu Fengguang }
1506e98be2d5SWu Fengguang
150745a2966fSJan Kara void wb_update_bandwidth(struct bdi_writeback *wb)
1508e98be2d5SWu Fengguang {
15092bc00aefSTejun Heo struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
15102bc00aefSTejun Heo
1511fee468fdSJan Kara __wb_update_bandwidth(&gdtc, NULL, false);
1512fee468fdSJan Kara }
1513fee468fdSJan Kara
1514fee468fdSJan Kara /* Interval after which we consider wb idle and don't estimate bandwidth */
1515fee468fdSJan Kara #define WB_BANDWIDTH_IDLE_JIF (HZ)
1516fee468fdSJan Kara
1517fee468fdSJan Kara static void wb_bandwidth_estimate_start(struct bdi_writeback *wb)
1518fee468fdSJan Kara {
1519fee468fdSJan Kara unsigned long now = jiffies;
1520fee468fdSJan Kara unsigned long elapsed = now - READ_ONCE(wb->bw_time_stamp);
1521fee468fdSJan Kara
1522fee468fdSJan Kara if (elapsed > WB_BANDWIDTH_IDLE_JIF &&
1523fee468fdSJan Kara !atomic_read(&wb->writeback_inodes)) {
1524fee468fdSJan Kara spin_lock(&wb->list_lock);
1525fee468fdSJan Kara wb->dirtied_stamp = wb_stat(wb, WB_DIRTIED);
1526fee468fdSJan Kara wb->written_stamp = wb_stat(wb, WB_WRITTEN);
152720792ebfSJan Kara WRITE_ONCE(wb->bw_time_stamp, now);
1528fee468fdSJan Kara spin_unlock(&wb->list_lock);
1529fee468fdSJan Kara }
1530e98be2d5SWu Fengguang }
1531e98be2d5SWu Fengguang
15321da177e4SLinus Torvalds /*
1533d0e1d66bSNamjae Jeon * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
15349d823e8fSWu Fengguang * will look to see if it needs to start dirty throttling.
15359d823e8fSWu Fengguang *
15369d823e8fSWu Fengguang * If dirty_poll_interval is too low, big NUMA machines will call the expensive
1537c41f012aSMichal Hocko * global_zone_page_state() too often. So scale it near-sqrt to the safety margin
15389d823e8fSWu Fengguang * (the number of pages we may dirty without exceeding the dirty limits).
15399d823e8fSWu Fengguang */
15409d823e8fSWu Fengguang static unsigned long dirty_poll_interval(unsigned long dirty,
15419d823e8fSWu Fengguang unsigned long thresh)
15429d823e8fSWu Fengguang {
15439d823e8fSWu Fengguang if (thresh > dirty)
15449d823e8fSWu Fengguang return 1UL << (ilog2(thresh - dirty) >> 1);
15459d823e8fSWu Fengguang
15469d823e8fSWu Fengguang return 1;
15479d823e8fSWu Fengguang }
15489d823e8fSWu Fengguang
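/*
 * Illustrative userspace sketch (not kernel code, hence #if 0): the
 * near-sqrt scaling above. demo_ilog2() stands in for the kernel's
 * ilog2(); the margins are assumptions.
 */
#if 0
#include <stdio.h>

static unsigned long demo_ilog2(unsigned long x)
{
	return 8 * sizeof(long) - 1 - __builtin_clzl(x);
}

int main(void)
{
	unsigned long margin = 1UL << 20;	/* 4GB of headroom in 4k pages */

	/* 1M pages of margin: recheck only every 2^(20/2) = 1024 pages */
	printf("%lu\n", 1UL << (demo_ilog2(margin) >> 1));
	/* 4096 pages of margin: recheck every 2^(12/2) = 64 pages */
	printf("%lu\n", 1UL << (demo_ilog2(4096) >> 1));
	return 0;
}
#endif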
1549a88a341aSTejun Heo static unsigned long wb_max_pause(struct bdi_writeback *wb,
1550de1fff37STejun Heo unsigned long wb_dirty)
1551c8462cc9SWu Fengguang {
155220792ebfSJan Kara unsigned long bw = READ_ONCE(wb->avg_write_bandwidth);
1553e3b6c655SFengguang Wu unsigned long t;
1554c8462cc9SWu Fengguang
1555c8462cc9SWu Fengguang /*
1556c8462cc9SWu Fengguang * Limit pause time for small memory systems. If we sleep for too long,
1557c8462cc9SWu Fengguang * a small pool of dirty/writeback pages may go empty and the disk may
1558c8462cc9SWu Fengguang * go idle.
1559c8462cc9SWu Fengguang *
1560c8462cc9SWu Fengguang * 8 serves as the safety ratio.
1561c8462cc9SWu Fengguang */
1562de1fff37STejun Heo t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
15637ccb9ad5SWu Fengguang t++;
15647ccb9ad5SWu Fengguang
1565e3b6c655SFengguang Wu return min_t(unsigned long, t, MAX_PAUSE);
15667ccb9ad5SWu Fengguang }
15677ccb9ad5SWu Fengguang
1568a88a341aSTejun Heo static long wb_min_pause(struct bdi_writeback *wb,
15697ccb9ad5SWu Fengguang long max_pause,
15707ccb9ad5SWu Fengguang unsigned long task_ratelimit,
15717ccb9ad5SWu Fengguang unsigned long dirty_ratelimit,
15727ccb9ad5SWu Fengguang int *nr_dirtied_pause)
15737ccb9ad5SWu Fengguang {
157420792ebfSJan Kara long hi = ilog2(READ_ONCE(wb->avg_write_bandwidth));
157520792ebfSJan Kara long lo = ilog2(READ_ONCE(wb->dirty_ratelimit));
15767ccb9ad5SWu Fengguang long t; /* target pause */
15777ccb9ad5SWu Fengguang long pause; /* estimated next pause */
15787ccb9ad5SWu Fengguang int pages; /* target nr_dirtied_pause */
15797ccb9ad5SWu Fengguang
15807ccb9ad5SWu Fengguang /* target for 10ms pause on 1-dd case */
15817ccb9ad5SWu Fengguang t = max(1, HZ / 100);
1582c8462cc9SWu Fengguang
1583c8462cc9SWu Fengguang /*
15847ccb9ad5SWu Fengguang * Scale up pause time for concurrent dirtiers in order to reduce CPU
15857ccb9ad5SWu Fengguang * overheads.
15867ccb9ad5SWu Fengguang *
15877ccb9ad5SWu Fengguang * (N * 10ms) on 2^N concurrent tasks.
1588c8462cc9SWu Fengguang */
15897ccb9ad5SWu Fengguang if (hi > lo)
15907ccb9ad5SWu Fengguang t += (hi - lo) * (10 * HZ) / 1024;
15917ccb9ad5SWu Fengguang
15927ccb9ad5SWu Fengguang /*
15937ccb9ad5SWu Fengguang * This is a bit convoluted. We try to base the next nr_dirtied_pause
15947ccb9ad5SWu Fengguang * on the much more stable dirty_ratelimit. However the next pause time
15957ccb9ad5SWu Fengguang * will be computed based on task_ratelimit and the two rate limits may
15967ccb9ad5SWu Fengguang * diverge considerably at times. Especially if task_ratelimit goes
15977ccb9ad5SWu Fengguang * below dirty_ratelimit/2 and the target pause is max_pause, the next
15987ccb9ad5SWu Fengguang * pause time will be max_pause*2 _trimmed down_ to max_pause. As a
15997ccb9ad5SWu Fengguang * result task_ratelimit won't be executed faithfully, which could
16007ccb9ad5SWu Fengguang * eventually bring down dirty_ratelimit.
16017ccb9ad5SWu Fengguang *
16027ccb9ad5SWu Fengguang * We apply two rules to fix it up:
16037ccb9ad5SWu Fengguang * 1) try to estimate the next pause time and if necessary, use a lower
16047ccb9ad5SWu Fengguang * nr_dirtied_pause so as not to exceed max_pause. When this happens,
16057ccb9ad5SWu Fengguang * nr_dirtied_pause will be "dancing" with task_ratelimit.
16067ccb9ad5SWu Fengguang * 2) limit the target pause time to max_pause/2, so that the normal
16077ccb9ad5SWu Fengguang * small fluctuations of task_ratelimit won't trigger rule (1) and
16087ccb9ad5SWu Fengguang * nr_dirtied_pause will remain as stable as dirty_ratelimit.
16097ccb9ad5SWu Fengguang */
16107ccb9ad5SWu Fengguang t = min(t, 1 + max_pause / 2);
16117ccb9ad5SWu Fengguang pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
16127ccb9ad5SWu Fengguang
16135b9b3574SWu Fengguang /*
16145b9b3574SWu Fengguang * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
16155b9b3574SWu Fengguang * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
16165b9b3574SWu Fengguang * When the 16 consecutive reads are often interrupted by some dirty
16175b9b3574SWu Fengguang * throttling pause during the async writes, cfq will go idle
16185b9b3574SWu Fengguang * (deadline is fine). So push nr_dirtied_pause as high as possible
16195b9b3574SWu Fengguang * until it reaches DIRTY_POLL_THRESH=32 pages.
16205b9b3574SWu Fengguang */
16215b9b3574SWu Fengguang if (pages < DIRTY_POLL_THRESH) {
16225b9b3574SWu Fengguang t = max_pause;
16235b9b3574SWu Fengguang pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
16245b9b3574SWu Fengguang if (pages > DIRTY_POLL_THRESH) {
16255b9b3574SWu Fengguang pages = DIRTY_POLL_THRESH;
16265b9b3574SWu Fengguang t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
16275b9b3574SWu Fengguang }
16285b9b3574SWu Fengguang }
16295b9b3574SWu Fengguang
16307ccb9ad5SWu Fengguang pause = HZ * pages / (task_ratelimit + 1);
16317ccb9ad5SWu Fengguang if (pause > max_pause) {
16327ccb9ad5SWu Fengguang t = max_pause;
16337ccb9ad5SWu Fengguang pages = task_ratelimit * t / roundup_pow_of_two(HZ);
16347ccb9ad5SWu Fengguang }
16357ccb9ad5SWu Fengguang
16367ccb9ad5SWu Fengguang *nr_dirtied_pause = pages;
16377ccb9ad5SWu Fengguang /*
16387ccb9ad5SWu Fengguang * The minimal pause time will normally be half the target pause time.
16397ccb9ad5SWu Fengguang */
16405b9b3574SWu Fengguang return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
1641c8462cc9SWu Fengguang }
1642c8462cc9SWu Fengguang
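/*
 * Illustrative userspace sketch (not kernel code, hence #if 0): the two
 * trimming rules above, with an assumed HZ of 1000 (so ticks ~= ms) and
 * roundup_pow_of_two(HZ) = 1024. task_ratelimit has dropped well below
 * dirty_ratelimit, so rule (1) trims nr_dirtied_pause to fit max_pause.
 */
#if 0
#include <stdio.h>

#define DEMO_HZ	1000

int main(void)
{
	long dirty_ratelimit = 8000;	/* pages/s, stable long-term rate */
	long task_ratelimit = 300;	/* pages/s, dips with pos_ratio */
	long max_pause = DEMO_HZ / 5;	/* 200ms */
	long t = DEMO_HZ / 100;		/* 10ms target pause on 1-dd case */
	long pages, pause;

	t = t < 1 + max_pause / 2 ? t : 1 + max_pause / 2;	/* rule (2) */
	pages = dirty_ratelimit * t / 1024;
	pause = DEMO_HZ * pages / (task_ratelimit + 1);
	if (pause > max_pause) {	/* rule (1): don't oversleep */
		t = max_pause;
		pages = task_ratelimit * t / 1024;
	}
	/* prints nr_dirtied_pause=58, target pause=200ms */
	printf("nr_dirtied_pause=%ld, target pause=%ldms\n", pages, t);
	return 0;
}
#endif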
1643970fb01aSTejun Heo static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
16441da177e4SLinus Torvalds {
16452bc00aefSTejun Heo struct bdi_writeback *wb = dtc->wb;
164693f78d88STejun Heo unsigned long wb_reclaimable;
1647143dfe86SWu Fengguang
1648143dfe86SWu Fengguang /*
1649de1fff37STejun Heo * wb_thresh is not treated as some limiting factor as
1650143dfe86SWu Fengguang * dirty_thresh, due to reasons
1651de1fff37STejun Heo * - in JBOD setup, wb_thresh can fluctuate a lot
1652143dfe86SWu Fengguang * - in a system with HDD and USB key, the USB key may somehow
1653de1fff37STejun Heo * go into state (wb_dirty >> wb_thresh) either because
1654de1fff37STejun Heo * wb_dirty starts high, or because wb_thresh drops low.
1655143dfe86SWu Fengguang * In this case we don't want to hard throttle the USB key
1656de1fff37STejun Heo * dirtiers for 100 seconds until wb_dirty drops under
1657de1fff37STejun Heo * wb_thresh. Instead the auxiliary wb control line in
1658a88a341aSTejun Heo * wb_position_ratio() will let the dirtier task progress
1659de1fff37STejun Heo * at some rate <= (write_bw / 2) for bringing down wb_dirty.
1660143dfe86SWu Fengguang */
1661b1cbc6d4STejun Heo dtc->wb_thresh = __wb_calc_thresh(dtc);
1662970fb01aSTejun Heo dtc->wb_bg_thresh = dtc->thresh ?
1663*f6620df1SJan Kara div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
166416c4042fSWu Fengguang
1665e50e3720SWu Fengguang /*
1666e50e3720SWu Fengguang * In order to avoid the stacked BDI deadlock we need
1667e50e3720SWu Fengguang * to ensure we accurately count the 'dirty' pages when
1668e50e3720SWu Fengguang * the threshold is low.
1669e50e3720SWu Fengguang *
1670e50e3720SWu Fengguang * Otherwise it would be possible to get thresh+n pages
1671e50e3720SWu Fengguang * reported dirty, even though there are thresh-m pages
1672e50e3720SWu Fengguang * actually dirty; with m+n sitting in the percpu
1673e50e3720SWu Fengguang * deltas.
1674e50e3720SWu Fengguang */
16752bce774eSWang Long if (dtc->wb_thresh < 2 * wb_stat_error()) {
167693f78d88STejun Heo wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
16772bc00aefSTejun Heo dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
1678e50e3720SWu Fengguang } else {
167993f78d88STejun Heo wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
16802bc00aefSTejun Heo dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
1681e50e3720SWu Fengguang }
16825a537485SMaxim Patlasov }
16835a537485SMaxim Patlasov
16845a537485SMaxim Patlasov /*
16855a537485SMaxim Patlasov * balance_dirty_pages() must be called by processes which are generating dirty
16865a537485SMaxim Patlasov * data. It looks at the number of dirty pages in the machine and will force
16875a537485SMaxim Patlasov * the caller to wait once it crosses (background_thresh + dirty_thresh) / 2.
16885a537485SMaxim Patlasov * If we're over `background_thresh' then the writeback threads are woken to
16895a537485SMaxim Patlasov * perform some writeout.
16905a537485SMaxim Patlasov */
1691fe6c9c6eSJan Kara static int balance_dirty_pages(struct bdi_writeback *wb,
1692fe6c9c6eSJan Kara unsigned long pages_dirtied, unsigned int flags)
16935a537485SMaxim Patlasov {
16942bc00aefSTejun Heo struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
1695c2aa723aSTejun Heo struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
16962bc00aefSTejun Heo struct dirty_throttle_control * const gdtc = &gdtc_stor;
1697c2aa723aSTejun Heo struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
1698c2aa723aSTejun Heo &mdtc_stor : NULL;
1699c2aa723aSTejun Heo struct dirty_throttle_control *sdtc;
17008d92890bSNeilBrown unsigned long nr_reclaimable; /* = file_dirty */
17015a537485SMaxim Patlasov long period;
17025a537485SMaxim Patlasov long pause;
17035a537485SMaxim Patlasov long max_pause;
17045a537485SMaxim Patlasov long min_pause;
17055a537485SMaxim Patlasov int nr_dirtied_pause;
17065a537485SMaxim Patlasov bool dirty_exceeded = false;
17075a537485SMaxim Patlasov unsigned long task_ratelimit;
17085a537485SMaxim Patlasov unsigned long dirty_ratelimit;
1709dfb8ae56STejun Heo struct backing_dev_info *bdi = wb->bdi;
17105a537485SMaxim Patlasov bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
17115a537485SMaxim Patlasov unsigned long start_time = jiffies;
1712fe6c9c6eSJan Kara int ret = 0;
17135a537485SMaxim Patlasov
17145a537485SMaxim Patlasov for (;;) {
17155a537485SMaxim Patlasov unsigned long now = jiffies;
17162bc00aefSTejun Heo unsigned long dirty, thresh, bg_thresh;
171750e55bf6SYang Shi unsigned long m_dirty = 0; /* stop bogus uninit warnings */
171850e55bf6SYang Shi unsigned long m_thresh = 0;
171950e55bf6SYang Shi unsigned long m_bg_thresh = 0;
17205a537485SMaxim Patlasov
17218d92890bSNeilBrown nr_reclaimable = global_node_page_state(NR_FILE_DIRTY);
17229fc3a43eSTejun Heo gdtc->avail = global_dirtyable_memory();
172311fb9989SMel Gorman gdtc->dirty = nr_reclaimable + global_node_page_state(NR_WRITEBACK);
17245a537485SMaxim Patlasov
17259fc3a43eSTejun Heo domain_dirty_limits(gdtc);
17265a537485SMaxim Patlasov
17275a537485SMaxim Patlasov if (unlikely(strictlimit)) {
1728970fb01aSTejun Heo wb_dirty_limits(gdtc);
17295a537485SMaxim Patlasov
17302bc00aefSTejun Heo dirty = gdtc->wb_dirty;
17312bc00aefSTejun Heo thresh = gdtc->wb_thresh;
1732970fb01aSTejun Heo bg_thresh = gdtc->wb_bg_thresh;
17335a537485SMaxim Patlasov } else {
17342bc00aefSTejun Heo dirty = gdtc->dirty;
17352bc00aefSTejun Heo thresh = gdtc->thresh;
17362bc00aefSTejun Heo bg_thresh = gdtc->bg_thresh;
17375a537485SMaxim Patlasov }
17385a537485SMaxim Patlasov
1739c2aa723aSTejun Heo if (mdtc) {
1740c5edf9cdSTejun Heo unsigned long filepages, headroom, writeback;
1741c2aa723aSTejun Heo
1742c2aa723aSTejun Heo /*
1743c2aa723aSTejun Heo * If @wb belongs to !root memcg, repeat the same
1744c2aa723aSTejun Heo * basic calculations for the memcg domain.
1745c2aa723aSTejun Heo */
1746c5edf9cdSTejun Heo mem_cgroup_wb_stats(wb, &filepages, &headroom,
1747c5edf9cdSTejun Heo &mdtc->dirty, &writeback);
1748c2aa723aSTejun Heo mdtc->dirty += writeback;
1749c5edf9cdSTejun Heo mdtc_calc_avail(mdtc, filepages, headroom);
1750c2aa723aSTejun Heo
1751c2aa723aSTejun Heo domain_dirty_limits(mdtc);
1752c2aa723aSTejun Heo
1753c2aa723aSTejun Heo if (unlikely(strictlimit)) {
1754c2aa723aSTejun Heo wb_dirty_limits(mdtc);
1755c2aa723aSTejun Heo m_dirty = mdtc->wb_dirty;
1756c2aa723aSTejun Heo m_thresh = mdtc->wb_thresh;
1757c2aa723aSTejun Heo m_bg_thresh = mdtc->wb_bg_thresh;
1758c2aa723aSTejun Heo } else {
1759c2aa723aSTejun Heo m_dirty = mdtc->dirty;
1760c2aa723aSTejun Heo m_thresh = mdtc->thresh;
1761c2aa723aSTejun Heo m_bg_thresh = mdtc->bg_thresh;
1762c2aa723aSTejun Heo }
17635a537485SMaxim Patlasov }
17645a537485SMaxim Patlasov
17655a537485SMaxim Patlasov /*
1766ea6813beSJan Kara * In laptop mode, we wait until hitting the higher threshold
1767ea6813beSJan Kara * before starting background writeout, and then write out all
1768ea6813beSJan Kara * the way down to the lower threshold. So slow writers cause
1769ea6813beSJan Kara * minimal disk activity.
1770ea6813beSJan Kara *
1771ea6813beSJan Kara * In normal mode, we start background writeout at the lower
1772ea6813beSJan Kara * background_thresh, to keep the amount of dirty memory low.
1773ea6813beSJan Kara */
1774ea6813beSJan Kara if (!laptop_mode && nr_reclaimable > gdtc->bg_thresh &&
1775ea6813beSJan Kara !writeback_in_progress(wb))
1776ea6813beSJan Kara wb_start_background_writeback(wb);
1777ea6813beSJan Kara
1778ea6813beSJan Kara /*
17795a537485SMaxim Patlasov * Throttle it only when the background writeback cannot
17805a537485SMaxim Patlasov * catch-up. This avoids (excessively) small writeouts
1781de1fff37STejun Heo * when the wb limits are ramping up in case of !strictlimit.
17825a537485SMaxim Patlasov *
1783de1fff37STejun Heo * In strictlimit case make decision based on the wb counters
1784de1fff37STejun Heo * and limits. Small writeouts when the wb limits are ramping
17855a537485SMaxim Patlasov * up are the price we consciously pay for strictlimit-ing.
1786c2aa723aSTejun Heo *
1787c2aa723aSTejun Heo * If memcg domain is in effect, @dirty should be under
1788c2aa723aSTejun Heo * both global and memcg freerun ceilings.
17895a537485SMaxim Patlasov */
1790c2aa723aSTejun Heo if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh) &&
1791c2aa723aSTejun Heo (!mdtc ||
1792c2aa723aSTejun Heo m_dirty <= dirty_freerun_ceiling(m_thresh, m_bg_thresh))) {
1793a37b0715SNeilBrown unsigned long intv;
1794a37b0715SNeilBrown unsigned long m_intv;
1795a37b0715SNeilBrown
1796a37b0715SNeilBrown free_running:
1797a37b0715SNeilBrown intv = dirty_poll_interval(dirty, thresh);
1798a37b0715SNeilBrown m_intv = ULONG_MAX;
1799c2aa723aSTejun Heo
18005a537485SMaxim Patlasov current->dirty_paused_when = now;
18015a537485SMaxim Patlasov current->nr_dirtied = 0;
1802c2aa723aSTejun Heo if (mdtc)
1803c2aa723aSTejun Heo m_intv = dirty_poll_interval(m_dirty, m_thresh);
1804c2aa723aSTejun Heo current->nr_dirtied_pause = min(intv, m_intv);
18055a537485SMaxim Patlasov break;
18065a537485SMaxim Patlasov }
18075a537485SMaxim Patlasov
1808ea6813beSJan Kara /* Start writeback even when in laptop mode */
1809bc05873dSTejun Heo if (unlikely(!writeback_in_progress(wb)))
18109ecf4866STejun Heo wb_start_background_writeback(wb);
18115a537485SMaxim Patlasov
181297b27821STejun Heo mem_cgroup_flush_foreign(wb);
181397b27821STejun Heo
1814c2aa723aSTejun Heo /*
1815c2aa723aSTejun Heo * Calculate global domain's pos_ratio and select the
1816c2aa723aSTejun Heo * global dtc by default.
1817c2aa723aSTejun Heo */
1818a37b0715SNeilBrown if (!strictlimit) {
1819970fb01aSTejun Heo wb_dirty_limits(gdtc);
18205fce25a9SPeter Zijlstra
1821a37b0715SNeilBrown if ((current->flags & PF_LOCAL_THROTTLE) &&
1822a37b0715SNeilBrown gdtc->wb_dirty <
1823a37b0715SNeilBrown dirty_freerun_ceiling(gdtc->wb_thresh,
1824a37b0715SNeilBrown gdtc->wb_bg_thresh))
1825a37b0715SNeilBrown /*
1826a37b0715SNeilBrown * LOCAL_THROTTLE tasks must not be throttled
1827a37b0715SNeilBrown * when below the per-wb freerun ceiling.
1828a37b0715SNeilBrown */
1829a37b0715SNeilBrown goto free_running;
1830a37b0715SNeilBrown }
1831a37b0715SNeilBrown
18322bc00aefSTejun Heo dirty_exceeded = (gdtc->wb_dirty > gdtc->wb_thresh) &&
18332bc00aefSTejun Heo ((gdtc->dirty > gdtc->thresh) || strictlimit);
18341da177e4SLinus Torvalds
1835daddfa3cSTejun Heo wb_position_ratio(gdtc);
1836c2aa723aSTejun Heo sdtc = gdtc;
1837e98be2d5SWu Fengguang
1838c2aa723aSTejun Heo if (mdtc) {
1839c2aa723aSTejun Heo /*
1840c2aa723aSTejun Heo * If memcg domain is in effect, calculate its
1841c2aa723aSTejun Heo * pos_ratio. @wb should satisfy constraints from
1842c2aa723aSTejun Heo * both global and memcg domains. Choose the one
1843c2aa723aSTejun Heo * w/ lower pos_ratio.
1844c2aa723aSTejun Heo */
1845a37b0715SNeilBrown if (!strictlimit) {
1846c2aa723aSTejun Heo wb_dirty_limits(mdtc);
1847c2aa723aSTejun Heo
1848a37b0715SNeilBrown if ((current->flags & PF_LOCAL_THROTTLE) &&
1849a37b0715SNeilBrown mdtc->wb_dirty <
1850a37b0715SNeilBrown dirty_freerun_ceiling(mdtc->wb_thresh,
1851a37b0715SNeilBrown mdtc->wb_bg_thresh))
1852a37b0715SNeilBrown /*
1853a37b0715SNeilBrown * LOCAL_THROTTLE tasks must not be
1854a37b0715SNeilBrown * throttled when below the per-wb
1855a37b0715SNeilBrown * freerun ceiling.
1856a37b0715SNeilBrown */
1857a37b0715SNeilBrown goto free_running;
1858a37b0715SNeilBrown }
1859c2aa723aSTejun Heo dirty_exceeded |= (mdtc->wb_dirty > mdtc->wb_thresh) &&
1860c2aa723aSTejun Heo ((mdtc->dirty > mdtc->thresh) || strictlimit);
1861c2aa723aSTejun Heo
1862c2aa723aSTejun Heo wb_position_ratio(mdtc);
1863c2aa723aSTejun Heo if (mdtc->pos_ratio < gdtc->pos_ratio)
1864c2aa723aSTejun Heo sdtc = mdtc;
1865c2aa723aSTejun Heo }
1866daddfa3cSTejun Heo
1867e92eebbbSJan Kara if (dirty_exceeded != wb->dirty_exceeded)
1868e92eebbbSJan Kara wb->dirty_exceeded = dirty_exceeded;
186904fbfdc1SPeter Zijlstra
187020792ebfSJan Kara if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
187145a2966fSJan Kara BANDWIDTH_INTERVAL))
1872fee468fdSJan Kara __wb_update_bandwidth(gdtc, mdtc, true);
18731da177e4SLinus Torvalds
1874c2aa723aSTejun Heo /* throttle according to the chosen dtc */
187520792ebfSJan Kara dirty_ratelimit = READ_ONCE(wb->dirty_ratelimit);
1876c2aa723aSTejun Heo task_ratelimit = ((u64)dirty_ratelimit * sdtc->pos_ratio) >>
18773a73dbbcSWu Fengguang RATELIMIT_CALC_SHIFT;
1878c2aa723aSTejun Heo max_pause = wb_max_pause(wb, sdtc->wb_dirty);
1879a88a341aSTejun Heo min_pause = wb_min_pause(wb, max_pause,
18807ccb9ad5SWu Fengguang task_ratelimit, dirty_ratelimit,
18817ccb9ad5SWu Fengguang &nr_dirtied_pause);
18827ccb9ad5SWu Fengguang
18833a73dbbcSWu Fengguang if (unlikely(task_ratelimit == 0)) {
188483712358SWu Fengguang period = max_pause;
1885c8462cc9SWu Fengguang pause = max_pause;
1886143dfe86SWu Fengguang goto pause;
1887e50e3720SWu Fengguang }
188883712358SWu Fengguang period = HZ * pages_dirtied / task_ratelimit;
188983712358SWu Fengguang pause = period;
189083712358SWu Fengguang if (current->dirty_paused_when)
189183712358SWu Fengguang pause -= now - current->dirty_paused_when;
189283712358SWu Fengguang /*
189383712358SWu Fengguang * For less than 1s think time (ext3/4 may block the dirtier
189483712358SWu Fengguang * for up to 800ms from time to time on 1-HDD; so does xfs,
189583712358SWu Fengguang * though much less frequently), try to compensate for it in
189683712358SWu Fengguang * future periods by updating the virtual time; otherwise just
189783712358SWu Fengguang * do a reset, as it may be a light dirtier.
189883712358SWu Fengguang */
18997ccb9ad5SWu Fengguang if (pause < min_pause) {
19005634cc2aSTejun Heo trace_balance_dirty_pages(wb,
1901c2aa723aSTejun Heo sdtc->thresh,
1902c2aa723aSTejun Heo sdtc->bg_thresh,
1903c2aa723aSTejun Heo sdtc->dirty,
1904c2aa723aSTejun Heo sdtc->wb_thresh,
1905c2aa723aSTejun Heo sdtc->wb_dirty,
1906ece13ac3SWu Fengguang dirty_ratelimit,
1907ece13ac3SWu Fengguang task_ratelimit,
1908ece13ac3SWu Fengguang pages_dirtied,
190983712358SWu Fengguang period,
19107ccb9ad5SWu Fengguang min(pause, 0L),
1911ece13ac3SWu Fengguang start_time);
191283712358SWu Fengguang if (pause < -HZ) {
191383712358SWu Fengguang current->dirty_paused_when = now;
191483712358SWu Fengguang current->nr_dirtied = 0;
191583712358SWu Fengguang } else if (period) {
191683712358SWu Fengguang current->dirty_paused_when += period;
191783712358SWu Fengguang current->nr_dirtied = 0;
19187ccb9ad5SWu Fengguang } else if (current->nr_dirtied_pause <= pages_dirtied)
19197ccb9ad5SWu Fengguang current->nr_dirtied_pause += pages_dirtied;
192057fc978cSWu Fengguang break;
192157fc978cSWu Fengguang }
19227ccb9ad5SWu Fengguang if (unlikely(pause > max_pause)) {
19237ccb9ad5SWu Fengguang /* for occasional dropped task_ratelimit */
19247ccb9ad5SWu Fengguang now += min(pause - max_pause, max_pause);
19257ccb9ad5SWu Fengguang pause = max_pause;
19267ccb9ad5SWu Fengguang }
1927143dfe86SWu Fengguang
1928143dfe86SWu Fengguang pause:
19295634cc2aSTejun Heo trace_balance_dirty_pages(wb,
1930c2aa723aSTejun Heo sdtc->thresh,
1931c2aa723aSTejun Heo sdtc->bg_thresh,
1932c2aa723aSTejun Heo sdtc->dirty,
1933c2aa723aSTejun Heo sdtc->wb_thresh,
1934c2aa723aSTejun Heo sdtc->wb_dirty,
1935ece13ac3SWu Fengguang dirty_ratelimit,
1936ece13ac3SWu Fengguang task_ratelimit,
1937ece13ac3SWu Fengguang pages_dirtied,
193883712358SWu Fengguang period,
1939ece13ac3SWu Fengguang pause,
1940ece13ac3SWu Fengguang start_time);
1941fe6c9c6eSJan Kara if (flags & BDP_ASYNC) {
1942fe6c9c6eSJan Kara ret = -EAGAIN;
1943fe6c9c6eSJan Kara break;
1944fe6c9c6eSJan Kara }
1945499d05ecSJan Kara __set_current_state(TASK_KILLABLE);
1946601b5540SJan Kara bdi->last_bdp_sleep = jiffies;
1947d25105e8SWu Fengguang io_schedule_timeout(pause);
194887c6a9b2SJens Axboe
194983712358SWu Fengguang current->dirty_paused_when = now + pause;
195083712358SWu Fengguang current->nr_dirtied = 0;
19517ccb9ad5SWu Fengguang current->nr_dirtied_pause = nr_dirtied_pause;
195283712358SWu Fengguang
1953ffd1f609SWu Fengguang /*
19542bc00aefSTejun Heo * This is typically equal to (dirty < thresh) and can also
19552bc00aefSTejun Heo * keep "1000+ dd on a slow USB stick" under control.
1956ffd1f609SWu Fengguang */
19571df64719SWu Fengguang if (task_ratelimit)
1958ffd1f609SWu Fengguang break;
1959499d05ecSJan Kara
1960c5c6343cSWu Fengguang /*
1961f0953a1bSIngo Molnar * When an NFS server is unresponsive and its dirty pages exceed
1962de1fff37STejun Heo * dirty_thresh, give the other good wb's a pipe to go through,
1963c5c6343cSWu Fengguang * so that tasks on them still remain responsive.
1964c5c6343cSWu Fengguang *
19653f8b6fb7SMasahiro Yamada * In theory 1 page is enough to keep the consumer-producer
1966c5c6343cSWu Fengguang * pipe going: the flusher cleans 1 page => the task dirties 1
1967de1fff37STejun Heo * more page. However wb_dirty has accounting errors. So use
196893f78d88STejun Heo * the larger and more IO-friendly wb_stat_error.
1969c5c6343cSWu Fengguang */
19702bce774eSWang Long if (sdtc->wb_dirty <= wb_stat_error())
1971c5c6343cSWu Fengguang break;
1972c5c6343cSWu Fengguang
1973499d05ecSJan Kara if (fatal_signal_pending(current))
1974499d05ecSJan Kara break;
19751da177e4SLinus Torvalds }
1976fe6c9c6eSJan Kara return ret;
19771da177e4SLinus Torvalds }
19781da177e4SLinus Torvalds
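/*
 * Illustrative userspace sketch (not kernel code, hence #if 0): the
 * think time compensation done in the loop above. A burst of dirtying
 * "costs" period = HZ * pages_dirtied / task_ratelimit ticks; time the
 * task already spent thinking is credited against the pause. Numbers
 * are assumptions, with HZ=1000 so ticks ~= ms.
 */
#if 0
#include <stdio.h>

#define DEMO_HZ	1000

int main(void)
{
	long task_ratelimit = 1000;	/* pages/s allowed for this task */
	long pages_dirtied = 50;
	long think = 30;		/* ms since the task last slept here */
	long period = DEMO_HZ * pages_dirtied / task_ratelimit;
	long pause = period - think;	/* 50ms owed, 30ms already served */

	printf("period=%ldms pause=%ldms\n", period, pause);
	return 0;
}
#endif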
19799d823e8fSWu Fengguang static DEFINE_PER_CPU(int, bdp_ratelimits);
1980245b2e70STejun Heo
198154848d73SWu Fengguang /*
198254848d73SWu Fengguang * Normal tasks are throttled by
198354848d73SWu Fengguang * loop {
198454848d73SWu Fengguang * dirty tsk->nr_dirtied_pause pages;
198554848d73SWu Fengguang * take a snap in balance_dirty_pages();
198654848d73SWu Fengguang * }
198754848d73SWu Fengguang * However there is a worst case: if every task exits immediately after dirtying
198854848d73SWu Fengguang * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be
198954848d73SWu Fengguang * called to throttle the page dirties. The solution is to save the not yet
199054848d73SWu Fengguang * throttled page dirties in dirty_throttle_leaks on task exit and charge them
199154848d73SWu Fengguang * randomly into the running tasks. This works well for the above worst case,
199254848d73SWu Fengguang * as the new task will pick up and accumulate the old task's leaked dirty
199354848d73SWu Fengguang * count and eventually get throttled.
199454848d73SWu Fengguang */
199554848d73SWu Fengguang DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
199654848d73SWu Fengguang
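/*
 * Illustrative userspace sketch (not kernel code, hence #if 0): the
 * hand-off described above. An exiting task banks its unthrottled
 * dirties; the next dirtier picks them up so they still count towards
 * its ratelimit. This mirrors the pickup done later in
 * balance_dirty_pages_ratelimited_flags(), with made-up numbers.
 */
#if 0
#include <stdio.h>

static int demo_leaks;	/* stands in for per-cpu dirty_throttle_leaks */

static void demo_task_exit(int nr_dirtied)
{
	demo_leaks += nr_dirtied;	/* bank the unthrottled dirties */
}

static void demo_pick_up(int *nr_dirtied, int ratelimit)
{
	if (demo_leaks > 0 && *nr_dirtied < ratelimit) {
		int take = demo_leaks;

		if (take > ratelimit - *nr_dirtied)
			take = ratelimit - *nr_dirtied;
		demo_leaks -= take;
		*nr_dirtied += take;	/* charged to the running task */
	}
}

int main(void)
{
	int nr_dirtied = 3;

	demo_task_exit(31);		/* a short-lived gcc exits early */
	demo_pick_up(&nr_dirtied, 32);
	/* prints leaks=2 nr_dirtied=32 */
	printf("leaks=%d nr_dirtied=%d\n", demo_leaks, nr_dirtied);
	return 0;
}
#endif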
19971da177e4SLinus Torvalds /**
1998fe6c9c6eSJan Kara * balance_dirty_pages_ratelimited_flags - Balance dirty memory state.
1999fe6c9c6eSJan Kara * @mapping: address_space which was dirtied.
2000fe6c9c6eSJan Kara * @flags: BDP flags.
20011da177e4SLinus Torvalds *
20021da177e4SLinus Torvalds * Processes which are dirtying memory should call in here once for each page
20031da177e4SLinus Torvalds * which was newly dirtied. The function will periodically check the system's
20041da177e4SLinus Torvalds * dirty state and will initiate writeback if needed.
20051da177e4SLinus Torvalds *
2006fe6c9c6eSJan Kara * See balance_dirty_pages_ratelimited() for details.
2007fe6c9c6eSJan Kara *
2008fe6c9c6eSJan Kara * Return: If @flags contains BDP_ASYNC, it may return -EAGAIN to
2009fe6c9c6eSJan Kara * indicate that memory is out of balance and the caller must wait
2010fe6c9c6eSJan Kara * for I/O to complete. Otherwise, it will return 0 to indicate
2011fe6c9c6eSJan Kara * that either memory was already in balance, or it was able to sleep
2012fe6c9c6eSJan Kara * until the amount of dirty memory returned to balance.
20131da177e4SLinus Torvalds */
2014fe6c9c6eSJan Kara int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,
2015fe6c9c6eSJan Kara unsigned int flags)
20161da177e4SLinus Torvalds {
2017dfb8ae56STejun Heo struct inode *inode = mapping->host;
2018dfb8ae56STejun Heo struct backing_dev_info *bdi = inode_to_bdi(inode);
2019dfb8ae56STejun Heo struct bdi_writeback *wb = NULL;
20209d823e8fSWu Fengguang int ratelimit;
2021fe6c9c6eSJan Kara int ret = 0;
20229d823e8fSWu Fengguang int *p;
20231da177e4SLinus Torvalds
2024f56753acSChristoph Hellwig if (!(bdi->capabilities & BDI_CAP_WRITEBACK))
2025fe6c9c6eSJan Kara return ret;
202636715cefSWu Fengguang
2027dfb8ae56STejun Heo if (inode_cgwb_enabled(inode))
2028dfb8ae56STejun Heo wb = wb_get_create_current(bdi, GFP_KERNEL);
2029dfb8ae56STejun Heo if (!wb)
2030dfb8ae56STejun Heo wb = &bdi->wb;
2031dfb8ae56STejun Heo
20329d823e8fSWu Fengguang ratelimit = current->nr_dirtied_pause;
2033a88a341aSTejun Heo if (wb->dirty_exceeded)
20349d823e8fSWu Fengguang ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));
20351da177e4SLinus Torvalds
2036fa5a734eSAndrew Morton preempt_disable();
20379d823e8fSWu Fengguang /*
20389d823e8fSWu Fengguang * This prevents one CPU from accumulating too many dirtied pages without
20399d823e8fSWu Fengguang * calling into balance_dirty_pages(), which can happen when 1000+ tasks
20409d823e8fSWu Fengguang * all start dirtying pages at exactly the same time, hence all honouring
20419d823e8fSWu Fengguang * a too-large initial task->nr_dirtied_pause.
20429d823e8fSWu Fengguang */
20437c8e0181SChristoph Lameter p = this_cpu_ptr(&bdp_ratelimits);
20449d823e8fSWu Fengguang if (unlikely(current->nr_dirtied >= ratelimit))
2045fa5a734eSAndrew Morton *p = 0;
2046d3bc1fefSWu Fengguang else if (unlikely(*p >= ratelimit_pages)) {
20479d823e8fSWu Fengguang *p = 0;
20489d823e8fSWu Fengguang ratelimit = 0;
20499d823e8fSWu Fengguang }
205054848d73SWu Fengguang /*
205154848d73SWu Fengguang * Pick up the dirtied pages by the exited tasks. This avoids lots of
205254848d73SWu Fengguang * short-lived tasks (e.g. gcc invocations in a kernel build) escaping
205354848d73SWu Fengguang * the dirty throttling and livelock other long-run dirtiers.
205454848d73SWu Fengguang */
20557c8e0181SChristoph Lameter p = this_cpu_ptr(&dirty_throttle_leaks);
205654848d73SWu Fengguang if (*p > 0 && current->nr_dirtied < ratelimit) {
2057d0e1d66bSNamjae Jeon unsigned long nr_pages_dirtied;
205854848d73SWu Fengguang nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
205954848d73SWu Fengguang *p -= nr_pages_dirtied;
206054848d73SWu Fengguang current->nr_dirtied += nr_pages_dirtied;
20611da177e4SLinus Torvalds }
2062fa5a734eSAndrew Morton preempt_enable();
20639d823e8fSWu Fengguang
20649d823e8fSWu Fengguang if (unlikely(current->nr_dirtied >= ratelimit))
2065fe6c9c6eSJan Kara ret = balance_dirty_pages(wb, current->nr_dirtied, flags);
2066dfb8ae56STejun Heo
2067dfb8ae56STejun Heo wb_put(wb);
2068fe6c9c6eSJan Kara return ret;
2069fe6c9c6eSJan Kara }
2070611df5d6SStefan Roesch EXPORT_SYMBOL_GPL(balance_dirty_pages_ratelimited_flags);
2071fe6c9c6eSJan Kara
2072fe6c9c6eSJan Kara /**
2073fe6c9c6eSJan Kara * balance_dirty_pages_ratelimited - balance dirty memory state.
2074fe6c9c6eSJan Kara * @mapping: address_space which was dirtied.
2075fe6c9c6eSJan Kara *
2076fe6c9c6eSJan Kara * Processes which are dirtying memory should call in here once for each page
2077fe6c9c6eSJan Kara * which was newly dirtied. The function will periodically check the system's
2078fe6c9c6eSJan Kara * dirty state and will initiate writeback if needed.
2079fe6c9c6eSJan Kara *
2080fe6c9c6eSJan Kara * Once we're over the dirty memory limit we decrease the ratelimiting
2081fe6c9c6eSJan Kara * by a lot, to prevent individual processes from overshooting the limit
2082fe6c9c6eSJan Kara * by (ratelimit_pages) each.
2083fe6c9c6eSJan Kara */
2084fe6c9c6eSJan Kara void balance_dirty_pages_ratelimited(struct address_space *mapping)
2085fe6c9c6eSJan Kara {
2086fe6c9c6eSJan Kara balance_dirty_pages_ratelimited_flags(mapping, 0);
20871da177e4SLinus Torvalds }
2088d0e1d66bSNamjae Jeon EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
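
/*
 * Illustrative sketch: the typical call site (e.g. a generic buffered
 * write loop) dirties folios and then calls in once per page, letting
 * the ratelimiting above decide when balance_dirty_pages() actually
 * runs (simplified, hypothetical loop):
 *
 *	while (bytes_left) {
 *		// ...copy one page worth of data and mark it dirty...
 *		balance_dirty_pages_ratelimited(mapping);
 *	}
 */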
20891da177e4SLinus Torvalds
2090aa661bbeSTejun Heo /**
2091aa661bbeSTejun Heo * wb_over_bg_thresh - does @wb need to be written back?
2092aa661bbeSTejun Heo * @wb: bdi_writeback of interest
2093aa661bbeSTejun Heo *
2094aa661bbeSTejun Heo * Determines whether background writeback should keep writing @wb or
2095a862f68aSMike Rapoport * whether it is already clean enough.
2096a862f68aSMike Rapoport *
2097a862f68aSMike Rapoport * Return: %true if writeback should continue.
2098aa661bbeSTejun Heo */
2099aa661bbeSTejun Heo bool wb_over_bg_thresh(struct bdi_writeback *wb)
2100aa661bbeSTejun Heo {
2101947e9762STejun Heo struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
2102c2aa723aSTejun Heo struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
2103947e9762STejun Heo struct dirty_throttle_control * const gdtc = &gdtc_stor;
2104c2aa723aSTejun Heo struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
2105c2aa723aSTejun Heo &mdtc_stor : NULL;
2106ab19939aSChi Wu unsigned long reclaimable;
2107ab19939aSChi Wu unsigned long thresh;
2108aa661bbeSTejun Heo
2109947e9762STejun Heo /*
2110947e9762STejun Heo * Similar to balance_dirty_pages() but ignores pages being written
2111947e9762STejun Heo * as we're trying to decide whether to put more under writeback.
2112947e9762STejun Heo */
2113947e9762STejun Heo gdtc->avail = global_dirtyable_memory();
21148d92890bSNeilBrown gdtc->dirty = global_node_page_state(NR_FILE_DIRTY);
2115947e9762STejun Heo domain_dirty_limits(gdtc);
2116aa661bbeSTejun Heo
2117947e9762STejun Heo if (gdtc->dirty > gdtc->bg_thresh)
2118aa661bbeSTejun Heo return true;
2119aa661bbeSTejun Heo
2120ab19939aSChi Wu thresh = wb_calc_thresh(gdtc->wb, gdtc->bg_thresh);
2121ab19939aSChi Wu if (thresh < 2 * wb_stat_error())
2122ab19939aSChi Wu reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
2123ab19939aSChi Wu else
2124ab19939aSChi Wu reclaimable = wb_stat(wb, WB_RECLAIMABLE);
2125ab19939aSChi Wu
2126ab19939aSChi Wu if (reclaimable > thresh)
2127aa661bbeSTejun Heo return true;
2128aa661bbeSTejun Heo
2129c2aa723aSTejun Heo if (mdtc) {
2130c5edf9cdSTejun Heo unsigned long filepages, headroom, writeback;
2131c2aa723aSTejun Heo
2132c5edf9cdSTejun Heo mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty,
2133c5edf9cdSTejun Heo &writeback);
2134c5edf9cdSTejun Heo mdtc_calc_avail(mdtc, filepages, headroom);
2135c2aa723aSTejun Heo domain_dirty_limits(mdtc); /* ditto, ignore writeback */
2136c2aa723aSTejun Heo
2137c2aa723aSTejun Heo if (mdtc->dirty > mdtc->bg_thresh)
2138c2aa723aSTejun Heo return true;
2139c2aa723aSTejun Heo
2140ab19939aSChi Wu thresh = wb_calc_thresh(mdtc->wb, mdtc->bg_thresh);
2141ab19939aSChi Wu if (thresh < 2 * wb_stat_error())
2142ab19939aSChi Wu reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
2143ab19939aSChi Wu else
2144ab19939aSChi Wu reclaimable = wb_stat(wb, WB_RECLAIMABLE);
2145ab19939aSChi Wu
2146ab19939aSChi Wu if (reclaimable > thresh)
2147c2aa723aSTejun Heo return true;
2148c2aa723aSTejun Heo }
2149c2aa723aSTejun Heo
2150aa661bbeSTejun Heo return false;
2151aa661bbeSTejun Heo }
2152aa661bbeSTejun Heo
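
/*
 * Illustrative sketch: background flushing keeps pushing pages while
 * @wb stays over its background threshold, roughly what the
 * WB_REASON_BACKGROUND loop in fs/fs-writeback.c does (simplified):
 *
 *	while (wb_over_bg_thresh(wb))
 *		wrote += writeback_inodes_wb(wb, MAX_WRITEBACK_PAGES,
 *					     WB_REASON_BACKGROUND);
 */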
2153aa779e51Szhanglianjie #ifdef CONFIG_SYSCTL
21541da177e4SLinus Torvalds /*
21551da177e4SLinus Torvalds * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
21561da177e4SLinus Torvalds */
2157aa779e51Szhanglianjie static int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
215832927393SChristoph Hellwig void *buffer, size_t *length, loff_t *ppos)
21591da177e4SLinus Torvalds {
216094af5846SYafang Shao unsigned int old_interval = dirty_writeback_interval;
216194af5846SYafang Shao int ret;
216294af5846SYafang Shao
216394af5846SYafang Shao ret = proc_dointvec(table, write, buffer, length, ppos);
2164515c24c1SYafang Shao
2165515c24c1SYafang Shao /*
2166515c24c1SYafang Shao * Writing 0 to dirty_writeback_interval will disable periodic writeback
2167515c24c1SYafang Shao * and any different non-zero value will wake up the writeback threads.
2168515c24c1SYafang Shao * wb_wakeup_delayed() would be more appropriate, but it's a pain to
2169515c24c1SYafang Shao * iterate over all bdis and wbs.
2170515c24c1SYafang Shao * The reason we do this is to make the change take effect immediately.
2171515c24c1SYafang Shao */
2172515c24c1SYafang Shao if (!ret && write && dirty_writeback_interval &&
2173515c24c1SYafang Shao dirty_writeback_interval != old_interval)
217494af5846SYafang Shao wakeup_flusher_threads(WB_REASON_PERIODIC);
217594af5846SYafang Shao
217694af5846SYafang Shao return ret;
21771da177e4SLinus Torvalds }
2178aa779e51Szhanglianjie #endif
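
/*
 * Example usage from userspace (illustrative values): the handler above
 * runs whenever the interval is written, e.g.
 *
 *	# disable periodic writeback entirely
 *	echo 0 > /proc/sys/vm/dirty_writeback_centisecs
 *	# wake the flushers and switch to a 10 second cadence
 *	echo 1000 > /proc/sys/vm/dirty_writeback_centisecs
 */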
21791da177e4SLinus Torvalds
2180bca237a5SKees Cook void laptop_mode_timer_fn(struct timer_list *t)
21811da177e4SLinus Torvalds {
2182bca237a5SKees Cook struct backing_dev_info *backing_dev_info =
2183bca237a5SKees Cook from_timer(backing_dev_info, t, laptop_mode_wb_timer);
21841da177e4SLinus Torvalds
2185bca237a5SKees Cook wakeup_flusher_threads_bdi(backing_dev_info, WB_REASON_LAPTOP_TIMER);
21861da177e4SLinus Torvalds }
21871da177e4SLinus Torvalds
21881da177e4SLinus Torvalds /*
21891da177e4SLinus Torvalds * We've spun up the disk and we're in laptop mode: schedule writeback
21901da177e4SLinus Torvalds * of all dirty data a few seconds from now. If the flush is already scheduled
21911da177e4SLinus Torvalds * then push it back - the user is still using the disk.
21921da177e4SLinus Torvalds */
219331373d09SMatthew Garrett void laptop_io_completion(struct backing_dev_info *info)
21941da177e4SLinus Torvalds {
219531373d09SMatthew Garrett mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
21961da177e4SLinus Torvalds }
21971da177e4SLinus Torvalds
21981da177e4SLinus Torvalds /*
21991da177e4SLinus Torvalds * We're in laptop mode and we've just synced. The sync's writes will have
22001da177e4SLinus Torvalds * caused another writeback to be scheduled by laptop_io_completion.
22011da177e4SLinus Torvalds * Nothing needs to be written back anymore, so we unschedule the writeback.
22021da177e4SLinus Torvalds */
22031da177e4SLinus Torvalds void laptop_sync_completion(void)
22041da177e4SLinus Torvalds {
220531373d09SMatthew Garrett struct backing_dev_info *bdi;
220631373d09SMatthew Garrett
220731373d09SMatthew Garrett rcu_read_lock();
220831373d09SMatthew Garrett
220931373d09SMatthew Garrett list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
221031373d09SMatthew Garrett del_timer(&bdi->laptop_mode_wb_timer);
221131373d09SMatthew Garrett
221231373d09SMatthew Garrett rcu_read_unlock();
22131da177e4SLinus Torvalds }
22141da177e4SLinus Torvalds
22151da177e4SLinus Torvalds /*
22161da177e4SLinus Torvalds * If ratelimit_pages is too high then we can get into dirty-data overload
22171da177e4SLinus Torvalds * if a large number of processes all perform writes at the same time.
22181da177e4SLinus Torvalds *
22191da177e4SLinus Torvalds * Here we set ratelimit_pages to a level which ensures that when all CPUs are
22201da177e4SLinus Torvalds * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
22219d823e8fSWu Fengguang * thresholds.
22221da177e4SLinus Torvalds */
22231da177e4SLinus Torvalds
22242d1d43f6SChandra Seetharaman void writeback_set_ratelimit(void)
22251da177e4SLinus Torvalds {
2226dcc25ae7STejun Heo struct wb_domain *dom = &global_wb_domain;
22279d823e8fSWu Fengguang unsigned long background_thresh;
22289d823e8fSWu Fengguang unsigned long dirty_thresh;
2229dcc25ae7STejun Heo
22309d823e8fSWu Fengguang global_dirty_limits(&background_thresh, &dirty_thresh);
2231dcc25ae7STejun Heo dom->dirty_limit = dirty_thresh;
22329d823e8fSWu Fengguang ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
22331da177e4SLinus Torvalds if (ratelimit_pages < 16)
22341da177e4SLinus Torvalds ratelimit_pages = 16;
22351da177e4SLinus Torvalds }
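
/*
 * Worked example (illustrative numbers): with a dirty threshold of
 * 262144 pages (1GiB of 4KiB pages) on an 8-CPU machine,
 * ratelimit_pages = 262144 / (8 * 32) = 1024, so each CPU may dirty up
 * to ~1024 pages between balance_dirty_pages() calls and the CPUs can
 * collectively overshoot the threshold by at most about 1/32 (~3%).
 */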
22361da177e4SLinus Torvalds
22371d7ac6aeSSebastian Andrzej Siewior static int page_writeback_cpu_online(unsigned int cpu)
22381da177e4SLinus Torvalds {
22392d1d43f6SChandra Seetharaman writeback_set_ratelimit();
22401d7ac6aeSSebastian Andrzej Siewior return 0;
22411da177e4SLinus Torvalds }
22421da177e4SLinus Torvalds
2243aa779e51Szhanglianjie #ifdef CONFIG_SYSCTL
22443c6a4cbaSLuis Chamberlain
22453c6a4cbaSLuis Chamberlain /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
22463c6a4cbaSLuis Chamberlain static const unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
22473c6a4cbaSLuis Chamberlain
2248aa779e51Szhanglianjie static struct ctl_table vm_page_writeback_sysctls[] = {
2249aa779e51Szhanglianjie {
2250aa779e51Szhanglianjie .procname = "dirty_background_ratio",
2251aa779e51Szhanglianjie .data = &dirty_background_ratio,
2252aa779e51Szhanglianjie .maxlen = sizeof(dirty_background_ratio),
2253aa779e51Szhanglianjie .mode = 0644,
2254aa779e51Szhanglianjie .proc_handler = dirty_background_ratio_handler,
2255aa779e51Szhanglianjie .extra1 = SYSCTL_ZERO,
2256aa779e51Szhanglianjie .extra2 = SYSCTL_ONE_HUNDRED,
2257aa779e51Szhanglianjie },
2258aa779e51Szhanglianjie {
2259aa779e51Szhanglianjie .procname = "dirty_background_bytes",
2260aa779e51Szhanglianjie .data = &dirty_background_bytes,
2261aa779e51Szhanglianjie .maxlen = sizeof(dirty_background_bytes),
2262aa779e51Szhanglianjie .mode = 0644,
2263aa779e51Szhanglianjie .proc_handler = dirty_background_bytes_handler,
2264aa779e51Szhanglianjie .extra1 = SYSCTL_LONG_ONE,
2265aa779e51Szhanglianjie },
2266aa779e51Szhanglianjie {
2267aa779e51Szhanglianjie .procname = "dirty_ratio",
2268aa779e51Szhanglianjie .data = &vm_dirty_ratio,
2269aa779e51Szhanglianjie .maxlen = sizeof(vm_dirty_ratio),
2270aa779e51Szhanglianjie .mode = 0644,
2271aa779e51Szhanglianjie .proc_handler = dirty_ratio_handler,
2272aa779e51Szhanglianjie .extra1 = SYSCTL_ZERO,
2273aa779e51Szhanglianjie .extra2 = SYSCTL_ONE_HUNDRED,
2274aa779e51Szhanglianjie },
2275aa779e51Szhanglianjie {
2276aa779e51Szhanglianjie .procname = "dirty_bytes",
2277aa779e51Szhanglianjie .data = &vm_dirty_bytes,
2278aa779e51Szhanglianjie .maxlen = sizeof(vm_dirty_bytes),
2279aa779e51Szhanglianjie .mode = 0644,
2280aa779e51Szhanglianjie .proc_handler = dirty_bytes_handler,
2281aa779e51Szhanglianjie .extra1 = (void *)&dirty_bytes_min,
2282aa779e51Szhanglianjie },
2283aa779e51Szhanglianjie {
2284aa779e51Szhanglianjie .procname = "dirty_writeback_centisecs",
2285aa779e51Szhanglianjie .data = &dirty_writeback_interval,
2286aa779e51Szhanglianjie .maxlen = sizeof(dirty_writeback_interval),
2287aa779e51Szhanglianjie .mode = 0644,
2288aa779e51Szhanglianjie .proc_handler = dirty_writeback_centisecs_handler,
2289aa779e51Szhanglianjie },
2290aa779e51Szhanglianjie {
2291aa779e51Szhanglianjie .procname = "dirty_expire_centisecs",
2292aa779e51Szhanglianjie .data = &dirty_expire_interval,
2293aa779e51Szhanglianjie .maxlen = sizeof(dirty_expire_interval),
2294aa779e51Szhanglianjie .mode = 0644,
2295aa779e51Szhanglianjie .proc_handler = proc_dointvec_minmax,
2296aa779e51Szhanglianjie .extra1 = SYSCTL_ZERO,
2297aa779e51Szhanglianjie },
2298aa779e51Szhanglianjie #ifdef CONFIG_HIGHMEM
2299aa779e51Szhanglianjie {
2300aa779e51Szhanglianjie .procname = "highmem_is_dirtyable",
2301aa779e51Szhanglianjie .data = &vm_highmem_is_dirtyable,
2302aa779e51Szhanglianjie .maxlen = sizeof(vm_highmem_is_dirtyable),
2303aa779e51Szhanglianjie .mode = 0644,
2304aa779e51Szhanglianjie .proc_handler = proc_dointvec_minmax,
2305aa779e51Szhanglianjie .extra1 = SYSCTL_ZERO,
2306aa779e51Szhanglianjie .extra2 = SYSCTL_ONE,
2307aa779e51Szhanglianjie },
2308aa779e51Szhanglianjie #endif
2309aa779e51Szhanglianjie {
2310aa779e51Szhanglianjie .procname = "laptop_mode",
2311aa779e51Szhanglianjie .data = &laptop_mode,
2312aa779e51Szhanglianjie .maxlen = sizeof(laptop_mode),
2313aa779e51Szhanglianjie .mode = 0644,
2314aa779e51Szhanglianjie .proc_handler = proc_dointvec_jiffies,
2315aa779e51Szhanglianjie },
2316aa779e51Szhanglianjie {}
2317aa779e51Szhanglianjie };
2318aa779e51Szhanglianjie #endif
2319aa779e51Szhanglianjie
23201da177e4SLinus Torvalds /*
2321dc6e29daSLinus Torvalds * Called early on to tune the page writeback dirty limits.
2322dc6e29daSLinus Torvalds *
2323dc6e29daSLinus Torvalds * We used to scale dirty pages according to how total memory
23240a18e607SDavid Hildenbrand * compared to the amount of memory that could be allocated for buffers.
2325dc6e29daSLinus Torvalds *
2326dc6e29daSLinus Torvalds * However, that was when we used "dirty_ratio" to scale with
2327dc6e29daSLinus Torvalds * all memory, and we don't do that any more. "dirty_ratio"
23280a18e607SDavid Hildenbrand * is now applied to total non-HIGHMEM memory, and as such we can't
2329dc6e29daSLinus Torvalds * get into the old insane situation any more where we had
2330dc6e29daSLinus Torvalds * large amounts of dirty pages compared to a small amount of
2331dc6e29daSLinus Torvalds * non-HIGHMEM memory.
2332dc6e29daSLinus Torvalds *
2333dc6e29daSLinus Torvalds * But we might still want to scale the dirty_ratio by how
2334dc6e29daSLinus Torvalds * much memory the box has.
23351da177e4SLinus Torvalds */
23361da177e4SLinus Torvalds void __init page_writeback_init(void)
23371da177e4SLinus Torvalds {
2338a50fcb51SRabin Vincent BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
2339a50fcb51SRabin Vincent
23401d7ac6aeSSebastian Andrzej Siewior cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/writeback:online",
23411d7ac6aeSSebastian Andrzej Siewior page_writeback_cpu_online, NULL);
23421d7ac6aeSSebastian Andrzej Siewior cpuhp_setup_state(CPUHP_MM_WRITEBACK_DEAD, "mm/writeback:dead", NULL,
23431d7ac6aeSSebastian Andrzej Siewior page_writeback_cpu_online);
2344aa779e51Szhanglianjie #ifdef CONFIG_SYSCTL
2345aa779e51Szhanglianjie register_sysctl_init("vm", vm_page_writeback_sysctls);
2346aa779e51Szhanglianjie #endif
23471da177e4SLinus Torvalds }
23481da177e4SLinus Torvalds
2349811d736fSDavid Howells /**
2350f446daaeSJan Kara * tag_pages_for_writeback - tag pages to be written by write_cache_pages
2351f446daaeSJan Kara * @mapping: address space structure to write
2352f446daaeSJan Kara * @start: starting page index
2353f446daaeSJan Kara * @end: ending page index (inclusive)
2354f446daaeSJan Kara *
2355f446daaeSJan Kara * This function scans the page range from @start to @end (inclusive) and tags
2356f446daaeSJan Kara * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
2357f446daaeSJan Kara * that write_cache_pages (or whoever calls this function) will then use
2358f446daaeSJan Kara * TOWRITE tag to identify pages eligible for writeback. This mechanism is
2359f446daaeSJan Kara * used to avoid livelocking of writeback by a process steadily creating new
2360f446daaeSJan Kara * dirty pages in the file (thus it is important for this function to be quick
2361f446daaeSJan Kara * so that it can tag pages faster than a dirtying process can create them).
2362f446daaeSJan Kara */
2363f446daaeSJan Kara void tag_pages_for_writeback(struct address_space *mapping,
2364f446daaeSJan Kara pgoff_t start, pgoff_t end)
2365f446daaeSJan Kara {
2366ff9c745bSMatthew Wilcox XA_STATE(xas, &mapping->i_pages, start);
2367ff9c745bSMatthew Wilcox unsigned int tagged = 0;
2368ff9c745bSMatthew Wilcox void *page;
2369f446daaeSJan Kara
2370ff9c745bSMatthew Wilcox xas_lock_irq(&xas);
2371ff9c745bSMatthew Wilcox xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) {
2372ff9c745bSMatthew Wilcox xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE);
2373ff9c745bSMatthew Wilcox if (++tagged % XA_CHECK_SCHED)
2374268f42deSMatthew Wilcox continue;
2375ff9c745bSMatthew Wilcox
2376ff9c745bSMatthew Wilcox xas_pause(&xas);
2377ff9c745bSMatthew Wilcox xas_unlock_irq(&xas);
2378f446daaeSJan Kara cond_resched();
2379ff9c745bSMatthew Wilcox xas_lock_irq(&xas);
2380268f42deSMatthew Wilcox }
2381ff9c745bSMatthew Wilcox xas_unlock_irq(&xas);
2382f446daaeSJan Kara }
2383f446daaeSJan Kara EXPORT_SYMBOL(tag_pages_for_writeback);
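
/*
 * Illustrative sketch of the livelock-avoiding pattern described above,
 * simplified from what write_cache_pages() does below: tag first, then
 * walk only the TOWRITE-tagged folios, so pages dirtied after the
 * tagging pass cannot extend the walk indefinitely.
 *
 *	tag_pages_for_writeback(mapping, start, end);
 *	nr_folios = filemap_get_folios_tag(mapping, &start, end,
 *					   PAGECACHE_TAG_TOWRITE, &fbatch);
 *	// ...lock, clean and submit each folio in fbatch...
 */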
2384f446daaeSJan Kara
2385f446daaeSJan Kara /**
23860ea97180SMiklos Szeredi * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2387811d736fSDavid Howells * @mapping: address space structure to write
2388811d736fSDavid Howells * @wbc: subtract the number of written pages from *@wbc->nr_to_write
23890ea97180SMiklos Szeredi * @writepage: function called for each page
23900ea97180SMiklos Szeredi * @data: data passed to writepage function
2391811d736fSDavid Howells *
23920ea97180SMiklos Szeredi * If a page is already under I/O, write_cache_pages() skips it, even
2393811d736fSDavid Howells * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2394811d736fSDavid Howells * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2395811d736fSDavid Howells * and msync() need to guarantee that all the data which was dirty at the time
2396811d736fSDavid Howells * the call was made get new I/O started against them. If wbc->sync_mode is
2397811d736fSDavid Howells * WB_SYNC_ALL then we were called for data integrity and we must wait for
2398811d736fSDavid Howells * existing IO to complete.
2399f446daaeSJan Kara *
2400f446daaeSJan Kara * To avoid livelocks (when other process dirties new pages), we first tag
2401f446daaeSJan Kara * pages which should be written back with TOWRITE tag and only then start
2402f446daaeSJan Kara * writing them. For data-integrity sync we have to be careful so that we do
2403f446daaeSJan Kara * not miss some pages (e.g., because some other process has cleared TOWRITE
2404f446daaeSJan Kara * tag we set). The rule we follow is that TOWRITE tag can be cleared only
2405f446daaeSJan Kara * by the process clearing the DIRTY tag (and submitting the page for IO).
240664081362SDave Chinner *
240764081362SDave Chinner * To avoid deadlocks between range_cyclic writeback and callers that hold
240864081362SDave Chinner * pages in PageWriteback to aggregate IO until write_cache_pages() returns,
240964081362SDave Chinner * we do not loop back to the start of the file. Doing so causes a page
241064081362SDave Chinner * lock/page writeback access order inversion - we should only ever lock
241164081362SDave Chinner * multiple pages in ascending page->index order, and looping back to the start
241264081362SDave Chinner * of the file violates that rule and causes deadlocks.
2413a862f68aSMike Rapoport *
2414a862f68aSMike Rapoport * Return: %0 on success, negative error code otherwise
2415811d736fSDavid Howells */
24160ea97180SMiklos Szeredi int write_cache_pages(struct address_space *mapping,
24170ea97180SMiklos Szeredi struct writeback_control *wbc, writepage_t writepage,
24180ea97180SMiklos Szeredi void *data)
2419811d736fSDavid Howells {
2420811d736fSDavid Howells int ret = 0;
2421811d736fSDavid Howells int done = 0;
24223fa750dcSBrian Foster int error;
24230fff435fSVishal Moola (Oracle) struct folio_batch fbatch;
24240fff435fSVishal Moola (Oracle) int nr_folios;
2425811d736fSDavid Howells pgoff_t index;
2426811d736fSDavid Howells pgoff_t end; /* Inclusive */
2427bd19e012SNick Piggin pgoff_t done_index;
2428811d736fSDavid Howells int range_whole = 0;
2429ff9c745bSMatthew Wilcox xa_mark_t tag;
2430811d736fSDavid Howells
24310fff435fSVishal Moola (Oracle) folio_batch_init(&fbatch);
2432811d736fSDavid Howells if (wbc->range_cyclic) {
243328659cc8SChao Yu index = mapping->writeback_index; /* prev offset */
2434811d736fSDavid Howells end = -1;
2435811d736fSDavid Howells } else {
243609cbfeafSKirill A. Shutemov index = wbc->range_start >> PAGE_SHIFT;
243709cbfeafSKirill A. Shutemov end = wbc->range_end >> PAGE_SHIFT;
2438811d736fSDavid Howells if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2439811d736fSDavid Howells range_whole = 1;
2440811d736fSDavid Howells }
2441cc7b8f62SMauricio Faria de Oliveira if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) {
2442f446daaeSJan Kara tag_pages_for_writeback(mapping, index, end);
2443cc7b8f62SMauricio Faria de Oliveira tag = PAGECACHE_TAG_TOWRITE;
2444cc7b8f62SMauricio Faria de Oliveira } else {
2445cc7b8f62SMauricio Faria de Oliveira tag = PAGECACHE_TAG_DIRTY;
2446cc7b8f62SMauricio Faria de Oliveira }
2447bd19e012SNick Piggin done_index = index;
24485a3d5c98SNick Piggin while (!done && (index <= end)) {
24495a3d5c98SNick Piggin int i;
24505a3d5c98SNick Piggin
24510fff435fSVishal Moola (Oracle) nr_folios = filemap_get_folios_tag(mapping, &index, end,
24520fff435fSVishal Moola (Oracle) tag, &fbatch);
24530fff435fSVishal Moola (Oracle)
24540fff435fSVishal Moola (Oracle) if (nr_folios == 0)
24555a3d5c98SNick Piggin break;
2456811d736fSDavid Howells
24570fff435fSVishal Moola (Oracle) for (i = 0; i < nr_folios; i++) {
24580fff435fSVishal Moola (Oracle) struct folio *folio = fbatch.folios[i];
24598344a3d4SMatthew Wilcox (Oracle) unsigned long nr;
2460811d736fSDavid Howells
24610fff435fSVishal Moola (Oracle) done_index = folio->index;
2462bd19e012SNick Piggin
24630fff435fSVishal Moola (Oracle) folio_lock(folio);
2464811d736fSDavid Howells
24655a3d5c98SNick Piggin /*
24665a3d5c98SNick Piggin * Folio truncated or invalidated. We can freely skip it
24675a3d5c98SNick Piggin * then, even for data integrity operations: the folio
24685a3d5c98SNick Piggin * has disappeared concurrently, so there could be no
2469f0953a1bSIngo Molnar * real expectation of this data integrity operation
24705a3d5c98SNick Piggin * even if there is now a new, dirty folio at the same
24715a3d5c98SNick Piggin * pagecache address.
24725a3d5c98SNick Piggin */
24730fff435fSVishal Moola (Oracle) if (unlikely(folio->mapping != mapping)) {
24745a3d5c98SNick Piggin continue_unlock:
24750fff435fSVishal Moola (Oracle) folio_unlock(folio);
2476811d736fSDavid Howells continue;
2477811d736fSDavid Howells }
2478811d736fSDavid Howells
24790fff435fSVishal Moola (Oracle) if (!folio_test_dirty(folio)) {
2480515f4a03SNick Piggin /* someone wrote it for us */
2481515f4a03SNick Piggin goto continue_unlock;
2482515f4a03SNick Piggin }
2483515f4a03SNick Piggin
24840fff435fSVishal Moola (Oracle) if (folio_test_writeback(folio)) {
2485811d736fSDavid Howells if (wbc->sync_mode != WB_SYNC_NONE)
24860fff435fSVishal Moola (Oracle) folio_wait_writeback(folio);
2487515f4a03SNick Piggin else
2488515f4a03SNick Piggin goto continue_unlock;
2489515f4a03SNick Piggin }
2490811d736fSDavid Howells
24910fff435fSVishal Moola (Oracle) BUG_ON(folio_test_writeback(folio));
24920fff435fSVishal Moola (Oracle) if (!folio_clear_dirty_for_io(folio))
24935a3d5c98SNick Piggin goto continue_unlock;
2494811d736fSDavid Howells
2495de1414a6SChristoph Hellwig trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
2496d585bdbeSMatthew Wilcox (Oracle) error = writepage(folio, wbc, data);
24978344a3d4SMatthew Wilcox (Oracle) nr = folio_nr_pages(folio);
24983fa750dcSBrian Foster if (unlikely(error)) {
249900266770SNick Piggin /*
25003fa750dcSBrian Foster * Handle errors according to the type of
25013fa750dcSBrian Foster * writeback. There's no need to continue for
25023fa750dcSBrian Foster * background writeback. Just push done_index
25033fa750dcSBrian Foster * past this page so media errors won't choke
25043fa750dcSBrian Foster * writeout for the entire file. For integrity
25053fa750dcSBrian Foster * writeback, we must process the entire dirty
25063fa750dcSBrian Foster * set regardless of errors because the fs may
25073fa750dcSBrian Foster * still have state to clear for each page. In
25083fa750dcSBrian Foster * that case we continue processing and return
25093fa750dcSBrian Foster * the first error.
251000266770SNick Piggin */
25113fa750dcSBrian Foster if (error == AOP_WRITEPAGE_ACTIVATE) {
25120fff435fSVishal Moola (Oracle) folio_unlock(folio);
25133fa750dcSBrian Foster error = 0;
25143fa750dcSBrian Foster } else if (wbc->sync_mode != WB_SYNC_ALL) {
25153fa750dcSBrian Foster ret = error;
25168344a3d4SMatthew Wilcox (Oracle) done_index = folio->index + nr;
251700266770SNick Piggin done = 1;
251800266770SNick Piggin break;
2519e4230030SAndrew Morton }
25203fa750dcSBrian Foster if (!ret)
25213fa750dcSBrian Foster ret = error;
252200266770SNick Piggin }
252300266770SNick Piggin
2524dcf6a79dSArtem Bityutskiy /*
2525546a1924SDave Chinner * We stop writing back only if we are not doing
2526546a1924SDave Chinner * integrity sync. In case of integrity sync we have to
2527546a1924SDave Chinner * keep going until we have written all the pages
2528546a1924SDave Chinner * we tagged for writeback prior to entering this loop.
2529dcf6a79dSArtem Bityutskiy */
25308344a3d4SMatthew Wilcox (Oracle) wbc->nr_to_write -= nr;
25318344a3d4SMatthew Wilcox (Oracle) if (wbc->nr_to_write <= 0 &&
2532546a1924SDave Chinner wbc->sync_mode == WB_SYNC_NONE) {
2533811d736fSDavid Howells done = 1;
253482fd1a9aSAndrew Morton break;
253582fd1a9aSAndrew Morton }
253689e12190SFederico Cuello }
25370fff435fSVishal Moola (Oracle) folio_batch_release(&fbatch);
2538811d736fSDavid Howells cond_resched();
2539811d736fSDavid Howells }
254064081362SDave Chinner
2541811d736fSDavid Howells /*
254264081362SDave Chinner * If we hit the last page and there is more work to be done: wrap
254364081362SDave Chinner * the index back to the start of the file for the next
254464081362SDave Chinner * time we are called.
2545811d736fSDavid Howells */
254664081362SDave Chinner if (wbc->range_cyclic && !done)
254764081362SDave Chinner done_index = 0;
25480b564927SDave Chinner if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2549bd19e012SNick Piggin mapping->writeback_index = done_index;
255006d6cf69SAneesh Kumar K.V
2551811d736fSDavid Howells return ret;
2552811d736fSDavid Howells }
25530ea97180SMiklos Szeredi EXPORT_SYMBOL(write_cache_pages);
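
/*
 * Illustrative sketch: a filesystem can build its ->writepages on
 * write_cache_pages() with a per-folio callback. The names
 * fs_writepage_cb() and fs_writepages() are hypothetical.
 *
 *	static int fs_writepage_cb(struct folio *folio,
 *				   struct writeback_control *wbc, void *data)
 *	{
 *		// ...map blocks and submit I/O for @folio...
 *		return 0;
 *	}
 *
 *	static int fs_writepages(struct address_space *mapping,
 *				 struct writeback_control *wbc)
 *	{
 *		return write_cache_pages(mapping, wbc, fs_writepage_cb,
 *					 mapping);
 *	}
 */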
25540ea97180SMiklos Szeredi
2555d585bdbeSMatthew Wilcox (Oracle) static int writepage_cb(struct folio *folio, struct writeback_control *wbc,
25560ea97180SMiklos Szeredi void *data)
25570ea97180SMiklos Szeredi {
25580ea97180SMiklos Szeredi struct address_space *mapping = data;
2559d585bdbeSMatthew Wilcox (Oracle) int ret = mapping->a_ops->writepage(&folio->page, wbc);
25600ea97180SMiklos Szeredi mapping_set_error(mapping, ret);
25610ea97180SMiklos Szeredi return ret;
25620ea97180SMiklos Szeredi }
25630ea97180SMiklos Szeredi
25641da177e4SLinus Torvalds int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
25651da177e4SLinus Torvalds {
256622905f77SAndrew Morton int ret;
2567fee468fdSJan Kara struct bdi_writeback *wb;
256822905f77SAndrew Morton
25691da177e4SLinus Torvalds if (wbc->nr_to_write <= 0)
25701da177e4SLinus Torvalds return 0;
2571fee468fdSJan Kara wb = inode_to_wb_wbc(mapping->host, wbc);
2572fee468fdSJan Kara wb_bandwidth_estimate_start(wb);
257380a2ea9fSTheodore Ts'o while (1) {
2574c2ca7a59SChristoph Hellwig if (mapping->a_ops->writepages) {
257522905f77SAndrew Morton ret = mapping->a_ops->writepages(mapping, wbc);
2576c2ca7a59SChristoph Hellwig } else if (mapping->a_ops->writepage) {
2577c2ca7a59SChristoph Hellwig struct blk_plug plug;
2578c2ca7a59SChristoph Hellwig
2579c2ca7a59SChristoph Hellwig blk_start_plug(&plug);
2580c2ca7a59SChristoph Hellwig ret = write_cache_pages(mapping, wbc, writepage_cb,
2581c2ca7a59SChristoph Hellwig mapping);
2582c2ca7a59SChristoph Hellwig blk_finish_plug(&plug);
2583c2ca7a59SChristoph Hellwig } else {
2584c2ca7a59SChristoph Hellwig /* deal with chardevs and other special files */
2585c2ca7a59SChristoph Hellwig ret = 0;
2586c2ca7a59SChristoph Hellwig }
2587c2ca7a59SChristoph Hellwig if (ret != -ENOMEM || wbc->sync_mode != WB_SYNC_ALL)
258880a2ea9fSTheodore Ts'o break;
25898d58802fSMel Gorman
25908d58802fSMel Gorman /*
25918d58802fSMel Gorman * Lacking an allocation context or the locality or writeback
25928d58802fSMel Gorman * state of any of the inode's pages, throttle based on
25938d58802fSMel Gorman * writeback activity on the local node. It's as good a
25948d58802fSMel Gorman * guess as any.
25958d58802fSMel Gorman */
25968d58802fSMel Gorman reclaim_throttle(NODE_DATA(numa_node_id()),
2597c3f4a9a2SMel Gorman VMSCAN_THROTTLE_WRITEBACK);
259880a2ea9fSTheodore Ts'o }
259945a2966fSJan Kara /*
260045a2966fSJan Kara * Usually few of the pages we've just submitted are written back by
260145a2966fSJan Kara * now, but if writeback is being submitted constantly, this makes sure
260245a2966fSJan Kara * the writeback bandwidth estimate is updated once in a while.
260345a2966fSJan Kara */
260420792ebfSJan Kara if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
260520792ebfSJan Kara BANDWIDTH_INTERVAL))
2606fee468fdSJan Kara wb_update_bandwidth(wb);
260722905f77SAndrew Morton return ret;
26081da177e4SLinus Torvalds }
26091da177e4SLinus Torvalds
26101da177e4SLinus Torvalds /*
261176719325SKen Chen * For address_spaces which do not use buffers nor write back.
261276719325SKen Chen */
261346de8b97SMatthew Wilcox (Oracle) bool noop_dirty_folio(struct address_space *mapping, struct folio *folio)
261476719325SKen Chen {
261546de8b97SMatthew Wilcox (Oracle) if (!folio_test_dirty(folio))
261646de8b97SMatthew Wilcox (Oracle) return !folio_test_set_dirty(folio);
261746de8b97SMatthew Wilcox (Oracle) return false;
261876719325SKen Chen }
261946de8b97SMatthew Wilcox (Oracle) EXPORT_SYMBOL(noop_dirty_folio);
262076719325SKen Chen
262176719325SKen Chen /*
2622e3a7cca1SEdward Shishkin * Helper function for set_page_dirty family.
2623c4843a75SGreg Thelen *
26246c77b607SKefeng Wang * Caller must hold folio_memcg_lock().
2625c4843a75SGreg Thelen *
2626e3a7cca1SEdward Shishkin * NOTE: This relies on being atomic wrt interrupts.
2627e3a7cca1SEdward Shishkin */
2628203a3151SMatthew Wilcox (Oracle) static void folio_account_dirtied(struct folio *folio,
26296e1cae88SMatthew Wilcox (Oracle) struct address_space *mapping)
2630e3a7cca1SEdward Shishkin {
263152ebea74STejun Heo struct inode *inode = mapping->host;
263252ebea74STejun Heo
2633b9b0ff61SMatthew Wilcox (Oracle) trace_writeback_dirty_folio(folio, mapping);
26349fb0a7daSTejun Heo
2635f56753acSChristoph Hellwig if (mapping_can_writeback(mapping)) {
263652ebea74STejun Heo struct bdi_writeback *wb;
2637203a3151SMatthew Wilcox (Oracle) long nr = folio_nr_pages(folio);
2638de1414a6SChristoph Hellwig
26399cfb816bSMatthew Wilcox (Oracle) inode_attach_wb(inode, folio);
264052ebea74STejun Heo wb = inode_to_wb(inode);
2641e3a7cca1SEdward Shishkin
2642203a3151SMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
2643203a3151SMatthew Wilcox (Oracle) __zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
2644203a3151SMatthew Wilcox (Oracle) __node_stat_mod_folio(folio, NR_DIRTIED, nr);
2645203a3151SMatthew Wilcox (Oracle) wb_stat_mod(wb, WB_RECLAIMABLE, nr);
2646203a3151SMatthew Wilcox (Oracle) wb_stat_mod(wb, WB_DIRTIED, nr);
2647203a3151SMatthew Wilcox (Oracle) task_io_account_write(nr * PAGE_SIZE);
2648203a3151SMatthew Wilcox (Oracle) current->nr_dirtied += nr;
2649203a3151SMatthew Wilcox (Oracle) __this_cpu_add(bdp_ratelimits, nr);
265097b27821STejun Heo
2651203a3151SMatthew Wilcox (Oracle) mem_cgroup_track_foreign_dirty(folio, wb);
2652e3a7cca1SEdward Shishkin }
2653e3a7cca1SEdward Shishkin }
2654e3a7cca1SEdward Shishkin
2655e3a7cca1SEdward Shishkin /*
2656b9ea2515SKonstantin Khlebnikov * Helper function for deaccounting dirty page without writeback.
2657b9ea2515SKonstantin Khlebnikov *
26586c77b607SKefeng Wang * Caller must hold folio_memcg_lock().
2659b9ea2515SKonstantin Khlebnikov */
2660566d3362SHugh Dickins void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
2661b9ea2515SKonstantin Khlebnikov {
2662fc9b6a53SMatthew Wilcox (Oracle) long nr = folio_nr_pages(folio);
2663566d3362SHugh Dickins
2664fc9b6a53SMatthew Wilcox (Oracle) lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
2665fc9b6a53SMatthew Wilcox (Oracle) zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
2666fc9b6a53SMatthew Wilcox (Oracle) wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
2667fc9b6a53SMatthew Wilcox (Oracle) task_io_account_cancelled_write(nr * PAGE_SIZE);
2668b9ea2515SKonstantin Khlebnikov }
2669b9ea2515SKonstantin Khlebnikov
2670b9ea2515SKonstantin Khlebnikov /*
2671203a3151SMatthew Wilcox (Oracle) * Mark the folio dirty, set it dirty in the page cache, and mark
2672203a3151SMatthew Wilcox (Oracle) * the inode dirty.
26736e1cae88SMatthew Wilcox (Oracle) *
2674203a3151SMatthew Wilcox (Oracle) * If warn is true, then emit a warning if the folio is not uptodate and has
26756e1cae88SMatthew Wilcox (Oracle) * not been truncated.
26766e1cae88SMatthew Wilcox (Oracle) *
26776c77b607SKefeng Wang * The caller must hold folio_memcg_lock(). Most callers have the folio
2678a229a4f0SMatthew Wilcox (Oracle) * locked. A few have the folio blocked from truncation through other
2679e9adcfecSMike Kravetz * means (eg zap_vma_pages() has it mapped and is holding the page table
2680a229a4f0SMatthew Wilcox (Oracle) * lock). This can also be called from mark_buffer_dirty(), which I
2681a229a4f0SMatthew Wilcox (Oracle) * cannot prove is always protected against truncate.
26826e1cae88SMatthew Wilcox (Oracle) */
2683203a3151SMatthew Wilcox (Oracle) void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
26846e1cae88SMatthew Wilcox (Oracle) int warn)
26856e1cae88SMatthew Wilcox (Oracle) {
26866e1cae88SMatthew Wilcox (Oracle) unsigned long flags;
26876e1cae88SMatthew Wilcox (Oracle)
26886e1cae88SMatthew Wilcox (Oracle) xa_lock_irqsave(&mapping->i_pages, flags);
2689203a3151SMatthew Wilcox (Oracle) if (folio->mapping) { /* Race with truncate? */
2690203a3151SMatthew Wilcox (Oracle) WARN_ON_ONCE(warn && !folio_test_uptodate(folio));
2691203a3151SMatthew Wilcox (Oracle) folio_account_dirtied(folio, mapping);
2692203a3151SMatthew Wilcox (Oracle) __xa_set_mark(&mapping->i_pages, folio_index(folio),
26936e1cae88SMatthew Wilcox (Oracle) PAGECACHE_TAG_DIRTY);
26946e1cae88SMatthew Wilcox (Oracle) }
26956e1cae88SMatthew Wilcox (Oracle) xa_unlock_irqrestore(&mapping->i_pages, flags);
26966e1cae88SMatthew Wilcox (Oracle) }
26976e1cae88SMatthew Wilcox (Oracle)
269885d4d2ebSMatthew Wilcox (Oracle) /**
269985d4d2ebSMatthew Wilcox (Oracle) * filemap_dirty_folio - Mark a folio dirty for filesystems which do not use buffer_heads.
270085d4d2ebSMatthew Wilcox (Oracle) * @mapping: Address space this folio belongs to.
270185d4d2ebSMatthew Wilcox (Oracle) * @folio: Folio to be marked as dirty.
27021da177e4SLinus Torvalds *
270385d4d2ebSMatthew Wilcox (Oracle) * Filesystems which do not use buffer heads should call this function
270485d4d2ebSMatthew Wilcox (Oracle) * from their dirty_folio address space operation. It ignores the
270585d4d2ebSMatthew Wilcox (Oracle) * contents of folio_get_private(), so if the filesystem marks individual
270685d4d2ebSMatthew Wilcox (Oracle) * blocks as dirty, the filesystem should handle that itself.
27071da177e4SLinus Torvalds *
270885d4d2ebSMatthew Wilcox (Oracle) * This is also sometimes used by filesystems which use buffer_heads when
270985d4d2ebSMatthew Wilcox (Oracle) * a single buffer is being dirtied: we want to set the folio dirty in
271085d4d2ebSMatthew Wilcox (Oracle) * that case, but not all the buffers. This is a "bottom-up" dirtying,
2711e621900aSMatthew Wilcox (Oracle) * whereas block_dirty_folio() is a "top-down" dirtying.
271285d4d2ebSMatthew Wilcox (Oracle) *
271385d4d2ebSMatthew Wilcox (Oracle) * The caller must ensure this doesn't race with truncation. Most will
271485d4d2ebSMatthew Wilcox (Oracle) * simply hold the folio lock, but e.g. zap_pte_range() calls with the
271585d4d2ebSMatthew Wilcox (Oracle) * folio mapped and the pte lock held, which also locks out truncation.
27161da177e4SLinus Torvalds */
271785d4d2ebSMatthew Wilcox (Oracle) bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio)
27181da177e4SLinus Torvalds {
271985d4d2ebSMatthew Wilcox (Oracle) folio_memcg_lock(folio);
272085d4d2ebSMatthew Wilcox (Oracle) if (folio_test_set_dirty(folio)) {
272185d4d2ebSMatthew Wilcox (Oracle) folio_memcg_unlock(folio);
272285d4d2ebSMatthew Wilcox (Oracle) return false;
2723c4843a75SGreg Thelen }
272485d4d2ebSMatthew Wilcox (Oracle)
272585d4d2ebSMatthew Wilcox (Oracle) __folio_mark_dirty(folio, mapping, !folio_test_private(folio));
272685d4d2ebSMatthew Wilcox (Oracle) folio_memcg_unlock(folio);
2727c4843a75SGreg Thelen
27281da177e4SLinus Torvalds if (mapping->host) {
27291da177e4SLinus Torvalds /* !PageAnon && !swapper_space */
27308c08540fSAndrew Morton __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
27311da177e4SLinus Torvalds }
273285d4d2ebSMatthew Wilcox (Oracle) return true;
27331da177e4SLinus Torvalds }
273485d4d2ebSMatthew Wilcox (Oracle) EXPORT_SYMBOL(filemap_dirty_folio);
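
/*
 * Illustrative sketch: a filesystem that does not use buffer heads
 * typically wires this in through its address_space_operations
 * (hypothetical fs_aops, other methods elided):
 *
 *	static const struct address_space_operations fs_aops = {
 *		.dirty_folio	= filemap_dirty_folio,
 *		// ...readahead, writepages, etc...
 *	};
 */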
27351da177e4SLinus Torvalds
273625ff8b15SMatthew Wilcox (Oracle) /**
2737cd78ab11SMatthew Wilcox (Oracle) * folio_redirty_for_writepage - Decline to write a dirty folio.
2738cd78ab11SMatthew Wilcox (Oracle) * @wbc: The writeback control.
2739cd78ab11SMatthew Wilcox (Oracle) * @folio: The folio.
2740cd78ab11SMatthew Wilcox (Oracle) *
2741cd78ab11SMatthew Wilcox (Oracle) * When a writepage implementation decides that it doesn't want to write
2742cd78ab11SMatthew Wilcox (Oracle) * @folio for some reason, it should call this function, unlock @folio and
2743cd78ab11SMatthew Wilcox (Oracle) * return 0.
2744cd78ab11SMatthew Wilcox (Oracle) *
2745cd78ab11SMatthew Wilcox (Oracle) * Return: True if we redirtied the folio. False if someone else dirtied
2746cd78ab11SMatthew Wilcox (Oracle) * it first.
27471da177e4SLinus Torvalds */
2748cd78ab11SMatthew Wilcox (Oracle) bool folio_redirty_for_writepage(struct writeback_control *wbc,
2749cd78ab11SMatthew Wilcox (Oracle) struct folio *folio)
27501da177e4SLinus Torvalds {
2751ed2da924SChristoph Hellwig struct address_space *mapping = folio->mapping;
2752cd78ab11SMatthew Wilcox (Oracle) long nr = folio_nr_pages(folio);
2753ed2da924SChristoph Hellwig bool ret;
27548d38633cSKonstantin Khebnikov
2755cd78ab11SMatthew Wilcox (Oracle) wbc->pages_skipped += nr;
2756ed2da924SChristoph Hellwig ret = filemap_dirty_folio(mapping, folio);
2757ed2da924SChristoph Hellwig if (mapping && mapping_can_writeback(mapping)) {
2758ed2da924SChristoph Hellwig struct inode *inode = mapping->host;
2759ed2da924SChristoph Hellwig struct bdi_writeback *wb;
2760ed2da924SChristoph Hellwig struct wb_lock_cookie cookie = {};
2761cd78ab11SMatthew Wilcox (Oracle)
2762ed2da924SChristoph Hellwig wb = unlocked_inode_to_wb_begin(inode, &cookie);
2763ed2da924SChristoph Hellwig current->nr_dirtied -= nr;
2764ed2da924SChristoph Hellwig node_stat_mod_folio(folio, NR_DIRTIED, -nr);
2765ed2da924SChristoph Hellwig wb_stat_mod(wb, WB_DIRTIED, -nr);
2766ed2da924SChristoph Hellwig unlocked_inode_to_wb_end(inode, &cookie);
2767ed2da924SChristoph Hellwig }
27688d38633cSKonstantin Khebnikov return ret;
27691da177e4SLinus Torvalds }
2770cd78ab11SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_redirty_for_writepage);
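
/*
 * Illustrative sketch of the pattern described above: a writepage
 * implementation that cannot make progress right now (the condition
 * fs_cannot_write_now() is hypothetical):
 *
 *	if (fs_cannot_write_now(folio)) {
 *		folio_redirty_for_writepage(wbc, folio);
 *		folio_unlock(folio);
 *		return 0;
 *	}
 */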
27711da177e4SLinus Torvalds
2772b5e84594SMatthew Wilcox (Oracle) /**
2773b5e84594SMatthew Wilcox (Oracle) * folio_mark_dirty - Mark a folio as being modified.
2774b5e84594SMatthew Wilcox (Oracle) * @folio: The folio.
27756746aff7SWu Fengguang *
27762ca456c2SMatthew Wilcox (Oracle) * The folio may not be truncated while this function is running.
27772ca456c2SMatthew Wilcox (Oracle) * Holding the folio lock is sufficient to prevent truncation, but some
27782ca456c2SMatthew Wilcox (Oracle) * callers cannot acquire a sleeping lock. These callers instead hold
27792ca456c2SMatthew Wilcox (Oracle) * the page table lock for a page table which contains at least one page
27802ca456c2SMatthew Wilcox (Oracle) * in this folio. Truncation will block on the page table lock as it
27812ca456c2SMatthew Wilcox (Oracle) * unmaps pages before removing the folio from its mapping.
2782b5e84594SMatthew Wilcox (Oracle) *
2783b5e84594SMatthew Wilcox (Oracle) * Return: True if the folio was newly dirtied, false if it was already dirty.
27841da177e4SLinus Torvalds */
2785b5e84594SMatthew Wilcox (Oracle) bool folio_mark_dirty(struct folio *folio)
27861da177e4SLinus Torvalds {
2787b5e84594SMatthew Wilcox (Oracle) struct address_space *mapping = folio_mapping(folio);
27881da177e4SLinus Torvalds
27891da177e4SLinus Torvalds if (likely(mapping)) {
2790278df9f4SMinchan Kim /*
27915a9e3474SVishal Moola (Oracle) * A folio touched by readahead/folio_deactivate could remain
27926f31a5a2SMatthew Wilcox (Oracle) * PG_readahead/PG_reclaim due to a race with folio_end_writeback().
27936f31a5a2SMatthew Wilcox (Oracle) * As for readahead, if the folio is written to, the flags will be
2794278df9f4SMinchan Kim * reset. So no problem.
27955a9e3474SVishal Moola (Oracle) * As for folio_deactivate, if the folio is redirtied,
27966f31a5a2SMatthew Wilcox (Oracle) * the flag will be reset. So no problem. But if the
27976f31a5a2SMatthew Wilcox (Oracle) * folio is used by readahead, it will confuse readahead
27986f31a5a2SMatthew Wilcox (Oracle) * and make it restart the size rampup process. That is
27996f31a5a2SMatthew Wilcox (Oracle) * only a trivial problem, though.
2800278df9f4SMinchan Kim */
2801b5e84594SMatthew Wilcox (Oracle) if (folio_test_reclaim(folio))
2802b5e84594SMatthew Wilcox (Oracle) folio_clear_reclaim(folio);
28036f31a5a2SMatthew Wilcox (Oracle) return mapping->a_ops->dirty_folio(mapping, folio);
28041da177e4SLinus Torvalds }
28053a3bae50SMatthew Wilcox (Oracle)
28063a3bae50SMatthew Wilcox (Oracle) return noop_dirty_folio(mapping, folio);
28071da177e4SLinus Torvalds }
2808b5e84594SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_mark_dirty);
28091da177e4SLinus Torvalds
28101da177e4SLinus Torvalds /*
28111da177e4SLinus Torvalds * set_page_dirty() is racy if the caller has no reference against
28121da177e4SLinus Torvalds * page->mapping->host, and if the page is unlocked. This is because another
28131da177e4SLinus Torvalds * CPU could truncate the page off the mapping and then free the mapping.
28141da177e4SLinus Torvalds *
28151da177e4SLinus Torvalds * Usually, the page _is_ locked, or the caller is a user-space process which
28161da177e4SLinus Torvalds * holds a reference on the inode by having an open file.
28171da177e4SLinus Torvalds *
28181da177e4SLinus Torvalds * In other cases, the page should be locked before running set_page_dirty().
28191da177e4SLinus Torvalds */
28201da177e4SLinus Torvalds int set_page_dirty_lock(struct page *page)
28211da177e4SLinus Torvalds {
28221da177e4SLinus Torvalds int ret;
28231da177e4SLinus Torvalds
28247eaceaccSJens Axboe lock_page(page);
28251da177e4SLinus Torvalds ret = set_page_dirty(page);
28261da177e4SLinus Torvalds unlock_page(page);
28271da177e4SLinus Torvalds return ret;
28281da177e4SLinus Torvalds }
28291da177e4SLinus Torvalds EXPORT_SYMBOL(set_page_dirty_lock);
28301da177e4SLinus Torvalds
28311da177e4SLinus Torvalds /*
283211f81becSTejun Heo * This cancels just the dirty bit on the kernel page itself, it does NOT
283311f81becSTejun Heo * actually remove dirty bits on any mmap's that may be around. It also
283411f81becSTejun Heo * leaves the page tagged dirty, so any sync activity will still find it on
283511f81becSTejun Heo * the dirty lists, and in particular, clear_page_dirty_for_io() will still
283611f81becSTejun Heo * look at the dirty bits in the VM.
283711f81becSTejun Heo *
283811f81becSTejun Heo * Doing this should *normally* only ever be done when a page is truncated,
283911f81becSTejun Heo * and is not actually mapped anywhere at all. However, fs/buffer.c does
284011f81becSTejun Heo * this when it notices that somebody has cleaned out all the buffers on a
284111f81becSTejun Heo * page without actually doing it through the VM. Can you say "ext3 is
284211f81becSTejun Heo * horribly ugly"? Thought you could.
284311f81becSTejun Heo */
2844fdaf532aSMatthew Wilcox (Oracle) void __folio_cancel_dirty(struct folio *folio)
284511f81becSTejun Heo {
2846fdaf532aSMatthew Wilcox (Oracle) struct address_space *mapping = folio_mapping(folio);
2847c4843a75SGreg Thelen
2848f56753acSChristoph Hellwig if (mapping_can_writeback(mapping)) {
2849682aa8e1STejun Heo struct inode *inode = mapping->host;
2850682aa8e1STejun Heo struct bdi_writeback *wb;
28512e898e4cSGreg Thelen struct wb_lock_cookie cookie = {};
2852c4843a75SGreg Thelen
2853fdaf532aSMatthew Wilcox (Oracle) folio_memcg_lock(folio);
28542e898e4cSGreg Thelen wb = unlocked_inode_to_wb_begin(inode, &cookie);
2855c4843a75SGreg Thelen
2856fdaf532aSMatthew Wilcox (Oracle) if (folio_test_clear_dirty(folio))
2857566d3362SHugh Dickins folio_account_cleaned(folio, wb);
2858c4843a75SGreg Thelen
28592e898e4cSGreg Thelen unlocked_inode_to_wb_end(inode, &cookie);
2860fdaf532aSMatthew Wilcox (Oracle) folio_memcg_unlock(folio);
2861c4843a75SGreg Thelen } else {
2862fdaf532aSMatthew Wilcox (Oracle) folio_clear_dirty(folio);
2863c4843a75SGreg Thelen }
286411f81becSTejun Heo }
2865fdaf532aSMatthew Wilcox (Oracle) EXPORT_SYMBOL(__folio_cancel_dirty);
286611f81becSTejun Heo
286711f81becSTejun Heo /*
28689350f20aSMatthew Wilcox (Oracle) * Clear a folio's dirty flag, while caring for dirty memory accounting.
28699350f20aSMatthew Wilcox (Oracle) * Returns true if the folio was previously dirty.
28701da177e4SLinus Torvalds *
28719350f20aSMatthew Wilcox (Oracle) * This is for preparing to put the folio under writeout. We leave
28729350f20aSMatthew Wilcox (Oracle) * the folio tagged as dirty in the xarray so that a concurrent
28739350f20aSMatthew Wilcox (Oracle) * write-for-sync can discover it via a PAGECACHE_TAG_DIRTY walk.
28749350f20aSMatthew Wilcox (Oracle) * The ->writepage implementation will run either folio_start_writeback()
28759350f20aSMatthew Wilcox (Oracle) * or folio_mark_dirty(), at which stage we bring the folio's dirty flag
28769350f20aSMatthew Wilcox (Oracle) * and xarray dirty tag back into sync.
28771da177e4SLinus Torvalds *
28789350f20aSMatthew Wilcox (Oracle) * This incoherency between the folio's dirty flag and xarray tag is
28799350f20aSMatthew Wilcox (Oracle) * unfortunate, but it only exists while the folio is locked.
28801da177e4SLinus Torvalds */
28819350f20aSMatthew Wilcox (Oracle) bool folio_clear_dirty_for_io(struct folio *folio)
28821da177e4SLinus Torvalds {
28839350f20aSMatthew Wilcox (Oracle) struct address_space *mapping = folio_mapping(folio);
28849350f20aSMatthew Wilcox (Oracle) bool ret = false;
28851da177e4SLinus Torvalds
28869350f20aSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
288779352894SNick Piggin
2888f56753acSChristoph Hellwig if (mapping && mapping_can_writeback(mapping)) {
2889682aa8e1STejun Heo struct inode *inode = mapping->host;
2890682aa8e1STejun Heo struct bdi_writeback *wb;
28912e898e4cSGreg Thelen struct wb_lock_cookie cookie = {};
2892682aa8e1STejun Heo
28937658cc28SLinus Torvalds /*
28947658cc28SLinus Torvalds * Yes, Virginia, this is indeed insane.
28957658cc28SLinus Torvalds *
28967658cc28SLinus Torvalds * We use this sequence to make sure that
28977658cc28SLinus Torvalds * (a) we account for dirty stats properly
28987658cc28SLinus Torvalds * (b) we tell the low-level filesystem to
28999350f20aSMatthew Wilcox (Oracle) * mark the whole folio dirty if it was
29007658cc28SLinus Torvalds * dirty in a pagetable. Only to then
29019350f20aSMatthew Wilcox (Oracle) * (c) clean the folio again and return 1 to
29027658cc28SLinus Torvalds * cause the writeback.
29037658cc28SLinus Torvalds *
29047658cc28SLinus Torvalds * This way we avoid all nasty races with the
29057658cc28SLinus Torvalds * dirty bit in multiple places and clearing
29067658cc28SLinus Torvalds * them concurrently from different threads.
29077658cc28SLinus Torvalds *
29089350f20aSMatthew Wilcox (Oracle) * Note! Normally the "folio_mark_dirty(folio)"
29097658cc28SLinus Torvalds * has no effect on the actual dirty bit - since
29107658cc28SLinus Torvalds * that will already usually be set. But we
29117658cc28SLinus Torvalds * need the side effects, and it can help us
29127658cc28SLinus Torvalds * avoid races.
29137658cc28SLinus Torvalds *
29149350f20aSMatthew Wilcox (Oracle) * We basically use the folio "master dirty bit"
29157658cc28SLinus Torvalds * as a serialization point for all the different
29167658cc28SLinus Torvalds * threads doing their things.
29177658cc28SLinus Torvalds */
29189350f20aSMatthew Wilcox (Oracle) if (folio_mkclean(folio))
29199350f20aSMatthew Wilcox (Oracle) folio_mark_dirty(folio);
292079352894SNick Piggin /*
292179352894SNick Piggin * We carefully synchronise fault handlers against
29229350f20aSMatthew Wilcox (Oracle) * installing a dirty pte and marking the folio dirty
292379352894SNick Piggin * at this point. We do this by having them hold the
29249350f20aSMatthew Wilcox (Oracle) * page lock while dirtying the folio, and folios are
29252d6d7f98SJohannes Weiner * always locked coming in here, so we get the desired
29262d6d7f98SJohannes Weiner * exclusion.
292779352894SNick Piggin */
29282e898e4cSGreg Thelen wb = unlocked_inode_to_wb_begin(inode, &cookie);
29299350f20aSMatthew Wilcox (Oracle) if (folio_test_clear_dirty(folio)) {
29309350f20aSMatthew Wilcox (Oracle) long nr = folio_nr_pages(folio);
29319350f20aSMatthew Wilcox (Oracle) lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
29329350f20aSMatthew Wilcox (Oracle) zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
29339350f20aSMatthew Wilcox (Oracle) wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
29349350f20aSMatthew Wilcox (Oracle) ret = true;
29351da177e4SLinus Torvalds }
29362e898e4cSGreg Thelen unlocked_inode_to_wb_end(inode, &cookie);
2937c4843a75SGreg Thelen return ret;
29381da177e4SLinus Torvalds }
29399350f20aSMatthew Wilcox (Oracle) return folio_test_clear_dirty(folio);
29407658cc28SLinus Torvalds }
29419350f20aSMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_clear_dirty_for_io);
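
/*
 * Illustrative sketch: the canonical clean-for-io sequence in a
 * writepage-style path, which brings the dirty flag and the xarray tag
 * back into sync as described above (simplified):
 *
 *	if (folio_clear_dirty_for_io(folio)) {
 *		folio_start_writeback(folio);
 *		// ...submit the I/O; folio_end_writeback() at completion
 *		// drops the xarray PAGECACHE_TAG_WRITEBACK mark...
 *	}
 */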
29421da177e4SLinus Torvalds
2943633a2abbSJan Kara static void wb_inode_writeback_start(struct bdi_writeback *wb)
2944633a2abbSJan Kara {
2945633a2abbSJan Kara atomic_inc(&wb->writeback_inodes);
2946633a2abbSJan Kara }
2947633a2abbSJan Kara
2948633a2abbSJan Kara static void wb_inode_writeback_end(struct bdi_writeback *wb)
2949633a2abbSJan Kara {
2950f87904c0SKhazhismel Kumykov unsigned long flags;
2951633a2abbSJan Kara atomic_dec(&wb->writeback_inodes);
295245a2966fSJan Kara /*
295345a2966fSJan Kara * Make sure estimate of writeback throughput gets updated after
295445a2966fSJan Kara * writeback completed. We delay the update by BANDWIDTH_INTERVAL
295545a2966fSJan Kara * (which is the interval other bandwidth updates use for batching) so
295645a2966fSJan Kara * that if multiple inodes end writeback at a similar time, they get
295745a2966fSJan Kara * batched into one bandwidth update.
295845a2966fSJan Kara */
2959f87904c0SKhazhismel Kumykov spin_lock_irqsave(&wb->work_lock, flags);
2960f87904c0SKhazhismel Kumykov if (test_bit(WB_registered, &wb->state))
296145a2966fSJan Kara queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
2962f87904c0SKhazhismel Kumykov spin_unlock_irqrestore(&wb->work_lock, flags);
2963633a2abbSJan Kara }
2964633a2abbSJan Kara
2965269ccca3SMatthew Wilcox (Oracle) bool __folio_end_writeback(struct folio *folio)
29661da177e4SLinus Torvalds {
2967269ccca3SMatthew Wilcox (Oracle) long nr = folio_nr_pages(folio);
2968269ccca3SMatthew Wilcox (Oracle) struct address_space *mapping = folio_mapping(folio);
2969269ccca3SMatthew Wilcox (Oracle) bool ret;
29701da177e4SLinus Torvalds
2971269ccca3SMatthew Wilcox (Oracle) folio_memcg_lock(folio);
2972371a096eSHuang Ying if (mapping && mapping_use_writeback_tags(mapping)) {
297391018134STejun Heo struct inode *inode = mapping->host;
297491018134STejun Heo struct backing_dev_info *bdi = inode_to_bdi(inode);
29751da177e4SLinus Torvalds unsigned long flags;
29761da177e4SLinus Torvalds
2977b93b0163SMatthew Wilcox xa_lock_irqsave(&mapping->i_pages, flags);
2978269ccca3SMatthew Wilcox (Oracle) ret = folio_test_clear_writeback(folio);
297969cb51d1SPeter Zijlstra if (ret) {
2980269ccca3SMatthew Wilcox (Oracle) __xa_clear_mark(&mapping->i_pages, folio_index(folio),
29811da177e4SLinus Torvalds PAGECACHE_TAG_WRITEBACK);
2982823423efSChristoph Hellwig if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
298391018134STejun Heo struct bdi_writeback *wb = inode_to_wb(inode);
298491018134STejun Heo
2985269ccca3SMatthew Wilcox (Oracle) wb_stat_mod(wb, WB_WRITEBACK, -nr);
2986269ccca3SMatthew Wilcox (Oracle) __wb_writeout_add(wb, nr);
2987633a2abbSJan Kara if (!mapping_tagged(mapping,
2988633a2abbSJan Kara PAGECACHE_TAG_WRITEBACK))
2989633a2abbSJan Kara wb_inode_writeback_end(wb);
299004fbfdc1SPeter Zijlstra }
299169cb51d1SPeter Zijlstra }
29926c60d2b5SDave Chinner
29936c60d2b5SDave Chinner if (mapping->host && !mapping_tagged(mapping,
29946c60d2b5SDave Chinner PAGECACHE_TAG_WRITEBACK))
29956c60d2b5SDave Chinner sb_clear_inode_writeback(mapping->host);
29966c60d2b5SDave Chinner
2997b93b0163SMatthew Wilcox xa_unlock_irqrestore(&mapping->i_pages, flags);
29981da177e4SLinus Torvalds } else {
2999269ccca3SMatthew Wilcox (Oracle) ret = folio_test_clear_writeback(folio);
30001da177e4SLinus Torvalds }
300199b12e3dSWu Fengguang if (ret) {
3002269ccca3SMatthew Wilcox (Oracle) lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
3003269ccca3SMatthew Wilcox (Oracle) zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
3004269ccca3SMatthew Wilcox (Oracle) node_stat_mod_folio(folio, NR_WRITTEN, nr);
300599b12e3dSWu Fengguang }
3006269ccca3SMatthew Wilcox (Oracle) folio_memcg_unlock(folio);
30071da177e4SLinus Torvalds return ret;
30081da177e4SLinus Torvalds }
30091da177e4SLinus Torvalds
3010f143f1eaSMatthew Wilcox (Oracle) bool __folio_start_writeback(struct folio *folio, bool keep_write)
30111da177e4SLinus Torvalds {
3012f143f1eaSMatthew Wilcox (Oracle) long nr = folio_nr_pages(folio);
3013f143f1eaSMatthew Wilcox (Oracle) struct address_space *mapping = folio_mapping(folio);
3014f143f1eaSMatthew Wilcox (Oracle) bool ret;
3015f143f1eaSMatthew Wilcox (Oracle) int access_ret;
30161da177e4SLinus Torvalds
3017f143f1eaSMatthew Wilcox (Oracle) folio_memcg_lock(folio);
3018371a096eSHuang Ying if (mapping && mapping_use_writeback_tags(mapping)) {
3019f143f1eaSMatthew Wilcox (Oracle) XA_STATE(xas, &mapping->i_pages, folio_index(folio));
302091018134STejun Heo struct inode *inode = mapping->host;
302191018134STejun Heo struct backing_dev_info *bdi = inode_to_bdi(inode);
30221da177e4SLinus Torvalds unsigned long flags;
30231da177e4SLinus Torvalds
3024ff9c745bSMatthew Wilcox xas_lock_irqsave(&xas, flags);
3025ff9c745bSMatthew Wilcox xas_load(&xas);
3026f143f1eaSMatthew Wilcox (Oracle) ret = folio_test_set_writeback(folio);
302769cb51d1SPeter Zijlstra if (!ret) {
30286c60d2b5SDave Chinner bool on_wblist;
30296c60d2b5SDave Chinner
30306c60d2b5SDave Chinner on_wblist = mapping_tagged(mapping,
30316c60d2b5SDave Chinner PAGECACHE_TAG_WRITEBACK);
30326c60d2b5SDave Chinner
3033ff9c745bSMatthew Wilcox xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
3034633a2abbSJan Kara if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
3035633a2abbSJan Kara struct bdi_writeback *wb = inode_to_wb(inode);
3036633a2abbSJan Kara
3037f143f1eaSMatthew Wilcox (Oracle) wb_stat_mod(wb, WB_WRITEBACK, nr);
3038633a2abbSJan Kara if (!on_wblist)
3039633a2abbSJan Kara wb_inode_writeback_start(wb);
3040633a2abbSJan Kara }
30416c60d2b5SDave Chinner
30426c60d2b5SDave Chinner /*
3043f143f1eaSMatthew Wilcox (Oracle) * We can come through here when swapping
3044f143f1eaSMatthew Wilcox (Oracle) * anonymous folios, so we don't necessarily
3045f143f1eaSMatthew Wilcox (Oracle) * have an inode to track for sync.
30466c60d2b5SDave Chinner */
30476c60d2b5SDave Chinner if (mapping->host && !on_wblist)
30486c60d2b5SDave Chinner sb_mark_inode_writeback(mapping->host);
304969cb51d1SPeter Zijlstra }
3050f143f1eaSMatthew Wilcox (Oracle) if (!folio_test_dirty(folio))
3051ff9c745bSMatthew Wilcox xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
30521c8349a1SNamjae Jeon if (!keep_write)
3053ff9c745bSMatthew Wilcox xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
3054ff9c745bSMatthew Wilcox xas_unlock_irqrestore(&xas, flags);
30551da177e4SLinus Torvalds } else {
3056f143f1eaSMatthew Wilcox (Oracle) ret = folio_test_set_writeback(folio);
30571da177e4SLinus Torvalds }
30583a3c02ecSJohannes Weiner if (!ret) {
3059f143f1eaSMatthew Wilcox (Oracle) lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
3060f143f1eaSMatthew Wilcox (Oracle) zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
30613a3c02ecSJohannes Weiner }
3062f143f1eaSMatthew Wilcox (Oracle) folio_memcg_unlock(folio);
3063f143f1eaSMatthew Wilcox (Oracle) access_ret = arch_make_folio_accessible(folio);
3064f28d4363SClaudio Imbrenda /*
3065f28d4363SClaudio Imbrenda  * If writeback has been triggered on a folio that cannot be made
3066f28d4363SClaudio Imbrenda * accessible, it is too late to recover here.
3067f28d4363SClaudio Imbrenda */
3068f143f1eaSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(access_ret != 0, folio);
3069f28d4363SClaudio Imbrenda
30701da177e4SLinus Torvalds return ret;
30711da177e4SLinus Torvalds }
3072f143f1eaSMatthew Wilcox (Oracle) EXPORT_SYMBOL(__folio_start_writeback);
30731da177e4SLinus Torvalds
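/*
 * Illustrative sketch (not part of the original file): filesystems
 * reach __folio_start_writeback() through the <linux/page-flags.h>
 * wrappers -- folio_start_writeback() passes keep_write == false, and
 * folio_start_writeback_keepwrite() passes true so the TOWRITE tag
 * survives for a later tagged-writepages pass.  A hypothetical
 * write-one-folio path might look like this:
 */
#if 0	/* example only */
static int example_write_folio(struct folio *folio,
			       struct writeback_control *wbc)
{
	/* Caller holds the folio lock; skip folios cleaned meanwhile. */
	if (!folio_clear_dirty_for_io(folio)) {
		folio_unlock(folio);
		return 0;
	}
	folio_start_writeback(folio);	/* sets PG_writeback + xarray tag */
	folio_unlock(folio);
	/* ...submit the I/O; completion ends with folio_end_writeback()... */
	return 0;
}
#endif
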
3074490e016fSMatthew Wilcox (Oracle) /**
3075490e016fSMatthew Wilcox (Oracle) * folio_wait_writeback - Wait for a folio to finish writeback.
3076490e016fSMatthew Wilcox (Oracle) * @folio: The folio to wait for.
3077490e016fSMatthew Wilcox (Oracle) *
3078490e016fSMatthew Wilcox (Oracle) * If the folio is currently being written back to storage, wait for the
3079490e016fSMatthew Wilcox (Oracle) * I/O to complete.
3080490e016fSMatthew Wilcox (Oracle) *
3081490e016fSMatthew Wilcox (Oracle) * Context: Sleeps. Must be called in process context and with
3082490e016fSMatthew Wilcox (Oracle) * no spinlocks held. Caller should hold a reference on the folio.
3083490e016fSMatthew Wilcox (Oracle) * If the folio is not locked, writeback may start again after writeback
3084490e016fSMatthew Wilcox (Oracle) * has finished.
308519343b5bSYafang Shao */
3086490e016fSMatthew Wilcox (Oracle) void folio_wait_writeback(struct folio *folio)
308719343b5bSYafang Shao {
3088490e016fSMatthew Wilcox (Oracle) while (folio_test_writeback(folio)) {
3089b9b0ff61SMatthew Wilcox (Oracle) trace_folio_wait_writeback(folio, folio_mapping(folio));
3090101c0bf6SMatthew Wilcox (Oracle) folio_wait_bit(folio, PG_writeback);
309119343b5bSYafang Shao }
309219343b5bSYafang Shao }
3093490e016fSMatthew Wilcox (Oracle) EXPORT_SYMBOL_GPL(folio_wait_writeback);
309419343b5bSYafang Shao
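/*
 * Illustrative sketch (not part of the original file): a typical
 * caller is a truncate- or fsync-style path that must not touch the
 * folio while I/O is in flight.  Hypothetical example:
 */
#if 0	/* example only */
static void example_wait_clean(struct folio *folio)
{
	folio_lock(folio);
	/* With the lock held, writeback cannot restart behind us. */
	folio_wait_writeback(folio);
	/* ...safe to invalidate or release the contents here... */
	folio_unlock(folio);
}
#endif
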
3095490e016fSMatthew Wilcox (Oracle) /**
3096490e016fSMatthew Wilcox (Oracle) * folio_wait_writeback_killable - Wait for a folio to finish writeback.
3097490e016fSMatthew Wilcox (Oracle) * @folio: The folio to wait for.
3098490e016fSMatthew Wilcox (Oracle) *
3099490e016fSMatthew Wilcox (Oracle) * If the folio is currently being written back to storage, wait for the
3100490e016fSMatthew Wilcox (Oracle) * I/O to complete or a fatal signal to arrive.
3101490e016fSMatthew Wilcox (Oracle) *
3102490e016fSMatthew Wilcox (Oracle) * Context: Sleeps. Must be called in process context and with
3103490e016fSMatthew Wilcox (Oracle) * no spinlocks held. Caller should hold a reference on the folio.
3104490e016fSMatthew Wilcox (Oracle) * If the folio is not locked, writeback may start again after writeback
3105490e016fSMatthew Wilcox (Oracle) * has finished.
3106490e016fSMatthew Wilcox (Oracle) * Return: 0 on success, -EINTR if we get a fatal signal while waiting.
3107e5dbd332SMatthew Wilcox (Oracle) */
3108490e016fSMatthew Wilcox (Oracle) int folio_wait_writeback_killable(struct folio *folio)
3109e5dbd332SMatthew Wilcox (Oracle) {
3110490e016fSMatthew Wilcox (Oracle) while (folio_test_writeback(folio)) {
3111b9b0ff61SMatthew Wilcox (Oracle) trace_folio_wait_writeback(folio, folio_mapping(folio));
3112101c0bf6SMatthew Wilcox (Oracle) if (folio_wait_bit_killable(folio, PG_writeback))
3113e5dbd332SMatthew Wilcox (Oracle) return -EINTR;
3114e5dbd332SMatthew Wilcox (Oracle) }
3115e5dbd332SMatthew Wilcox (Oracle)
3116e5dbd332SMatthew Wilcox (Oracle) return 0;
3117e5dbd332SMatthew Wilcox (Oracle) }
3118490e016fSMatthew Wilcox (Oracle) EXPORT_SYMBOL_GPL(folio_wait_writeback_killable);
3119e5dbd332SMatthew Wilcox (Oracle)
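/*
 * Illustrative sketch (not part of the original file): the killable
 * variant suits fault paths such as ->page_mkwrite(), where a task
 * stuck behind unreachable storage should still die on SIGKILL.
 * Hypothetical example:
 */
#if 0	/* example only */
static vm_fault_t example_mkwrite_wait(struct folio *folio)
{
	if (folio_wait_writeback_killable(folio) < 0)
		return VM_FAULT_RETRY;	/* fatal signal arrived */
	return 0;
}
#endif
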
31201d1d1a76SDarrick J. Wong /**
3121a49d0c50SMatthew Wilcox (Oracle) * folio_wait_stable() - wait for writeback to finish, if necessary.
3122a49d0c50SMatthew Wilcox (Oracle) * @folio: The folio to wait on.
31231d1d1a76SDarrick J. Wong *
3124a49d0c50SMatthew Wilcox (Oracle) * This function determines if the given folio is related to a backing
3125a49d0c50SMatthew Wilcox (Oracle) * device that requires folio contents to be held stable during writeback.
3126a49d0c50SMatthew Wilcox (Oracle) * If so, then it will wait for any pending writeback to complete.
3127a49d0c50SMatthew Wilcox (Oracle) *
3128a49d0c50SMatthew Wilcox (Oracle) * Context: Sleeps. Must be called in process context and with
3129a49d0c50SMatthew Wilcox (Oracle) * no spinlocks held. Caller should hold a reference on the folio.
3130a49d0c50SMatthew Wilcox (Oracle) * If the folio is not locked, writeback may start again after writeback
3131a49d0c50SMatthew Wilcox (Oracle) * has finished.
31321d1d1a76SDarrick J. Wong */
3133a49d0c50SMatthew Wilcox (Oracle) void folio_wait_stable(struct folio *folio)
31341d1d1a76SDarrick J. Wong {
31353461e3bfSChristoph Hellwig if (mapping_stable_writes(folio_mapping(folio)))
3136a49d0c50SMatthew Wilcox (Oracle) folio_wait_writeback(folio);
31371d1d1a76SDarrick J. Wong }
3138a49d0c50SMatthew Wilcox (Oracle) EXPORT_SYMBOL_GPL(folio_wait_stable);
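/*
 * Illustrative sketch (not part of the original file): callers wait
 * for stability before redirtying data, so that devices which
 * checksum or compute parity over in-flight folios (the
 * AS_STABLE_WRITES case tested by mapping_stable_writes()) never see
 * contents change mid-I/O.  Hypothetical example:
 */
#if 0	/* example only */
static void example_begin_modify(struct folio *folio)
{
	folio_lock(folio);
	folio_wait_stable(folio);	/* no-op unless stable writes needed */
	/* ...modify the data... */
	folio_mark_dirty(folio);
	folio_unlock(folio);
}
#endif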