// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	Andrew Morton
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/pagevec.h>
#include <linux/timer.h>
#include <linux/sched/rt.h>
#include <linux/sched/signal.h>
#include <linux/mm_inline.h>
#include <trace/events/writeback.h>

#include "internal.h"

/*
 * Sleep at most 200ms at a time in balance_dirty_pages().
 */
#define MAX_PAUSE		max(HZ/5, 1)

/*
 * Try to keep balance_dirty_pages() call intervals higher than this many pages
 * by raising the pause time to max_pause when the interval falls below it.
 */
#define DIRTY_POLL_THRESH	(128 >> (PAGE_SHIFT - 10))

/*
 * Estimate write bandwidth at 200ms intervals.
 */
#define BANDWIDTH_INTERVAL	max(HZ/5, 1)

#define RATELIMIT_CALC_SHIFT	10

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via writeback threads) at this percentage
 */
static int dirty_background_ratio = 10;

/*
 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
 * dirty_background_ratio * the amount of dirtyable memory
 */
static unsigned long dirty_background_bytes;

/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
static int vm_highmem_is_dirtyable;

/*
 * The generator of dirty data starts writeback at this percentage
 */
static int vm_dirty_ratio = 20;

/*
 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
 * vm_dirty_ratio * the amount of dirtyable memory
 */
static unsigned long vm_dirty_bytes;

/*
 * The interval between `kupdate'-style writebacks
 */
unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */

EXPORT_SYMBOL_GPL(dirty_writeback_interval);

/*
 * The longest time for which data is allowed to remain dirty
 */
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */

struct wb_domain global_wb_domain;

/* consolidated parameters for balance_dirty_pages() and its subroutines */
struct dirty_throttle_control {
#ifdef CONFIG_CGROUP_WRITEBACK
	struct wb_domain	*dom;
	struct dirty_throttle_control *gdtc;	/* only set in memcg dtc's */
#endif
	struct bdi_writeback	*wb;
	struct fprop_local_percpu *wb_completions;

	unsigned long		avail;		/* dirtyable */
	unsigned long		dirty;		/* file_dirty + write + nfs */
	unsigned long		thresh;		/* dirty threshold */
	unsigned long		bg_thresh;	/* dirty background threshold */

	unsigned long		wb_dirty;	/* per-wb counterparts */
	unsigned long		wb_thresh;
	unsigned long		wb_bg_thresh;

	unsigned long		pos_ratio;
};

/*
 * Length of period for aging writeout fractions of bdis. This is an
 * arbitrarily chosen number. The longer the period, the slower fractions will
 * reflect changes in current writeout rate.
 */
#define VM_COMPLETIONS_PERIOD_LEN (3*HZ)

#ifdef CONFIG_CGROUP_WRITEBACK

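/*
 * Initializers for dirty_throttle_control: the GDTC_INIT*() variants set up
 * a control structure for the global wb_domain, MDTC_INIT() one for a memcg
 * wb_domain together with a back-pointer to the corresponding global control.
 */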
#define GDTC_INIT(__wb)		.wb = (__wb),				\
				.dom = &global_wb_domain,		\
				.wb_completions = &(__wb)->completions

#define GDTC_INIT_NO_WB		.dom = &global_wb_domain

#define MDTC_INIT(__wb, __gdtc)	.wb = (__wb),				\
				.dom = mem_cgroup_wb_domain(__wb),	\
				.wb_completions = &(__wb)->memcg_completions, \
				.gdtc = __gdtc

static bool mdtc_valid(struct dirty_throttle_control *dtc)
{
	return dtc->dom;
}

static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
{
	return dtc->dom;
}

static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
{
	return mdtc->gdtc;
}

static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
{
	return &wb->memcg_completions;
}

static void wb_min_max_ratio(struct bdi_writeback *wb,
			     unsigned long *minp, unsigned long *maxp)
{
	unsigned long this_bw = READ_ONCE(wb->avg_write_bandwidth);
	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
	unsigned long long min = wb->bdi->min_ratio;
	unsigned long long max = wb->bdi->max_ratio;

	/*
	 * @wb may already be clean by the time control reaches here and
	 * the total may not include its bw.
	 */
	if (this_bw < tot_bw) {
		if (min) {
			min *= this_bw;
			min = div64_ul(min, tot_bw);
		}
		if (max < 100 * BDI_RATIO_SCALE) {
			max *= this_bw;
			max = div64_ul(max, tot_bw);
		}
	}

	*minp = min;
	*maxp = max;
}

#else	/* CONFIG_CGROUP_WRITEBACK */

#define GDTC_INIT(__wb)		.wb = (__wb),				\
				.wb_completions = &(__wb)->completions
#define GDTC_INIT_NO_WB
#define MDTC_INIT(__wb, __gdtc)

static bool mdtc_valid(struct dirty_throttle_control *dtc)
{
	return false;
}

static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
{
	return &global_wb_domain;
}

static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
{
	return NULL;
}

static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
{
	return NULL;
}

static void wb_min_max_ratio(struct bdi_writeback *wb,
			     unsigned long *minp, unsigned long *maxp)
{
	*minp = wb->bdi->min_ratio;
	*maxp = wb->bdi->max_ratio;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

/*
 * In a memory zone, there is a certain amount of pages we consider
 * available for the page cache, which is essentially the number of
 * free and reclaimable pages, minus some zone reserves to protect
 * lowmem and the ability to uphold the zone's watermarks without
 * requiring writeback.
 *
 * This number of dirtyable pages is the base value; the user-configurable
 * dirty ratio determines the effective number of pages that are allowed to
 * be actually dirtied, either per individual zone or globally by using the
 * sum of dirtyable pages over all zones.
 *
 * Because the user is allowed to specify the dirty limit globally as
 * absolute number of bytes, calculating the per-zone dirty limit can
 * require translating the configured limit into a percentage of
 * global dirtyable memory first.
 */

/**
 * node_dirtyable_memory - number of dirtyable pages in a node
 * @pgdat: the node
 *
 * Return: the node's number of pages potentially available for dirty
 * page cache. This is the base value for the per-node dirty limits.
 */
static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
{
	unsigned long nr_pages = 0;
	int z;

	for (z = 0; z < MAX_NR_ZONES; z++) {
		struct zone *zone = pgdat->node_zones + z;

		if (!populated_zone(zone))
			continue;

		nr_pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	/*
	 * Pages reserved for the kernel should not be considered
	 * dirtyable, to prevent a situation where reclaim has to
	 * clean pages in order to balance the zones.
	 */
	nr_pages -= min(nr_pages, pgdat->totalreserve_pages);

	nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE);
	nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE);

	return nr_pages;
}

static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;
	int i;

	for_each_node_state(node, N_HIGH_MEMORY) {
		for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
			struct zone *z;
			unsigned long nr_pages;

			if (!is_highmem_idx(i))
				continue;

			z = &NODE_DATA(node)->node_zones[i];
			if (!populated_zone(z))
				continue;

			nr_pages = zone_page_state(z, NR_FREE_PAGES);
			/* watch for underflows */
			nr_pages -= min(nr_pages, high_wmark_pages(z));
			nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
			nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
			x += nr_pages;
		}
	}

	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the number of the total dirtyable memory. This can only
	 * occur in very strange VM situations but we want to make sure
	 * that this does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}

/**
 * global_dirtyable_memory - number of globally dirtyable pages
 *
 * Return: the global number of pages potentially available for dirty
 * page cache. This is the base value for the global dirty limits.
 */
static unsigned long global_dirtyable_memory(void)
{
	unsigned long x;

	x = global_zone_page_state(NR_FREE_PAGES);
	/*
	 * Pages reserved for the kernel should not be considered
	 * dirtyable, to prevent a situation where reclaim has to
	 * clean pages in order to balance the zones.
	 */
	x -= min(x, totalreserve_pages);

	x += global_node_page_state(NR_INACTIVE_FILE);
	x += global_node_page_state(NR_ACTIVE_FILE);

	if (!vm_highmem_is_dirtyable)
		x -= highmem_dirtyable_memory(x);

	return x + 1;	/* Ensure that we never return 0 */
}

/**
 * domain_dirty_limits - calculate thresh and bg_thresh for a wb_domain
 * @dtc: dirty_throttle_control of interest
 *
 * Calculate @dtc->thresh and ->bg_thresh considering
 * vm_dirty_{bytes|ratio} and dirty_background_{bytes|ratio}. The caller
 * must ensure that @dtc->avail is set before calling this function. The
 * dirty limits will be lifted by 1/4 for real-time tasks.
 */
static void domain_dirty_limits(struct dirty_throttle_control *dtc)
{
	const unsigned long available_memory = dtc->avail;
	struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
	unsigned long bytes = vm_dirty_bytes;
	unsigned long bg_bytes = dirty_background_bytes;
	/* convert ratios to per-PAGE_SIZE for higher precision */
	unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
	unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
	unsigned long thresh;
	unsigned long bg_thresh;
	struct task_struct *tsk;

	/* gdtc is !NULL iff @dtc is for memcg domain */
	if (gdtc) {
		unsigned long global_avail = gdtc->avail;

		/*
		 * The byte settings can't be applied directly to memcg
		 * domains. Convert them to ratios by scaling against
		 * globally available memory. As the ratios are in
		 * per-PAGE_SIZE, they can be obtained by dividing bytes by
		 * number of pages.
		 */
		if (bytes)
			ratio = min(DIV_ROUND_UP(bytes, global_avail),
				    PAGE_SIZE);
		if (bg_bytes)
			bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
				       PAGE_SIZE);
		bytes = bg_bytes = 0;
	}

	if (bytes)
		thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
	else
		thresh = (ratio * available_memory) / PAGE_SIZE;

	if (bg_bytes)
		bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
	else
		bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;

	if (bg_thresh >= thresh)
		bg_thresh = thresh / 2;
	tsk = current;
	if (rt_task(tsk)) {
		bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
		thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
	}
	dtc->thresh = thresh;
	dtc->bg_thresh = bg_thresh;

	/* we should eventually report the domain in the TP */
	if (!gdtc)
		trace_global_dirty_state(bg_thresh, thresh);
}

/**
 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 * @pbackground: out parameter for bg_thresh
 * @pdirty: out parameter for thresh
 *
 * Calculate bg_thresh and thresh for global_wb_domain. See
 * domain_dirty_limits() for details.
 */
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
	struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };

	gdtc.avail = global_dirtyable_memory();
	domain_dirty_limits(&gdtc);

	*pbackground = gdtc.bg_thresh;
	*pdirty = gdtc.thresh;
}

/**
 * node_dirty_limit - maximum number of dirty pages allowed in a node
 * @pgdat: the node
 *
 * Return: the maximum number of dirty pages allowed in a node, based
 * on the node's dirtyable memory.
 */
static unsigned long node_dirty_limit(struct pglist_data *pgdat)
{
	unsigned long node_memory = node_dirtyable_memory(pgdat);
	struct task_struct *tsk = current;
	unsigned long dirty;

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
			node_memory / global_dirtyable_memory();
	else
		dirty = vm_dirty_ratio * node_memory / 100;

	if (rt_task(tsk))
		dirty += dirty / 4;

	return dirty;
}

/**
 * node_dirty_ok - tells whether a node is within its dirty limits
 * @pgdat: the node to check
 *
 * Return: %true when the dirty pages in @pgdat are within the node's
 * dirty limit, %false if the limit is exceeded.
 */
bool node_dirty_ok(struct pglist_data *pgdat)
{
	unsigned long limit = node_dirty_limit(pgdat);
	unsigned long nr_pages = 0;

	nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);
	nr_pages += node_page_state(pgdat, NR_WRITEBACK);

	return nr_pages <= limit;
}

#ifdef CONFIG_SYSCTL
static int dirty_background_ratio_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_bytes = 0;
	return ret;
}

static int dirty_background_bytes_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_ratio = 0;
	return ret;
}

static int dirty_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
		writeback_set_ratelimit();
		vm_dirty_bytes = 0;
	}
	return ret;
}

static int dirty_bytes_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned long old_bytes = vm_dirty_bytes;
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
		writeback_set_ratelimit();
		vm_dirty_ratio = 0;
	}
	return ret;
}
#endif

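/*
 * Return the expiry time of the next writeout aging period. The value 0 is
 * reserved to mean "period timer not running", so it is skipped.
 */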
static unsigned long wp_next_time(unsigned long cur_time)
{
	cur_time += VM_COMPLETIONS_PERIOD_LEN;
	/* 0 has a special meaning... */
	if (!cur_time)
		return 1;
	return cur_time;
}

static void wb_domain_writeout_add(struct wb_domain *dom,
				   struct fprop_local_percpu *completions,
				   unsigned int max_prop_frac, long nr)
{
	__fprop_add_percpu_max(&dom->completions, completions,
			       max_prop_frac, nr);
	/* First event after period switching was turned off? */
	if (unlikely(!dom->period_time)) {
		/*
		 * We can race with other wb_domain_writeout_add() calls here
		 * but it does not cause any harm since the resulting time
		 * when the timer will fire and what ends up in
		 * dom->period_time will be roughly the same.
		 */
		dom->period_time = wp_next_time(jiffies);
		mod_timer(&dom->period_timer, dom->period_time);
	}
}

/*
 * Increment @wb's writeout completion count and the global writeout
 * completion count. Called from __folio_end_writeback().
 */
static inline void __wb_writeout_add(struct bdi_writeback *wb, long nr)
{
	struct wb_domain *cgdom;

	wb_stat_mod(wb, WB_WRITTEN, nr);
	wb_domain_writeout_add(&global_wb_domain, &wb->completions,
			       wb->bdi->max_prop_frac, nr);

	cgdom = mem_cgroup_wb_domain(wb);
	if (cgdom)
		wb_domain_writeout_add(cgdom, wb_memcg_completions(wb),
				       wb->bdi->max_prop_frac, nr);
}

void wb_writeout_inc(struct bdi_writeback *wb)
{
	unsigned long flags;

	local_irq_save(flags);
	__wb_writeout_add(wb, 1);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(wb_writeout_inc);

/*
 * On an idle system we can be called long after we were scheduled, because
 * we use deferred timers, so account for the missed periods.
 */
static void writeout_period(struct timer_list *t)
{
	struct wb_domain *dom = from_timer(dom, t, period_timer);
	int miss_periods = (jiffies - dom->period_time) /
						 VM_COMPLETIONS_PERIOD_LEN;

	if (fprop_new_period(&dom->completions, miss_periods + 1)) {
		dom->period_time = wp_next_time(dom->period_time +
				miss_periods * VM_COMPLETIONS_PERIOD_LEN);
		mod_timer(&dom->period_timer, dom->period_time);
	} else {
		/*
		 * Aging has zeroed all fractions. Stop wasting CPU on period
		 * updates.
		 */
		dom->period_time = 0;
	}
}

int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
{
	memset(dom, 0, sizeof(*dom));

	spin_lock_init(&dom->lock);

	timer_setup(&dom->period_timer, writeout_period, TIMER_DEFERRABLE);

	dom->dirty_limit_tstamp = jiffies;

	return fprop_global_init(&dom->completions, gfp);
}

#ifdef CONFIG_CGROUP_WRITEBACK
void wb_domain_exit(struct wb_domain *dom)
{
	del_timer_sync(&dom->period_timer);
	fprop_global_destroy(&dom->completions);
}
#endif

/*
 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
 * registered backing devices, which, for obvious reasons, can not
 * exceed 100%.
 */
static unsigned int bdi_min_ratio;

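/* Reject byte-based limits that exceed the currently dirtyable memory. */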
static int bdi_check_pages_limit(unsigned long pages)
{
	unsigned long max_dirty_pages = global_dirtyable_memory();

	if (pages > max_dirty_pages)
		return -EINVAL;

	return 0;
}

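/*
 * Convert a number of pages into a ratio of the global dirty threshold,
 * scaled by BDI_RATIO_SCALE.
 */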
static unsigned long bdi_ratio_from_pages(unsigned long pages)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long ratio;

	global_dirty_limits(&background_thresh, &dirty_thresh);
	ratio = div64_u64(pages * 100ULL * BDI_RATIO_SCALE, dirty_thresh);

	return ratio;
}

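/*
 * Convert a BDI_RATIO_SCALE scaled ratio back into a byte value relative to
 * the global dirty threshold.
 */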
static u64 bdi_get_bytes(unsigned int ratio)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	u64 bytes;

	global_dirty_limits(&background_thresh, &dirty_thresh);
	bytes = (dirty_thresh * PAGE_SIZE * ratio) / BDI_RATIO_SCALE / 100;

	return bytes;
}

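/*
 * Update bdi->min_ratio while keeping min_ratio <= bdi->max_ratio and the
 * system-wide sum bdi_min_ratio of all minimum shares below 100%.
 */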
static int __bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	unsigned int delta;
	int ret = 0;

	if (min_ratio > 100 * BDI_RATIO_SCALE)
		return -EINVAL;
	min_ratio *= BDI_RATIO_SCALE;

	spin_lock_bh(&bdi_lock);
	if (min_ratio > bdi->max_ratio) {
		ret = -EINVAL;
	} else {
		if (min_ratio < bdi->min_ratio) {
			delta = bdi->min_ratio - min_ratio;
			bdi_min_ratio -= delta;
			bdi->min_ratio = min_ratio;
		} else {
			delta = min_ratio - bdi->min_ratio;
			if (bdi_min_ratio + delta < 100 * BDI_RATIO_SCALE) {
				bdi_min_ratio += delta;
				bdi->min_ratio = min_ratio;
			} else {
				ret = -EINVAL;
			}
		}
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}

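/*
 * Update bdi->max_ratio (and the matching max_prop_frac) after checking
 * that it does not drop below this bdi's configured min_ratio.
 */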
static int __bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio)
{
	int ret = 0;

	if (max_ratio > 100 * BDI_RATIO_SCALE)
		return -EINVAL;

	spin_lock_bh(&bdi_lock);
	if (bdi->min_ratio > max_ratio) {
		ret = -EINVAL;
	} else {
		bdi->max_ratio = max_ratio;
		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}

int bdi_set_min_ratio_no_scale(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	return __bdi_set_min_ratio(bdi, min_ratio);
}

int bdi_set_max_ratio_no_scale(struct backing_dev_info *bdi, unsigned int max_ratio)
{
	return __bdi_set_max_ratio(bdi, max_ratio);
}

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	return __bdi_set_min_ratio(bdi, min_ratio * BDI_RATIO_SCALE);
}

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio)
{
	return __bdi_set_max_ratio(bdi, max_ratio * BDI_RATIO_SCALE);
}
EXPORT_SYMBOL(bdi_set_max_ratio);

u64 bdi_get_min_bytes(struct backing_dev_info *bdi)
{
	return bdi_get_bytes(bdi->min_ratio);
}

int bdi_set_min_bytes(struct backing_dev_info *bdi, u64 min_bytes)
{
	int ret;
	unsigned long pages = min_bytes >> PAGE_SHIFT;
	unsigned long min_ratio;

	ret = bdi_check_pages_limit(pages);
	if (ret)
		return ret;

	min_ratio = bdi_ratio_from_pages(pages);
	return __bdi_set_min_ratio(bdi, min_ratio);
}

u64 bdi_get_max_bytes(struct backing_dev_info *bdi)
{
	return bdi_get_bytes(bdi->max_ratio);
}

int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes)
{
	int ret;
	unsigned long pages = max_bytes >> PAGE_SHIFT;
	unsigned long max_ratio;

	ret = bdi_check_pages_limit(pages);
	if (ret)
		return ret;

	max_ratio = bdi_ratio_from_pages(pages);
	return __bdi_set_max_ratio(bdi, max_ratio);
}

int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit)
{
	if (strict_limit > 1)
		return -EINVAL;

	spin_lock_bh(&bdi_lock);
	if (strict_limit)
		bdi->capabilities |= BDI_CAP_STRICTLIMIT;
	else
		bdi->capabilities &= ~BDI_CAP_STRICTLIMIT;
	spin_unlock_bh(&bdi_lock);

	return 0;
}

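/*
 * The dirty "freerun" ceiling: below the midpoint of the background and
 * hard dirty thresholds the dirtier tasks are left alone; throttling only
 * kicks in above it.
 */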
static unsigned long dirty_freerun_ceiling(unsigned long thresh,
					   unsigned long bg_thresh)
{
	return (thresh + bg_thresh) / 2;
}

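/*
 * Never let the effective dirty threshold drop below the domain-wide
 * dirty_limit maintained in the wb_domain.
 */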
static unsigned long hard_dirty_limit(struct wb_domain *dom,
				      unsigned long thresh)
{
	return max(thresh, dom->dirty_limit);
}

/*
 * Memory which can be further allocated to a memcg domain is capped by
 * system-wide clean memory excluding the amount being used in the domain.
 */
static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
			    unsigned long filepages, unsigned long headroom)
{
	struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc);
	unsigned long clean = filepages - min(filepages, mdtc->dirty);
	unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty);
	unsigned long other_clean = global_clean - min(global_clean, clean);

	mdtc->avail = filepages + min(headroom, other_clean);
}

/**
 * __wb_calc_thresh - @wb's share of dirty throttling threshold
 * @dtc: dirty_throttle_control of interest
 *
 * Note that balance_dirty_pages() will only seriously take it as a hard limit
 * when sleeping max_pause per page is not enough to keep the dirty pages under
 * control. For example, when the device is completely stalled due to some error
 * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
 * In other normal situations, it acts more gently by throttling the tasks more
 * (rather than completely blocking them) when the wb dirty pages go high.
 *
 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
 * - starving fast devices
 * - piling up dirty pages (that will take a long time to sync) on slow devices
 *
 * The wb's share of the dirty limit adapts to its throughput and is bounded
 * by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 *
 * Return: @wb's dirty limit in pages. The term "dirty" in the context of
 * dirty balancing includes all PG_dirty and PG_writeback pages.
 */
static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
{
	struct wb_domain *dom = dtc_dom(dtc);
	unsigned long thresh = dtc->thresh;
	u64 wb_thresh;
	unsigned long numerator, denominator;
	unsigned long wb_min_ratio, wb_max_ratio;

	/*
	 * Calculate this BDI's share of the thresh ratio.
	 */
	fprop_fraction_percpu(&dom->completions, dtc->wb_completions,
			      &numerator, &denominator);

	wb_thresh = (thresh * (100 * BDI_RATIO_SCALE - bdi_min_ratio)) / (100 * BDI_RATIO_SCALE);
	wb_thresh *= numerator;
	wb_thresh = div64_ul(wb_thresh, denominator);

	wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio);

	wb_thresh += (thresh * wb_min_ratio) / (100 * BDI_RATIO_SCALE);
	if (wb_thresh > (thresh * wb_max_ratio) / (100 * BDI_RATIO_SCALE))
		wb_thresh = thresh * wb_max_ratio / (100 * BDI_RATIO_SCALE);

	return wb_thresh;
}

unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
{
	struct dirty_throttle_control gdtc = { GDTC_INIT(wb),
					       .thresh = thresh };
	return __wb_calc_thresh(&gdtc);
}

/*
 *                           setpoint - dirty 3
 *        f(dirty) := 1.0 + (----------------)
 *                           limit - setpoint
 *
 * it's a 3rd order polynomial that is subject to
 *
 * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
 * (2) f(setpoint) = 1.0 => the balance point
 * (3) f(limit)    = 0   => the hard limit
 * (4) df/dx      <= 0   => negative feedback control
 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
 *     => fast response on large errors; small oscillation near setpoint
 */
static long long pos_ratio_polynom(unsigned long setpoint,
				   unsigned long dirty,
				   unsigned long limit)
{
	long long pos_ratio;
	long x;

	x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
		      (limit - setpoint) | 1);
	pos_ratio = x;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;

	return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
}

9265a537485SMaxim Patlasov /*
9276c14ae1eSWu Fengguang * Dirty position control.
9286c14ae1eSWu Fengguang *
9296c14ae1eSWu Fengguang * (o) global/bdi setpoints
9306c14ae1eSWu Fengguang *
931de1fff37STejun Heo * We want the dirty pages be balanced around the global/wb setpoints.
9326c14ae1eSWu Fengguang * When the number of dirty pages is higher/lower than the setpoint, the
9336c14ae1eSWu Fengguang * dirty position control ratio (and hence task dirty ratelimit) will be
9346c14ae1eSWu Fengguang * decreased/increased to bring the dirty pages back to the setpoint.
9356c14ae1eSWu Fengguang *
9366c14ae1eSWu Fengguang * pos_ratio = 1 << RATELIMIT_CALC_SHIFT
9376c14ae1eSWu Fengguang *
9386c14ae1eSWu Fengguang * if (dirty < setpoint) scale up pos_ratio
9396c14ae1eSWu Fengguang * if (dirty > setpoint) scale down pos_ratio
9406c14ae1eSWu Fengguang *
941de1fff37STejun Heo * if (wb_dirty < wb_setpoint) scale up pos_ratio
942de1fff37STejun Heo * if (wb_dirty > wb_setpoint) scale down pos_ratio
9436c14ae1eSWu Fengguang *
9446c14ae1eSWu Fengguang * task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
9456c14ae1eSWu Fengguang *
9466c14ae1eSWu Fengguang * (o) global control line
9476c14ae1eSWu Fengguang *
9486c14ae1eSWu Fengguang * ^ pos_ratio
9496c14ae1eSWu Fengguang * |
9506c14ae1eSWu Fengguang * | |<===== global dirty control scope ======>|
95103231554SChi Wu * 2.0 * * * * * * *
9526c14ae1eSWu Fengguang * | .*
9536c14ae1eSWu Fengguang * | . *
9546c14ae1eSWu Fengguang * | . *
9556c14ae1eSWu Fengguang * | . *
9566c14ae1eSWu Fengguang * | . *
9576c14ae1eSWu Fengguang * | . *
9586c14ae1eSWu Fengguang * 1.0 ................................*
9596c14ae1eSWu Fengguang * | . . *
9606c14ae1eSWu Fengguang * | . . *
9616c14ae1eSWu Fengguang * | . . *
9626c14ae1eSWu Fengguang * | . . *
9636c14ae1eSWu Fengguang * | . . *
9646c14ae1eSWu Fengguang * 0 +------------.------------------.----------------------*------------->
9656c14ae1eSWu Fengguang * freerun^ setpoint^ limit^ dirty pages
9666c14ae1eSWu Fengguang *
967de1fff37STejun Heo * (o) wb control line
9686c14ae1eSWu Fengguang *
9696c14ae1eSWu Fengguang * ^ pos_ratio
9706c14ae1eSWu Fengguang * |
9716c14ae1eSWu Fengguang * | *
9726c14ae1eSWu Fengguang * | *
9736c14ae1eSWu Fengguang * | *
9746c14ae1eSWu Fengguang * | *
9756c14ae1eSWu Fengguang * | * |<=========== span ============>|
9766c14ae1eSWu Fengguang * 1.0 .......................*
9776c14ae1eSWu Fengguang * | . *
9786c14ae1eSWu Fengguang * | . *
9796c14ae1eSWu Fengguang * | . *
9806c14ae1eSWu Fengguang * | . *
9816c14ae1eSWu Fengguang * | . *
9826c14ae1eSWu Fengguang * | . *
9836c14ae1eSWu Fengguang * | . *
9846c14ae1eSWu Fengguang * | . *
9856c14ae1eSWu Fengguang * | . *
9866c14ae1eSWu Fengguang * | . *
9876c14ae1eSWu Fengguang * | . *
9886c14ae1eSWu Fengguang * 1/4 ...............................................* * * * * * * * * * * *
9896c14ae1eSWu Fengguang * | . .
9906c14ae1eSWu Fengguang * | . .
9916c14ae1eSWu Fengguang * | . .
9926c14ae1eSWu Fengguang * 0 +----------------------.-------------------------------.------------->
993de1fff37STejun Heo * wb_setpoint^ x_intercept^
9946c14ae1eSWu Fengguang *
995de1fff37STejun Heo * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
9966c14ae1eSWu Fengguang * be smoothly throttled down to normal if it starts high in situations like
9976c14ae1eSWu Fengguang * - start writing to a slow SD card and a fast disk at the same time. The SD
998de1fff37STejun Heo * card's wb_dirty may rush to many times higher than wb_setpoint.
999de1fff37STejun Heo * - the wb dirty thresh drops quickly due to change of JBOD workload
10006c14ae1eSWu Fengguang */
static void wb_position_ratio(struct dirty_throttle_control *dtc)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long write_bw = READ_ONCE(wb->avg_write_bandwidth);
	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
	unsigned long wb_thresh = dtc->wb_thresh;
	unsigned long x_intercept;
	unsigned long setpoint;		/* dirty pages' target balance point */
	unsigned long wb_setpoint;
	unsigned long span;
	long long pos_ratio;		/* for scaling up/down the rate limit */
	long x;

	dtc->pos_ratio = 0;

	if (unlikely(dtc->dirty >= limit))
		return;

	/*
	 * global setpoint
	 *
	 * See comment for pos_ratio_polynom().
	 */
	setpoint = (freerun + limit) / 2;
	pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit);

	/*
	 * The strictlimit feature is a tool preventing mistrusted filesystems
	 * from growing a large number of dirty pages before throttling. For
	 * such filesystems balance_dirty_pages always checks wb counters
	 * against wb limits. Even if global "nr_dirty" is under "freerun".
	 * This is especially important for fuse which sets bdi->max_ratio to
	 * 1% by default. Without strictlimit feature, fuse writeback may
	 * consume arbitrary amount of RAM because it is accounted in
	 * NR_WRITEBACK_TEMP which is not involved in calculating "nr_dirty".
	 *
	 * Here, in wb_position_ratio(), we calculate pos_ratio based on
	 * two values: wb_dirty and wb_thresh. Let's consider an example:
	 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
	 * limits are set by default to 10% and 20% (background and throttle).
	 * Then wb_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
	 * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is
	 * about ~6K pages (as the average of background and throttle wb
	 * limits). The 3rd order polynomial will provide positive feedback if
	 * wb_dirty is under wb_setpoint and vice versa.
	 *
	 * Note, that we cannot use global counters in these calculations
	 * because we want to throttle process writing to a strictlimit wb
	 * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB
	 * in the example above).
	 */
	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
		long long wb_pos_ratio;

		if (dtc->wb_dirty < 8) {
			dtc->pos_ratio = min_t(long long, pos_ratio * 2,
					       2 << RATELIMIT_CALC_SHIFT);
			return;
		}

		if (dtc->wb_dirty >= wb_thresh)
			return;

		wb_setpoint = dirty_freerun_ceiling(wb_thresh,
						    dtc->wb_bg_thresh);

		if (wb_setpoint == 0 || wb_setpoint == wb_thresh)
			return;

		wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty,
						 wb_thresh);

		/*
		 * Typically, for strictlimit case, wb_setpoint << setpoint
1076de1fff37STejun Heo * and pos_ratio >> wb_pos_ratio. In other words, the global
10775a537485SMaxim Patlasov * state ("dirty") is not the limiting factor and we have to
1078de1fff37STejun Heo * make decision based on wb counters. But there is an
10795a537485SMaxim Patlasov * important case when global pos_ratio should get precedence:
10805a537485SMaxim Patlasov * global limits are exceeded (e.g. due to activities on other
1081de1fff37STejun Heo * wb's) while given strictlimit wb is below limit.
10825a537485SMaxim Patlasov *
1083de1fff37STejun Heo * "pos_ratio * wb_pos_ratio" would work for the case above,
10845a537485SMaxim Patlasov * but it would look unnatural for the case of all
1085de1fff37STejun Heo * activity in the system coming from a single strictlimit wb
10865a537485SMaxim Patlasov * with bdi->max_ratio == 100%.
10875a537485SMaxim Patlasov *
10885a537485SMaxim Patlasov * Note that min() below somewhat changes the dynamics of the
10895a537485SMaxim Patlasov * control system. Normally, pos_ratio value can be well over 3
1090de1fff37STejun Heo * (when globally we are at freerun and wb is well below wb
10915a537485SMaxim Patlasov * setpoint). Now the maximum pos_ratio in the same situation
10925a537485SMaxim Patlasov * is 2. We might want to tweak this if we observe the control
10935a537485SMaxim Patlasov * system is too slow to adapt.
10945a537485SMaxim Patlasov */
1095daddfa3cSTejun Heo dtc->pos_ratio = min(pos_ratio, wb_pos_ratio);
1096daddfa3cSTejun Heo return;
10975a537485SMaxim Patlasov }
10986c14ae1eSWu Fengguang
10996c14ae1eSWu Fengguang /*
11006c14ae1eSWu Fengguang * We have computed basic pos_ratio above based on global situation. If
1101de1fff37STejun Heo * the wb is over/under its share of dirty pages, we want to scale
11026c14ae1eSWu Fengguang * pos_ratio further down/up. That is done by the following mechanism.
11036c14ae1eSWu Fengguang */
11046c14ae1eSWu Fengguang
11056c14ae1eSWu Fengguang /*
1106de1fff37STejun Heo * wb setpoint
11076c14ae1eSWu Fengguang *
1108de1fff37STejun Heo * f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint)
11096c14ae1eSWu Fengguang *
1110de1fff37STejun Heo * x_intercept - wb_dirty
11116c14ae1eSWu Fengguang * := --------------------------
1112de1fff37STejun Heo * x_intercept - wb_setpoint
11136c14ae1eSWu Fengguang *
1114de1fff37STejun Heo * The main wb control line is a linear function subject to
11156c14ae1eSWu Fengguang *
1116de1fff37STejun Heo * (1) f(wb_setpoint) = 1.0
1117de1fff37STejun Heo * (2) k = - 1 / (8 * write_bw) (in single wb case)
1118de1fff37STejun Heo * or equally: x_intercept = wb_setpoint + 8 * write_bw
11196c14ae1eSWu Fengguang *
1120de1fff37STejun Heo * For single wb case, the dirty pages are observed to fluctuate
11216c14ae1eSWu Fengguang * regularly within range
1122de1fff37STejun Heo * [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2]
11236c14ae1eSWu Fengguang * for various filesystems, where (2) can yield a reasonable 12.5%
11246c14ae1eSWu Fengguang * fluctuation range for pos_ratio.
11256c14ae1eSWu Fengguang *
1126de1fff37STejun Heo * For JBOD case, wb_thresh (not wb_dirty!) could fluctuate up to its
11276c14ae1eSWu Fengguang * own size, so move the slope over accordingly and choose a slope that
1128de1fff37STejun Heo * yields 100% pos_ratio fluctuation on suddenly doubled wb_thresh.
11296c14ae1eSWu Fengguang */
11302bc00aefSTejun Heo if (unlikely(wb_thresh > dtc->thresh))
11312bc00aefSTejun Heo wb_thresh = dtc->thresh;
1132aed21ad2SWu Fengguang /*
1133de1fff37STejun Heo * It's very possible that wb_thresh is close to 0 not because the
1134aed21ad2SWu Fengguang * device is slow, but because it has remained inactive for a long time.
1135aed21ad2SWu Fengguang * Grant such devices a reasonably good (hopefully IO efficient)
1136aed21ad2SWu Fengguang * threshold, so that occasional writes won't be blocked and active
1137aed21ad2SWu Fengguang * writes can ramp up the threshold quickly.
1138aed21ad2SWu Fengguang */
11392bc00aefSTejun Heo wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8);
11406c14ae1eSWu Fengguang /*
1141de1fff37STejun Heo * scale global setpoint to wb's:
1142de1fff37STejun Heo * wb_setpoint = setpoint * wb_thresh / thresh
11436c14ae1eSWu Fengguang */
1144e4bc13adSLinus Torvalds x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1);
1145de1fff37STejun Heo wb_setpoint = setpoint * (u64)x >> 16;
11466c14ae1eSWu Fengguang /*
1147de1fff37STejun Heo * Use span=(8*write_bw) in single wb case as indicated by
1148de1fff37STejun Heo * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case.
11496c14ae1eSWu Fengguang *
1150de1fff37STejun Heo * wb_thresh thresh - wb_thresh
1151de1fff37STejun Heo * span = --------- * (8 * write_bw) + ------------------ * wb_thresh
11526c14ae1eSWu Fengguang * thresh thresh
11536c14ae1eSWu Fengguang */
11542bc00aefSTejun Heo span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16;
1155de1fff37STejun Heo x_intercept = wb_setpoint + span;
11566c14ae1eSWu Fengguang
11572bc00aefSTejun Heo if (dtc->wb_dirty < x_intercept - span / 4) {
11582bc00aefSTejun Heo pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty),
1159e4bc13adSLinus Torvalds (x_intercept - wb_setpoint) | 1);
11606c14ae1eSWu Fengguang } else
11616c14ae1eSWu Fengguang pos_ratio /= 4;
11626c14ae1eSWu Fengguang
11638927f66cSWu Fengguang /*
1164de1fff37STejun Heo * wb reserve area, safeguard against dirty pool underrun and disk idle
11658927f66cSWu Fengguang * It may push the desired control point of global dirty pages higher
11668927f66cSWu Fengguang * than setpoint.
11678927f66cSWu Fengguang */
1168de1fff37STejun Heo x_intercept = wb_thresh / 2;
11692bc00aefSTejun Heo if (dtc->wb_dirty < x_intercept) {
11702bc00aefSTejun Heo if (dtc->wb_dirty > x_intercept / 8)
11712bc00aefSTejun Heo pos_ratio = div_u64(pos_ratio * x_intercept,
11722bc00aefSTejun Heo dtc->wb_dirty);
117350657fc4SWu Fengguang else
11748927f66cSWu Fengguang pos_ratio *= 8;
11758927f66cSWu Fengguang }
11768927f66cSWu Fengguang
1177daddfa3cSTejun Heo dtc->pos_ratio = pos_ratio;
11786c14ae1eSWu Fengguang }
11796c14ae1eSWu Fengguang
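/*
 * Illustrative userspace sketch (not kernel code): the global control
 * line evaluated above is f(dirty) = 1 + ((setpoint - dirty) /
 * (limit - setpoint))^3, clamped to [0, 2], in 10-bit fixed point.
 * demo_pos_ratio(), DEMO_SHIFT and the sample numbers below are made up
 * for the example; only the formula mirrors pos_ratio_polynom().
 */
#include <stdio.h>

#define DEMO_SHIFT 10				/* mirrors RATELIMIT_CALC_SHIFT */

static long long demo_pos_ratio(long long setpoint, long long dirty,
				long long limit)
{
	long long x = (setpoint - dirty) * (1 << DEMO_SHIFT) /
		      ((limit - setpoint) | 1);	/* |1 avoids div-by-zero */
	long long pos = x * x / (1 << DEMO_SHIFT) * x / (1 << DEMO_SHIFT);

	pos += 1 << DEMO_SHIFT;			/* 1.0 + x^3 */
	if (pos < 0)
		pos = 0;
	if (pos > 2 << DEMO_SHIFT)
		pos = 2 << DEMO_SHIFT;
	return pos;
}

int main(void)
{
	long long freerun = 1000, limit = 3000;
	long long setpoint = (freerun + limit) / 2;

	for (long long dirty = freerun; dirty <= limit; dirty += 500)
		printf("dirty=%4lld  pos_ratio=%.3f\n", dirty,
		       demo_pos_ratio(setpoint, dirty, limit) /
		       (double)(1 << DEMO_SHIFT));
	return 0;
}
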
1180a88a341aSTejun Heo static void wb_update_write_bandwidth(struct bdi_writeback *wb,
1181e98be2d5SWu Fengguang unsigned long elapsed,
1182e98be2d5SWu Fengguang unsigned long written)
1183e98be2d5SWu Fengguang {
1184e98be2d5SWu Fengguang const unsigned long period = roundup_pow_of_two(3 * HZ);
1185a88a341aSTejun Heo unsigned long avg = wb->avg_write_bandwidth;
1186a88a341aSTejun Heo unsigned long old = wb->write_bandwidth;
1187e98be2d5SWu Fengguang u64 bw;
1188e98be2d5SWu Fengguang
1189e98be2d5SWu Fengguang /*
1190e98be2d5SWu Fengguang * bw = written * HZ / elapsed
1191e98be2d5SWu Fengguang *
1192e98be2d5SWu Fengguang * bw * elapsed + write_bandwidth * (period - elapsed)
1193e98be2d5SWu Fengguang * write_bandwidth = ---------------------------------------------------
1194e98be2d5SWu Fengguang * period
1195c72efb65STejun Heo *
1196ed2da924SChristoph Hellwig * @written may have decreased due to folio_redirty_for_writepage().
1197c72efb65STejun Heo * Avoid underflowing @bw calculation.
1198e98be2d5SWu Fengguang */
1199a88a341aSTejun Heo bw = written - min(written, wb->written_stamp);
1200e98be2d5SWu Fengguang bw *= HZ;
1201e98be2d5SWu Fengguang if (unlikely(elapsed > period)) {
12020a5d1a7fSWen Yang bw = div64_ul(bw, elapsed);
1203e98be2d5SWu Fengguang avg = bw;
1204e98be2d5SWu Fengguang goto out;
1205e98be2d5SWu Fengguang }
1206a88a341aSTejun Heo bw += (u64)wb->write_bandwidth * (period - elapsed);
1207e98be2d5SWu Fengguang bw >>= ilog2(period);
1208e98be2d5SWu Fengguang
1209e98be2d5SWu Fengguang /*
1210e98be2d5SWu Fengguang * one more level of smoothing, for filtering out sudden spikes
1211e98be2d5SWu Fengguang */
1212e98be2d5SWu Fengguang if (avg > old && old >= (unsigned long)bw)
1213e98be2d5SWu Fengguang avg -= (avg - old) >> 3;
1214e98be2d5SWu Fengguang
1215e98be2d5SWu Fengguang if (avg < old && old <= (unsigned long)bw)
1216e98be2d5SWu Fengguang avg += (old - avg) >> 3;
1217e98be2d5SWu Fengguang
1218e98be2d5SWu Fengguang out:
121995a46c65STejun Heo /* keep avg > 0 to guarantee that tot > 0 if there are dirty wbs */
122095a46c65STejun Heo avg = max(avg, 1LU);
122195a46c65STejun Heo if (wb_has_dirty_io(wb)) {
122295a46c65STejun Heo long delta = avg - wb->avg_write_bandwidth;
122395a46c65STejun Heo WARN_ON_ONCE(atomic_long_add_return(delta,
122495a46c65STejun Heo &wb->bdi->tot_write_bandwidth) <= 0);
122595a46c65STejun Heo }
1226a88a341aSTejun Heo wb->write_bandwidth = bw;
122720792ebfSJan Kara WRITE_ONCE(wb->avg_write_bandwidth, avg);
1228e98be2d5SWu Fengguang }
1229e98be2d5SWu Fengguang
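/*
 * Illustrative userspace sketch (not kernel code): the period-weighted
 * average computed above, with the ~3 s period rounded up to a power of
 * two so the final division becomes a shift. Here "written" is already
 * the delta since the last stamp, and DEMO_HZ/DEMO_PERIOD are assumed
 * values for the example only.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_HZ		250u
#define DEMO_PERIOD	1024u	/* roundup_pow_of_two(3 * DEMO_HZ) */

static unsigned long demo_update_bw(unsigned long old_bw,	/* pages/s */
				    unsigned long written,	/* pages */
				    unsigned long elapsed)	/* jiffies */
{
	uint64_t bw = (uint64_t)written * DEMO_HZ;	/* rate * elapsed */

	if (elapsed > DEMO_PERIOD)
		return (unsigned long)(bw / elapsed);	/* sample dominates */

	bw += (uint64_t)old_bw * (DEMO_PERIOD - elapsed);
	return (unsigned long)(bw >> 10);		/* / DEMO_PERIOD */
}

int main(void)
{
	unsigned long bw = 25000;	/* stale estimate, pages/s */

	/* ten 200 ms samples, each having written 2000 pages (~10000 pages/s) */
	for (int i = 0; i < 10; i++) {
		bw = demo_update_bw(bw, 2000, DEMO_HZ / 5);
		printf("sample %2d: bw = %lu pages/s\n", i, bw);
	}
	return 0;
}
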
12302bc00aefSTejun Heo static void update_dirty_limit(struct dirty_throttle_control *dtc)
1231c42843f2SWu Fengguang {
1232e9f07dfdSTejun Heo struct wb_domain *dom = dtc_dom(dtc);
12332bc00aefSTejun Heo unsigned long thresh = dtc->thresh;
1234dcc25ae7STejun Heo unsigned long limit = dom->dirty_limit;
1235c42843f2SWu Fengguang
1236c42843f2SWu Fengguang /*
1237c42843f2SWu Fengguang * Follow up in one step.
1238c42843f2SWu Fengguang */
1239c42843f2SWu Fengguang if (limit < thresh) {
1240c42843f2SWu Fengguang limit = thresh;
1241c42843f2SWu Fengguang goto update;
1242c42843f2SWu Fengguang }
1243c42843f2SWu Fengguang
1244c42843f2SWu Fengguang /*
1245c42843f2SWu Fengguang * Follow down slowly. Use the higher one as the target, because thresh
1246c42843f2SWu Fengguang * may drop below dirty. This is exactly the reason to introduce
1247dcc25ae7STejun Heo * dom->dirty_limit which is guaranteed to lie above the dirty pages.
1248c42843f2SWu Fengguang */
12492bc00aefSTejun Heo thresh = max(thresh, dtc->dirty);
1250c42843f2SWu Fengguang if (limit > thresh) {
1251c42843f2SWu Fengguang limit -= (limit - thresh) >> 5;
1252c42843f2SWu Fengguang goto update;
1253c42843f2SWu Fengguang }
1254c42843f2SWu Fengguang return;
1255c42843f2SWu Fengguang update:
1256dcc25ae7STejun Heo dom->dirty_limit = limit;
1257c42843f2SWu Fengguang }
1258c42843f2SWu Fengguang
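/*
 * Illustrative userspace sketch (not kernel code): the "follow down
 * slowly" branch above moves dom->dirty_limit 1/32 of the remaining gap
 * per update, so a sudden drop of the threshold is absorbed over a couple
 * of hundred BANDWIDTH_INTERVALs instead of instantly exposing
 * dirty > limit. The numbers below are made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned long limit = 200000;	/* old dirty_limit, pages */
	unsigned long thresh = 100000;	/* new, much lower threshold */
	int updates = 0;

	while (limit > thresh) {
		unsigned long step = (limit - thresh) >> 5;

		if (!step)		/* gap below 32 pages: close enough */
			break;
		limit -= step;
		updates++;
	}
	printf("limit settled at %lu pages after %d updates\n",
	       limit, updates);
	return 0;
}
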
125942dd235cSJan Kara static void domain_update_dirty_limit(struct dirty_throttle_control *dtc,
1260c42843f2SWu Fengguang unsigned long now)
1261c42843f2SWu Fengguang {
1262e9f07dfdSTejun Heo struct wb_domain *dom = dtc_dom(dtc);
1263c42843f2SWu Fengguang
1264c42843f2SWu Fengguang /*
1265c42843f2SWu Fengguang * check locklessly first to avoid taking the lock most of the time
1266c42843f2SWu Fengguang */
1267dcc25ae7STejun Heo if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
1268c42843f2SWu Fengguang return;
1269c42843f2SWu Fengguang
1270dcc25ae7STejun Heo spin_lock(&dom->lock);
1271dcc25ae7STejun Heo if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
12722bc00aefSTejun Heo update_dirty_limit(dtc);
1273dcc25ae7STejun Heo dom->dirty_limit_tstamp = now;
1274c42843f2SWu Fengguang }
1275dcc25ae7STejun Heo spin_unlock(&dom->lock);
1276c42843f2SWu Fengguang }
1277c42843f2SWu Fengguang
1278be3ffa27SWu Fengguang /*
1279de1fff37STejun Heo * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
1280be3ffa27SWu Fengguang *
1281de1fff37STejun Heo * Normal wb tasks will be curbed at or below it in the long term.
1282be3ffa27SWu Fengguang * Obviously it should be around (write_bw / N) when there are N dd tasks.
1283be3ffa27SWu Fengguang */
12842bc00aefSTejun Heo static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
1285be3ffa27SWu Fengguang unsigned long dirtied,
1286be3ffa27SWu Fengguang unsigned long elapsed)
1287be3ffa27SWu Fengguang {
12882bc00aefSTejun Heo struct bdi_writeback *wb = dtc->wb;
12892bc00aefSTejun Heo unsigned long dirty = dtc->dirty;
12902bc00aefSTejun Heo unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
1291c7981433STejun Heo unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
12927381131cSWu Fengguang unsigned long setpoint = (freerun + limit) / 2;
1293a88a341aSTejun Heo unsigned long write_bw = wb->avg_write_bandwidth;
1294a88a341aSTejun Heo unsigned long dirty_ratelimit = wb->dirty_ratelimit;
1295be3ffa27SWu Fengguang unsigned long dirty_rate;
1296be3ffa27SWu Fengguang unsigned long task_ratelimit;
1297be3ffa27SWu Fengguang unsigned long balanced_dirty_ratelimit;
12987381131cSWu Fengguang unsigned long step;
12997381131cSWu Fengguang unsigned long x;
1300d59b1087SAndrey Ryabinin unsigned long shift;
1301be3ffa27SWu Fengguang
1302be3ffa27SWu Fengguang /*
1303be3ffa27SWu Fengguang * The dirty rate will match the writeout rate in long term, except
1304be3ffa27SWu Fengguang * when dirty pages are truncated by userspace or re-dirtied by FS.
1305be3ffa27SWu Fengguang */
1306a88a341aSTejun Heo dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed;
1307be3ffa27SWu Fengguang
1308be3ffa27SWu Fengguang /*
1309be3ffa27SWu Fengguang * task_ratelimit reflects each dd's dirty rate for the past 200ms.
1310be3ffa27SWu Fengguang */
1311be3ffa27SWu Fengguang task_ratelimit = (u64)dirty_ratelimit *
1312daddfa3cSTejun Heo dtc->pos_ratio >> RATELIMIT_CALC_SHIFT;
1313be3ffa27SWu Fengguang task_ratelimit++; /* it helps ramp up dirty_ratelimit from tiny values */
1314be3ffa27SWu Fengguang
1315be3ffa27SWu Fengguang /*
1316be3ffa27SWu Fengguang * A linear estimation of the "balanced" throttle rate. The theory is,
1317de1fff37STejun Heo * if there are N dd tasks, each throttled at task_ratelimit, the wb's
1318be3ffa27SWu Fengguang * dirty_rate will be measured to be (N * task_ratelimit). So the below
1319be3ffa27SWu Fengguang * formula will yield the balanced rate limit (write_bw / N).
1320be3ffa27SWu Fengguang *
1321be3ffa27SWu Fengguang * Note that the expanded form is not a pure rate feedback:
1322be3ffa27SWu Fengguang * rate_(i+1) = rate_(i) * (write_bw / dirty_rate) (1)
1323be3ffa27SWu Fengguang * but also takes pos_ratio into account:
1324be3ffa27SWu Fengguang * rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio (2)
1325be3ffa27SWu Fengguang *
1326be3ffa27SWu Fengguang * (1) is not realistic because pos_ratio also takes part in balancing
1327be3ffa27SWu Fengguang * the dirty rate. Consider the state
1328be3ffa27SWu Fengguang * pos_ratio = 0.5 (3)
1329be3ffa27SWu Fengguang * rate = 2 * (write_bw / N) (4)
1330be3ffa27SWu Fengguang * If (1) is used, it will get stuck in that state, because each dd will
1331be3ffa27SWu Fengguang * be throttled at
1332be3ffa27SWu Fengguang * task_ratelimit = pos_ratio * rate = (write_bw / N) (5)
1333be3ffa27SWu Fengguang * yielding
1334be3ffa27SWu Fengguang * dirty_rate = N * task_ratelimit = write_bw (6)
1335be3ffa27SWu Fengguang * put (6) into (1) we get
1336be3ffa27SWu Fengguang * rate_(i+1) = rate_(i) (7)
1337be3ffa27SWu Fengguang *
1338be3ffa27SWu Fengguang * So we end up using (2) to always keep
1339be3ffa27SWu Fengguang * rate_(i+1) ~= (write_bw / N) (8)
1340be3ffa27SWu Fengguang * regardless of the value of pos_ratio. As long as (8) is satisfied,
1341be3ffa27SWu Fengguang * pos_ratio is able to drive itself to 1.0, which is not only where
1342be3ffa27SWu Fengguang * the dirty count meets the setpoint, but also where the slope of
1343be3ffa27SWu Fengguang * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
1344be3ffa27SWu Fengguang */
1345be3ffa27SWu Fengguang balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
1346be3ffa27SWu Fengguang dirty_rate | 1);
1347bdaac490SWu Fengguang /*
1348bdaac490SWu Fengguang * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
1349bdaac490SWu Fengguang */
1350bdaac490SWu Fengguang if (unlikely(balanced_dirty_ratelimit > write_bw))
1351bdaac490SWu Fengguang balanced_dirty_ratelimit = write_bw;
1352be3ffa27SWu Fengguang
13537381131cSWu Fengguang /*
13547381131cSWu Fengguang * We could safely do this and return immediately:
13557381131cSWu Fengguang *
1356de1fff37STejun Heo * wb->dirty_ratelimit = balanced_dirty_ratelimit;
13577381131cSWu Fengguang *
13587381131cSWu Fengguang * However to get a more stable dirty_ratelimit, the below elaborated
1359331cbdeeSWanpeng Li * code makes use of task_ratelimit to filter out singular points and
13607381131cSWu Fengguang * limit the step size.
13617381131cSWu Fengguang *
13627381131cSWu Fengguang * The below code essentially only uses the relative value of
13637381131cSWu Fengguang *
13647381131cSWu Fengguang * task_ratelimit - dirty_ratelimit
13657381131cSWu Fengguang * = (pos_ratio - 1) * dirty_ratelimit
13667381131cSWu Fengguang *
13677381131cSWu Fengguang * which reflects the direction and size of dirty position error.
13687381131cSWu Fengguang */
13697381131cSWu Fengguang
13707381131cSWu Fengguang /*
13717381131cSWu Fengguang * dirty_ratelimit will follow balanced_dirty_ratelimit iff
13727381131cSWu Fengguang * task_ratelimit is on the same side of dirty_ratelimit, too.
13737381131cSWu Fengguang * For example, when
13747381131cSWu Fengguang * - dirty_ratelimit > balanced_dirty_ratelimit
13757381131cSWu Fengguang * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
13767381131cSWu Fengguang * lowering dirty_ratelimit will help meet both the position and rate
13777381131cSWu Fengguang * control targets. Otherwise, don't update dirty_ratelimit if it will
13787381131cSWu Fengguang * only help meet the rate target. After all, what users ultimately
13797381131cSWu Fengguang * feel and care about are a stable dirty rate and a small position error.
13807381131cSWu Fengguang *
13817381131cSWu Fengguang * |task_ratelimit - dirty_ratelimit| is used to limit the step size
1382331cbdeeSWanpeng Li * and filter out the singular points of balanced_dirty_ratelimit, which
13837381131cSWu Fengguang * keeps jumping around randomly and can even leap far away at times
13847381131cSWu Fengguang * due to the small 200ms estimation period of dirty_rate (we want to
13857381131cSWu Fengguang * keep that period small to reduce time lags).
13867381131cSWu Fengguang */
13877381131cSWu Fengguang step = 0;
13885a537485SMaxim Patlasov
13895a537485SMaxim Patlasov /*
1390de1fff37STejun Heo * For strictlimit case, calculations above were based on wb counters
1391a88a341aSTejun Heo * and limits (starting from pos_ratio = wb_position_ratio() and up to
13925a537485SMaxim Patlasov * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
1393de1fff37STejun Heo * Hence, to calculate "step" properly, we have to use wb_dirty as
1394de1fff37STejun Heo * "dirty" and wb_setpoint as "setpoint".
13955a537485SMaxim Patlasov *
1396de1fff37STejun Heo * We ramp up dirty_ratelimit forcibly if wb_dirty is low because
1397de1fff37STejun Heo * it's possible that wb_thresh is close to zero due to inactivity
1398970fb01aSTejun Heo * of backing device.
13995a537485SMaxim Patlasov */
1400a88a341aSTejun Heo if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
14012bc00aefSTejun Heo dirty = dtc->wb_dirty;
14022bc00aefSTejun Heo if (dtc->wb_dirty < 8)
14032bc00aefSTejun Heo setpoint = dtc->wb_dirty + 1;
14045a537485SMaxim Patlasov else
1405970fb01aSTejun Heo setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
14065a537485SMaxim Patlasov }
14075a537485SMaxim Patlasov
14087381131cSWu Fengguang if (dirty < setpoint) {
1409a88a341aSTejun Heo x = min3(wb->balanced_dirty_ratelimit,
14107c809968SMark Rustad balanced_dirty_ratelimit, task_ratelimit);
14117381131cSWu Fengguang if (dirty_ratelimit < x)
14127381131cSWu Fengguang step = x - dirty_ratelimit;
14137381131cSWu Fengguang } else {
1414a88a341aSTejun Heo x = max3(wb->balanced_dirty_ratelimit,
14157c809968SMark Rustad balanced_dirty_ratelimit, task_ratelimit);
14167381131cSWu Fengguang if (dirty_ratelimit > x)
14177381131cSWu Fengguang step = dirty_ratelimit - x;
14187381131cSWu Fengguang }
14197381131cSWu Fengguang
14207381131cSWu Fengguang /*
14217381131cSWu Fengguang * Don't pursue 100% rate matching. It's impossible since the balanced
14227381131cSWu Fengguang * rate itself is constantly fluctuating. So decrease the track speed
14237381131cSWu Fengguang * when it gets close to the target. Helps eliminate pointless tremors.
14247381131cSWu Fengguang */
1425d59b1087SAndrey Ryabinin shift = dirty_ratelimit / (2 * step + 1);
1426d59b1087SAndrey Ryabinin if (shift < BITS_PER_LONG)
1427d59b1087SAndrey Ryabinin step = DIV_ROUND_UP(step >> shift, 8);
1428d59b1087SAndrey Ryabinin else
1429d59b1087SAndrey Ryabinin step = 0;
14307381131cSWu Fengguang
14317381131cSWu Fengguang if (dirty_ratelimit < balanced_dirty_ratelimit)
14327381131cSWu Fengguang dirty_ratelimit += step;
14337381131cSWu Fengguang else
14347381131cSWu Fengguang dirty_ratelimit -= step;
14357381131cSWu Fengguang
143620792ebfSJan Kara WRITE_ONCE(wb->dirty_ratelimit, max(dirty_ratelimit, 1UL));
1437a88a341aSTejun Heo wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;
1438b48c104dSWu Fengguang
14395634cc2aSTejun Heo trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit);
1440be3ffa27SWu Fengguang }
1441be3ffa27SWu Fengguang
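/*
 * Illustrative userspace sketch (not kernel code): the rate-feedback
 * estimate above. With nr_tasks writers all dirtying at task_ratelimit
 * (pos_ratio ~= 1), the measured dirty_rate is nr_tasks * task_ratelimit,
 * so "task_ratelimit * write_bw / dirty_rate" converges on
 * write_bw / nr_tasks whatever the starting guess. All names are local
 * to the example; step limiting and pos_ratio are deliberately omitted.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long write_bw = 20000;	/* pages/s the device sustains */
	const unsigned long nr_tasks = 4;
	unsigned long ratelimit = 1000;		/* arbitrary starting guess */

	for (int i = 0; i < 5; i++) {
		unsigned long task_ratelimit = ratelimit;
		unsigned long dirty_rate = nr_tasks * task_ratelimit;

		ratelimit = (unsigned long)((unsigned long long)task_ratelimit *
					    write_bw / (dirty_rate | 1));
		printf("iter %d: dirty_ratelimit = %lu (target %lu)\n",
		       i, ratelimit, write_bw / nr_tasks);
	}
	return 0;
}
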
1442c2aa723aSTejun Heo static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
1443c2aa723aSTejun Heo struct dirty_throttle_control *mdtc,
14448a731799STejun Heo bool update_ratelimit)
1445e98be2d5SWu Fengguang {
1446c2aa723aSTejun Heo struct bdi_writeback *wb = gdtc->wb;
1447e98be2d5SWu Fengguang unsigned long now = jiffies;
144845a2966fSJan Kara unsigned long elapsed;
1449be3ffa27SWu Fengguang unsigned long dirtied;
1450e98be2d5SWu Fengguang unsigned long written;
1451e98be2d5SWu Fengguang
145245a2966fSJan Kara spin_lock(&wb->list_lock);
14538a731799STejun Heo
1454e98be2d5SWu Fengguang /*
145545a2966fSJan Kara * Lockless checks for elapsed time are racy and delayed update after
145645a2966fSJan Kara * IO completion doesn't do it at all (to make sure written pages are
145745a2966fSJan Kara * accounted reasonably quickly). Make sure elapsed >= 1 to avoid
145845a2966fSJan Kara * division errors.
1459e98be2d5SWu Fengguang */
146045a2966fSJan Kara elapsed = max(now - wb->bw_time_stamp, 1UL);
1461a88a341aSTejun Heo dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
1462a88a341aSTejun Heo written = percpu_counter_read(&wb->stat[WB_WRITTEN]);
1463e98be2d5SWu Fengguang
14648a731799STejun Heo if (update_ratelimit) {
146542dd235cSJan Kara domain_update_dirty_limit(gdtc, now);
1466c2aa723aSTejun Heo wb_update_dirty_ratelimit(gdtc, dirtied, elapsed);
1467c2aa723aSTejun Heo
1468c2aa723aSTejun Heo /*
1469c2aa723aSTejun Heo * @mdtc is always NULL if !CGROUP_WRITEBACK but the
1470c2aa723aSTejun Heo * compiler has no way to figure that out. Help it.
1471c2aa723aSTejun Heo */
1472c2aa723aSTejun Heo if (IS_ENABLED(CONFIG_CGROUP_WRITEBACK) && mdtc) {
147342dd235cSJan Kara domain_update_dirty_limit(mdtc, now);
1474c2aa723aSTejun Heo wb_update_dirty_ratelimit(mdtc, dirtied, elapsed);
1475be3ffa27SWu Fengguang }
1476c42843f2SWu Fengguang }
1477a88a341aSTejun Heo wb_update_write_bandwidth(wb, elapsed, written);
1478e98be2d5SWu Fengguang
1479a88a341aSTejun Heo wb->dirtied_stamp = dirtied;
1480a88a341aSTejun Heo wb->written_stamp = written;
148120792ebfSJan Kara WRITE_ONCE(wb->bw_time_stamp, now);
148245a2966fSJan Kara spin_unlock(&wb->list_lock);
1483e98be2d5SWu Fengguang }
1484e98be2d5SWu Fengguang
148545a2966fSJan Kara void wb_update_bandwidth(struct bdi_writeback *wb)
1486e98be2d5SWu Fengguang {
14872bc00aefSTejun Heo struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
14882bc00aefSTejun Heo
1489fee468fdSJan Kara __wb_update_bandwidth(&gdtc, NULL, false);
1490fee468fdSJan Kara }
1491fee468fdSJan Kara
1492fee468fdSJan Kara /* Interval after which we consider wb idle and don't estimate bandwidth */
1493fee468fdSJan Kara #define WB_BANDWIDTH_IDLE_JIF (HZ)
1494fee468fdSJan Kara
1495fee468fdSJan Kara static void wb_bandwidth_estimate_start(struct bdi_writeback *wb)
1496fee468fdSJan Kara {
1497fee468fdSJan Kara unsigned long now = jiffies;
1498fee468fdSJan Kara unsigned long elapsed = now - READ_ONCE(wb->bw_time_stamp);
1499fee468fdSJan Kara
1500fee468fdSJan Kara if (elapsed > WB_BANDWIDTH_IDLE_JIF &&
1501fee468fdSJan Kara !atomic_read(&wb->writeback_inodes)) {
1502fee468fdSJan Kara spin_lock(&wb->list_lock);
1503fee468fdSJan Kara wb->dirtied_stamp = wb_stat(wb, WB_DIRTIED);
1504fee468fdSJan Kara wb->written_stamp = wb_stat(wb, WB_WRITTEN);
150520792ebfSJan Kara WRITE_ONCE(wb->bw_time_stamp, now);
1506fee468fdSJan Kara spin_unlock(&wb->list_lock);
1507fee468fdSJan Kara }
1508e98be2d5SWu Fengguang }
1509e98be2d5SWu Fengguang
15101da177e4SLinus Torvalds /*
1511d0e1d66bSNamjae Jeon * After a task has dirtied this many pages, balance_dirty_pages_ratelimited()
15129d823e8fSWu Fengguang * will look to see if it needs to start dirty throttling.
15139d823e8fSWu Fengguang *
15149d823e8fSWu Fengguang * If dirty_poll_interval is too low, big NUMA machines will call the expensive
1515c41f012aSMichal Hocko * global_zone_page_state() too often. So scale it near-sqrt to the safety margin
15169d823e8fSWu Fengguang * (the number of pages we may dirty without exceeding the dirty limits).
15179d823e8fSWu Fengguang */
15189d823e8fSWu Fengguang static unsigned long dirty_poll_interval(unsigned long dirty,
15199d823e8fSWu Fengguang unsigned long thresh)
15209d823e8fSWu Fengguang {
15219d823e8fSWu Fengguang if (thresh > dirty)
15229d823e8fSWu Fengguang return 1UL << (ilog2(thresh - dirty) >> 1);
15239d823e8fSWu Fengguang
15249d823e8fSWu Fengguang return 1;
15259d823e8fSWu Fengguang }
15269d823e8fSWu Fengguang
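/*
 * Illustrative userspace sketch (not kernel code): dirty_poll_interval()
 * returns roughly sqrt(thresh - dirty) pages, computed as
 * 2^(ilog2(headroom) / 2). demo_poll_interval() is a stand-in written
 * in plain C so it can run outside the kernel.
 */
#include <stdio.h>

static unsigned long demo_poll_interval(unsigned long dirty,
					unsigned long thresh)
{
	if (thresh > dirty) {
		unsigned long gap = thresh - dirty;
		int lg = 0;

		while (gap >>= 1)	/* ilog2(thresh - dirty) */
			lg++;
		return 1UL << (lg >> 1);
	}
	return 1;
}

int main(void)
{
	/* ~1 GB of headroom in 4 KiB pages: recheck every ~512 pages */
	printf("%lu\n", demo_poll_interval(0, 262144));
	/* ~4 MB of headroom left: recheck every ~32 pages */
	printf("%lu\n", demo_poll_interval(261120, 262144));
	return 0;
}
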
1527a88a341aSTejun Heo static unsigned long wb_max_pause(struct bdi_writeback *wb,
1528de1fff37STejun Heo unsigned long wb_dirty)
1529c8462cc9SWu Fengguang {
153020792ebfSJan Kara unsigned long bw = READ_ONCE(wb->avg_write_bandwidth);
1531e3b6c655SFengguang Wu unsigned long t;
1532c8462cc9SWu Fengguang
1533c8462cc9SWu Fengguang /*
1534c8462cc9SWu Fengguang * Limit pause time for small memory systems. If we sleep for too long,
1535c8462cc9SWu Fengguang * a small pool of dirty/writeback pages may go empty and the disk may go
1536c8462cc9SWu Fengguang * idle.
1537c8462cc9SWu Fengguang *
1538c8462cc9SWu Fengguang * 8 serves as the safety ratio.
1539c8462cc9SWu Fengguang */
1540de1fff37STejun Heo t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
15417ccb9ad5SWu Fengguang t++;
15427ccb9ad5SWu Fengguang
1543e3b6c655SFengguang Wu return min_t(unsigned long, t, MAX_PAUSE);
15447ccb9ad5SWu Fengguang }
15457ccb9ad5SWu Fengguang
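/*
 * Illustrative userspace sketch (not kernel code): the cap above is
 * roughly "the time needed to write an eighth of the wb's dirty pages",
 * so the pool cannot drain dry while the task sleeps. DEMO_HZ and the
 * sample bandwidth are assumptions for the example.
 */
#include <stdio.h>

#define DEMO_HZ		250
#define DEMO_MAX_PAUSE	(DEMO_HZ / 5)	/* 200 ms, as MAX_PAUSE above */

static unsigned long demo_max_pause(unsigned long bw,		/* pages/s */
				    unsigned long wb_dirty)	/* pages */
{
	/* roundup_pow_of_two(1 + DEMO_HZ / 8) == 32 */
	unsigned long t = wb_dirty / (1 + bw / 32) + 1;

	return t < DEMO_MAX_PAUSE ? t : DEMO_MAX_PAUSE;
}

int main(void)
{
	/* slow USB stick, ~10 MB/s, only 100 dirty pages: short cap */
	printf("%lu jiffies\n", demo_max_pause(2500, 100));
	/* same device behind a large dirty backlog: cap hits 200 ms */
	printf("%lu jiffies\n", demo_max_pause(2500, 100000));
	return 0;
}
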
1546a88a341aSTejun Heo static long wb_min_pause(struct bdi_writeback *wb,
15477ccb9ad5SWu Fengguang long max_pause,
15487ccb9ad5SWu Fengguang unsigned long task_ratelimit,
15497ccb9ad5SWu Fengguang unsigned long dirty_ratelimit,
15507ccb9ad5SWu Fengguang int *nr_dirtied_pause)
15517ccb9ad5SWu Fengguang {
155220792ebfSJan Kara long hi = ilog2(READ_ONCE(wb->avg_write_bandwidth));
155320792ebfSJan Kara long lo = ilog2(READ_ONCE(wb->dirty_ratelimit));
15547ccb9ad5SWu Fengguang long t; /* target pause */
15557ccb9ad5SWu Fengguang long pause; /* estimated next pause */
15567ccb9ad5SWu Fengguang int pages; /* target nr_dirtied_pause */
15577ccb9ad5SWu Fengguang
15587ccb9ad5SWu Fengguang /* target for 10ms pause on 1-dd case */
15597ccb9ad5SWu Fengguang t = max(1, HZ / 100);
1560c8462cc9SWu Fengguang
1561c8462cc9SWu Fengguang /*
15627ccb9ad5SWu Fengguang * Scale up pause time for concurrent dirtiers in order to reduce CPU
15637ccb9ad5SWu Fengguang * overheads.
15647ccb9ad5SWu Fengguang *
15657ccb9ad5SWu Fengguang * (N * 10ms) on 2^N concurrent tasks.
1566c8462cc9SWu Fengguang */
15677ccb9ad5SWu Fengguang if (hi > lo)
15687ccb9ad5SWu Fengguang t += (hi - lo) * (10 * HZ) / 1024;
15697ccb9ad5SWu Fengguang
15707ccb9ad5SWu Fengguang /*
15717ccb9ad5SWu Fengguang * This is a bit convoluted. We try to base the next nr_dirtied_pause
15727ccb9ad5SWu Fengguang * on the much more stable dirty_ratelimit. However the next pause time
15737ccb9ad5SWu Fengguang * will be computed based on task_ratelimit and the two rate limits may
15747ccb9ad5SWu Fengguang * depart considerably at some time. Especially if task_ratelimit goes
15757ccb9ad5SWu Fengguang * below dirty_ratelimit/2 and the target pause is max_pause, the next
15767ccb9ad5SWu Fengguang * pause time will be max_pause*2 _trimmed down_ to max_pause. As a
15777ccb9ad5SWu Fengguang * result task_ratelimit won't be executed faithfully, which could
15787ccb9ad5SWu Fengguang * eventually bring down dirty_ratelimit.
15797ccb9ad5SWu Fengguang *
15807ccb9ad5SWu Fengguang * We apply two rules to fix it up:
15817ccb9ad5SWu Fengguang * 1) try to estimate the next pause time and if necessary, use a lower
15827ccb9ad5SWu Fengguang * nr_dirtied_pause so as not to exceed max_pause. When this happens,
15837ccb9ad5SWu Fengguang * nr_dirtied_pause will be "dancing" with task_ratelimit.
15847ccb9ad5SWu Fengguang * 2) limit the target pause time to max_pause/2, so that the normal
15857ccb9ad5SWu Fengguang * small fluctuations of task_ratelimit won't trigger rule (1) and
15867ccb9ad5SWu Fengguang * nr_dirtied_pause will remain as stable as dirty_ratelimit.
15877ccb9ad5SWu Fengguang */
15887ccb9ad5SWu Fengguang t = min(t, 1 + max_pause / 2);
15897ccb9ad5SWu Fengguang pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
15907ccb9ad5SWu Fengguang
15915b9b3574SWu Fengguang /*
15925b9b3574SWu Fengguang * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
15935b9b3574SWu Fengguang * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
15945b9b3574SWu Fengguang * When the 16 consecutive reads are often interrupted by some dirty
15955b9b3574SWu Fengguang * throttling pause during the async writes, cfq will go idle
15965b9b3574SWu Fengguang * (deadline is fine). So push nr_dirtied_pause as high as possible,
15975b9b3574SWu Fengguang * until it reaches DIRTY_POLL_THRESH=32 pages.
15985b9b3574SWu Fengguang */
15995b9b3574SWu Fengguang if (pages < DIRTY_POLL_THRESH) {
16005b9b3574SWu Fengguang t = max_pause;
16015b9b3574SWu Fengguang pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
16025b9b3574SWu Fengguang if (pages > DIRTY_POLL_THRESH) {
16035b9b3574SWu Fengguang pages = DIRTY_POLL_THRESH;
16045b9b3574SWu Fengguang t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
16055b9b3574SWu Fengguang }
16065b9b3574SWu Fengguang }
16075b9b3574SWu Fengguang
16087ccb9ad5SWu Fengguang pause = HZ * pages / (task_ratelimit + 1);
16097ccb9ad5SWu Fengguang if (pause > max_pause) {
16107ccb9ad5SWu Fengguang t = max_pause;
16117ccb9ad5SWu Fengguang pages = task_ratelimit * t / roundup_pow_of_two(HZ);
16127ccb9ad5SWu Fengguang }
16137ccb9ad5SWu Fengguang
16147ccb9ad5SWu Fengguang *nr_dirtied_pause = pages;
16157ccb9ad5SWu Fengguang /*
16167ccb9ad5SWu Fengguang * The minimal pause time will normally be half the target pause time.
16177ccb9ad5SWu Fengguang */
16185b9b3574SWu Fengguang return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
1619c8462cc9SWu Fengguang }
1620c8462cc9SWu Fengguang
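/*
 * Illustrative arithmetic (not kernel code): the target pause grows by
 * about 10 ms per doubling of concurrent dirtiers, because hi - lo is
 * roughly log2(nr_writers) once dirty_ratelimit has settled near
 * write_bw / nr_writers. DEMO_HZ and the bandwidth are assumed values.
 */
#include <stdio.h>

#define DEMO_HZ 250

int main(void)
{
	long hi = 14;			/* ilog2(write_bw), ~16k pages/s */

	for (int n = 0; n <= 6; n++) {	/* 1, 2, 4, ... 64 writers */
		long lo = hi - n;	/* ilog2(write_bw / nr_writers) */
		long t = DEMO_HZ / 100 ? DEMO_HZ / 100 : 1;	/* ~10 ms base */

		if (hi > lo)
			t += (hi - lo) * (10 * DEMO_HZ) / 1024;
		printf("%2d writers: target pause ~%ld ms\n",
		       1 << n, t * 1000 / DEMO_HZ);
	}
	return 0;
}
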
1621970fb01aSTejun Heo static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
16221da177e4SLinus Torvalds {
16232bc00aefSTejun Heo struct bdi_writeback *wb = dtc->wb;
162493f78d88STejun Heo unsigned long wb_reclaimable;
1625143dfe86SWu Fengguang
1626143dfe86SWu Fengguang /*
1627de1fff37STejun Heo * wb_thresh is not treated as some limiting factor as
1628143dfe86SWu Fengguang * dirty_thresh, due to reasons
1629de1fff37STejun Heo * - in JBOD setup, wb_thresh can fluctuate a lot
1630143dfe86SWu Fengguang * - in a system with HDD and USB key, the USB key may somehow
1631de1fff37STejun Heo * go into state (wb_dirty >> wb_thresh) either because
1632de1fff37STejun Heo * wb_dirty starts high, or because wb_thresh drops low.
1633143dfe86SWu Fengguang * In this case we don't want to hard throttle the USB key
1634de1fff37STejun Heo * dirtiers for 100 seconds until wb_dirty drops under
1635de1fff37STejun Heo * wb_thresh. Instead the auxiliary wb control line in
1636a88a341aSTejun Heo * wb_position_ratio() will let the dirtier task progress
1637de1fff37STejun Heo * at some rate <= (write_bw / 2) for bringing down wb_dirty.
1638143dfe86SWu Fengguang */
1639b1cbc6d4STejun Heo dtc->wb_thresh = __wb_calc_thresh(dtc);
1640970fb01aSTejun Heo dtc->wb_bg_thresh = dtc->thresh ?
1641ec18ec23SZach O'Keefe div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
164216c4042fSWu Fengguang
1643e50e3720SWu Fengguang /*
1644e50e3720SWu Fengguang * In order to avoid the stacked BDI deadlock we need
1645e50e3720SWu Fengguang * to ensure we accurately count the 'dirty' pages when
1646e50e3720SWu Fengguang * the threshold is low.
1647e50e3720SWu Fengguang *
1648e50e3720SWu Fengguang * Otherwise it would be possible to get thresh+n pages
1649e50e3720SWu Fengguang * reported dirty, even though there are thresh-m pages
1650e50e3720SWu Fengguang * actually dirty; with m+n sitting in the percpu
1651e50e3720SWu Fengguang * deltas.
1652e50e3720SWu Fengguang */
16532bce774eSWang Long if (dtc->wb_thresh < 2 * wb_stat_error()) {
165493f78d88STejun Heo wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
16552bc00aefSTejun Heo dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
1656e50e3720SWu Fengguang } else {
165793f78d88STejun Heo wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
16582bc00aefSTejun Heo dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
1659e50e3720SWu Fengguang }
16605a537485SMaxim Patlasov }
16615a537485SMaxim Patlasov
16625a537485SMaxim Patlasov /*
16635a537485SMaxim Patlasov * balance_dirty_pages() must be called by processes which are generating dirty
16645a537485SMaxim Patlasov * data. It looks at the number of dirty pages in the machine and will force
16655a537485SMaxim Patlasov * the caller to wait once it crosses (background_thresh + dirty_thresh) / 2.
16665a537485SMaxim Patlasov * If we're over `background_thresh' then the writeback threads are woken to
16675a537485SMaxim Patlasov * perform some writeout.
16685a537485SMaxim Patlasov */
1669fe6c9c6eSJan Kara static int balance_dirty_pages(struct bdi_writeback *wb,
1670fe6c9c6eSJan Kara unsigned long pages_dirtied, unsigned int flags)
16715a537485SMaxim Patlasov {
16722bc00aefSTejun Heo struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
1673c2aa723aSTejun Heo struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
16742bc00aefSTejun Heo struct dirty_throttle_control * const gdtc = &gdtc_stor;
1675c2aa723aSTejun Heo struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
1676c2aa723aSTejun Heo &mdtc_stor : NULL;
1677c2aa723aSTejun Heo struct dirty_throttle_control *sdtc;
16788d92890bSNeilBrown unsigned long nr_reclaimable; /* = file_dirty */
16795a537485SMaxim Patlasov long period;
16805a537485SMaxim Patlasov long pause;
16815a537485SMaxim Patlasov long max_pause;
16825a537485SMaxim Patlasov long min_pause;
16835a537485SMaxim Patlasov int nr_dirtied_pause;
16845a537485SMaxim Patlasov bool dirty_exceeded = false;
16855a537485SMaxim Patlasov unsigned long task_ratelimit;
16865a537485SMaxim Patlasov unsigned long dirty_ratelimit;
1687dfb8ae56STejun Heo struct backing_dev_info *bdi = wb->bdi;
16885a537485SMaxim Patlasov bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
16895a537485SMaxim Patlasov unsigned long start_time = jiffies;
1690fe6c9c6eSJan Kara int ret = 0;
16915a537485SMaxim Patlasov
16925a537485SMaxim Patlasov for (;;) {
16935a537485SMaxim Patlasov unsigned long now = jiffies;
16942bc00aefSTejun Heo unsigned long dirty, thresh, bg_thresh;
169550e55bf6SYang Shi unsigned long m_dirty = 0; /* stop bogus uninit warnings */
169650e55bf6SYang Shi unsigned long m_thresh = 0;
169750e55bf6SYang Shi unsigned long m_bg_thresh = 0;
16985a537485SMaxim Patlasov
16998d92890bSNeilBrown nr_reclaimable = global_node_page_state(NR_FILE_DIRTY);
17009fc3a43eSTejun Heo gdtc->avail = global_dirtyable_memory();
170111fb9989SMel Gorman gdtc->dirty = nr_reclaimable + global_node_page_state(NR_WRITEBACK);
17025a537485SMaxim Patlasov
17039fc3a43eSTejun Heo domain_dirty_limits(gdtc);
17045a537485SMaxim Patlasov
17055a537485SMaxim Patlasov if (unlikely(strictlimit)) {
1706970fb01aSTejun Heo wb_dirty_limits(gdtc);
17075a537485SMaxim Patlasov
17082bc00aefSTejun Heo dirty = gdtc->wb_dirty;
17092bc00aefSTejun Heo thresh = gdtc->wb_thresh;
1710970fb01aSTejun Heo bg_thresh = gdtc->wb_bg_thresh;
17115a537485SMaxim Patlasov } else {
17122bc00aefSTejun Heo dirty = gdtc->dirty;
17132bc00aefSTejun Heo thresh = gdtc->thresh;
17142bc00aefSTejun Heo bg_thresh = gdtc->bg_thresh;
17155a537485SMaxim Patlasov }
17165a537485SMaxim Patlasov
1717c2aa723aSTejun Heo if (mdtc) {
1718c5edf9cdSTejun Heo unsigned long filepages, headroom, writeback;
1719c2aa723aSTejun Heo
1720c2aa723aSTejun Heo /*
1721c2aa723aSTejun Heo * If @wb belongs to !root memcg, repeat the same
1722c2aa723aSTejun Heo * basic calculations for the memcg domain.
1723c2aa723aSTejun Heo */
1724c5edf9cdSTejun Heo mem_cgroup_wb_stats(wb, &filepages, &headroom,
1725c5edf9cdSTejun Heo &mdtc->dirty, &writeback);
1726c2aa723aSTejun Heo mdtc->dirty += writeback;
1727c5edf9cdSTejun Heo mdtc_calc_avail(mdtc, filepages, headroom);
1728c2aa723aSTejun Heo
1729c2aa723aSTejun Heo domain_dirty_limits(mdtc);
1730c2aa723aSTejun Heo
1731c2aa723aSTejun Heo if (unlikely(strictlimit)) {
1732c2aa723aSTejun Heo wb_dirty_limits(mdtc);
1733c2aa723aSTejun Heo m_dirty = mdtc->wb_dirty;
1734c2aa723aSTejun Heo m_thresh = mdtc->wb_thresh;
1735c2aa723aSTejun Heo m_bg_thresh = mdtc->wb_bg_thresh;
1736c2aa723aSTejun Heo } else {
1737c2aa723aSTejun Heo m_dirty = mdtc->dirty;
1738c2aa723aSTejun Heo m_thresh = mdtc->thresh;
1739c2aa723aSTejun Heo m_bg_thresh = mdtc->bg_thresh;
1740c2aa723aSTejun Heo }
17415a537485SMaxim Patlasov }
17425a537485SMaxim Patlasov
17435a537485SMaxim Patlasov /*
1744ea6813beSJan Kara * In laptop mode, we wait until hitting the higher threshold
1745ea6813beSJan Kara * before starting background writeout, and then write out all
1746ea6813beSJan Kara * the way down to the lower threshold. So slow writers cause
1747ea6813beSJan Kara * minimal disk activity.
1748ea6813beSJan Kara *
1749ea6813beSJan Kara * In normal mode, we start background writeout at the lower
1750ea6813beSJan Kara * background_thresh, to keep the amount of dirty memory low.
1751ea6813beSJan Kara */
1752ea6813beSJan Kara if (!laptop_mode && nr_reclaimable > gdtc->bg_thresh &&
1753ea6813beSJan Kara !writeback_in_progress(wb))
1754ea6813beSJan Kara wb_start_background_writeback(wb);
1755ea6813beSJan Kara
1756ea6813beSJan Kara /*
17575a537485SMaxim Patlasov * Throttle it only when the background writeback cannot
17585a537485SMaxim Patlasov * catch-up. This avoids (excessively) small writeouts
1759de1fff37STejun Heo * when the wb limits are ramping up in case of !strictlimit.
17605a537485SMaxim Patlasov *
1761de1fff37STejun Heo * In strictlimit case make decision based on the wb counters
1762de1fff37STejun Heo * and limits. Small writeouts when the wb limits are ramping
17635a537485SMaxim Patlasov * up are the price we consciously pay for strictlimit-ing.
1764c2aa723aSTejun Heo *
1765c2aa723aSTejun Heo * If memcg domain is in effect, @dirty should be under
1766c2aa723aSTejun Heo * both global and memcg freerun ceilings.
17675a537485SMaxim Patlasov */
1768c2aa723aSTejun Heo if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh) &&
1769c2aa723aSTejun Heo (!mdtc ||
1770c2aa723aSTejun Heo m_dirty <= dirty_freerun_ceiling(m_thresh, m_bg_thresh))) {
1771a37b0715SNeilBrown unsigned long intv;
1772a37b0715SNeilBrown unsigned long m_intv;
1773a37b0715SNeilBrown
1774a37b0715SNeilBrown free_running:
1775a37b0715SNeilBrown intv = dirty_poll_interval(dirty, thresh);
1776a37b0715SNeilBrown m_intv = ULONG_MAX;
1777c2aa723aSTejun Heo
17785a537485SMaxim Patlasov current->dirty_paused_when = now;
17795a537485SMaxim Patlasov current->nr_dirtied = 0;
1780c2aa723aSTejun Heo if (mdtc)
1781c2aa723aSTejun Heo m_intv = dirty_poll_interval(m_dirty, m_thresh);
1782c2aa723aSTejun Heo current->nr_dirtied_pause = min(intv, m_intv);
17835a537485SMaxim Patlasov break;
17845a537485SMaxim Patlasov }
17855a537485SMaxim Patlasov
1786ea6813beSJan Kara /* Start writeback even when in laptop mode */
1787bc05873dSTejun Heo if (unlikely(!writeback_in_progress(wb)))
17889ecf4866STejun Heo wb_start_background_writeback(wb);
17895a537485SMaxim Patlasov
179097b27821STejun Heo mem_cgroup_flush_foreign(wb);
179197b27821STejun Heo
1792c2aa723aSTejun Heo /*
1793c2aa723aSTejun Heo * Calculate global domain's pos_ratio and select the
1794c2aa723aSTejun Heo * global dtc by default.
1795c2aa723aSTejun Heo */
1796a37b0715SNeilBrown if (!strictlimit) {
1797970fb01aSTejun Heo wb_dirty_limits(gdtc);
17985fce25a9SPeter Zijlstra
1799a37b0715SNeilBrown if ((current->flags & PF_LOCAL_THROTTLE) &&
1800a37b0715SNeilBrown gdtc->wb_dirty <
1801a37b0715SNeilBrown dirty_freerun_ceiling(gdtc->wb_thresh,
1802a37b0715SNeilBrown gdtc->wb_bg_thresh))
1803a37b0715SNeilBrown /*
1804a37b0715SNeilBrown * LOCAL_THROTTLE tasks must not be throttled
1805a37b0715SNeilBrown * when below the per-wb freerun ceiling.
1806a37b0715SNeilBrown */
1807a37b0715SNeilBrown goto free_running;
1808a37b0715SNeilBrown }
1809a37b0715SNeilBrown
18102bc00aefSTejun Heo dirty_exceeded = (gdtc->wb_dirty > gdtc->wb_thresh) &&
18112bc00aefSTejun Heo ((gdtc->dirty > gdtc->thresh) || strictlimit);
18121da177e4SLinus Torvalds
1813daddfa3cSTejun Heo wb_position_ratio(gdtc);
1814c2aa723aSTejun Heo sdtc = gdtc;
1815e98be2d5SWu Fengguang
1816c2aa723aSTejun Heo if (mdtc) {
1817c2aa723aSTejun Heo /*
1818c2aa723aSTejun Heo * If memcg domain is in effect, calculate its
1819c2aa723aSTejun Heo * pos_ratio. @wb should satisfy constraints from
1820c2aa723aSTejun Heo * both global and memcg domains. Choose the one
1821c2aa723aSTejun Heo * w/ lower pos_ratio.
1822c2aa723aSTejun Heo */
1823a37b0715SNeilBrown if (!strictlimit) {
1824c2aa723aSTejun Heo wb_dirty_limits(mdtc);
1825c2aa723aSTejun Heo
1826a37b0715SNeilBrown if ((current->flags & PF_LOCAL_THROTTLE) &&
1827a37b0715SNeilBrown mdtc->wb_dirty <
1828a37b0715SNeilBrown dirty_freerun_ceiling(mdtc->wb_thresh,
1829a37b0715SNeilBrown mdtc->wb_bg_thresh))
1830a37b0715SNeilBrown /*
1831a37b0715SNeilBrown * LOCAL_THROTTLE tasks must not be
1832a37b0715SNeilBrown * throttled when below the per-wb
1833a37b0715SNeilBrown * freerun ceiling.
1834a37b0715SNeilBrown */
1835a37b0715SNeilBrown goto free_running;
1836a37b0715SNeilBrown }
1837c2aa723aSTejun Heo dirty_exceeded |= (mdtc->wb_dirty > mdtc->wb_thresh) &&
1838c2aa723aSTejun Heo ((mdtc->dirty > mdtc->thresh) || strictlimit);
1839c2aa723aSTejun Heo
1840c2aa723aSTejun Heo wb_position_ratio(mdtc);
1841c2aa723aSTejun Heo if (mdtc->pos_ratio < gdtc->pos_ratio)
1842c2aa723aSTejun Heo sdtc = mdtc;
1843c2aa723aSTejun Heo }
1844daddfa3cSTejun Heo
1845e92eebbbSJan Kara if (dirty_exceeded != wb->dirty_exceeded)
1846e92eebbbSJan Kara wb->dirty_exceeded = dirty_exceeded;
184704fbfdc1SPeter Zijlstra
184820792ebfSJan Kara if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
184945a2966fSJan Kara BANDWIDTH_INTERVAL))
1850fee468fdSJan Kara __wb_update_bandwidth(gdtc, mdtc, true);
18511da177e4SLinus Torvalds
1852c2aa723aSTejun Heo /* throttle according to the chosen dtc */
185320792ebfSJan Kara dirty_ratelimit = READ_ONCE(wb->dirty_ratelimit);
1854c2aa723aSTejun Heo task_ratelimit = ((u64)dirty_ratelimit * sdtc->pos_ratio) >>
18553a73dbbcSWu Fengguang RATELIMIT_CALC_SHIFT;
1856c2aa723aSTejun Heo max_pause = wb_max_pause(wb, sdtc->wb_dirty);
1857a88a341aSTejun Heo min_pause = wb_min_pause(wb, max_pause,
18587ccb9ad5SWu Fengguang task_ratelimit, dirty_ratelimit,
18597ccb9ad5SWu Fengguang &nr_dirtied_pause);
18607ccb9ad5SWu Fengguang
18613a73dbbcSWu Fengguang if (unlikely(task_ratelimit == 0)) {
186283712358SWu Fengguang period = max_pause;
1863c8462cc9SWu Fengguang pause = max_pause;
1864143dfe86SWu Fengguang goto pause;
1865e50e3720SWu Fengguang }
186683712358SWu Fengguang period = HZ * pages_dirtied / task_ratelimit;
186783712358SWu Fengguang pause = period;
186883712358SWu Fengguang if (current->dirty_paused_when)
186983712358SWu Fengguang pause -= now - current->dirty_paused_when;
187083712358SWu Fengguang /*
187183712358SWu Fengguang * For less than 1s think time (ext3/4 may block the dirtier
187283712358SWu Fengguang * for up to 800ms from time to time on 1-HDD; so does xfs,
187383712358SWu Fengguang * though at a much lower frequency), try to compensate for it in
187483712358SWu Fengguang * future periods by updating the virtual time; otherwise just
187583712358SWu Fengguang * do a reset, as it may be a light dirtier.
187683712358SWu Fengguang */
18777ccb9ad5SWu Fengguang if (pause < min_pause) {
18785634cc2aSTejun Heo trace_balance_dirty_pages(wb,
1879c2aa723aSTejun Heo sdtc->thresh,
1880c2aa723aSTejun Heo sdtc->bg_thresh,
1881c2aa723aSTejun Heo sdtc->dirty,
1882c2aa723aSTejun Heo sdtc->wb_thresh,
1883c2aa723aSTejun Heo sdtc->wb_dirty,
1884ece13ac3SWu Fengguang dirty_ratelimit,
1885ece13ac3SWu Fengguang task_ratelimit,
1886ece13ac3SWu Fengguang pages_dirtied,
188783712358SWu Fengguang period,
18887ccb9ad5SWu Fengguang min(pause, 0L),
1889ece13ac3SWu Fengguang start_time);
189083712358SWu Fengguang if (pause < -HZ) {
189183712358SWu Fengguang current->dirty_paused_when = now;
189283712358SWu Fengguang current->nr_dirtied = 0;
189383712358SWu Fengguang } else if (period) {
189483712358SWu Fengguang current->dirty_paused_when += period;
189583712358SWu Fengguang current->nr_dirtied = 0;
18967ccb9ad5SWu Fengguang } else if (current->nr_dirtied_pause <= pages_dirtied)
18977ccb9ad5SWu Fengguang current->nr_dirtied_pause += pages_dirtied;
189857fc978cSWu Fengguang break;
189957fc978cSWu Fengguang }
19007ccb9ad5SWu Fengguang if (unlikely(pause > max_pause)) {
19017ccb9ad5SWu Fengguang /* for occasional dropped task_ratelimit */
19027ccb9ad5SWu Fengguang now += min(pause - max_pause, max_pause);
19037ccb9ad5SWu Fengguang pause = max_pause;
19047ccb9ad5SWu Fengguang }
1905143dfe86SWu Fengguang
1906143dfe86SWu Fengguang pause:
19075634cc2aSTejun Heo trace_balance_dirty_pages(wb,
1908c2aa723aSTejun Heo sdtc->thresh,
1909c2aa723aSTejun Heo sdtc->bg_thresh,
1910c2aa723aSTejun Heo sdtc->dirty,
1911c2aa723aSTejun Heo sdtc->wb_thresh,
1912c2aa723aSTejun Heo sdtc->wb_dirty,
1913ece13ac3SWu Fengguang dirty_ratelimit,
1914ece13ac3SWu Fengguang task_ratelimit,
1915ece13ac3SWu Fengguang pages_dirtied,
191683712358SWu Fengguang period,
1917ece13ac3SWu Fengguang pause,
1918ece13ac3SWu Fengguang start_time);
1919fe6c9c6eSJan Kara if (flags & BDP_ASYNC) {
1920fe6c9c6eSJan Kara ret = -EAGAIN;
1921fe6c9c6eSJan Kara break;
1922fe6c9c6eSJan Kara }
1923499d05ecSJan Kara __set_current_state(TASK_KILLABLE);
1924*601b5540SJan Kara bdi->last_bdp_sleep = jiffies;
1925d25105e8SWu Fengguang io_schedule_timeout(pause);
192687c6a9b2SJens Axboe
192783712358SWu Fengguang current->dirty_paused_when = now + pause;
192883712358SWu Fengguang current->nr_dirtied = 0;
19297ccb9ad5SWu Fengguang current->nr_dirtied_pause = nr_dirtied_pause;
193083712358SWu Fengguang
1931ffd1f609SWu Fengguang /*
19322bc00aefSTejun Heo * This is typically equal to (dirty < thresh) and can also
19332bc00aefSTejun Heo * keep "1000+ dd on a slow USB stick" under control.
1934ffd1f609SWu Fengguang */
19351df64719SWu Fengguang if (task_ratelimit)
1936ffd1f609SWu Fengguang break;
1937499d05ecSJan Kara
1938c5c6343cSWu Fengguang /*
1939f0953a1bSIngo Molnar * In the case of an unresponsive NFS server whose dirty
1940de1fff37STejun Heo * pages exceed dirty_thresh, give the other, healthy wb's a pipe
1941c5c6343cSWu Fengguang * to go through, so that tasks on them still remain responsive.
1942c5c6343cSWu Fengguang *
19433f8b6fb7SMasahiro Yamada * In theory 1 page is enough to keep the consumer-producer
1944c5c6343cSWu Fengguang * pipe going: the flusher cleans 1 page => the task dirties 1
1945de1fff37STejun Heo * more page. However wb_dirty has accounting errors. So use
194693f78d88STejun Heo * the larger and more IO friendly wb_stat_error.
1947c5c6343cSWu Fengguang */
19482bce774eSWang Long if (sdtc->wb_dirty <= wb_stat_error())
1949c5c6343cSWu Fengguang break;
1950c5c6343cSWu Fengguang
1951499d05ecSJan Kara if (fatal_signal_pending(current))
1952499d05ecSJan Kara break;
19531da177e4SLinus Torvalds }
1954fe6c9c6eSJan Kara return ret;
19551da177e4SLinus Torvalds }
19561da177e4SLinus Torvalds
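/*
 * Illustrative arithmetic (not kernel code): the core of the pause
 * computation in the loop above. A task that dirtied pages_dirtied pages
 * while being allowed task_ratelimit pages/s owes a sleep of
 * HZ * pages_dirtied / task_ratelimit jiffies, minus whatever "think
 * time" it already spent between pauses. All numbers are made up.
 */
#include <stdio.h>

#define DEMO_HZ 250

int main(void)
{
	unsigned long task_ratelimit = 1000;	/* pages/s after pos_ratio scaling */
	unsigned long pages_dirtied = 16;	/* dirtied since the last pause */
	long think = 1;				/* jiffies spent dirtying them */
	long period = DEMO_HZ * pages_dirtied / task_ratelimit;
	long pause = period - think;

	printf("period=%ld jiffies, sleep %ld jiffies (%ld ms)\n",
	       period, pause, pause * 1000 / DEMO_HZ);
	return 0;
}
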
19579d823e8fSWu Fengguang static DEFINE_PER_CPU(int, bdp_ratelimits);
1958245b2e70STejun Heo
195954848d73SWu Fengguang /*
196054848d73SWu Fengguang * Normal tasks are throttled by
196154848d73SWu Fengguang * loop {
196254848d73SWu Fengguang * dirty tsk->nr_dirtied_pause pages;
196354848d73SWu Fengguang * take a snap in balance_dirty_pages();
196454848d73SWu Fengguang * }
196554848d73SWu Fengguang * However there is a worst case: if every task exits immediately after dirtying
196654848d73SWu Fengguang * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be
196754848d73SWu Fengguang * called to throttle the page dirties. The solution is to save the not yet
196854848d73SWu Fengguang * throttled page dirties in dirty_throttle_leaks on task exit and charge them
196954848d73SWu Fengguang * randomly into the running tasks. This works well for the above worst case,
197054848d73SWu Fengguang * as the new task will pick up and accumulate the old task's leaked dirty
197154848d73SWu Fengguang * count and eventually get throttled.
197254848d73SWu Fengguang */
197354848d73SWu Fengguang DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
197454848d73SWu Fengguang
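/*
 * Illustrative userspace sketch (not kernel code): the leak bookkeeping
 * described above. A short-lived task parks its un-throttled dirty count
 * in the per-CPU counter; the next dirtier on that CPU inherits it, so
 * the pages are still charged against somebody's pause budget. The
 * function names are invented for the example.
 */
#include <stdio.h>

static int demo_dirty_throttle_leaks;	/* per-CPU in the kernel */

static void demo_task_exit(int nr_dirtied)
{
	demo_dirty_throttle_leaks += nr_dirtied;  /* never throttled for these */
}

static void demo_task_dirties_page(int *nr_dirtied, int ratelimit)
{
	if (demo_dirty_throttle_leaks > 0 && *nr_dirtied < ratelimit) {
		int pickup = ratelimit - *nr_dirtied;

		if (pickup > demo_dirty_throttle_leaks)
			pickup = demo_dirty_throttle_leaks;
		demo_dirty_throttle_leaks -= pickup;
		*nr_dirtied += pickup;		/* inherit the leaked pages */
	}
	(*nr_dirtied)++;			/* account the new page */
}

int main(void)
{
	int nr_dirtied = 0, ratelimit = 32;

	demo_task_exit(31);	/* short-lived task leaked 31 pages */
	demo_task_dirties_page(&nr_dirtied, ratelimit);
	printf("nr_dirtied=%d, throttles at %d\n", nr_dirtied, ratelimit);
	return 0;
}
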
19751da177e4SLinus Torvalds /**
1976fe6c9c6eSJan Kara * balance_dirty_pages_ratelimited_flags - Balance dirty memory state.
1977fe6c9c6eSJan Kara * @mapping: address_space which was dirtied.
1978fe6c9c6eSJan Kara * @flags: BDP flags.
19791da177e4SLinus Torvalds *
19801da177e4SLinus Torvalds * Processes which are dirtying memory should call in here once for each page
19811da177e4SLinus Torvalds * which was newly dirtied. The function will periodically check the system's
19821da177e4SLinus Torvalds * dirty state and will initiate writeback if needed.
19831da177e4SLinus Torvalds *
1984fe6c9c6eSJan Kara * See balance_dirty_pages_ratelimited() for details.
1985fe6c9c6eSJan Kara *
1986fe6c9c6eSJan Kara * Return: If @flags contains BDP_ASYNC, it may return -EAGAIN to
1987fe6c9c6eSJan Kara * indicate that memory is out of balance and the caller must wait
1988fe6c9c6eSJan Kara * for I/O to complete. Otherwise, it will return 0 to indicate
1989fe6c9c6eSJan Kara * that either memory was already in balance, or it was able to sleep
1990fe6c9c6eSJan Kara * until the amount of dirty memory returned to balance.
19911da177e4SLinus Torvalds */
1992fe6c9c6eSJan Kara int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,
1993fe6c9c6eSJan Kara unsigned int flags)
19941da177e4SLinus Torvalds {
1995dfb8ae56STejun Heo struct inode *inode = mapping->host;
1996dfb8ae56STejun Heo struct backing_dev_info *bdi = inode_to_bdi(inode);
1997dfb8ae56STejun Heo struct bdi_writeback *wb = NULL;
19989d823e8fSWu Fengguang int ratelimit;
1999fe6c9c6eSJan Kara int ret = 0;
20009d823e8fSWu Fengguang int *p;
20011da177e4SLinus Torvalds
2002f56753acSChristoph Hellwig if (!(bdi->capabilities & BDI_CAP_WRITEBACK))
2003fe6c9c6eSJan Kara return ret;
200436715cefSWu Fengguang
2005dfb8ae56STejun Heo if (inode_cgwb_enabled(inode))
2006dfb8ae56STejun Heo wb = wb_get_create_current(bdi, GFP_KERNEL);
2007dfb8ae56STejun Heo if (!wb)
2008dfb8ae56STejun Heo wb = &bdi->wb;
2009dfb8ae56STejun Heo
20109d823e8fSWu Fengguang ratelimit = current->nr_dirtied_pause;
2011a88a341aSTejun Heo if (wb->dirty_exceeded)
20129d823e8fSWu Fengguang ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));
20131da177e4SLinus Torvalds
2014fa5a734eSAndrew Morton preempt_disable();
20159d823e8fSWu Fengguang /*
20169d823e8fSWu Fengguang * This prevents one CPU from accumulating too many dirtied pages without
20179d823e8fSWu Fengguang * calling into balance_dirty_pages(), which can happen when there are
20189d823e8fSWu Fengguang * 1000+ tasks that all start dirtying pages at exactly the same
20199d823e8fSWu Fengguang * time, hence all honour a too-large initial task->nr_dirtied_pause.
20209d823e8fSWu Fengguang */
20217c8e0181SChristoph Lameter p = this_cpu_ptr(&bdp_ratelimits);
20229d823e8fSWu Fengguang if (unlikely(current->nr_dirtied >= ratelimit))
2023fa5a734eSAndrew Morton *p = 0;
2024d3bc1fefSWu Fengguang else if (unlikely(*p >= ratelimit_pages)) {
20259d823e8fSWu Fengguang *p = 0;
20269d823e8fSWu Fengguang ratelimit = 0;
20279d823e8fSWu Fengguang }
202854848d73SWu Fengguang /*
202954848d73SWu Fengguang * Pick up the dirtied pages by the exited tasks. This avoids lots of
203054848d73SWu Fengguang * short-lived tasks (eg. gcc invocations in a kernel build) escaping
203154848d73SWu Fengguang * the dirty throttling and livelock other long-run dirtiers.
203254848d73SWu Fengguang */
20337c8e0181SChristoph Lameter p = this_cpu_ptr(&dirty_throttle_leaks);
203454848d73SWu Fengguang if (*p > 0 && current->nr_dirtied < ratelimit) {
2035d0e1d66bSNamjae Jeon unsigned long nr_pages_dirtied;
203654848d73SWu Fengguang nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
203754848d73SWu Fengguang *p -= nr_pages_dirtied;
203854848d73SWu Fengguang current->nr_dirtied += nr_pages_dirtied;
20391da177e4SLinus Torvalds }
2040fa5a734eSAndrew Morton preempt_enable();
20419d823e8fSWu Fengguang
20429d823e8fSWu Fengguang if (unlikely(current->nr_dirtied >= ratelimit))
2043fe6c9c6eSJan Kara ret = balance_dirty_pages(wb, current->nr_dirtied, flags);
2044dfb8ae56STejun Heo
2045dfb8ae56STejun Heo wb_put(wb);
2046fe6c9c6eSJan Kara return ret;
2047fe6c9c6eSJan Kara }
2048611df5d6SStefan Roesch EXPORT_SYMBOL_GPL(balance_dirty_pages_ratelimited_flags);
2049fe6c9c6eSJan Kara
2050fe6c9c6eSJan Kara /**
2051fe6c9c6eSJan Kara * balance_dirty_pages_ratelimited - balance dirty memory state.
2052fe6c9c6eSJan Kara * @mapping: address_space which was dirtied.
2053fe6c9c6eSJan Kara *
2054fe6c9c6eSJan Kara * Processes which are dirtying memory should call in here once for each page
2055fe6c9c6eSJan Kara * which was newly dirtied. The function will periodically check the system's
2056fe6c9c6eSJan Kara * dirty state and will initiate writeback if needed.
2057fe6c9c6eSJan Kara *
2058fe6c9c6eSJan Kara * Once we're over the dirty memory limit we decrease the ratelimiting
2059fe6c9c6eSJan Kara * by a lot, to prevent individual processes from overshooting the limit
2060fe6c9c6eSJan Kara * by (ratelimit_pages) each.
2061fe6c9c6eSJan Kara */
2062fe6c9c6eSJan Kara void balance_dirty_pages_ratelimited(struct address_space *mapping)
2063fe6c9c6eSJan Kara {
2064fe6c9c6eSJan Kara balance_dirty_pages_ratelimited_flags(mapping, 0);
20651da177e4SLinus Torvalds }
2066d0e1d66bSNamjae Jeon EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
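/*
 * Usage sketch (not part of this file): a filesystem's buffered-write
 * path typically calls balance_dirty_pages_ratelimited() once for each
 * page or folio it dirties, e.g. from its ->write_iter loop. The helper
 * below and its copy step are hypothetical; only the throttling call is
 * the real API exported above.
 */
static void demo_dirty_one_page(struct address_space *mapping)
{
	/* ... lock a folio, copy user data into it, mark it dirty ... */

	/* let the dirty throttle account for (and possibly pause) this task */
	balance_dirty_pages_ratelimited(mapping);
}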
20671da177e4SLinus Torvalds
2068aa661bbeSTejun Heo /**
2069aa661bbeSTejun Heo * wb_over_bg_thresh - does @wb need to be written back?
2070aa661bbeSTejun Heo * @wb: bdi_writeback of interest
2071aa661bbeSTejun Heo *
2072aa661bbeSTejun Heo * Determines whether background writeback should keep writing @wb or
2073a862f68aSMike Rapoport * whether it is already clean enough.
2074a862f68aSMike Rapoport *
2075a862f68aSMike Rapoport * Return: %true if writeback should continue.
2076aa661bbeSTejun Heo */
2077aa661bbeSTejun Heo bool wb_over_bg_thresh(struct bdi_writeback *wb)
2078aa661bbeSTejun Heo {
2079947e9762STejun Heo struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
2080c2aa723aSTejun Heo struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
2081947e9762STejun Heo struct dirty_throttle_control * const gdtc = &gdtc_stor;
2082c2aa723aSTejun Heo struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
2083c2aa723aSTejun Heo &mdtc_stor : NULL;
2084ab19939aSChi Wu unsigned long reclaimable;
2085ab19939aSChi Wu unsigned long thresh;
2086aa661bbeSTejun Heo
2087947e9762STejun Heo /*
2088947e9762STejun Heo * Similar to balance_dirty_pages() but ignores pages being written
2089947e9762STejun Heo * as we're trying to decide whether to put more under writeback.
2090947e9762STejun Heo */
2091947e9762STejun Heo gdtc->avail = global_dirtyable_memory();
20928d92890bSNeilBrown gdtc->dirty = global_node_page_state(NR_FILE_DIRTY);
2093947e9762STejun Heo domain_dirty_limits(gdtc);
2094aa661bbeSTejun Heo
2095947e9762STejun Heo if (gdtc->dirty > gdtc->bg_thresh)
2096aa661bbeSTejun Heo return true;
2097aa661bbeSTejun Heo
2098ab19939aSChi Wu thresh = wb_calc_thresh(gdtc->wb, gdtc->bg_thresh);
2099ab19939aSChi Wu if (thresh < 2 * wb_stat_error())
2100ab19939aSChi Wu reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
2101ab19939aSChi Wu else
2102ab19939aSChi Wu reclaimable = wb_stat(wb, WB_RECLAIMABLE);
2103ab19939aSChi Wu
2104ab19939aSChi Wu if (reclaimable > thresh)
2105aa661bbeSTejun Heo return true;
2106aa661bbeSTejun Heo
2107c2aa723aSTejun Heo if (mdtc) {
2108c5edf9cdSTejun Heo unsigned long filepages, headroom, writeback;
2109c2aa723aSTejun Heo
2110c5edf9cdSTejun Heo mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty,
2111c5edf9cdSTejun Heo &writeback);
2112c5edf9cdSTejun Heo mdtc_calc_avail(mdtc, filepages, headroom);
2113c2aa723aSTejun Heo domain_dirty_limits(mdtc); /* ditto, ignore writeback */
2114c2aa723aSTejun Heo
2115c2aa723aSTejun Heo if (mdtc->dirty > mdtc->bg_thresh)
2116c2aa723aSTejun Heo return true;
2117c2aa723aSTejun Heo
2118ab19939aSChi Wu thresh = wb_calc_thresh(mdtc->wb, mdtc->bg_thresh);
2119ab19939aSChi Wu if (thresh < 2 * wb_stat_error())
2120ab19939aSChi Wu reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
2121ab19939aSChi Wu else
2122ab19939aSChi Wu reclaimable = wb_stat(wb, WB_RECLAIMABLE);
2123ab19939aSChi Wu
2124ab19939aSChi Wu if (reclaimable > thresh)
2125c2aa723aSTejun Heo return true;
2126c2aa723aSTejun Heo }
2127c2aa723aSTejun Heo
2128aa661bbeSTejun Heo return false;
2129aa661bbeSTejun Heo }
2130aa661bbeSTejun Heo
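/*
 * Illustrative sketch with a made-up name (example_more_bg_work()): callers
 * such as the per-bdi flusher use wb_over_bg_thresh() to decide whether to
 * queue another chunk of background writeback for @wb.
 */
static inline long example_more_bg_work(struct bdi_writeback *wb, long chunk)
{
	/* write another @chunk pages only while @wb is over its bg thresh */
	return wb_over_bg_thresh(wb) ? chunk : 0;
}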
2131aa779e51Szhanglianjie #ifdef CONFIG_SYSCTL
21321da177e4SLinus Torvalds /*
21331da177e4SLinus Torvalds * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
21341da177e4SLinus Torvalds */
2135aa779e51Szhanglianjie static int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
213632927393SChristoph Hellwig void *buffer, size_t *length, loff_t *ppos)
21371da177e4SLinus Torvalds {
213894af5846SYafang Shao unsigned int old_interval = dirty_writeback_interval;
213994af5846SYafang Shao int ret;
214094af5846SYafang Shao
214194af5846SYafang Shao ret = proc_dointvec(table, write, buffer, length, ppos);
2142515c24c1SYafang Shao
2143515c24c1SYafang Shao /*
2144515c24c1SYafang Shao * Writing 0 to dirty_writeback_interval will disable periodic writeback
2145515c24c1SYafang Shao * and a different non-zero value will wake up the writeback threads.
2146515c24c1SYafang Shao * wb_wakeup_delayed() would be more appropriate, but it's a pain to
2147515c24c1SYafang Shao * iterate over all bdis and wbs.
2148515c24c1SYafang Shao * The reason we do this is to make the change take effect immediately.
2149515c24c1SYafang Shao */
2150515c24c1SYafang Shao if (!ret && write && dirty_writeback_interval &&
2151515c24c1SYafang Shao dirty_writeback_interval != old_interval)
215294af5846SYafang Shao wakeup_flusher_threads(WB_REASON_PERIODIC);
215394af5846SYafang Shao
215494af5846SYafang Shao return ret;
21551da177e4SLinus Torvalds }
2156aa779e51Szhanglianjie #endif
21571da177e4SLinus Torvalds
2158bca237a5SKees Cook void laptop_mode_timer_fn(struct timer_list *t)
21591da177e4SLinus Torvalds {
2160bca237a5SKees Cook struct backing_dev_info *backing_dev_info =
2161bca237a5SKees Cook from_timer(backing_dev_info, t, laptop_mode_wb_timer);
21621da177e4SLinus Torvalds
2163bca237a5SKees Cook wakeup_flusher_threads_bdi(backing_dev_info, WB_REASON_LAPTOP_TIMER);
21641da177e4SLinus Torvalds }
21651da177e4SLinus Torvalds
21661da177e4SLinus Torvalds /*
21671da177e4SLinus Torvalds * We've spun up the disk and we're in laptop mode: schedule writeback
21681da177e4SLinus Torvalds * of all dirty data a few seconds from now. If the flush is already scheduled
21691da177e4SLinus Torvalds * then push it back - the user is still using the disk.
21701da177e4SLinus Torvalds */
217131373d09SMatthew Garrett void laptop_io_completion(struct backing_dev_info *info)
21721da177e4SLinus Torvalds {
217331373d09SMatthew Garrett mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
21741da177e4SLinus Torvalds }
21751da177e4SLinus Torvalds
21761da177e4SLinus Torvalds /*
21771da177e4SLinus Torvalds * We're in laptop mode and we've just synced. The sync's writes will have
21781da177e4SLinus Torvalds * caused another writeback to be scheduled by laptop_io_completion.
21791da177e4SLinus Torvalds * Nothing needs to be written back anymore, so we unschedule the writeback.
21801da177e4SLinus Torvalds */
21811da177e4SLinus Torvalds void laptop_sync_completion(void)
21821da177e4SLinus Torvalds {
218331373d09SMatthew Garrett struct backing_dev_info *bdi;
218431373d09SMatthew Garrett
218531373d09SMatthew Garrett rcu_read_lock();
218631373d09SMatthew Garrett
218731373d09SMatthew Garrett list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
218831373d09SMatthew Garrett del_timer(&bdi->laptop_mode_wb_timer);
218931373d09SMatthew Garrett
219031373d09SMatthew Garrett rcu_read_unlock();
21911da177e4SLinus Torvalds }
21921da177e4SLinus Torvalds
21931da177e4SLinus Torvalds /*
21941da177e4SLinus Torvalds * If ratelimit_pages is too high then we can get into dirty-data overload
21951da177e4SLinus Torvalds * if a large number of processes all perform writes at the same time.
21961da177e4SLinus Torvalds *
21971da177e4SLinus Torvalds * Here we set ratelimit_pages to a level which ensures that when all CPUs are
21981da177e4SLinus Torvalds * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
21999d823e8fSWu Fengguang * thresholds.
22001da177e4SLinus Torvalds */
22011da177e4SLinus Torvalds
22022d1d43f6SChandra Seetharaman void writeback_set_ratelimit(void)
22031da177e4SLinus Torvalds {
2204dcc25ae7STejun Heo struct wb_domain *dom = &global_wb_domain;
22059d823e8fSWu Fengguang unsigned long background_thresh;
22069d823e8fSWu Fengguang unsigned long dirty_thresh;
2207dcc25ae7STejun Heo
22089d823e8fSWu Fengguang global_dirty_limits(&background_thresh, &dirty_thresh);
2209dcc25ae7STejun Heo dom->dirty_limit = dirty_thresh;
22109d823e8fSWu Fengguang ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
22111da177e4SLinus Torvalds if (ratelimit_pages < 16)
22121da177e4SLinus Torvalds ratelimit_pages = 16;
22131da177e4SLinus Torvalds }
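/*
 * Worked example (illustrative numbers only): with a dirty threshold of
 * 100000 pages and 8 online CPUs, ratelimit_pages becomes
 * 100000 / (8 * 32) = 390 pages, i.e. each CPU may dirty roughly 390 pages
 * between balance_dirty_pages() calls, keeping the collective overshoot
 * within about 1/32 (~3%) of the threshold.
 */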
22141da177e4SLinus Torvalds
22151d7ac6aeSSebastian Andrzej Siewior static int page_writeback_cpu_online(unsigned int cpu)
22161da177e4SLinus Torvalds {
22172d1d43f6SChandra Seetharaman writeback_set_ratelimit();
22181d7ac6aeSSebastian Andrzej Siewior return 0;
22191da177e4SLinus Torvalds }
22201da177e4SLinus Torvalds
2221aa779e51Szhanglianjie #ifdef CONFIG_SYSCTL
22223c6a4cbaSLuis Chamberlain
22233c6a4cbaSLuis Chamberlain /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
22243c6a4cbaSLuis Chamberlain static const unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
22253c6a4cbaSLuis Chamberlain
2226aa779e51Szhanglianjie static struct ctl_table vm_page_writeback_sysctls[] = {
2227aa779e51Szhanglianjie {
2228aa779e51Szhanglianjie .procname = "dirty_background_ratio",
2229aa779e51Szhanglianjie .data = &dirty_background_ratio,
2230aa779e51Szhanglianjie .maxlen = sizeof(dirty_background_ratio),
2231aa779e51Szhanglianjie .mode = 0644,
2232aa779e51Szhanglianjie .proc_handler = dirty_background_ratio_handler,
2233aa779e51Szhanglianjie .extra1 = SYSCTL_ZERO,
2234aa779e51Szhanglianjie .extra2 = SYSCTL_ONE_HUNDRED,
2235aa779e51Szhanglianjie },
2236aa779e51Szhanglianjie {
2237aa779e51Szhanglianjie .procname = "dirty_background_bytes",
2238aa779e51Szhanglianjie .data = &dirty_background_bytes,
2239aa779e51Szhanglianjie .maxlen = sizeof(dirty_background_bytes),
2240aa779e51Szhanglianjie .mode = 0644,
2241aa779e51Szhanglianjie .proc_handler = dirty_background_bytes_handler,
2242aa779e51Szhanglianjie .extra1 = SYSCTL_LONG_ONE,
2243aa779e51Szhanglianjie },
2244aa779e51Szhanglianjie {
2245aa779e51Szhanglianjie .procname = "dirty_ratio",
2246aa779e51Szhanglianjie .data = &vm_dirty_ratio,
2247aa779e51Szhanglianjie .maxlen = sizeof(vm_dirty_ratio),
2248aa779e51Szhanglianjie .mode = 0644,
2249aa779e51Szhanglianjie .proc_handler = dirty_ratio_handler,
2250aa779e51Szhanglianjie .extra1 = SYSCTL_ZERO,
2251aa779e51Szhanglianjie .extra2 = SYSCTL_ONE_HUNDRED,
2252aa779e51Szhanglianjie },
2253aa779e51Szhanglianjie {
2254aa779e51Szhanglianjie .procname = "dirty_bytes",
2255aa779e51Szhanglianjie .data = &vm_dirty_bytes,
2256aa779e51Szhanglianjie .maxlen = sizeof(vm_dirty_bytes),
2257aa779e51Szhanglianjie .mode = 0644,
2258aa779e51Szhanglianjie .proc_handler = dirty_bytes_handler,
2259aa779e51Szhanglianjie .extra1 = (void *)&dirty_bytes_min,
2260aa779e51Szhanglianjie },
2261aa779e51Szhanglianjie {
2262aa779e51Szhanglianjie .procname = "dirty_writeback_centisecs",
2263aa779e51Szhanglianjie .data = &dirty_writeback_interval,
2264aa779e51Szhanglianjie .maxlen = sizeof(dirty_writeback_interval),
2265aa779e51Szhanglianjie .mode = 0644,
2266aa779e51Szhanglianjie .proc_handler = dirty_writeback_centisecs_handler,
2267aa779e51Szhanglianjie },
2268aa779e51Szhanglianjie {
2269aa779e51Szhanglianjie .procname = "dirty_expire_centisecs",
2270aa779e51Szhanglianjie .data = &dirty_expire_interval,
2271aa779e51Szhanglianjie .maxlen = sizeof(dirty_expire_interval),
2272aa779e51Szhanglianjie .mode = 0644,
2273aa779e51Szhanglianjie .proc_handler = proc_dointvec_minmax,
2274aa779e51Szhanglianjie .extra1 = SYSCTL_ZERO,
2275aa779e51Szhanglianjie },
2276aa779e51Szhanglianjie #ifdef CONFIG_HIGHMEM
2277aa779e51Szhanglianjie {
2278aa779e51Szhanglianjie .procname = "highmem_is_dirtyable",
2279aa779e51Szhanglianjie .data = &vm_highmem_is_dirtyable,
2280aa779e51Szhanglianjie .maxlen = sizeof(vm_highmem_is_dirtyable),
2281aa779e51Szhanglianjie .mode = 0644,
2282aa779e51Szhanglianjie .proc_handler = proc_dointvec_minmax,
2283aa779e51Szhanglianjie .extra1 = SYSCTL_ZERO,
2284aa779e51Szhanglianjie .extra2 = SYSCTL_ONE,
2285aa779e51Szhanglianjie },
2286aa779e51Szhanglianjie #endif
2287aa779e51Szhanglianjie {
2288aa779e51Szhanglianjie .procname = "laptop_mode",
2289aa779e51Szhanglianjie .data = &laptop_mode,
2290aa779e51Szhanglianjie .maxlen = sizeof(laptop_mode),
2291aa779e51Szhanglianjie .mode = 0644,
2292aa779e51Szhanglianjie .proc_handler = proc_dointvec_jiffies,
2293aa779e51Szhanglianjie },
2294aa779e51Szhanglianjie {}
2295aa779e51Szhanglianjie };
2296aa779e51Szhanglianjie #endif
2297aa779e51Szhanglianjie
22981da177e4SLinus Torvalds /*
2299dc6e29daSLinus Torvalds * Called early on to tune the page writeback dirty limits.
2300dc6e29daSLinus Torvalds *
2301dc6e29daSLinus Torvalds * We used to scale dirty pages according to how total memory
23020a18e607SDavid Hildenbrand * related to the amount of memory that could be allocated for buffers.
2303dc6e29daSLinus Torvalds *
2304dc6e29daSLinus Torvalds * However, that was when we used "dirty_ratio" to scale with
2305dc6e29daSLinus Torvalds * all memory, and we don't do that any more. "dirty_ratio"
23060a18e607SDavid Hildenbrand * is now applied to total non-HIGHMEM memory, and as such we can't
2307dc6e29daSLinus Torvalds * get into the old insane situation any more where we had
2308dc6e29daSLinus Torvalds * large amounts of dirty pages compared to a small amount of
2309dc6e29daSLinus Torvalds * non-HIGHMEM memory.
2310dc6e29daSLinus Torvalds *
2311dc6e29daSLinus Torvalds * But we might still want to scale the dirty_ratio by how
2312dc6e29daSLinus Torvalds * much memory the box has..
23131da177e4SLinus Torvalds */
23141da177e4SLinus Torvalds void __init page_writeback_init(void)
23151da177e4SLinus Torvalds {
2316a50fcb51SRabin Vincent BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
2317a50fcb51SRabin Vincent
23181d7ac6aeSSebastian Andrzej Siewior cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/writeback:online",
23191d7ac6aeSSebastian Andrzej Siewior page_writeback_cpu_online, NULL);
23201d7ac6aeSSebastian Andrzej Siewior cpuhp_setup_state(CPUHP_MM_WRITEBACK_DEAD, "mm/writeback:dead", NULL,
23211d7ac6aeSSebastian Andrzej Siewior page_writeback_cpu_online);
2322aa779e51Szhanglianjie #ifdef CONFIG_SYSCTL
2323aa779e51Szhanglianjie register_sysctl_init("vm", vm_page_writeback_sysctls);
2324aa779e51Szhanglianjie #endif
23251da177e4SLinus Torvalds }
23261da177e4SLinus Torvalds
2327811d736fSDavid Howells /**
2328f446daaeSJan Kara * tag_pages_for_writeback - tag pages to be written by write_cache_pages
2329f446daaeSJan Kara * @mapping: address space structure to write
2330f446daaeSJan Kara * @start: starting page index
2331f446daaeSJan Kara * @end: ending page index (inclusive)
2332f446daaeSJan Kara *
2333f446daaeSJan Kara * This function scans the page range from @start to @end (inclusive) and tags
2334f446daaeSJan Kara * all pages that have the DIRTY tag set with a special TOWRITE tag. The idea is
2335f446daaeSJan Kara * that write_cache_pages (or whoever calls this function) will then use
2336f446daaeSJan Kara * the TOWRITE tag to identify pages eligible for writeback. This mechanism is
2337f446daaeSJan Kara * used to avoid livelocking of writeback by a process steadily creating new
2338f446daaeSJan Kara * dirty pages in the file (thus it is important for this function to be quick
2339f446daaeSJan Kara * so that it can tag pages faster than a dirtying process can create them).
2340f446daaeSJan Kara */
2341f446daaeSJan Kara void tag_pages_for_writeback(struct address_space *mapping,
2342f446daaeSJan Kara pgoff_t start, pgoff_t end)
2343f446daaeSJan Kara {
2344ff9c745bSMatthew Wilcox XA_STATE(xas, &mapping->i_pages, start);
2345ff9c745bSMatthew Wilcox unsigned int tagged = 0;
2346ff9c745bSMatthew Wilcox void *page;
2347f446daaeSJan Kara
2348ff9c745bSMatthew Wilcox xas_lock_irq(&xas);
2349ff9c745bSMatthew Wilcox xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) {
2350ff9c745bSMatthew Wilcox xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE);
2351ff9c745bSMatthew Wilcox if (++tagged % XA_CHECK_SCHED)
2352268f42deSMatthew Wilcox continue;
2353ff9c745bSMatthew Wilcox
2354ff9c745bSMatthew Wilcox xas_pause(&xas);
2355ff9c745bSMatthew Wilcox xas_unlock_irq(&xas);
2356f446daaeSJan Kara cond_resched();
2357ff9c745bSMatthew Wilcox xas_lock_irq(&xas);
2358268f42deSMatthew Wilcox }
2359ff9c745bSMatthew Wilcox xas_unlock_irq(&xas);
2360f446daaeSJan Kara }
2361f446daaeSJan Kara EXPORT_SYMBOL(tag_pages_for_writeback);
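/*
 * Illustrative sketch (example_tag_range() is a made-up helper): for an
 * integrity sync, a caller first tags the currently dirty pages with
 * TOWRITE and then walks only that tag, so pages dirtied after this point
 * cannot livelock the walk.
 */
static inline void example_tag_range(struct address_space *mapping,
				     loff_t start, loff_t end)
{
	pgoff_t first = start >> PAGE_SHIFT;
	pgoff_t last = end >> PAGE_SHIFT;

	/* snapshot today's dirty pages under the TOWRITE tag ... */
	tag_pages_for_writeback(mapping, first, last);
	/* ... and later look them up via PAGECACHE_TAG_TOWRITE */
}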
2362f446daaeSJan Kara
2363f446daaeSJan Kara /**
23640ea97180SMiklos Szeredi * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2365811d736fSDavid Howells * @mapping: address space structure to write
2366811d736fSDavid Howells * @wbc: subtract the number of written pages from *@wbc->nr_to_write
23670ea97180SMiklos Szeredi * @writepage: function called for each page
23680ea97180SMiklos Szeredi * @data: data passed to writepage function
2369811d736fSDavid Howells *
23700ea97180SMiklos Szeredi * If a page is already under I/O, write_cache_pages() skips it, even
2371811d736fSDavid Howells * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2372811d736fSDavid Howells * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2373811d736fSDavid Howells * and msync() need to guarantee that all the data which was dirty at the time
2374811d736fSDavid Howells * the call was made get new I/O started against them. If wbc->sync_mode is
2375811d736fSDavid Howells * WB_SYNC_ALL then we were called for data integrity and we must wait for
2376811d736fSDavid Howells * existing IO to complete.
2377f446daaeSJan Kara *
2378f446daaeSJan Kara * To avoid livelocks (when other process dirties new pages), we first tag
2379f446daaeSJan Kara * pages which should be written back with TOWRITE tag and only then start
2380f446daaeSJan Kara * writing them. For data-integrity sync we have to be careful so that we do
2381f446daaeSJan Kara * not miss some pages (e.g., because some other process has cleared TOWRITE
2382f446daaeSJan Kara * tag we set). The rule we follow is that TOWRITE tag can be cleared only
2383f446daaeSJan Kara * by the process clearing the DIRTY tag (and submitting the page for IO).
238464081362SDave Chinner *
238564081362SDave Chinner * To avoid deadlocks between range_cyclic writeback and callers that hold
238664081362SDave Chinner * pages in PageWriteback to aggregate IO until write_cache_pages() returns,
238764081362SDave Chinner * we do not loop back to the start of the file. Doing so causes a page
238864081362SDave Chinner * lock/page writeback access order inversion - we should only ever lock
238964081362SDave Chinner * multiple pages in ascending page->index order, and looping back to the start
239064081362SDave Chinner * of the file violates that rule and causes deadlocks.
2391a862f68aSMike Rapoport *
2392a862f68aSMike Rapoport * Return: %0 on success, negative error code otherwise
2393811d736fSDavid Howells */
23940ea97180SMiklos Szeredi int write_cache_pages(struct address_space *mapping,
23950ea97180SMiklos Szeredi struct writeback_control *wbc, writepage_t writepage,
23960ea97180SMiklos Szeredi void *data)
2397811d736fSDavid Howells {
2398811d736fSDavid Howells int ret = 0;
2399811d736fSDavid Howells int done = 0;
24003fa750dcSBrian Foster int error;
24010fff435fSVishal Moola (Oracle) struct folio_batch fbatch;
24020fff435fSVishal Moola (Oracle) int nr_folios;
2403811d736fSDavid Howells pgoff_t index;
2404811d736fSDavid Howells pgoff_t end; /* Inclusive */
2405bd19e012SNick Piggin pgoff_t done_index;
2406811d736fSDavid Howells int range_whole = 0;
2407ff9c745bSMatthew Wilcox xa_mark_t tag;
2408811d736fSDavid Howells
24090fff435fSVishal Moola (Oracle) folio_batch_init(&fbatch);
2410811d736fSDavid Howells if (wbc->range_cyclic) {
241128659cc8SChao Yu index = mapping->writeback_index; /* prev offset */
2412811d736fSDavid Howells end = -1;
2413811d736fSDavid Howells } else {
241409cbfeafSKirill A. Shutemov index = wbc->range_start >> PAGE_SHIFT;
241509cbfeafSKirill A. Shutemov end = wbc->range_end >> PAGE_SHIFT;
2416811d736fSDavid Howells if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2417811d736fSDavid Howells range_whole = 1;
2418811d736fSDavid Howells }
2419cc7b8f62SMauricio Faria de Oliveira if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) {
2420f446daaeSJan Kara tag_pages_for_writeback(mapping, index, end);
2421cc7b8f62SMauricio Faria de Oliveira tag = PAGECACHE_TAG_TOWRITE;
2422cc7b8f62SMauricio Faria de Oliveira } else {
2423cc7b8f62SMauricio Faria de Oliveira tag = PAGECACHE_TAG_DIRTY;
2424cc7b8f62SMauricio Faria de Oliveira }
2425bd19e012SNick Piggin done_index = index;
24265a3d5c98SNick Piggin while (!done && (index <= end)) {
24275a3d5c98SNick Piggin int i;
24285a3d5c98SNick Piggin
24290fff435fSVishal Moola (Oracle) nr_folios = filemap_get_folios_tag(mapping, &index, end,
24300fff435fSVishal Moola (Oracle) tag, &fbatch);
24310fff435fSVishal Moola (Oracle)
24320fff435fSVishal Moola (Oracle) if (nr_folios == 0)
24335a3d5c98SNick Piggin break;
2434811d736fSDavid Howells
24350fff435fSVishal Moola (Oracle) for (i = 0; i < nr_folios; i++) {
24360fff435fSVishal Moola (Oracle) struct folio *folio = fbatch.folios[i];
24378344a3d4SMatthew Wilcox (Oracle) unsigned long nr;
2438811d736fSDavid Howells
24390fff435fSVishal Moola (Oracle) done_index = folio->index;
2440bd19e012SNick Piggin
24410fff435fSVishal Moola (Oracle) folio_lock(folio);
2442811d736fSDavid Howells
24435a3d5c98SNick Piggin /*
24445a3d5c98SNick Piggin * Page truncated or invalidated. We can freely skip it
24455a3d5c98SNick Piggin * then, even for data integrity operations: the page
24465a3d5c98SNick Piggin * has disappeared concurrently, so there can be no
2447f0953a1bSIngo Molnar * real expectation that this data integrity operation covers it,
24485a3d5c98SNick Piggin * even if there is now a new, dirty page at the same
24495a3d5c98SNick Piggin * pagecache address.
24505a3d5c98SNick Piggin */
24510fff435fSVishal Moola (Oracle) if (unlikely(folio->mapping != mapping)) {
24525a3d5c98SNick Piggin continue_unlock:
24530fff435fSVishal Moola (Oracle) folio_unlock(folio);
2454811d736fSDavid Howells continue;
2455811d736fSDavid Howells }
2456811d736fSDavid Howells
24570fff435fSVishal Moola (Oracle) if (!folio_test_dirty(folio)) {
2458515f4a03SNick Piggin /* someone wrote it for us */
2459515f4a03SNick Piggin goto continue_unlock;
2460515f4a03SNick Piggin }
2461515f4a03SNick Piggin
24620fff435fSVishal Moola (Oracle) if (folio_test_writeback(folio)) {
2463811d736fSDavid Howells if (wbc->sync_mode != WB_SYNC_NONE)
24640fff435fSVishal Moola (Oracle) folio_wait_writeback(folio);
2465515f4a03SNick Piggin else
2466515f4a03SNick Piggin goto continue_unlock;
2467515f4a03SNick Piggin }
2468811d736fSDavid Howells
24690fff435fSVishal Moola (Oracle) BUG_ON(folio_test_writeback(folio));
24700fff435fSVishal Moola (Oracle) if (!folio_clear_dirty_for_io(folio))
24715a3d5c98SNick Piggin goto continue_unlock;
2472811d736fSDavid Howells
2473de1414a6SChristoph Hellwig trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
2474d585bdbeSMatthew Wilcox (Oracle) error = writepage(folio, wbc, data);
24758344a3d4SMatthew Wilcox (Oracle) nr = folio_nr_pages(folio);
24763fa750dcSBrian Foster if (unlikely(error)) {
247700266770SNick Piggin /*
24783fa750dcSBrian Foster * Handle errors according to the type of
24793fa750dcSBrian Foster * writeback. There's no need to continue for
24803fa750dcSBrian Foster * background writeback. Just push done_index
24813fa750dcSBrian Foster * past this page so media errors won't choke
24823fa750dcSBrian Foster * writeout for the entire file. For integrity
24833fa750dcSBrian Foster * writeback, we must process the entire dirty
24843fa750dcSBrian Foster * set regardless of errors because the fs may
24853fa750dcSBrian Foster * still have state to clear for each page. In
24863fa750dcSBrian Foster * that case we continue processing and return
24873fa750dcSBrian Foster * the first error.
248800266770SNick Piggin */
24893fa750dcSBrian Foster if (error == AOP_WRITEPAGE_ACTIVATE) {
24900fff435fSVishal Moola (Oracle) folio_unlock(folio);
24913fa750dcSBrian Foster error = 0;
24923fa750dcSBrian Foster } else if (wbc->sync_mode != WB_SYNC_ALL) {
24933fa750dcSBrian Foster ret = error;
24948344a3d4SMatthew Wilcox (Oracle) done_index = folio->index + nr;
249500266770SNick Piggin done = 1;
249600266770SNick Piggin break;
2497e4230030SAndrew Morton }
24983fa750dcSBrian Foster if (!ret)
24993fa750dcSBrian Foster ret = error;
250000266770SNick Piggin }
250100266770SNick Piggin
2502dcf6a79dSArtem Bityutskiy /*
2503546a1924SDave Chinner * We stop writing back only if we are not doing
2504546a1924SDave Chinner * integrity sync. In case of integrity sync we have to
2505546a1924SDave Chinner * keep going until we have written all the pages
2506546a1924SDave Chinner * we tagged for writeback prior to entering this loop.
2507dcf6a79dSArtem Bityutskiy */
25088344a3d4SMatthew Wilcox (Oracle) wbc->nr_to_write -= nr;
25098344a3d4SMatthew Wilcox (Oracle) if (wbc->nr_to_write <= 0 &&
2510546a1924SDave Chinner wbc->sync_mode == WB_SYNC_NONE) {
2511811d736fSDavid Howells done = 1;
251282fd1a9aSAndrew Morton break;
251382fd1a9aSAndrew Morton }
251489e12190SFederico Cuello }
25150fff435fSVishal Moola (Oracle) folio_batch_release(&fbatch);
2516811d736fSDavid Howells cond_resched();
2517811d736fSDavid Howells }
251864081362SDave Chinner
2519811d736fSDavid Howells /*
252064081362SDave Chinner * If we hit the last page and there is more work to be done: wrap
252164081362SDave Chinner * the index back to the start of the file for the next
252264081362SDave Chinner * time we are called.
2523811d736fSDavid Howells */
252464081362SDave Chinner if (wbc->range_cyclic && !done)
252564081362SDave Chinner done_index = 0;
25260b564927SDave Chinner if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2527bd19e012SNick Piggin mapping->writeback_index = done_index;
252806d6cf69SAneesh Kumar K.V
2529811d736fSDavid Howells return ret;
2530811d736fSDavid Howells }
25310ea97180SMiklos Szeredi EXPORT_SYMBOL(write_cache_pages);
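/*
 * Illustrative sketch, assuming a hypothetical filesystem: example_write_folio()
 * and example_writepages() are made-up names. A ->writepages implementation
 * without its own range walking can simply hand a per-folio callback to
 * write_cache_pages(), which delivers each folio locked and with its dirty
 * bit already cleared for I/O.
 */
static int example_write_folio(struct folio *folio,
			       struct writeback_control *wbc, void *data)
{
	/* a real filesystem would build and submit I/O for @folio here */
	folio_unlock(folio);
	return 0;
}

static int example_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	return write_cache_pages(mapping, wbc, example_write_folio, mapping);
}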
25320ea97180SMiklos Szeredi
2533d585bdbeSMatthew Wilcox (Oracle) static int writepage_cb(struct folio *folio, struct writeback_control *wbc,
25340ea97180SMiklos Szeredi void *data)
25350ea97180SMiklos Szeredi {
25360ea97180SMiklos Szeredi struct address_space *mapping = data;
2537d585bdbeSMatthew Wilcox (Oracle) int ret = mapping->a_ops->writepage(&folio->page, wbc);
25380ea97180SMiklos Szeredi mapping_set_error(mapping, ret);
25390ea97180SMiklos Szeredi return ret;
25400ea97180SMiklos Szeredi }
25410ea97180SMiklos Szeredi
25421da177e4SLinus Torvalds int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
25431da177e4SLinus Torvalds {
254422905f77SAndrew Morton int ret;
2545fee468fdSJan Kara struct bdi_writeback *wb;
254622905f77SAndrew Morton
25471da177e4SLinus Torvalds if (wbc->nr_to_write <= 0)
25481da177e4SLinus Torvalds return 0;
2549fee468fdSJan Kara wb = inode_to_wb_wbc(mapping->host, wbc);
2550fee468fdSJan Kara wb_bandwidth_estimate_start(wb);
255180a2ea9fSTheodore Ts'o while (1) {
2552c2ca7a59SChristoph Hellwig if (mapping->a_ops->writepages) {
255322905f77SAndrew Morton ret = mapping->a_ops->writepages(mapping, wbc);
2554c2ca7a59SChristoph Hellwig } else if (mapping->a_ops->writepage) {
2555c2ca7a59SChristoph Hellwig struct blk_plug plug;
2556c2ca7a59SChristoph Hellwig
2557c2ca7a59SChristoph Hellwig blk_start_plug(&plug);
2558c2ca7a59SChristoph Hellwig ret = write_cache_pages(mapping, wbc, writepage_cb,
2559c2ca7a59SChristoph Hellwig mapping);
2560c2ca7a59SChristoph Hellwig blk_finish_plug(&plug);
2561c2ca7a59SChristoph Hellwig } else {
2562c2ca7a59SChristoph Hellwig /* deal with chardevs and other special files */
2563c2ca7a59SChristoph Hellwig ret = 0;
2564c2ca7a59SChristoph Hellwig }
2565c2ca7a59SChristoph Hellwig if (ret != -ENOMEM || wbc->sync_mode != WB_SYNC_ALL)
256680a2ea9fSTheodore Ts'o break;
25678d58802fSMel Gorman
25688d58802fSMel Gorman /*
25698d58802fSMel Gorman * Lacking an allocation context or the locality or writeback
25708d58802fSMel Gorman * state of any of the inode's pages, throttle based on
25718d58802fSMel Gorman * writeback activity on the local node. It's as good a
25728d58802fSMel Gorman * guess as any.
25738d58802fSMel Gorman */
25748d58802fSMel Gorman reclaim_throttle(NODE_DATA(numa_node_id()),
2575c3f4a9a2SMel Gorman VMSCAN_THROTTLE_WRITEBACK);
257680a2ea9fSTheodore Ts'o }
257745a2966fSJan Kara /*
257845a2966fSJan Kara * Usually few of the pages we've just submitted have been written by now,
257945a2966fSJan Kara * but if writeback is being submitted constantly, this makes sure the
258045a2966fSJan Kara * writeback bandwidth estimate is updated once in a while.
258145a2966fSJan Kara */
258220792ebfSJan Kara if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
258320792ebfSJan Kara BANDWIDTH_INTERVAL))
2584fee468fdSJan Kara wb_update_bandwidth(wb);
258522905f77SAndrew Morton return ret;
25861da177e4SLinus Torvalds }
25871da177e4SLinus Torvalds
25881da177e4SLinus Torvalds /*
258976719325SKen Chen * For address_spaces which do not use buffers nor write back.
259076719325SKen Chen */
259146de8b97SMatthew Wilcox (Oracle) bool noop_dirty_folio(struct address_space *mapping, struct folio *folio)
259276719325SKen Chen {
259346de8b97SMatthew Wilcox (Oracle) if (!folio_test_dirty(folio))
259446de8b97SMatthew Wilcox (Oracle) return !folio_test_set_dirty(folio);
259546de8b97SMatthew Wilcox (Oracle) return false;
259676719325SKen Chen }
259746de8b97SMatthew Wilcox (Oracle) EXPORT_SYMBOL(noop_dirty_folio);
259876719325SKen Chen
259976719325SKen Chen /*
2600e3a7cca1SEdward Shishkin * Helper function for set_page_dirty family.
2601c4843a75SGreg Thelen *
26026c77b607SKefeng Wang * Caller must hold folio_memcg_lock().
2603c4843a75SGreg Thelen *
2604e3a7cca1SEdward Shishkin * NOTE: This relies on being atomic wrt interrupts.
2605e3a7cca1SEdward Shishkin */
2606203a3151SMatthew Wilcox (Oracle) static void folio_account_dirtied(struct folio *folio,
26076e1cae88SMatthew Wilcox (Oracle) struct address_space *mapping)
2608e3a7cca1SEdward Shishkin {
260952ebea74STejun Heo struct inode *inode = mapping->host;
261052ebea74STejun Heo
2611b9b0ff61SMatthew Wilcox (Oracle) trace_writeback_dirty_folio(folio, mapping);
26129fb0a7daSTejun Heo
2613f56753acSChristoph Hellwig if (mapping_can_writeback(mapping)) {
261452ebea74STejun Heo struct bdi_writeback *wb;
2615203a3151SMatthew Wilcox (Oracle) long nr = folio_nr_pages(folio);
2616de1414a6SChristoph Hellwig
26179cfb816bSMatthew Wilcox (Oracle) inode_attach_wb(inode, folio);
261852ebea74STejun Heo wb = inode_to_wb(inode);
2619e3a7cca1SEdward Shishkin
2620203a3151SMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
2621203a3151SMatthew Wilcox (Oracle) __zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
2622203a3151SMatthew Wilcox (Oracle) __node_stat_mod_folio(folio, NR_DIRTIED, nr);
2623203a3151SMatthew Wilcox (Oracle) wb_stat_mod(wb, WB_RECLAIMABLE, nr);
2624203a3151SMatthew Wilcox (Oracle) wb_stat_mod(wb, WB_DIRTIED, nr);
2625203a3151SMatthew Wilcox (Oracle) task_io_account_write(nr * PAGE_SIZE);
2626203a3151SMatthew Wilcox (Oracle) current->nr_dirtied += nr;
2627203a3151SMatthew Wilcox (Oracle) __this_cpu_add(bdp_ratelimits, nr);
262897b27821STejun Heo
2629203a3151SMatthew Wilcox (Oracle) mem_cgroup_track_foreign_dirty(folio, wb);
2630e3a7cca1SEdward Shishkin }
2631e3a7cca1SEdward Shishkin }
2632e3a7cca1SEdward Shishkin
2633e3a7cca1SEdward Shishkin /*
2634b9ea2515SKonstantin Khlebnikov * Helper function for deaccounting a dirty page without writeback.
2635b9ea2515SKonstantin Khlebnikov *
26366c77b607SKefeng Wang * Caller must hold folio_memcg_lock().
2637b9ea2515SKonstantin Khlebnikov */
2638566d3362SHugh Dickins void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
2639b9ea2515SKonstantin Khlebnikov {
2640fc9b6a53SMatthew Wilcox (Oracle) long nr = folio_nr_pages(folio);
2641566d3362SHugh Dickins
2642fc9b6a53SMatthew Wilcox (Oracle) lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
2643fc9b6a53SMatthew Wilcox (Oracle) zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
2644fc9b6a53SMatthew Wilcox (Oracle) wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
2645fc9b6a53SMatthew Wilcox (Oracle) task_io_account_cancelled_write(nr * PAGE_SIZE);
2646b9ea2515SKonstantin Khlebnikov }
2647b9ea2515SKonstantin Khlebnikov
2648b9ea2515SKonstantin Khlebnikov /*
2649203a3151SMatthew Wilcox (Oracle) * Mark the folio dirty, set the dirty tag in the page cache and mark
2650203a3151SMatthew Wilcox (Oracle) * the inode dirty.
26516e1cae88SMatthew Wilcox (Oracle) *
2652203a3151SMatthew Wilcox (Oracle) * If warn is true, then emit a warning if the folio is not uptodate and has
26536e1cae88SMatthew Wilcox (Oracle) * not been truncated.
26546e1cae88SMatthew Wilcox (Oracle) *
26556c77b607SKefeng Wang * The caller must hold folio_memcg_lock(). Most callers have the folio
2656a229a4f0SMatthew Wilcox (Oracle) * locked. A few have the folio blocked from truncation through other
2657e9adcfecSMike Kravetz * means (eg zap_vma_pages() has it mapped and is holding the page table
2658a229a4f0SMatthew Wilcox (Oracle) * lock). This can also be called from mark_buffer_dirty(), which I
2659a229a4f0SMatthew Wilcox (Oracle) * cannot prove is always protected against truncate.
26606e1cae88SMatthew Wilcox (Oracle) */
2661203a3151SMatthew Wilcox (Oracle) void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
26626e1cae88SMatthew Wilcox (Oracle) int warn)
26636e1cae88SMatthew Wilcox (Oracle) {
26646e1cae88SMatthew Wilcox (Oracle) unsigned long flags;
26656e1cae88SMatthew Wilcox (Oracle)
26666e1cae88SMatthew Wilcox (Oracle) xa_lock_irqsave(&mapping->i_pages, flags);
2667203a3151SMatthew Wilcox (Oracle) if (folio->mapping) { /* Race with truncate? */
2668203a3151SMatthew Wilcox (Oracle) WARN_ON_ONCE(warn && !folio_test_uptodate(folio));
2669203a3151SMatthew Wilcox (Oracle) folio_account_dirtied(folio, mapping);
2670203a3151SMatthew Wilcox (Oracle) __xa_set_mark(&mapping->i_pages, folio_index(folio),
26716e1cae88SMatthew Wilcox (Oracle) PAGECACHE_TAG_DIRTY);
26726e1cae88SMatthew Wilcox (Oracle) }
26736e1cae88SMatthew Wilcox (Oracle) xa_unlock_irqrestore(&mapping->i_pages, flags);
26746e1cae88SMatthew Wilcox (Oracle) }
26756e1cae88SMatthew Wilcox (Oracle)
267685d4d2ebSMatthew Wilcox (Oracle) /**
267785d4d2ebSMatthew Wilcox (Oracle) * filemap_dirty_folio - Mark a folio dirty for filesystems which do not use buffer_heads.
267885d4d2ebSMatthew Wilcox (Oracle) * @mapping: Address space this folio belongs to.
267985d4d2ebSMatthew Wilcox (Oracle) * @folio: Folio to be marked as dirty.
26801da177e4SLinus Torvalds *
268185d4d2ebSMatthew Wilcox (Oracle) * Filesystems which do not use buffer heads should call this function
268285d4d2ebSMatthew Wilcox (Oracle) * from their set_page_dirty address space operation. It ignores the
268385d4d2ebSMatthew Wilcox (Oracle) * contents of folio_get_private(), so if the filesystem marks individual
268485d4d2ebSMatthew Wilcox (Oracle) * blocks as dirty, the filesystem should handle that itself.
26851da177e4SLinus Torvalds *
268685d4d2ebSMatthew Wilcox (Oracle) * This is also sometimes used by filesystems which use buffer_heads when
268785d4d2ebSMatthew Wilcox (Oracle) * a single buffer is being dirtied: we want to set the folio dirty in
268885d4d2ebSMatthew Wilcox (Oracle) * that case, but not all the buffers. This is a "bottom-up" dirtying,
2689e621900aSMatthew Wilcox (Oracle) * whereas block_dirty_folio() is a "top-down" dirtying.
269085d4d2ebSMatthew Wilcox (Oracle) *
269185d4d2ebSMatthew Wilcox (Oracle) * The caller must ensure this doesn't race with truncation. Most will
269285d4d2ebSMatthew Wilcox (Oracle) * simply hold the folio lock, but e.g. zap_pte_range() calls with the
269385d4d2ebSMatthew Wilcox (Oracle) * folio mapped and the pte lock held, which also locks out truncation.
26941da177e4SLinus Torvalds */
269585d4d2ebSMatthew Wilcox (Oracle) bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio)
26961da177e4SLinus Torvalds {
269785d4d2ebSMatthew Wilcox (Oracle) folio_memcg_lock(folio);
269885d4d2ebSMatthew Wilcox (Oracle) if (folio_test_set_dirty(folio)) {
269985d4d2ebSMatthew Wilcox (Oracle) folio_memcg_unlock(folio);
270085d4d2ebSMatthew Wilcox (Oracle) return false;
2701c4843a75SGreg Thelen }
270285d4d2ebSMatthew Wilcox (Oracle)
270385d4d2ebSMatthew Wilcox (Oracle) __folio_mark_dirty(folio, mapping, !folio_test_private(folio));
270485d4d2ebSMatthew Wilcox (Oracle) folio_memcg_unlock(folio);
2705c4843a75SGreg Thelen
27061da177e4SLinus Torvalds if (mapping->host) {
27071da177e4SLinus Torvalds /* !PageAnon && !swapper_space */
27088c08540fSAndrew Morton __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
27091da177e4SLinus Torvalds }
271085d4d2ebSMatthew Wilcox (Oracle) return true;
27111da177e4SLinus Torvalds }
271285d4d2ebSMatthew Wilcox (Oracle) EXPORT_SYMBOL(filemap_dirty_folio);
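/*
 * Illustrative sketch with a made-up structure name: a filesystem that does
 * not use buffer_heads would typically wire filemap_dirty_folio() straight
 * into its address_space_operations.
 */
static const struct address_space_operations example_aops = {
	.dirty_folio	= filemap_dirty_folio,
	/* .read_folio, .writepages etc. would be filled in by a real fs */
};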
27131da177e4SLinus Torvalds
271425ff8b15SMatthew Wilcox (Oracle) /**
2715cd78ab11SMatthew Wilcox (Oracle) * folio_redirty_for_writepage - Decline to write a dirty folio.
2716cd78ab11SMatthew Wilcox (Oracle) * @wbc: The writeback control.
2717cd78ab11SMatthew Wilcox (Oracle) * @folio: The folio.
2718cd78ab11SMatthew Wilcox (Oracle) *
2719cd78ab11SMatthew Wilcox (Oracle) * When a writepage implementation decides that it doesn't want to write
2720cd78ab11SMatthew Wilcox (Oracle) * @folio for some reason, it should call this function, unlock @folio and
2721cd78ab11SMatthew Wilcox (Oracle) * return 0.
2722cd78ab11SMatthew Wilcox (Oracle) *
2723cd78ab11SMatthew Wilcox (Oracle) * Return: True if we redirtied the folio. False if someone else dirtied
2724cd78ab11SMatthew Wilcox (Oracle) * it first.
27251da177e4SLinus Torvalds */
2726cd78ab11SMatthew Wilcox (Oracle) bool folio_redirty_for_writepage(struct writeback_control *wbc,
2727cd78ab11SMatthew Wilcox (Oracle) struct folio *folio)
27281da177e4SLinus Torvalds {
2729ed2da924SChristoph Hellwig struct address_space *mapping = folio->mapping;
2730cd78ab11SMatthew Wilcox (Oracle) long nr = folio_nr_pages(folio);
2731ed2da924SChristoph Hellwig bool ret;
27328d38633cSKonstantin Khebnikov
2733cd78ab11SMatthew Wilcox (Oracle) wbc->pages_skipped += nr;
2734ed2da924SChristoph Hellwig ret = filemap_dirty_folio(mapping, folio);
2735ed2da924SChristoph Hellwig if (mapping && mapping_can_writeback(mapping)) {
2736ed2da924SChristoph Hellwig struct inode *inode = mapping->host;
2737ed2da924SChristoph Hellwig struct bdi_writeback *wb;
2738ed2da924SChristoph Hellwig struct wb_lock_cookie cookie = {};
2739cd78ab11SMatthew Wilcox (Oracle)
2740ed2da924SChristoph Hellwig wb = unlocked_inode_to_wb_begin(inode, &cookie);
2741ed2da924SChristoph Hellwig current->nr_dirtied -= nr;
2742ed2da924SChristoph Hellwig node_stat_mod_folio(folio, NR_DIRTIED, -nr);
2743ed2da924SChristoph Hellwig wb_stat_mod(wb, WB_DIRTIED, -nr);
2744ed2da924SChristoph Hellwig unlocked_inode_to_wb_end(inode, &cookie);
2745ed2da924SChristoph Hellwig }
27468d38633cSKonstantin Khebnikov return ret;
27471da177e4SLinus Torvalds }
2748cd78ab11SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_redirty_for_writepage);
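/*
 * Illustrative sketch (example_writepage_backoff() is a made-up name): when
 * a writepage implementation cannot write @folio right now, for instance
 * because it would have to block and @wbc forbids that, it redirties the
 * folio, unlocks it and returns 0 as described above.
 */
static int example_writepage_backoff(struct folio *folio,
				     struct writeback_control *wbc)
{
	folio_redirty_for_writepage(wbc, folio);
	folio_unlock(folio);
	return 0;
}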
27491da177e4SLinus Torvalds
2750b5e84594SMatthew Wilcox (Oracle) /**
2751b5e84594SMatthew Wilcox (Oracle) * folio_mark_dirty - Mark a folio as being modified.
2752b5e84594SMatthew Wilcox (Oracle) * @folio: The folio.
27536746aff7SWu Fengguang *
27542ca456c2SMatthew Wilcox (Oracle) * The folio may not be truncated while this function is running.
27552ca456c2SMatthew Wilcox (Oracle) * Holding the folio lock is sufficient to prevent truncation, but some
27562ca456c2SMatthew Wilcox (Oracle) * callers cannot acquire a sleeping lock. These callers instead hold
27572ca456c2SMatthew Wilcox (Oracle) * the page table lock for a page table which contains at least one page
27582ca456c2SMatthew Wilcox (Oracle) * in this folio. Truncation will block on the page table lock as it
27592ca456c2SMatthew Wilcox (Oracle) * unmaps pages before removing the folio from its mapping.
2760b5e84594SMatthew Wilcox (Oracle) *
2761b5e84594SMatthew Wilcox (Oracle) * Return: True if the folio was newly dirtied, false if it was already dirty.
27621da177e4SLinus Torvalds */
2763b5e84594SMatthew Wilcox (Oracle) bool folio_mark_dirty(struct folio *folio)
27641da177e4SLinus Torvalds {
2765b5e84594SMatthew Wilcox (Oracle) struct address_space *mapping = folio_mapping(folio);
27661da177e4SLinus Torvalds
27671da177e4SLinus Torvalds if (likely(mapping)) {
2768278df9f4SMinchan Kim /*
27695a9e3474SVishal Moola (Oracle) * readahead/folio_deactivate could leave
27706f31a5a2SMatthew Wilcox (Oracle) * PG_readahead/PG_reclaim set due to a race with folio_end_writeback.
27716f31a5a2SMatthew Wilcox (Oracle) * For readahead, if the folio is written the flag is
2772278df9f4SMinchan Kim * reset, so there is no problem.
27735a9e3474SVishal Moola (Oracle) * For folio_deactivate, if the folio is redirtied,
27746f31a5a2SMatthew Wilcox (Oracle) * the flag is also reset; but if the
27756f31a5a2SMatthew Wilcox (Oracle) * folio is then used by readahead it will confuse readahead
27766f31a5a2SMatthew Wilcox (Oracle) * and make it restart the size ramp-up process. But that is
2777278df9f4SMinchan Kim * only a trivial problem.
2778278df9f4SMinchan Kim */
2779b5e84594SMatthew Wilcox (Oracle) if (folio_test_reclaim(folio))
2780b5e84594SMatthew Wilcox (Oracle) folio_clear_reclaim(folio);
27816f31a5a2SMatthew Wilcox (Oracle) return mapping->a_ops->dirty_folio(mapping, folio);
27821da177e4SLinus Torvalds }
27833a3bae50SMatthew Wilcox (Oracle)
27843a3bae50SMatthew Wilcox (Oracle) return noop_dirty_folio(mapping, folio);
27851da177e4SLinus Torvalds }
2786b5e84594SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_mark_dirty);
27871da177e4SLinus Torvalds
27881da177e4SLinus Torvalds /*
27891da177e4SLinus Torvalds * set_page_dirty() is racy if the caller has no reference against
27901da177e4SLinus Torvalds * page->mapping->host, and if the page is unlocked. This is because another
27911da177e4SLinus Torvalds * CPU could truncate the page off the mapping and then free the mapping.
27921da177e4SLinus Torvalds *
27931da177e4SLinus Torvalds * Usually, the page _is_ locked, or the caller is a user-space process which
27941da177e4SLinus Torvalds * holds a reference on the inode by having an open file.
27951da177e4SLinus Torvalds *
27961da177e4SLinus Torvalds * In other cases, the page should be locked before running set_page_dirty().
27971da177e4SLinus Torvalds */
27981da177e4SLinus Torvalds int set_page_dirty_lock(struct page *page)
27991da177e4SLinus Torvalds {
28001da177e4SLinus Torvalds int ret;
28011da177e4SLinus Torvalds
28027eaceaccSJens Axboe lock_page(page);
28031da177e4SLinus Torvalds ret = set_page_dirty(page);
28041da177e4SLinus Torvalds unlock_page(page);
28051da177e4SLinus Torvalds return ret;
28061da177e4SLinus Torvalds }
28071da177e4SLinus Torvalds EXPORT_SYMBOL(set_page_dirty_lock);
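/*
 * Illustrative sketch (example_dma_complete() is a made-up name): a driver
 * that wrote into pinned user pages without holding the page lock would use
 * set_page_dirty_lock() when the I/O completes, before unpinning the pages.
 */
static void example_dma_complete(struct page **pages, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		set_page_dirty_lock(pages[i]);
		unpin_user_page(pages[i]);
	}
}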
28081da177e4SLinus Torvalds
28091da177e4SLinus Torvalds /*
281011f81becSTejun Heo * This cancels just the dirty bit on the kernel page itself; it does NOT
281111f81becSTejun Heo * actually remove dirty bits from any mmaps that may be around. It also
281211f81becSTejun Heo * leaves the page tagged dirty, so any sync activity will still find it on
281311f81becSTejun Heo * the dirty lists, and in particular, clear_page_dirty_for_io() will still
281411f81becSTejun Heo * look at the dirty bits in the VM.
281511f81becSTejun Heo *
281611f81becSTejun Heo * Doing this should *normally* only ever be done when a page is truncated,
281711f81becSTejun Heo * and is not actually mapped anywhere at all. However, fs/buffer.c does
281811f81becSTejun Heo * this when it notices that somebody has cleaned out all the buffers on a
281911f81becSTejun Heo * page without actually doing it through the VM. Can you say "ext3 is
282011f81becSTejun Heo * horribly ugly"? Thought you could.
282111f81becSTejun Heo */
2822fdaf532aSMatthew Wilcox (Oracle) void __folio_cancel_dirty(struct folio *folio)
282311f81becSTejun Heo {
2824fdaf532aSMatthew Wilcox (Oracle) struct address_space *mapping = folio_mapping(folio);
2825c4843a75SGreg Thelen
2826f56753acSChristoph Hellwig if (mapping_can_writeback(mapping)) {
2827682aa8e1STejun Heo struct inode *inode = mapping->host;
2828682aa8e1STejun Heo struct bdi_writeback *wb;
28292e898e4cSGreg Thelen struct wb_lock_cookie cookie = {};
2830c4843a75SGreg Thelen
2831fdaf532aSMatthew Wilcox (Oracle) folio_memcg_lock(folio);
28322e898e4cSGreg Thelen wb = unlocked_inode_to_wb_begin(inode, &cookie);
2833c4843a75SGreg Thelen
2834fdaf532aSMatthew Wilcox (Oracle) if (folio_test_clear_dirty(folio))
2835566d3362SHugh Dickins folio_account_cleaned(folio, wb);
2836c4843a75SGreg Thelen
28372e898e4cSGreg Thelen unlocked_inode_to_wb_end(inode, &cookie);
2838fdaf532aSMatthew Wilcox (Oracle) folio_memcg_unlock(folio);
2839c4843a75SGreg Thelen } else {
2840fdaf532aSMatthew Wilcox (Oracle) folio_clear_dirty(folio);
2841c4843a75SGreg Thelen }
284211f81becSTejun Heo }
2843fdaf532aSMatthew Wilcox (Oracle) EXPORT_SYMBOL(__folio_cancel_dirty);
284411f81becSTejun Heo
284511f81becSTejun Heo /*
28469350f20aSMatthew Wilcox (Oracle) * Clear a folio's dirty flag, while caring for dirty memory accounting.
28479350f20aSMatthew Wilcox (Oracle) * Returns true if the folio was previously dirty.
28481da177e4SLinus Torvalds *
28499350f20aSMatthew Wilcox (Oracle) * This is for preparing to put the folio under writeout. We leave
28509350f20aSMatthew Wilcox (Oracle) * the folio tagged as dirty in the xarray so that a concurrent
28519350f20aSMatthew Wilcox (Oracle) * write-for-sync can discover it via a PAGECACHE_TAG_DIRTY walk.
28529350f20aSMatthew Wilcox (Oracle) * The ->writepage implementation will run either folio_start_writeback()
28539350f20aSMatthew Wilcox (Oracle) * or folio_mark_dirty(), at which stage we bring the folio's dirty flag
28549350f20aSMatthew Wilcox (Oracle) * and xarray dirty tag back into sync.
28551da177e4SLinus Torvalds *
28569350f20aSMatthew Wilcox (Oracle) * This incoherency between the folio's dirty flag and xarray tag is
28579350f20aSMatthew Wilcox (Oracle) * unfortunate, but it only exists while the folio is locked.
28581da177e4SLinus Torvalds */
28599350f20aSMatthew Wilcox (Oracle) bool folio_clear_dirty_for_io(struct folio *folio)
28601da177e4SLinus Torvalds {
28619350f20aSMatthew Wilcox (Oracle) struct address_space *mapping = folio_mapping(folio);
28629350f20aSMatthew Wilcox (Oracle) bool ret = false;
28631da177e4SLinus Torvalds
28649350f20aSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
286579352894SNick Piggin
2866f56753acSChristoph Hellwig if (mapping && mapping_can_writeback(mapping)) {
2867682aa8e1STejun Heo struct inode *inode = mapping->host;
2868682aa8e1STejun Heo struct bdi_writeback *wb;
28692e898e4cSGreg Thelen struct wb_lock_cookie cookie = {};
2870682aa8e1STejun Heo
28717658cc28SLinus Torvalds /*
28727658cc28SLinus Torvalds * Yes, Virginia, this is indeed insane.
28737658cc28SLinus Torvalds *
28747658cc28SLinus Torvalds * We use this sequence to make sure that
28757658cc28SLinus Torvalds * (a) we account for dirty stats properly
28767658cc28SLinus Torvalds * (b) we tell the low-level filesystem to
28779350f20aSMatthew Wilcox (Oracle) * mark the whole folio dirty if it was
28787658cc28SLinus Torvalds * dirty in a pagetable. Only to then
28799350f20aSMatthew Wilcox (Oracle) * (c) clean the folio again and return 1 to
28807658cc28SLinus Torvalds * cause the writeback.
28817658cc28SLinus Torvalds *
28827658cc28SLinus Torvalds * This way we avoid all nasty races with the
28837658cc28SLinus Torvalds * dirty bit in multiple places and clearing
28847658cc28SLinus Torvalds * them concurrently from different threads.
28857658cc28SLinus Torvalds *
28869350f20aSMatthew Wilcox (Oracle) * Note! Normally the "folio_mark_dirty(folio)"
28877658cc28SLinus Torvalds * has no effect on the actual dirty bit - since
28887658cc28SLinus Torvalds * that will already usually be set. But we
28897658cc28SLinus Torvalds * need the side effects, and it can help us
28907658cc28SLinus Torvalds * avoid races.
28917658cc28SLinus Torvalds *
28929350f20aSMatthew Wilcox (Oracle) * We basically use the folio "master dirty bit"
28937658cc28SLinus Torvalds * as a serialization point for all the different
28947658cc28SLinus Torvalds * threads doing their things.
28957658cc28SLinus Torvalds */
28969350f20aSMatthew Wilcox (Oracle) if (folio_mkclean(folio))
28979350f20aSMatthew Wilcox (Oracle) folio_mark_dirty(folio);
289879352894SNick Piggin /*
289979352894SNick Piggin * We carefully synchronise fault handlers against
29009350f20aSMatthew Wilcox (Oracle) * installing a dirty pte and marking the folio dirty
290179352894SNick Piggin * at this point. We do this by having them hold the
29029350f20aSMatthew Wilcox (Oracle) * page lock while dirtying the folio, and folios are
29032d6d7f98SJohannes Weiner * always locked coming in here, so we get the desired
29042d6d7f98SJohannes Weiner * exclusion.
290579352894SNick Piggin */
29062e898e4cSGreg Thelen wb = unlocked_inode_to_wb_begin(inode, &cookie);
29079350f20aSMatthew Wilcox (Oracle) if (folio_test_clear_dirty(folio)) {
29089350f20aSMatthew Wilcox (Oracle) long nr = folio_nr_pages(folio);
29099350f20aSMatthew Wilcox (Oracle) lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
29109350f20aSMatthew Wilcox (Oracle) zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
29119350f20aSMatthew Wilcox (Oracle) wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
29129350f20aSMatthew Wilcox (Oracle) ret = true;
29131da177e4SLinus Torvalds }
29142e898e4cSGreg Thelen unlocked_inode_to_wb_end(inode, &cookie);
2915c4843a75SGreg Thelen return ret;
29161da177e4SLinus Torvalds }
29179350f20aSMatthew Wilcox (Oracle) return folio_test_clear_dirty(folio);
29187658cc28SLinus Torvalds }
29199350f20aSMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_clear_dirty_for_io);
29201da177e4SLinus Torvalds
2921633a2abbSJan Kara static void wb_inode_writeback_start(struct bdi_writeback *wb)
2922633a2abbSJan Kara {
2923633a2abbSJan Kara atomic_inc(&wb->writeback_inodes);
2924633a2abbSJan Kara }
2925633a2abbSJan Kara
2926633a2abbSJan Kara static void wb_inode_writeback_end(struct bdi_writeback *wb)
2927633a2abbSJan Kara {
2928f87904c0SKhazhismel Kumykov unsigned long flags;
2929633a2abbSJan Kara atomic_dec(&wb->writeback_inodes);
293045a2966fSJan Kara /*
293145a2966fSJan Kara * Make sure estimate of writeback throughput gets updated after
293245a2966fSJan Kara * writeback completed. We delay the update by BANDWIDTH_INTERVAL
293345a2966fSJan Kara * (which is the interval other bandwidth updates use for batching) so
293445a2966fSJan Kara * that if multiple inodes end writeback at a similar time, they get
293545a2966fSJan Kara * batched into one bandwidth update.
293645a2966fSJan Kara */
2937f87904c0SKhazhismel Kumykov spin_lock_irqsave(&wb->work_lock, flags);
2938f87904c0SKhazhismel Kumykov if (test_bit(WB_registered, &wb->state))
293945a2966fSJan Kara queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
2940f87904c0SKhazhismel Kumykov spin_unlock_irqrestore(&wb->work_lock, flags);
2941633a2abbSJan Kara }
2942633a2abbSJan Kara
2943269ccca3SMatthew Wilcox (Oracle) bool __folio_end_writeback(struct folio *folio)
29441da177e4SLinus Torvalds {
2945269ccca3SMatthew Wilcox (Oracle) long nr = folio_nr_pages(folio);
2946269ccca3SMatthew Wilcox (Oracle) struct address_space *mapping = folio_mapping(folio);
2947269ccca3SMatthew Wilcox (Oracle) bool ret;
29481da177e4SLinus Torvalds
2949269ccca3SMatthew Wilcox (Oracle) folio_memcg_lock(folio);
2950371a096eSHuang Ying if (mapping && mapping_use_writeback_tags(mapping)) {
295191018134STejun Heo struct inode *inode = mapping->host;
295291018134STejun Heo struct backing_dev_info *bdi = inode_to_bdi(inode);
29531da177e4SLinus Torvalds unsigned long flags;
29541da177e4SLinus Torvalds
2955b93b0163SMatthew Wilcox xa_lock_irqsave(&mapping->i_pages, flags);
2956269ccca3SMatthew Wilcox (Oracle) ret = folio_test_clear_writeback(folio);
295769cb51d1SPeter Zijlstra if (ret) {
2958269ccca3SMatthew Wilcox (Oracle) __xa_clear_mark(&mapping->i_pages, folio_index(folio),
29591da177e4SLinus Torvalds PAGECACHE_TAG_WRITEBACK);
2960823423efSChristoph Hellwig if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
296191018134STejun Heo struct bdi_writeback *wb = inode_to_wb(inode);
296291018134STejun Heo
2963269ccca3SMatthew Wilcox (Oracle) wb_stat_mod(wb, WB_WRITEBACK, -nr);
2964269ccca3SMatthew Wilcox (Oracle) __wb_writeout_add(wb, nr);
2965633a2abbSJan Kara if (!mapping_tagged(mapping,
2966633a2abbSJan Kara PAGECACHE_TAG_WRITEBACK))
2967633a2abbSJan Kara wb_inode_writeback_end(wb);
296804fbfdc1SPeter Zijlstra }
296969cb51d1SPeter Zijlstra }
29706c60d2b5SDave Chinner
29716c60d2b5SDave Chinner if (mapping->host && !mapping_tagged(mapping,
29726c60d2b5SDave Chinner PAGECACHE_TAG_WRITEBACK))
29736c60d2b5SDave Chinner sb_clear_inode_writeback(mapping->host);
29746c60d2b5SDave Chinner
2975b93b0163SMatthew Wilcox xa_unlock_irqrestore(&mapping->i_pages, flags);
29761da177e4SLinus Torvalds } else {
2977269ccca3SMatthew Wilcox (Oracle) ret = folio_test_clear_writeback(folio);
29781da177e4SLinus Torvalds }
297999b12e3dSWu Fengguang if (ret) {
2980269ccca3SMatthew Wilcox (Oracle) lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
2981269ccca3SMatthew Wilcox (Oracle) zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
2982269ccca3SMatthew Wilcox (Oracle) node_stat_mod_folio(folio, NR_WRITTEN, nr);
298399b12e3dSWu Fengguang }
2984269ccca3SMatthew Wilcox (Oracle) folio_memcg_unlock(folio);
29851da177e4SLinus Torvalds return ret;
29861da177e4SLinus Torvalds }
29871da177e4SLinus Torvalds
2988f143f1eaSMatthew Wilcox (Oracle) bool __folio_start_writeback(struct folio *folio, bool keep_write)
29891da177e4SLinus Torvalds {
2990f143f1eaSMatthew Wilcox (Oracle) long nr = folio_nr_pages(folio);
2991f143f1eaSMatthew Wilcox (Oracle) struct address_space *mapping = folio_mapping(folio);
2992f143f1eaSMatthew Wilcox (Oracle) bool ret;
2993f143f1eaSMatthew Wilcox (Oracle) int access_ret;
29941da177e4SLinus Torvalds
2995f143f1eaSMatthew Wilcox (Oracle) folio_memcg_lock(folio);
2996371a096eSHuang Ying if (mapping && mapping_use_writeback_tags(mapping)) {
2997f143f1eaSMatthew Wilcox (Oracle) XA_STATE(xas, &mapping->i_pages, folio_index(folio));
299891018134STejun Heo struct inode *inode = mapping->host;
299991018134STejun Heo struct backing_dev_info *bdi = inode_to_bdi(inode);
30001da177e4SLinus Torvalds unsigned long flags;
30011da177e4SLinus Torvalds
3002ff9c745bSMatthew Wilcox xas_lock_irqsave(&xas, flags);
3003ff9c745bSMatthew Wilcox xas_load(&xas);
3004f143f1eaSMatthew Wilcox (Oracle) ret = folio_test_set_writeback(folio);
300569cb51d1SPeter Zijlstra if (!ret) {
30066c60d2b5SDave Chinner bool on_wblist;
30076c60d2b5SDave Chinner
30086c60d2b5SDave Chinner on_wblist = mapping_tagged(mapping,
30096c60d2b5SDave Chinner PAGECACHE_TAG_WRITEBACK);
30106c60d2b5SDave Chinner
3011ff9c745bSMatthew Wilcox xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
3012633a2abbSJan Kara if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
3013633a2abbSJan Kara struct bdi_writeback *wb = inode_to_wb(inode);
3014633a2abbSJan Kara
3015f143f1eaSMatthew Wilcox (Oracle) wb_stat_mod(wb, WB_WRITEBACK, nr);
3016633a2abbSJan Kara if (!on_wblist)
3017633a2abbSJan Kara wb_inode_writeback_start(wb);
3018633a2abbSJan Kara }
30196c60d2b5SDave Chinner
30206c60d2b5SDave Chinner /*
3021f143f1eaSMatthew Wilcox (Oracle) * We can come through here when swapping
3022f143f1eaSMatthew Wilcox (Oracle) * anonymous folios, so we don't necessarily
3023f143f1eaSMatthew Wilcox (Oracle) * have an inode to track for sync.
30246c60d2b5SDave Chinner */
30256c60d2b5SDave Chinner if (mapping->host && !on_wblist)
30266c60d2b5SDave Chinner sb_mark_inode_writeback(mapping->host);
302769cb51d1SPeter Zijlstra }
3028f143f1eaSMatthew Wilcox (Oracle) if (!folio_test_dirty(folio))
3029ff9c745bSMatthew Wilcox xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
30301c8349a1SNamjae Jeon if (!keep_write)
3031ff9c745bSMatthew Wilcox xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
3032ff9c745bSMatthew Wilcox xas_unlock_irqrestore(&xas, flags);
30331da177e4SLinus Torvalds } else {
3034f143f1eaSMatthew Wilcox (Oracle) ret = folio_test_set_writeback(folio);
30351da177e4SLinus Torvalds }
30363a3c02ecSJohannes Weiner if (!ret) {
3037f143f1eaSMatthew Wilcox (Oracle) lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
3038f143f1eaSMatthew Wilcox (Oracle) zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
30393a3c02ecSJohannes Weiner }
3040f143f1eaSMatthew Wilcox (Oracle) folio_memcg_unlock(folio);
3041f143f1eaSMatthew Wilcox (Oracle) access_ret = arch_make_folio_accessible(folio);
3042f28d4363SClaudio Imbrenda /*
3043f28d4363SClaudio Imbrenda * If writeback has been triggered on a page that cannot be made
3044f28d4363SClaudio Imbrenda * accessible, it is too late to recover here.
3045f28d4363SClaudio Imbrenda */
3046f143f1eaSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(access_ret != 0, folio);
3047f28d4363SClaudio Imbrenda
30481da177e4SLinus Torvalds return ret;
30491da177e4SLinus Torvalds }
3050f143f1eaSMatthew Wilcox (Oracle) EXPORT_SYMBOL(__folio_start_writeback);
30511da177e4SLinus Torvalds
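/*
 * Editorial usage sketch, not part of the upstream file: a filesystem's
 * writeback path typically reaches __folio_start_writeback() through the
 * folio_start_writeback() wrapper (keep_write == false) after claiming the
 * folio for I/O with folio_clear_dirty_for_io().  The shape below is
 * illustrative only; myfs_submit_write() is a hypothetical helper whose
 * completion handler is expected to call folio_end_writeback():
 *
 *	static void myfs_writeback_folio(struct folio *folio)
 *	{
 *		folio_lock(folio);
 *		if (folio_clear_dirty_for_io(folio)) {
 *			folio_start_writeback(folio);
 *			folio_unlock(folio);
 *			myfs_submit_write(folio);
 *		} else {
 *			folio_unlock(folio);
 *		}
 *	}
 */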
3052490e016fSMatthew Wilcox (Oracle) /**
3053490e016fSMatthew Wilcox (Oracle) * folio_wait_writeback - Wait for a folio to finish writeback.
3054490e016fSMatthew Wilcox (Oracle) * @folio: The folio to wait for.
3055490e016fSMatthew Wilcox (Oracle) *
3056490e016fSMatthew Wilcox (Oracle) * If the folio is currently being written back to storage, wait for the
3057490e016fSMatthew Wilcox (Oracle) * I/O to complete.
3058490e016fSMatthew Wilcox (Oracle) *
3059490e016fSMatthew Wilcox (Oracle) * Context: Sleeps. Must be called in process context and with
3060490e016fSMatthew Wilcox (Oracle) * no spinlocks held. Caller should hold a reference on the folio.
3061490e016fSMatthew Wilcox (Oracle) * If the folio is not locked, writeback may start again after writeback
3062490e016fSMatthew Wilcox (Oracle) * has finished.
306319343b5bSYafang Shao */
3064490e016fSMatthew Wilcox (Oracle) void folio_wait_writeback(struct folio *folio)
306519343b5bSYafang Shao {
3066490e016fSMatthew Wilcox (Oracle) while (folio_test_writeback(folio)) {
3067b9b0ff61SMatthew Wilcox (Oracle) trace_folio_wait_writeback(folio, folio_mapping(folio));
3068101c0bf6SMatthew Wilcox (Oracle) folio_wait_bit(folio, PG_writeback);
306919343b5bSYafang Shao }
307019343b5bSYafang Shao }
3071490e016fSMatthew Wilcox (Oracle) EXPORT_SYMBOL_GPL(folio_wait_writeback);
307219343b5bSYafang Shao
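/*
 * Editorial usage sketch, not part of the upstream file: as the kernel-doc
 * above notes, callers that must be sure writeback cannot restart generally
 * lock the folio first and only then wait, as invalidation and truncation
 * paths do before removing a folio from the page cache.
 * myfs_remove_folio() below is a hypothetical helper:
 *
 *	folio_lock(folio);
 *	folio_wait_writeback(folio);
 *	myfs_remove_folio(folio);
 *	folio_unlock(folio);
 */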
3073490e016fSMatthew Wilcox (Oracle) /**
3074490e016fSMatthew Wilcox (Oracle) * folio_wait_writeback_killable - Wait for a folio to finish writeback.
3075490e016fSMatthew Wilcox (Oracle) * @folio: The folio to wait for.
3076490e016fSMatthew Wilcox (Oracle) *
3077490e016fSMatthew Wilcox (Oracle) * If the folio is currently being written back to storage, wait for the
3078490e016fSMatthew Wilcox (Oracle) * I/O to complete or a fatal signal to arrive.
3079490e016fSMatthew Wilcox (Oracle) *
3080490e016fSMatthew Wilcox (Oracle) * Context: Sleeps. Must be called in process context and with
3081490e016fSMatthew Wilcox (Oracle) * no spinlocks held. Caller should hold a reference on the folio.
3082490e016fSMatthew Wilcox (Oracle) * If the folio is not locked, writeback may start again after writeback
3083490e016fSMatthew Wilcox (Oracle) * has finished.
3084490e016fSMatthew Wilcox (Oracle) * Return: 0 on success, -EINTR if we get a fatal signal while waiting.
3085e5dbd332SMatthew Wilcox (Oracle) */
3086490e016fSMatthew Wilcox (Oracle) int folio_wait_writeback_killable(struct folio *folio)
3087e5dbd332SMatthew Wilcox (Oracle) {
3088490e016fSMatthew Wilcox (Oracle) while (folio_test_writeback(folio)) {
3089b9b0ff61SMatthew Wilcox (Oracle) trace_folio_wait_writeback(folio, folio_mapping(folio));
3090101c0bf6SMatthew Wilcox (Oracle) if (folio_wait_bit_killable(folio, PG_writeback))
3091e5dbd332SMatthew Wilcox (Oracle) return -EINTR;
3092e5dbd332SMatthew Wilcox (Oracle) }
3093e5dbd332SMatthew Wilcox (Oracle)
3094e5dbd332SMatthew Wilcox (Oracle) return 0;
3095e5dbd332SMatthew Wilcox (Oracle) }
3096490e016fSMatthew Wilcox (Oracle) EXPORT_SYMBOL_GPL(folio_wait_writeback_killable);
3097e5dbd332SMatthew Wilcox (Oracle)
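/*
 * Editorial usage sketch, not part of the upstream file: the killable variant
 * suits fault paths that should not keep a dying task blocked on I/O.  One
 * plausible ->page_mkwrite() fragment, with locking and error handling
 * abbreviated, backs out and lets the fault be retried on a fatal signal:
 *
 *	if (folio_wait_writeback_killable(folio))
 *		return VM_FAULT_RETRY;
 */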
30981d1d1a76SDarrick J. Wong /**
3099a49d0c50SMatthew Wilcox (Oracle) * folio_wait_stable() - wait for writeback to finish, if necessary.
3100a49d0c50SMatthew Wilcox (Oracle) * @folio: The folio to wait on.
31011d1d1a76SDarrick J. Wong *
3102a49d0c50SMatthew Wilcox (Oracle) * This function determines if the given folio is related to a backing
3103a49d0c50SMatthew Wilcox (Oracle) * device that requires folio contents to be held stable during writeback.
3104a49d0c50SMatthew Wilcox (Oracle) * If so, then it will wait for any pending writeback to complete.
3105a49d0c50SMatthew Wilcox (Oracle) *
3106a49d0c50SMatthew Wilcox (Oracle) * Context: Sleeps. Must be called in process context and with
3107a49d0c50SMatthew Wilcox (Oracle) * no spinlocks held. Caller should hold a reference on the folio.
3108a49d0c50SMatthew Wilcox (Oracle) * If the folio is not locked, writeback may start again after writeback
3109a49d0c50SMatthew Wilcox (Oracle) * has finished.
31101d1d1a76SDarrick J. Wong */
3111a49d0c50SMatthew Wilcox (Oracle) void folio_wait_stable(struct folio *folio)
31121d1d1a76SDarrick J. Wong {
31133461e3bfSChristoph Hellwig if (mapping_stable_writes(folio_mapping(folio)))
3114a49d0c50SMatthew Wilcox (Oracle) folio_wait_writeback(folio);
31151d1d1a76SDarrick J. Wong }
3116a49d0c50SMatthew Wilcox (Oracle) EXPORT_SYMBOL_GPL(folio_wait_stable);
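
/*
 * Editorial usage sketch, not part of the upstream file: paths that are about
 * to redirty data in place (buffered write, ->page_mkwrite) call this before
 * touching the folio, so that devices which checksum or integrity-protect
 * in-flight data never see the contents change underneath them.  Illustrative
 * fragment only:
 *
 *	folio_lock(folio);
 *	folio_wait_stable(folio);
 *	... copy new data into the folio and mark it dirty ...
 *	folio_unlock(folio);
 */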
3117