xref: /openbmc/linux/mm/page-writeback.c (revision ed5b43f15a8e86e3ae939b98bc161ee973ecedf2)
1 /*
2  * mm/page-writeback.c.
3  *
4  * Copyright (C) 2002, Linus Torvalds.
5  *
6  * Contains functions related to writing back dirty pages at the
7  * address_space level.
8  *
9  * 10Apr2002	akpm@zip.com.au
10  *		Initial version
11  */
12 
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/spinlock.h>
16 #include <linux/fs.h>
17 #include <linux/mm.h>
18 #include <linux/swap.h>
19 #include <linux/slab.h>
20 #include <linux/pagemap.h>
21 #include <linux/writeback.h>
22 #include <linux/init.h>
23 #include <linux/backing-dev.h>
24 #include <linux/blkdev.h>
25 #include <linux/mpage.h>
26 #include <linux/percpu.h>
27 #include <linux/notifier.h>
28 #include <linux/smp.h>
29 #include <linux/sysctl.h>
30 #include <linux/cpu.h>
31 #include <linux/syscalls.h>
32 
33 /*
34  * The maximum number of pages to write out in a single bdflush/kupdate
35  * operation.  We do this so we don't hold I_LOCK against an inode for
36  * enormous amounts of time, which would block a userspace task which has
37  * been forced to throttle against that inode.  Also, the code reevaluates
38  * the dirty each time it has written this many pages.
39  */
40 #define MAX_WRITEBACK_PAGES	1024
41 
42 /*
43  * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
44  * will look to see if it needs to force writeback or throttling.
45  */
46 static long ratelimit_pages = 32;
47 
48 static long total_pages;	/* The total number of pages in the machine. */
49 static int dirty_exceeded __cacheline_aligned_in_smp;	/* Dirty mem may be over limit */
50 
51 /*
52  * When balance_dirty_pages decides that the caller needs to perform some
53  * non-background writeback, this is how many pages it will attempt to write.
54  * It should be somewhat larger than ratelimit_pages to ensure that reasonably
55  * large amounts of I/O are submitted.
56  */
57 static inline long sync_writeback_pages(void)
58 {
59 	return ratelimit_pages + ratelimit_pages / 2;
60 }
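/*
 * Worked example (values assumed for illustration): with the boot-time
 * default ratelimit_pages = 32, sync_writeback_pages() returns 32 + 16 = 48.
 * Once set_ratelimit() (below) has capped ratelimit_pages at 4MB worth of
 * pages - 1024 pages with a 4K PAGE_CACHE_SIZE - this returns 1536 pages,
 * i.e. the "six megabyte chunks" mentioned above set_ratelimit().
 */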
61 
62 /* The following parameters are exported via /proc/sys/vm */
63 
64 /*
65  * Start background writeback (via pdflush) at this percentage
66  */
67 int dirty_background_ratio = 10;
68 
69 /*
70  * The generator of dirty data starts writeback at this percentage
71  */
72 int vm_dirty_ratio = 40;
73 
74 /*
75  * The interval between `kupdate'-style writebacks.  Kept in jiffies; exposed
76  * in centiseconds via /proc/sys/vm/dirty_writeback_centisecs.
77  */
78 int dirty_writeback_interval = 5 * HZ;
79 
80 /*
81  * The longest time for which data is allowed to remain dirty (kept in jiffies; exposed via /proc/sys/vm/dirty_expire_centisecs)
82  */
83 int dirty_expire_interval = 30 * HZ;
84 
85 /*
86  * Flag that makes the machine dump writes/reads and block dirtyings.
87  */
88 int block_dump;
89 
90 /*
91  * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
92  * a full sync is triggered after this time elapses without any disk activity.
93  */
94 int laptop_mode;
95 
96 EXPORT_SYMBOL(laptop_mode);
97 
98 /* End of sysctl-exported parameters */
99 
100 
101 static void background_writeout(unsigned long _min_pages);
102 
103 struct writeback_state
104 {
105 	unsigned long nr_dirty;
106 	unsigned long nr_unstable;
107 	unsigned long nr_mapped;
108 	unsigned long nr_writeback;
109 };
110 
111 static void get_writeback_state(struct writeback_state *wbs)
112 {
113 	wbs->nr_dirty = read_page_state(nr_dirty);
114 	wbs->nr_unstable = read_page_state(nr_unstable);
115 	wbs->nr_mapped = read_page_state(nr_mapped);
116 	wbs->nr_writeback = read_page_state(nr_writeback);
117 }
118 
119 /*
120  * Work out the current dirty-memory clamping and background writeout
121  * thresholds.
122  *
123  * The main aim here is to lower them aggressively if there is a lot of mapped
124  * memory around, to avoid stressing page reclaim with lots of unreclaimable
125  * pages.  It is better to clamp down on writers than to start swapping and
126  * performing lots of scanning.
127  *
128  * We only allow 1/2 of the currently-unmapped memory to be dirtied.
129  *
130  * We don't permit the clamping level to fall below 5% - that is getting rather
131  * excessive.
132  *
133  * We make sure that the background writeout level is below the adjusted
134  * clamping level.
135  */
136 static void
137 get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
138 		struct address_space *mapping)
139 {
140 	int background_ratio;		/* Percentages */
141 	int dirty_ratio;
142 	int unmapped_ratio;
143 	long background;
144 	long dirty;
145 	unsigned long available_memory = total_pages;
146 	struct task_struct *tsk;
147 
148 	get_writeback_state(wbs);
149 
150 #ifdef CONFIG_HIGHMEM
151 	/*
152 	 * If this mapping can only allocate from low memory,
153 	 * we exclude high memory from our count.
154 	 */
155 	if (mapping && !(mapping_gfp_mask(mapping) & __GFP_HIGHMEM))
156 		available_memory -= totalhigh_pages;
157 #endif
158 
159 
160 	unmapped_ratio = 100 - (wbs->nr_mapped * 100) / total_pages;
161 
162 	dirty_ratio = vm_dirty_ratio;
163 	if (dirty_ratio > unmapped_ratio / 2)
164 		dirty_ratio = unmapped_ratio / 2;
165 
166 	if (dirty_ratio < 5)
167 		dirty_ratio = 5;
168 
169 	background_ratio = dirty_background_ratio;
170 	if (background_ratio >= dirty_ratio)
171 		background_ratio = dirty_ratio / 2;
172 
173 	background = (background_ratio * available_memory) / 100;
174 	dirty = (dirty_ratio * available_memory) / 100;
175 	tsk = current;
176 	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
177 		background += background / 4;
178 		dirty += dirty / 4;
179 	}
180 	*pbackground = background;
181 	*pdirty = dirty;
182 }
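/*
 * Worked example (hypothetical numbers): on a machine with
 * total_pages = available_memory = 262144 (1GB of 4K pages), of which
 * nr_mapped = 131072 are mapped, and with the default ratios above:
 *
 *	unmapped_ratio   = 100 - (131072 * 100) / 262144 = 50
 *	dirty_ratio      = min(vm_dirty_ratio = 40, 50 / 2) = 25  (floor is 5)
 *	background_ratio = dirty_background_ratio = 10  (already < 25)
 *	dirty            = 25 * 262144 / 100 = 65536 pages (256MB)
 *	background       = 10 * 262144 / 100 = 26214 pages (~102MB)
 *
 * PF_LESS_THROTTLE and real-time callers get both limits raised by a
 * further 25%.
 */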
183 
184 /*
185  * balance_dirty_pages() must be called by processes which are generating dirty
186  * data.  It looks at the number of dirty pages in the machine and will force
187  * the caller to perform writeback if the system is over `vm_dirty_ratio'.
188  * If we're over `background_thresh' then pdflush is woken to perform some
189  * writeout.
190  */
191 static void balance_dirty_pages(struct address_space *mapping)
192 {
193 	struct writeback_state wbs;
194 	long nr_reclaimable;
195 	long background_thresh;
196 	long dirty_thresh;
197 	unsigned long pages_written = 0;
198 	unsigned long write_chunk = sync_writeback_pages();
199 
200 	struct backing_dev_info *bdi = mapping->backing_dev_info;
201 
202 	for (;;) {
203 		struct writeback_control wbc = {
204 			.bdi		= bdi,
205 			.sync_mode	= WB_SYNC_NONE,
206 			.older_than_this = NULL,
207 			.nr_to_write	= write_chunk,
208 		};
209 
210 		get_dirty_limits(&wbs, &background_thresh,
211 					&dirty_thresh, mapping);
212 		nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
213 		if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
214 			break;
215 
216 		if (!dirty_exceeded)
217 			dirty_exceeded = 1;
218 
219 		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
220 		 * Unstable writes are a feature of certain networked
221 		 * filesystems (e.g. NFS) in which data may have been
222 		 * written to the server's write cache, but has not yet
223 		 * been flushed to permanent storage.
224 		 */
225 		if (nr_reclaimable) {
226 			writeback_inodes(&wbc);
227 			get_dirty_limits(&wbs, &background_thresh,
228 					&dirty_thresh, mapping);
229 			nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
230 			if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
231 				break;
232 			pages_written += write_chunk - wbc.nr_to_write;
233 			if (pages_written >= write_chunk)
234 				break;		/* We've done our duty */
235 		}
236 		blk_congestion_wait(WRITE, HZ/10);
237 	}
238 
239 	if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh && dirty_exceeded)
240 		dirty_exceeded = 0;
241 
242 	if (writeback_in_progress(bdi))
243 		return;		/* pdflush is already working this queue */
244 
245 	/*
246 	 * In laptop mode, we wait until hitting the higher threshold before
247 	 * starting background writeout, and then write out all the way down
248 	 * to the lower threshold.  So slow writers cause minimal disk activity.
249 	 *
250 	 * In normal mode, we start background writeout at the lower
251 	 * background_thresh, to keep the amount of dirty memory low.
252 	 */
253 	if ((laptop_mode && pages_written) ||
254 	     (!laptop_mode && (nr_reclaimable > background_thresh)))
255 		pdflush_operation(background_writeout, 0);
256 }
257 
258 /**
259  * balance_dirty_pages_ratelimited - balance dirty memory state
260  * @mapping: address_space which was dirtied
261  *
262  * Processes which are dirtying memory should call in here once for each page
263  * which was newly dirtied.  The function will periodically check the system's
264  * dirty state and will initiate writeback if needed.
265  *
266  * On really big machines, get_writeback_state is expensive, so try to avoid
267  * calling it too often (ratelimiting).  But once we're over the dirty memory
268  * limit we decrease the ratelimiting by a lot, to prevent individual processes
269  * from overshooting the limit by (ratelimit_pages) each.
270  */
271 void balance_dirty_pages_ratelimited(struct address_space *mapping)
272 {
273 	static DEFINE_PER_CPU(int, ratelimits) = 0;
274 	long ratelimit;
275 
276 	ratelimit = ratelimit_pages;
277 	if (dirty_exceeded)
278 		ratelimit = 8;
279 
280 	/*
281 	 * Check the rate limiting. Also, we do not want to throttle real-time
282 	 * tasks in balance_dirty_pages(). Period.
283 	 */
284 	if (get_cpu_var(ratelimits)++ >= ratelimit) {
285 		__get_cpu_var(ratelimits) = 0;
286 		put_cpu_var(ratelimits);
287 		balance_dirty_pages(mapping);
288 		return;
289 	}
290 	put_cpu_var(ratelimits);
291 }
292 EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
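/*
 * Minimal sketch of a caller (hypothetical, but it mirrors the generic
 * buffered-write path): call once for every page that has just been
 * dirtied, and let the ratelimiting above decide when to actually enter
 * balance_dirty_pages().
 *
 *	for each page written:
 *		set_page_dirty(page);
 *		unlock_page(page);
 *		page_cache_release(page);
 *		balance_dirty_pages_ratelimited(mapping);
 */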
293 
294 void throttle_vm_writeout(void)
295 {
296 	struct writeback_state wbs;
297 	long background_thresh;
298 	long dirty_thresh;
299 
300 	for ( ; ; ) {
301 		get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
302 
303 		/*
304 		 * Boost the allowable dirty threshold a bit for page
305 		 * allocators so they don't get DoS'ed by heavy writers
306 		 */
307 		dirty_thresh += dirty_thresh / 10;	/* wheeee... */
308 
309 		if (wbs.nr_unstable + wbs.nr_writeback <= dirty_thresh)
310 			break;
311 		blk_congestion_wait(WRITE, HZ/10);
312 	}
313 }
314 
315 
316 /*
317  * Write back at least _min_pages, and keep writing until the amount of dirty
318  * memory is less than the background threshold, or until we're all clean.
319  */
320 static void background_writeout(unsigned long _min_pages)
321 {
322 	long min_pages = _min_pages;
323 	struct writeback_control wbc = {
324 		.bdi		= NULL,
325 		.sync_mode	= WB_SYNC_NONE,
326 		.older_than_this = NULL,
327 		.nr_to_write	= 0,
328 		.nonblocking	= 1,
329 	};
330 
331 	for ( ; ; ) {
332 		struct writeback_state wbs;
333 		long background_thresh;
334 		long dirty_thresh;
335 
336 		get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
337 		if (wbs.nr_dirty + wbs.nr_unstable < background_thresh
338 				&& min_pages <= 0)
339 			break;
340 		wbc.encountered_congestion = 0;
341 		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
342 		wbc.pages_skipped = 0;
343 		writeback_inodes(&wbc);
344 		min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
345 		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
346 			/* Wrote less than expected */
347 			blk_congestion_wait(WRITE, HZ/10);
348 			if (!wbc.encountered_congestion)
349 				break;
350 		}
351 	}
352 }
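/*
 * Accounting example for the loop above (illustrative numbers): each pass
 * asks writeback_inodes() for MAX_WRITEBACK_PAGES (1024) pages, and
 * 1024 - wbc.nr_to_write of them actually get written.  A request with
 * _min_pages = 3000 therefore needs three full 1024-page passes
 * (3 * 1024 = 3072) before the min_pages half of the exit condition is
 * satisfied; the loop also stops early if writeback runs out of dirty
 * data without hitting congestion.
 */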
353 
354 /*
355  * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
356  * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
357  * -1 if all pdflush threads were busy.
358  */
359 int wakeup_pdflush(long nr_pages)
360 {
361 	if (nr_pages == 0) {
362 		struct writeback_state wbs;
363 
364 		get_writeback_state(&wbs);
365 		nr_pages = wbs.nr_dirty + wbs.nr_unstable;
366 	}
367 	return pdflush_operation(background_writeout, nr_pages);
368 }
369 
370 static void wb_timer_fn(unsigned long unused);
371 static void laptop_timer_fn(unsigned long unused);
372 
373 static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
374 static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);
375 
376 /*
377  * Periodic writeback of "old" data.
378  *
379  * Define "old": the first time one of an inode's pages is dirtied, we mark the
380  * dirtying-time in the inode's address_space.  So this periodic writeback code
381  * just walks the superblock inode list, writing back any inodes which are
382  * older than a specific point in time.
383  *
384  * Try to run once per dirty_writeback_interval.  But if a writeback event
385  * takes longer than a dirty_writeback_interval, then leave a
386  * one-second gap.
387  *
388  * older_than_this takes precedence over nr_to_write.  So we'll only write back
389  * all dirty pages if they are all attached to "old" mappings.
390  */
391 static void wb_kupdate(unsigned long arg)
392 {
393 	unsigned long oldest_jif;
394 	unsigned long start_jif;
395 	unsigned long next_jif;
396 	long nr_to_write;
397 	struct writeback_state wbs;
398 	struct writeback_control wbc = {
399 		.bdi		= NULL,
400 		.sync_mode	= WB_SYNC_NONE,
401 		.older_than_this = &oldest_jif,
402 		.nr_to_write	= 0,
403 		.nonblocking	= 1,
404 		.for_kupdate	= 1,
405 	};
406 
407 	sync_supers();
408 
409 	get_writeback_state(&wbs);
410 	oldest_jif = jiffies - dirty_expire_interval;
411 	start_jif = jiffies;
412 	next_jif = start_jif + dirty_writeback_interval;
413 	nr_to_write = wbs.nr_dirty + wbs.nr_unstable +
414 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
415 	while (nr_to_write > 0) {
416 		wbc.encountered_congestion = 0;
417 		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
418 		writeback_inodes(&wbc);
419 		if (wbc.nr_to_write > 0) {
420 			if (wbc.encountered_congestion)
421 				blk_congestion_wait(WRITE, HZ/10);
422 			else
423 				break;	/* All the old data is written */
424 		}
425 		nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
426 	}
427 	if (time_before(next_jif, jiffies + HZ))
428 		next_jif = jiffies + HZ;
429 	if (dirty_writeback_interval)
430 		mod_timer(&wb_timer, next_jif);
431 }
432 
433 /*
434  * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
435  */
436 int dirty_writeback_centisecs_handler(ctl_table *table, int write,
437 		struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
438 {
439 	proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
440 	if (dirty_writeback_interval) {
441 		mod_timer(&wb_timer,
442 			jiffies + dirty_writeback_interval);
443 	} else {
444 		del_timer(&wb_timer);
445 	}
446 	return 0;
447 }
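/*
 * Example of the unit conversion (assuming HZ is a multiple of USER_HZ,
 * which is 100): proc_dointvec_userhz_jiffies() takes the value written to
 * /proc/sys/vm/dirty_writeback_centisecs in centiseconds and stores jiffies,
 * so writing 500 stores 500 * (HZ / 100) = 5 * HZ and kupdate-style
 * writeback then runs every 5 seconds.  Writing 0 deletes the timer and
 * disables periodic writeback.
 */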
448 
449 static void wb_timer_fn(unsigned long unused)
450 {
451 	if (pdflush_operation(wb_kupdate, 0) < 0)
452 		mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
453 }
454 
455 static void laptop_flush(unsigned long unused)
456 {
457 	sys_sync();
458 }
459 
460 static void laptop_timer_fn(unsigned long unused)
461 {
462 	pdflush_operation(laptop_flush, 0);
463 }
464 
465 /*
466  * We've spun up the disk and we're in laptop mode: schedule writeback
467  * of all dirty data a few seconds from now.  If the flush is already scheduled
468  * then push it back - the user is still using the disk.
469  */
470 void laptop_io_completion(void)
471 {
472 	mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
473 }
474 
475 /*
476  * We're in laptop mode and we've just synced. The sync's writes will have
477  * caused another writeback to be scheduled by laptop_io_completion.
478  * Nothing needs to be written back anymore, so we unschedule the writeback.
479  */
480 void laptop_sync_completion(void)
481 {
482 	del_timer(&laptop_mode_wb_timer);
483 }
484 
485 /*
486  * If ratelimit_pages is too high then we can get into dirty-data overload
487  * if a large number of processes all perform writes at the same time.
488  * If it is too low then SMP machines will call the (expensive)
489  * get_writeback_state too often.
490  *
491  * Here we set ratelimit_pages to a level which ensures that when all CPUs are
492  * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
493  * thresholds before writeback cuts in.
494  *
495  * But the limit should not be set too high, because it also controls the
496  * amount of memory which the balance_dirty_pages() caller has to write back.
497  * If this is too large then the caller will block on the IO queue all the
498  * time.  So limit it to four megabytes - the balance_dirty_pages() caller
499  * will write six megabyte chunks, max.
500  */
501 
502 static void set_ratelimit(void)
503 {
504 	ratelimit_pages = total_pages / (num_online_cpus() * 32);
505 	if (ratelimit_pages < 16)
506 		ratelimit_pages = 16;
507 	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
508 		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
509 }
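/*
 * Worked example (hypothetical machine): with total_pages = 262144
 * (1GB of 4K pages) and 4 online CPUs,
 *
 *	ratelimit_pages = 262144 / (4 * 32) = 2048
 *
 * but 2048 * PAGE_CACHE_SIZE is 8MB, so the 4MB cap brings it back down to
 * 1024 pages.  Each CPU then calls balance_dirty_pages() after dirtying
 * 4MB, and sync_writeback_pages() works out to 1536 pages (6MB).
 */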
510 
511 static int
512 ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
513 {
514 	set_ratelimit();
515 	return 0;
516 }
517 
518 static struct notifier_block ratelimit_nb = {
519 	.notifier_call	= ratelimit_handler,
520 	.next		= NULL,
521 };
522 
523 /*
524  * If the machine has a large highmem:lowmem ratio then scale back the default
525  * dirty memory thresholds: allowing too much dirty highmem pins an excessive
526  * number of buffer_heads.
527  */
528 void __init page_writeback_init(void)
529 {
530 	long buffer_pages = nr_free_buffer_pages();
531 	long correction;
532 
533 	total_pages = nr_free_pagecache_pages();
534 
535 	correction = (100 * 4 * buffer_pages) / total_pages;
536 
537 	if (correction < 100) {
538 		dirty_background_ratio *= correction;
539 		dirty_background_ratio /= 100;
540 		vm_dirty_ratio *= correction;
541 		vm_dirty_ratio /= 100;
542 
543 		if (dirty_background_ratio <= 0)
544 			dirty_background_ratio = 1;
545 		if (vm_dirty_ratio <= 0)
546 			vm_dirty_ratio = 1;
547 	}
548 	mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
549 	set_ratelimit();
550 	register_cpu_notifier(&ratelimit_nb);
551 }
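/*
 * Worked example of the highmem correction (hypothetical sizes): with
 * buffer_pages = 131072 (512MB of lowmem usable for buffer_heads) and
 * total_pages = 1048576 (4GB), correction = (100 * 4 * 131072) / 1048576
 * = 50, so dirty_background_ratio drops from 10 to 5 and vm_dirty_ratio
 * from 40 to 20.  When lowmem is at least a quarter of total memory the
 * correction is >= 100 and the defaults are left alone.
 */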
552 
553 int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
554 {
555 	int ret;
556 
557 	if (wbc->nr_to_write <= 0)
558 		return 0;
559 	wbc->for_writepages = 1;
560 	if (mapping->a_ops->writepages)
561 		ret = mapping->a_ops->writepages(mapping, wbc);
562 	else
563 		ret = generic_writepages(mapping, wbc);
564 	wbc->for_writepages = 0;
565 	return ret;
566 }
567 
568 /**
569  * write_one_page - write out a single page and optionally wait on I/O
570  *
571  * @page: the page to write
572  * @wait: if true, wait on writeout
573  *
574  * The page must be locked by the caller and will be unlocked upon return.
575  *
576  * write_one_page() returns a negative error code if I/O failed.
577  */
578 int write_one_page(struct page *page, int wait)
579 {
580 	struct address_space *mapping = page->mapping;
581 	int ret = 0;
582 	struct writeback_control wbc = {
583 		.sync_mode = WB_SYNC_ALL,
584 		.nr_to_write = 1,
585 	};
586 
587 	BUG_ON(!PageLocked(page));
588 
589 	if (wait)
590 		wait_on_page_writeback(page);
591 
592 	if (clear_page_dirty_for_io(page)) {
593 		page_cache_get(page);
594 		ret = mapping->a_ops->writepage(page, &wbc);
595 		if (ret == 0 && wait) {
596 			wait_on_page_writeback(page);
597 			if (PageError(page))
598 				ret = -EIO;
599 		}
600 		page_cache_release(page);
601 	} else {
602 		unlock_page(page);
603 	}
604 	return ret;
605 }
606 EXPORT_SYMBOL(write_one_page);
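/*
 * Minimal usage sketch (hypothetical caller): the page must be locked on
 * entry and is unlocked on return, whether or not I/O was started.
 *
 *	lock_page(page);
 *	err = write_one_page(page, 1);
 *	if (err)
 *		handle the -EIO or writepage failure
 */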
607 
608 /*
609  * For address_spaces which do not use buffers.  Just tag the page as dirty in
610  * its radix tree.
611  *
612  * This is also used when a single buffer is being dirtied: we want to set the
613  * page dirty in that case, but not all the buffers.  This is a "bottom-up"
614  * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
615  *
616  * Most callers have locked the page, which pins the address_space in memory.
617  * But zap_pte_range() does not lock the page; in that case the
618  * mapping is pinned by the vma's ->vm_file reference.
619  *
620  * We take care to handle the case where the page was truncated from the
621  * mapping by re-checking page_mapping() inside tree_lock.
622  */
623 int __set_page_dirty_nobuffers(struct page *page)
624 {
625 	int ret = 0;
626 
627 	if (!TestSetPageDirty(page)) {
628 		struct address_space *mapping = page_mapping(page);
629 		struct address_space *mapping2;
630 
631 		if (mapping) {
632 			write_lock_irq(&mapping->tree_lock);
633 			mapping2 = page_mapping(page);
634 			if (mapping2) { /* Race with truncate? */
635 				BUG_ON(mapping2 != mapping);
636 				if (mapping_cap_account_dirty(mapping))
637 					inc_page_state(nr_dirty);
638 				radix_tree_tag_set(&mapping->page_tree,
639 					page_index(page), PAGECACHE_TAG_DIRTY);
640 			}
641 			write_unlock_irq(&mapping->tree_lock);
642 			if (mapping->host) {
643 				/* !PageAnon && !swapper_space */
644 				__mark_inode_dirty(mapping->host,
645 							I_DIRTY_PAGES);
646 			}
647 		}
648 	}
649 	return ret;
650 }
651 EXPORT_SYMBOL(__set_page_dirty_nobuffers);
652 
653 /*
654  * When a writepage implementation decides that it doesn't want to write this
655  * page for some reason, it should redirty the locked page via
656  * redirty_page_for_writepage(), then unlock the page and return 0.
657  */
658 int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
659 {
660 	wbc->pages_skipped++;
661 	return __set_page_dirty_nobuffers(page);
662 }
663 EXPORT_SYMBOL(redirty_page_for_writepage);
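/*
 * Minimal sketch of a ->writepage implementation using this (hypothetical,
 * not taken from any particular filesystem): if the page cannot be written
 * right now (say the caller is nonblocking and writing would block), put it
 * back on the dirty lists and tell the caller nothing went wrong.
 *
 *	static int example_writepage(struct page *page,
 *				     struct writeback_control *wbc)
 *	{
 *		if (wbc->nonblocking && would_block())	(would_block is made up)
 *		{
 *			redirty_page_for_writepage(wbc, page);
 *			unlock_page(page);
 *			return 0;
 *		}
 *		otherwise start the I/O as usual
 *	}
 */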
664 
665 /*
666  * If the mapping doesn't provide a set_page_dirty a_op, then
667  * just fall through and assume that it wants buffer_heads.
668  */
669 int fastcall set_page_dirty(struct page *page)
670 {
671 	struct address_space *mapping = page_mapping(page);
672 
673 	if (likely(mapping)) {
674 		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
675 		if (spd)
676 			return (*spd)(page);
677 		return __set_page_dirty_buffers(page);
678 	}
679 	if (!PageDirty(page))
680 		SetPageDirty(page);
681 	return 0;
682 }
683 EXPORT_SYMBOL(set_page_dirty);
684 
685 /*
686  * set_page_dirty() is racy if the caller has no reference against
687  * page->mapping->host, and if the page is unlocked.  This is because another
688  * CPU could truncate the page off the mapping and then free the mapping.
689  *
690  * Usually, the page _is_ locked, or the caller is a user-space process which
691  * holds a reference on the inode by having an open file.
692  *
693  * In other cases, the page should be locked before running set_page_dirty().
694  */
695 int set_page_dirty_lock(struct page *page)
696 {
697 	int ret;
698 
699 	lock_page(page);
700 	ret = set_page_dirty(page);
701 	unlock_page(page);
702 	return ret;
703 }
704 EXPORT_SYMBOL(set_page_dirty_lock);
705 
706 /*
707  * Clear a page's dirty flag, while caring for dirty memory accounting.
708  * Returns true if the page was previously dirty.
709  */
710 int test_clear_page_dirty(struct page *page)
711 {
712 	struct address_space *mapping = page_mapping(page);
713 	unsigned long flags;
714 
715 	if (mapping) {
716 		write_lock_irqsave(&mapping->tree_lock, flags);
717 		if (TestClearPageDirty(page)) {
718 			radix_tree_tag_clear(&mapping->page_tree,
719 						page_index(page),
720 						PAGECACHE_TAG_DIRTY);
721 			write_unlock_irqrestore(&mapping->tree_lock, flags);
722 			if (mapping_cap_account_dirty(mapping))
723 				dec_page_state(nr_dirty);
724 			return 1;
725 		}
726 		write_unlock_irqrestore(&mapping->tree_lock, flags);
727 		return 0;
728 	}
729 	return TestClearPageDirty(page);
730 }
731 EXPORT_SYMBOL(test_clear_page_dirty);
732 
733 /*
734  * Clear a page's dirty flag, while caring for dirty memory accounting.
735  * Returns true if the page was previously dirty.
736  *
737  * This is for preparing to put the page under writeout.  We leave the page
738  * tagged as dirty in the radix tree so that a concurrent write-for-sync
739  * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
740  * implementation will run either set_page_writeback() or set_page_dirty(),
741  * at which stage we bring the page's dirty flag and radix-tree dirty tag
742  * back into sync.
743  *
744  * This incoherency between the page's dirty flag and radix-tree tag is
745  * unfortunate, but it only exists while the page is locked.
746  */
747 int clear_page_dirty_for_io(struct page *page)
748 {
749 	struct address_space *mapping = page_mapping(page);
750 
751 	if (mapping) {
752 		if (TestClearPageDirty(page)) {
753 			if (mapping_cap_account_dirty(mapping))
754 				dec_page_state(nr_dirty);
755 			return 1;
756 		}
757 		return 0;
758 	}
759 	return TestClearPageDirty(page);
760 }
761 EXPORT_SYMBOL(clear_page_dirty_for_io);
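/*
 * Sketch of the writeout sequence this is designed for (compare
 * write_one_page() above); the helpers named are the usual page-cache
 * ones, the control flow is illustrative only:
 *
 *	if (clear_page_dirty_for_io(page)) {
 *		set_page_writeback(page);
 *		unlock_page(page);
 *		submit the I/O; completion ends with end_page_writeback()
 *	}
 */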
762 
763 int test_clear_page_writeback(struct page *page)
764 {
765 	struct address_space *mapping = page_mapping(page);
766 	int ret;
767 
768 	if (mapping) {
769 		unsigned long flags;
770 
771 		write_lock_irqsave(&mapping->tree_lock, flags);
772 		ret = TestClearPageWriteback(page);
773 		if (ret)
774 			radix_tree_tag_clear(&mapping->page_tree,
775 						page_index(page),
776 						PAGECACHE_TAG_WRITEBACK);
777 		write_unlock_irqrestore(&mapping->tree_lock, flags);
778 	} else {
779 		ret = TestClearPageWriteback(page);
780 	}
781 	return ret;
782 }
783 
784 int test_set_page_writeback(struct page *page)
785 {
786 	struct address_space *mapping = page_mapping(page);
787 	int ret;
788 
789 	if (mapping) {
790 		unsigned long flags;
791 
792 		write_lock_irqsave(&mapping->tree_lock, flags);
793 		ret = TestSetPageWriteback(page);
794 		if (!ret)
795 			radix_tree_tag_set(&mapping->page_tree,
796 						page_index(page),
797 						PAGECACHE_TAG_WRITEBACK);
798 		if (!PageDirty(page))
799 			radix_tree_tag_clear(&mapping->page_tree,
800 						page_index(page),
801 						PAGECACHE_TAG_DIRTY);
802 		write_unlock_irqrestore(&mapping->tree_lock, flags);
803 	} else {
804 		ret = TestSetPageWriteback(page);
805 	}
806 	return ret;
807 
808 }
809 EXPORT_SYMBOL(test_set_page_writeback);
810 
811 /*
812  * Return true if any of the pages in the mapping are tagged with the
813  * passed tag.
814  */
815 int mapping_tagged(struct address_space *mapping, int tag)
816 {
817 	unsigned long flags;
818 	int ret;
819 
820 	read_lock_irqsave(&mapping->tree_lock, flags);
821 	ret = radix_tree_tagged(&mapping->page_tree, tag);
822 	read_unlock_irqrestore(&mapping->tree_lock, flags);
823 	return ret;
824 }
825 EXPORT_SYMBOL(mapping_tagged);
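/*
 * Typical use (illustrative caller): a cheap check for whether an
 * address_space still has any dirty pages at all, without walking them:
 *
 *	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
 *		there is still data to write back
 */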
826