/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	akpm@zip.com.au
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

/**
 *	__mark_inode_dirty -	internal function
 *	@inode: inode to mark
 *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *	Mark an inode as dirty. Callers should use mark_inode_dirty or
 *	mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump)) {
		struct dentry *dentry = NULL;
		const char *name = "?";

		if (!list_empty(&inode->i_dentry)) {
			dentry = list_entry(inode->i_dentry.next,
					    struct dentry, d_alias);
			if (dentry && dentry->d_name.name)
				name = (const char *) dentry->d_name.name;
		}

		if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev"))
			printk(KERN_DEBUG
			       "%s(%d): dirtied inode %lu (%s) on %s\n",
			       current->comm, task_pid_nr(current), inode->i_ino,
			       name, inode->i_sb->s_id);
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on s_dirty/s_io/s_more_io, don't
		 * reposition it (that would break s_dirty time-ordering).
		 */
		if (!was_dirty) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &sb->s_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}

EXPORT_SYMBOL(__mark_inode_dirty);

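/*
 * Illustrative usage sketch (not part of the original file): a filesystem
 * that updates in-core metadata typically dirties the inode through the
 * mark_inode_dirty*() wrappers rather than calling __mark_inode_dirty()
 * directly.  example_touch_mtime() is a hypothetical helper.
 */
static inline void example_touch_mtime(struct inode *inode)
{
	inode->i_mtime = current_fs_time(inode->i_sb);
	/* Metadata changed: queue the inode on its superblock's s_dirty list */
	mark_inode_dirty_sync(inode);
}
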
static int write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, sync);
	return 0;
}

/*
 * Redirty an inode: set its when-it-was-dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the s_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (!list_empty(&sb->s_dirty)) {
		struct inode *tail_inode;

		tail_inode = list_entry(sb->s_dirty.next, struct inode, i_list);
		if (!time_after_eq(inode->dirtied_when,
				tail_inode->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &sb->s_dirty);
}

/*
 * Requeue an inode for re-scanning after the sb->s_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	list_move(&inode->i_list, &inode->i_sb->s_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

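/*
 * Illustrative counterpart sketch: inode_sync_wait() (a static inline in
 * include/linux/fs.h in this era) is roughly the waiter side of the
 * wake_up_bit() above -- it sleeps until __I_SYNC clears.  The helper body
 * here is a paraphrase under that assumption, not a verbatim copy.
 */
static void example_wait_on_sync(struct inode *inode)
{
	/* Sleep until __sync_single_inode() clears I_SYNC and wakes us */
	wait_on_bit(&inode->i_state, __I_SYNC, inode_wait,
		    TASK_UNINTERRUPTIBLE);
}
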
/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	while (!list_empty(delaying_queue)) {
		struct inode *inode = list_entry(delaying_queue->prev,
						struct inode, i_list);
		if (older_than_this &&
			time_after(inode->dirtied_when, *older_than_this))
			break;
		list_move(&inode->i_list, dispatch_queue);
	}
}

/*
 * Queue all expired dirty inodes for I/O, eldest first.
 */
static void queue_io(struct super_block *sb,
				unsigned long *older_than_this)
{
	list_splice_init(&sb->s_more_io, sb->s_io.prev);
	move_expired_inodes(&sb->s_dirty, &sb->s_io, older_than_this);
}

210 
211 int sb_has_dirty_inodes(struct super_block *sb)
212 {
213 	return !list_empty(&sb->s_dirty) ||
214 	       !list_empty(&sb->s_io) ||
215 	       !list_empty(&sb->s_more_io);
216 }
217 EXPORT_SYMBOL(sb_has_dirty_inodes);
218 
/*
 * Write a single inode's dirty pages and inode data out to disk.
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
__sync_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	unsigned dirty;
	struct address_space *mapping = inode->i_mapping;
	int wait = wbc->sync_mode == WB_SYNC_ALL;
	int ret;

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wait);
		if (ret == 0)
			ret = err;
	}

	if (wait) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & I_FREEING)) {
		if (!(inode->i_state & I_DIRTY) &&
		    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.  Redirty
			 * the inode; move it from s_io onto s_more_io/s_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of s_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had good
			 * reasons for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to s_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Someone redirtied the inode while we were writing
			 * back the pages.
			 */
			redirty_tail(inode);
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean and in use
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean and unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via a syscall against
 * an fd) or the inode has I_WILL_FREE set (via generic_forget_inode).
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	wait_queue_head_t *wqh;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_SYNC)) {
		/*
		 * We're skipping this inode because it's locked, and we're not
		 * doing writeback-for-data-integrity.  Move it to s_more_io so
		 * that writeback can proceed with the other inodes on s_io.
		 * We'll have another go at writing back this inode once we
		 * have completed a full scan of s_io.
		 */
		requeue_io(inode);
		return 0;
	}

	/*
	 * It's a data-integrity sync.  We must wait.
	 */
	if (inode->i_state & I_SYNC) {
		DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);

		wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
		do {
			spin_unlock(&inode_lock);
			__wait_on_bit(wqh, &wq, inode_wait,
							TASK_UNINTERRUPTIBLE);
			spin_lock(&inode_lock);
		} while (inode->i_state & I_SYNC);
	}
	return __sync_single_inode(inode, wbc);
}

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * WB_SYNC_HOLD is a hack for sys_sync(): reattach the inode to sb->s_dirty so
 * that it can be located for waiting on in __writeback_single_inode().
 *
 * Called under inode_lock.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * FIXME: this linear search could get expensive with many filesystems.  But
 * how to fix?  We need to go from an address_space to all inodes which share
 * a queue with that address_space.  (Easy: have a global "dirty superblocks"
 * list).
 *
 * The inodes to be written are parked on sb->s_io.  They are moved back onto
 * sb->s_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void
sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
{
	const unsigned long start = jiffies;	/* livelock avoidance */

	if (!wbc->for_kupdate || list_empty(&sb->s_io))
		queue_io(sb, wbc->older_than_this);

	while (!list_empty(&sb->s_io)) {
		struct inode *inode = list_entry(sb->s_io.prev,
						struct inode, i_list);
		struct address_space *mapping = inode->i_mapping;
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		long pages_skipped;

		if (!bdi_cap_writeback_dirty(bdi)) {
			redirty_tail(inode);
			if (sb_is_blkdev_sb(sb)) {
				/*
				 * Dirty memory-backed blockdev: the ramdisk
				 * driver does this.  Skip just this inode
				 */
				continue;
			}
			/*
			 * Dirty memory-backed inode against a filesystem other
			 * than the kernel-internal bdev filesystem.  Skip the
			 * entire superblock.
			 */
			break;
		}

		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			if (!sb_is_blkdev_sb(sb))
				break;		/* Skip a congested fs */
			requeue_io(inode);
			continue;		/* Skip a congested blockdev */
		}

		if (wbc->bdi && bdi != wbc->bdi) {
			if (!sb_is_blkdev_sb(sb))
				break;		/* fs has the wrong queue */
			requeue_io(inode);
			continue;		/* blockdev has wrong queue */
		}

		/* Was this inode dirtied after sync_sb_inodes was called? */
		if (time_after(inode->dirtied_when, start))
			break;

		/* Is another pdflush already flushing this queue? */
		if (current_is_pdflush() && !writeback_acquire(bdi))
			break;

		BUG_ON(inode->i_state & I_FREEING);
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		__writeback_single_inode(inode, wbc);
		if (wbc->sync_mode == WB_SYNC_HOLD) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &sb->s_dirty);
		}
		if (current_is_pdflush())
			writeback_release(bdi);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			break;
		}
		if (!list_empty(&sb->s_more_io))
			wbc->more_io = 1;
	}
	return;		/* Leave any unwritten inodes on s_io */
}

490 
491 /*
492  * Start writeback of dirty pagecache data against all unlocked inodes.
493  *
494  * Note:
495  * We don't need to grab a reference to superblock here. If it has non-empty
496  * ->s_dirty it's hadn't been killed yet and kill_super() won't proceed
497  * past sync_inodes_sb() until the ->s_dirty/s_io/s_more_io lists are all
498  * empty. Since __sync_single_inode() regains inode_lock before it finally moves
499  * inode from superblock lists we are OK.
500  *
501  * If `older_than_this' is non-zero then only flush inodes which have a
502  * flushtime older than *older_than_this.
503  *
504  * If `bdi' is non-zero then we will scan the first inode against each
505  * superblock until we find the matching ones.  One group will be the dirty
506  * inodes against a filesystem.  Then when we hit the dummy blockdev superblock,
507  * sync_sb_inodes will seekout the blockdev which matches `bdi'.  Maybe not
508  * super-efficient but we're about to do a ton of I/O...
509  */
void
writeback_inodes(struct writeback_control *wbc)
{
	struct super_block *sb;

	might_sleep();
	spin_lock(&sb_lock);
restart:
	list_for_each_entry_reverse(sb, &super_blocks, s_list) {
		if (sb_has_dirty_inodes(sb)) {
			/* we're making our own get_super here */
			sb->s_count++;
			spin_unlock(&sb_lock);
			/*
			 * If we can't get the read lock, there's no sense in
			 * waiting around, most of the time the FS is going to
			 * be unmounted by the time it is released.
			 */
			if (down_read_trylock(&sb->s_umount)) {
				if (sb->s_root) {
					spin_lock(&inode_lock);
					sync_sb_inodes(sb, wbc);
					spin_unlock(&inode_lock);
				}
				up_read(&sb->s_umount);
			}
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto restart;
		}
		if (wbc->nr_to_write <= 0)
			break;
	}
	spin_unlock(&sb_lock);
}

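/*
 * Illustrative caller sketch, simplified from how the background writeout
 * path in mm/page-writeback.c drives this function in this era.  The
 * EXAMPLE_WRITEBACK_CHUNK constant and the helper name are ours, not the
 * kernel's: write in chunks until enough pages have gone out or we run dry.
 */
#define EXAMPLE_WRITEBACK_CHUNK	1024

static void example_background_writeout(long nr_pages)
{
	struct writeback_control wbc = {
		.bdi		= NULL,		/* all queues */
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = NULL,
		.nonblocking	= 1,
	};

	while (nr_pages > 0) {
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = EXAMPLE_WRITEBACK_CHUNK;
		writeback_inodes(&wbc);
		nr_pages -= EXAMPLE_WRITEBACK_CHUNK - wbc.nr_to_write;
		/*
		 * Wrote less than a full chunk without congestion: no dirty
		 * data left, stop.  (The real code would congestion_wait()
		 * before retrying the congested case.)
		 */
		if (wbc.nr_to_write > 0 && !wbc.encountered_congestion)
			break;
	}
}
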
/*
 * writeback and wait upon the filesystem's dirty inodes.  The caller will
 * do this in two passes - one to write, and one to wait.  WB_SYNC_HOLD is
 * used to park the written inodes on sb->s_dirty for the wait pass.
 *
 * A finite limit is set on the number of pages which will be written, to
 * prevent infinite livelock of sys_sync().
 *
 * We add in the number of potentially dirty inodes, because each inode write
 * can dirty pagecache in the underlying blockdev.
 */
void sync_inodes_sb(struct super_block *sb, int wait)
{
	struct writeback_control wbc = {
		.sync_mode	= wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);

	wbc.nr_to_write = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused) +
			nr_dirty + nr_unstable;
	wbc.nr_to_write += wbc.nr_to_write / 2;		/* Bit more for luck */
	spin_lock(&inode_lock);
	sync_sb_inodes(sb, &wbc);
	spin_unlock(&inode_lock);
}

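/*
 * Illustrative sketch of the two-pass convention described above, via a
 * hypothetical helper: the first (non-wait) pass uses WB_SYNC_HOLD to park
 * the written inodes on sb->s_dirty, where the second pass finds them and
 * waits on their writeout.
 */
static void example_sync_one_sb(struct super_block *sb)
{
	sync_inodes_sb(sb, 0);	/* pass 1: write (WB_SYNC_HOLD parks inodes) */
	sync_inodes_sb(sb, 1);	/* pass 2: write the rest and wait */
}
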
/*
 * Rather lame livelock avoidance.
 */
static void set_sb_syncing(int val)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry_reverse(sb, &super_blocks, s_list)
		sb->s_syncing = val;
	spin_unlock(&sb_lock);
}

/**
 * sync_inodes - writes all inodes to disk
 * @wait: wait for completion
 *
 * sync_inodes() goes through each super block's dirty inode list, writes the
 * inodes out, waits on the writeout and puts the inodes back on the normal
 * list.
 *
 * This is for sys_sync().  fsync_dev() uses the same algorithm.  The subtle
 * part of the sync functions is that the blockdev "superblock" is processed
 * last.  This is because the write_inode() function of a typical fs will
 * perform no I/O, but will mark buffers in the blockdev mapping as dirty.
 * What we want to do is to perform all that dirtying first, and then write
 * back all those inode blocks via the blockdev mapping in one sweep.  So the
 * additional (somewhat redundant) sync_blockdev() calls here are to make
 * sure that really happens.  Because if we call sync_inodes_sb(wait=1) with
 * outstanding dirty inodes, the writeback goes block-at-a-time within the
 * filesystem's write_inode().  This is extremely slow.
 */
static void __sync_inodes(int wait)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_syncing)
			continue;
		sb->s_syncing = 1;
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		if (sb->s_root) {
			sync_inodes_sb(sb, wait);
			sync_blockdev(sb->s_bdev);
		}
		up_read(&sb->s_umount);
		spin_lock(&sb_lock);
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
}

void sync_inodes(int wait)
{
	set_sb_syncing(0);
	__sync_inodes(0);

	if (wait) {
		set_sb_syncing(0);
		__sync_inodes(1);
	}
}

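/*
 * Illustrative sketch of the system-wide calling convention (a simplified
 * paraphrase of what the fs/sync.c sync path does in this era, not a
 * verbatim copy): issue everything asynchronously first, then repeat with
 * waiting, so most I/O is already in flight by the time we block.
 */
static void example_sys_sync_flow(void)
{
	sync_inodes(0);		/* start writeout, don't wait */
	sync_inodes(1);		/* write whatever is left, and wait on it */
}
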
/**
 * write_inode_now	-	write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = __writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);

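/*
 * Illustrative usage sketch: a knfsd-style caller that must have an inode
 * and its pages durable before replying.  example_commit_inode() is a
 * hypothetical wrapper, not a kernel API.
 */
static int example_commit_inode(struct inode *inode)
{
	/* Write synchronously and wait for the writeout to complete */
	return write_inode_now(inode, 1);
}
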
/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = __writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);

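/*
 * Illustrative usage sketch: an fsync-style caller builds its own
 * writeback_control for a data-integrity pass.  The helper name is
 * hypothetical; the field values mirror write_inode_now(sync=1) above.
 */
static int example_fsync_inode(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,	/* data integrity: wait */
		.nr_to_write	= LONG_MAX,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};

	return sync_inode(inode, &wbc);
}
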
/**
 * generic_osync_inode - flush all dirty data for a given inode to disk
 * @inode: inode to write
 * @mapping: the address_space that should be flushed
 * @what:  what to write and wait upon
 *
 * This can be called by file_write functions for files which have the
 * O_SYNC flag set, to flush dirty writes to disk.
 *
 * @what is a bitmask, specifying which part of the inode's data should be
 * written and waited upon.
 *
 *    OSYNC_DATA:     i_mapping's dirty data
 *    OSYNC_METADATA: the buffers at i_mapping->private_list
 *    OSYNC_INODE:    the inode itself
 */
int generic_osync_inode(struct inode *inode, struct address_space *mapping, int what)
{
	int err = 0;
	int need_write_inode_now = 0;
	int err2;

	if (what & OSYNC_DATA)
		err = filemap_fdatawrite(mapping);
	if (what & (OSYNC_METADATA|OSYNC_DATA)) {
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
	}
	if (what & OSYNC_DATA) {
		err2 = filemap_fdatawait(mapping);
		if (!err)
			err = err2;
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & I_DIRTY) &&
	    ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
		need_write_inode_now = 1;
	spin_unlock(&inode_lock);

	if (need_write_inode_now) {
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
	} else
		inode_sync_wait(inode);

	return err;
}

EXPORT_SYMBOL(generic_osync_inode);

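/*
 * Illustrative usage sketch: roughly what an O_SYNC write path does after
 * copying data into the pagecache (simplified; the real generic write path
 * has more bookkeeping).  The helper name is hypothetical.
 */
static int example_osync_after_write(struct file *file)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;

	/* Flush data and associated metadata buffers, waiting on both */
	return generic_osync_inode(inode, mapping,
				   OSYNC_METADATA | OSYNC_DATA);
}
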
/**
 * writeback_acquire - attempt to get exclusive writeback access to a device
 * @bdi: the device's backing_dev_info structure
 *
 * It is a waste of resources to have more than one pdflush thread blocked on
 * a single request queue.  Exclusion at the request_queue level is obtained
 * via a flag in the request_queue's backing_dev_info.state.
 *
 * Non-request_queue-backed address_spaces will share default_backing_dev_info,
 * unless they implement their own.  This sharing is somewhat inefficient, as
 * it may prevent concurrent writeback against multiple devices.
 */
int writeback_acquire(struct backing_dev_info *bdi)
{
	return !test_and_set_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback in progress against a backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_release - relinquish exclusive writeback access against a device.
 * @bdi: the device's backing_dev_info structure
 */
void writeback_release(struct backing_dev_info *bdi)
{
	BUG_ON(!writeback_in_progress(bdi));
	clear_bit(BDI_pdflush, &bdi->state);
}
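
/*
 * Illustrative try-lock idiom sketch: how a pdflush-style worker would pair
 * writeback_acquire() with writeback_release() around a writeback pass (the
 * same pattern sync_sb_inodes() uses above).  The helper is hypothetical.
 */
static void example_flush_one_queue(struct backing_dev_info *bdi,
				    struct writeback_control *wbc)
{
	if (!writeback_acquire(bdi))
		return;		/* another thread is already flushing it */

	wbc->bdi = bdi;
	writeback_inodes(wbc);	/* write dirty inodes backed by this queue */

	writeback_release(bdi);
}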
790