/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

#define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_args {
	long nr_pages;
	struct super_block *sb;
	enum writeback_sync_modes sync_mode;
	int for_kupdate;
	int range_cyclic;
};

/*
 * Work items for the bdi_writeback threads
 */
struct bdi_work {
	struct list_head list;		/* pending work list */
	struct rcu_head rcu_head;	/* for RCU free/clear of work */

	unsigned long seen;		/* threads that have seen this work */
	atomic_t pending;		/* number of threads still to do work */

	struct wb_writeback_args args;	/* writeback arguments */

	unsigned long state;		/* flag bits, see WS_* */
};

enum {
	WS_USED_B = 0,
	WS_ONSTACK_B,
};

#define WS_USED (1 << WS_USED_B)
#define WS_ONSTACK (1 << WS_ONSTACK_B)

static inline bool bdi_work_on_stack(struct bdi_work *work)
{
	return test_bit(WS_ONSTACK_B, &work->state);
}

static inline void bdi_work_init(struct bdi_work *work,
				 struct wb_writeback_args *args)
{
	INIT_RCU_HEAD(&work->rcu_head);
	work->args = *args;
	work->state = WS_USED;
}
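
/*
 * Illustrative sketch, not part of the build: the two lifetimes a
 * struct bdi_work can have in this file.  bdi_alloc_queue_work() below
 * uses the heap form and lets RCU free it; bdi_sync_writeback() uses
 * the on-stack form and must block in bdi_wait_on_work_clear() until
 * the flusher threads are done with the memory.
 *
 *	struct bdi_work *work = kmalloc(sizeof(*work), GFP_ATOMIC);
 *	if (work)
 *		bdi_work_init(work, &args);	// freed later via call_rcu()
 *
 *	struct bdi_work stack_work;		// caller must wait on this
 *	bdi_work_init(&stack_work, &args);
 *	stack_work.state |= WS_ONSTACK;
 */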

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return !list_empty(&bdi->work_list);
}

static void bdi_work_clear(struct bdi_work *work)
{
	clear_bit(WS_USED_B, &work->state);
	smp_mb__after_clear_bit();
	wake_up_bit(&work->state, WS_USED_B);
}

static void bdi_work_free(struct rcu_head *head)
{
	struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);

	if (!bdi_work_on_stack(work))
		kfree(work);
	else
		bdi_work_clear(work);
}

static void wb_work_complete(struct bdi_work *work)
{
	const enum writeback_sync_modes sync_mode = work->args.sync_mode;

	/*
	 * For allocated work, we can clear the done/seen bit right here.
	 * For on-stack work, we need to postpone both the clear and free
	 * to after the RCU grace period, since the stack could be invalidated
	 * as soon as bdi_work_clear() has done the wakeup.
	 */
	if (!bdi_work_on_stack(work))
		bdi_work_clear(work);
	if (sync_mode == WB_SYNC_NONE || bdi_work_on_stack(work))
		call_rcu(&work->rcu_head, bdi_work_free);
}

static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
{
	/*
	 * The caller has retrieved the work arguments from this work;
	 * drop our reference. If this is the last ref, delete and free it.
	 */
	if (atomic_dec_and_test(&work->pending)) {
		struct backing_dev_info *bdi = wb->bdi;

		spin_lock(&bdi->wb_lock);
		list_del_rcu(&work->list);
		spin_unlock(&bdi->wb_lock);

		wb_work_complete(work);
	}
}

static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
{
	work->seen = bdi->wb_mask;
	BUG_ON(!work->seen);
	atomic_set(&work->pending, bdi->wb_cnt);
	BUG_ON(!bdi->wb_cnt);

	/*
	 * Make sure stores are seen before it appears on the list
	 */
	smp_mb();

	spin_lock(&bdi->wb_lock);
	list_add_tail_rcu(&work->list, &bdi->work_list);
	spin_unlock(&bdi->wb_lock);

	/*
	 * If the default thread isn't there, make sure we add it. When
	 * it gets created and wakes up, we'll run this work.
	 */
	if (unlikely(list_empty_careful(&bdi->wb_list)))
		wake_up_process(default_backing_dev_info.wb.task);
	else {
		struct bdi_writeback *wb = &bdi->wb;

		/*
		 * End work now if this wb has no dirty IO pending. Otherwise
		 * wake up the handling thread.
		 */
		if (!wb_has_dirty_io(wb))
			wb_clear_pending(wb, work);
		else if (wb->task)
			wake_up_process(wb->task);
	}
}

/*
 * Used for on-stack allocated work items. The caller needs to wait until
 * the wb threads have acked the work before it's safe to continue.
 */
static void bdi_wait_on_work_clear(struct bdi_work *work)
{
	wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait,
		    TASK_UNINTERRUPTIBLE);
}

static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
				 struct wb_writeback_args *args)
{
	struct bdi_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wake up the thread for old dirty data writeback.
	 */
	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		bdi_work_init(work, args);
		bdi_queue_work(bdi, work);
	} else {
		struct bdi_writeback *wb = &bdi->wb;

		if (wb->task)
			wake_up_process(wb->task);
	}
}

/**
 * bdi_sync_writeback - start and wait for writeback
 * @bdi: the backing device to write from
 * @sb: write inodes from this super_block
 *
 * Description:
 *   This does WB_SYNC_ALL data integrity writeback and waits for the
 *   IO to complete. Callers must hold the sb s_umount semaphore for
 *   reading, to avoid having the super disappear before we are done.
 */
static void bdi_sync_writeback(struct backing_dev_info *bdi,
			       struct super_block *sb)
{
	struct wb_writeback_args args = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
	};
	struct bdi_work work;

	bdi_work_init(&work, &args);
	work.state |= WS_ONSTACK;

	bdi_queue_work(bdi, &work);
	bdi_wait_on_work_clear(&work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. By the time this
 *   function returns, the IO has at most been started; we make no
 *   guarantees on completion. The caller need not hold the sb s_umount
 *   semaphore.
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
{
	struct wb_writeback_args args = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_pages	= nr_pages,
		.range_cyclic	= 1,
	};

	bdi_alloc_queue_work(bdi, &args);
}
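
/*
 * Usage sketch (hypothetical caller): kick background writeback of
 * about 1024 pages on a registered bdi and return immediately, without
 * waiting for any of the IO to complete.
 *
 *	bdi_start_writeback(inode_to_bdi(inode), 1024);
 */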

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = list_entry(wb->b_dirty.next, struct inode, i_list);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	list_move(&inode->i_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in the distant
	 * past.  This test is necessary to prevent such wrapped-around
	 * relative times from permanently stopping the whole pdflush
	 * writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
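
/*
 * Worked example of the 32-bit wrap: with HZ=1000, a 32-bit jiffies
 * counter wraps roughly every 49.7 days (2^32 ms).  A dirtied_when
 * stamped shortly before a wrap is judged by time_after() to be
 * "after" almost any post-wrap cutoff t, so without the extra
 * time_before_eq(dirtied_when, jiffies) check such an inode would look
 * dirtied-in-the-future forever, never be considered expired, and
 * block the time-ordered scan behind it.
 */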

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	while (!list_empty(delaying_queue)) {
		struct inode *inode = list_entry(delaying_queue->prev,
						struct inode, i_list);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		list_move(&inode->i_list, dispatch_queue);
	}
}
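
/*
 * Example: with *older_than_this set to "jiffies - 30 seconds" (which
 * is what wb_writeback() computes from the default dirty_expire_interval
 * of 3000 centisecs), only inodes dirtied at least 30 seconds ago move
 * to @dispatch_queue; the scan stops at the first younger inode because
 * @delaying_queue is kept time-ordered.
 */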

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
	list_splice_init(&wb->b_more_io, wb->b_io.prev);
	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}

static int write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, sync);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	do {
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
	} while (inode->i_state & I_SYNC);
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via syscall against
 * an fd) or the inode has I_WILL_FREE set (via generic_forget_inode).
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	int wait = wbc->sync_mode == WB_SYNC_ALL;
	unsigned dirty;
	int ret;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * have completed a full scan of b_io.
		 */
		if (!wait) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wait);
		if (ret == 0)
			ret = err;
	}

	if (wait) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
		if (!(inode->i_state & I_DIRTY) &&
		    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything. Redirty
			 * the inode and move it from b_io onto
			 * b_more_io/b_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of b_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had good
			 * reasons for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to b_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout; heavy writing to one file would
				 * otherwise indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Someone redirtied the inode while we were writing
			 * back the pages.
			 */
			redirty_tail(inode);
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, in use
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}

/*
 * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
 * before calling writeback. So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 *
 * Returns 0 if the super was successfully pinned (or pinning wasn't needed),
 * 1 if we failed.
 */
static int pin_sb_for_writeback(struct writeback_control *wbc,
				struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Caller must already hold the ref for this
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		WARN_ON(!rwsem_is_locked(&sb->s_umount));
		return 0;
	}

	spin_lock(&sb_lock);
	sb->s_count++;
	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root) {
			spin_unlock(&sb_lock);
			return 0;
		}
		/*
		 * umounted, drop rwsem again and fall through to failure
		 */
		up_read(&sb->s_umount);
	}

	sb->s_count--;
	spin_unlock(&sb_lock);
	return 1;
}

static void unpin_sb_for_writeback(struct writeback_control *wbc,
				   struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (wbc->sync_mode == WB_SYNC_ALL)
		return;

	up_read(&sb->s_umount);
	put_super(sb);
}

static void writeback_inodes_wb(struct bdi_writeback *wb,
				struct writeback_control *wbc)
{
	struct super_block *sb = wbc->sb;
	const int is_blkdev_sb = sb_is_blkdev_sb(sb);
	const unsigned long start = jiffies;	/* livelock avoidance */

	spin_lock(&inode_lock);

	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = list_entry(wb->b_io.prev,
						struct inode, i_list);
		long pages_skipped;

		/*
		 * A super block was given and it doesn't match: skip this
		 * inode.
		 */
		if (sb && sb != inode->i_sb) {
			redirty_tail(inode);
			continue;
		}

		if (!bdi_cap_writeback_dirty(wb->bdi)) {
			redirty_tail(inode);
			if (is_blkdev_sb) {
				/*
				 * Dirty memory-backed blockdev: the ramdisk
				 * driver does this.  Skip just this inode.
				 */
				continue;
			}
			/*
			 * Dirty memory-backed inode against a filesystem other
			 * than the kernel-internal bdev filesystem.  Skip the
			 * entire superblock.
			 */
			break;
		}

		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
			requeue_io(inode);
			continue;
		}

		if (wbc->nonblocking && bdi_write_congested(wb->bdi)) {
			wbc->encountered_congestion = 1;
			if (!is_blkdev_sb)
				break;		/* Skip a congested fs */
			requeue_io(inode);
			continue;		/* Skip a congested blockdev */
		}

		/*
		 * Was this inode dirtied after this scan of the dirty list
		 * started?  This keeps sync from doing extra work and
		 * prevents livelock.
		 */
		if (inode_dirtied_after(inode, start))
			break;

		if (pin_sb_for_writeback(wbc, inode)) {
			requeue_io(inode);
			continue;
		}

		BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		unpin_sb_for_writeback(wbc, inode);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			break;
		}
		if (!list_empty(&wb->b_more_io))
			wbc->more_io = 1;
	}

	spin_unlock(&inode_lock);
	/* Leave any unwritten inodes on b_io */
}

void writeback_inodes_wbc(struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = wbc->bdi;

	writeback_inodes_wb(&bdi->wb, wbc);
}

/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty state each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES     1024

static inline bool over_bground_thresh(void)
{
	unsigned long background_thresh, dirty_thresh;

	get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

	return (global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
}
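
/*
 * Worked example, assuming the default dirty_background_ratio of 10:
 * on a machine with roughly 4GB of dirtyable memory (about one million
 * 4KB pages), get_dirty_limits() yields a background_thresh near
 * 100,000 pages, so background writeback keeps running until
 * NR_FILE_DIRTY + NR_UNSTABLE_NFS falls below roughly that count.
 */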

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than one dirty_writeback_interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_args *args)
{
	struct writeback_control wbc = {
		.bdi			= wb->bdi,
		.sb			= args->sb,
		.sync_mode		= args->sync_mode,
		.older_than_this	= NULL,
		.for_kupdate		= args->for_kupdate,
		.range_cyclic		= args->range_cyclic,
	};
	unsigned long oldest_jif;
	long wrote = 0;

	if (wbc.for_kupdate) {
		wbc.older_than_this = &oldest_jif;
		oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
	}
	if (!wbc.range_cyclic) {
		wbc.range_start = 0;
		wbc.range_end = LLONG_MAX;
	}

	for (;;) {
		/*
		 * Don't flush anything for non-integrity writeback where
		 * no nr_pages was given
		 */
		if (!args->for_kupdate && args->nr_pages <= 0 &&
		    args->sync_mode == WB_SYNC_NONE)
			break;

		/*
		 * If no specific pages were given and this is just a
		 * periodic background writeout and we are below the
		 * background dirty threshold, don't do anything
		 */
		if (args->for_kupdate && args->nr_pages <= 0 &&
		    !over_bground_thresh())
			break;

		wbc.more_io = 0;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		writeback_inodes_wb(wb, &wbc);
		args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;

		/*
		 * If we ran out of stuff to write, bail unless more_io got set
		 */
		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
			if (wbc.more_io && !wbc.for_kupdate)
				continue;
			break;
		}
	}

	return wrote;
}
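
/*
 * Example of the batching arithmetic above: with args->nr_pages == 2500
 * and enough dirty data that each pass writes the full
 * MAX_WRITEBACK_PAGES (1024), nr_pages drops 2500 -> 1476 -> 452 and
 * the work finishes on the third pass, once nr_pages goes non-positive.
 * The per-pass cap keeps I_SYNC from being held against any one inode
 * for too long.
 */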

/*
 * Return the next bdi_work struct that hasn't been processed by this
 * wb thread yet. ->seen is initially set for each thread that exists
 * for this device; when a thread first notices a piece of work, it
 * clears its bit. Depending on the writeback type, the thread will
 * notify completion either on receiving the work (WB_SYNC_NONE) or
 * after it is done (WB_SYNC_ALL).
 */
static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
					   struct bdi_writeback *wb)
{
	struct bdi_work *work, *ret = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(work, &bdi->work_list, list) {
		if (!test_and_clear_bit(wb->nr, &work->seen))
			continue;

		ret = work;
		break;
	}

	rcu_read_unlock();
	return ret;
}
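
/*
 * Example of the ->seen/->pending handshake with two flusher threads
 * (wb->nr 0 and 1): bdi_queue_work() sets work->seen = 0x3 and
 * work->pending = 2.  Each thread hands itself the work exactly once
 * via test_and_clear_bit(wb->nr, &work->seen); wb_clear_pending() then
 * drops ->pending, and whichever thread drops it to zero unlinks and
 * frees the work.
 */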

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	if (nr_pages) {
		struct wb_writeback_args args = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &args);
	}

	return 0;
}
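
/*
 * Interval arithmetic, assuming the default dirty_writeback_interval
 * of 500 centisecs: msecs_to_jiffies(500 * 10) is five seconds worth
 * of jiffies, so even if the thread wakes more often, an old-data
 * flush is attempted at most once every five seconds.
 */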

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct bdi_work *work;
	long wrote = 0;

	while ((work = get_next_work_item(bdi, wb)) != NULL) {
		struct wb_writeback_args args = work->args;

		/*
		 * Override sync mode, in case we must wait for completion
		 */
		if (force_wait)
			work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;

		/*
		 * If this isn't a data integrity operation, just notify
		 * that we have seen this work and we are now starting it.
		 */
		if (args.sync_mode == WB_SYNC_NONE)
			wb_clear_pending(wb, work);

		wrote += wb_writeback(wb, &args);

		/*
		 * This is a data integrity writeback, so only do the
		 * notification when we have completed the work.
		 */
		if (args.sync_mode == WB_SYNC_ALL)
			wb_clear_pending(wb, work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_task(struct bdi_writeback *wb)
{
	unsigned long last_active = jiffies;
	unsigned long wait_jiffies = -1UL;
	long pages_written;

	while (!kthread_should_stop()) {
		pages_written = wb_do_writeback(wb, 0);

		if (pages_written)
			last_active = jiffies;
		else if (wait_jiffies != -1UL) {
			unsigned long max_idle;

			/*
			 * Longest period of inactivity that we tolerate. If we
			 * see dirty data again later, the task will get
			 * recreated automatically.
			 */
			max_idle = max(5UL * 60 * HZ, wait_jiffies);
			if (time_after(jiffies, max_idle + last_active))
				break;
		}

		wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(wait_jiffies);
		try_to_freeze();
	}

	return 0;
}
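
/*
 * Idle-timeout example: with dirty_writeback_interval at its default of
 * 500 centisecs, wait_jiffies is five seconds and max_idle comes out as
 * max(5 minutes, 5 seconds), so a flusher thread that writes nothing
 * for five minutes exits; it is recreated automatically when dirty data
 * shows up again.
 */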

/*
 * Schedule writeback for all backing devices. This does WB_SYNC_NONE
 * writeback; for integrity writeback, see bdi_sync_writeback().
 */
static void bdi_writeback_all(struct super_block *sb, long nr_pages)
{
	struct wb_writeback_args args = {
		.sb		= sb,
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
	};
	struct backing_dev_info *bdi;

	rcu_read_lock();

	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;

		bdi_alloc_queue_work(bdi, &args);
	}

	rcu_read_unlock();
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
	if (nr_pages == 0)
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	bdi_writeback_all(NULL, nr_pages);
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 *	__mark_inode_dirty -	internal function
 *	@inode: inode to mark
 *	@flags: what kind of dirty (e.g. I_DIRTY_SYNC)
 *	Mark an inode as dirty. Callers should use mark_inode_dirty or
 *	mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
			struct backing_dev_info *bdi = wb->bdi;

			if (bdi_cap_writeback_dirty(bdi) &&
			    !test_bit(BDI_registered, &bdi->state)) {
				WARN_ON(1);
				printk(KERN_ERR "bdi-%s not registered\n",
								bdi->name);
			}

			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &wb->b_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);
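
/*
 * Typical usage is via the wrappers in include/linux/fs.h rather than
 * calling this directly:
 *
 *	mark_inode_dirty(inode);	- __mark_inode_dirty(inode, I_DIRTY)
 *	mark_inode_dirty_sync(inode);	- __mark_inode_dirty(inode, I_DIRTY_SYNC)
 */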

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_lock);

	/*
	 * Data integrity sync.  We must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync call
	 * whose writeout had already started before we got to them.  In
	 * that case the inode may no longer be on the dirty list, but we
	 * still have to wait for its writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping;

		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
			continue;
		mapping = inode->i_mapping;
		if (mapping->nrpages == 0)
			continue;
		__iget(inode);
		spin_unlock(&inode_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have
		 * been removed from the s_inodes list while we dropped the
		 * inode_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it
		 * under inode_lock. So we keep the reference and iput
		 * it later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_lock);
	}
	spin_unlock(&inode_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb)
{
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
	long nr_to_write;

	nr_to_write = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	bdi_writeback_all(sb, nr_to_write);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * sync_inodes_sb	-	sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	bdi_sync_writeback(sb->s_bdi, sb);
	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now	-	write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);
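
/*
 * Usage sketch (hypothetical caller holding a reference on the inode):
 * mirror what write_inode_now() does, but keep control of the
 * writeback_control.
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_to_write	= LONG_MAX,
 *		.range_start	= 0,
 *		.range_end	= LLONG_MAX,
 *	};
 *	int err = sync_inode(inode, &wbc);
 */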