/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

#define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_args {
	long nr_pages;
	struct super_block *sb;
	enum writeback_sync_modes sync_mode;
	int for_kupdate:1;
	int range_cyclic:1;
	int for_background:1;
};

/*
 * Work items for the bdi_writeback threads
 */
struct bdi_work {
	struct list_head list;		/* pending work list */
	struct rcu_head rcu_head;	/* for RCU free/clear of work */

	unsigned long seen;		/* threads that have seen this work */
	atomic_t pending;		/* number of threads still to do work */

	struct wb_writeback_args args;	/* writeback arguments */

	unsigned long state;		/* flag bits, see WS_* */
};

enum {
	WS_USED_B = 0,
	WS_ONSTACK_B,
};

#define WS_USED (1 << WS_USED_B)
#define WS_ONSTACK (1 << WS_ONSTACK_B)

static inline bool bdi_work_on_stack(struct bdi_work *work)
{
	return test_bit(WS_ONSTACK_B, &work->state);
}

static inline void bdi_work_init(struct bdi_work *work,
				 struct wb_writeback_args *args)
{
	INIT_RCU_HEAD(&work->rcu_head);
	work->args = *args;
	work->state = WS_USED;
}
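
/*
 * Life cycle of a bdi_work, in short: the submitter initializes it
 * (WS_USED set) and queues it with bdi_queue_work(), which stamps
 * ->seen with the bdi's thread mask and ->pending with its thread
 * count.  Each flusher thread clears its bit in ->seen when it picks
 * the work up, and the last thread to drop ->pending unlinks the work
 * and completes it: a heap-allocated WB_SYNC_NONE work is freed after
 * an RCU grace period, while an on-stack work wakes its waiting
 * submitter via bdi_work_clear() once the grace period has passed.
 */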

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return !list_empty(&bdi->work_list);
}

static void bdi_work_clear(struct bdi_work *work)
{
	clear_bit(WS_USED_B, &work->state);
	smp_mb__after_clear_bit();
	/*
	 * work can have disappeared at this point. bit waitq functions
	 * should be able to tolerate this, provided bdi_sched_wait does
	 * not dereference its pointer argument.
	 */
	wake_up_bit(&work->state, WS_USED_B);
}

static void bdi_work_free(struct rcu_head *head)
{
	struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);

	if (!bdi_work_on_stack(work))
		kfree(work);
	else
		bdi_work_clear(work);
}

static void wb_work_complete(struct bdi_work *work)
{
	const enum writeback_sync_modes sync_mode = work->args.sync_mode;
	int onstack = bdi_work_on_stack(work);

	/*
	 * For allocated work, we can clear the done/seen bit right here.
	 * For on-stack work, we need to postpone both the clear and free
	 * to after the RCU grace period, since the stack could be invalidated
	 * as soon as bdi_work_clear() has done the wakeup.
	 */
	if (!onstack)
		bdi_work_clear(work);
	if (sync_mode == WB_SYNC_NONE || onstack)
		call_rcu(&work->rcu_head, bdi_work_free);
}

static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
{
	/*
	 * The caller has retrieved the work arguments from this work,
	 * so drop our reference. If this is the last ref, delete and free it.
	 */
	if (atomic_dec_and_test(&work->pending)) {
		struct backing_dev_info *bdi = wb->bdi;

		spin_lock(&bdi->wb_lock);
		list_del_rcu(&work->list);
		spin_unlock(&bdi->wb_lock);

		wb_work_complete(work);
	}
}

static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
{
	work->seen = bdi->wb_mask;
	BUG_ON(!work->seen);
	atomic_set(&work->pending, bdi->wb_cnt);
	BUG_ON(!bdi->wb_cnt);

	/*
	 * list_add_tail_rcu() contains the necessary barriers to
	 * make sure the above stores are seen before the item is
	 * noticed on the list
	 */
	spin_lock(&bdi->wb_lock);
	list_add_tail_rcu(&work->list, &bdi->work_list);
	spin_unlock(&bdi->wb_lock);

	/*
	 * If the default thread isn't there, make sure we add it. When
	 * it gets created and wakes up, we'll run this work.
	 */
	if (unlikely(list_empty_careful(&bdi->wb_list)))
		wake_up_process(default_backing_dev_info.wb.task);
	else {
		struct bdi_writeback *wb = &bdi->wb;

		if (wb->task)
			wake_up_process(wb->task);
	}
}

/*
 * Used for on-stack allocated work items. The caller needs to wait until
 * the wb threads have acked the work before it's safe to continue.
 */
static void bdi_wait_on_work_clear(struct bdi_work *work)
{
	wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait,
		    TASK_UNINTERRUPTIBLE);
}

static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
				 struct wb_writeback_args *args)
{
	struct bdi_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		bdi_work_init(work, args);
		bdi_queue_work(bdi, work);
	} else {
		struct bdi_writeback *wb = &bdi->wb;

		if (wb->task)
			wake_up_process(wb->task);
	}
}

/**
 * bdi_sync_writeback - start and wait for writeback
 * @bdi: the backing device to write from
 * @sb: write inodes from this super_block
 *
 * Description:
 *   This does WB_SYNC_ALL data integrity writeback and waits for the
 *   IO to complete. Callers must hold the sb s_umount semaphore for
 *   reading, to avoid having the super disappear before we are done.
 */
static void bdi_sync_writeback(struct backing_dev_info *bdi,
			       struct super_block *sb)
{
	struct wb_writeback_args args = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
	};
	struct bdi_work work;

	bdi_work_init(&work, &args);
	work.state |= WS_ONSTACK;

	bdi_queue_work(bdi, &work);
	bdi_wait_on_work_clear(&work);
}
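
/*
 * bdi_sync_writeback() is the blocking counterpart of
 * bdi_alloc_queue_work(): the work lives on this stack frame, so the
 * caller must park in bdi_wait_on_work_clear() until every flusher
 * thread has acked it.  In this kernel, sync_inodes_sb() is the
 * expected caller, holding s_umount for reading as required above.
 */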

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns; we make no guarantees on
 *   completion. Caller need not hold the sb s_umount semaphore.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
{
	struct wb_writeback_args args = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_pages	= nr_pages,
		.range_cyclic	= 1,
	};

	/*
	 * We treat @nr_pages=0 as the special case to do background writeback,
	 * ie. to sync pages until the background dirty threshold is reached.
	 */
	if (!nr_pages) {
		args.nr_pages = LONG_MAX;
		args.for_background = 1;
	}

	bdi_alloc_queue_work(bdi, &args);
}
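
/*
 * For example (illustrative): to nudge out up to 1024 dirty pages from
 * this device without waiting for them,
 *
 *	bdi_start_writeback(bdi, 1024);
 *
 * while bdi_start_writeback(bdi, 0) asks for background writeback that
 * only stops once the dirty counts drop below the background threshold
 * (see over_bground_thresh() below).
 */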

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = list_entry(wb->b_dirty.next, struct inode, i_list);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	list_move(&inode->i_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
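
/*
 * Concretely, on 32-bit time_after() only gives the intended answer
 * while the two timestamps are within LONG_MAX/2 jiffies of each
 * other: a dirtied_when more than ~2^31 ticks in the past (about 25
 * days at HZ=1000) wraps around and compares as "after" jiffies.  The
 * extra time_before_eq() check above rejects exactly those stuck
 * timestamps.
 */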

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;

	while (!list_empty(delaying_queue)) {
		inode = list_entry(delaying_queue->prev, struct inode, i_list);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_list, &tmp);
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		return;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		inode = list_entry(tmp.prev, struct inode, i_list);
		sb = inode->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = list_entry(pos, struct inode, i_list);
			if (inode->i_sb == sb)
				list_move(&inode->i_list, dispatch_queue);
		}
	}
}
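
/*
 * The second pass keeps inodes that share a superblock adjacent on
 * @dispatch_queue.  That ordering pays off in writeback_inodes_wb():
 * pin_sb_for_writeback() caches the currently pinned sb, so s_umount
 * is only dropped and re-taken when the superblock actually changes
 * rather than once per inode.
 */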

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
	list_splice_init(&wb->b_more_io, wb->b_io.prev);
	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}

static int write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, sync);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	do {
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
	} while (inode->i_state & I_SYNC);
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has ref on the inode (either via __iget or via syscall against an fd)
 * or the inode has I_WILL_FREE set (via generic_forget_inode)
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	int wait = wbc->sync_mode == WB_SYNC_ALL;
	unsigned dirty;
	int ret;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * completed a full scan of b_io.
		 */
		if (!wait) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wait);
		if (ret == 0)
			ret = err;
	}

	if (wait) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
		if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
			/*
			 * More pages get dirtied by a fast dirtier.
			 */
			goto select_queue;
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * At least XFS will redirty the inode during the
			 * writeback (delalloc) and on io completion (isize).
			 */
			redirty_tail(inode);
		} else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything. Redirty
			 * the inode; move it from b_io onto b_more_io/b_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of b_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had good
			 * reasons for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to b_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
select_queue:
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, inuse
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}

static void unpin_sb_for_writeback(struct super_block **psb)
{
	struct super_block *sb = *psb;

	if (sb) {
		up_read(&sb->s_umount);
		put_super(sb);
		*psb = NULL;
	}
}

/*
 * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
 * before calling writeback. So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 *
 * Returns 0 if the super was successfully pinned (or pinning wasn't needed),
 * 1 if we failed.
 */
static int pin_sb_for_writeback(struct writeback_control *wbc,
				struct inode *inode, struct super_block **psb)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * If this sb is already pinned, nothing more to do. If not and
	 * *psb is non-NULL, unpin the old one first
	 */
	if (sb == *psb)
		return 0;
	else if (*psb)
		unpin_sb_for_writeback(psb);

	/*
	 * Caller must already hold the ref for this
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		WARN_ON(!rwsem_is_locked(&sb->s_umount));
		return 0;
	}

	spin_lock(&sb_lock);
	sb->s_count++;
	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root) {
			spin_unlock(&sb_lock);
			goto pinned;
		}
		/*
		 * umounted, drop rwsem again and fall through to failure
		 */
		up_read(&sb->s_umount);
	}

	sb->s_count--;
	spin_unlock(&sb_lock);
	return 1;
pinned:
	*psb = sb;
	return 0;
}
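
/*
 * The intended calling pattern, as used by writeback_inodes_wb() below,
 * keeps one cached pin alive across a whole batch of inodes:
 *
 *	struct super_block *pin_sb = NULL;
 *
 *	for each inode on b_io:
 *		if (pin_sb_for_writeback(wbc, inode, &pin_sb)) {
 *			requeue_io(inode);
 *			continue;
 *		}
 *		... write the inode ...
 *
 *	unpin_sb_for_writeback(&pin_sb);
 *
 * so s_umount is only dropped and re-taken when the superblock changes.
 */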

static void writeback_inodes_wb(struct bdi_writeback *wb,
				struct writeback_control *wbc)
{
	struct super_block *sb = wbc->sb, *pin_sb = NULL;
	const int is_blkdev_sb = sb_is_blkdev_sb(sb);
	const unsigned long start = jiffies;	/* livelock avoidance */

	spin_lock(&inode_lock);

	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = list_entry(wb->b_io.prev,
						struct inode, i_list);
		long pages_skipped;

		/*
		 * super block given and doesn't match, skip this inode
		 */
		if (sb && sb != inode->i_sb) {
			redirty_tail(inode);
			continue;
		}

		if (!bdi_cap_writeback_dirty(wb->bdi)) {
			redirty_tail(inode);
			if (is_blkdev_sb) {
				/*
				 * Dirty memory-backed blockdev: the ramdisk
				 * driver does this.  Skip just this inode
				 */
				continue;
			}
			/*
			 * Dirty memory-backed inode against a filesystem other
			 * than the kernel-internal bdev filesystem.  Skip the
			 * entire superblock.
			 */
			break;
		}

		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
			requeue_io(inode);
			continue;
		}

		if (wbc->nonblocking && bdi_write_congested(wb->bdi)) {
			wbc->encountered_congestion = 1;
			if (!is_blkdev_sb)
				break;		/* Skip a congested fs */
			requeue_io(inode);
			continue;		/* Skip a congested blockdev */
		}

		/*
		 * Was this inode dirtied after sync_sb_inodes was called?
		 * This keeps sync from extra jobs and livelock.
		 */
		if (inode_dirtied_after(inode, start))
			break;

		if (pin_sb_for_writeback(wbc, inode, &pin_sb)) {
			requeue_io(inode);
			continue;
		}

		BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			break;
		}
		if (!list_empty(&wb->b_more_io))
			wbc->more_io = 1;
	}

	unpin_sb_for_writeback(&pin_sb);

	spin_unlock(&inode_lock);
	/* Leave any unwritten inodes on b_io */
}

void writeback_inodes_wbc(struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = wbc->bdi;

	writeback_inodes_wb(&bdi->wb, wbc);
}

/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code re-evaluates
 * the dirty state each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES     1024
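
/*
 * With the common 4 KiB page size this caps each chunk at 4 MiB of
 * writeout before wb_writeback() re-checks the remaining page budget
 * and the background threshold.
 */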

static inline bool over_bground_thresh(void)
{
	unsigned long background_thresh, dirty_thresh;

	get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

	return (global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than one dirty_writeback_interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_args *args)
{
	struct writeback_control wbc = {
		.bdi			= wb->bdi,
		.sb			= args->sb,
		.sync_mode		= args->sync_mode,
		.older_than_this	= NULL,
		.for_kupdate		= args->for_kupdate,
		.range_cyclic		= args->range_cyclic,
	};
	unsigned long oldest_jif;
	long wrote = 0;
	struct inode *inode;

	if (wbc.for_kupdate) {
		wbc.older_than_this = &oldest_jif;
		oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
	}
	if (!wbc.range_cyclic) {
		wbc.range_start = 0;
		wbc.range_end = LLONG_MAX;
	}

	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (args->nr_pages <= 0)
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (args->for_background && !over_bground_thresh())
			break;

		wbc.more_io = 0;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		writeback_inodes_wb(wb, &wbc);
		args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;

		/*
		 * If we consumed everything, see if we have more
		 */
		if (wbc.nr_to_write <= 0)
			continue;
		/*
		 * Didn't write everything and we don't have more IO, bail
		 */
		if (!wbc.more_io)
			break;
		/*
		 * Did we write something? Try for more
		 */
		if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
			continue;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		spin_lock(&inode_lock);
		if (!list_empty(&wb->b_more_io)) {
			inode = list_entry(wb->b_more_io.prev,
						struct inode, i_list);
			inode_wait_for_writeback(inode);
		}
		spin_unlock(&inode_lock);
	}

	return wrote;
}
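
/*
 * The end-of-chunk checks above amount to a small decision table,
 * evaluated after each MAX_WRITEBACK_PAGES batch:
 *
 *	budget fully consumed		-> continue, there may be more
 *	budget left, !more_io		-> break, nothing left to do
 *	wrote something, more_io	-> continue, try for more
 *	wrote nothing, more_io		-> wait on a busy b_more_io inode
 *					   rather than busyloop
 */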

/*
 * Return the next bdi_work struct that hasn't been processed by this
 * wb thread yet. ->seen is initially set for each thread that exists
 * for this device; when a thread first notices a piece of work it
 * clears its bit. Depending on writeback type, the thread will notify
 * completion on either receiving the work (WB_SYNC_NONE) or after
 * it is done (WB_SYNC_ALL).
 */
static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
					   struct bdi_writeback *wb)
{
	struct bdi_work *work, *ret = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(work, &bdi->work_list, list) {
		if (!test_bit(wb->nr, &work->seen))
			continue;
		clear_bit(wb->nr, &work->seen);

		ret = work;
		break;
	}

	rcu_read_unlock();
	return ret;
}
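
/*
 * For example, on a bdi with a single flusher thread (wb->nr == 0),
 * bdi_queue_work() stamps ->seen with that thread's bit and ->pending
 * with 1; the thread clears bit 0 here when it picks the work up, and
 * the matching wb_clear_pending() call then drops ->pending to zero
 * and retires the work.
 */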

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	if (nr_pages) {
		struct wb_writeback_args args = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &args);
	}

	return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct bdi_work *work;
	long wrote = 0;

	while ((work = get_next_work_item(bdi, wb)) != NULL) {
		struct wb_writeback_args args = work->args;

		/*
		 * Override sync mode, in case we must wait for completion
		 */
		if (force_wait)
			work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;

		/*
		 * If this isn't a data integrity operation, just notify
		 * that we have seen this work and we are now starting it.
		 */
		if (args.sync_mode == WB_SYNC_NONE)
			wb_clear_pending(wb, work);

		wrote += wb_writeback(wb, &args);

		/*
		 * This is a data integrity writeback, so only do the
		 * notification when we have completed the work.
		 */
		if (args.sync_mode == WB_SYNC_ALL)
			wb_clear_pending(wb, work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_task(struct bdi_writeback *wb)
{
	unsigned long last_active = jiffies;
	unsigned long wait_jiffies = -1UL;
	long pages_written;

	while (!kthread_should_stop()) {
		pages_written = wb_do_writeback(wb, 0);

		if (pages_written)
			last_active = jiffies;
		else if (wait_jiffies != -1UL) {
			unsigned long max_idle;

			/*
			 * Longest period of inactivity that we tolerate. If we
			 * see dirty data again later, the task will get
			 * recreated automatically.
			 */
			max_idle = max(5UL * 60 * HZ, wait_jiffies);
			if (time_after(jiffies, max_idle + last_active))
				break;
		}

		wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
		schedule_timeout_interruptible(wait_jiffies);
		try_to_freeze();
	}

	return 0;
}
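
/*
 * With the default dirty_writeback_interval of 500 centisecs this loop
 * wakes every five seconds; a thread that has written nothing for
 * max(5 minutes, one interval) falls out of the loop and exits, and is
 * recreated on demand the next time the bdi sees dirty data.
 */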

/*
 * Schedule writeback for all backing devices. This does WB_SYNC_NONE
 * writeback; for integrity writeback see bdi_sync_writeback().
 */
static void bdi_writeback_all(struct super_block *sb, long nr_pages)
{
	struct wb_writeback_args args = {
		.sb		= sb,
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
	};
	struct backing_dev_info *bdi;

	rcu_read_lock();

	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;

		bdi_alloc_queue_work(bdi, &args);
	}

	rcu_read_unlock();
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
	if (nr_pages == 0)
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	bdi_writeback_all(NULL, nr_pages);
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 *	__mark_inode_dirty -	internal function
 *	@inode: inode to mark
 *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *	Mark an inode as dirty. Callers should use mark_inode_dirty or
 *	mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
			struct backing_dev_info *bdi = wb->bdi;

			if (bdi_cap_writeback_dirty(bdi) &&
			    !test_bit(BDI_registered, &bdi->state)) {
				WARN_ON(1);
				printk(KERN_ERR "bdi-%s not registered\n",
								bdi->name);
			}

			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &wb->b_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);
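
/*
 * The usual entry points are thin wrappers around this function; in
 * this kernel they live in include/linux/fs.h:
 *
 *	static inline void mark_inode_dirty(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY);
 *	}
 *
 *	static inline void mark_inode_dirty_sync(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY_SYNC);
 *	}
 */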
112303ba3782SJens Axboe 
112466f3b8e2SJens Axboe /*
112566f3b8e2SJens Axboe  * Wait for writeback against the inodes of a superblock.  This is the
112666f3b8e2SJens Axboe  * wait half of a data integrity sync: writeout itself is driven
112766f3b8e2SJens Axboe  * separately (see sync_inodes_sb() below), and here we only wait on
112866f3b8e2SJens Axboe  * pages that are already under writeback, so that data dirtied before
112966f3b8e2SJens Axboe  * the sync call is on stable storage by the time we return.
113066f3b8e2SJens Axboe  */
1141b6e51316SJens Axboe static void wait_sb_inodes(struct super_block *sb)
114266f3b8e2SJens Axboe {
114338f21977SNick Piggin 	struct inode *inode, *old_inode = NULL;
114438f21977SNick Piggin 
114503ba3782SJens Axboe 	/*
114603ba3782SJens Axboe 	 * We need to be protected against the filesystem going from
114703ba3782SJens Axboe 	 * r/o to r/w or vice versa.
114803ba3782SJens Axboe 	 */
1149b6e51316SJens Axboe 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
115003ba3782SJens Axboe 
115166f3b8e2SJens Axboe 	spin_lock(&inode_lock);
115266f3b8e2SJens Axboe 
115338f21977SNick Piggin 	/*
115438f21977SNick Piggin 	 * Data integrity sync. We must wait for all pages under writeback,
115538f21977SNick Piggin 	 * because there may have been pages dirtied before our sync call
115638f21977SNick Piggin 	 * whose writeout started before we could write them out ourselves.
115738f21977SNick Piggin 	 * In that case the inode may no longer be on the dirty list, but
115838f21977SNick Piggin 	 * we still have to wait for that writeout to finish.
115938f21977SNick Piggin 	 */
1160b6e51316SJens Axboe 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
116138f21977SNick Piggin 		struct address_space *mapping;
116238f21977SNick Piggin 
116303ba3782SJens Axboe 		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
116438f21977SNick Piggin 			continue;
116538f21977SNick Piggin 		mapping = inode->i_mapping;
116638f21977SNick Piggin 		if (mapping->nrpages == 0)
116738f21977SNick Piggin 			continue;
116838f21977SNick Piggin 		__iget(inode);
1169ae8547b0SHans Reiser 		spin_unlock(&inode_lock);
117038f21977SNick Piggin 		/*
117138f21977SNick Piggin 		 * We hold a reference to 'inode' so it couldn't have
117238f21977SNick Piggin 		 * been removed from s_inodes list while we dropped the
117338f21977SNick Piggin 		 * inode_lock.  We cannot iput the inode now as we can
117438f21977SNick Piggin 		 * be holding the last reference and we cannot iput it
117538f21977SNick Piggin 		 * under inode_lock. So we keep the reference and iput
117638f21977SNick Piggin 		 * it later.
117738f21977SNick Piggin 		 */
117838f21977SNick Piggin 		iput(old_inode);
117938f21977SNick Piggin 		old_inode = inode;
118038f21977SNick Piggin 
118138f21977SNick Piggin 		filemap_fdatawait(mapping);
118238f21977SNick Piggin 
118338f21977SNick Piggin 		cond_resched();
118438f21977SNick Piggin 
118538f21977SNick Piggin 		spin_lock(&inode_lock);
118638f21977SNick Piggin 	}
118738f21977SNick Piggin 	spin_unlock(&inode_lock);
118838f21977SNick Piggin 	iput(old_inode);
118966f3b8e2SJens Axboe }
11901da177e4SLinus Torvalds 
1191d8a8559cSJens Axboe /**
1192d8a8559cSJens Axboe  * writeback_inodes_sb	-	writeback dirty inodes from given super_block
1193d8a8559cSJens Axboe  * @sb: the superblock
11941da177e4SLinus Torvalds  *
1195d8a8559cSJens Axboe  * Start writeback on some inodes on this super_block. No guarantees are made
1196d8a8559cSJens Axboe  * on how many (if any) will be written, and this function does not wait
1197d8a8559cSJens Axboe  * for IO completion of submitted IO.
11991da177e4SLinus Torvalds  */
1200b6e51316SJens Axboe void writeback_inodes_sb(struct super_block *sb)
12011da177e4SLinus Torvalds {
1202b1e7a8fdSChristoph Lameter 	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
1203fd39fc85SChristoph Lameter 	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
1204d8a8559cSJens Axboe 	long nr_to_write;
12051da177e4SLinus Torvalds 
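	/*
	 * Rough upper bound on the work: every dirty data page, every
	 * unstable NFS page, plus one page of slack per in-use inode to
	 * cover inode metadata.
	 */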
1206d8a8559cSJens Axboe 	nr_to_write = nr_dirty + nr_unstable +
120738f21977SNick Piggin 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
120838f21977SNick Piggin 
1209b6e51316SJens Axboe 	bdi_writeback_all(sb, nr_to_write);
12101da177e4SLinus Torvalds }
1211d8a8559cSJens Axboe EXPORT_SYMBOL(writeback_inodes_sb);
1212d8a8559cSJens Axboe 
1213d8a8559cSJens Axboe /**
1214d8a8559cSJens Axboe  * sync_inodes_sb	-	sync sb inode pages
1215d8a8559cSJens Axboe  * @sb: the superblock
1216d8a8559cSJens Axboe  *
1217d8a8559cSJens Axboe  * This function writes and waits on any dirty inode belonging to this
1218d8a8559cSJens Axboe  * super_block.
1219d8a8559cSJens Axboe  */
1220b6e51316SJens Axboe void sync_inodes_sb(struct super_block *sb)
1221d8a8559cSJens Axboe {
1222b6e51316SJens Axboe 	bdi_sync_writeback(sb->s_bdi, sb);
1223b6e51316SJens Axboe 	wait_sb_inodes(sb);
1224d8a8559cSJens Axboe }
1225d8a8559cSJens Axboe EXPORT_SYMBOL(sync_inodes_sb);
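
/*
 * A sketch (assuming the fs/sync.c of this era) of how the two helpers
 * above are driven: sync(2) makes a non-waiting pass first and a
 * waiting pass second, roughly
 *
 *	static int __sync_filesystem(struct super_block *sb, int wait)
 *	{
 *		if (wait)
 *			sync_inodes_sb(sb);
 *		else
 *			writeback_inodes_sb(sb);
 *
 *		if (sb->s_op->sync_fs)
 *			sb->s_op->sync_fs(sb, wait);
 *		return __sync_blockdev(sb->s_bdev, wait);
 *	}
 */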
12261da177e4SLinus Torvalds 
12271da177e4SLinus Torvalds /**
12281da177e4SLinus Torvalds  * write_inode_now	-	write an inode to disk
12291da177e4SLinus Torvalds  * @inode: inode to write to disk
12301da177e4SLinus Torvalds  * @sync: whether the write should be synchronous or not
12311da177e4SLinus Torvalds  *
12327f04c26dSAndrea Arcangeli  * This function commits an inode to disk immediately if it is dirty. This is
12337f04c26dSAndrea Arcangeli  * primarily needed by knfsd.
12347f04c26dSAndrea Arcangeli  *
12357f04c26dSAndrea Arcangeli  * The caller must either have a ref on the inode or must have set I_WILL_FREE.
12361da177e4SLinus Torvalds  */
12371da177e4SLinus Torvalds int write_inode_now(struct inode *inode, int sync)
12381da177e4SLinus Torvalds {
12391da177e4SLinus Torvalds 	int ret;
12401da177e4SLinus Torvalds 	struct writeback_control wbc = {
12411da177e4SLinus Torvalds 		.nr_to_write = LONG_MAX,
124218914b18SMike Galbraith 		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
1243111ebb6eSOGAWA Hirofumi 		.range_start = 0,
1244111ebb6eSOGAWA Hirofumi 		.range_end = LLONG_MAX,
12451da177e4SLinus Torvalds 	};
12461da177e4SLinus Torvalds 
12471da177e4SLinus Torvalds 	if (!mapping_cap_writeback_dirty(inode->i_mapping))
124849364ce2SAndrew Morton 		wbc.nr_to_write = 0;
12491da177e4SLinus Torvalds 
12501da177e4SLinus Torvalds 	might_sleep();
12511da177e4SLinus Torvalds 	spin_lock(&inode_lock);
125201c03194SChristoph Hellwig 	ret = writeback_single_inode(inode, &wbc);
12531da177e4SLinus Torvalds 	spin_unlock(&inode_lock);
12541da177e4SLinus Torvalds 	if (sync)
12551c0eeaf5SJoern Engel 		inode_sync_wait(inode);
12561da177e4SLinus Torvalds 	return ret;
12571da177e4SLinus Torvalds }
12581da177e4SLinus Torvalds EXPORT_SYMBOL(write_inode_now);
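
/*
 * Example (hypothetical caller): to push one inode out synchronously,
 * e.g. before handing a file handle to a client, a caller could do
 *
 *	err = write_inode_now(inode, 1);
 *
 * With sync != 0 this runs with WB_SYNC_ALL and then blocks in
 * inode_sync_wait() until I_SYNC clears.
 */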
12591da177e4SLinus Torvalds 
12601da177e4SLinus Torvalds /**
12611da177e4SLinus Torvalds  * sync_inode - write an inode and its pages to disk.
12621da177e4SLinus Torvalds  * @inode: the inode to sync
12631da177e4SLinus Torvalds  * @wbc: controls the writeback mode
12641da177e4SLinus Torvalds  *
12651da177e4SLinus Torvalds  * sync_inode() will write an inode and its pages to disk.  It will also
12661da177e4SLinus Torvalds  * correctly update the inode on its superblock's dirty inode lists and will
12671da177e4SLinus Torvalds  * update inode->i_state.
12681da177e4SLinus Torvalds  *
12691da177e4SLinus Torvalds  * The caller must have a ref on the inode.
12701da177e4SLinus Torvalds  */
12711da177e4SLinus Torvalds int sync_inode(struct inode *inode, struct writeback_control *wbc)
12721da177e4SLinus Torvalds {
12731da177e4SLinus Torvalds 	int ret;
12741da177e4SLinus Torvalds 
12751da177e4SLinus Torvalds 	spin_lock(&inode_lock);
127601c03194SChristoph Hellwig 	ret = writeback_single_inode(inode, wbc);
12771da177e4SLinus Torvalds 	spin_unlock(&inode_lock);
12781da177e4SLinus Torvalds 	return ret;
12791da177e4SLinus Torvalds }
12801da177e4SLinus Torvalds EXPORT_SYMBOL(sync_inode);
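
/*
 * Example (a sketch, not taken from this file): write back up to 64
 * pages of a single inode without waiting, as a filesystem might do
 * from its own flushing path:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_NONE,
 *		.nr_to_write	= 64,
 *		.range_start	= 0,
 *		.range_end	= LLONG_MAX,
 *	};
 *
 *	err = sync_inode(inode, &wbc);
 */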
1281