xref: /openbmc/linux/fs/fs-writeback.c (revision 58a9d3d8)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * fs/fs-writeback.c
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Copyright (C) 2002, Linus Torvalds.
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  * Contains all the functions related to writing back and waiting
71da177e4SLinus Torvalds  * upon dirty inodes against superblocks, and writing back dirty
81da177e4SLinus Torvalds  * pages against inodes.  ie: data writeback.  Writeout of the
91da177e4SLinus Torvalds  * inode itself is not handled here.
101da177e4SLinus Torvalds  *
11e1f8e874SFrancois Cami  * 10Apr2002	Andrew Morton
121da177e4SLinus Torvalds  *		Split out of fs/inode.c
131da177e4SLinus Torvalds  *		Additions for address_space-based writeback
141da177e4SLinus Torvalds  */
151da177e4SLinus Torvalds 
161da177e4SLinus Torvalds #include <linux/kernel.h>
17f5ff8422SJens Axboe #include <linux/module.h>
181da177e4SLinus Torvalds #include <linux/spinlock.h>
195a0e3ad6STejun Heo #include <linux/slab.h>
201da177e4SLinus Torvalds #include <linux/sched.h>
211da177e4SLinus Torvalds #include <linux/fs.h>
221da177e4SLinus Torvalds #include <linux/mm.h>
2303ba3782SJens Axboe #include <linux/kthread.h>
2403ba3782SJens Axboe #include <linux/freezer.h>
251da177e4SLinus Torvalds #include <linux/writeback.h>
261da177e4SLinus Torvalds #include <linux/blkdev.h>
271da177e4SLinus Torvalds #include <linux/backing-dev.h>
281da177e4SLinus Torvalds #include <linux/buffer_head.h>
2907f3f05cSDavid Howells #include "internal.h"
301da177e4SLinus Torvalds 
3166f3b8e2SJens Axboe #define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)
32f11b00f3SAdrian Bunk 
3303ba3782SJens Axboe /*
34d0bceac7SJens Axboe  * We don't actually have pdflush, but this one is exported through /proc...
35d0bceac7SJens Axboe  */
36d0bceac7SJens Axboe int nr_pdflush_threads;
37d0bceac7SJens Axboe 
38d0bceac7SJens Axboe /*
39c4a77a6cSJens Axboe  * Passed into wb_writeback(), essentially a subset of writeback_control
40c4a77a6cSJens Axboe  */
41c4a77a6cSJens Axboe struct wb_writeback_args {
42c4a77a6cSJens Axboe 	long nr_pages;
43c4a77a6cSJens Axboe 	struct super_block *sb;
44c4a77a6cSJens Axboe 	enum writeback_sync_modes sync_mode;
4552957fe1SH Hartley Sweeten 	unsigned int for_kupdate:1;
4652957fe1SH Hartley Sweeten 	unsigned int range_cyclic:1;
4752957fe1SH Hartley Sweeten 	unsigned int for_background:1;
48e8bebe2fSLinus Torvalds 	unsigned int sb_pinned:1;
49c4a77a6cSJens Axboe };
50c4a77a6cSJens Axboe 
51c4a77a6cSJens Axboe /*
5203ba3782SJens Axboe  * Work items for the bdi_writeback threads
53f11b00f3SAdrian Bunk  */
5403ba3782SJens Axboe struct bdi_work {
558010c3b6SJens Axboe 	struct list_head list;		/* pending work list */
568010c3b6SJens Axboe 	struct rcu_head rcu_head;	/* for RCU free/clear of work */
5703ba3782SJens Axboe 
588010c3b6SJens Axboe 	unsigned long seen;		/* threads that have seen this work */
598010c3b6SJens Axboe 	atomic_t pending;		/* number of threads still to do work */
6003ba3782SJens Axboe 
618010c3b6SJens Axboe 	struct wb_writeback_args args;	/* writeback arguments */
6203ba3782SJens Axboe 
638010c3b6SJens Axboe 	unsigned long state;		/* flag bits, see WS_* */
6403ba3782SJens Axboe };
6503ba3782SJens Axboe 
6603ba3782SJens Axboe enum {
6703ba3782SJens Axboe 	WS_USED_B = 0,
6803ba3782SJens Axboe 	WS_ONSTACK_B,
6903ba3782SJens Axboe };
7003ba3782SJens Axboe 
7103ba3782SJens Axboe #define WS_USED (1 << WS_USED_B)
7203ba3782SJens Axboe #define WS_ONSTACK (1 << WS_ONSTACK_B)
7303ba3782SJens Axboe 
7403ba3782SJens Axboe static inline bool bdi_work_on_stack(struct bdi_work *work)
75f11b00f3SAdrian Bunk {
7603ba3782SJens Axboe 	return test_bit(WS_ONSTACK_B, &work->state);
7703ba3782SJens Axboe }
7803ba3782SJens Axboe 
7903ba3782SJens Axboe static inline void bdi_work_init(struct bdi_work *work,
80b6e51316SJens Axboe 				 struct wb_writeback_args *args)
8103ba3782SJens Axboe {
8203ba3782SJens Axboe 	INIT_RCU_HEAD(&work->rcu_head);
83b6e51316SJens Axboe 	work->args = *args;
8403ba3782SJens Axboe 	work->state = WS_USED;
8503ba3782SJens Axboe }
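/*
 * Editorial sketch (not part of the original source): a bdi_work is either
 * heap-allocated for fire-and-forget WB_SYNC_NONE writeback, or placed on
 * the caller's stack when the caller must wait for it:
 *
 *	struct wb_writeback_args args = { .sync_mode = WB_SYNC_NONE };
 *	struct bdi_work *work = kmalloc(sizeof(*work), GFP_ATOMIC);
 *
 *	if (work)
 *		bdi_work_init(work, &args);	// later freed via RCU
 *
 * On-stack users additionally OR in WS_ONSTACK (see bdi_sync_writeback()
 * below), so bdi_work_free() knows not to kfree() the memory.
 */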
8603ba3782SJens Axboe 
87f11b00f3SAdrian Bunk /**
88f11b00f3SAdrian Bunk  * writeback_in_progress - determine whether there is writeback in progress
89f11b00f3SAdrian Bunk  * @bdi: the device's backing_dev_info structure.
90f11b00f3SAdrian Bunk  *
9103ba3782SJens Axboe  * Determine whether there is writeback waiting to be handled against a
9203ba3782SJens Axboe  * backing device.
93f11b00f3SAdrian Bunk  */
94f11b00f3SAdrian Bunk int writeback_in_progress(struct backing_dev_info *bdi)
95f11b00f3SAdrian Bunk {
9603ba3782SJens Axboe 	return !list_empty(&bdi->work_list);
97f11b00f3SAdrian Bunk }
98f11b00f3SAdrian Bunk 
9903ba3782SJens Axboe static void bdi_work_clear(struct bdi_work *work)
100f11b00f3SAdrian Bunk {
10103ba3782SJens Axboe 	clear_bit(WS_USED_B, &work->state);
10203ba3782SJens Axboe 	smp_mb__after_clear_bit();
1031ef7d9aaSNick Piggin 	/*
1041ef7d9aaSNick Piggin 	 * work can have disappeared at this point. bit waitq functions
1051ef7d9aaSNick Piggin 	 * should be able to tolerate this, provided bdi_sched_wait does
1061ef7d9aaSNick Piggin 	 * not dereference its pointer argument.
1071ef7d9aaSNick Piggin 	 */
10803ba3782SJens Axboe 	wake_up_bit(&work->state, WS_USED_B);
109f11b00f3SAdrian Bunk }
110f11b00f3SAdrian Bunk 
11103ba3782SJens Axboe static void bdi_work_free(struct rcu_head *head)
1124195f73dSNick Piggin {
11303ba3782SJens Axboe 	struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);
1144195f73dSNick Piggin 
11503ba3782SJens Axboe 	if (!bdi_work_on_stack(work))
11603ba3782SJens Axboe 		kfree(work);
11703ba3782SJens Axboe 	else
11803ba3782SJens Axboe 		bdi_work_clear(work);
1194195f73dSNick Piggin }
1204195f73dSNick Piggin 
12103ba3782SJens Axboe static void wb_work_complete(struct bdi_work *work)
1221da177e4SLinus Torvalds {
123c4a77a6cSJens Axboe 	const enum writeback_sync_modes sync_mode = work->args.sync_mode;
12477b9d059SNick Piggin 	int onstack = bdi_work_on_stack(work);
1251da177e4SLinus Torvalds 
1261da177e4SLinus Torvalds 	/*
12703ba3782SJens Axboe 	 * For allocated work, we can clear the done/seen bit right here.
12803ba3782SJens Axboe 	 * For on-stack work, we need to postpone both the clear and free
12903ba3782SJens Axboe 	 * to after the RCU grace period, since the stack could be invalidated
13003ba3782SJens Axboe 	 * as soon as bdi_work_clear() has done the wakeup.
1311da177e4SLinus Torvalds 	 */
13277b9d059SNick Piggin 	if (!onstack)
13303ba3782SJens Axboe 		bdi_work_clear(work);
13477b9d059SNick Piggin 	if (sync_mode == WB_SYNC_NONE || onstack)
13503ba3782SJens Axboe 		call_rcu(&work->rcu_head, bdi_work_free);
1361da177e4SLinus Torvalds }
1371da177e4SLinus Torvalds 
13803ba3782SJens Axboe static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
13903ba3782SJens Axboe {
1401da177e4SLinus Torvalds 	/*
14103ba3782SJens Axboe 	 * The caller has retrieved the work arguments from this work,
14203ba3782SJens Axboe 	 * drop our reference. If this is the last ref, delete and free it
14303ba3782SJens Axboe 	 */
14403ba3782SJens Axboe 	if (atomic_dec_and_test(&work->pending)) {
14503ba3782SJens Axboe 		struct backing_dev_info *bdi = wb->bdi;
14603ba3782SJens Axboe 
14703ba3782SJens Axboe 		spin_lock(&bdi->wb_lock);
14803ba3782SJens Axboe 		list_del_rcu(&work->list);
14903ba3782SJens Axboe 		spin_unlock(&bdi->wb_lock);
15003ba3782SJens Axboe 
15103ba3782SJens Axboe 		wb_work_complete(work);
15203ba3782SJens Axboe 	}
15303ba3782SJens Axboe }
15403ba3782SJens Axboe 
15503ba3782SJens Axboe static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
15603ba3782SJens Axboe {
15703ba3782SJens Axboe 	work->seen = bdi->wb_mask;
15803ba3782SJens Axboe 	BUG_ON(!work->seen);
15903ba3782SJens Axboe 	atomic_set(&work->pending, bdi->wb_cnt);
16003ba3782SJens Axboe 	BUG_ON(!bdi->wb_cnt);
16103ba3782SJens Axboe 
16203ba3782SJens Axboe 	/*
163deed62edSNick Piggin 	 * list_add_tail_rcu() contains the necessary barriers to
164deed62edSNick Piggin 	 * make sure the above stores are seen before the item is
165deed62edSNick Piggin 	 * noticed on the list
1661da177e4SLinus Torvalds 	 */
16703ba3782SJens Axboe 	spin_lock(&bdi->wb_lock);
16803ba3782SJens Axboe 	list_add_tail_rcu(&work->list, &bdi->work_list);
16903ba3782SJens Axboe 	spin_unlock(&bdi->wb_lock);
1701da177e4SLinus Torvalds 
1711da177e4SLinus Torvalds 	/*
17203ba3782SJens Axboe 	 * If the default thread isn't there, make sure we add it. When
17303ba3782SJens Axboe 	 * it gets created and wakes up, we'll run this work.
1741da177e4SLinus Torvalds 	 */
17503ba3782SJens Axboe 	if (unlikely(list_empty_careful(&bdi->wb_list)))
17603ba3782SJens Axboe 		wake_up_process(default_backing_dev_info.wb.task);
17703ba3782SJens Axboe 	else {
17803ba3782SJens Axboe 		struct bdi_writeback *wb = &bdi->wb;
1791da177e4SLinus Torvalds 
1801ef7d9aaSNick Piggin 		if (wb->task)
18103ba3782SJens Axboe 			wake_up_process(wb->task);
1821da177e4SLinus Torvalds 	}
18303ba3782SJens Axboe }
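/*
 * Editorial note: the queue/consume handshake relies on the RCU list
 * primitives for ordering. A minimal sketch of the reader side (see
 * get_next_work_item() further down):
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(work, &bdi->work_list, list)
 *		...;	// ->seen and ->pending are fully initialised here
 *	rcu_read_unlock();
 *
 * list_add_tail_rcu() publishes the item only after the stores above it,
 * so a reader never observes a half-written bdi_work.
 */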
1841da177e4SLinus Torvalds 
1851da177e4SLinus Torvalds /*
18603ba3782SJens Axboe  * Used for on-stack allocated work items. The caller needs to wait until
18703ba3782SJens Axboe  * the wb threads have acked the work before it's safe to continue.
1881da177e4SLinus Torvalds  */
18903ba3782SJens Axboe static void bdi_wait_on_work_clear(struct bdi_work *work)
1901da177e4SLinus Torvalds {
19103ba3782SJens Axboe 	wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait,
19203ba3782SJens Axboe 		    TASK_UNINTERRUPTIBLE);
19303ba3782SJens Axboe }
19403ba3782SJens Axboe 
195f11fcae8SJens Axboe static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
1967c8a3554SJens Axboe 				 struct wb_writeback_args *args,
1977c8a3554SJens Axboe 				 int wait)
19803ba3782SJens Axboe {
19903ba3782SJens Axboe 	struct bdi_work *work;
20003ba3782SJens Axboe 
201bcddc3f0SJens Axboe 	/*
202bcddc3f0SJens Axboe 	 * This is WB_SYNC_NONE writeback, so if allocation fails just
203bcddc3f0SJens Axboe 	 * wake up the thread for old dirty data writeback
204bcddc3f0SJens Axboe 	 */
20503ba3782SJens Axboe 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
206bcddc3f0SJens Axboe 	if (work) {
207b6e51316SJens Axboe 		bdi_work_init(work, args);
208f11fcae8SJens Axboe 		bdi_queue_work(bdi, work);
2097c8a3554SJens Axboe 		if (wait)
2107c8a3554SJens Axboe 			bdi_wait_on_work_clear(work);
211bcddc3f0SJens Axboe 	} else {
212bcddc3f0SJens Axboe 		struct bdi_writeback *wb = &bdi->wb;
213bcddc3f0SJens Axboe 
214bcddc3f0SJens Axboe 		if (wb->task)
215bcddc3f0SJens Axboe 			wake_up_process(wb->task);
216bcddc3f0SJens Axboe 	}
21703ba3782SJens Axboe }
21803ba3782SJens Axboe 
219b6e51316SJens Axboe /**
220b6e51316SJens Axboe  * bdi_sync_writeback - start and wait for writeback
221b6e51316SJens Axboe  * @bdi: the backing device to write from
222b6e51316SJens Axboe  * @sb: write inodes from this super_block
223b6e51316SJens Axboe  *
224b6e51316SJens Axboe  * Description:
225b6e51316SJens Axboe  *   This does WB_SYNC_ALL data integrity writeback and waits for the
226b6e51316SJens Axboe  *   IO to complete. Callers must hold the sb s_umount semaphore for
227b6e51316SJens Axboe  *   reading, to avoid having the super disappear before we are done.
22803ba3782SJens Axboe  */
229b6e51316SJens Axboe static void bdi_sync_writeback(struct backing_dev_info *bdi,
230b6e51316SJens Axboe 			       struct super_block *sb)
231b6e51316SJens Axboe {
232b6e51316SJens Axboe 	struct wb_writeback_args args = {
233b6e51316SJens Axboe 		.sb		= sb,
234b6e51316SJens Axboe 		.sync_mode	= WB_SYNC_ALL,
235b6e51316SJens Axboe 		.nr_pages	= LONG_MAX,
236b6e51316SJens Axboe 		.range_cyclic	= 0,
237e913fc82SJens Axboe 		/*
238e913fc82SJens Axboe 		 * Setting sb_pinned is not necessary for WB_SYNC_ALL, but
239e913fc82SJens Axboe 		 * let's make it explicitly clear.
240e913fc82SJens Axboe 		 */
241e913fc82SJens Axboe 		.sb_pinned	= 1,
242b6e51316SJens Axboe 	};
243f0fad8a5SChristoph Hellwig 	struct bdi_work work;
244f0fad8a5SChristoph Hellwig 
245b6e51316SJens Axboe 	bdi_work_init(&work, &args);
246f0fad8a5SChristoph Hellwig 	work.state |= WS_ONSTACK;
247f0fad8a5SChristoph Hellwig 
248b6e51316SJens Axboe 	bdi_queue_work(bdi, &work);
249f0fad8a5SChristoph Hellwig 	bdi_wait_on_work_clear(&work);
25003ba3782SJens Axboe }
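/*
 * Illustrative (hypothetical) caller, assuming s_umount is already held for
 * reading as the function comment above requires; the bdi lookup is left
 * abstract since it depends on the caller's context:
 *
 *	struct backing_dev_info *bdi = ...;	// the bdi backing @sb
 *
 *	down_read(&sb->s_umount);
 *	bdi_sync_writeback(bdi, sb);	// blocks until the IO completes
 *	up_read(&sb->s_umount);
 */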
251b6e51316SJens Axboe 
252b6e51316SJens Axboe /**
253b6e51316SJens Axboe  * bdi_start_writeback - start writeback
254b6e51316SJens Axboe  * @bdi: the backing device to write from
2554b6764faSJaswinder Singh Rajput  * @sb: write inodes from this super_block
256b6e51316SJens Axboe  * @nr_pages: the number of pages to write
257e913fc82SJens Axboe  * @sb_locked: caller already holds sb umount sem.
258b6e51316SJens Axboe  *
259b6e51316SJens Axboe  * Description:
260b6e51316SJens Axboe  *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
261b6e51316SJens Axboe  *   started when this function returns, we make no guarantees on
262e913fc82SJens Axboe  *   completion. Caller specifies whether sb umount sem is held already or not.
263b6e51316SJens Axboe  *
264b6e51316SJens Axboe  */
265a72bfd4dSJens Axboe void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
266e913fc82SJens Axboe 			 long nr_pages, int sb_locked)
267b6e51316SJens Axboe {
268b6e51316SJens Axboe 	struct wb_writeback_args args = {
269a72bfd4dSJens Axboe 		.sb		= sb,
270b6e51316SJens Axboe 		.sync_mode	= WB_SYNC_NONE,
271b6e51316SJens Axboe 		.nr_pages	= nr_pages,
272b6e51316SJens Axboe 		.range_cyclic	= 1,
273e913fc82SJens Axboe 		.sb_pinned	= sb_locked,
274b6e51316SJens Axboe 	};
275b6e51316SJens Axboe 
276d3ddec76SWu Fengguang 	/*
277d3ddec76SWu Fengguang 	 * We treat @nr_pages=0 as the special case to do background writeback,
278d3ddec76SWu Fengguang 	 * ie. to sync pages until the background dirty threshold is reached.
279d3ddec76SWu Fengguang 	 */
280d3ddec76SWu Fengguang 	if (!nr_pages) {
281d3ddec76SWu Fengguang 		args.nr_pages = LONG_MAX;
282d3ddec76SWu Fengguang 		args.for_background = 1;
283d3ddec76SWu Fengguang 	}
284d3ddec76SWu Fengguang 
2857c8a3554SJens Axboe 	bdi_alloc_queue_work(bdi, &args, sb_locked);
2861da177e4SLinus Torvalds }
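/*
 * Illustrative usage: per the nr_pages == 0 special case above, kicking
 * background writeback without pinning the sb looks like:
 *
 *	bdi_start_writeback(bdi, NULL, 0, 0);
 *
 * which queues WB_SYNC_NONE work with nr_pages = LONG_MAX and
 * for_background = 1, and returns before any IO has completed.
 */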
2871da177e4SLinus Torvalds 
2881da177e4SLinus Torvalds /*
2896610a0bcSAndrew Morton  * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
2906610a0bcSAndrew Morton  * furthest end of its superblock's dirty-inode list.
2916610a0bcSAndrew Morton  *
2926610a0bcSAndrew Morton  * Before stamping the inode's ->dirtied_when, we check to see whether it is
29366f3b8e2SJens Axboe  * already the most-recently-dirtied inode on the b_dirty list.  If that is
2946610a0bcSAndrew Morton  * the case then the inode must have been redirtied while it was being written
2956610a0bcSAndrew Morton  * out and we don't reset its dirtied_when.
2966610a0bcSAndrew Morton  */
2976610a0bcSAndrew Morton static void redirty_tail(struct inode *inode)
2986610a0bcSAndrew Morton {
29903ba3782SJens Axboe 	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
3006610a0bcSAndrew Morton 
30103ba3782SJens Axboe 	if (!list_empty(&wb->b_dirty)) {
30266f3b8e2SJens Axboe 		struct inode *tail;
3036610a0bcSAndrew Morton 
30403ba3782SJens Axboe 		tail = list_entry(wb->b_dirty.next, struct inode, i_list);
30566f3b8e2SJens Axboe 		if (time_before(inode->dirtied_when, tail->dirtied_when))
3066610a0bcSAndrew Morton 			inode->dirtied_when = jiffies;
3076610a0bcSAndrew Morton 	}
30803ba3782SJens Axboe 	list_move(&inode->i_list, &wb->b_dirty);
3096610a0bcSAndrew Morton }
3106610a0bcSAndrew Morton 
3116610a0bcSAndrew Morton /*
31266f3b8e2SJens Axboe  * requeue inode for re-scanning after bdi->b_io list is exhausted.
313c986d1e2SAndrew Morton  */
3140e0f4fc2SKen Chen static void requeue_io(struct inode *inode)
315c986d1e2SAndrew Morton {
31603ba3782SJens Axboe 	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
31703ba3782SJens Axboe 
31803ba3782SJens Axboe 	list_move(&inode->i_list, &wb->b_more_io);
319c986d1e2SAndrew Morton }
320c986d1e2SAndrew Morton 
3211c0eeaf5SJoern Engel static void inode_sync_complete(struct inode *inode)
3221c0eeaf5SJoern Engel {
3231c0eeaf5SJoern Engel 	/*
3241c0eeaf5SJoern Engel 	 * Prevent speculative execution through spin_unlock(&inode_lock);
3251c0eeaf5SJoern Engel 	 */
3261c0eeaf5SJoern Engel 	smp_mb();
3271c0eeaf5SJoern Engel 	wake_up_bit(&inode->i_state, __I_SYNC);
3281c0eeaf5SJoern Engel }
3291c0eeaf5SJoern Engel 
330d2caa3c5SJeff Layton static bool inode_dirtied_after(struct inode *inode, unsigned long t)
331d2caa3c5SJeff Layton {
332d2caa3c5SJeff Layton 	bool ret = time_after(inode->dirtied_when, t);
333d2caa3c5SJeff Layton #ifndef CONFIG_64BIT
334d2caa3c5SJeff Layton 	/*
335d2caa3c5SJeff Layton 	 * For inodes being constantly redirtied, dirtied_when can get stuck.
336d2caa3c5SJeff Layton 	 * It _appears_ to be in the future, but is actually in the distant past.
337d2caa3c5SJeff Layton 	 * This test is necessary to prevent such wrapped-around relative times
3385b0830cbSJens Axboe 	 * from permanently stopping the whole bdi writeback.
339d2caa3c5SJeff Layton 	 */
340d2caa3c5SJeff Layton 	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
341d2caa3c5SJeff Layton #endif
342d2caa3c5SJeff Layton 	return ret;
343d2caa3c5SJeff Layton }
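/*
 * Editorial example of the 32-bit guard above: with HZ=1000, jiffies wraps
 * roughly every 49.7 days. An inode dirtied just before the wrap can make
 * time_after(dirtied_when, t) return true even though the dirtying is
 * ancient; the extra time_before_eq(dirtied_when, jiffies) check rejects
 * such stale "future" timestamps.
 */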
344d2caa3c5SJeff Layton 
345c986d1e2SAndrew Morton /*
3462c136579SFengguang Wu  * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
3472c136579SFengguang Wu  */
3482c136579SFengguang Wu static void move_expired_inodes(struct list_head *delaying_queue,
3492c136579SFengguang Wu 			       struct list_head *dispatch_queue,
3502c136579SFengguang Wu 				unsigned long *older_than_this)
3512c136579SFengguang Wu {
3525c03449dSShaohua Li 	LIST_HEAD(tmp);
3535c03449dSShaohua Li 	struct list_head *pos, *node;
354cf137307SJens Axboe 	struct super_block *sb = NULL;
3555c03449dSShaohua Li 	struct inode *inode;
356cf137307SJens Axboe 	int do_sb_sort = 0;
3575c03449dSShaohua Li 
3582c136579SFengguang Wu 	while (!list_empty(delaying_queue)) {
3595c03449dSShaohua Li 		inode = list_entry(delaying_queue->prev, struct inode, i_list);
3602c136579SFengguang Wu 		if (older_than_this &&
361d2caa3c5SJeff Layton 		    inode_dirtied_after(inode, *older_than_this))
3622c136579SFengguang Wu 			break;
363cf137307SJens Axboe 		if (sb && sb != inode->i_sb)
364cf137307SJens Axboe 			do_sb_sort = 1;
365cf137307SJens Axboe 		sb = inode->i_sb;
3665c03449dSShaohua Li 		list_move(&inode->i_list, &tmp);
3675c03449dSShaohua Li 	}
3685c03449dSShaohua Li 
369cf137307SJens Axboe 	/* just one sb in list, splice to dispatch_queue and we're done */
370cf137307SJens Axboe 	if (!do_sb_sort) {
371cf137307SJens Axboe 		list_splice(&tmp, dispatch_queue);
372cf137307SJens Axboe 		return;
373cf137307SJens Axboe 	}
374cf137307SJens Axboe 
3755c03449dSShaohua Li 	/* Move inodes from one superblock together */
3765c03449dSShaohua Li 	while (!list_empty(&tmp)) {
3775c03449dSShaohua Li 		inode = list_entry(tmp.prev, struct inode, i_list);
3785c03449dSShaohua Li 		sb = inode->i_sb;
3795c03449dSShaohua Li 		list_for_each_prev_safe(pos, node, &tmp) {
3805c03449dSShaohua Li 			inode = list_entry(pos, struct inode, i_list);
3815c03449dSShaohua Li 			if (inode->i_sb == sb)
3822c136579SFengguang Wu 				list_move(&inode->i_list, dispatch_queue);
3832c136579SFengguang Wu 		}
3842c136579SFengguang Wu 	}
3855c03449dSShaohua Li }
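/*
 * Editorial example of the superblock sort above: if the expired inodes
 * arrive interleaved, e.g.
 *
 *	A(sb1), B(sb2), C(sb1)
 *
 * the second pass emits them grouped per superblock, e.g.
 *
 *	A(sb1), C(sb1), B(sb2)
 *
 * so writeback_sb_inodes() can batch work per sb instead of bouncing
 * between superblocks.
 */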
3862c136579SFengguang Wu 
3872c136579SFengguang Wu /*
3882c136579SFengguang Wu  * Queue all expired dirty inodes for io, eldest first.
3892c136579SFengguang Wu  */
39003ba3782SJens Axboe static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
3912c136579SFengguang Wu {
39203ba3782SJens Axboe 	list_splice_init(&wb->b_more_io, wb->b_io.prev);
39303ba3782SJens Axboe 	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
39466f3b8e2SJens Axboe }
39566f3b8e2SJens Axboe 
396a9185b41SChristoph Hellwig static int write_inode(struct inode *inode, struct writeback_control *wbc)
39766f3b8e2SJens Axboe {
39803ba3782SJens Axboe 	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
399a9185b41SChristoph Hellwig 		return inode->i_sb->s_op->write_inode(inode, wbc);
40003ba3782SJens Axboe 	return 0;
40166f3b8e2SJens Axboe }
40208d8e974SFengguang Wu 
4032c136579SFengguang Wu /*
40401c03194SChristoph Hellwig  * Wait for writeback on an inode to complete.
40501c03194SChristoph Hellwig  */
40601c03194SChristoph Hellwig static void inode_wait_for_writeback(struct inode *inode)
40701c03194SChristoph Hellwig {
40801c03194SChristoph Hellwig 	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
40901c03194SChristoph Hellwig 	wait_queue_head_t *wqh;
41001c03194SChristoph Hellwig 
41101c03194SChristoph Hellwig 	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
41258a9d3d8SRichard Kennedy 	while (inode->i_state & I_SYNC) {
41301c03194SChristoph Hellwig 		spin_unlock(&inode_lock);
41401c03194SChristoph Hellwig 		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
41501c03194SChristoph Hellwig 		spin_lock(&inode_lock);
41658a9d3d8SRichard Kennedy 	}
41701c03194SChristoph Hellwig }
41801c03194SChristoph Hellwig 
41901c03194SChristoph Hellwig /*
42001c03194SChristoph Hellwig  * Write out an inode's dirty pages.  Called under inode_lock.  Either the
42101c03194SChristoph Hellwig  * caller has ref on the inode (either via __iget or via syscall against an fd)
42201c03194SChristoph Hellwig  * or the inode has I_WILL_FREE set (via generic_forget_inode)
42301c03194SChristoph Hellwig  *
4241da177e4SLinus Torvalds  * If `wait' is set, wait on the writeout.
4251da177e4SLinus Torvalds  *
4261da177e4SLinus Torvalds  * The whole writeout design is quite complex and fragile.  We want to avoid
4271da177e4SLinus Torvalds  * starvation of particular inodes when others are being redirtied, prevent
4281da177e4SLinus Torvalds  * livelocks, etc.
4291da177e4SLinus Torvalds  *
4301da177e4SLinus Torvalds  * Called under inode_lock.
4311da177e4SLinus Torvalds  */
4321da177e4SLinus Torvalds static int
43301c03194SChristoph Hellwig writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
4341da177e4SLinus Torvalds {
4351da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
43601c03194SChristoph Hellwig 	unsigned dirty;
4371da177e4SLinus Torvalds 	int ret;
4381da177e4SLinus Torvalds 
43901c03194SChristoph Hellwig 	if (!atomic_read(&inode->i_count))
44001c03194SChristoph Hellwig 		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
44101c03194SChristoph Hellwig 	else
44201c03194SChristoph Hellwig 		WARN_ON(inode->i_state & I_WILL_FREE);
44301c03194SChristoph Hellwig 
44401c03194SChristoph Hellwig 	if (inode->i_state & I_SYNC) {
44501c03194SChristoph Hellwig 		/*
44601c03194SChristoph Hellwig 		 * If this inode is locked for writeback and we are not doing
44766f3b8e2SJens Axboe 		 * writeback-for-data-integrity, move it to b_more_io so that
44801c03194SChristoph Hellwig 		 * writeback can proceed with the other inodes on s_io.
44901c03194SChristoph Hellwig 		 *
45001c03194SChristoph Hellwig 		 * We'll have another go at writing back this inode when we
45166f3b8e2SJens Axboe 		 * completed a full scan of b_io.
45201c03194SChristoph Hellwig 		 */
453a9185b41SChristoph Hellwig 		if (wbc->sync_mode != WB_SYNC_ALL) {
45401c03194SChristoph Hellwig 			requeue_io(inode);
45501c03194SChristoph Hellwig 			return 0;
45601c03194SChristoph Hellwig 		}
45701c03194SChristoph Hellwig 
45801c03194SChristoph Hellwig 		/*
45901c03194SChristoph Hellwig 		 * It's a data-integrity sync.  We must wait.
46001c03194SChristoph Hellwig 		 */
46101c03194SChristoph Hellwig 		inode_wait_for_writeback(inode);
46201c03194SChristoph Hellwig 	}
46301c03194SChristoph Hellwig 
4641c0eeaf5SJoern Engel 	BUG_ON(inode->i_state & I_SYNC);
4651da177e4SLinus Torvalds 
4665547e8aaSDmitry Monakhov 	/* Set I_SYNC, reset I_DIRTY_PAGES */
4671c0eeaf5SJoern Engel 	inode->i_state |= I_SYNC;
4685547e8aaSDmitry Monakhov 	inode->i_state &= ~I_DIRTY_PAGES;
4691da177e4SLinus Torvalds 	spin_unlock(&inode_lock);
4701da177e4SLinus Torvalds 
4711da177e4SLinus Torvalds 	ret = do_writepages(mapping, wbc);
4721da177e4SLinus Torvalds 
47326821ed4SChristoph Hellwig 	/*
47426821ed4SChristoph Hellwig 	 * Make sure to wait on the data before writing out the metadata.
47526821ed4SChristoph Hellwig 	 * This is important for filesystems that modify metadata on data
47626821ed4SChristoph Hellwig 	 * I/O completion.
47726821ed4SChristoph Hellwig 	 */
478a9185b41SChristoph Hellwig 	if (wbc->sync_mode == WB_SYNC_ALL) {
47926821ed4SChristoph Hellwig 		int err = filemap_fdatawait(mapping);
4801da177e4SLinus Torvalds 		if (ret == 0)
4811da177e4SLinus Torvalds 			ret = err;
4821da177e4SLinus Torvalds 	}
4831da177e4SLinus Torvalds 
4845547e8aaSDmitry Monakhov 	/*
4855547e8aaSDmitry Monakhov 	 * Some filesystems may redirty the inode during the writeback
4865547e8aaSDmitry Monakhov 	 * due to delalloc, clear dirty metadata flags right before
4875547e8aaSDmitry Monakhov 	 * write_inode()
4885547e8aaSDmitry Monakhov 	 */
4895547e8aaSDmitry Monakhov 	spin_lock(&inode_lock);
4905547e8aaSDmitry Monakhov 	dirty = inode->i_state & I_DIRTY;
4915547e8aaSDmitry Monakhov 	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
4925547e8aaSDmitry Monakhov 	spin_unlock(&inode_lock);
49326821ed4SChristoph Hellwig 	/* Don't write the inode if only I_DIRTY_PAGES was set */
49426821ed4SChristoph Hellwig 	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
495a9185b41SChristoph Hellwig 		int err = write_inode(inode, wbc);
4961da177e4SLinus Torvalds 		if (ret == 0)
4971da177e4SLinus Torvalds 			ret = err;
4981da177e4SLinus Torvalds 	}
4991da177e4SLinus Torvalds 
5001da177e4SLinus Torvalds 	spin_lock(&inode_lock);
5011c0eeaf5SJoern Engel 	inode->i_state &= ~I_SYNC;
50284a89245SWu Fengguang 	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
503b3af9468SWu Fengguang 		if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
504ae1b7f7dSWu Fengguang 			/*
505b3af9468SWu Fengguang 			 * More pages get dirtied by a fast dirtier.
506b3af9468SWu Fengguang 			 */
507b3af9468SWu Fengguang 			goto select_queue;
508b3af9468SWu Fengguang 		} else if (inode->i_state & I_DIRTY) {
509b3af9468SWu Fengguang 			/*
510b3af9468SWu Fengguang 			 * At least XFS will redirty the inode during the
511b3af9468SWu Fengguang 			 * writeback (delalloc) and on io completion (isize).
512ae1b7f7dSWu Fengguang 			 */
513ae1b7f7dSWu Fengguang 			redirty_tail(inode);
514ae1b7f7dSWu Fengguang 		} else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
5151da177e4SLinus Torvalds 			/*
5161da177e4SLinus Torvalds 			 * We didn't write back all the pages.  nfs_writepages()
5171da177e4SLinus Torvalds 			 * sometimes bails out without doing anything. Redirty
51866f3b8e2SJens Axboe 			 * the inode; Move it from b_io onto b_more_io/b_dirty.
5191b43ef91SAndrew Morton 			 */
5201b43ef91SAndrew Morton 			/*
5211b43ef91SAndrew Morton 			 * akpm: if the caller was the kupdate function we put
52266f3b8e2SJens Axboe 			 * this inode at the head of b_dirty so it gets first
5231b43ef91SAndrew Morton 			 * consideration.  Otherwise, move it to the tail, for
5241b43ef91SAndrew Morton 			 * the reasons described there.  I'm not really sure
5251b43ef91SAndrew Morton 			 * how much sense this makes.  Presumably I had good
5261b43ef91SAndrew Morton 			 * reasons for doing it this way, and I'd rather not
5271b43ef91SAndrew Morton 			 * muck with it at present.
5281da177e4SLinus Torvalds 			 */
5291da177e4SLinus Torvalds 			if (wbc->for_kupdate) {
5301da177e4SLinus Torvalds 				/*
5312c136579SFengguang Wu 				 * For the kupdate function we move the inode
53266f3b8e2SJens Axboe 				 * to b_more_io so it will get more writeout as
5332c136579SFengguang Wu 				 * soon as the queue becomes uncongested.
5341da177e4SLinus Torvalds 				 */
5351da177e4SLinus Torvalds 				inode->i_state |= I_DIRTY_PAGES;
536b3af9468SWu Fengguang select_queue:
5378bc3be27SFengguang Wu 				if (wbc->nr_to_write <= 0) {
5388bc3be27SFengguang Wu 					/*
5398bc3be27SFengguang Wu 					 * slice used up: queue for next turn
5408bc3be27SFengguang Wu 					 */
5410e0f4fc2SKen Chen 					requeue_io(inode);
5421da177e4SLinus Torvalds 				} else {
5431da177e4SLinus Torvalds 					/*
5448bc3be27SFengguang Wu 					 * somehow blocked: retry later
5458bc3be27SFengguang Wu 					 */
5468bc3be27SFengguang Wu 					redirty_tail(inode);
5478bc3be27SFengguang Wu 				}
5488bc3be27SFengguang Wu 			} else {
5498bc3be27SFengguang Wu 				/*
5501da177e4SLinus Torvalds 				 * Otherwise fully redirty the inode so that
5511da177e4SLinus Torvalds 				 * other inodes on this superblock will get some
5521da177e4SLinus Torvalds 				 * writeout.  Otherwise heavy writing to one
5531da177e4SLinus Torvalds 				 * file would indefinitely suspend writeout of
5541da177e4SLinus Torvalds 				 * all the other files.
5551da177e4SLinus Torvalds 				 */
5561da177e4SLinus Torvalds 				inode->i_state |= I_DIRTY_PAGES;
5571b43ef91SAndrew Morton 				redirty_tail(inode);
5581da177e4SLinus Torvalds 			}
5591da177e4SLinus Torvalds 		} else if (atomic_read(&inode->i_count)) {
5601da177e4SLinus Torvalds 			/*
5611da177e4SLinus Torvalds 			 * The inode is clean, inuse
5621da177e4SLinus Torvalds 			 */
5631da177e4SLinus Torvalds 			list_move(&inode->i_list, &inode_in_use);
5641da177e4SLinus Torvalds 		} else {
5651da177e4SLinus Torvalds 			/*
5661da177e4SLinus Torvalds 			 * The inode is clean, unused
5671da177e4SLinus Torvalds 			 */
5681da177e4SLinus Torvalds 			list_move(&inode->i_list, &inode_unused);
5691da177e4SLinus Torvalds 		}
5701da177e4SLinus Torvalds 	}
5711c0eeaf5SJoern Engel 	inode_sync_complete(inode);
5721da177e4SLinus Torvalds 	return ret;
5731da177e4SLinus Torvalds }
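/*
 * Rough editorial summary (not from the original source) of where the inode
 * lands after the write attempt above:
 *
 *	dirty pages remain + for_kupdate -> b_more_io or b_dirty (select_queue)
 *	metadata redirtied (I_DIRTY)	 -> b_dirty (redirty_tail)
 *	pages left in the mapping	 -> b_more_io/b_dirty per for_kupdate
 *	clean and in use		 -> inode_in_use
 *	clean and unused		 -> inode_unused
 */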
5741da177e4SLinus Torvalds 
575f11c9c5cSEdward Shishkin static void unpin_sb_for_writeback(struct super_block *sb)
5769ecc2738SJens Axboe {
5779ecc2738SJens Axboe 	up_read(&sb->s_umount);
5789ecc2738SJens Axboe 	put_super(sb);
5799ecc2738SJens Axboe }
580f11c9c5cSEdward Shishkin 
581f11c9c5cSEdward Shishkin enum sb_pin_state {
582f11c9c5cSEdward Shishkin 	SB_PINNED,
583f11c9c5cSEdward Shishkin 	SB_NOT_PINNED,
584f11c9c5cSEdward Shishkin 	SB_PIN_FAILED
585f11c9c5cSEdward Shishkin };
5869ecc2738SJens Axboe 
58703ba3782SJens Axboe /*
58803ba3782SJens Axboe  * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
58903ba3782SJens Axboe  * before calling writeback. So make sure that we do pin it, so it doesn't
59003ba3782SJens Axboe  * go away while we are writing inodes from it.
59103ba3782SJens Axboe  */
592f11c9c5cSEdward Shishkin static enum sb_pin_state pin_sb_for_writeback(struct writeback_control *wbc,
593f11c9c5cSEdward Shishkin 					      struct super_block *sb)
5941da177e4SLinus Torvalds {
5959ecc2738SJens Axboe 	/*
59603ba3782SJens Axboe 	 * Caller must already hold the ref for this
59703ba3782SJens Axboe 	 */
598e913fc82SJens Axboe 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->sb_pinned) {
59903ba3782SJens Axboe 		WARN_ON(!rwsem_is_locked(&sb->s_umount));
600f11c9c5cSEdward Shishkin 		return SB_NOT_PINNED;
60103ba3782SJens Axboe 	}
60203ba3782SJens Axboe 	spin_lock(&sb_lock);
60303ba3782SJens Axboe 	sb->s_count++;
60403ba3782SJens Axboe 	if (down_read_trylock(&sb->s_umount)) {
60503ba3782SJens Axboe 		if (sb->s_root) {
60603ba3782SJens Axboe 			spin_unlock(&sb_lock);
607f11c9c5cSEdward Shishkin 			return SB_PINNED;
60803ba3782SJens Axboe 		}
60903ba3782SJens Axboe 		/*
61003ba3782SJens Axboe 		 * umounted, drop rwsem again and fall through to failure
61103ba3782SJens Axboe 		 */
61203ba3782SJens Axboe 		up_read(&sb->s_umount);
61303ba3782SJens Axboe 	}
61403ba3782SJens Axboe 	sb->s_count--;
61503ba3782SJens Axboe 	spin_unlock(&sb_lock);
616f11c9c5cSEdward Shishkin 	return SB_PIN_FAILED;
61703ba3782SJens Axboe }
61803ba3782SJens Axboe 
619f11c9c5cSEdward Shishkin /*
620f11c9c5cSEdward Shishkin  * Write a portion of b_io inodes which belong to @sb.
621f11c9c5cSEdward Shishkin  * If @wbc->sb != NULL, then find and write all such
622f11c9c5cSEdward Shishkin  * inodes. Otherwise write only ones which go sequentially
623f11c9c5cSEdward Shishkin  * in reverse order.
624f11c9c5cSEdward Shishkin  * Return 1 if the caller's writeback routine should be
625f11c9c5cSEdward Shishkin  * interrupted. Otherwise return 0.
626f11c9c5cSEdward Shishkin  */
627f11c9c5cSEdward Shishkin static int writeback_sb_inodes(struct super_block *sb,
628f11c9c5cSEdward Shishkin 			       struct bdi_writeback *wb,
62903ba3782SJens Axboe 			       struct writeback_control *wbc)
63003ba3782SJens Axboe {
63103ba3782SJens Axboe 	while (!list_empty(&wb->b_io)) {
632f11c9c5cSEdward Shishkin 		long pages_skipped;
63303ba3782SJens Axboe 		struct inode *inode = list_entry(wb->b_io.prev,
6341da177e4SLinus Torvalds 						 struct inode, i_list);
635f11c9c5cSEdward Shishkin 		if (wbc->sb && sb != inode->i_sb) {
636f11c9c5cSEdward Shishkin 			/* super block given and doesn't
637f11c9c5cSEdward Shishkin 			   match, skip this inode */
63866f3b8e2SJens Axboe 			redirty_tail(inode);
63966f3b8e2SJens Axboe 			continue;
64066f3b8e2SJens Axboe 		}
641f11c9c5cSEdward Shishkin 		if (sb != inode->i_sb)
642f11c9c5cSEdward Shishkin 			/* finish with this superblock */
643f11c9c5cSEdward Shishkin 			return 0;
64484a89245SWu Fengguang 		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
6457ef0d737SNick Piggin 			requeue_io(inode);
6467ef0d737SNick Piggin 			continue;
6477ef0d737SNick Piggin 		}
648d2caa3c5SJeff Layton 		/*
649d2caa3c5SJeff Layton 		 * Was this inode dirtied after sync_sb_inodes was called?
650d2caa3c5SJeff Layton 		 * This keeps sync from extra jobs and livelock.
651d2caa3c5SJeff Layton 		 */
652f11c9c5cSEdward Shishkin 		if (inode_dirtied_after(inode, wbc->wb_start))
653f11c9c5cSEdward Shishkin 			return 1;
6541da177e4SLinus Torvalds 
65584a89245SWu Fengguang 		BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
6561da177e4SLinus Torvalds 		__iget(inode);
6571da177e4SLinus Torvalds 		pages_skipped = wbc->pages_skipped;
65801c03194SChristoph Hellwig 		writeback_single_inode(inode, wbc);
6591da177e4SLinus Torvalds 		if (wbc->pages_skipped != pages_skipped) {
6601da177e4SLinus Torvalds 			/*
6611da177e4SLinus Torvalds 			 * writeback is not making progress due to locked
6621da177e4SLinus Torvalds 			 * buffers.  Skip this inode for now.
6631da177e4SLinus Torvalds 			 */
664f57b9b7bSAndrew Morton 			redirty_tail(inode);
6651da177e4SLinus Torvalds 		}
6661da177e4SLinus Torvalds 		spin_unlock(&inode_lock);
6671da177e4SLinus Torvalds 		iput(inode);
6684ffc8444SOGAWA Hirofumi 		cond_resched();
6691da177e4SLinus Torvalds 		spin_lock(&inode_lock);
6708bc3be27SFengguang Wu 		if (wbc->nr_to_write <= 0) {
6718bc3be27SFengguang Wu 			wbc->more_io = 1;
672f11c9c5cSEdward Shishkin 			return 1;
6731da177e4SLinus Torvalds 		}
67403ba3782SJens Axboe 		if (!list_empty(&wb->b_more_io))
6758bc3be27SFengguang Wu 			wbc->more_io = 1;
6768bc3be27SFengguang Wu 	}
677f11c9c5cSEdward Shishkin 	/* b_io is empty */
678f11c9c5cSEdward Shishkin 	return 1;
679f11c9c5cSEdward Shishkin }
68038f21977SNick Piggin 
681f11c9c5cSEdward Shishkin static void writeback_inodes_wb(struct bdi_writeback *wb,
682f11c9c5cSEdward Shishkin 				struct writeback_control *wbc)
683f11c9c5cSEdward Shishkin {
684f11c9c5cSEdward Shishkin 	int ret = 0;
6859ecc2738SJens Axboe 
686f11c9c5cSEdward Shishkin 	wbc->wb_start = jiffies; /* livelock avoidance */
687f11c9c5cSEdward Shishkin 	spin_lock(&inode_lock);
688f11c9c5cSEdward Shishkin 	if (!wbc->for_kupdate || list_empty(&wb->b_io))
689f11c9c5cSEdward Shishkin 		queue_io(wb, wbc->older_than_this);
690f11c9c5cSEdward Shishkin 
691f11c9c5cSEdward Shishkin 	while (!list_empty(&wb->b_io)) {
692f11c9c5cSEdward Shishkin 		struct inode *inode = list_entry(wb->b_io.prev,
693f11c9c5cSEdward Shishkin 						 struct inode, i_list);
694f11c9c5cSEdward Shishkin 		struct super_block *sb = inode->i_sb;
695f11c9c5cSEdward Shishkin 		enum sb_pin_state state;
696f11c9c5cSEdward Shishkin 
697f11c9c5cSEdward Shishkin 		if (wbc->sb && sb != wbc->sb) {
698f11c9c5cSEdward Shishkin 			/* super block given and doesn't
699f11c9c5cSEdward Shishkin 			   match, skip this inode */
700f11c9c5cSEdward Shishkin 			redirty_tail(inode);
701f11c9c5cSEdward Shishkin 			continue;
702f11c9c5cSEdward Shishkin 		}
703f11c9c5cSEdward Shishkin 		state = pin_sb_for_writeback(wbc, sb);
704f11c9c5cSEdward Shishkin 
705f11c9c5cSEdward Shishkin 		if (state == SB_PIN_FAILED) {
706f11c9c5cSEdward Shishkin 			requeue_io(inode);
707f11c9c5cSEdward Shishkin 			continue;
708f11c9c5cSEdward Shishkin 		}
709f11c9c5cSEdward Shishkin 		ret = writeback_sb_inodes(sb, wb, wbc);
710f11c9c5cSEdward Shishkin 
711f11c9c5cSEdward Shishkin 		if (state == SB_PINNED)
712f11c9c5cSEdward Shishkin 			unpin_sb_for_writeback(sb);
713f11c9c5cSEdward Shishkin 		if (ret)
714f11c9c5cSEdward Shishkin 			break;
715f11c9c5cSEdward Shishkin 	}
71666f3b8e2SJens Axboe 	spin_unlock(&inode_lock);
71766f3b8e2SJens Axboe 	/* Leave any unwritten inodes on b_io */
71866f3b8e2SJens Axboe }
71966f3b8e2SJens Axboe 
72003ba3782SJens Axboe void writeback_inodes_wbc(struct writeback_control *wbc)
72103ba3782SJens Axboe {
72203ba3782SJens Axboe 	struct backing_dev_info *bdi = wbc->bdi;
72303ba3782SJens Axboe 
72403ba3782SJens Axboe 	writeback_inodes_wb(&bdi->wb, wbc);
72503ba3782SJens Axboe }
72603ba3782SJens Axboe 
72703ba3782SJens Axboe /*
72803ba3782SJens Axboe  * The maximum number of pages to writeout in a single bdi flush/kupdate
72903ba3782SJens Axboe  * operation.  We do this so we don't hold I_SYNC against an inode for
73003ba3782SJens Axboe  * enormous amounts of time, which would block a userspace task which has
73103ba3782SJens Axboe  * been forced to throttle against that inode.  Also, the code reevaluates
73203ba3782SJens Axboe  * the dirty state each time it has written this many pages.
73303ba3782SJens Axboe  */
73403ba3782SJens Axboe #define MAX_WRITEBACK_PAGES     1024
73503ba3782SJens Axboe 
73603ba3782SJens Axboe static inline bool over_bground_thresh(void)
73703ba3782SJens Axboe {
73803ba3782SJens Axboe 	unsigned long background_thresh, dirty_thresh;
73903ba3782SJens Axboe 
74003ba3782SJens Axboe 	get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
74103ba3782SJens Axboe 
74203ba3782SJens Axboe 	return (global_page_state(NR_FILE_DIRTY) +
74303ba3782SJens Axboe 		global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
74403ba3782SJens Axboe }
74503ba3782SJens Axboe 
74603ba3782SJens Axboe /*
74703ba3782SJens Axboe  * Explicit flushing or periodic writeback of "old" data.
74803ba3782SJens Axboe  *
74903ba3782SJens Axboe  * Define "old": the first time one of an inode's pages is dirtied, we mark the
75003ba3782SJens Axboe  * dirtying-time in the inode's address_space.  So this periodic writeback code
75103ba3782SJens Axboe  * just walks the superblock inode list, writing back any inodes which are
75203ba3782SJens Axboe  * older than a specific point in time.
75303ba3782SJens Axboe  *
75403ba3782SJens Axboe  * Try to run once per dirty_writeback_interval.  But if a writeback event
75503ba3782SJens Axboe  * takes longer than a dirty_writeback_interval interval, then leave a
75603ba3782SJens Axboe  * one-second gap.
75703ba3782SJens Axboe  *
75803ba3782SJens Axboe  * older_than_this takes precedence over nr_to_write.  So we'll only write back
75903ba3782SJens Axboe  * all dirty pages if they are all attached to "old" mappings.
76003ba3782SJens Axboe  */
761c4a77a6cSJens Axboe static long wb_writeback(struct bdi_writeback *wb,
762c4a77a6cSJens Axboe 			 struct wb_writeback_args *args)
76303ba3782SJens Axboe {
76403ba3782SJens Axboe 	struct writeback_control wbc = {
76503ba3782SJens Axboe 		.bdi			= wb->bdi,
766c4a77a6cSJens Axboe 		.sb			= args->sb,
767c4a77a6cSJens Axboe 		.sync_mode		= args->sync_mode,
76803ba3782SJens Axboe 		.older_than_this	= NULL,
769c4a77a6cSJens Axboe 		.for_kupdate		= args->for_kupdate,
770b17621feSWu Fengguang 		.for_background		= args->for_background,
771c4a77a6cSJens Axboe 		.range_cyclic		= args->range_cyclic,
772e913fc82SJens Axboe 		.sb_pinned		= args->sb_pinned,
77303ba3782SJens Axboe 	};
77403ba3782SJens Axboe 	unsigned long oldest_jif;
77503ba3782SJens Axboe 	long wrote = 0;
776a5989bdcSJan Kara 	struct inode *inode;
77703ba3782SJens Axboe 
77803ba3782SJens Axboe 	if (wbc.for_kupdate) {
77903ba3782SJens Axboe 		wbc.older_than_this = &oldest_jif;
78003ba3782SJens Axboe 		oldest_jif = jiffies -
78103ba3782SJens Axboe 				msecs_to_jiffies(dirty_expire_interval * 10);
78203ba3782SJens Axboe 	}
783c4a77a6cSJens Axboe 	if (!wbc.range_cyclic) {
784c4a77a6cSJens Axboe 		wbc.range_start = 0;
785c4a77a6cSJens Axboe 		wbc.range_end = LLONG_MAX;
786c4a77a6cSJens Axboe 	}
78703ba3782SJens Axboe 
78803ba3782SJens Axboe 	for (;;) {
78903ba3782SJens Axboe 		/*
790d3ddec76SWu Fengguang 		 * Stop writeback when nr_pages has been consumed
79103ba3782SJens Axboe 		 */
792d3ddec76SWu Fengguang 		if (args->nr_pages <= 0)
79303ba3782SJens Axboe 			break;
79403ba3782SJens Axboe 
79503ba3782SJens Axboe 		/*
796d3ddec76SWu Fengguang 		 * For background writeout, stop when we are below the
797d3ddec76SWu Fengguang 		 * background dirty threshold
79803ba3782SJens Axboe 		 */
799d3ddec76SWu Fengguang 		if (args->for_background && !over_bground_thresh())
80003ba3782SJens Axboe 			break;
80103ba3782SJens Axboe 
80203ba3782SJens Axboe 		wbc.more_io = 0;
80303ba3782SJens Axboe 		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
80403ba3782SJens Axboe 		wbc.pages_skipped = 0;
80503ba3782SJens Axboe 		writeback_inodes_wb(wb, &wbc);
806c4a77a6cSJens Axboe 		args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
80703ba3782SJens Axboe 		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
80803ba3782SJens Axboe 
80903ba3782SJens Axboe 		/*
81071fd05a8SJens Axboe 		 * If we consumed everything, see if we have more
81103ba3782SJens Axboe 		 */
81271fd05a8SJens Axboe 		if (wbc.nr_to_write <= 0)
81371fd05a8SJens Axboe 			continue;
81471fd05a8SJens Axboe 		/*
81571fd05a8SJens Axboe 		 * Didn't write everything and we don't have more IO, bail
81671fd05a8SJens Axboe 		 */
81771fd05a8SJens Axboe 		if (!wbc.more_io)
81871fd05a8SJens Axboe 			break;
81971fd05a8SJens Axboe 		/*
82071fd05a8SJens Axboe 		 * Did we write something? Try for more
82171fd05a8SJens Axboe 		 */
822a5989bdcSJan Kara 		if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
82303ba3782SJens Axboe 			continue;
824a5989bdcSJan Kara 		/*
825a5989bdcSJan Kara 		 * Nothing written. Wait for some inode to
826a5989bdcSJan Kara 		 * become available for writeback. Otherwise
827a5989bdcSJan Kara 		 * we'll just busyloop.
828a5989bdcSJan Kara 		 */
829a5989bdcSJan Kara 		spin_lock(&inode_lock);
830a5989bdcSJan Kara 		if (!list_empty(&wb->b_more_io))  {
83171fd05a8SJens Axboe 			inode = list_entry(wb->b_more_io.prev,
832a5989bdcSJan Kara 						struct inode, i_list);
833a5989bdcSJan Kara 			inode_wait_for_writeback(inode);
834a5989bdcSJan Kara 		}
835a5989bdcSJan Kara 		spin_unlock(&inode_lock);
83603ba3782SJens Axboe 	}
83703ba3782SJens Axboe 
83803ba3782SJens Axboe 	return wrote;
83903ba3782SJens Axboe }
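/*
 * Editorial walk-through of the accounting above: each pass grants
 * wbc.nr_to_write = MAX_WRITEBACK_PAGES (1024). If a pass returns with
 * nr_to_write == 200, then 824 pages were written, args->nr_pages drops by
 * 824 and wrote grows by 824. A pass that writes nothing but has more_io
 * set waits on an inode from b_more_io instead of busylooping.
 */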
84003ba3782SJens Axboe 
84103ba3782SJens Axboe /*
84203ba3782SJens Axboe  * Return the next bdi_work struct that hasn't been processed by this
8438010c3b6SJens Axboe  * wb thread yet. ->seen is initially set for each thread that exists
8448010c3b6SJens Axboe  * for this device; when a thread first notices a piece of work, it
8458010c3b6SJens Axboe  * clears its bit. Depending on writeback type, the thread will notify
8468010c3b6SJens Axboe  * completion on either receiving the work (WB_SYNC_NONE) or after
8478010c3b6SJens Axboe  * it is done (WB_SYNC_ALL).
84803ba3782SJens Axboe  */
84903ba3782SJens Axboe static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
85003ba3782SJens Axboe 					   struct bdi_writeback *wb)
85103ba3782SJens Axboe {
85203ba3782SJens Axboe 	struct bdi_work *work, *ret = NULL;
85303ba3782SJens Axboe 
85403ba3782SJens Axboe 	rcu_read_lock();
85503ba3782SJens Axboe 
85603ba3782SJens Axboe 	list_for_each_entry_rcu(work, &bdi->work_list, list) {
85777fad5e6SNick Piggin 		if (!test_bit(wb->nr, &work->seen))
85803ba3782SJens Axboe 			continue;
85977fad5e6SNick Piggin 		clear_bit(wb->nr, &work->seen);
86003ba3782SJens Axboe 
86103ba3782SJens Axboe 		ret = work;
86203ba3782SJens Axboe 		break;
86303ba3782SJens Axboe 	}
86403ba3782SJens Axboe 
86503ba3782SJens Axboe 	rcu_read_unlock();
86603ba3782SJens Axboe 	return ret;
86703ba3782SJens Axboe }
86803ba3782SJens Axboe 
86903ba3782SJens Axboe static long wb_check_old_data_flush(struct bdi_writeback *wb)
87003ba3782SJens Axboe {
87103ba3782SJens Axboe 	unsigned long expired;
87203ba3782SJens Axboe 	long nr_pages;
87303ba3782SJens Axboe 
87469b62d01SJens Axboe 	/*
87569b62d01SJens Axboe 	 * When set to zero, disable periodic writeback
87669b62d01SJens Axboe 	 */
87769b62d01SJens Axboe 	if (!dirty_writeback_interval)
87869b62d01SJens Axboe 		return 0;
87969b62d01SJens Axboe 
88003ba3782SJens Axboe 	expired = wb->last_old_flush +
88103ba3782SJens Axboe 			msecs_to_jiffies(dirty_writeback_interval * 10);
88203ba3782SJens Axboe 	if (time_before(jiffies, expired))
88303ba3782SJens Axboe 		return 0;
88403ba3782SJens Axboe 
88503ba3782SJens Axboe 	wb->last_old_flush = jiffies;
88603ba3782SJens Axboe 	nr_pages = global_page_state(NR_FILE_DIRTY) +
88703ba3782SJens Axboe 			global_page_state(NR_UNSTABLE_NFS) +
88803ba3782SJens Axboe 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
88903ba3782SJens Axboe 
890c4a77a6cSJens Axboe 	if (nr_pages) {
891c4a77a6cSJens Axboe 		struct wb_writeback_args args = {
892c4a77a6cSJens Axboe 			.nr_pages	= nr_pages,
893c4a77a6cSJens Axboe 			.sync_mode	= WB_SYNC_NONE,
894c4a77a6cSJens Axboe 			.for_kupdate	= 1,
895c4a77a6cSJens Axboe 			.range_cyclic	= 1,
896c4a77a6cSJens Axboe 		};
897c4a77a6cSJens Axboe 
898c4a77a6cSJens Axboe 		return wb_writeback(wb, &args);
899c4a77a6cSJens Axboe 	}
90003ba3782SJens Axboe 
90103ba3782SJens Axboe 	return 0;
90203ba3782SJens Axboe }
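/*
 * Editorial note on units: dirty_writeback_interval is kept in
 * centiseconds, hence the "* 10" when converting to milliseconds above.
 * E.g. the default of 500 yields msecs_to_jiffies(5000), i.e. a kupdate
 * style flush at most once every 5 seconds.
 */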
90303ba3782SJens Axboe 
90403ba3782SJens Axboe /*
90503ba3782SJens Axboe  * Retrieve work items and do the writeback they describe
90603ba3782SJens Axboe  */
90703ba3782SJens Axboe long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
90803ba3782SJens Axboe {
90903ba3782SJens Axboe 	struct backing_dev_info *bdi = wb->bdi;
91003ba3782SJens Axboe 	struct bdi_work *work;
911c4a77a6cSJens Axboe 	long wrote = 0;
91203ba3782SJens Axboe 
91303ba3782SJens Axboe 	while ((work = get_next_work_item(bdi, wb)) != NULL) {
914c4a77a6cSJens Axboe 		struct wb_writeback_args args = work->args;
9157c8a3554SJens Axboe 		int post_clear;
91603ba3782SJens Axboe 
91703ba3782SJens Axboe 		/*
91803ba3782SJens Axboe 		 * Override sync mode, in case we must wait for completion
91903ba3782SJens Axboe 		 */
92003ba3782SJens Axboe 		if (force_wait)
921c4a77a6cSJens Axboe 			work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;
92203ba3782SJens Axboe 
9237c8a3554SJens Axboe 		post_clear = args.sync_mode == WB_SYNC_ALL || args.sb_pinned;
9247c8a3554SJens Axboe 
92503ba3782SJens Axboe 		/*
92603ba3782SJens Axboe 		 * If this isn't a data integrity operation, just notify
92703ba3782SJens Axboe 		 * that we have seen this work and we are now starting it.
92803ba3782SJens Axboe 		 */
9297c8a3554SJens Axboe 		if (!post_clear)
93003ba3782SJens Axboe 			wb_clear_pending(wb, work);
93103ba3782SJens Axboe 
932c4a77a6cSJens Axboe 		wrote += wb_writeback(wb, &args);
93303ba3782SJens Axboe 
93403ba3782SJens Axboe 		/*
93503ba3782SJens Axboe 		 * This is a data integrity writeback, so only do the
93603ba3782SJens Axboe 		 * notification when we have completed the work.
93703ba3782SJens Axboe 		 */
9387c8a3554SJens Axboe 		if (post_clear)
93903ba3782SJens Axboe 			wb_clear_pending(wb, work);
94003ba3782SJens Axboe 	}
94103ba3782SJens Axboe 
94203ba3782SJens Axboe 	/*
94303ba3782SJens Axboe 	 * Check for periodic writeback, kupdated() style
94403ba3782SJens Axboe 	 */
94503ba3782SJens Axboe 	wrote += wb_check_old_data_flush(wb);
94603ba3782SJens Axboe 
94703ba3782SJens Axboe 	return wrote;
94803ba3782SJens Axboe }
94903ba3782SJens Axboe 
95003ba3782SJens Axboe /*
95103ba3782SJens Axboe  * Handle writeback of dirty data for the device backed by this bdi. Also
95203ba3782SJens Axboe  * wakes up periodically and does kupdated style flushing.
95303ba3782SJens Axboe  */
95403ba3782SJens Axboe int bdi_writeback_task(struct bdi_writeback *wb)
95503ba3782SJens Axboe {
95603ba3782SJens Axboe 	unsigned long last_active = jiffies;
95703ba3782SJens Axboe 	unsigned long wait_jiffies = -1UL;
95803ba3782SJens Axboe 	long pages_written;
95903ba3782SJens Axboe 
96003ba3782SJens Axboe 	while (!kthread_should_stop()) {
96103ba3782SJens Axboe 		pages_written = wb_do_writeback(wb, 0);
96203ba3782SJens Axboe 
96303ba3782SJens Axboe 		if (pages_written)
96403ba3782SJens Axboe 			last_active = jiffies;
96503ba3782SJens Axboe 		else if (wait_jiffies != -1UL) {
96603ba3782SJens Axboe 			unsigned long max_idle;
96703ba3782SJens Axboe 
96803ba3782SJens Axboe 			/*
96903ba3782SJens Axboe 			 * Longest period of inactivity that we tolerate. If we
97003ba3782SJens Axboe 			 * see dirty data again later, the task will get
97103ba3782SJens Axboe 			 * recreated automatically.
97203ba3782SJens Axboe 			 */
97303ba3782SJens Axboe 			max_idle = max(5UL * 60 * HZ, wait_jiffies);
97403ba3782SJens Axboe 			if (time_after(jiffies, max_idle + last_active))
97503ba3782SJens Axboe 				break;
97603ba3782SJens Axboe 		}
97703ba3782SJens Axboe 
97869b62d01SJens Axboe 		if (dirty_writeback_interval) {
97903ba3782SJens Axboe 			wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
98049db0414SJens Axboe 			schedule_timeout_interruptible(wait_jiffies);
981f9eadbbdSJens Axboe 		} else {
982f9eadbbdSJens Axboe 			set_current_state(TASK_INTERRUPTIBLE);
983f9eadbbdSJens Axboe 			if (list_empty_careful(&wb->bdi->work_list) &&
984f9eadbbdSJens Axboe 			    !kthread_should_stop())
98569b62d01SJens Axboe 				schedule();
986f9eadbbdSJens Axboe 			__set_current_state(TASK_RUNNING);
987f9eadbbdSJens Axboe 		}
98869b62d01SJens Axboe 
98903ba3782SJens Axboe 		try_to_freeze();
99003ba3782SJens Axboe 	}
99103ba3782SJens Axboe 
99203ba3782SJens Axboe 	return 0;
99303ba3782SJens Axboe }
99403ba3782SJens Axboe 
99503ba3782SJens Axboe /*
996b6e51316SJens Axboe  * Schedule writeback for all backing devices. This does WB_SYNC_NONE
997b6e51316SJens Axboe  * writeback, for integrity writeback see bdi_sync_writeback().
99803ba3782SJens Axboe  */
999b6e51316SJens Axboe static void bdi_writeback_all(struct super_block *sb, long nr_pages)
100003ba3782SJens Axboe {
1001b6e51316SJens Axboe 	struct wb_writeback_args args = {
1002b6e51316SJens Axboe 		.sb		= sb,
1003b6e51316SJens Axboe 		.nr_pages	= nr_pages,
1004b6e51316SJens Axboe 		.sync_mode	= WB_SYNC_NONE,
1005b6e51316SJens Axboe 	};
100603ba3782SJens Axboe 	struct backing_dev_info *bdi;
100703ba3782SJens Axboe 
1008cfc4ba53SJens Axboe 	rcu_read_lock();
100903ba3782SJens Axboe 
1010cfc4ba53SJens Axboe 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
101103ba3782SJens Axboe 		if (!bdi_has_dirty_io(bdi))
101203ba3782SJens Axboe 			continue;
101303ba3782SJens Axboe 
10147c8a3554SJens Axboe 		bdi_alloc_queue_work(bdi, &args, 0);
101503ba3782SJens Axboe 	}
101603ba3782SJens Axboe 
1017cfc4ba53SJens Axboe 	rcu_read_unlock();
101803ba3782SJens Axboe }
101903ba3782SJens Axboe 
102003ba3782SJens Axboe /*
102103ba3782SJens Axboe  * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
102203ba3782SJens Axboe  * the whole world.
102303ba3782SJens Axboe  */
102403ba3782SJens Axboe void wakeup_flusher_threads(long nr_pages)
102503ba3782SJens Axboe {
102603ba3782SJens Axboe 	if (nr_pages == 0)
102703ba3782SJens Axboe 		nr_pages = global_page_state(NR_FILE_DIRTY) +
102803ba3782SJens Axboe 				global_page_state(NR_UNSTABLE_NFS);
1029b6e51316SJens Axboe 	bdi_writeback_all(NULL, nr_pages);
103003ba3782SJens Axboe }
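/*
 * Illustrative usage: wakeup_flusher_threads(0) asks every bdi with dirty
 * IO to write back roughly all currently dirty/unstable pages, while e.g.
 * wakeup_flusher_threads(1024) caps the request at 1024 pages. Either way
 * this is WB_SYNC_NONE: nothing waits for the IO to complete.
 */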
103103ba3782SJens Axboe 
103203ba3782SJens Axboe static noinline void block_dump___mark_inode_dirty(struct inode *inode)
103303ba3782SJens Axboe {
103403ba3782SJens Axboe 	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
103503ba3782SJens Axboe 		struct dentry *dentry;
103603ba3782SJens Axboe 		const char *name = "?";
103703ba3782SJens Axboe 
103803ba3782SJens Axboe 		dentry = d_find_alias(inode);
103903ba3782SJens Axboe 		if (dentry) {
104003ba3782SJens Axboe 			spin_lock(&dentry->d_lock);
104103ba3782SJens Axboe 			name = (const char *) dentry->d_name.name;
104203ba3782SJens Axboe 		}
104303ba3782SJens Axboe 		printk(KERN_DEBUG
104403ba3782SJens Axboe 		       "%s(%d): dirtied inode %lu (%s) on %s\n",
104503ba3782SJens Axboe 		       current->comm, task_pid_nr(current), inode->i_ino,
104603ba3782SJens Axboe 		       name, inode->i_sb->s_id);
104703ba3782SJens Axboe 		if (dentry) {
104803ba3782SJens Axboe 			spin_unlock(&dentry->d_lock);
104903ba3782SJens Axboe 			dput(dentry);
105003ba3782SJens Axboe 		}
105103ba3782SJens Axboe 	}
105203ba3782SJens Axboe }
105303ba3782SJens Axboe 
105403ba3782SJens Axboe /**
105503ba3782SJens Axboe  *	__mark_inode_dirty -	internal function
105603ba3782SJens Axboe  *	@inode: inode to mark
105703ba3782SJens Axboe  *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
105803ba3782SJens Axboe  *	Mark an inode as dirty. Callers should use mark_inode_dirty or
105903ba3782SJens Axboe  *	mark_inode_dirty_sync.
106003ba3782SJens Axboe  *
106103ba3782SJens Axboe  * Put the inode on the super block's dirty list.
106203ba3782SJens Axboe  *
106303ba3782SJens Axboe  * CAREFUL! We mark it dirty unconditionally, but move it onto the
106403ba3782SJens Axboe  * dirty list only if it is hashed or if it refers to a blockdev.
106503ba3782SJens Axboe  * If it was not hashed, it will never be added to the dirty list
106603ba3782SJens Axboe  * even if it is later hashed, as it will have been marked dirty already.
106703ba3782SJens Axboe  *
106803ba3782SJens Axboe  * In short, make sure you hash any inodes _before_ you start marking
106903ba3782SJens Axboe  * them dirty.
107003ba3782SJens Axboe  *
107103ba3782SJens Axboe  * This function *must* be atomic for the I_DIRTY_PAGES case -
107203ba3782SJens Axboe  * set_page_dirty() is called under spinlock in several places.
107303ba3782SJens Axboe  *
107403ba3782SJens Axboe  * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
107503ba3782SJens Axboe  * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
107603ba3782SJens Axboe  * the kernel-internal blockdev inode represents the dirtying time of the
107703ba3782SJens Axboe  * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
107803ba3782SJens Axboe  * page->mapping->host, so the page-dirtying time is recorded in the internal
107903ba3782SJens Axboe  * blockdev inode.
108003ba3782SJens Axboe  */
108103ba3782SJens Axboe void __mark_inode_dirty(struct inode *inode, int flags)
108203ba3782SJens Axboe {
108303ba3782SJens Axboe 	struct super_block *sb = inode->i_sb;
108403ba3782SJens Axboe 
108503ba3782SJens Axboe 	/*
108603ba3782SJens Axboe 	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
108703ba3782SJens Axboe 	 * dirty the inode itself
108803ba3782SJens Axboe 	 */
108903ba3782SJens Axboe 	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
109003ba3782SJens Axboe 		if (sb->s_op->dirty_inode)
109103ba3782SJens Axboe 			sb->s_op->dirty_inode(inode);
109203ba3782SJens Axboe 	}
109303ba3782SJens Axboe 
109403ba3782SJens Axboe 	/*
109503ba3782SJens Axboe 	 * make sure that changes are seen by all cpus before we test i_state
109603ba3782SJens Axboe 	 * -- mikulas
109703ba3782SJens Axboe 	 */
109803ba3782SJens Axboe 	smp_mb();
109903ba3782SJens Axboe 
110003ba3782SJens Axboe 	/* avoid the locking if we can */
110103ba3782SJens Axboe 	if ((inode->i_state & flags) == flags)
110203ba3782SJens Axboe 		return;
110303ba3782SJens Axboe 
110403ba3782SJens Axboe 	if (unlikely(block_dump))
110503ba3782SJens Axboe 		block_dump___mark_inode_dirty(inode);
110603ba3782SJens Axboe 
110703ba3782SJens Axboe 	spin_lock(&inode_lock);
110803ba3782SJens Axboe 	if ((inode->i_state & flags) != flags) {
110903ba3782SJens Axboe 		const int was_dirty = inode->i_state & I_DIRTY;
111003ba3782SJens Axboe 
111103ba3782SJens Axboe 		inode->i_state |= flags;
111203ba3782SJens Axboe 
111303ba3782SJens Axboe 		/*
111403ba3782SJens Axboe 		 * If the inode is being synced, just update its dirty state.
111503ba3782SJens Axboe 		 * The unlocker will place the inode on the appropriate
111603ba3782SJens Axboe 		 * superblock list, based upon its state.
111703ba3782SJens Axboe 		 */
111803ba3782SJens Axboe 		if (inode->i_state & I_SYNC)
111903ba3782SJens Axboe 			goto out;
112003ba3782SJens Axboe 
112103ba3782SJens Axboe 		/*
112203ba3782SJens Axboe 		 * Only add valid (hashed) inodes to the superblock's
112303ba3782SJens Axboe 		 * dirty list.  Add blockdev inodes as well.
112403ba3782SJens Axboe 		 */
112503ba3782SJens Axboe 		if (!S_ISBLK(inode->i_mode)) {
112603ba3782SJens Axboe 			if (hlist_unhashed(&inode->i_hash))
112703ba3782SJens Axboe 				goto out;
112803ba3782SJens Axboe 		}
112903ba3782SJens Axboe 		if (inode->i_state & (I_FREEING|I_CLEAR))
113003ba3782SJens Axboe 			goto out;
113103ba3782SJens Axboe 
113203ba3782SJens Axboe 		/*
113303ba3782SJens Axboe 		 * If the inode was already on b_dirty/b_io/b_more_io, don't
113403ba3782SJens Axboe 		 * reposition it (that would break b_dirty time-ordering).
113503ba3782SJens Axboe 		 */
113603ba3782SJens Axboe 		if (!was_dirty) {
113703ba3782SJens Axboe 			struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
1138500b067cSJens Axboe 			struct backing_dev_info *bdi = wb->bdi;
1139500b067cSJens Axboe 
1140500b067cSJens Axboe 			if (bdi_cap_writeback_dirty(bdi) &&
1141500b067cSJens Axboe 			    !test_bit(BDI_registered, &bdi->state)) {
1142500b067cSJens Axboe 				WARN_ON(1);
1143500b067cSJens Axboe 				printk(KERN_ERR "bdi-%s not registered\n",
1144500b067cSJens Axboe 								bdi->name);
1145500b067cSJens Axboe 			}
114603ba3782SJens Axboe 
114703ba3782SJens Axboe 			inode->dirtied_when = jiffies;
114803ba3782SJens Axboe 			list_move(&inode->i_list, &wb->b_dirty);
114903ba3782SJens Axboe 		}
115003ba3782SJens Axboe 	}
115103ba3782SJens Axboe out:
115203ba3782SJens Axboe 	spin_unlock(&inode_lock);
115303ba3782SJens Axboe }
115403ba3782SJens Axboe EXPORT_SYMBOL(__mark_inode_dirty);
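
/*
 * Illustrative sketch (hypothetical helpers, not from this file): callers
 * are expected to go through the wrappers rather than call
 * __mark_inode_dirty() directly.  This assumes the usual wrapper
 * semantics from linux/fs.h -- mark_inode_dirty_sync() passing
 * I_DIRTY_SYNC, and mark_inode_dirty() passing all of I_DIRTY.
 */
static void example_touch_atime(struct inode *inode)
{
	/* timestamp-only change: inode metadata is dirty for sync */
	inode->i_atime = current_fs_time(inode->i_sb);
	mark_inode_dirty_sync(inode);
}

static void example_extend_file(struct inode *inode, loff_t new_size)
{
	/* size changes matter to fdatasync() as well */
	i_size_write(inode, new_size);
	mark_inode_dirty(inode);
}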
115503ba3782SJens Axboe 
115666f3b8e2SJens Axboe /*
115766f3b8e2SJens Axboe  * Wait for writeback on a superblock's inodes to complete.  This is the
115866f3b8e2SJens Axboe  * data integrity half of sync: the writeout itself is started elsewhere
115966f3b8e2SJens Axboe  * (see sync_inodes_sb()); here we only wait on it.
116066f3b8e2SJens Axboe  *
116166f3b8e2SJens Axboe  * We walk sb->s_inodes rather than the per-bdi dirty lists: an inode
116266f3b8e2SJens Axboe  * whose writeout was started by someone else may no longer be on any
116366f3b8e2SJens Axboe  * dirty list, yet a data integrity sync must still wait for that
116466f3b8e2SJens Axboe  * writeout to finish.
116566f3b8e2SJens Axboe  *
116666f3b8e2SJens Axboe  * For each inode with pages in its mapping we take a reference, drop
116766f3b8e2SJens Axboe  * inode_lock and wait via filemap_fdatawait().  The reference keeps the
116866f3b8e2SJens Axboe  * inode on s_inodes meanwhile; it is iput() only after retaking the
116966f3b8e2SJens Axboe  * lock, since a final iput() must not run under inode_lock.
117066f3b8e2SJens Axboe  *
117166f3b8e2SJens Axboe  * The caller must hold sb->s_umount across the call.
117266f3b8e2SJens Axboe  */
1173b6e51316SJens Axboe static void wait_sb_inodes(struct super_block *sb)
117466f3b8e2SJens Axboe {
117538f21977SNick Piggin 	struct inode *inode, *old_inode = NULL;
117638f21977SNick Piggin 
117703ba3782SJens Axboe 	/*
117803ba3782SJens Axboe 	 * We need to be protected against the filesystem going from
117903ba3782SJens Axboe 	 * r/o to r/w or vice versa.
118003ba3782SJens Axboe 	 */
1181b6e51316SJens Axboe 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
118203ba3782SJens Axboe 
118366f3b8e2SJens Axboe 	spin_lock(&inode_lock);
118466f3b8e2SJens Axboe 
118538f21977SNick Piggin 	/*
118638f21977SNick Piggin 	 * Data integrity sync.  We must wait for all pages under
118738f21977SNick Piggin 	 * writeback, because there may be pages that were dirtied before
118838f21977SNick Piggin 	 * our sync call but whose writeout was started by someone else
118938f21977SNick Piggin 	 * before we got to them.  In that case the inode may not be on
119038f21977SNick Piggin 	 * the dirty list, but we still have to wait for that writeout.
119138f21977SNick Piggin 	 */
1192b6e51316SJens Axboe 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
119338f21977SNick Piggin 		struct address_space *mapping;
119438f21977SNick Piggin 
119503ba3782SJens Axboe 		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
119638f21977SNick Piggin 			continue;
119738f21977SNick Piggin 		mapping = inode->i_mapping;
119838f21977SNick Piggin 		if (mapping->nrpages == 0)
119938f21977SNick Piggin 			continue;
120038f21977SNick Piggin 		__iget(inode);
1201ae8547b0SHans Reiser 		spin_unlock(&inode_lock);
120238f21977SNick Piggin 		/*
120338f21977SNick Piggin 		 * We hold a reference to 'inode', so it couldn't have been
120438f21977SNick Piggin 		 * removed from the s_inodes list while we dropped the
120538f21977SNick Piggin 		 * inode_lock.  We cannot iput the inode now, as we may be
120638f21977SNick Piggin 		 * holding the last reference and the final iput must not
120738f21977SNick Piggin 		 * happen under inode_lock.  So we keep the reference and
120838f21977SNick Piggin 		 * iput it later.
120938f21977SNick Piggin 		 */
121038f21977SNick Piggin 		iput(old_inode);
121138f21977SNick Piggin 		old_inode = inode;
121238f21977SNick Piggin 
121338f21977SNick Piggin 		filemap_fdatawait(mapping);
121438f21977SNick Piggin 
121538f21977SNick Piggin 		cond_resched();
121638f21977SNick Piggin 
121738f21977SNick Piggin 		spin_lock(&inode_lock);
121838f21977SNick Piggin 	}
121938f21977SNick Piggin 	spin_unlock(&inode_lock);
122038f21977SNick Piggin 	iput(old_inode);
122166f3b8e2SJens Axboe }
12221da177e4SLinus Torvalds 
1223e913fc82SJens Axboe static void __writeback_inodes_sb(struct super_block *sb, int sb_locked)
1224e913fc82SJens Axboe {
1225e913fc82SJens Axboe 	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
1226e913fc82SJens Axboe 	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
1227e913fc82SJens Axboe 	long nr_to_write;
1228e913fc82SJens Axboe 
1229e913fc82SJens Axboe 	nr_to_write = nr_dirty + nr_unstable +
1230e913fc82SJens Axboe 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
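	/*
	 * A rough reading of the estimate above: write out all dirty and
	 * NFS-unstable data pages, plus about one page of metadata per
	 * in-use inode.  As a worked example (numbers invented), 1000
	 * dirty pages, 50 unstable pages and 200 in-use inodes give
	 * nr_to_write = 1000 + 50 + 200 = 1250.
	 */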
1231e913fc82SJens Axboe 
1232e913fc82SJens Axboe 	bdi_start_writeback(sb->s_bdi, sb, nr_to_write, sb_locked);
1233e913fc82SJens Axboe }
1234e913fc82SJens Axboe 
1235d8a8559cSJens Axboe /**
1236d8a8559cSJens Axboe  * writeback_inodes_sb	-	writeback dirty inodes from given super_block
1237d8a8559cSJens Axboe  * @sb: the superblock
12381da177e4SLinus Torvalds  *
1239d8a8559cSJens Axboe  * Start writeback on some inodes on this super_block.  No guarantees are
1240d8a8559cSJens Axboe  * made on how many (if any) will be written, and this function does not
1241d8a8559cSJens Axboe  * wait for the completion of the IO it submits.
12431da177e4SLinus Torvalds  */
1244b6e51316SJens Axboe void writeback_inodes_sb(struct super_block *sb)
12451da177e4SLinus Torvalds {
1246e913fc82SJens Axboe 	__writeback_inodes_sb(sb, 0);
12471da177e4SLinus Torvalds }
1248d8a8559cSJens Axboe EXPORT_SYMBOL(writeback_inodes_sb);
1249d8a8559cSJens Axboe 
1250d8a8559cSJens Axboe /**
1251e913fc82SJens Axboe  * writeback_inodes_sb_locked	-	writeback dirty inodes from given super_block
1252e913fc82SJens Axboe  * @sb: the superblock
1253e913fc82SJens Axboe  *
1254e913fc82SJens Axboe  * Like writeback_inodes_sb(), except the caller already holds the
1255e913fc82SJens Axboe  * sb umount sem.
1256e913fc82SJens Axboe  */
1257e913fc82SJens Axboe void writeback_inodes_sb_locked(struct super_block *sb)
1258e913fc82SJens Axboe {
1259e913fc82SJens Axboe 	__writeback_inodes_sb(sb, 1);
1260e913fc82SJens Axboe }
1261e913fc82SJens Axboe 
1262e913fc82SJens Axboe /**
126317bd55d0SEric Sandeen  * writeback_inodes_sb_if_idle	-	start writeback if none underway
126417bd55d0SEric Sandeen  * @sb: the superblock
126517bd55d0SEric Sandeen  *
126617bd55d0SEric Sandeen  * Invoke writeback_inodes_sb if no writeback is currently underway.
126717bd55d0SEric Sandeen  * Returns 1 if writeback was started, 0 if not.
126817bd55d0SEric Sandeen  */
126917bd55d0SEric Sandeen int writeback_inodes_sb_if_idle(struct super_block *sb)
127017bd55d0SEric Sandeen {
127117bd55d0SEric Sandeen 	if (!writeback_in_progress(sb->s_bdi)) {
127217bd55d0SEric Sandeen 		writeback_inodes_sb(sb);
127317bd55d0SEric Sandeen 		return 1;
127417bd55d0SEric Sandeen 	}
127517bd55d0SEric Sandeen 	return 0;
127617bd55d0SEric Sandeen }
127717bd55d0SEric Sandeen EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
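
/*
 * Sketch of the kind of caller this helper suits (hypothetical code,
 * with an invented threshold): a filesystem under allocation pressure
 * can nudge background writeback to get dirty data onto disk, without
 * queueing redundant work when the flusher is already busy.
 */
static void example_alloc_pressure(struct super_block *sb,
				   u64 free_blocks, u64 dirty_blocks)
{
	/* invented heuristic: dirty data is crowding out free space */
	if (free_blocks < 2 * dirty_blocks)
		writeback_inodes_sb_if_idle(sb);
}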
127817bd55d0SEric Sandeen 
127917bd55d0SEric Sandeen /**
1280d8a8559cSJens Axboe  * sync_inodes_sb	-	sync sb inode pages
1281d8a8559cSJens Axboe  * @sb: the superblock
1282d8a8559cSJens Axboe  *
1283d8a8559cSJens Axboe  * This function writes and waits on every dirty inode belonging to this
1284d8a8559cSJens Axboe  * super_block.
1285d8a8559cSJens Axboe  */
1286b6e51316SJens Axboe void sync_inodes_sb(struct super_block *sb)
1287d8a8559cSJens Axboe {
1288b6e51316SJens Axboe 	bdi_sync_writeback(sb->s_bdi, sb);
1289b6e51316SJens Axboe 	wait_sb_inodes(sb);
1290d8a8559cSJens Axboe }
1291d8a8559cSJens Axboe EXPORT_SYMBOL(sync_inodes_sb);
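
/*
 * Sketch of how the sync(2) path is expected to pair these helpers
 * (simplified from fs/sync.c; details there may differ): a non-waiting
 * pass kicks writeback_inodes_sb(), while a data integrity pass uses
 * sync_inodes_sb(), which writes and then waits via wait_sb_inodes().
 */
static int example_sync_one_sb(struct super_block *sb, int wait)
{
	if (wait)
		sync_inodes_sb(sb);		/* write and wait */
	else
		writeback_inodes_sb(sb);	/* just start writeout */

	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, wait);
	return 0;
}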
12921da177e4SLinus Torvalds 
12931da177e4SLinus Torvalds /**
12941da177e4SLinus Torvalds  * write_inode_now	-	write an inode to disk
12951da177e4SLinus Torvalds  * @inode: inode to write to disk
12961da177e4SLinus Torvalds  * @sync: whether the write should be synchronous or not
12971da177e4SLinus Torvalds  *
12987f04c26dSAndrea Arcangeli  * This function commits an inode to disk immediately if it is dirty. This is
12997f04c26dSAndrea Arcangeli  * primarily needed by knfsd.
13007f04c26dSAndrea Arcangeli  *
13017f04c26dSAndrea Arcangeli  * The caller must either have a ref on the inode or must have set I_WILL_FREE.
13021da177e4SLinus Torvalds  */
13031da177e4SLinus Torvalds int write_inode_now(struct inode *inode, int sync)
13041da177e4SLinus Torvalds {
13051da177e4SLinus Torvalds 	int ret;
13061da177e4SLinus Torvalds 	struct writeback_control wbc = {
13071da177e4SLinus Torvalds 		.nr_to_write = LONG_MAX,
130818914b18SMike Galbraith 		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
1309111ebb6eSOGAWA Hirofumi 		.range_start = 0,
1310111ebb6eSOGAWA Hirofumi 		.range_end = LLONG_MAX,
13111da177e4SLinus Torvalds 	};
13121da177e4SLinus Torvalds 
13131da177e4SLinus Torvalds 	if (!mapping_cap_writeback_dirty(inode->i_mapping))
131449364ce2SAndrew Morton 		wbc.nr_to_write = 0;
13151da177e4SLinus Torvalds 
13161da177e4SLinus Torvalds 	might_sleep();
13171da177e4SLinus Torvalds 	spin_lock(&inode_lock);
131801c03194SChristoph Hellwig 	ret = writeback_single_inode(inode, &wbc);
13191da177e4SLinus Torvalds 	spin_unlock(&inode_lock);
13201da177e4SLinus Torvalds 	if (sync)
13211c0eeaf5SJoern Engel 		inode_sync_wait(inode);
13221da177e4SLinus Torvalds 	return ret;
13231da177e4SLinus Torvalds }
13241da177e4SLinus Torvalds EXPORT_SYMBOL(write_inode_now);
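
/*
 * Sketch of a hypothetical caller: forcing one inode out to disk, e.g.
 * before a filesystem-private operation that needs it clean.  With
 * sync == 1 the write uses WB_SYNC_ALL and then waits in
 * inode_sync_wait(); with sync == 0 it only starts the writeout.
 */
static int example_flush_inode(struct inode *inode)
{
	int err = write_inode_now(inode, 1);

	if (err)
		printk(KERN_WARNING "example: writeout of inode %lu failed\n",
		       inode->i_ino);
	return err;
}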
13251da177e4SLinus Torvalds 
13261da177e4SLinus Torvalds /**
13271da177e4SLinus Torvalds  * sync_inode - write an inode and its pages to disk.
13281da177e4SLinus Torvalds  * @inode: the inode to sync
13291da177e4SLinus Torvalds  * @wbc: controls the writeback mode
13301da177e4SLinus Torvalds  *
13311da177e4SLinus Torvalds  * sync_inode() will write an inode and its pages to disk.  It will also
13321da177e4SLinus Torvalds  * correctly update the inode on its superblock's dirty inode lists and will
13331da177e4SLinus Torvalds  * update inode->i_state.
13341da177e4SLinus Torvalds  *
13351da177e4SLinus Torvalds  * The caller must have a ref on the inode.
13361da177e4SLinus Torvalds  */
13371da177e4SLinus Torvalds int sync_inode(struct inode *inode, struct writeback_control *wbc)
13381da177e4SLinus Torvalds {
13391da177e4SLinus Torvalds 	int ret;
13401da177e4SLinus Torvalds 
13411da177e4SLinus Torvalds 	spin_lock(&inode_lock);
134201c03194SChristoph Hellwig 	ret = writeback_single_inode(inode, wbc);
13431da177e4SLinus Torvalds 	spin_unlock(&inode_lock);
13441da177e4SLinus Torvalds 	return ret;
13451da177e4SLinus Torvalds }
13461da177e4SLinus Torvalds EXPORT_SYMBOL(sync_inode);
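
/*
 * Sketch of the classic sync_inode() usage pattern, modelled loosely on
 * simple_fsync() in fs/libfs.c (simplified here): build a
 * writeback_control requesting a synchronous, metadata-only pass and
 * let sync_inode() handle inode_lock and the dirty-list bookkeeping.
 */
static int example_fsync_inode(struct inode *inode, int datasync)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,	/* inode itself; data already written */
	};

	if (!(inode->i_state & I_DIRTY))
		return 0;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		return 0;

	return sync_inode(inode, &wbc);
}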
1347