xref: /openbmc/linux/fs/ext4/inode.c (revision 79ffab34391933ee3b95dac7f25c0478fa2f8f1e)
1ac27a0ecSDave Kleikamp /*
2617ba13bSMingming Cao  *  linux/fs/ext4/inode.c
3ac27a0ecSDave Kleikamp  *
4ac27a0ecSDave Kleikamp  * Copyright (C) 1992, 1993, 1994, 1995
5ac27a0ecSDave Kleikamp  * Remy Card (card@masi.ibp.fr)
6ac27a0ecSDave Kleikamp  * Laboratoire MASI - Institut Blaise Pascal
7ac27a0ecSDave Kleikamp  * Universite Pierre et Marie Curie (Paris VI)
8ac27a0ecSDave Kleikamp  *
9ac27a0ecSDave Kleikamp  *  from
10ac27a0ecSDave Kleikamp  *
11ac27a0ecSDave Kleikamp  *  linux/fs/minix/inode.c
12ac27a0ecSDave Kleikamp  *
13ac27a0ecSDave Kleikamp  *  Copyright (C) 1991, 1992  Linus Torvalds
14ac27a0ecSDave Kleikamp  *
15ac27a0ecSDave Kleikamp  *  Goal-directed block allocation by Stephen Tweedie
16ac27a0ecSDave Kleikamp  *	(sct@redhat.com), 1993, 1998
17ac27a0ecSDave Kleikamp  *  Big-endian to little-endian byte-swapping/bitmaps by
18ac27a0ecSDave Kleikamp  *        David S. Miller (davem@caip.rutgers.edu), 1995
19ac27a0ecSDave Kleikamp  *  64-bit file support on 64-bit platforms by Jakub Jelinek
20ac27a0ecSDave Kleikamp  *	(jj@sunsite.ms.mff.cuni.cz)
21ac27a0ecSDave Kleikamp  *
22617ba13bSMingming Cao  *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
23ac27a0ecSDave Kleikamp  */
24ac27a0ecSDave Kleikamp 
25ac27a0ecSDave Kleikamp #include <linux/module.h>
26ac27a0ecSDave Kleikamp #include <linux/fs.h>
27ac27a0ecSDave Kleikamp #include <linux/time.h>
28dab291afSMingming Cao #include <linux/jbd2.h>
29ac27a0ecSDave Kleikamp #include <linux/highuid.h>
30ac27a0ecSDave Kleikamp #include <linux/pagemap.h>
31ac27a0ecSDave Kleikamp #include <linux/quotaops.h>
32ac27a0ecSDave Kleikamp #include <linux/string.h>
33ac27a0ecSDave Kleikamp #include <linux/buffer_head.h>
34ac27a0ecSDave Kleikamp #include <linux/writeback.h>
3564769240SAlex Tomas #include <linux/pagevec.h>
36ac27a0ecSDave Kleikamp #include <linux/mpage.h>
37e83c1397SDuane Griffin #include <linux/namei.h>
38ac27a0ecSDave Kleikamp #include <linux/uio.h>
39ac27a0ecSDave Kleikamp #include <linux/bio.h>
403dcf5451SChristoph Hellwig #include "ext4_jbd2.h"
41ac27a0ecSDave Kleikamp #include "xattr.h"
42ac27a0ecSDave Kleikamp #include "acl.h"
43d2a17637SMingming Cao #include "ext4_extents.h"
44ac27a0ecSDave Kleikamp 
45a1d6cc56SAneesh Kumar K.V #define MPAGE_DA_EXTENT_TAIL 0x01
46a1d6cc56SAneesh Kumar K.V 
47678aaf48SJan Kara static inline int ext4_begin_ordered_truncate(struct inode *inode,
48678aaf48SJan Kara 					      loff_t new_size)
49678aaf48SJan Kara {
507f5aa215SJan Kara 	return jbd2_journal_begin_ordered_truncate(
517f5aa215SJan Kara 					EXT4_SB(inode->i_sb)->s_journal,
527f5aa215SJan Kara 					&EXT4_I(inode)->jinode,
53678aaf48SJan Kara 					new_size);
54678aaf48SJan Kara }
55678aaf48SJan Kara 
5664769240SAlex Tomas static void ext4_invalidatepage(struct page *page, unsigned long offset);
5764769240SAlex Tomas 
58ac27a0ecSDave Kleikamp /*
59ac27a0ecSDave Kleikamp  * Test whether an inode is a fast symlink.
60ac27a0ecSDave Kleikamp  */
61617ba13bSMingming Cao static int ext4_inode_is_fast_symlink(struct inode *inode)
62ac27a0ecSDave Kleikamp {
63617ba13bSMingming Cao 	int ea_blocks = EXT4_I(inode)->i_file_acl ?
64ac27a0ecSDave Kleikamp 		(inode->i_sb->s_blocksize >> 9) : 0;
65ac27a0ecSDave Kleikamp 
66ac27a0ecSDave Kleikamp 	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
67ac27a0ecSDave Kleikamp }
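
/*
 * A short illustration, assuming a 4KB block size: the external xattr
 * block, when present, accounts for 4096 >> 9 = 8 sectors in i_blocks,
 * so a symlink whose target is stored directly in i_data[] has
 * i_blocks == 0 (no EA block) or i_blocks == 8 (with one); in both
 * cases the subtraction above yields 0 and the symlink is "fast".
 */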
68ac27a0ecSDave Kleikamp 
69ac27a0ecSDave Kleikamp /*
70617ba13bSMingming Cao  * The ext4 forget function must perform a revoke if we are freeing data
71ac27a0ecSDave Kleikamp  * which has been journaled.  Metadata (e.g. indirect blocks) must be
72ac27a0ecSDave Kleikamp  * revoked in all cases.
73ac27a0ecSDave Kleikamp  *
74ac27a0ecSDave Kleikamp  * "bh" may be NULL: a metadata block may have been freed from memory
75ac27a0ecSDave Kleikamp  * but there may still be a record of it in the journal, and that record
76ac27a0ecSDave Kleikamp  * still needs to be revoked.
770390131bSFrank Mayhar  *
780390131bSFrank Mayhar  * If the handle isn't valid we're not journaling so there's nothing to do.
79ac27a0ecSDave Kleikamp  */
80617ba13bSMingming Cao int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
81617ba13bSMingming Cao 			struct buffer_head *bh, ext4_fsblk_t blocknr)
82ac27a0ecSDave Kleikamp {
83ac27a0ecSDave Kleikamp 	int err;
84ac27a0ecSDave Kleikamp 
850390131bSFrank Mayhar 	if (!ext4_handle_valid(handle))
860390131bSFrank Mayhar 		return 0;
870390131bSFrank Mayhar 
88ac27a0ecSDave Kleikamp 	might_sleep();
89ac27a0ecSDave Kleikamp 
90ac27a0ecSDave Kleikamp 	BUFFER_TRACE(bh, "enter");
91ac27a0ecSDave Kleikamp 
92ac27a0ecSDave Kleikamp 	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
93ac27a0ecSDave Kleikamp 		  "data mode %lx\n",
94ac27a0ecSDave Kleikamp 		  bh, is_metadata, inode->i_mode,
95ac27a0ecSDave Kleikamp 		  test_opt(inode->i_sb, DATA_FLAGS));
96ac27a0ecSDave Kleikamp 
97ac27a0ecSDave Kleikamp 	/* Never use the revoke function if we are doing full data
98ac27a0ecSDave Kleikamp 	 * journaling: there is no need to, and a V1 superblock won't
99ac27a0ecSDave Kleikamp 	 * support it.  Otherwise, only skip the revoke on un-journaled
100ac27a0ecSDave Kleikamp 	 * data blocks. */
101ac27a0ecSDave Kleikamp 
102617ba13bSMingming Cao 	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
103617ba13bSMingming Cao 	    (!is_metadata && !ext4_should_journal_data(inode))) {
104ac27a0ecSDave Kleikamp 		if (bh) {
105dab291afSMingming Cao 			BUFFER_TRACE(bh, "call jbd2_journal_forget");
106617ba13bSMingming Cao 			return ext4_journal_forget(handle, bh);
107ac27a0ecSDave Kleikamp 		}
108ac27a0ecSDave Kleikamp 		return 0;
109ac27a0ecSDave Kleikamp 	}
110ac27a0ecSDave Kleikamp 
111ac27a0ecSDave Kleikamp 	/*
112ac27a0ecSDave Kleikamp 	 * data!=journal && (is_metadata || should_journal_data(inode))
113ac27a0ecSDave Kleikamp 	 */
114617ba13bSMingming Cao 	BUFFER_TRACE(bh, "call ext4_journal_revoke");
115617ba13bSMingming Cao 	err = ext4_journal_revoke(handle, blocknr, bh);
116ac27a0ecSDave Kleikamp 	if (err)
11746e665e9SHarvey Harrison 		ext4_abort(inode->i_sb, __func__,
118ac27a0ecSDave Kleikamp 			   "error %d when attempting revoke", err);
119ac27a0ecSDave Kleikamp 	BUFFER_TRACE(bh, "exit");
120ac27a0ecSDave Kleikamp 	return err;
121ac27a0ecSDave Kleikamp }
122ac27a0ecSDave Kleikamp 
123ac27a0ecSDave Kleikamp /*
124ac27a0ecSDave Kleikamp  * Work out how many blocks we need to proceed with the next chunk of a
125ac27a0ecSDave Kleikamp  * truncate transaction.
126ac27a0ecSDave Kleikamp  */
127ac27a0ecSDave Kleikamp static unsigned long blocks_for_truncate(struct inode *inode)
128ac27a0ecSDave Kleikamp {
129725d26d3SAneesh Kumar K.V 	ext4_lblk_t needed;
130ac27a0ecSDave Kleikamp 
131ac27a0ecSDave Kleikamp 	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
132ac27a0ecSDave Kleikamp 
133ac27a0ecSDave Kleikamp 	/* Give ourselves just enough room to cope with inodes in which
134ac27a0ecSDave Kleikamp 	 * i_blocks is corrupt: we've seen disk corruptions in the past
135ac27a0ecSDave Kleikamp 	 * which resulted in random data in an inode which looked enough
136617ba13bSMingming Cao 	 * like a regular file for ext4 to try to delete it.  Things
137ac27a0ecSDave Kleikamp 	 * will go a bit crazy if that happens, but at least we should
138ac27a0ecSDave Kleikamp 	 * try not to panic the whole kernel. */
139ac27a0ecSDave Kleikamp 	if (needed < 2)
140ac27a0ecSDave Kleikamp 		needed = 2;
141ac27a0ecSDave Kleikamp 
142ac27a0ecSDave Kleikamp 	/* But we need to bound the transaction so we don't overflow the
143ac27a0ecSDave Kleikamp 	 * journal. */
144617ba13bSMingming Cao 	if (needed > EXT4_MAX_TRANS_DATA)
145617ba13bSMingming Cao 		needed = EXT4_MAX_TRANS_DATA;
146ac27a0ecSDave Kleikamp 
147617ba13bSMingming Cao 	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
148ac27a0ecSDave Kleikamp }
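
/*
 * A worked example, assuming a 4KB block size: a file whose i_blocks
 * records 8000 512-byte sectors (roughly 1000 data blocks) gives
 * needed = 8000 >> (12 - 9) = 1000, which is then capped at
 * EXT4_MAX_TRANS_DATA, so the handle is sized to
 * EXT4_DATA_TRANS_BLOCKS(sb) + EXT4_MAX_TRANS_DATA credits rather than
 * to the full size of the file.
 */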
149ac27a0ecSDave Kleikamp 
150ac27a0ecSDave Kleikamp /*
151ac27a0ecSDave Kleikamp  * Truncate transactions can be complex and absolutely huge.  So we need to
152ac27a0ecSDave Kleikamp  * be able to restart the transaction at a convenient checkpoint to make
153ac27a0ecSDave Kleikamp  * sure we don't overflow the journal.
154ac27a0ecSDave Kleikamp  *
155ac27a0ecSDave Kleikamp  * start_transaction gets us a new handle for a truncate transaction,
156ac27a0ecSDave Kleikamp  * and extend_transaction tries to extend the existing one a bit.  If
157ac27a0ecSDave Kleikamp  * extend fails, we need to propagate the failure up and restart the
158ac27a0ecSDave Kleikamp  * transaction in the top-level truncate loop. --sct
159ac27a0ecSDave Kleikamp  */
160ac27a0ecSDave Kleikamp static handle_t *start_transaction(struct inode *inode)
161ac27a0ecSDave Kleikamp {
162ac27a0ecSDave Kleikamp 	handle_t *result;
163ac27a0ecSDave Kleikamp 
164617ba13bSMingming Cao 	result = ext4_journal_start(inode, blocks_for_truncate(inode));
165ac27a0ecSDave Kleikamp 	if (!IS_ERR(result))
166ac27a0ecSDave Kleikamp 		return result;
167ac27a0ecSDave Kleikamp 
168617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, PTR_ERR(result));
169ac27a0ecSDave Kleikamp 	return result;
170ac27a0ecSDave Kleikamp }
171ac27a0ecSDave Kleikamp 
172ac27a0ecSDave Kleikamp /*
173ac27a0ecSDave Kleikamp  * Try to extend this transaction for the purposes of truncation.
174ac27a0ecSDave Kleikamp  *
175ac27a0ecSDave Kleikamp  * Returns 0 if we managed to create more room.  If we can't create more
176ac27a0ecSDave Kleikamp  * room and the transaction must be restarted, we return 1.
177ac27a0ecSDave Kleikamp  */
178ac27a0ecSDave Kleikamp static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
179ac27a0ecSDave Kleikamp {
1800390131bSFrank Mayhar 	if (!ext4_handle_valid(handle))
1810390131bSFrank Mayhar 		return 0;
1820390131bSFrank Mayhar 	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
183ac27a0ecSDave Kleikamp 		return 0;
184617ba13bSMingming Cao 	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
185ac27a0ecSDave Kleikamp 		return 0;
186ac27a0ecSDave Kleikamp 	return 1;
187ac27a0ecSDave Kleikamp }
188ac27a0ecSDave Kleikamp 
189ac27a0ecSDave Kleikamp /*
190ac27a0ecSDave Kleikamp  * Restart the transaction associated with *handle.  This does a commit,
191ac27a0ecSDave Kleikamp  * so before we call here everything must be consistently dirtied against
192ac27a0ecSDave Kleikamp  * this transaction.
193ac27a0ecSDave Kleikamp  */
194617ba13bSMingming Cao static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
195ac27a0ecSDave Kleikamp {
1960390131bSFrank Mayhar 	BUG_ON(EXT4_JOURNAL(inode) == NULL);
197ac27a0ecSDave Kleikamp 	jbd_debug(2, "restarting handle %p\n", handle);
198617ba13bSMingming Cao 	return ext4_journal_restart(handle, blocks_for_truncate(inode));
199ac27a0ecSDave Kleikamp }
200ac27a0ecSDave Kleikamp 
201ac27a0ecSDave Kleikamp /*
202ac27a0ecSDave Kleikamp  * Called at the last iput() if i_nlink is zero.
203ac27a0ecSDave Kleikamp  */
204617ba13bSMingming Cao void ext4_delete_inode(struct inode *inode)
205ac27a0ecSDave Kleikamp {
206ac27a0ecSDave Kleikamp 	handle_t *handle;
207bc965ab3STheodore Ts'o 	int err;
208ac27a0ecSDave Kleikamp 
209678aaf48SJan Kara 	if (ext4_should_order_data(inode))
210678aaf48SJan Kara 		ext4_begin_ordered_truncate(inode, 0);
211ac27a0ecSDave Kleikamp 	truncate_inode_pages(&inode->i_data, 0);
212ac27a0ecSDave Kleikamp 
213ac27a0ecSDave Kleikamp 	if (is_bad_inode(inode))
214ac27a0ecSDave Kleikamp 		goto no_delete;
215ac27a0ecSDave Kleikamp 
216bc965ab3STheodore Ts'o 	handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
217ac27a0ecSDave Kleikamp 	if (IS_ERR(handle)) {
218bc965ab3STheodore Ts'o 		ext4_std_error(inode->i_sb, PTR_ERR(handle));
219ac27a0ecSDave Kleikamp 		/*
220ac27a0ecSDave Kleikamp 		 * If we're going to skip the normal cleanup, we still need to
221ac27a0ecSDave Kleikamp 		 * make sure that the in-core orphan linked list is properly
222ac27a0ecSDave Kleikamp 		 * cleaned up.
223ac27a0ecSDave Kleikamp 		 */
224617ba13bSMingming Cao 		ext4_orphan_del(NULL, inode);
225ac27a0ecSDave Kleikamp 		goto no_delete;
226ac27a0ecSDave Kleikamp 	}
227ac27a0ecSDave Kleikamp 
228ac27a0ecSDave Kleikamp 	if (IS_SYNC(inode))
2290390131bSFrank Mayhar 		ext4_handle_sync(handle);
230ac27a0ecSDave Kleikamp 	inode->i_size = 0;
231bc965ab3STheodore Ts'o 	err = ext4_mark_inode_dirty(handle, inode);
232bc965ab3STheodore Ts'o 	if (err) {
233bc965ab3STheodore Ts'o 		ext4_warning(inode->i_sb, __func__,
234bc965ab3STheodore Ts'o 			     "couldn't mark inode dirty (err %d)", err);
235bc965ab3STheodore Ts'o 		goto stop_handle;
236bc965ab3STheodore Ts'o 	}
237ac27a0ecSDave Kleikamp 	if (inode->i_blocks)
238617ba13bSMingming Cao 		ext4_truncate(inode);
239bc965ab3STheodore Ts'o 
240bc965ab3STheodore Ts'o 	/*
241bc965ab3STheodore Ts'o 	 * ext4_ext_truncate() doesn't reserve any slop when it
242bc965ab3STheodore Ts'o 	 * restarts journal transactions; therefore there may not be
243bc965ab3STheodore Ts'o 	 * enough credits left in the handle to remove the inode from
244bc965ab3STheodore Ts'o 	 * the orphan list and set the dtime field.
245bc965ab3STheodore Ts'o 	 */
2460390131bSFrank Mayhar 	if (!ext4_handle_has_enough_credits(handle, 3)) {
247bc965ab3STheodore Ts'o 		err = ext4_journal_extend(handle, 3);
248bc965ab3STheodore Ts'o 		if (err > 0)
249bc965ab3STheodore Ts'o 			err = ext4_journal_restart(handle, 3);
250bc965ab3STheodore Ts'o 		if (err != 0) {
251bc965ab3STheodore Ts'o 			ext4_warning(inode->i_sb, __func__,
252bc965ab3STheodore Ts'o 				     "couldn't extend journal (err %d)", err);
253bc965ab3STheodore Ts'o 		stop_handle:
254bc965ab3STheodore Ts'o 			ext4_journal_stop(handle);
255bc965ab3STheodore Ts'o 			goto no_delete;
256bc965ab3STheodore Ts'o 		}
257bc965ab3STheodore Ts'o 	}
258bc965ab3STheodore Ts'o 
259ac27a0ecSDave Kleikamp 	/*
260617ba13bSMingming Cao 	 * Kill off the orphan record which ext4_truncate created.
261ac27a0ecSDave Kleikamp 	 * AKPM: I think this can be inside the above `if'.
262617ba13bSMingming Cao 	 * Note that ext4_orphan_del() has to be able to cope with the
263ac27a0ecSDave Kleikamp 	 * deletion of a non-existent orphan - this is because we don't
264617ba13bSMingming Cao 	 * know if ext4_truncate() actually created an orphan record.
265ac27a0ecSDave Kleikamp 	 * (Well, we could do this if we need to, but heck - it works)
266ac27a0ecSDave Kleikamp 	 */
267617ba13bSMingming Cao 	ext4_orphan_del(handle, inode);
268617ba13bSMingming Cao 	EXT4_I(inode)->i_dtime	= get_seconds();
269ac27a0ecSDave Kleikamp 
270ac27a0ecSDave Kleikamp 	/*
271ac27a0ecSDave Kleikamp 	 * One subtle ordering requirement: if anything has gone wrong
272ac27a0ecSDave Kleikamp 	 * (transaction abort, IO errors, whatever), then we can still
273ac27a0ecSDave Kleikamp 	 * do these next steps (the fs will already have been marked as
274ac27a0ecSDave Kleikamp 	 * having errors), but we can't free the inode if the mark_dirty
275ac27a0ecSDave Kleikamp 	 * fails.
276ac27a0ecSDave Kleikamp 	 */
277617ba13bSMingming Cao 	if (ext4_mark_inode_dirty(handle, inode))
278ac27a0ecSDave Kleikamp 		/* If that failed, just do the required in-core inode clear. */
279ac27a0ecSDave Kleikamp 		clear_inode(inode);
280ac27a0ecSDave Kleikamp 	else
281617ba13bSMingming Cao 		ext4_free_inode(handle, inode);
282617ba13bSMingming Cao 	ext4_journal_stop(handle);
283ac27a0ecSDave Kleikamp 	return;
284ac27a0ecSDave Kleikamp no_delete:
285ac27a0ecSDave Kleikamp 	clear_inode(inode);	/* We must guarantee clearing of inode... */
286ac27a0ecSDave Kleikamp }
287ac27a0ecSDave Kleikamp 
288ac27a0ecSDave Kleikamp typedef struct {
289ac27a0ecSDave Kleikamp 	__le32	*p;
290ac27a0ecSDave Kleikamp 	__le32	key;
291ac27a0ecSDave Kleikamp 	struct buffer_head *bh;
292ac27a0ecSDave Kleikamp } Indirect;
293ac27a0ecSDave Kleikamp 
294ac27a0ecSDave Kleikamp static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
295ac27a0ecSDave Kleikamp {
296ac27a0ecSDave Kleikamp 	p->key = *(p->p = v);
297ac27a0ecSDave Kleikamp 	p->bh = bh;
298ac27a0ecSDave Kleikamp }
299ac27a0ecSDave Kleikamp 
300ac27a0ecSDave Kleikamp /**
301617ba13bSMingming Cao  *	ext4_block_to_path - parse the block number into array of offsets
302ac27a0ecSDave Kleikamp  *	@inode: inode in question (we are only interested in its superblock)
303ac27a0ecSDave Kleikamp  *	@i_block: block number to be parsed
304ac27a0ecSDave Kleikamp  *	@offsets: array to store the offsets in
305ac27a0ecSDave Kleikamp  *	@boundary: set this non-zero if the referred-to block is likely to be
306ac27a0ecSDave Kleikamp  *	       followed (on disk) by an indirect block.
307ac27a0ecSDave Kleikamp  *
308617ba13bSMingming Cao  *	To store the locations of a file's data, ext4 uses a data structure common
309ac27a0ecSDave Kleikamp  *	for UNIX filesystems - tree of pointers anchored in the inode, with
310ac27a0ecSDave Kleikamp  *	data blocks at leaves and indirect blocks in intermediate nodes.
311ac27a0ecSDave Kleikamp  *	This function translates the block number into a path in that tree -
312ac27a0ecSDave Kleikamp  *	the return value is the path length and @offsets[n] is the offset of the
313ac27a0ecSDave Kleikamp  *	pointer to the (n+1)th node in the nth one. If @block is out of range
314ac27a0ecSDave Kleikamp  *	(negative or too large), a warning is printed and zero is returned.
315ac27a0ecSDave Kleikamp  *
316ac27a0ecSDave Kleikamp  *	Note: function doesn't find node addresses, so no IO is needed. All
317ac27a0ecSDave Kleikamp  *	we need to know is the capacity of indirect blocks (taken from the
318ac27a0ecSDave Kleikamp  *	inode->i_sb).
319ac27a0ecSDave Kleikamp  */
320ac27a0ecSDave Kleikamp 
321ac27a0ecSDave Kleikamp /*
322ac27a0ecSDave Kleikamp  * Portability note: the last comparison (check that we fit into triple
323ac27a0ecSDave Kleikamp  * indirect block) is spelled differently, because otherwise on an
324ac27a0ecSDave Kleikamp  * architecture with 32-bit longs and 8Kb pages we might get into trouble
325ac27a0ecSDave Kleikamp  * if our filesystem had 8Kb blocks. We might use long long, but that would
326ac27a0ecSDave Kleikamp  * kill us on x86. Oh, well, at least the sign propagation does not matter -
327ac27a0ecSDave Kleikamp  * i_block would have to be negative in the very beginning, so we would not
328ac27a0ecSDave Kleikamp  * get there at all.
329ac27a0ecSDave Kleikamp  */
330ac27a0ecSDave Kleikamp 
331617ba13bSMingming Cao static int ext4_block_to_path(struct inode *inode,
332725d26d3SAneesh Kumar K.V 			ext4_lblk_t i_block,
333725d26d3SAneesh Kumar K.V 			ext4_lblk_t offsets[4], int *boundary)
334ac27a0ecSDave Kleikamp {
335617ba13bSMingming Cao 	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
336617ba13bSMingming Cao 	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
337617ba13bSMingming Cao 	const long direct_blocks = EXT4_NDIR_BLOCKS,
338ac27a0ecSDave Kleikamp 		indirect_blocks = ptrs,
339ac27a0ecSDave Kleikamp 		double_blocks = (1 << (ptrs_bits * 2));
340ac27a0ecSDave Kleikamp 	int n = 0;
341ac27a0ecSDave Kleikamp 	int final = 0;
342ac27a0ecSDave Kleikamp 
343ac27a0ecSDave Kleikamp 	if (i_block < 0) {
344617ba13bSMingming Cao 		ext4_warning(inode->i_sb, "ext4_block_to_path", "block < 0");
345ac27a0ecSDave Kleikamp 	} else if (i_block < direct_blocks) {
346ac27a0ecSDave Kleikamp 		offsets[n++] = i_block;
347ac27a0ecSDave Kleikamp 		final = direct_blocks;
348ac27a0ecSDave Kleikamp 	} else if ((i_block -= direct_blocks) < indirect_blocks) {
349617ba13bSMingming Cao 		offsets[n++] = EXT4_IND_BLOCK;
350ac27a0ecSDave Kleikamp 		offsets[n++] = i_block;
351ac27a0ecSDave Kleikamp 		final = ptrs;
352ac27a0ecSDave Kleikamp 	} else if ((i_block -= indirect_blocks) < double_blocks) {
353617ba13bSMingming Cao 		offsets[n++] = EXT4_DIND_BLOCK;
354ac27a0ecSDave Kleikamp 		offsets[n++] = i_block >> ptrs_bits;
355ac27a0ecSDave Kleikamp 		offsets[n++] = i_block & (ptrs - 1);
356ac27a0ecSDave Kleikamp 		final = ptrs;
357ac27a0ecSDave Kleikamp 	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
358617ba13bSMingming Cao 		offsets[n++] = EXT4_TIND_BLOCK;
359ac27a0ecSDave Kleikamp 		offsets[n++] = i_block >> (ptrs_bits * 2);
360ac27a0ecSDave Kleikamp 		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
361ac27a0ecSDave Kleikamp 		offsets[n++] = i_block & (ptrs - 1);
362ac27a0ecSDave Kleikamp 		final = ptrs;
363ac27a0ecSDave Kleikamp 	} else {
364e2b46574SEric Sandeen 		ext4_warning(inode->i_sb, "ext4_block_to_path",
36506a279d6STheodore Ts'o 				"block %lu > max in inode %lu",
366e2b46574SEric Sandeen 				i_block + direct_blocks +
36706a279d6STheodore Ts'o 				indirect_blocks + double_blocks, inode->i_ino);
368ac27a0ecSDave Kleikamp 	}
369ac27a0ecSDave Kleikamp 	if (boundary)
370ac27a0ecSDave Kleikamp 		*boundary = final - 1 - (i_block & (ptrs - 1));
371ac27a0ecSDave Kleikamp 	return n;
372ac27a0ecSDave Kleikamp }
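
/*
 * A worked example, assuming a 4KB block size (12 direct slots,
 * ptrs = 1024, ptrs_bits = 10): logical block 5000 is past the direct
 * (12) and single-indirect (1024) ranges, leaving i_block = 3964, so the
 * function returns the depth-3 path
 * { EXT4_DIND_BLOCK, 3964 >> 10 = 3, 3964 & 1023 = 892 } and sets
 * *boundary = 1024 - 1 - 892 = 131 slots left before the next indirect
 * block would be needed.
 */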
373ac27a0ecSDave Kleikamp 
374fe2c8191SThiemo Nagel static int __ext4_check_blockref(const char *function, struct inode *inode,
375f73953c0SThiemo Nagel 				 __le32 *p, unsigned int max)
376fe2c8191SThiemo Nagel {
377fe2c8191SThiemo Nagel 	unsigned int maxblocks = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es);
378f73953c0SThiemo Nagel 	__le32 *bref = p;
379fe2c8191SThiemo Nagel 	while (bref < p+max) {
380f73953c0SThiemo Nagel 		if (unlikely(le32_to_cpu(*bref) >= maxblocks)) {
381fe2c8191SThiemo Nagel 			ext4_error(inode->i_sb, function,
382fe2c8191SThiemo Nagel 				   "block reference %u >= max (%u) "
383fe2c8191SThiemo Nagel 				   "in inode #%lu, offset=%d",
384f73953c0SThiemo Nagel 				   le32_to_cpu(*bref), maxblocks,
385fe2c8191SThiemo Nagel 				   inode->i_ino, (int)(bref-p));
386fe2c8191SThiemo Nagel 			return -EIO;
387fe2c8191SThiemo Nagel 		}
388fe2c8191SThiemo Nagel 		bref++;
389fe2c8191SThiemo Nagel 	}
390fe2c8191SThiemo Nagel 	return 0;
391fe2c8191SThiemo Nagel }
392fe2c8191SThiemo Nagel 
393fe2c8191SThiemo Nagel 
394fe2c8191SThiemo Nagel #define ext4_check_indirect_blockref(inode, bh)                         \
395fe2c8191SThiemo Nagel         __ext4_check_blockref(__func__, inode, (__le32 *)(bh)->b_data,  \
396fe2c8191SThiemo Nagel 			      EXT4_ADDR_PER_BLOCK((inode)->i_sb))
397fe2c8191SThiemo Nagel 
398fe2c8191SThiemo Nagel #define ext4_check_inode_blockref(inode)                                \
399fe2c8191SThiemo Nagel         __ext4_check_blockref(__func__, inode, EXT4_I(inode)->i_data,   \
400fe2c8191SThiemo Nagel 			      EXT4_NDIR_BLOCKS)
401fe2c8191SThiemo Nagel 
402ac27a0ecSDave Kleikamp /**
403617ba13bSMingming Cao  *	ext4_get_branch - read the chain of indirect blocks leading to data
404ac27a0ecSDave Kleikamp  *	@inode: inode in question
405ac27a0ecSDave Kleikamp  *	@depth: depth of the chain (1 - direct pointer, etc.)
406ac27a0ecSDave Kleikamp  *	@offsets: offsets of pointers in inode/indirect blocks
407ac27a0ecSDave Kleikamp  *	@chain: place to store the result
408ac27a0ecSDave Kleikamp  *	@err: here we store the error value
409ac27a0ecSDave Kleikamp  *
410ac27a0ecSDave Kleikamp  *	Function fills the array of triples <key, p, bh> and returns %NULL
411ac27a0ecSDave Kleikamp  *	if everything went OK or the pointer to the last filled triple
412ac27a0ecSDave Kleikamp  *	(incomplete one) otherwise. Upon the return chain[i].key contains
413ac27a0ecSDave Kleikamp  *	the number of (i+1)-th block in the chain (as it is stored in memory,
414ac27a0ecSDave Kleikamp  *	i.e. little-endian 32-bit), chain[i].p contains the address of that
415ac27a0ecSDave Kleikamp  *	number (it points into struct inode for i==0 and into the bh->b_data
416ac27a0ecSDave Kleikamp  *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
417ac27a0ecSDave Kleikamp  *	block for i>0 and NULL for i==0. In other words, it holds the block
418ac27a0ecSDave Kleikamp  *	numbers of the chain, addresses they were taken from (and where we can
419ac27a0ecSDave Kleikamp  *	verify that chain did not change) and buffer_heads hosting these
420ac27a0ecSDave Kleikamp  *	numbers.
421ac27a0ecSDave Kleikamp  *
422ac27a0ecSDave Kleikamp  *	Function stops when it stumbles upon zero pointer (absent block)
423ac27a0ecSDave Kleikamp  *		(pointer to last triple returned, *@err == 0)
424ac27a0ecSDave Kleikamp  *	or when it gets an IO error reading an indirect block
425ac27a0ecSDave Kleikamp  *		(ditto, *@err == -EIO)
426ac27a0ecSDave Kleikamp  *	or when it reads all @depth-1 indirect blocks successfully and finds
427ac27a0ecSDave Kleikamp  *	the whole chain, all way to the data (returns %NULL, *err == 0).
428c278bfecSAneesh Kumar K.V  *
429c278bfecSAneesh Kumar K.V  *      Need to be called with
4300e855ac8SAneesh Kumar K.V  *      down_read(&EXT4_I(inode)->i_data_sem)
431ac27a0ecSDave Kleikamp  */
432725d26d3SAneesh Kumar K.V static Indirect *ext4_get_branch(struct inode *inode, int depth,
433725d26d3SAneesh Kumar K.V 				 ext4_lblk_t  *offsets,
434ac27a0ecSDave Kleikamp 				 Indirect chain[4], int *err)
435ac27a0ecSDave Kleikamp {
436ac27a0ecSDave Kleikamp 	struct super_block *sb = inode->i_sb;
437ac27a0ecSDave Kleikamp 	Indirect *p = chain;
438ac27a0ecSDave Kleikamp 	struct buffer_head *bh;
439ac27a0ecSDave Kleikamp 
440ac27a0ecSDave Kleikamp 	*err = 0;
441ac27a0ecSDave Kleikamp 	/* i_data is not going away, no lock needed */
442617ba13bSMingming Cao 	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
443ac27a0ecSDave Kleikamp 	if (!p->key)
444ac27a0ecSDave Kleikamp 		goto no_block;
445ac27a0ecSDave Kleikamp 	while (--depth) {
446fe2c8191SThiemo Nagel 		bh = sb_getblk(sb, le32_to_cpu(p->key));
447fe2c8191SThiemo Nagel 		if (unlikely(!bh))
448ac27a0ecSDave Kleikamp 			goto failure;
449fe2c8191SThiemo Nagel 
450fe2c8191SThiemo Nagel 		if (!bh_uptodate_or_lock(bh)) {
451fe2c8191SThiemo Nagel 			if (bh_submit_read(bh) < 0) {
452fe2c8191SThiemo Nagel 				put_bh(bh);
453fe2c8191SThiemo Nagel 				goto failure;
454fe2c8191SThiemo Nagel 			}
455fe2c8191SThiemo Nagel 			/* validate block references */
456fe2c8191SThiemo Nagel 			if (ext4_check_indirect_blockref(inode, bh)) {
457fe2c8191SThiemo Nagel 				put_bh(bh);
458fe2c8191SThiemo Nagel 				goto failure;
459fe2c8191SThiemo Nagel 			}
460fe2c8191SThiemo Nagel 		}
461fe2c8191SThiemo Nagel 
462ac27a0ecSDave Kleikamp 		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
463ac27a0ecSDave Kleikamp 		/* Reader: end */
464ac27a0ecSDave Kleikamp 		if (!p->key)
465ac27a0ecSDave Kleikamp 			goto no_block;
466ac27a0ecSDave Kleikamp 	}
467ac27a0ecSDave Kleikamp 	return NULL;
468ac27a0ecSDave Kleikamp 
469ac27a0ecSDave Kleikamp failure:
470ac27a0ecSDave Kleikamp 	*err = -EIO;
471ac27a0ecSDave Kleikamp no_block:
472ac27a0ecSDave Kleikamp 	return p;
473ac27a0ecSDave Kleikamp }
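
/*
 * An illustration for a depth-3 (double-indirect) lookup: chain[0].p
 * points at the slot in the in-core i_data[] array (chain[0].bh == NULL),
 * chain[1].p points into the double-indirect block's b_data, and
 * chain[2].p into the single-indirect block below it.  A NULL return
 * means chain[2].key holds the (little-endian) block number of the data
 * block; otherwise the returned triple is the first one whose key was
 * zero, or the point where reading an indirect block failed
 * (*err == -EIO).
 */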
474ac27a0ecSDave Kleikamp 
475ac27a0ecSDave Kleikamp /**
476617ba13bSMingming Cao  *	ext4_find_near - find a place for allocation with sufficient locality
477ac27a0ecSDave Kleikamp  *	@inode: owner
478ac27a0ecSDave Kleikamp  *	@ind: descriptor of indirect block.
479ac27a0ecSDave Kleikamp  *
4801cc8dcf5SBenoit Boissinot  *	This function returns the preferred place for block allocation.
481ac27a0ecSDave Kleikamp  *	It is used when heuristic for sequential allocation fails.
482ac27a0ecSDave Kleikamp  *	Rules are:
483ac27a0ecSDave Kleikamp  *	  + if there is a block to the left of our position - allocate near it.
484ac27a0ecSDave Kleikamp  *	  + if pointer will live in indirect block - allocate near that block.
485ac27a0ecSDave Kleikamp  *	  + if pointer will live in inode - allocate in the same
486ac27a0ecSDave Kleikamp  *	    cylinder group.
487ac27a0ecSDave Kleikamp  *
488ac27a0ecSDave Kleikamp  * In the latter case we colour the starting block by the caller's PID to
489ac27a0ecSDave Kleikamp  * prevent it from clashing with concurrent allocations for a different inode
490ac27a0ecSDave Kleikamp  * in the same block group.   The PID is used here so that functionally related
491ac27a0ecSDave Kleikamp  * files will be close-by on-disk.
492ac27a0ecSDave Kleikamp  *
493ac27a0ecSDave Kleikamp  *	Caller must make sure that @ind is valid and will stay that way.
494ac27a0ecSDave Kleikamp  */
495617ba13bSMingming Cao static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
496ac27a0ecSDave Kleikamp {
497617ba13bSMingming Cao 	struct ext4_inode_info *ei = EXT4_I(inode);
498ac27a0ecSDave Kleikamp 	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
499ac27a0ecSDave Kleikamp 	__le32 *p;
500617ba13bSMingming Cao 	ext4_fsblk_t bg_start;
50174d3487fSValerie Clement 	ext4_fsblk_t last_block;
502617ba13bSMingming Cao 	ext4_grpblk_t colour;
503a4912123STheodore Ts'o 	ext4_group_t block_group;
504a4912123STheodore Ts'o 	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
505ac27a0ecSDave Kleikamp 
506ac27a0ecSDave Kleikamp 	/* Try to find previous block */
507ac27a0ecSDave Kleikamp 	for (p = ind->p - 1; p >= start; p--) {
508ac27a0ecSDave Kleikamp 		if (*p)
509ac27a0ecSDave Kleikamp 			return le32_to_cpu(*p);
510ac27a0ecSDave Kleikamp 	}
511ac27a0ecSDave Kleikamp 
512ac27a0ecSDave Kleikamp 	/* No such thing, so let's try location of indirect block */
513ac27a0ecSDave Kleikamp 	if (ind->bh)
514ac27a0ecSDave Kleikamp 		return ind->bh->b_blocknr;
515ac27a0ecSDave Kleikamp 
516ac27a0ecSDave Kleikamp 	/*
517ac27a0ecSDave Kleikamp 	 * It is going to be referred to from the inode itself? OK, just put it
518ac27a0ecSDave Kleikamp 	 * into the same cylinder group then.
519ac27a0ecSDave Kleikamp 	 */
520a4912123STheodore Ts'o 	block_group = ei->i_block_group;
521a4912123STheodore Ts'o 	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
522a4912123STheodore Ts'o 		block_group &= ~(flex_size-1);
523a4912123STheodore Ts'o 		if (S_ISREG(inode->i_mode))
524a4912123STheodore Ts'o 			block_group++;
525a4912123STheodore Ts'o 	}
526a4912123STheodore Ts'o 	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
52774d3487fSValerie Clement 	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
52874d3487fSValerie Clement 
529a4912123STheodore Ts'o 	/*
530a4912123STheodore Ts'o 	 * If we are doing delayed allocation, we don't need to take
531a4912123STheodore Ts'o 	 * colour into account.
532a4912123STheodore Ts'o 	 */
533a4912123STheodore Ts'o 	if (test_opt(inode->i_sb, DELALLOC))
534a4912123STheodore Ts'o 		return bg_start;
535a4912123STheodore Ts'o 
53674d3487fSValerie Clement 	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
537ac27a0ecSDave Kleikamp 		colour = (current->pid % 16) *
538617ba13bSMingming Cao 			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
53974d3487fSValerie Clement 	else
54074d3487fSValerie Clement 		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
541ac27a0ecSDave Kleikamp 	return bg_start + colour;
542ac27a0ecSDave Kleikamp }
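
/*
 * A worked example, assuming the default 32768 blocks per group for a
 * 4KB block size and no delayed allocation: when neither a previous
 * block nor the indirect block supplies a hint, a process with pid 1234
 * allocating from a group starting at bg_start gets
 * bg_start + (1234 % 16) * (32768 / 16) = bg_start + 4096, which spreads
 * concurrent allocations by unrelated processes across the group.
 */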
543ac27a0ecSDave Kleikamp 
544ac27a0ecSDave Kleikamp /**
5451cc8dcf5SBenoit Boissinot  *	ext4_find_goal - find a preferred place for allocation.
546ac27a0ecSDave Kleikamp  *	@inode: owner
547ac27a0ecSDave Kleikamp  *	@block:  block we want
548ac27a0ecSDave Kleikamp  *	@partial: pointer to the last triple within a chain
549ac27a0ecSDave Kleikamp  *
5501cc8dcf5SBenoit Boissinot  *	Normally this function finds the preferred place for block allocation
551fb01bfdaSAkinobu Mita  *	and returns it.
552ac27a0ecSDave Kleikamp  */
553725d26d3SAneesh Kumar K.V static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
554fb01bfdaSAkinobu Mita 		Indirect *partial)
555ac27a0ecSDave Kleikamp {
556ac27a0ecSDave Kleikamp 	/*
557c2ea3fdeSTheodore Ts'o 	 * XXX need to get goal block from mballoc's data structures
558ac27a0ecSDave Kleikamp 	 */
559ac27a0ecSDave Kleikamp 
560617ba13bSMingming Cao 	return ext4_find_near(inode, partial);
561ac27a0ecSDave Kleikamp }
562ac27a0ecSDave Kleikamp 
563ac27a0ecSDave Kleikamp /**
564617ba13bSMingming Cao  *	ext4_blks_to_allocate: Look up the block map and count the number
565ac27a0ecSDave Kleikamp  *	of direct blocks that need to be allocated for the given branch.
566ac27a0ecSDave Kleikamp  *
567ac27a0ecSDave Kleikamp  *	@branch: chain of indirect blocks
568ac27a0ecSDave Kleikamp  *	@k: number of blocks need for indirect blocks
569ac27a0ecSDave Kleikamp  *	@blks: number of data blocks to be mapped.
570ac27a0ecSDave Kleikamp  *	@blocks_to_boundary:  the offset in the indirect block
571ac27a0ecSDave Kleikamp  *
572ac27a0ecSDave Kleikamp  *	return the total number of blocks to be allocated, including the
573ac27a0ecSDave Kleikamp  *	direct and indirect blocks.
574ac27a0ecSDave Kleikamp  */
575498e5f24STheodore Ts'o static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
576ac27a0ecSDave Kleikamp 		int blocks_to_boundary)
577ac27a0ecSDave Kleikamp {
578498e5f24STheodore Ts'o 	unsigned int count = 0;
579ac27a0ecSDave Kleikamp 
580ac27a0ecSDave Kleikamp 	/*
581ac27a0ecSDave Kleikamp 	 * Simple case: the [t,d]indirect block(s) have not been allocated yet,
582ac27a0ecSDave Kleikamp 	 * so it's clear that blocks on that path have not been allocated either
583ac27a0ecSDave Kleikamp 	 */
584ac27a0ecSDave Kleikamp 	if (k > 0) {
585ac27a0ecSDave Kleikamp 		/* right now we don't handle cross boundary allocation */
586ac27a0ecSDave Kleikamp 		if (blks < blocks_to_boundary + 1)
587ac27a0ecSDave Kleikamp 			count += blks;
588ac27a0ecSDave Kleikamp 		else
589ac27a0ecSDave Kleikamp 			count += blocks_to_boundary + 1;
590ac27a0ecSDave Kleikamp 		return count;
591ac27a0ecSDave Kleikamp 	}
592ac27a0ecSDave Kleikamp 
593ac27a0ecSDave Kleikamp 	count++;
594ac27a0ecSDave Kleikamp 	while (count < blks && count <= blocks_to_boundary &&
595ac27a0ecSDave Kleikamp 		le32_to_cpu(*(branch[0].p + count)) == 0) {
596ac27a0ecSDave Kleikamp 		count++;
597ac27a0ecSDave Kleikamp 	}
598ac27a0ecSDave Kleikamp 	return count;
599ac27a0ecSDave Kleikamp }
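
/*
 * An illustration: with k > 0 (some indirect blocks still need to be
 * allocated) the result is simply min(blks, blocks_to_boundary + 1).
 * With k == 0 the loop counts consecutive zero slots starting at
 * branch[0].p, so a request for blks = 8 where only the first five slots
 * (including the starting one) are still unallocated returns 5.
 */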
600ac27a0ecSDave Kleikamp 
601ac27a0ecSDave Kleikamp /**
602617ba13bSMingming Cao  *	ext4_alloc_blocks: multiple allocate blocks needed for a branch
603ac27a0ecSDave Kleikamp  *	@indirect_blks: the number of blocks that need to be allocated for
604ac27a0ecSDave Kleikamp  *			indirect blocks
605ac27a0ecSDave Kleikamp  *
606ac27a0ecSDave Kleikamp  *	@new_blocks: on return it will store the new block numbers for
607ac27a0ecSDave Kleikamp  *	the indirect blocks(if needed) and the first direct block,
608ac27a0ecSDave Kleikamp  *	the indirect blocks (if needed) and the first direct block.
609ac27a0ecSDave Kleikamp  *		direct blocks
610ac27a0ecSDave Kleikamp  */
611617ba13bSMingming Cao static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
6127061eba7SAneesh Kumar K.V 				ext4_lblk_t iblock, ext4_fsblk_t goal,
6137061eba7SAneesh Kumar K.V 				int indirect_blks, int blks,
614617ba13bSMingming Cao 				ext4_fsblk_t new_blocks[4], int *err)
615ac27a0ecSDave Kleikamp {
616815a1130STheodore Ts'o 	struct ext4_allocation_request ar;
617ac27a0ecSDave Kleikamp 	int target, i;
6187061eba7SAneesh Kumar K.V 	unsigned long count = 0, blk_allocated = 0;
619ac27a0ecSDave Kleikamp 	int index = 0;
620617ba13bSMingming Cao 	ext4_fsblk_t current_block = 0;
621ac27a0ecSDave Kleikamp 	int ret = 0;
622ac27a0ecSDave Kleikamp 
623ac27a0ecSDave Kleikamp 	/*
624ac27a0ecSDave Kleikamp 	 * Here we try to allocate the requested multiple blocks at once,
625ac27a0ecSDave Kleikamp 	 * on a best-effort basis.
626ac27a0ecSDave Kleikamp 	 * To build a branch, we should allocate blocks for
627ac27a0ecSDave Kleikamp 	 * the indirect blocks (if not allocated yet), and at least
628ac27a0ecSDave Kleikamp 	 * the first direct block of this branch.  That's the
629ac27a0ecSDave Kleikamp 	 * minimum number of blocks that need to be allocated (required).
630ac27a0ecSDave Kleikamp 	 */
6317061eba7SAneesh Kumar K.V 	/* first we try to allocate the indirect blocks */
6327061eba7SAneesh Kumar K.V 	target = indirect_blks;
6337061eba7SAneesh Kumar K.V 	while (target > 0) {
634ac27a0ecSDave Kleikamp 		count = target;
635ac27a0ecSDave Kleikamp 		/* allocating blocks for indirect blocks and direct blocks */
6367061eba7SAneesh Kumar K.V 		current_block = ext4_new_meta_blocks(handle, inode,
6377061eba7SAneesh Kumar K.V 							goal, &count, err);
638ac27a0ecSDave Kleikamp 		if (*err)
639ac27a0ecSDave Kleikamp 			goto failed_out;
640ac27a0ecSDave Kleikamp 
641ac27a0ecSDave Kleikamp 		target -= count;
642ac27a0ecSDave Kleikamp 		/* allocate blocks for indirect blocks */
643ac27a0ecSDave Kleikamp 		while (index < indirect_blks && count) {
644ac27a0ecSDave Kleikamp 			new_blocks[index++] = current_block++;
645ac27a0ecSDave Kleikamp 			count--;
646ac27a0ecSDave Kleikamp 		}
6477061eba7SAneesh Kumar K.V 		if (count > 0) {
6487061eba7SAneesh Kumar K.V 			/*
6497061eba7SAneesh Kumar K.V 			 * save the new block number
6507061eba7SAneesh Kumar K.V 			 * for the first direct block
6517061eba7SAneesh Kumar K.V 			 */
6527061eba7SAneesh Kumar K.V 			new_blocks[index] = current_block;
6537061eba7SAneesh Kumar K.V 			printk(KERN_INFO "%s returned more blocks than "
6547061eba7SAneesh Kumar K.V 						"requested\n", __func__);
6557061eba7SAneesh Kumar K.V 			WARN_ON(1);
656ac27a0ecSDave Kleikamp 			break;
657ac27a0ecSDave Kleikamp 		}
6587061eba7SAneesh Kumar K.V 	}
659ac27a0ecSDave Kleikamp 
6607061eba7SAneesh Kumar K.V 	target = blks - count;
6617061eba7SAneesh Kumar K.V 	blk_allocated = count;
6627061eba7SAneesh Kumar K.V 	if (!target)
6637061eba7SAneesh Kumar K.V 		goto allocated;
6647061eba7SAneesh Kumar K.V 	/* Now allocate data blocks */
665815a1130STheodore Ts'o 	memset(&ar, 0, sizeof(ar));
666815a1130STheodore Ts'o 	ar.inode = inode;
667815a1130STheodore Ts'o 	ar.goal = goal;
668815a1130STheodore Ts'o 	ar.len = target;
669815a1130STheodore Ts'o 	ar.logical = iblock;
670815a1130STheodore Ts'o 	if (S_ISREG(inode->i_mode))
671815a1130STheodore Ts'o 		/* enable in-core preallocation only for regular files */
672815a1130STheodore Ts'o 		ar.flags = EXT4_MB_HINT_DATA;
673815a1130STheodore Ts'o 
674815a1130STheodore Ts'o 	current_block = ext4_mb_new_blocks(handle, &ar, err);
675815a1130STheodore Ts'o 
6767061eba7SAneesh Kumar K.V 	if (*err && (target == blks)) {
6777061eba7SAneesh Kumar K.V 		/*
6787061eba7SAneesh Kumar K.V 		 * if the allocation failed and we didn't allocate
6797061eba7SAneesh Kumar K.V 		 * any blocks before
6807061eba7SAneesh Kumar K.V 		 */
6817061eba7SAneesh Kumar K.V 		goto failed_out;
6827061eba7SAneesh Kumar K.V 	}
6837061eba7SAneesh Kumar K.V 	if (!*err) {
6847061eba7SAneesh Kumar K.V 		if (target == blks) {
6857061eba7SAneesh Kumar K.V 		/*
6867061eba7SAneesh Kumar K.V 		 * save the new block number
6877061eba7SAneesh Kumar K.V 		 * for the first direct block
6887061eba7SAneesh Kumar K.V 		 */
689ac27a0ecSDave Kleikamp 			new_blocks[index] = current_block;
6907061eba7SAneesh Kumar K.V 		}
691815a1130STheodore Ts'o 		blk_allocated += ar.len;
6927061eba7SAneesh Kumar K.V 	}
6937061eba7SAneesh Kumar K.V allocated:
694ac27a0ecSDave Kleikamp 	/* total number of blocks allocated for direct blocks */
6957061eba7SAneesh Kumar K.V 	ret = blk_allocated;
696ac27a0ecSDave Kleikamp 	*err = 0;
697ac27a0ecSDave Kleikamp 	return ret;
698ac27a0ecSDave Kleikamp failed_out:
699ac27a0ecSDave Kleikamp 	for (i = 0; i < index; i++)
700c9de560dSAlex Tomas 		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
701ac27a0ecSDave Kleikamp 	return ret;
702ac27a0ecSDave Kleikamp }
703ac27a0ecSDave Kleikamp 
704ac27a0ecSDave Kleikamp /**
705617ba13bSMingming Cao  *	ext4_alloc_branch - allocate and set up a chain of blocks.
706ac27a0ecSDave Kleikamp  *	@inode: owner
707ac27a0ecSDave Kleikamp  *	@indirect_blks: number of allocated indirect blocks
708ac27a0ecSDave Kleikamp  *	@blks: number of allocated direct blocks
709ac27a0ecSDave Kleikamp  *	@offsets: offsets (in the blocks) to store the pointers to next.
710ac27a0ecSDave Kleikamp  *	@branch: place to store the chain in.
711ac27a0ecSDave Kleikamp  *
712ac27a0ecSDave Kleikamp  *	This function allocates blocks, zeroes out all but the last one,
713ac27a0ecSDave Kleikamp  *	links them into chain and (if we are synchronous) writes them to disk.
714ac27a0ecSDave Kleikamp  *	In other words, it prepares a branch that can be spliced onto the
715ac27a0ecSDave Kleikamp  *	inode. It stores the information about that chain in the branch[], in
716617ba13bSMingming Cao  *	the same format as ext4_get_branch() would do. We are calling it after
717ac27a0ecSDave Kleikamp  *	we had read the existing part of chain and partial points to the last
718ac27a0ecSDave Kleikamp  *	triple of that (one with zero ->key). Upon the exit we have the same
719617ba13bSMingming Cao  *	picture as after the successful ext4_get_block(), except that in one
720ac27a0ecSDave Kleikamp  *	place chain is disconnected - *branch->p is still zero (we did not
721ac27a0ecSDave Kleikamp  *	set the last link), but branch->key contains the number that should
722ac27a0ecSDave Kleikamp  *	be placed into *branch->p to fill that gap.
723ac27a0ecSDave Kleikamp  *
724ac27a0ecSDave Kleikamp  *	If allocation fails we free all blocks we've allocated (and forget
725ac27a0ecSDave Kleikamp  *	their buffer_heads) and return the error value from the failed
726617ba13bSMingming Cao  *	ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
727ac27a0ecSDave Kleikamp  *	as described above and return 0.
728ac27a0ecSDave Kleikamp  */
729617ba13bSMingming Cao static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
7307061eba7SAneesh Kumar K.V 				ext4_lblk_t iblock, int indirect_blks,
7317061eba7SAneesh Kumar K.V 				int *blks, ext4_fsblk_t goal,
732725d26d3SAneesh Kumar K.V 				ext4_lblk_t *offsets, Indirect *branch)
733ac27a0ecSDave Kleikamp {
734ac27a0ecSDave Kleikamp 	int blocksize = inode->i_sb->s_blocksize;
735ac27a0ecSDave Kleikamp 	int i, n = 0;
736ac27a0ecSDave Kleikamp 	int err = 0;
737ac27a0ecSDave Kleikamp 	struct buffer_head *bh;
738ac27a0ecSDave Kleikamp 	int num;
739617ba13bSMingming Cao 	ext4_fsblk_t new_blocks[4];
740617ba13bSMingming Cao 	ext4_fsblk_t current_block;
741ac27a0ecSDave Kleikamp 
7427061eba7SAneesh Kumar K.V 	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
743ac27a0ecSDave Kleikamp 				*blks, new_blocks, &err);
744ac27a0ecSDave Kleikamp 	if (err)
745ac27a0ecSDave Kleikamp 		return err;
746ac27a0ecSDave Kleikamp 
747ac27a0ecSDave Kleikamp 	branch[0].key = cpu_to_le32(new_blocks[0]);
748ac27a0ecSDave Kleikamp 	/*
749ac27a0ecSDave Kleikamp 	 * metadata blocks and data blocks are allocated.
750ac27a0ecSDave Kleikamp 	 */
751ac27a0ecSDave Kleikamp 	for (n = 1; n <= indirect_blks;  n++) {
752ac27a0ecSDave Kleikamp 		/*
753ac27a0ecSDave Kleikamp 		 * Get buffer_head for parent block, zero it out
754ac27a0ecSDave Kleikamp 		 * and set the pointer to new one, then send
755ac27a0ecSDave Kleikamp 		 * parent to disk.
756ac27a0ecSDave Kleikamp 		 */
757ac27a0ecSDave Kleikamp 		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
758ac27a0ecSDave Kleikamp 		branch[n].bh = bh;
759ac27a0ecSDave Kleikamp 		lock_buffer(bh);
760ac27a0ecSDave Kleikamp 		BUFFER_TRACE(bh, "call get_create_access");
761617ba13bSMingming Cao 		err = ext4_journal_get_create_access(handle, bh);
762ac27a0ecSDave Kleikamp 		if (err) {
763ac27a0ecSDave Kleikamp 			unlock_buffer(bh);
764ac27a0ecSDave Kleikamp 			brelse(bh);
765ac27a0ecSDave Kleikamp 			goto failed;
766ac27a0ecSDave Kleikamp 		}
767ac27a0ecSDave Kleikamp 
768ac27a0ecSDave Kleikamp 		memset(bh->b_data, 0, blocksize);
769ac27a0ecSDave Kleikamp 		branch[n].p = (__le32 *) bh->b_data + offsets[n];
770ac27a0ecSDave Kleikamp 		branch[n].key = cpu_to_le32(new_blocks[n]);
771ac27a0ecSDave Kleikamp 		*branch[n].p = branch[n].key;
772ac27a0ecSDave Kleikamp 		if (n == indirect_blks) {
773ac27a0ecSDave Kleikamp 			current_block = new_blocks[n];
774ac27a0ecSDave Kleikamp 			/*
775ac27a0ecSDave Kleikamp 			 * End of chain, update the last new metablock of
776ac27a0ecSDave Kleikamp 			 * the chain to point to the new allocated
777ac27a0ecSDave Kleikamp 			 * data blocks numbers
778ac27a0ecSDave Kleikamp 			 */
779ac27a0ecSDave Kleikamp 			for (i=1; i < num; i++)
780ac27a0ecSDave Kleikamp 				*(branch[n].p + i) = cpu_to_le32(++current_block);
781ac27a0ecSDave Kleikamp 		}
782ac27a0ecSDave Kleikamp 		BUFFER_TRACE(bh, "marking uptodate");
783ac27a0ecSDave Kleikamp 		set_buffer_uptodate(bh);
784ac27a0ecSDave Kleikamp 		unlock_buffer(bh);
785ac27a0ecSDave Kleikamp 
7860390131bSFrank Mayhar 		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
7870390131bSFrank Mayhar 		err = ext4_handle_dirty_metadata(handle, inode, bh);
788ac27a0ecSDave Kleikamp 		if (err)
789ac27a0ecSDave Kleikamp 			goto failed;
790ac27a0ecSDave Kleikamp 	}
791ac27a0ecSDave Kleikamp 	*blks = num;
792ac27a0ecSDave Kleikamp 	return err;
793ac27a0ecSDave Kleikamp failed:
794ac27a0ecSDave Kleikamp 	/* Allocation failed, free what we already allocated */
795ac27a0ecSDave Kleikamp 	for (i = 1; i <= n ; i++) {
796dab291afSMingming Cao 		BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
797617ba13bSMingming Cao 		ext4_journal_forget(handle, branch[i].bh);
798ac27a0ecSDave Kleikamp 	}
799ac27a0ecSDave Kleikamp 	for (i = 0; i < indirect_blks; i++)
800c9de560dSAlex Tomas 		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
801ac27a0ecSDave Kleikamp 
802c9de560dSAlex Tomas 	ext4_free_blocks(handle, inode, new_blocks[i], num, 0);
803ac27a0ecSDave Kleikamp 
804ac27a0ecSDave Kleikamp 	return err;
805ac27a0ecSDave Kleikamp }
806ac27a0ecSDave Kleikamp 
807ac27a0ecSDave Kleikamp /**
808617ba13bSMingming Cao  * ext4_splice_branch - splice the allocated branch onto inode.
809ac27a0ecSDave Kleikamp  * @inode: owner
810ac27a0ecSDave Kleikamp  * @block: (logical) number of block we are adding
811ac27a0ecSDave Kleikamp  * @chain: chain of indirect blocks (with a missing link - see
812617ba13bSMingming Cao  *	ext4_alloc_branch)
813ac27a0ecSDave Kleikamp  * @where: location of missing link
814ac27a0ecSDave Kleikamp  * @num:   number of indirect blocks we are adding
815ac27a0ecSDave Kleikamp  * @blks:  number of direct blocks we are adding
816ac27a0ecSDave Kleikamp  *
817ac27a0ecSDave Kleikamp  * This function fills the missing link and does all housekeeping needed in
818ac27a0ecSDave Kleikamp  * inode (->i_blocks, etc.). In case of success we end up with the full
819ac27a0ecSDave Kleikamp  * chain to new block and return 0.
820ac27a0ecSDave Kleikamp  */
821617ba13bSMingming Cao static int ext4_splice_branch(handle_t *handle, struct inode *inode,
822725d26d3SAneesh Kumar K.V 			ext4_lblk_t block, Indirect *where, int num, int blks)
823ac27a0ecSDave Kleikamp {
824ac27a0ecSDave Kleikamp 	int i;
825ac27a0ecSDave Kleikamp 	int err = 0;
826617ba13bSMingming Cao 	ext4_fsblk_t current_block;
827ac27a0ecSDave Kleikamp 
828ac27a0ecSDave Kleikamp 	/*
829ac27a0ecSDave Kleikamp 	 * If we're splicing into a [td]indirect block (as opposed to the
830ac27a0ecSDave Kleikamp 	 * inode) then we need to get write access to the [td]indirect block
831ac27a0ecSDave Kleikamp 	 * before the splice.
832ac27a0ecSDave Kleikamp 	 */
833ac27a0ecSDave Kleikamp 	if (where->bh) {
834ac27a0ecSDave Kleikamp 		BUFFER_TRACE(where->bh, "get_write_access");
835617ba13bSMingming Cao 		err = ext4_journal_get_write_access(handle, where->bh);
836ac27a0ecSDave Kleikamp 		if (err)
837ac27a0ecSDave Kleikamp 			goto err_out;
838ac27a0ecSDave Kleikamp 	}
839ac27a0ecSDave Kleikamp 	/* That's it */
840ac27a0ecSDave Kleikamp 
841ac27a0ecSDave Kleikamp 	*where->p = where->key;
842ac27a0ecSDave Kleikamp 
843ac27a0ecSDave Kleikamp 	/*
844ac27a0ecSDave Kleikamp 	 * Update the host buffer_head or inode to point to more just allocated
845ac27a0ecSDave Kleikamp 	 * Update the host buffer_head or inode to point to the just-allocated
846ac27a0ecSDave Kleikamp 	 * direct blocks
847ac27a0ecSDave Kleikamp 	if (num == 0 && blks > 1) {
848ac27a0ecSDave Kleikamp 		current_block = le32_to_cpu(where->key) + 1;
849ac27a0ecSDave Kleikamp 		for (i = 1; i < blks; i++)
850ac27a0ecSDave Kleikamp 			*(where->p + i) = cpu_to_le32(current_block++);
851ac27a0ecSDave Kleikamp 	}
852ac27a0ecSDave Kleikamp 
853ac27a0ecSDave Kleikamp 	/* We are done with atomic stuff, now do the rest of housekeeping */
854ac27a0ecSDave Kleikamp 
855ef7f3835SKalpak Shah 	inode->i_ctime = ext4_current_time(inode);
856617ba13bSMingming Cao 	ext4_mark_inode_dirty(handle, inode);
857ac27a0ecSDave Kleikamp 
858ac27a0ecSDave Kleikamp 	/* had we spliced it onto indirect block? */
859ac27a0ecSDave Kleikamp 	if (where->bh) {
860ac27a0ecSDave Kleikamp 		/*
861ac27a0ecSDave Kleikamp 		 * If we spliced it onto an indirect block, we haven't
862ac27a0ecSDave Kleikamp 		 * altered the inode.  Note however that if it is being spliced
863ac27a0ecSDave Kleikamp 		 * onto an indirect block at the very end of the file (the
864ac27a0ecSDave Kleikamp 		 * file is growing) then we *will* alter the inode to reflect
865ac27a0ecSDave Kleikamp 		 * the new i_size.  But that is not done here - it is done in
866617ba13bSMingming Cao 		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
867ac27a0ecSDave Kleikamp 		 */
868ac27a0ecSDave Kleikamp 		jbd_debug(5, "splicing indirect only\n");
8690390131bSFrank Mayhar 		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
8700390131bSFrank Mayhar 		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
871ac27a0ecSDave Kleikamp 		if (err)
872ac27a0ecSDave Kleikamp 			goto err_out;
873ac27a0ecSDave Kleikamp 	} else {
874ac27a0ecSDave Kleikamp 		/*
875ac27a0ecSDave Kleikamp 		 * OK, we spliced it into the inode itself on a direct block.
876ac27a0ecSDave Kleikamp 		 * Inode was dirtied above.
877ac27a0ecSDave Kleikamp 		 */
878ac27a0ecSDave Kleikamp 		jbd_debug(5, "splicing direct\n");
879ac27a0ecSDave Kleikamp 	}
880ac27a0ecSDave Kleikamp 	return err;
881ac27a0ecSDave Kleikamp 
882ac27a0ecSDave Kleikamp err_out:
883ac27a0ecSDave Kleikamp 	for (i = 1; i <= num; i++) {
884dab291afSMingming Cao 		BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
885617ba13bSMingming Cao 		ext4_journal_forget(handle, where[i].bh);
886c9de560dSAlex Tomas 		ext4_free_blocks(handle, inode,
887c9de560dSAlex Tomas 					le32_to_cpu(where[i-1].key), 1, 0);
888ac27a0ecSDave Kleikamp 	}
889c9de560dSAlex Tomas 	ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 0);
890ac27a0ecSDave Kleikamp 
891ac27a0ecSDave Kleikamp 	return err;
892ac27a0ecSDave Kleikamp }
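
/*
 * A brief sketch for the common case where only the last single-indirect
 * block was missing (num == 1): ext4_alloc_branch() has already filled
 * the new indirect block with the new data block numbers, so the only
 * missing link is *where->p in the existing parent block.  This function
 * writes where->key into that slot, fills further direct pointers only
 * when num == 0 and blks > 1, updates i_ctime, marks the inode dirty,
 * and also dirties the parent buffer when the splice landed in an
 * indirect block.
 */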
893ac27a0ecSDave Kleikamp 
894ac27a0ecSDave Kleikamp /*
895ac27a0ecSDave Kleikamp  * Allocation strategy is simple: if we have to allocate something, we will
896ac27a0ecSDave Kleikamp  * have to go the whole way to leaf. So let's do it before attaching anything
897ac27a0ecSDave Kleikamp  * to tree, set linkage between the newborn blocks, write them if sync is
898ac27a0ecSDave Kleikamp  * required, recheck the path, free and repeat if check fails, otherwise
899ac27a0ecSDave Kleikamp  * set the last missing link (that will protect us from any truncate-generated
900ac27a0ecSDave Kleikamp  * removals - all blocks on the path are immune now) and possibly force the
901ac27a0ecSDave Kleikamp  * write on the parent block.
902ac27a0ecSDave Kleikamp  * That has a nice additional property: no special recovery from the failed
903ac27a0ecSDave Kleikamp  * allocations is needed - we simply release blocks and do not touch anything
904ac27a0ecSDave Kleikamp  * reachable from inode.
905ac27a0ecSDave Kleikamp  *
906ac27a0ecSDave Kleikamp  * `handle' can be NULL if create == 0.
907ac27a0ecSDave Kleikamp  *
908ac27a0ecSDave Kleikamp  * return > 0, # of blocks mapped or allocated.
909ac27a0ecSDave Kleikamp  * return = 0, if plain lookup failed.
910ac27a0ecSDave Kleikamp  * return < 0, error case.
911c278bfecSAneesh Kumar K.V  *
912c278bfecSAneesh Kumar K.V  *
913c278bfecSAneesh Kumar K.V  * Need to be called with
9140e855ac8SAneesh Kumar K.V  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
9150e855ac8SAneesh Kumar K.V  * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
916ac27a0ecSDave Kleikamp  */
917498e5f24STheodore Ts'o static int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
918498e5f24STheodore Ts'o 				  ext4_lblk_t iblock, unsigned int maxblocks,
919ac27a0ecSDave Kleikamp 				  struct buffer_head *bh_result,
920ac27a0ecSDave Kleikamp 				  int create, int extend_disksize)
921ac27a0ecSDave Kleikamp {
922ac27a0ecSDave Kleikamp 	int err = -EIO;
923725d26d3SAneesh Kumar K.V 	ext4_lblk_t offsets[4];
924ac27a0ecSDave Kleikamp 	Indirect chain[4];
925ac27a0ecSDave Kleikamp 	Indirect *partial;
926617ba13bSMingming Cao 	ext4_fsblk_t goal;
927ac27a0ecSDave Kleikamp 	int indirect_blks;
928ac27a0ecSDave Kleikamp 	int blocks_to_boundary = 0;
929ac27a0ecSDave Kleikamp 	int depth;
930617ba13bSMingming Cao 	struct ext4_inode_info *ei = EXT4_I(inode);
931ac27a0ecSDave Kleikamp 	int count = 0;
932617ba13bSMingming Cao 	ext4_fsblk_t first_block = 0;
93361628a3fSMingming Cao 	loff_t disksize;
934ac27a0ecSDave Kleikamp 
935ac27a0ecSDave Kleikamp 
936a86c6181SAlex Tomas 	J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
937ac27a0ecSDave Kleikamp 	J_ASSERT(handle != NULL || create == 0);
938725d26d3SAneesh Kumar K.V 	depth = ext4_block_to_path(inode, iblock, offsets,
939725d26d3SAneesh Kumar K.V 					&blocks_to_boundary);
940ac27a0ecSDave Kleikamp 
941ac27a0ecSDave Kleikamp 	if (depth == 0)
942ac27a0ecSDave Kleikamp 		goto out;
943ac27a0ecSDave Kleikamp 
944617ba13bSMingming Cao 	partial = ext4_get_branch(inode, depth, offsets, chain, &err);
945ac27a0ecSDave Kleikamp 
946ac27a0ecSDave Kleikamp 	/* Simplest case - block found, no allocation needed */
947ac27a0ecSDave Kleikamp 	if (!partial) {
948ac27a0ecSDave Kleikamp 		first_block = le32_to_cpu(chain[depth - 1].key);
949ac27a0ecSDave Kleikamp 		clear_buffer_new(bh_result);
950ac27a0ecSDave Kleikamp 		count++;
951ac27a0ecSDave Kleikamp 		/* map more blocks */
952ac27a0ecSDave Kleikamp 		while (count < maxblocks && count <= blocks_to_boundary) {
953617ba13bSMingming Cao 			ext4_fsblk_t blk;
954ac27a0ecSDave Kleikamp 
955ac27a0ecSDave Kleikamp 			blk = le32_to_cpu(*(chain[depth-1].p + count));
956ac27a0ecSDave Kleikamp 
957ac27a0ecSDave Kleikamp 			if (blk == first_block + count)
958ac27a0ecSDave Kleikamp 				count++;
959ac27a0ecSDave Kleikamp 			else
960ac27a0ecSDave Kleikamp 				break;
961ac27a0ecSDave Kleikamp 		}
962ac27a0ecSDave Kleikamp 		goto got_it;
963ac27a0ecSDave Kleikamp 	}
964ac27a0ecSDave Kleikamp 
965ac27a0ecSDave Kleikamp 	/* Next simple case - plain lookup or failed read of indirect block */
966ac27a0ecSDave Kleikamp 	if (!create || err == -EIO)
967ac27a0ecSDave Kleikamp 		goto cleanup;
968ac27a0ecSDave Kleikamp 
969ac27a0ecSDave Kleikamp 	/*
970c2ea3fdeSTheodore Ts'o 	 * Okay, we need to do block allocation.
971ac27a0ecSDave Kleikamp 	*/
972fb01bfdaSAkinobu Mita 	goal = ext4_find_goal(inode, iblock, partial);
973ac27a0ecSDave Kleikamp 
974ac27a0ecSDave Kleikamp 	/* the number of blocks need to allocate for [d,t]indirect blocks */
975ac27a0ecSDave Kleikamp 	indirect_blks = (chain + depth) - partial - 1;
976ac27a0ecSDave Kleikamp 
977ac27a0ecSDave Kleikamp 	/*
978ac27a0ecSDave Kleikamp 	 * Next look up the indirect map to count the total number of
979ac27a0ecSDave Kleikamp 	 * direct blocks to allocate for this branch.
980ac27a0ecSDave Kleikamp 	 */
981617ba13bSMingming Cao 	count = ext4_blks_to_allocate(partial, indirect_blks,
982ac27a0ecSDave Kleikamp 					maxblocks, blocks_to_boundary);
983ac27a0ecSDave Kleikamp 	/*
984617ba13bSMingming Cao 	 * Block out ext4_truncate while we alter the tree
985ac27a0ecSDave Kleikamp 	 */
9867061eba7SAneesh Kumar K.V 	err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
9877061eba7SAneesh Kumar K.V 					&count, goal,
988ac27a0ecSDave Kleikamp 					offsets + (partial - chain), partial);
989ac27a0ecSDave Kleikamp 
990ac27a0ecSDave Kleikamp 	/*
991617ba13bSMingming Cao 	 * The ext4_splice_branch call will free and forget any buffers
992ac27a0ecSDave Kleikamp 	 * on the new chain if there is a failure, but that risks using
993ac27a0ecSDave Kleikamp 	 * up transaction credits, especially for bitmaps where the
994ac27a0ecSDave Kleikamp 	 * credits cannot be returned.  Can we handle this somehow?  We
995ac27a0ecSDave Kleikamp 	 * may need to return -EAGAIN upwards in the worst case.  --sct
996ac27a0ecSDave Kleikamp 	 */
997ac27a0ecSDave Kleikamp 	if (!err)
998617ba13bSMingming Cao 		err = ext4_splice_branch(handle, inode, iblock,
999ac27a0ecSDave Kleikamp 					partial, indirect_blks, count);
1000ac27a0ecSDave Kleikamp 	/*
10010e855ac8SAneesh Kumar K.V 	 * i_disksize growing is protected by i_data_sem.  Don't forget to
1002ac27a0ecSDave Kleikamp 	 * protect it if you're about to implement concurrent
1003617ba13bSMingming Cao 	 * ext4_get_block() -bzzz
1004ac27a0ecSDave Kleikamp 	*/
100561628a3fSMingming Cao 	if (!err && extend_disksize) {
100661628a3fSMingming Cao 		disksize = ((loff_t) iblock + count) << inode->i_blkbits;
100761628a3fSMingming Cao 		if (disksize > i_size_read(inode))
100861628a3fSMingming Cao 			disksize = i_size_read(inode);
100961628a3fSMingming Cao 		if (disksize > ei->i_disksize)
101061628a3fSMingming Cao 			ei->i_disksize = disksize;
101161628a3fSMingming Cao 	}
1012ac27a0ecSDave Kleikamp 	if (err)
1013ac27a0ecSDave Kleikamp 		goto cleanup;
1014ac27a0ecSDave Kleikamp 
1015ac27a0ecSDave Kleikamp 	set_buffer_new(bh_result);
1016ac27a0ecSDave Kleikamp got_it:
1017ac27a0ecSDave Kleikamp 	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
1018ac27a0ecSDave Kleikamp 	if (count > blocks_to_boundary)
1019ac27a0ecSDave Kleikamp 		set_buffer_boundary(bh_result);
1020ac27a0ecSDave Kleikamp 	err = count;
1021ac27a0ecSDave Kleikamp 	/* Clean up and exit */
1022ac27a0ecSDave Kleikamp 	partial = chain + depth - 1;	/* the whole chain */
1023ac27a0ecSDave Kleikamp cleanup:
1024ac27a0ecSDave Kleikamp 	while (partial > chain) {
1025ac27a0ecSDave Kleikamp 		BUFFER_TRACE(partial->bh, "call brelse");
1026ac27a0ecSDave Kleikamp 		brelse(partial->bh);
1027ac27a0ecSDave Kleikamp 		partial--;
1028ac27a0ecSDave Kleikamp 	}
1029ac27a0ecSDave Kleikamp 	BUFFER_TRACE(bh_result, "returned");
1030ac27a0ecSDave Kleikamp out:
1031ac27a0ecSDave Kleikamp 	return err;
1032ac27a0ecSDave Kleikamp }
1033ac27a0ecSDave Kleikamp 
103460e58e0fSMingming Cao qsize_t ext4_get_reserved_space(struct inode *inode)
103560e58e0fSMingming Cao {
103660e58e0fSMingming Cao 	unsigned long long total;
103760e58e0fSMingming Cao 
103860e58e0fSMingming Cao 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
103960e58e0fSMingming Cao 	total = EXT4_I(inode)->i_reserved_data_blocks +
104060e58e0fSMingming Cao 		EXT4_I(inode)->i_reserved_meta_blocks;
104160e58e0fSMingming Cao 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
104260e58e0fSMingming Cao 
104360e58e0fSMingming Cao 	return total;
104460e58e0fSMingming Cao }
104512219aeaSAneesh Kumar K.V /*
104612219aeaSAneesh Kumar K.V  * Calculate the number of metadata blocks that need to be reserved
104712219aeaSAneesh Kumar K.V  * in order to allocate @blocks for a non-extent-based file
104812219aeaSAneesh Kumar K.V  */
104912219aeaSAneesh Kumar K.V static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
105012219aeaSAneesh Kumar K.V {
105112219aeaSAneesh Kumar K.V 	int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
105212219aeaSAneesh Kumar K.V 	int ind_blks, dind_blks, tind_blks;
105312219aeaSAneesh Kumar K.V 
105412219aeaSAneesh Kumar K.V 	/* number of new indirect blocks needed */
105512219aeaSAneesh Kumar K.V 	ind_blks = (blocks + icap - 1) / icap;
105612219aeaSAneesh Kumar K.V 
105712219aeaSAneesh Kumar K.V 	dind_blks = (ind_blks + icap - 1) / icap;
105812219aeaSAneesh Kumar K.V 
105912219aeaSAneesh Kumar K.V 	tind_blks = 1;
106012219aeaSAneesh Kumar K.V 
106112219aeaSAneesh Kumar K.V 	return ind_blks + dind_blks + tind_blks;
106212219aeaSAneesh Kumar K.V }
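/*
 * Worked example (illustrative, assuming a 4KB block size, so
 * EXT4_ADDR_PER_BLOCK() is 1024): reserving space for 2048 data blocks
 * needs ind_blks = (2048 + 1023) / 1024 = 2 indirect blocks,
 * dind_blks = (2 + 1023) / 1024 = 1 doubly-indirect block, plus the
 * single triply-indirect block always assumed above, i.e. 4 metadata
 * blocks in total.
 */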
106312219aeaSAneesh Kumar K.V 
106412219aeaSAneesh Kumar K.V /*
106512219aeaSAneesh Kumar K.V  * Calculate the number of metadata blocks that need to be reserved
106612219aeaSAneesh Kumar K.V  * in order to allocate the given number of blocks
106712219aeaSAneesh Kumar K.V  */
106812219aeaSAneesh Kumar K.V static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
106912219aeaSAneesh Kumar K.V {
1070cd213226SMingming Cao 	if (!blocks)
1071cd213226SMingming Cao 		return 0;
1072cd213226SMingming Cao 
107312219aeaSAneesh Kumar K.V 	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
107412219aeaSAneesh Kumar K.V 		return ext4_ext_calc_metadata_amount(inode, blocks);
107512219aeaSAneesh Kumar K.V 
107612219aeaSAneesh Kumar K.V 	return ext4_indirect_calc_metadata_amount(inode, blocks);
107712219aeaSAneesh Kumar K.V }
107812219aeaSAneesh Kumar K.V 
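/*
 * Called after delayed-allocation blocks have actually been allocated:
 * @used data blocks move from the reserved pool to allocated state, the
 * metadata reservation is recomputed for the data blocks that remain
 * reserved, and any excess metadata reservation is returned to the
 * filesystem's dirty-block counter and to the quota subsystem.  Once
 * nothing remains reserved and there are no writers, the inode's
 * preallocations are discarded as well.
 */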
107912219aeaSAneesh Kumar K.V static void ext4_da_update_reserve_space(struct inode *inode, int used)
108012219aeaSAneesh Kumar K.V {
108112219aeaSAneesh Kumar K.V 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
108212219aeaSAneesh Kumar K.V 	int total, mdb, mdb_free;
108312219aeaSAneesh Kumar K.V 
108412219aeaSAneesh Kumar K.V 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
108512219aeaSAneesh Kumar K.V 	/* recalculate the number of metablocks that still need to be reserved */
108612219aeaSAneesh Kumar K.V 	total = EXT4_I(inode)->i_reserved_data_blocks - used;
108712219aeaSAneesh Kumar K.V 	mdb = ext4_calc_metadata_amount(inode, total);
108812219aeaSAneesh Kumar K.V 
108912219aeaSAneesh Kumar K.V 	/* figure out how many metablocks to release */
109012219aeaSAneesh Kumar K.V 	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
109112219aeaSAneesh Kumar K.V 	mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;
109212219aeaSAneesh Kumar K.V 
10936bc6e63fSAneesh Kumar K.V 	if (mdb_free) {
109412219aeaSAneesh Kumar K.V 		/* Account for allocated meta_blocks */
109512219aeaSAneesh Kumar K.V 		mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;
109612219aeaSAneesh Kumar K.V 
10976bc6e63fSAneesh Kumar K.V 		/* update fs dirty blocks counter */
10986bc6e63fSAneesh Kumar K.V 		percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
10996bc6e63fSAneesh Kumar K.V 		EXT4_I(inode)->i_allocated_meta_blocks = 0;
11006bc6e63fSAneesh Kumar K.V 		EXT4_I(inode)->i_reserved_meta_blocks = mdb;
11016bc6e63fSAneesh Kumar K.V 	}
110212219aeaSAneesh Kumar K.V 
110312219aeaSAneesh Kumar K.V 	/* update per-inode reservations */
110412219aeaSAneesh Kumar K.V 	BUG_ON(used  > EXT4_I(inode)->i_reserved_data_blocks);
110512219aeaSAneesh Kumar K.V 	EXT4_I(inode)->i_reserved_data_blocks -= used;
110612219aeaSAneesh Kumar K.V 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
110760e58e0fSMingming Cao 
110860e58e0fSMingming Cao 	/*
110960e58e0fSMingming Cao 	 * Release the over-booked quota reservation for metadata blocks.
111060e58e0fSMingming Cao 	 */
111160e58e0fSMingming Cao 	if (mdb_free)
111260e58e0fSMingming Cao 		vfs_dq_release_reservation_block(inode, mdb_free);
1113d6014301SAneesh Kumar K.V 
1114d6014301SAneesh Kumar K.V 	/*
1115d6014301SAneesh Kumar K.V 	 * If we have done all the pending block allocations and if
1116d6014301SAneesh Kumar K.V 	 * there aren't any writers on the inode, we can discard the
1117d6014301SAneesh Kumar K.V 	 * inode's preallocations.
1118d6014301SAneesh Kumar K.V 	 */
1119d6014301SAneesh Kumar K.V 	if (!total && (atomic_read(&inode->i_writecount) == 0))
1120d6014301SAneesh Kumar K.V 		ext4_discard_preallocations(inode);
112112219aeaSAneesh Kumar K.V }
112212219aeaSAneesh Kumar K.V 
1123f5ab0d1fSMingming Cao /*
11242b2d6d01STheodore Ts'o  * The ext4_get_blocks_wrap() function tries to look up the requested blocks,
11252b2d6d01STheodore Ts'o  * and returns the mapping if the blocks are already mapped.
1126f5ab0d1fSMingming Cao  *
1127f5ab0d1fSMingming Cao  * Otherwise it takes the write lock of i_data_sem, allocates blocks,
1128f5ab0d1fSMingming Cao  * stores the allocated blocks in the result buffer head and marks it
1129f5ab0d1fSMingming Cao  * mapped.
1130f5ab0d1fSMingming Cao  *
1131f5ab0d1fSMingming Cao  * If the file is extent-based, it calls ext4_ext_get_blocks();
1132f5ab0d1fSMingming Cao  * otherwise it calls ext4_get_blocks_handle() to handle indirect-mapped
1133f5ab0d1fSMingming Cao  * files.
1134f5ab0d1fSMingming Cao  *
1135f5ab0d1fSMingming Cao  * On success, it returns the number of blocks mapped or allocated.
1136f5ab0d1fSMingming Cao  * If create == 0 and the blocks are pre-allocated and uninitialized,
1137f5ab0d1fSMingming Cao  * the result buffer head is unmapped. If create == 1, it will make sure
1138f5ab0d1fSMingming Cao  * the buffer head is mapped.
1139f5ab0d1fSMingming Cao  *
1140f5ab0d1fSMingming Cao  * It returns 0 if a plain lookup failed (the blocks have not been
1141f5ab0d1fSMingming Cao  * allocated); in that case, the buffer head is unmapped.
1142f5ab0d1fSMingming Cao  *
1143f5ab0d1fSMingming Cao  * It returns the error in case of allocation failure.
1144f5ab0d1fSMingming Cao  */
11450e855ac8SAneesh Kumar K.V int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
1146498e5f24STheodore Ts'o 			unsigned int max_blocks, struct buffer_head *bh,
1147d2a17637SMingming Cao 			int create, int extend_disksize, int flag)
11480e855ac8SAneesh Kumar K.V {
11490e855ac8SAneesh Kumar K.V 	int retval;
1150f5ab0d1fSMingming Cao 
1151f5ab0d1fSMingming Cao 	clear_buffer_mapped(bh);
1152*2a8964d6SAneesh Kumar K.V 	clear_buffer_unwritten(bh);
1153f5ab0d1fSMingming Cao 
11544df3d265SAneesh Kumar K.V 	/*
11554df3d265SAneesh Kumar K.V 	 * Try to see if we can get the block without requesting
11564df3d265SAneesh Kumar K.V 	 * a new file system block.
11574df3d265SAneesh Kumar K.V 	 */
11580e855ac8SAneesh Kumar K.V 	down_read((&EXT4_I(inode)->i_data_sem));
11594df3d265SAneesh Kumar K.V 	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
11604df3d265SAneesh Kumar K.V 		retval =  ext4_ext_get_blocks(handle, inode, block, max_blocks,
11614df3d265SAneesh Kumar K.V 				bh, 0, 0);
11624df3d265SAneesh Kumar K.V 	} else {
11634df3d265SAneesh Kumar K.V 		retval = ext4_get_blocks_handle(handle,
11644df3d265SAneesh Kumar K.V 				inode, block, max_blocks, bh, 0, 0);
11650e855ac8SAneesh Kumar K.V 	}
11664df3d265SAneesh Kumar K.V 	up_read((&EXT4_I(inode)->i_data_sem));
1167f5ab0d1fSMingming Cao 
1168f5ab0d1fSMingming Cao 	/* If it is only a block(s) look up */
1169f5ab0d1fSMingming Cao 	if (!create)
11704df3d265SAneesh Kumar K.V 		return retval;
11714df3d265SAneesh Kumar K.V 
11724df3d265SAneesh Kumar K.V 	/*
1173f5ab0d1fSMingming Cao 	 * Return if the blocks have already been allocated.
1174f5ab0d1fSMingming Cao 	 *
1175f5ab0d1fSMingming Cao 	 * Note that if blocks have been preallocated,
1176f5ab0d1fSMingming Cao 	 * ext4_ext_get_block() returns with create = 0 and
1177f5ab0d1fSMingming Cao 	 * the buffer head unmapped.
1178f5ab0d1fSMingming Cao 	 */
1179f5ab0d1fSMingming Cao 	if (retval > 0 && buffer_mapped(bh))
1180f5ab0d1fSMingming Cao 		return retval;
1181f5ab0d1fSMingming Cao 
1182f5ab0d1fSMingming Cao 	/*
1183*2a8964d6SAneesh Kumar K.V 	 * When we call get_blocks without the create flag, the
1184*2a8964d6SAneesh Kumar K.V 	 * BH_Unwritten flag could have gotten set if the blocks
1185*2a8964d6SAneesh Kumar K.V 	 * requested were part of an uninitialized extent.  We need to
1186*2a8964d6SAneesh Kumar K.V 	 * clear this flag now that we are committed to convert all or
1187*2a8964d6SAneesh Kumar K.V 	 * part of the uninitialized extent to be an initialized
1188*2a8964d6SAneesh Kumar K.V 	 * extent.  This is because we need to avoid the combination
1189*2a8964d6SAneesh Kumar K.V 	 * of BH_Unwritten and BH_Mapped flags being simultaneously
1190*2a8964d6SAneesh Kumar K.V 	 * set on the buffer_head.
1191*2a8964d6SAneesh Kumar K.V 	 */
1192*2a8964d6SAneesh Kumar K.V 	clear_buffer_unwritten(bh);
1193*2a8964d6SAneesh Kumar K.V 
1194*2a8964d6SAneesh Kumar K.V 	/*
1195f5ab0d1fSMingming Cao 	 * Allocating new blocks and/or writing to an uninitialized extent
1196f5ab0d1fSMingming Cao 	 * will possibly result in updating i_data, so we take
1197f5ab0d1fSMingming Cao 	 * the write lock of i_data_sem, and call get_blocks()
1198f5ab0d1fSMingming Cao 	 * with the create == 1 flag.
11994df3d265SAneesh Kumar K.V 	 */
12004df3d265SAneesh Kumar K.V 	down_write((&EXT4_I(inode)->i_data_sem));
1201d2a17637SMingming Cao 
1202d2a17637SMingming Cao 	/*
1203d2a17637SMingming Cao 	 * If the caller is from the delayed allocation writeout path,
1204d2a17637SMingming Cao 	 * we have already reserved fs blocks for allocation;
1205d2a17637SMingming Cao 	 * let the underlying get_block() function know to
1206d2a17637SMingming Cao 	 * avoid double accounting.
1207d2a17637SMingming Cao 	 */
1208d2a17637SMingming Cao 	if (flag)
1209d2a17637SMingming Cao 		EXT4_I(inode)->i_delalloc_reserved_flag = 1;
12104df3d265SAneesh Kumar K.V 	/*
12114df3d265SAneesh Kumar K.V 	 * We need to re-check the extents flag here because migrate
12124df3d265SAneesh Kumar K.V 	 * could have changed the inode type in between.
12134df3d265SAneesh Kumar K.V 	 */
12140e855ac8SAneesh Kumar K.V 	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
12150e855ac8SAneesh Kumar K.V 		retval =  ext4_ext_get_blocks(handle, inode, block, max_blocks,
12160e855ac8SAneesh Kumar K.V 				bh, create, extend_disksize);
12170e855ac8SAneesh Kumar K.V 	} else {
12180e855ac8SAneesh Kumar K.V 		retval = ext4_get_blocks_handle(handle, inode, block,
12190e855ac8SAneesh Kumar K.V 				max_blocks, bh, create, extend_disksize);
1220267e4db9SAneesh Kumar K.V 
1221267e4db9SAneesh Kumar K.V 		if (retval > 0 && buffer_new(bh)) {
1222267e4db9SAneesh Kumar K.V 			/*
1223267e4db9SAneesh Kumar K.V 			 * We allocated new blocks which will result in
1224267e4db9SAneesh Kumar K.V 			 * i_data's format changing.  Force the migrate
1225267e4db9SAneesh Kumar K.V 			 * to fail by clearing migrate flags
1226267e4db9SAneesh Kumar K.V 			 */
1227267e4db9SAneesh Kumar K.V 			EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags &
1228267e4db9SAneesh Kumar K.V 							~EXT4_EXT_MIGRATE;
1229267e4db9SAneesh Kumar K.V 		}
12300e855ac8SAneesh Kumar K.V 	}
1231d2a17637SMingming Cao 
1232d2a17637SMingming Cao 	if (flag) {
1233d2a17637SMingming Cao 		EXT4_I(inode)->i_delalloc_reserved_flag = 0;
1234d2a17637SMingming Cao 		/*
1235d2a17637SMingming Cao 		 * Update reserved blocks/metadata blocks
1236d2a17637SMingming Cao 		 * after successful block allocation
1237d2a17637SMingming Cao 		 * which were deferred till now
1238d2a17637SMingming Cao 		 */
1239d2a17637SMingming Cao 		if ((retval > 0) && buffer_delay(bh))
124012219aeaSAneesh Kumar K.V 			ext4_da_update_reserve_space(inode, retval);
1241d2a17637SMingming Cao 	}
1242d2a17637SMingming Cao 
12430e855ac8SAneesh Kumar K.V 	up_write((&EXT4_I(inode)->i_data_sem));
12440e855ac8SAneesh Kumar K.V 	return retval;
12450e855ac8SAneesh Kumar K.V }
12460e855ac8SAneesh Kumar K.V 
1247f3bd1f3fSMingming Cao /* Maximum number of blocks we map for direct IO at once. */
1248f3bd1f3fSMingming Cao #define DIO_MAX_BLOCKS 4096
1249f3bd1f3fSMingming Cao 
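/*
 * ext4_get_block - generic get_block callback.  For direct-IO writes made
 * outside an existing transaction, it starts its own handle, clamping the
 * request to DIO_MAX_BLOCKS so that ext4_chunk_trans_blocks() yields a
 * bounded number of journal credits.
 */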
12506873fa0dSEric Sandeen int ext4_get_block(struct inode *inode, sector_t iblock,
1251ac27a0ecSDave Kleikamp 		   struct buffer_head *bh_result, int create)
1252ac27a0ecSDave Kleikamp {
12533e4fdaf8SDmitriy Monakhov 	handle_t *handle = ext4_journal_current_handle();
12547fb5409dSJan Kara 	int ret = 0, started = 0;
1255ac27a0ecSDave Kleikamp 	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
1256f3bd1f3fSMingming Cao 	int dio_credits;
1257ac27a0ecSDave Kleikamp 
12587fb5409dSJan Kara 	if (create && !handle) {
12597fb5409dSJan Kara 		/* Direct IO write... */
12607fb5409dSJan Kara 		if (max_blocks > DIO_MAX_BLOCKS)
12617fb5409dSJan Kara 			max_blocks = DIO_MAX_BLOCKS;
1262f3bd1f3fSMingming Cao 		dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
1263f3bd1f3fSMingming Cao 		handle = ext4_journal_start(inode, dio_credits);
12647fb5409dSJan Kara 		if (IS_ERR(handle)) {
1265ac27a0ecSDave Kleikamp 			ret = PTR_ERR(handle);
12667fb5409dSJan Kara 			goto out;
12677fb5409dSJan Kara 		}
12687fb5409dSJan Kara 		started = 1;
1269ac27a0ecSDave Kleikamp 	}
1270ac27a0ecSDave Kleikamp 
1271a86c6181SAlex Tomas 	ret = ext4_get_blocks_wrap(handle, inode, iblock,
1272d2a17637SMingming Cao 					max_blocks, bh_result, create, 0, 0);
1273ac27a0ecSDave Kleikamp 	if (ret > 0) {
1274ac27a0ecSDave Kleikamp 		bh_result->b_size = (ret << inode->i_blkbits);
1275ac27a0ecSDave Kleikamp 		ret = 0;
1276ac27a0ecSDave Kleikamp 	}
12777fb5409dSJan Kara 	if (started)
12787fb5409dSJan Kara 		ext4_journal_stop(handle);
12797fb5409dSJan Kara out:
1280ac27a0ecSDave Kleikamp 	return ret;
1281ac27a0ecSDave Kleikamp }
1282ac27a0ecSDave Kleikamp 
1283ac27a0ecSDave Kleikamp /*
1284ac27a0ecSDave Kleikamp  * `handle' can be NULL if create is zero
1285ac27a0ecSDave Kleikamp  */
1286617ba13bSMingming Cao struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
1287725d26d3SAneesh Kumar K.V 				ext4_lblk_t block, int create, int *errp)
1288ac27a0ecSDave Kleikamp {
1289ac27a0ecSDave Kleikamp 	struct buffer_head dummy;
1290ac27a0ecSDave Kleikamp 	int fatal = 0, err;
1291ac27a0ecSDave Kleikamp 
1292ac27a0ecSDave Kleikamp 	J_ASSERT(handle != NULL || create == 0);
1293ac27a0ecSDave Kleikamp 
1294ac27a0ecSDave Kleikamp 	dummy.b_state = 0;
1295ac27a0ecSDave Kleikamp 	dummy.b_blocknr = -1000;
1296ac27a0ecSDave Kleikamp 	buffer_trace_init(&dummy.b_history);
1297a86c6181SAlex Tomas 	err = ext4_get_blocks_wrap(handle, inode, block, 1,
1298d2a17637SMingming Cao 					&dummy, create, 1, 0);
1299ac27a0ecSDave Kleikamp 	/*
1300617ba13bSMingming Cao 	 * ext4_get_blocks_handle() returns number of blocks
1301ac27a0ecSDave Kleikamp 	 * mapped. 0 in case of a HOLE.
1302ac27a0ecSDave Kleikamp 	 */
1303ac27a0ecSDave Kleikamp 	if (err > 0) {
1304ac27a0ecSDave Kleikamp 		if (err > 1)
1305ac27a0ecSDave Kleikamp 			WARN_ON(1);
1306ac27a0ecSDave Kleikamp 		err = 0;
1307ac27a0ecSDave Kleikamp 	}
1308ac27a0ecSDave Kleikamp 	*errp = err;
1309ac27a0ecSDave Kleikamp 	if (!err && buffer_mapped(&dummy)) {
1310ac27a0ecSDave Kleikamp 		struct buffer_head *bh;
1311ac27a0ecSDave Kleikamp 		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
1312ac27a0ecSDave Kleikamp 		if (!bh) {
1313ac27a0ecSDave Kleikamp 			*errp = -EIO;
1314ac27a0ecSDave Kleikamp 			goto err;
1315ac27a0ecSDave Kleikamp 		}
1316ac27a0ecSDave Kleikamp 		if (buffer_new(&dummy)) {
1317ac27a0ecSDave Kleikamp 			J_ASSERT(create != 0);
1318ac39849dSAneesh Kumar K.V 			J_ASSERT(handle != NULL);
1319ac27a0ecSDave Kleikamp 
1320ac27a0ecSDave Kleikamp 			/*
1321ac27a0ecSDave Kleikamp 			 * Now that we do not always journal data, we should
1322ac27a0ecSDave Kleikamp 			 * keep in mind whether this should always journal the
1323ac27a0ecSDave Kleikamp 			 * new buffer as metadata.  For now, regular file
1324617ba13bSMingming Cao 			 * writes use ext4_get_block instead, so it's not a
1325ac27a0ecSDave Kleikamp 			 * problem.
1326ac27a0ecSDave Kleikamp 			 */
1327ac27a0ecSDave Kleikamp 			lock_buffer(bh);
1328ac27a0ecSDave Kleikamp 			BUFFER_TRACE(bh, "call get_create_access");
1329617ba13bSMingming Cao 			fatal = ext4_journal_get_create_access(handle, bh);
1330ac27a0ecSDave Kleikamp 			if (!fatal && !buffer_uptodate(bh)) {
1331ac27a0ecSDave Kleikamp 				memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1332ac27a0ecSDave Kleikamp 				set_buffer_uptodate(bh);
1333ac27a0ecSDave Kleikamp 			}
1334ac27a0ecSDave Kleikamp 			unlock_buffer(bh);
13350390131bSFrank Mayhar 			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
13360390131bSFrank Mayhar 			err = ext4_handle_dirty_metadata(handle, inode, bh);
1337ac27a0ecSDave Kleikamp 			if (!fatal)
1338ac27a0ecSDave Kleikamp 				fatal = err;
1339ac27a0ecSDave Kleikamp 		} else {
1340ac27a0ecSDave Kleikamp 			BUFFER_TRACE(bh, "not a new buffer");
1341ac27a0ecSDave Kleikamp 		}
1342ac27a0ecSDave Kleikamp 		if (fatal) {
1343ac27a0ecSDave Kleikamp 			*errp = fatal;
1344ac27a0ecSDave Kleikamp 			brelse(bh);
1345ac27a0ecSDave Kleikamp 			bh = NULL;
1346ac27a0ecSDave Kleikamp 		}
1347ac27a0ecSDave Kleikamp 		return bh;
1348ac27a0ecSDave Kleikamp 	}
1349ac27a0ecSDave Kleikamp err:
1350ac27a0ecSDave Kleikamp 	return NULL;
1351ac27a0ecSDave Kleikamp }
1352ac27a0ecSDave Kleikamp 
1353617ba13bSMingming Cao struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
1354725d26d3SAneesh Kumar K.V 			       ext4_lblk_t block, int create, int *err)
1355ac27a0ecSDave Kleikamp {
1356ac27a0ecSDave Kleikamp 	struct buffer_head *bh;
1357ac27a0ecSDave Kleikamp 
1358617ba13bSMingming Cao 	bh = ext4_getblk(handle, inode, block, create, err);
1359ac27a0ecSDave Kleikamp 	if (!bh)
1360ac27a0ecSDave Kleikamp 		return bh;
1361ac27a0ecSDave Kleikamp 	if (buffer_uptodate(bh))
1362ac27a0ecSDave Kleikamp 		return bh;
1363ac27a0ecSDave Kleikamp 	ll_rw_block(READ_META, 1, &bh);
1364ac27a0ecSDave Kleikamp 	wait_on_buffer(bh);
1365ac27a0ecSDave Kleikamp 	if (buffer_uptodate(bh))
1366ac27a0ecSDave Kleikamp 		return bh;
1367ac27a0ecSDave Kleikamp 	put_bh(bh);
1368ac27a0ecSDave Kleikamp 	*err = -EIO;
1369ac27a0ecSDave Kleikamp 	return NULL;
1370ac27a0ecSDave Kleikamp }
1371ac27a0ecSDave Kleikamp 
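/*
 * walk_page_buffers - apply @fn to each buffer_head of @head that overlaps
 * the byte range [@from, @to) within the page.  Buffers wholly outside the
 * range are skipped; if any skipped buffer is not uptodate, *@partial is
 * set.  Returns the first non-zero value returned by @fn, or 0.
 */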
1372ac27a0ecSDave Kleikamp static int walk_page_buffers(handle_t *handle,
1373ac27a0ecSDave Kleikamp 			     struct buffer_head *head,
1374ac27a0ecSDave Kleikamp 			     unsigned from,
1375ac27a0ecSDave Kleikamp 			     unsigned to,
1376ac27a0ecSDave Kleikamp 			     int *partial,
1377ac27a0ecSDave Kleikamp 			     int (*fn)(handle_t *handle,
1378ac27a0ecSDave Kleikamp 				       struct buffer_head *bh))
1379ac27a0ecSDave Kleikamp {
1380ac27a0ecSDave Kleikamp 	struct buffer_head *bh;
1381ac27a0ecSDave Kleikamp 	unsigned block_start, block_end;
1382ac27a0ecSDave Kleikamp 	unsigned blocksize = head->b_size;
1383ac27a0ecSDave Kleikamp 	int err, ret = 0;
1384ac27a0ecSDave Kleikamp 	struct buffer_head *next;
1385ac27a0ecSDave Kleikamp 
1386ac27a0ecSDave Kleikamp 	for (bh = head, block_start = 0;
1387ac27a0ecSDave Kleikamp 	     ret == 0 && (bh != head || !block_start);
1388ac27a0ecSDave Kleikamp 	     block_start = block_end, bh = next)
1389ac27a0ecSDave Kleikamp 	{
1390ac27a0ecSDave Kleikamp 		next = bh->b_this_page;
1391ac27a0ecSDave Kleikamp 		block_end = block_start + blocksize;
1392ac27a0ecSDave Kleikamp 		if (block_end <= from || block_start >= to) {
1393ac27a0ecSDave Kleikamp 			if (partial && !buffer_uptodate(bh))
1394ac27a0ecSDave Kleikamp 				*partial = 1;
1395ac27a0ecSDave Kleikamp 			continue;
1396ac27a0ecSDave Kleikamp 		}
1397ac27a0ecSDave Kleikamp 		err = (*fn)(handle, bh);
1398ac27a0ecSDave Kleikamp 		if (!ret)
1399ac27a0ecSDave Kleikamp 			ret = err;
1400ac27a0ecSDave Kleikamp 	}
1401ac27a0ecSDave Kleikamp 	return ret;
1402ac27a0ecSDave Kleikamp }
1403ac27a0ecSDave Kleikamp 
1404ac27a0ecSDave Kleikamp /*
1405ac27a0ecSDave Kleikamp  * To preserve ordering, it is essential that the hole instantiation and
1406ac27a0ecSDave Kleikamp  * the data write be encapsulated in a single transaction.  We cannot
1407617ba13bSMingming Cao  * close off a transaction and start a new one between the ext4_get_block()
1408dab291afSMingming Cao  * and the commit_write().  So doing the jbd2_journal_start at the start of
1409ac27a0ecSDave Kleikamp  * prepare_write() is the right place.
1410ac27a0ecSDave Kleikamp  *
1411617ba13bSMingming Cao  * Also, this function can nest inside ext4_writepage() ->
1412617ba13bSMingming Cao  * block_write_full_page(). In that case, we *know* that ext4_writepage()
1413ac27a0ecSDave Kleikamp  * has generated enough buffer credits to do the whole page.  So we won't
1414ac27a0ecSDave Kleikamp  * block on the journal in that case, which is good, because the caller may
1415ac27a0ecSDave Kleikamp  * be PF_MEMALLOC.
1416ac27a0ecSDave Kleikamp  *
1417617ba13bSMingming Cao  * By accident, ext4 can be reentered when a transaction is open via
1418ac27a0ecSDave Kleikamp  * quota file writes.  If we were to commit the transaction while thus
1419ac27a0ecSDave Kleikamp  * reentered, there can be a deadlock - we would be holding a quota
1420ac27a0ecSDave Kleikamp  * lock, and the commit would never complete if another thread had a
1421ac27a0ecSDave Kleikamp  * transaction open and was blocking on the quota lock - a ranking
1422ac27a0ecSDave Kleikamp  * violation.
1423ac27a0ecSDave Kleikamp  *
1424dab291afSMingming Cao  * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
1425ac27a0ecSDave Kleikamp  * will _not_ run commit under these circumstances because handle->h_ref
1426ac27a0ecSDave Kleikamp  * is elevated.  We'll still have enough credits for the tiny quotafile
1427ac27a0ecSDave Kleikamp  * write.
1428ac27a0ecSDave Kleikamp  */
1429ac27a0ecSDave Kleikamp static int do_journal_get_write_access(handle_t *handle,
1430ac27a0ecSDave Kleikamp 					struct buffer_head *bh)
1431ac27a0ecSDave Kleikamp {
1432ac27a0ecSDave Kleikamp 	if (!buffer_mapped(bh) || buffer_freed(bh))
1433ac27a0ecSDave Kleikamp 		return 0;
1434617ba13bSMingming Cao 	return ext4_journal_get_write_access(handle, bh);
1435ac27a0ecSDave Kleikamp }
1436ac27a0ecSDave Kleikamp 
1437bfc1af65SNick Piggin static int ext4_write_begin(struct file *file, struct address_space *mapping,
1438bfc1af65SNick Piggin 				loff_t pos, unsigned len, unsigned flags,
1439bfc1af65SNick Piggin 				struct page **pagep, void **fsdata)
1440ac27a0ecSDave Kleikamp {
1441bfc1af65SNick Piggin 	struct inode *inode = mapping->host;
14427479d2b9SAndrew Morton 	int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
1443ac27a0ecSDave Kleikamp 	handle_t *handle;
1444ac27a0ecSDave Kleikamp 	int retries = 0;
1445bfc1af65SNick Piggin 	struct page *page;
1446bfc1af65SNick Piggin  	pgoff_t index;
1447bfc1af65SNick Piggin 	unsigned from, to;
1448bfc1af65SNick Piggin 
1449ba80b101STheodore Ts'o 	trace_mark(ext4_write_begin,
1450ba80b101STheodore Ts'o 		   "dev %s ino %lu pos %llu len %u flags %u",
1451ba80b101STheodore Ts'o 		   inode->i_sb->s_id, inode->i_ino,
1452ba80b101STheodore Ts'o 		   (unsigned long long) pos, len, flags);
1453bfc1af65SNick Piggin  	index = pos >> PAGE_CACHE_SHIFT;
1454bfc1af65SNick Piggin 	from = pos & (PAGE_CACHE_SIZE - 1);
1455bfc1af65SNick Piggin 	to = from + len;
1456ac27a0ecSDave Kleikamp 
1457ac27a0ecSDave Kleikamp retry:
1458617ba13bSMingming Cao 	handle = ext4_journal_start(inode, needed_blocks);
14597479d2b9SAndrew Morton 	if (IS_ERR(handle)) {
14607479d2b9SAndrew Morton 		ret = PTR_ERR(handle);
14617479d2b9SAndrew Morton 		goto out;
14627479d2b9SAndrew Morton 	}
1463ac27a0ecSDave Kleikamp 
1464ebd3610bSJan Kara 	/* We cannot recurse into the filesystem as the transaction is already
1465ebd3610bSJan Kara 	 * started */
1466ebd3610bSJan Kara 	flags |= AOP_FLAG_NOFS;
1467ebd3610bSJan Kara 
146854566b2cSNick Piggin 	page = grab_cache_page_write_begin(mapping, index, flags);
1469cf108bcaSJan Kara 	if (!page) {
1470cf108bcaSJan Kara 		ext4_journal_stop(handle);
1471cf108bcaSJan Kara 		ret = -ENOMEM;
1472cf108bcaSJan Kara 		goto out;
1473cf108bcaSJan Kara 	}
1474cf108bcaSJan Kara 	*pagep = page;
1475cf108bcaSJan Kara 
1476bfc1af65SNick Piggin 	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
1477bfc1af65SNick Piggin 				ext4_get_block);
1478bfc1af65SNick Piggin 
1479bfc1af65SNick Piggin 	if (!ret && ext4_should_journal_data(inode)) {
1480ac27a0ecSDave Kleikamp 		ret = walk_page_buffers(handle, page_buffers(page),
1481ac27a0ecSDave Kleikamp 				from, to, NULL, do_journal_get_write_access);
1482b46be050SAndrey Savochkin 	}
1483bfc1af65SNick Piggin 
1484bfc1af65SNick Piggin 	if (ret) {
1485bfc1af65SNick Piggin 		unlock_page(page);
1486cf108bcaSJan Kara 		ext4_journal_stop(handle);
1487bfc1af65SNick Piggin 		page_cache_release(page);
1488ae4d5372SAneesh Kumar K.V 		/*
1489ae4d5372SAneesh Kumar K.V 		 * block_write_begin may have instantiated a few blocks
1490ae4d5372SAneesh Kumar K.V 		 * outside i_size.  Trim these off again. Don't need
1491ae4d5372SAneesh Kumar K.V 		 * i_size_read because we hold i_mutex.
1492ae4d5372SAneesh Kumar K.V 		 */
1493ae4d5372SAneesh Kumar K.V 		if (pos + len > inode->i_size)
1494ae4d5372SAneesh Kumar K.V 			vmtruncate(inode, inode->i_size);
1495bfc1af65SNick Piggin 	}
1496bfc1af65SNick Piggin 
1497617ba13bSMingming Cao 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
1498ac27a0ecSDave Kleikamp 		goto retry;
14997479d2b9SAndrew Morton out:
1500ac27a0ecSDave Kleikamp 	return ret;
1501ac27a0ecSDave Kleikamp }
1502ac27a0ecSDave Kleikamp 
1503bfc1af65SNick Piggin /* For write_end() in data=journal mode */
1504bfc1af65SNick Piggin static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1505ac27a0ecSDave Kleikamp {
1506ac27a0ecSDave Kleikamp 	if (!buffer_mapped(bh) || buffer_freed(bh))
1507ac27a0ecSDave Kleikamp 		return 0;
1508ac27a0ecSDave Kleikamp 	set_buffer_uptodate(bh);
15090390131bSFrank Mayhar 	return ext4_handle_dirty_metadata(handle, NULL, bh);
1510ac27a0ecSDave Kleikamp }
1511ac27a0ecSDave Kleikamp 
1512ac27a0ecSDave Kleikamp /*
1513ac27a0ecSDave Kleikamp  * We need to pick up the new inode size which generic_commit_write gave us.
1514ac27a0ecSDave Kleikamp  * `file' can be NULL - eg, when called from page_symlink().
1515ac27a0ecSDave Kleikamp  *
1516617ba13bSMingming Cao  * ext4 never places buffers on inode->i_mapping->private_list.  Metadata
1517ac27a0ecSDave Kleikamp  * buffers are managed internally.
1518ac27a0ecSDave Kleikamp  */
1519bfc1af65SNick Piggin static int ext4_ordered_write_end(struct file *file,
1520bfc1af65SNick Piggin 				struct address_space *mapping,
1521bfc1af65SNick Piggin 				loff_t pos, unsigned len, unsigned copied,
1522bfc1af65SNick Piggin 				struct page *page, void *fsdata)
1523ac27a0ecSDave Kleikamp {
1524617ba13bSMingming Cao 	handle_t *handle = ext4_journal_current_handle();
1525cf108bcaSJan Kara 	struct inode *inode = mapping->host;
1526ac27a0ecSDave Kleikamp 	int ret = 0, ret2;
1527ac27a0ecSDave Kleikamp 
1528ba80b101STheodore Ts'o 	trace_mark(ext4_ordered_write_end,
1529ba80b101STheodore Ts'o 		   "dev %s ino %lu pos %llu len %u copied %u",
1530ba80b101STheodore Ts'o 		   inode->i_sb->s_id, inode->i_ino,
1531ba80b101STheodore Ts'o 		   (unsigned long long) pos, len, copied);
1532678aaf48SJan Kara 	ret = ext4_jbd2_file_inode(handle, inode);
1533ac27a0ecSDave Kleikamp 
1534ac27a0ecSDave Kleikamp 	if (ret == 0) {
1535ac27a0ecSDave Kleikamp 		loff_t new_i_size;
1536ac27a0ecSDave Kleikamp 
1537bfc1af65SNick Piggin 		new_i_size = pos + copied;
1538cf17fea6SAneesh Kumar K.V 		if (new_i_size > EXT4_I(inode)->i_disksize) {
1539cf17fea6SAneesh Kumar K.V 			ext4_update_i_disksize(inode, new_i_size);
1540cf17fea6SAneesh Kumar K.V 			/* We need to mark the inode dirty even if
1541cf17fea6SAneesh Kumar K.V 			 * new_i_size is less than inode->i_size
1542cf17fea6SAneesh Kumar K.V 			 * but greater than i_disksize. (hint: delalloc)
1543cf17fea6SAneesh Kumar K.V 			 */
1544cf17fea6SAneesh Kumar K.V 			ext4_mark_inode_dirty(handle, inode);
1545cf17fea6SAneesh Kumar K.V 		}
1546cf17fea6SAneesh Kumar K.V 
1547cf108bcaSJan Kara 		ret2 = generic_write_end(file, mapping, pos, len, copied,
1548bfc1af65SNick Piggin 							page, fsdata);
1549f8a87d89SRoel Kluin 		copied = ret2;
1550f8a87d89SRoel Kluin 		if (ret2 < 0)
1551f8a87d89SRoel Kluin 			ret = ret2;
1552ac27a0ecSDave Kleikamp 	}
1553617ba13bSMingming Cao 	ret2 = ext4_journal_stop(handle);
1554ac27a0ecSDave Kleikamp 	if (!ret)
1555ac27a0ecSDave Kleikamp 		ret = ret2;
1556bfc1af65SNick Piggin 
1557bfc1af65SNick Piggin 	return ret ? ret : copied;
1558ac27a0ecSDave Kleikamp }
1559ac27a0ecSDave Kleikamp 
1560bfc1af65SNick Piggin static int ext4_writeback_write_end(struct file *file,
1561bfc1af65SNick Piggin 				struct address_space *mapping,
1562bfc1af65SNick Piggin 				loff_t pos, unsigned len, unsigned copied,
1563bfc1af65SNick Piggin 				struct page *page, void *fsdata)
1564ac27a0ecSDave Kleikamp {
1565617ba13bSMingming Cao 	handle_t *handle = ext4_journal_current_handle();
1566cf108bcaSJan Kara 	struct inode *inode = mapping->host;
1567ac27a0ecSDave Kleikamp 	int ret = 0, ret2;
1568ac27a0ecSDave Kleikamp 	loff_t new_i_size;
1569ac27a0ecSDave Kleikamp 
1570ba80b101STheodore Ts'o 	trace_mark(ext4_writeback_write_end,
1571ba80b101STheodore Ts'o 		   "dev %s ino %lu pos %llu len %u copied %u",
1572ba80b101STheodore Ts'o 		   inode->i_sb->s_id, inode->i_ino,
1573ba80b101STheodore Ts'o 		   (unsigned long long) pos, len, copied);
1574bfc1af65SNick Piggin 	new_i_size = pos + copied;
1575cf17fea6SAneesh Kumar K.V 	if (new_i_size > EXT4_I(inode)->i_disksize) {
1576cf17fea6SAneesh Kumar K.V 		ext4_update_i_disksize(inode, new_i_size);
1577cf17fea6SAneesh Kumar K.V 		/* We need to mark the inode dirty even if
1578cf17fea6SAneesh Kumar K.V 		 * new_i_size is less than inode->i_size
1579cf17fea6SAneesh Kumar K.V 		 * but greater than i_disksize. (hint: delalloc)
1580cf17fea6SAneesh Kumar K.V 		 */
1581cf17fea6SAneesh Kumar K.V 		ext4_mark_inode_dirty(handle, inode);
1582cf17fea6SAneesh Kumar K.V 	}
1583ac27a0ecSDave Kleikamp 
1584cf108bcaSJan Kara 	ret2 = generic_write_end(file, mapping, pos, len, copied,
1585bfc1af65SNick Piggin 							page, fsdata);
1586f8a87d89SRoel Kluin 	copied = ret2;
1587f8a87d89SRoel Kluin 	if (ret2 < 0)
1588f8a87d89SRoel Kluin 		ret = ret2;
1589ac27a0ecSDave Kleikamp 
1590617ba13bSMingming Cao 	ret2 = ext4_journal_stop(handle);
1591ac27a0ecSDave Kleikamp 	if (!ret)
1592ac27a0ecSDave Kleikamp 		ret = ret2;
1593bfc1af65SNick Piggin 
1594bfc1af65SNick Piggin 	return ret ? ret : copied;
1595ac27a0ecSDave Kleikamp }
1596ac27a0ecSDave Kleikamp 
1597bfc1af65SNick Piggin static int ext4_journalled_write_end(struct file *file,
1598bfc1af65SNick Piggin 				struct address_space *mapping,
1599bfc1af65SNick Piggin 				loff_t pos, unsigned len, unsigned copied,
1600bfc1af65SNick Piggin 				struct page *page, void *fsdata)
1601ac27a0ecSDave Kleikamp {
1602617ba13bSMingming Cao 	handle_t *handle = ext4_journal_current_handle();
1603bfc1af65SNick Piggin 	struct inode *inode = mapping->host;
1604ac27a0ecSDave Kleikamp 	int ret = 0, ret2;
1605ac27a0ecSDave Kleikamp 	int partial = 0;
1606bfc1af65SNick Piggin 	unsigned from, to;
1607cf17fea6SAneesh Kumar K.V 	loff_t new_i_size;
1608ac27a0ecSDave Kleikamp 
1609ba80b101STheodore Ts'o 	trace_mark(ext4_journalled_write_end,
1610ba80b101STheodore Ts'o 		   "dev %s ino %lu pos %llu len %u copied %u",
1611ba80b101STheodore Ts'o 		   inode->i_sb->s_id, inode->i_ino,
1612ba80b101STheodore Ts'o 		   (unsigned long long) pos, len, copied);
1613bfc1af65SNick Piggin 	from = pos & (PAGE_CACHE_SIZE - 1);
1614bfc1af65SNick Piggin 	to = from + len;
1615bfc1af65SNick Piggin 
1616bfc1af65SNick Piggin 	if (copied < len) {
1617bfc1af65SNick Piggin 		if (!PageUptodate(page))
1618bfc1af65SNick Piggin 			copied = 0;
1619bfc1af65SNick Piggin 		page_zero_new_buffers(page, from+copied, to);
1620bfc1af65SNick Piggin 	}
1621ac27a0ecSDave Kleikamp 
1622ac27a0ecSDave Kleikamp 	ret = walk_page_buffers(handle, page_buffers(page), from,
1623bfc1af65SNick Piggin 				to, &partial, write_end_fn);
1624ac27a0ecSDave Kleikamp 	if (!partial)
1625ac27a0ecSDave Kleikamp 		SetPageUptodate(page);
1626cf17fea6SAneesh Kumar K.V 	new_i_size = pos + copied;
1627cf17fea6SAneesh Kumar K.V 	if (new_i_size > inode->i_size)
1628bfc1af65SNick Piggin 		i_size_write(inode, pos+copied);
1629617ba13bSMingming Cao 	EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
1630cf17fea6SAneesh Kumar K.V 	if (new_i_size > EXT4_I(inode)->i_disksize) {
1631cf17fea6SAneesh Kumar K.V 		ext4_update_i_disksize(inode, new_i_size);
1632617ba13bSMingming Cao 		ret2 = ext4_mark_inode_dirty(handle, inode);
1633ac27a0ecSDave Kleikamp 		if (!ret)
1634ac27a0ecSDave Kleikamp 			ret = ret2;
1635ac27a0ecSDave Kleikamp 	}
1636bfc1af65SNick Piggin 
1637cf108bcaSJan Kara 	unlock_page(page);
1638617ba13bSMingming Cao 	ret2 = ext4_journal_stop(handle);
1639ac27a0ecSDave Kleikamp 	if (!ret)
1640ac27a0ecSDave Kleikamp 		ret = ret2;
1641bfc1af65SNick Piggin 	page_cache_release(page);
1642bfc1af65SNick Piggin 
1643bfc1af65SNick Piggin 	return ret ? ret : copied;
1644ac27a0ecSDave Kleikamp }
1645d2a17637SMingming Cao 
1646d2a17637SMingming Cao static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
1647d2a17637SMingming Cao {
1648030ba6bcSAneesh Kumar K.V 	int retries = 0;
1649d2a17637SMingming Cao 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1650d2a17637SMingming Cao 	unsigned long md_needed, mdblocks, total = 0;
1651d2a17637SMingming Cao 
1652d2a17637SMingming Cao 	/*
1653d2a17637SMingming Cao 	 * Recalculate the amount of metadata blocks to reserve
1654d2a17637SMingming Cao 	 * in order to allocate nrblocks;
1655d2a17637SMingming Cao 	 * the worst case is one extent per block.
1656d2a17637SMingming Cao 	 */
1657030ba6bcSAneesh Kumar K.V repeat:
1658d2a17637SMingming Cao 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1659d2a17637SMingming Cao 	total = EXT4_I(inode)->i_reserved_data_blocks + nrblocks;
1660d2a17637SMingming Cao 	mdblocks = ext4_calc_metadata_amount(inode, total);
1661d2a17637SMingming Cao 	BUG_ON(mdblocks < EXT4_I(inode)->i_reserved_meta_blocks);
1662d2a17637SMingming Cao 
1663d2a17637SMingming Cao 	md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
1664d2a17637SMingming Cao 	total = md_needed + nrblocks;
1665d2a17637SMingming Cao 
166660e58e0fSMingming Cao 	/*
166760e58e0fSMingming Cao 	 * Make quota reservation here to prevent quota overflow
166860e58e0fSMingming Cao 	 * later. Real quota accounting is done at pages writeout
166960e58e0fSMingming Cao 	 * time.
167060e58e0fSMingming Cao 	 */
167160e58e0fSMingming Cao 	if (vfs_dq_reserve_block(inode, total)) {
167260e58e0fSMingming Cao 		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
167360e58e0fSMingming Cao 		return -EDQUOT;
167460e58e0fSMingming Cao 	}
167560e58e0fSMingming Cao 
1676a30d542aSAneesh Kumar K.V 	if (ext4_claim_free_blocks(sbi, total)) {
1677d2a17637SMingming Cao 		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1678030ba6bcSAneesh Kumar K.V 		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1679030ba6bcSAneesh Kumar K.V 			yield();
1680030ba6bcSAneesh Kumar K.V 			goto repeat;
1681030ba6bcSAneesh Kumar K.V 		}
168260e58e0fSMingming Cao 		vfs_dq_release_reservation_block(inode, total);
1683d2a17637SMingming Cao 		return -ENOSPC;
1684d2a17637SMingming Cao 	}
1685d2a17637SMingming Cao 	EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
1686d2a17637SMingming Cao 	EXT4_I(inode)->i_reserved_meta_blocks = mdblocks;
1687d2a17637SMingming Cao 
1688d2a17637SMingming Cao 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1689d2a17637SMingming Cao 	return 0;       /* success */
1690d2a17637SMingming Cao }
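/*
 * Worked example (illustrative, assuming 4KB blocks, an indirect-mapped
 * file and no prior reservation on the inode): a delalloc write covering
 * nrblocks = 10 data blocks recomputes mdblocks = 3 (one indirect, one
 * doubly-indirect and one triply-indirect block), so md_needed = 3 and
 * the quota / free-space claim made above is for 13 blocks in total.
 */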
1691d2a17637SMingming Cao 
169212219aeaSAneesh Kumar K.V static void ext4_da_release_space(struct inode *inode, int to_free)
1693d2a17637SMingming Cao {
1694d2a17637SMingming Cao 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1695d2a17637SMingming Cao 	int total, mdb, mdb_free, release;
1696d2a17637SMingming Cao 
1697cd213226SMingming Cao 	if (!to_free)
1698cd213226SMingming Cao 		return;		/* Nothing to release, exit */
1699cd213226SMingming Cao 
1700d2a17637SMingming Cao 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1701cd213226SMingming Cao 
1702cd213226SMingming Cao 	if (!EXT4_I(inode)->i_reserved_data_blocks) {
1703cd213226SMingming Cao 		/*
1704cd213226SMingming Cao 		 * If there are no reserved blocks, but we try to free some,
1705cd213226SMingming Cao 		 * then the counter is messed up somewhere.
1706cd213226SMingming Cao 		 * But since this function is called from the invalidate
1707cd213226SMingming Cao 		 * page path, it's harmless to return without any action.
1708cd213226SMingming Cao 		 */
1709cd213226SMingming Cao 		printk(KERN_INFO "ext4 delalloc try to release %d reserved "
1710cd213226SMingming Cao 			    "blocks for inode %lu, but there is no reserved "
1711cd213226SMingming Cao 			    "data blocks\n", to_free, inode->i_ino);
1712cd213226SMingming Cao 		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1713cd213226SMingming Cao 		return;
1714cd213226SMingming Cao 	}
1715cd213226SMingming Cao 
1716d2a17637SMingming Cao 	/* recalculate the number of metablocks that still need to be reserved */
171712219aeaSAneesh Kumar K.V 	total = EXT4_I(inode)->i_reserved_data_blocks - to_free;
1718d2a17637SMingming Cao 	mdb = ext4_calc_metadata_amount(inode, total);
1719d2a17637SMingming Cao 
1720d2a17637SMingming Cao 	/* figure out how many metablocks to release */
1721d2a17637SMingming Cao 	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
1722d2a17637SMingming Cao 	mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;
1723d2a17637SMingming Cao 
1724d2a17637SMingming Cao 	release = to_free + mdb_free;
1725d2a17637SMingming Cao 
17266bc6e63fSAneesh Kumar K.V 	/* update fs dirty blocks counter for truncate case */
17276bc6e63fSAneesh Kumar K.V 	percpu_counter_sub(&sbi->s_dirtyblocks_counter, release);
1728d2a17637SMingming Cao 
1729d2a17637SMingming Cao 	/* update per-inode reservations */
173012219aeaSAneesh Kumar K.V 	BUG_ON(to_free > EXT4_I(inode)->i_reserved_data_blocks);
173112219aeaSAneesh Kumar K.V 	EXT4_I(inode)->i_reserved_data_blocks -= to_free;
1732d2a17637SMingming Cao 
1733d2a17637SMingming Cao 	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
1734d2a17637SMingming Cao 	EXT4_I(inode)->i_reserved_meta_blocks = mdb;
1735d2a17637SMingming Cao 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
173660e58e0fSMingming Cao 
173760e58e0fSMingming Cao 	vfs_dq_release_reservation_block(inode, release);
1738d2a17637SMingming Cao }
1739d2a17637SMingming Cao 
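/*
 * Scan the buffers of a page being invalidated, starting at @offset, and
 * count those that still carry BH_Delay (reserved by delayed allocation
 * but never actually allocated); clear the flag and release their
 * reservation.
 */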
1740d2a17637SMingming Cao static void ext4_da_page_release_reservation(struct page *page,
1741d2a17637SMingming Cao 						unsigned long offset)
1742d2a17637SMingming Cao {
1743d2a17637SMingming Cao 	int to_release = 0;
1744d2a17637SMingming Cao 	struct buffer_head *head, *bh;
1745d2a17637SMingming Cao 	unsigned int curr_off = 0;
1746d2a17637SMingming Cao 
1747d2a17637SMingming Cao 	head = page_buffers(page);
1748d2a17637SMingming Cao 	bh = head;
1749d2a17637SMingming Cao 	do {
1750d2a17637SMingming Cao 		unsigned int next_off = curr_off + bh->b_size;
1751d2a17637SMingming Cao 
1752d2a17637SMingming Cao 		if ((offset <= curr_off) && (buffer_delay(bh))) {
1753d2a17637SMingming Cao 			to_release++;
1754d2a17637SMingming Cao 			clear_buffer_delay(bh);
1755d2a17637SMingming Cao 		}
1756d2a17637SMingming Cao 		curr_off = next_off;
1757d2a17637SMingming Cao 	} while ((bh = bh->b_this_page) != head);
175812219aeaSAneesh Kumar K.V 	ext4_da_release_space(page->mapping->host, to_release);
1759d2a17637SMingming Cao }
1760ac27a0ecSDave Kleikamp 
1761ac27a0ecSDave Kleikamp /*
176264769240SAlex Tomas  * Delayed allocation stuff
176364769240SAlex Tomas  */
176464769240SAlex Tomas 
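/*
 * Per-writeback-invocation state for delayed-allocation writeout: the
 * accumulated extent is described buffer_head-style (b_blocknr, b_size and
 * a b_state bitmask of BH_* flags such as (1 << BH_Delay)), together with
 * the page range it covers and bookkeeping for write_cache_pages().
 */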
176564769240SAlex Tomas struct mpage_da_data {
176664769240SAlex Tomas 	struct inode *inode;
17678dc207c0STheodore Ts'o 	sector_t b_blocknr;		/* start block number of extent */
17688dc207c0STheodore Ts'o 	size_t b_size;			/* size of extent */
17698dc207c0STheodore Ts'o 	unsigned long b_state;		/* state of the extent */
177064769240SAlex Tomas 	unsigned long first_page, next_page;	/* extent of pages */
177164769240SAlex Tomas 	struct writeback_control *wbc;
1772a1d6cc56SAneesh Kumar K.V 	int io_done;
1773498e5f24STheodore Ts'o 	int pages_written;
1774df22291fSAneesh Kumar K.V 	int retval;
177564769240SAlex Tomas };
177664769240SAlex Tomas 
177764769240SAlex Tomas /*
177864769240SAlex Tomas  * mpage_da_submit_io - walks through the extent of pages and tries to write
1779a1d6cc56SAneesh Kumar K.V  * them out with the writepage() callback
178064769240SAlex Tomas  *
178164769240SAlex Tomas  * @mpd->inode: inode
178264769240SAlex Tomas  * @mpd->first_page: first page of the extent
178364769240SAlex Tomas  * @mpd->next_page: page after the last page of the extent
178464769240SAlex Tomas  *
178564769240SAlex Tomas  * By the time mpage_da_submit_io() is called we expect all blocks
178664769240SAlex Tomas  * to be allocated; this may be wrong if allocation failed.
178764769240SAlex Tomas  *
178864769240SAlex Tomas  * As pages are already locked by write_cache_pages(), we can't use it
178964769240SAlex Tomas  */
179064769240SAlex Tomas static int mpage_da_submit_io(struct mpage_da_data *mpd)
179164769240SAlex Tomas {
179222208dedSAneesh Kumar K.V 	long pages_skipped;
1793791b7f08SAneesh Kumar K.V 	struct pagevec pvec;
1794791b7f08SAneesh Kumar K.V 	unsigned long index, end;
1795791b7f08SAneesh Kumar K.V 	int ret = 0, err, nr_pages, i;
1796791b7f08SAneesh Kumar K.V 	struct inode *inode = mpd->inode;
1797791b7f08SAneesh Kumar K.V 	struct address_space *mapping = inode->i_mapping;
179864769240SAlex Tomas 
179964769240SAlex Tomas 	BUG_ON(mpd->next_page <= mpd->first_page);
1800791b7f08SAneesh Kumar K.V 	/*
1801791b7f08SAneesh Kumar K.V 	 * We need to start from the first_page to the next_page - 1
1802791b7f08SAneesh Kumar K.V 	 * to make sure we also write the mapped dirty buffer_heads.
18038dc207c0STheodore Ts'o 	 * If we look at mpd->b_blocknr we would only be looking
1804791b7f08SAneesh Kumar K.V 	 * at the currently mapped buffer_heads.
1805791b7f08SAneesh Kumar K.V 	 */
180664769240SAlex Tomas 	index = mpd->first_page;
180764769240SAlex Tomas 	end = mpd->next_page - 1;
180864769240SAlex Tomas 
1809791b7f08SAneesh Kumar K.V 	pagevec_init(&pvec, 0);
181064769240SAlex Tomas 	while (index <= end) {
1811791b7f08SAneesh Kumar K.V 		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
181264769240SAlex Tomas 		if (nr_pages == 0)
181364769240SAlex Tomas 			break;
181464769240SAlex Tomas 		for (i = 0; i < nr_pages; i++) {
181564769240SAlex Tomas 			struct page *page = pvec.pages[i];
181664769240SAlex Tomas 
1817791b7f08SAneesh Kumar K.V 			index = page->index;
1818791b7f08SAneesh Kumar K.V 			if (index > end)
1819791b7f08SAneesh Kumar K.V 				break;
1820791b7f08SAneesh Kumar K.V 			index++;
1821791b7f08SAneesh Kumar K.V 
1822791b7f08SAneesh Kumar K.V 			BUG_ON(!PageLocked(page));
1823791b7f08SAneesh Kumar K.V 			BUG_ON(PageWriteback(page));
1824791b7f08SAneesh Kumar K.V 
182522208dedSAneesh Kumar K.V 			pages_skipped = mpd->wbc->pages_skipped;
1826a1d6cc56SAneesh Kumar K.V 			err = mapping->a_ops->writepage(page, mpd->wbc);
182722208dedSAneesh Kumar K.V 			if (!err && (pages_skipped == mpd->wbc->pages_skipped))
182822208dedSAneesh Kumar K.V 				/*
182922208dedSAneesh Kumar K.V 				 * We have successfully written the page
183022208dedSAneesh Kumar K.V 				 * without it being skipped.
183122208dedSAneesh Kumar K.V 				 */
1832a1d6cc56SAneesh Kumar K.V 				mpd->pages_written++;
183364769240SAlex Tomas 			/*
183464769240SAlex Tomas 			 * In error case, we have to continue because
183564769240SAlex Tomas 			 * remaining pages are still locked
183664769240SAlex Tomas 			 * XXX: unlock and re-dirty them?
183764769240SAlex Tomas 			 */
183864769240SAlex Tomas 			if (ret == 0)
183964769240SAlex Tomas 				ret = err;
184064769240SAlex Tomas 		}
184164769240SAlex Tomas 		pagevec_release(&pvec);
184264769240SAlex Tomas 	}
184364769240SAlex Tomas 	return ret;
184464769240SAlex Tomas }
184564769240SAlex Tomas 
184664769240SAlex Tomas /*
184764769240SAlex Tomas  * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers
184864769240SAlex Tomas  *
184964769240SAlex Tomas  * @mpd->inode - inode to walk through
185064769240SAlex Tomas  * @exbh->b_blocknr - first block on a disk
185164769240SAlex Tomas  * @exbh->b_size - amount of space in bytes
185264769240SAlex Tomas  * @logical - first logical block to start assignment with
185364769240SAlex Tomas  *
185464769240SAlex Tomas  * the function goes through all passed space and put actual disk
185564769240SAlex Tomas  * The function goes through all of the passed space and puts the actual disk
185664769240SAlex Tomas  * block numbers into the buffer heads, dropping BH_Delay.
185764769240SAlex Tomas static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical,
185864769240SAlex Tomas 				 struct buffer_head *exbh)
185964769240SAlex Tomas {
186064769240SAlex Tomas 	struct inode *inode = mpd->inode;
186164769240SAlex Tomas 	struct address_space *mapping = inode->i_mapping;
186264769240SAlex Tomas 	int blocks = exbh->b_size >> inode->i_blkbits;
186364769240SAlex Tomas 	sector_t pblock = exbh->b_blocknr, cur_logical;
186464769240SAlex Tomas 	struct buffer_head *head, *bh;
1865a1d6cc56SAneesh Kumar K.V 	pgoff_t index, end;
186664769240SAlex Tomas 	struct pagevec pvec;
186764769240SAlex Tomas 	int nr_pages, i;
186864769240SAlex Tomas 
186964769240SAlex Tomas 	index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
187064769240SAlex Tomas 	end = (logical + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
187164769240SAlex Tomas 	cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
187264769240SAlex Tomas 
187364769240SAlex Tomas 	pagevec_init(&pvec, 0);
187464769240SAlex Tomas 
187564769240SAlex Tomas 	while (index <= end) {
187664769240SAlex Tomas 		/* XXX: optimize tail */
187764769240SAlex Tomas 		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
187864769240SAlex Tomas 		if (nr_pages == 0)
187964769240SAlex Tomas 			break;
188064769240SAlex Tomas 		for (i = 0; i < nr_pages; i++) {
188164769240SAlex Tomas 			struct page *page = pvec.pages[i];
188264769240SAlex Tomas 
188364769240SAlex Tomas 			index = page->index;
188464769240SAlex Tomas 			if (index > end)
188564769240SAlex Tomas 				break;
188664769240SAlex Tomas 			index++;
188764769240SAlex Tomas 
188864769240SAlex Tomas 			BUG_ON(!PageLocked(page));
188964769240SAlex Tomas 			BUG_ON(PageWriteback(page));
189064769240SAlex Tomas 			BUG_ON(!page_has_buffers(page));
189164769240SAlex Tomas 
189264769240SAlex Tomas 			bh = page_buffers(page);
189364769240SAlex Tomas 			head = bh;
189464769240SAlex Tomas 
189564769240SAlex Tomas 			/* skip blocks out of the range */
189664769240SAlex Tomas 			do {
189764769240SAlex Tomas 				if (cur_logical >= logical)
189864769240SAlex Tomas 					break;
189964769240SAlex Tomas 				cur_logical++;
190064769240SAlex Tomas 			} while ((bh = bh->b_this_page) != head);
190164769240SAlex Tomas 
190264769240SAlex Tomas 			do {
190364769240SAlex Tomas 				if (cur_logical >= logical + blocks)
190464769240SAlex Tomas 					break;
190564769240SAlex Tomas 				if (buffer_delay(bh)) {
190664769240SAlex Tomas 					bh->b_blocknr = pblock;
190764769240SAlex Tomas 					clear_buffer_delay(bh);
1908bf068ee2SAneesh Kumar K.V 					bh->b_bdev = inode->i_sb->s_bdev;
1909bf068ee2SAneesh Kumar K.V 				} else if (buffer_unwritten(bh)) {
1910bf068ee2SAneesh Kumar K.V 					bh->b_blocknr = pblock;
1911bf068ee2SAneesh Kumar K.V 					clear_buffer_unwritten(bh);
1912bf068ee2SAneesh Kumar K.V 					set_buffer_mapped(bh);
1913bf068ee2SAneesh Kumar K.V 					set_buffer_new(bh);
1914bf068ee2SAneesh Kumar K.V 					bh->b_bdev = inode->i_sb->s_bdev;
191561628a3fSMingming Cao 				} else if (buffer_mapped(bh))
191664769240SAlex Tomas 					BUG_ON(bh->b_blocknr != pblock);
191764769240SAlex Tomas 
191864769240SAlex Tomas 				cur_logical++;
191964769240SAlex Tomas 				pblock++;
192064769240SAlex Tomas 			} while ((bh = bh->b_this_page) != head);
192164769240SAlex Tomas 		}
192264769240SAlex Tomas 		pagevec_release(&pvec);
192364769240SAlex Tomas 	}
192464769240SAlex Tomas }
192564769240SAlex Tomas 
192664769240SAlex Tomas 
192764769240SAlex Tomas /*
192864769240SAlex Tomas  * __unmap_underlying_blocks - just a helper function to unmap
192964769240SAlex Tomas  * the set of blocks described by @bh
193064769240SAlex Tomas  */
193164769240SAlex Tomas static inline void __unmap_underlying_blocks(struct inode *inode,
193264769240SAlex Tomas 					     struct buffer_head *bh)
193364769240SAlex Tomas {
193464769240SAlex Tomas 	struct block_device *bdev = inode->i_sb->s_bdev;
193564769240SAlex Tomas 	int blocks, i;
193664769240SAlex Tomas 
193764769240SAlex Tomas 	blocks = bh->b_size >> inode->i_blkbits;
193864769240SAlex Tomas 	for (i = 0; i < blocks; i++)
193964769240SAlex Tomas 		unmap_underlying_metadata(bdev, bh->b_blocknr + i);
194064769240SAlex Tomas }
194164769240SAlex Tomas 
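/*
 * Invalidate and unlock the pages covering the @blk_cnt logical blocks
 * starting at @logical; used on the delayed-allocation error path when the
 * blocks backing them could not be allocated.
 */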
1942c4a0c46eSAneesh Kumar K.V static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
1943c4a0c46eSAneesh Kumar K.V 					sector_t logical, long blk_cnt)
1944c4a0c46eSAneesh Kumar K.V {
1945c4a0c46eSAneesh Kumar K.V 	int nr_pages, i;
1946c4a0c46eSAneesh Kumar K.V 	pgoff_t index, end;
1947c4a0c46eSAneesh Kumar K.V 	struct pagevec pvec;
1948c4a0c46eSAneesh Kumar K.V 	struct inode *inode = mpd->inode;
1949c4a0c46eSAneesh Kumar K.V 	struct address_space *mapping = inode->i_mapping;
1950c4a0c46eSAneesh Kumar K.V 
1951c4a0c46eSAneesh Kumar K.V 	index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
1952c4a0c46eSAneesh Kumar K.V 	end   = (logical + blk_cnt - 1) >>
1953c4a0c46eSAneesh Kumar K.V 				(PAGE_CACHE_SHIFT - inode->i_blkbits);
1954c4a0c46eSAneesh Kumar K.V 	while (index <= end) {
1955c4a0c46eSAneesh Kumar K.V 		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
1956c4a0c46eSAneesh Kumar K.V 		if (nr_pages == 0)
1957c4a0c46eSAneesh Kumar K.V 			break;
1958c4a0c46eSAneesh Kumar K.V 		for (i = 0; i < nr_pages; i++) {
1959c4a0c46eSAneesh Kumar K.V 			struct page *page = pvec.pages[i];
1960c4a0c46eSAneesh Kumar K.V 			index = page->index;
1961c4a0c46eSAneesh Kumar K.V 			if (index > end)
1962c4a0c46eSAneesh Kumar K.V 				break;
1963c4a0c46eSAneesh Kumar K.V 			index++;
1964c4a0c46eSAneesh Kumar K.V 
1965c4a0c46eSAneesh Kumar K.V 			BUG_ON(!PageLocked(page));
1966c4a0c46eSAneesh Kumar K.V 			BUG_ON(PageWriteback(page));
1967c4a0c46eSAneesh Kumar K.V 			block_invalidatepage(page, 0);
1968c4a0c46eSAneesh Kumar K.V 			ClearPageUptodate(page);
1969c4a0c46eSAneesh Kumar K.V 			unlock_page(page);
1970c4a0c46eSAneesh Kumar K.V 		}
1971c4a0c46eSAneesh Kumar K.V 	}
1972c4a0c46eSAneesh Kumar K.V 	return;
1973c4a0c46eSAneesh Kumar K.V }
1974c4a0c46eSAneesh Kumar K.V 
1975df22291fSAneesh Kumar K.V static void ext4_print_free_blocks(struct inode *inode)
1976df22291fSAneesh Kumar K.V {
1977df22291fSAneesh Kumar K.V 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1978df22291fSAneesh Kumar K.V 	printk(KERN_EMERG "Total free blocks count %lld\n",
1979df22291fSAneesh Kumar K.V 			ext4_count_free_blocks(inode->i_sb));
1980df22291fSAneesh Kumar K.V 	printk(KERN_EMERG "Free/Dirty block details\n");
1981df22291fSAneesh Kumar K.V 	printk(KERN_EMERG "free_blocks=%lld\n",
19828f72fbdfSAlexander Beregalov 			(long long)percpu_counter_sum(&sbi->s_freeblocks_counter));
1983df22291fSAneesh Kumar K.V 	printk(KERN_EMERG "dirty_blocks=%lld\n",
19848f72fbdfSAlexander Beregalov 			(long long)percpu_counter_sum(&sbi->s_dirtyblocks_counter));
1985df22291fSAneesh Kumar K.V 	printk(KERN_EMERG "Block reservation details\n");
1986498e5f24STheodore Ts'o 	printk(KERN_EMERG "i_reserved_data_blocks=%u\n",
1987df22291fSAneesh Kumar K.V 			EXT4_I(inode)->i_reserved_data_blocks);
1988498e5f24STheodore Ts'o 	printk(KERN_EMERG "i_reserved_meta_blocks=%u\n",
1989df22291fSAneesh Kumar K.V 			EXT4_I(inode)->i_reserved_meta_blocks);
1990df22291fSAneesh Kumar K.V 	return;
1991df22291fSAneesh Kumar K.V }
1992df22291fSAneesh Kumar K.V 
1993ed5bde0bSTheodore Ts'o #define		EXT4_DELALLOC_RSVED	1
1994ed5bde0bSTheodore Ts'o static int ext4_da_get_block_write(struct inode *inode, sector_t iblock,
1995ed5bde0bSTheodore Ts'o 				   struct buffer_head *bh_result, int create)
1996ed5bde0bSTheodore Ts'o {
1997ed5bde0bSTheodore Ts'o 	int ret;
1998ed5bde0bSTheodore Ts'o 	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
1999ed5bde0bSTheodore Ts'o 	loff_t disksize = EXT4_I(inode)->i_disksize;
2000ed5bde0bSTheodore Ts'o 	handle_t *handle = NULL;
2001ed5bde0bSTheodore Ts'o 
2002ed5bde0bSTheodore Ts'o 	handle = ext4_journal_current_handle();
2003ed5bde0bSTheodore Ts'o 	BUG_ON(!handle);
2004ed5bde0bSTheodore Ts'o 	ret = ext4_get_blocks_wrap(handle, inode, iblock, max_blocks,
2005ed5bde0bSTheodore Ts'o 				   bh_result, create, 0, EXT4_DELALLOC_RSVED);
2006ed5bde0bSTheodore Ts'o 	if (ret <= 0)
2007ed5bde0bSTheodore Ts'o 		return ret;
2008ed5bde0bSTheodore Ts'o 
2009ed5bde0bSTheodore Ts'o 	bh_result->b_size = (ret << inode->i_blkbits);
2010ed5bde0bSTheodore Ts'o 
2011ed5bde0bSTheodore Ts'o 	if (ext4_should_order_data(inode)) {
2012ed5bde0bSTheodore Ts'o 		int retval;
2013ed5bde0bSTheodore Ts'o 		retval = ext4_jbd2_file_inode(handle, inode);
2014ed5bde0bSTheodore Ts'o 		if (retval)
2015ed5bde0bSTheodore Ts'o 			/*
2016ed5bde0bSTheodore Ts'o 			 * Failed to add inode for ordered mode. Don't
2017ed5bde0bSTheodore Ts'o 			 * update file size
2018ed5bde0bSTheodore Ts'o 			 */
2019ed5bde0bSTheodore Ts'o 			return retval;
2020ed5bde0bSTheodore Ts'o 	}
2021ed5bde0bSTheodore Ts'o 
2022ed5bde0bSTheodore Ts'o 	/*
2023ed5bde0bSTheodore Ts'o 	 * Update the on-disk size along with the block allocation. We don't
2024ed5bde0bSTheodore Ts'o 	 * use 'extend_disksize' as the size may change within an already
2025ed5bde0bSTheodore Ts'o 	 * allocated block -bzzz
2026ed5bde0bSTheodore Ts'o 	 */
2027ed5bde0bSTheodore Ts'o 	disksize = ((loff_t) iblock + ret) << inode->i_blkbits;
2028ed5bde0bSTheodore Ts'o 	if (disksize > i_size_read(inode))
2029ed5bde0bSTheodore Ts'o 		disksize = i_size_read(inode);
2030ed5bde0bSTheodore Ts'o 	if (disksize > EXT4_I(inode)->i_disksize) {
2031ed5bde0bSTheodore Ts'o 		ext4_update_i_disksize(inode, disksize);
2032ed5bde0bSTheodore Ts'o 		ret = ext4_mark_inode_dirty(handle, inode);
2033ed5bde0bSTheodore Ts'o 		return ret;
2034ed5bde0bSTheodore Ts'o 	}
2035ed5bde0bSTheodore Ts'o 	return 0;
2036ed5bde0bSTheodore Ts'o }
2037ed5bde0bSTheodore Ts'o 
203864769240SAlex Tomas /*
203964769240SAlex Tomas  * mpage_da_map_blocks - go through given space
204064769240SAlex Tomas  *
20418dc207c0STheodore Ts'o  * @mpd - bh describing space
204264769240SAlex Tomas  *
204364769240SAlex Tomas  * The function skips space we know is already mapped to disk blocks.
204464769240SAlex Tomas  *
204564769240SAlex Tomas  */
2046c4a0c46eSAneesh Kumar K.V static int mpage_da_map_blocks(struct mpage_da_data *mpd)
204764769240SAlex Tomas {
2048a1d6cc56SAneesh Kumar K.V 	int err = 0;
2049030ba6bcSAneesh Kumar K.V 	struct buffer_head new;
2050df22291fSAneesh Kumar K.V 	sector_t next;
205164769240SAlex Tomas 
205264769240SAlex Tomas 	/*
205364769240SAlex Tomas 	 * We consider only non-mapped and non-allocated blocks
205464769240SAlex Tomas 	 */
20558dc207c0STheodore Ts'o 	if ((mpd->b_state  & (1 << BH_Mapped)) &&
20568dc207c0STheodore Ts'o 	    !(mpd->b_state & (1 << BH_Delay)))
2057c4a0c46eSAneesh Kumar K.V 		return 0;
205879ffab34SAneesh Kumar K.V 	/*
205979ffab34SAneesh Kumar K.V 	 * We need to make sure the BH_Delay flag is passed down to
206079ffab34SAneesh Kumar K.V 	 * ext4_da_get_block_write(), since it calls
206179ffab34SAneesh Kumar K.V 	 * ext4_get_blocks_wrap() with the EXT4_DELALLOC_RSVED flag.
206279ffab34SAneesh Kumar K.V 	 * This flag causes ext4_get_blocks_wrap() to call
206379ffab34SAneesh Kumar K.V 	 * ext4_da_update_reserve_space() if the passed buffer head
206479ffab34SAneesh Kumar K.V 	 * has the BH_Delay flag set.  In the future, once we clean up
206579ffab34SAneesh Kumar K.V 	 * the interfaces to ext4_get_blocks_wrap(), we should pass in
206679ffab34SAneesh Kumar K.V 	 * a separate flag which requests that the delayed allocation
206779ffab34SAneesh Kumar K.V 	 * statistics should be updated, instead of depending on the
206879ffab34SAneesh Kumar K.V 	 * state information getting passed down via the map_bh's
206979ffab34SAneesh Kumar K.V 	 * state bitmasks plus the magic EXT4_DELALLOC_RSVED flag.
207079ffab34SAneesh Kumar K.V 	 */
207179ffab34SAneesh Kumar K.V 	new.b_state = mpd->b_state & (1 << BH_Delay);
207264769240SAlex Tomas 	new.b_blocknr = 0;
20738dc207c0STheodore Ts'o 	new.b_size = mpd->b_size;
20748dc207c0STheodore Ts'o 	next = mpd->b_blocknr;
207564769240SAlex Tomas 	/*
2076a1d6cc56SAneesh Kumar K.V 	 * If we didn't accumulate anything
2077a1d6cc56SAneesh Kumar K.V 	 * to write, simply return
207864769240SAlex Tomas 	 */
2079a1d6cc56SAneesh Kumar K.V 	if (!new.b_size)
2080c4a0c46eSAneesh Kumar K.V 		return 0;
2081c4a0c46eSAneesh Kumar K.V 
2082ed5bde0bSTheodore Ts'o 	err = ext4_da_get_block_write(mpd->inode, next, &new, 1);
2083ed5bde0bSTheodore Ts'o 	if (err) {
2084ed5bde0bSTheodore Ts'o 		/*
2085ed5bde0bSTheodore Ts'o 		 * If get block returns with error we simply
2086ed5bde0bSTheodore Ts'o 		 * return. Later writepage will redirty the page and
2087ed5bde0bSTheodore Ts'o 		 * writepages will find the dirty page again
2088c4a0c46eSAneesh Kumar K.V 		 */
2089c4a0c46eSAneesh Kumar K.V 		if (err == -EAGAIN)
2090c4a0c46eSAneesh Kumar K.V 			return 0;
2091df22291fSAneesh Kumar K.V 
2092df22291fSAneesh Kumar K.V 		if (err == -ENOSPC &&
2093df22291fSAneesh Kumar K.V 		    ext4_count_free_blocks(mpd->inode->i_sb)) {
2094df22291fSAneesh Kumar K.V 			mpd->retval = err;
2095df22291fSAneesh Kumar K.V 			return 0;
2096df22291fSAneesh Kumar K.V 		}
2097df22291fSAneesh Kumar K.V 
2098c4a0c46eSAneesh Kumar K.V 		/*
2099ed5bde0bSTheodore Ts'o 		 * get block failure will cause us to loop in
2100ed5bde0bSTheodore Ts'o 		 * writepages, because a_ops->writepage won't be able
2101ed5bde0bSTheodore Ts'o 		 * to make progress. The page will be redirtied by
2102ed5bde0bSTheodore Ts'o 		 * writepage and writepages will again try to write
2103ed5bde0bSTheodore Ts'o 		 * the same.
2104c4a0c46eSAneesh Kumar K.V 		 */
2105c4a0c46eSAneesh Kumar K.V 		printk(KERN_EMERG "%s block allocation failed for inode %lu "
2106c4a0c46eSAneesh Kumar K.V 				  "at logical offset %llu with max blocks "
2107c4a0c46eSAneesh Kumar K.V 				  "%zd with error %d\n",
2108c4a0c46eSAneesh Kumar K.V 				  __func__, mpd->inode->i_ino,
2109c4a0c46eSAneesh Kumar K.V 				  (unsigned long long)next,
21108dc207c0STheodore Ts'o 				  mpd->b_size >> mpd->inode->i_blkbits, err);
2111c4a0c46eSAneesh Kumar K.V 		printk(KERN_EMERG "This should not happen!! "
2112c4a0c46eSAneesh Kumar K.V 					"Data will be lost\n");
2113030ba6bcSAneesh Kumar K.V 		if (err == -ENOSPC) {
2114df22291fSAneesh Kumar K.V 			ext4_print_free_blocks(mpd->inode);
2115030ba6bcSAneesh Kumar K.V 		}
2116c4a0c46eSAneesh Kumar K.V 	/* invalidate all the pages */
2117c4a0c46eSAneesh Kumar K.V 		ext4_da_block_invalidatepages(mpd, next,
21188dc207c0STheodore Ts'o 				mpd->b_size >> mpd->inode->i_blkbits);
2119c4a0c46eSAneesh Kumar K.V 		return err;
2120c4a0c46eSAneesh Kumar K.V 	}
212164769240SAlex Tomas 	BUG_ON(new.b_size == 0);
212264769240SAlex Tomas 
212364769240SAlex Tomas 	if (buffer_new(&new))
212464769240SAlex Tomas 		__unmap_underlying_blocks(mpd->inode, &new);
212564769240SAlex Tomas 
212664769240SAlex Tomas 	/*
212764769240SAlex Tomas 	 * If the blocks are marked delayed, we need to
212864769240SAlex Tomas 	 * fill in the actual block number and clear the delayed bit
212964769240SAlex Tomas 	 */
21308dc207c0STheodore Ts'o 	if ((mpd->b_state & (1 << BH_Delay)) ||
21318dc207c0STheodore Ts'o 	    (mpd->b_state & (1 << BH_Unwritten)))
213264769240SAlex Tomas 		mpage_put_bnr_to_bhs(mpd, next, &new);
213364769240SAlex Tomas 
2134c4a0c46eSAneesh Kumar K.V 	return 0;
213564769240SAlex Tomas }
213664769240SAlex Tomas 
2137bf068ee2SAneesh Kumar K.V #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
2138bf068ee2SAneesh Kumar K.V 		(1 << BH_Delay) | (1 << BH_Unwritten))
213964769240SAlex Tomas 
214064769240SAlex Tomas /*
214164769240SAlex Tomas  * mpage_add_bh_to_extent - try to add one more block to extent of blocks
214264769240SAlex Tomas  *
214364769240SAlex Tomas  * @mpd - extent of blocks collected so far
214464769240SAlex Tomas  * @logical - logical number of the block in the file
214564769240SAlex Tomas  * @b_size, @b_state - size and state bits of the block being added
214664769240SAlex Tomas  *
214764769240SAlex Tomas  * the function is used to collect contiguous blocks in the same state
214864769240SAlex Tomas  */
214964769240SAlex Tomas static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
21508dc207c0STheodore Ts'o 				   sector_t logical, size_t b_size,
21518dc207c0STheodore Ts'o 				   unsigned long b_state)
215264769240SAlex Tomas {
215364769240SAlex Tomas 	sector_t next;
21548dc207c0STheodore Ts'o 	int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
215564769240SAlex Tomas 
2156525f4ed8SMingming Cao 	/* check if the reserved journal credits might overflow */
2157525f4ed8SMingming Cao 	if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) {
2158525f4ed8SMingming Cao 		if (nrblocks >= EXT4_MAX_TRANS_DATA) {
2159525f4ed8SMingming Cao 			/*
2160525f4ed8SMingming Cao 			 * With non-extent format we are limited by the journal
2161525f4ed8SMingming Cao 			 * credits available.  The total credits needed to insert
2162525f4ed8SMingming Cao 			 * nrblocks contiguous blocks depend on
2163525f4ed8SMingming Cao 			 * nrblocks.  So limit nrblocks.
2164525f4ed8SMingming Cao 			 */
2165525f4ed8SMingming Cao 			goto flush_it;
2166525f4ed8SMingming Cao 		} else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
2167525f4ed8SMingming Cao 				EXT4_MAX_TRANS_DATA) {
2168525f4ed8SMingming Cao 			/*
2169525f4ed8SMingming Cao 			 * Adding the new buffer_head would make it cross the
2170525f4ed8SMingming Cao 			 * allowed limit for which we have journal credit
2171525f4ed8SMingming Cao 			 * reserved. So limit the new bh->b_size
2172525f4ed8SMingming Cao 			 */
2173525f4ed8SMingming Cao 			b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
2174525f4ed8SMingming Cao 						mpd->inode->i_blkbits;
2175525f4ed8SMingming Cao 			/* we will do mpage_da_submit_io in the next loop */
2176525f4ed8SMingming Cao 		}
2177525f4ed8SMingming Cao 	}
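	/*
	 * For example: if EXT4_MAX_TRANS_DATA allowed 64 data blocks per
	 * transaction, the extent already held nrblocks == 60 blocks and
	 * the incoming buffer covered 16 more, the buffer would be
	 * trimmed to (64 - 60) == 4 blocks above, so the whole extent
	 * still fits in the credits reserved for a single transaction.
	 */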
217864769240SAlex Tomas 	/*
217964769240SAlex Tomas 	 * First block in the extent
218064769240SAlex Tomas 	 */
21818dc207c0STheodore Ts'o 	if (mpd->b_size == 0) {
21828dc207c0STheodore Ts'o 		mpd->b_blocknr = logical;
21838dc207c0STheodore Ts'o 		mpd->b_size = b_size;
21848dc207c0STheodore Ts'o 		mpd->b_state = b_state & BH_FLAGS;
218564769240SAlex Tomas 		return;
218664769240SAlex Tomas 	}
218764769240SAlex Tomas 
21888dc207c0STheodore Ts'o 	next = mpd->b_blocknr + nrblocks;
218964769240SAlex Tomas 	/*
219064769240SAlex Tomas 	 * Can we merge the block to our big extent?
219164769240SAlex Tomas 	 */
21928dc207c0STheodore Ts'o 	if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
21938dc207c0STheodore Ts'o 		mpd->b_size += b_size;
219464769240SAlex Tomas 		return;
219564769240SAlex Tomas 	}
219664769240SAlex Tomas 
2197525f4ed8SMingming Cao flush_it:
219864769240SAlex Tomas 	/*
219964769240SAlex Tomas 	 * We couldn't merge the block to our extent, so we
220064769240SAlex Tomas 	 * need to flush current  extent and start new one
220164769240SAlex Tomas 	 */
2202c4a0c46eSAneesh Kumar K.V 	if (mpage_da_map_blocks(mpd) == 0)
2203a1d6cc56SAneesh Kumar K.V 		mpage_da_submit_io(mpd);
2204a1d6cc56SAneesh Kumar K.V 	mpd->io_done = 1;
2205a1d6cc56SAneesh Kumar K.V 	return;
220664769240SAlex Tomas }
220764769240SAlex Tomas 
220864769240SAlex Tomas /*
220964769240SAlex Tomas  * __mpage_da_writepage - finds extent of pages and blocks
221064769240SAlex Tomas  *
221164769240SAlex Tomas  * @page: page to consider
221264769240SAlex Tomas  * @wbc: not used, we just follow rules
221364769240SAlex Tomas  * @data: context
221464769240SAlex Tomas  *
221564769240SAlex Tomas  * The function finds extents of pages and scans them for all blocks.
221664769240SAlex Tomas  */
221764769240SAlex Tomas static int __mpage_da_writepage(struct page *page,
221864769240SAlex Tomas 				struct writeback_control *wbc, void *data)
221964769240SAlex Tomas {
222064769240SAlex Tomas 	struct mpage_da_data *mpd = data;
222164769240SAlex Tomas 	struct inode *inode = mpd->inode;
22228dc207c0STheodore Ts'o 	struct buffer_head *bh, *head;
222364769240SAlex Tomas 	sector_t logical;
222464769240SAlex Tomas 
2225a1d6cc56SAneesh Kumar K.V 	if (mpd->io_done) {
2226a1d6cc56SAneesh Kumar K.V 		/*
2227a1d6cc56SAneesh Kumar K.V 		 * Rest of the pages in the page_vec:
2228a1d6cc56SAneesh Kumar K.V 		 * redirty them and skip them. We will
2229a1d6cc56SAneesh Kumar K.V 		 * try to write them again after
2230a1d6cc56SAneesh Kumar K.V 		 * starting a new transaction
2231a1d6cc56SAneesh Kumar K.V 		 */
2232a1d6cc56SAneesh Kumar K.V 		redirty_page_for_writepage(wbc, page);
2233a1d6cc56SAneesh Kumar K.V 		unlock_page(page);
2234a1d6cc56SAneesh Kumar K.V 		return MPAGE_DA_EXTENT_TAIL;
2235a1d6cc56SAneesh Kumar K.V 	}
223664769240SAlex Tomas 	/*
223764769240SAlex Tomas 	 * Can we merge this page to current extent?
223864769240SAlex Tomas 	 */
223964769240SAlex Tomas 	if (mpd->next_page != page->index) {
224064769240SAlex Tomas 		/*
224164769240SAlex Tomas 		 * Nope, we can't. So, we map non-allocated blocks
2242a1d6cc56SAneesh Kumar K.V 		 * and start IO on them using writepage()
224364769240SAlex Tomas 		 */
224464769240SAlex Tomas 		if (mpd->next_page != mpd->first_page) {
2245c4a0c46eSAneesh Kumar K.V 			if (mpage_da_map_blocks(mpd) == 0)
224664769240SAlex Tomas 				mpage_da_submit_io(mpd);
2247a1d6cc56SAneesh Kumar K.V 			/*
2248a1d6cc56SAneesh Kumar K.V 			 * skip the rest of the pages in the page_vec
2249a1d6cc56SAneesh Kumar K.V 			 */
2250a1d6cc56SAneesh Kumar K.V 			mpd->io_done = 1;
2251a1d6cc56SAneesh Kumar K.V 			redirty_page_for_writepage(wbc, page);
2252a1d6cc56SAneesh Kumar K.V 			unlock_page(page);
2253a1d6cc56SAneesh Kumar K.V 			return MPAGE_DA_EXTENT_TAIL;
225464769240SAlex Tomas 		}
225564769240SAlex Tomas 
225664769240SAlex Tomas 		/*
225764769240SAlex Tomas 		 * Start next extent of pages ...
225864769240SAlex Tomas 		 */
225964769240SAlex Tomas 		mpd->first_page = page->index;
226064769240SAlex Tomas 
226164769240SAlex Tomas 		/*
226264769240SAlex Tomas 		 * ... and blocks
226364769240SAlex Tomas 		 */
22648dc207c0STheodore Ts'o 		mpd->b_size = 0;
22658dc207c0STheodore Ts'o 		mpd->b_state = 0;
22668dc207c0STheodore Ts'o 		mpd->b_blocknr = 0;
226764769240SAlex Tomas 	}
226864769240SAlex Tomas 
226964769240SAlex Tomas 	mpd->next_page = page->index + 1;
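	/*
	 * Convert the page index into the file's logical block number:
	 * for example, with 4KiB pages and 1KiB blocks the shift is
	 * PAGE_CACHE_SHIFT - i_blkbits == 12 - 10 == 2, so page index 5
	 * starts at logical block 20.
	 */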
227064769240SAlex Tomas 	logical = (sector_t) page->index <<
227164769240SAlex Tomas 		  (PAGE_CACHE_SHIFT - inode->i_blkbits);
227264769240SAlex Tomas 
227364769240SAlex Tomas 	if (!page_has_buffers(page)) {
22748dc207c0STheodore Ts'o 		mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE,
22758dc207c0STheodore Ts'o 				       (1 << BH_Dirty) | (1 << BH_Uptodate));
2276a1d6cc56SAneesh Kumar K.V 		if (mpd->io_done)
2277a1d6cc56SAneesh Kumar K.V 			return MPAGE_DA_EXTENT_TAIL;
227864769240SAlex Tomas 	} else {
227964769240SAlex Tomas 		/*
228064769240SAlex Tomas 		 * Page with regular buffer heads, just add all dirty ones
228164769240SAlex Tomas 		 */
228264769240SAlex Tomas 		head = page_buffers(page);
228364769240SAlex Tomas 		bh = head;
228464769240SAlex Tomas 		do {
228564769240SAlex Tomas 			BUG_ON(buffer_locked(bh));
2286791b7f08SAneesh Kumar K.V 			/*
2287791b7f08SAneesh Kumar K.V 			 * We need to try to allocate
2288791b7f08SAneesh Kumar K.V 			 * unmapped blocks in the same page.
2289791b7f08SAneesh Kumar K.V 			 * Otherwise we won't make progress
2290791b7f08SAneesh Kumar K.V 			 * with the page in ext4_da_writepage
2291791b7f08SAneesh Kumar K.V 			 */
2292a1d6cc56SAneesh Kumar K.V 			if (buffer_dirty(bh) &&
2293a1d6cc56SAneesh Kumar K.V 			    (!buffer_mapped(bh) || buffer_delay(bh))) {
22948dc207c0STheodore Ts'o 				mpage_add_bh_to_extent(mpd, logical,
22958dc207c0STheodore Ts'o 						       bh->b_size,
22968dc207c0STheodore Ts'o 						       bh->b_state);
2297a1d6cc56SAneesh Kumar K.V 				if (mpd->io_done)
2298a1d6cc56SAneesh Kumar K.V 					return MPAGE_DA_EXTENT_TAIL;
2299791b7f08SAneesh Kumar K.V 			} else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
2300791b7f08SAneesh Kumar K.V 				/*
2301791b7f08SAneesh Kumar K.V 				 * mapped dirty buffer. We need to update
2302791b7f08SAneesh Kumar K.V 				 * the b_state because we look at
2303791b7f08SAneesh Kumar K.V 				 * b_state in mpage_da_map_blocks. We don't
2304791b7f08SAneesh Kumar K.V 				 * update b_size because if we find an
2305791b7f08SAneesh Kumar K.V 				 * unmapped buffer_head later we need to
2306791b7f08SAneesh Kumar K.V 				 * use the b_state flag of that buffer_head.
2307791b7f08SAneesh Kumar K.V 				 */
23088dc207c0STheodore Ts'o 				if (mpd->b_size == 0)
23098dc207c0STheodore Ts'o 					mpd->b_state = bh->b_state & BH_FLAGS;
2310a1d6cc56SAneesh Kumar K.V 			}
231164769240SAlex Tomas 			logical++;
231264769240SAlex Tomas 		} while ((bh = bh->b_this_page) != head);
231364769240SAlex Tomas 	}
231464769240SAlex Tomas 
231564769240SAlex Tomas 	return 0;
231664769240SAlex Tomas }
231764769240SAlex Tomas 
231864769240SAlex Tomas /*
231964769240SAlex Tomas  * this is a special callback for ->write_begin() only;
232064769240SAlex Tomas  * its intention is to return a mapped block or reserve space
232164769240SAlex Tomas  */
232264769240SAlex Tomas static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
232364769240SAlex Tomas 				  struct buffer_head *bh_result, int create)
232464769240SAlex Tomas {
232564769240SAlex Tomas 	int ret = 0;
232633b9817eSAneesh Kumar K.V 	sector_t invalid_block = ~((sector_t) 0xffff);
232733b9817eSAneesh Kumar K.V 
232833b9817eSAneesh Kumar K.V 	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
232933b9817eSAneesh Kumar K.V 		invalid_block = ~0;
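	/*
	 * invalid_block is a sentinel block number used below for
	 * delayed (not yet allocated) buffers; it is chosen so that it
	 * can never collide with a real on-disk block number.
	 */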
233064769240SAlex Tomas 
233164769240SAlex Tomas 	BUG_ON(create == 0);
233264769240SAlex Tomas 	BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
233364769240SAlex Tomas 
233464769240SAlex Tomas 	/*
233564769240SAlex Tomas 	 * first, we need to know whether the block is allocated already;
233664769240SAlex Tomas 	 * preallocated blocks are unmapped but should be treated
233764769240SAlex Tomas 	 * the same as allocated blocks.
233864769240SAlex Tomas 	 */
2339d2a17637SMingming Cao 	ret = ext4_get_blocks_wrap(NULL, inode, iblock, 1,  bh_result, 0, 0, 0);
2340d2a17637SMingming Cao 	if ((ret == 0) && !buffer_delay(bh_result)) {
2341d2a17637SMingming Cao 		/* the block isn't (pre)allocated yet, let's reserve space */
234264769240SAlex Tomas 		/*
234364769240SAlex Tomas 		 * XXX: __block_prepare_write() unmaps passed block,
234464769240SAlex Tomas 		 * is it OK?
234564769240SAlex Tomas 		 */
2346d2a17637SMingming Cao 		ret = ext4_da_reserve_space(inode, 1);
2347d2a17637SMingming Cao 		if (ret)
2348d2a17637SMingming Cao 			/* not enough space to reserve */
2349d2a17637SMingming Cao 			return ret;
2350d2a17637SMingming Cao 
235133b9817eSAneesh Kumar K.V 		map_bh(bh_result, inode->i_sb, invalid_block);
235264769240SAlex Tomas 		set_buffer_new(bh_result);
235364769240SAlex Tomas 		set_buffer_delay(bh_result);
235464769240SAlex Tomas 	} else if (ret > 0) {
235564769240SAlex Tomas 		bh_result->b_size = (ret << inode->i_blkbits);
23569c1ee184SAneesh Kumar K.V 		/*
23579c1ee184SAneesh Kumar K.V 		 * With sub-block writes into unwritten extents
23589c1ee184SAneesh Kumar K.V 		 * we also need to mark the buffer as new so that
23599c1ee184SAneesh Kumar K.V 		 * the unwritten parts of the buffer get correctly zeroed.
23609c1ee184SAneesh Kumar K.V 		 */
23619c1ee184SAneesh Kumar K.V 		if (buffer_unwritten(bh_result))
23629c1ee184SAneesh Kumar K.V 			set_buffer_new(bh_result);
236364769240SAlex Tomas 		ret = 0;
236464769240SAlex Tomas 	}
236564769240SAlex Tomas 
236664769240SAlex Tomas 	return ret;
236764769240SAlex Tomas }
236861628a3fSMingming Cao 
236961628a3fSMingming Cao static int ext4_bh_unmapped_or_delay(handle_t *handle, struct buffer_head *bh)
237061628a3fSMingming Cao {
2371f0e6c985SAneesh Kumar K.V 	/*
2372f0e6c985SAneesh Kumar K.V 	 * an unmapped buffer is possible for holes.
2373f0e6c985SAneesh Kumar K.V 	 * a delayed buffer is possible with delayed allocation
2374f0e6c985SAneesh Kumar K.V 	 */
2375f0e6c985SAneesh Kumar K.V 	return ((!buffer_mapped(bh) || buffer_delay(bh)) && buffer_dirty(bh));
2376f0e6c985SAneesh Kumar K.V }
2377f0e6c985SAneesh Kumar K.V 
2378f0e6c985SAneesh Kumar K.V static int ext4_normal_get_block_write(struct inode *inode, sector_t iblock,
2379f0e6c985SAneesh Kumar K.V 				   struct buffer_head *bh_result, int create)
2380f0e6c985SAneesh Kumar K.V {
2381f0e6c985SAneesh Kumar K.V 	int ret = 0;
2382f0e6c985SAneesh Kumar K.V 	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
2383f0e6c985SAneesh Kumar K.V 
2384f0e6c985SAneesh Kumar K.V 	/*
2385f0e6c985SAneesh Kumar K.V 	 * we don't want to do block allocation in writepage
2386f0e6c985SAneesh Kumar K.V 	 * so call get_block_wrap with create = 0
2387f0e6c985SAneesh Kumar K.V 	 */
2388f0e6c985SAneesh Kumar K.V 	ret = ext4_get_blocks_wrap(NULL, inode, iblock, max_blocks,
2389f0e6c985SAneesh Kumar K.V 				   bh_result, 0, 0, 0);
2390f0e6c985SAneesh Kumar K.V 	if (ret > 0) {
2391f0e6c985SAneesh Kumar K.V 		bh_result->b_size = (ret << inode->i_blkbits);
2392f0e6c985SAneesh Kumar K.V 		ret = 0;
2393f0e6c985SAneesh Kumar K.V 	}
2394f0e6c985SAneesh Kumar K.V 	return ret;
239561628a3fSMingming Cao }
239661628a3fSMingming Cao 
239761628a3fSMingming Cao /*
2398f0e6c985SAneesh Kumar K.V  * get called via ext4_da_writepages after taking page lock (have journal handle)
2399f0e6c985SAneesh Kumar K.V  * get called via journal_submit_inode_data_buffers (no journal handle)
2400f0e6c985SAneesh Kumar K.V  * get called via shrink_page_list via pdflush (no journal handle)
2401f0e6c985SAneesh Kumar K.V  * or grab_page_cache when doing write_begin (have journal handle)
240261628a3fSMingming Cao  */
240364769240SAlex Tomas static int ext4_da_writepage(struct page *page,
240464769240SAlex Tomas 				struct writeback_control *wbc)
240564769240SAlex Tomas {
240664769240SAlex Tomas 	int ret = 0;
240761628a3fSMingming Cao 	loff_t size;
2408498e5f24STheodore Ts'o 	unsigned int len;
240961628a3fSMingming Cao 	struct buffer_head *page_bufs;
241061628a3fSMingming Cao 	struct inode *inode = page->mapping->host;
241164769240SAlex Tomas 
2412ba80b101STheodore Ts'o 	trace_mark(ext4_da_writepage,
2413ba80b101STheodore Ts'o 		   "dev %s ino %lu page_index %lu",
2414ba80b101STheodore Ts'o 		   inode->i_sb->s_id, inode->i_ino, page->index);
241561628a3fSMingming Cao 	size = i_size_read(inode);
241661628a3fSMingming Cao 	if (page->index == size >> PAGE_CACHE_SHIFT)
241761628a3fSMingming Cao 		len = size & ~PAGE_CACHE_MASK;
241861628a3fSMingming Cao 	else
241961628a3fSMingming Cao 		len = PAGE_CACHE_SIZE;
242061628a3fSMingming Cao 
2421f0e6c985SAneesh Kumar K.V 	if (page_has_buffers(page)) {
2422f0e6c985SAneesh Kumar K.V 		page_bufs = page_buffers(page);
2423f0e6c985SAneesh Kumar K.V 		if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2424f0e6c985SAneesh Kumar K.V 					ext4_bh_unmapped_or_delay)) {
242561628a3fSMingming Cao 			/*
2426f0e6c985SAneesh Kumar K.V 			 * We don't want to do block allocation,
2427f0e6c985SAneesh Kumar K.V 			 * so redirty the page and return.
2428cd1aac32SAneesh Kumar K.V 			 * We may reach here when we do a journal commit
2429cd1aac32SAneesh Kumar K.V 			 * via journal_submit_inode_data_buffers.
2430cd1aac32SAneesh Kumar K.V 			 * If we don't have a mapped block we just ignore
2431f0e6c985SAneesh Kumar K.V 			 * them. We can also reach here via shrink_page_list.
2432f0e6c985SAneesh Kumar K.V 			 */
2433f0e6c985SAneesh Kumar K.V 			redirty_page_for_writepage(wbc, page);
2434f0e6c985SAneesh Kumar K.V 			unlock_page(page);
2435f0e6c985SAneesh Kumar K.V 			return 0;
2436f0e6c985SAneesh Kumar K.V 		}
2437f0e6c985SAneesh Kumar K.V 	} else {
2438f0e6c985SAneesh Kumar K.V 		/*
2439f0e6c985SAneesh Kumar K.V 		 * The test for page_has_buffers() is subtle:
2440f0e6c985SAneesh Kumar K.V 		 * We know the page is dirty but it lost buffers. That means
2441f0e6c985SAneesh Kumar K.V 		 * that at some moment in time after write_begin()/write_end()
2442f0e6c985SAneesh Kumar K.V 		 * has been called all buffers have been clean and thus they
2443f0e6c985SAneesh Kumar K.V 		 * must have been written at least once. So they are all
2444f0e6c985SAneesh Kumar K.V 		 * mapped and we can happily proceed with mapping them
2445f0e6c985SAneesh Kumar K.V 		 * and writing the page.
2446f0e6c985SAneesh Kumar K.V 		 *
2447f0e6c985SAneesh Kumar K.V 		 * Try to initialize the buffer_heads and check whether
2448f0e6c985SAneesh Kumar K.V 		 * all are mapped and non-delay. We don't want to
2449f0e6c985SAneesh Kumar K.V 		 * do block allocation here.
2450f0e6c985SAneesh Kumar K.V 		 */
2451f0e6c985SAneesh Kumar K.V 		ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
2452f0e6c985SAneesh Kumar K.V 						ext4_normal_get_block_write);
2453f0e6c985SAneesh Kumar K.V 		if (!ret) {
2454f0e6c985SAneesh Kumar K.V 			page_bufs = page_buffers(page);
2455f0e6c985SAneesh Kumar K.V 			/* check whether all are mapped and non-delay */
2456f0e6c985SAneesh Kumar K.V 			if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2457f0e6c985SAneesh Kumar K.V 						ext4_bh_unmapped_or_delay)) {
2458f0e6c985SAneesh Kumar K.V 				redirty_page_for_writepage(wbc, page);
2459f0e6c985SAneesh Kumar K.V 				unlock_page(page);
2460f0e6c985SAneesh Kumar K.V 				return 0;
2461f0e6c985SAneesh Kumar K.V 			}
2462f0e6c985SAneesh Kumar K.V 		} else {
2463f0e6c985SAneesh Kumar K.V 			/*
2464f0e6c985SAneesh Kumar K.V 			 * We can't do block allocation here
2465f0e6c985SAneesh Kumar K.V 			 * so just redirty the page and unlock
2466f0e6c985SAneesh Kumar K.V 			 * and return
246761628a3fSMingming Cao 			 */
246861628a3fSMingming Cao 			redirty_page_for_writepage(wbc, page);
246961628a3fSMingming Cao 			unlock_page(page);
247061628a3fSMingming Cao 			return 0;
247161628a3fSMingming Cao 		}
2472ed9b3e33SAneesh Kumar K.V 		/* now mark the buffer_heads as dirty and uptodate */
2473ed9b3e33SAneesh Kumar K.V 		block_commit_write(page, 0, PAGE_CACHE_SIZE);
247464769240SAlex Tomas 	}
247564769240SAlex Tomas 
247664769240SAlex Tomas 	if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
2477f0e6c985SAneesh Kumar K.V 		ret = nobh_writepage(page, ext4_normal_get_block_write, wbc);
247864769240SAlex Tomas 	else
2479f0e6c985SAneesh Kumar K.V 		ret = block_write_full_page(page,
2480f0e6c985SAneesh Kumar K.V 						ext4_normal_get_block_write,
2481f0e6c985SAneesh Kumar K.V 						wbc);
248264769240SAlex Tomas 
248364769240SAlex Tomas 	return ret;
248464769240SAlex Tomas }
248564769240SAlex Tomas 
248661628a3fSMingming Cao /*
2487525f4ed8SMingming Cao  * This is called via ext4_da_writepages() to
2488525f4ed8SMingming Cao  * calculate the total number of credits to reserve to fit
2489525f4ed8SMingming Cao  * a single extent allocation into a single transaction;
2490525f4ed8SMingming Cao  * ext4_da_writepages() will loop calling this before
2491525f4ed8SMingming Cao  * the block allocation.
249261628a3fSMingming Cao  */
2493525f4ed8SMingming Cao 
2494525f4ed8SMingming Cao static int ext4_da_writepages_trans_blocks(struct inode *inode)
2495525f4ed8SMingming Cao {
2496525f4ed8SMingming Cao 	int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
2497525f4ed8SMingming Cao 
2498525f4ed8SMingming Cao 	/*
2499525f4ed8SMingming Cao 	 * With non-extent format the journal credits needed to
2500525f4ed8SMingming Cao 	 * insert nrblocks contiguous blocks depend on
2501525f4ed8SMingming Cao 	 * the number of contiguous blocks. So we will limit
2502525f4ed8SMingming Cao 	 * the number of contiguous blocks to a sane value
2503525f4ed8SMingming Cao 	 */
2504525f4ed8SMingming Cao 	if (!(inode->i_flags & EXT4_EXTENTS_FL) &&
2505525f4ed8SMingming Cao 	    (max_blocks > EXT4_MAX_TRANS_DATA))
2506525f4ed8SMingming Cao 		max_blocks = EXT4_MAX_TRANS_DATA;
2507525f4ed8SMingming Cao 
2508525f4ed8SMingming Cao 	return ext4_chunk_trans_blocks(inode, max_blocks);
2509525f4ed8SMingming Cao }
251061628a3fSMingming Cao 
251164769240SAlex Tomas static int ext4_da_writepages(struct address_space *mapping,
251264769240SAlex Tomas 			      struct writeback_control *wbc)
251364769240SAlex Tomas {
251422208dedSAneesh Kumar K.V 	pgoff_t	index;
251522208dedSAneesh Kumar K.V 	int range_whole = 0;
251661628a3fSMingming Cao 	handle_t *handle = NULL;
2517df22291fSAneesh Kumar K.V 	struct mpage_da_data mpd;
25185e745b04SAneesh Kumar K.V 	struct inode *inode = mapping->host;
251922208dedSAneesh Kumar K.V 	int no_nrwrite_index_update;
2520498e5f24STheodore Ts'o 	int pages_written = 0;
2521498e5f24STheodore Ts'o 	long pages_skipped;
25222acf2c26SAneesh Kumar K.V 	int range_cyclic, cycled = 1, io_done = 0;
25235e745b04SAneesh Kumar K.V 	int needed_blocks, ret = 0, nr_to_writebump = 0;
25245e745b04SAneesh Kumar K.V 	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
252561628a3fSMingming Cao 
2526ba80b101STheodore Ts'o 	trace_mark(ext4_da_writepages,
2527ba80b101STheodore Ts'o 		   "dev %s ino %lu nr_t_write %ld "
2528ba80b101STheodore Ts'o 		   "pages_skipped %ld range_start %llu "
2529ba80b101STheodore Ts'o 		   "range_end %llu nonblocking %d "
2530ba80b101STheodore Ts'o 		   "for_kupdate %d for_reclaim %d "
2531ba80b101STheodore Ts'o 		   "for_writepages %d range_cyclic %d",
2532ba80b101STheodore Ts'o 		   inode->i_sb->s_id, inode->i_ino,
2533ba80b101STheodore Ts'o 		   wbc->nr_to_write, wbc->pages_skipped,
2534ba80b101STheodore Ts'o 		   (unsigned long long) wbc->range_start,
2535ba80b101STheodore Ts'o 		   (unsigned long long) wbc->range_end,
2536ba80b101STheodore Ts'o 		   wbc->nonblocking, wbc->for_kupdate,
2537ba80b101STheodore Ts'o 		   wbc->for_reclaim, wbc->for_writepages,
2538ba80b101STheodore Ts'o 		   wbc->range_cyclic);
2539ba80b101STheodore Ts'o 
254061628a3fSMingming Cao 	/*
254161628a3fSMingming Cao 	 * No pages to write? This is mainly a kludge to avoid starting
254261628a3fSMingming Cao 	 * a transaction for special inodes like journal inode on last iput()
254361628a3fSMingming Cao 	 * because that could violate lock ordering on umount
254461628a3fSMingming Cao 	 */
2545a1d6cc56SAneesh Kumar K.V 	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
254661628a3fSMingming Cao 		return 0;
25472a21e37eSTheodore Ts'o 
25482a21e37eSTheodore Ts'o 	/*
25492a21e37eSTheodore Ts'o 	 * If the filesystem has aborted, it is read-only, so return
25502a21e37eSTheodore Ts'o 	 * right away instead of dumping stack traces later on that
25512a21e37eSTheodore Ts'o 	 * will obscure the real source of the problem.  We test
25522a21e37eSTheodore Ts'o 	 * EXT4_MOUNT_ABORT instead of sb->s_flag's MS_RDONLY because
25532a21e37eSTheodore Ts'o 	 * the latter could be true if the filesystem is mounted
25542a21e37eSTheodore Ts'o 	 * read-only, and in that case, ext4_da_writepages should
25552a21e37eSTheodore Ts'o 	 * *never* be called, so if that ever happens, we would want
25562a21e37eSTheodore Ts'o 	 * the stack trace.
25572a21e37eSTheodore Ts'o 	 */
25582a21e37eSTheodore Ts'o 	if (unlikely(sbi->s_mount_opt & EXT4_MOUNT_ABORT))
25592a21e37eSTheodore Ts'o 		return -EROFS;
25602a21e37eSTheodore Ts'o 
25615e745b04SAneesh Kumar K.V 	/*
25625e745b04SAneesh Kumar K.V 	 * Make sure nr_to_write is >= sbi->s_mb_stream_request.
25635e745b04SAneesh Kumar K.V 	 * This makes sure small file blocks are allocated in a
25645e745b04SAneesh Kumar K.V 	 * single attempt, which ensures that small files
25655e745b04SAneesh Kumar K.V 	 * get less fragmented.
25665e745b04SAneesh Kumar K.V 	 */
25675e745b04SAneesh Kumar K.V 	if (wbc->nr_to_write < sbi->s_mb_stream_request) {
25685e745b04SAneesh Kumar K.V 		nr_to_writebump = sbi->s_mb_stream_request - wbc->nr_to_write;
25695e745b04SAneesh Kumar K.V 		wbc->nr_to_write = sbi->s_mb_stream_request;
25705e745b04SAneesh Kumar K.V 	}
257122208dedSAneesh Kumar K.V 	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
257222208dedSAneesh Kumar K.V 		range_whole = 1;
257361628a3fSMingming Cao 
25742acf2c26SAneesh Kumar K.V 	range_cyclic = wbc->range_cyclic;
25752acf2c26SAneesh Kumar K.V 	if (wbc->range_cyclic) {
257622208dedSAneesh Kumar K.V 		index = mapping->writeback_index;
25772acf2c26SAneesh Kumar K.V 		if (index)
25782acf2c26SAneesh Kumar K.V 			cycled = 0;
25792acf2c26SAneesh Kumar K.V 		wbc->range_start = index << PAGE_CACHE_SHIFT;
25802acf2c26SAneesh Kumar K.V 		wbc->range_end  = LLONG_MAX;
25812acf2c26SAneesh Kumar K.V 		wbc->range_cyclic = 0;
25822acf2c26SAneesh Kumar K.V 	} else
258322208dedSAneesh Kumar K.V 		index = wbc->range_start >> PAGE_CACHE_SHIFT;
2584a1d6cc56SAneesh Kumar K.V 
2585df22291fSAneesh Kumar K.V 	mpd.wbc = wbc;
2586df22291fSAneesh Kumar K.V 	mpd.inode = mapping->host;
2587df22291fSAneesh Kumar K.V 
258822208dedSAneesh Kumar K.V 	/*
258922208dedSAneesh Kumar K.V 	 * we don't want write_cache_pages to update
259022208dedSAneesh Kumar K.V 	 * nr_to_write and writeback_index
259122208dedSAneesh Kumar K.V 	 */
259222208dedSAneesh Kumar K.V 	no_nrwrite_index_update = wbc->no_nrwrite_index_update;
259322208dedSAneesh Kumar K.V 	wbc->no_nrwrite_index_update = 1;
259422208dedSAneesh Kumar K.V 	pages_skipped = wbc->pages_skipped;
259522208dedSAneesh Kumar K.V 
25962acf2c26SAneesh Kumar K.V retry:
259722208dedSAneesh Kumar K.V 	while (!ret && wbc->nr_to_write > 0) {
2598a1d6cc56SAneesh Kumar K.V 
2599a1d6cc56SAneesh Kumar K.V 		/*
2600a1d6cc56SAneesh Kumar K.V 		 * we insert one extent at a time, so we need
2601a1d6cc56SAneesh Kumar K.V 		 * the credits for a single extent allocation.
2602a1d6cc56SAneesh Kumar K.V 		 * journalled mode is currently not supported
2603a1d6cc56SAneesh Kumar K.V 		 * by delalloc
2604a1d6cc56SAneesh Kumar K.V 		 */
2605a1d6cc56SAneesh Kumar K.V 		BUG_ON(ext4_should_journal_data(inode));
2606525f4ed8SMingming Cao 		needed_blocks = ext4_da_writepages_trans_blocks(inode);
2607a1d6cc56SAneesh Kumar K.V 
260861628a3fSMingming Cao 		/* start a new transaction*/
260961628a3fSMingming Cao 		handle = ext4_journal_start(inode, needed_blocks);
261061628a3fSMingming Cao 		if (IS_ERR(handle)) {
261161628a3fSMingming Cao 			ret = PTR_ERR(handle);
26122a21e37eSTheodore Ts'o 			printk(KERN_CRIT "%s: jbd2_start: "
2613a1d6cc56SAneesh Kumar K.V 			       "%ld pages, ino %lu; err %d\n", __func__,
2614a1d6cc56SAneesh Kumar K.V 				wbc->nr_to_write, inode->i_ino, ret);
2615a1d6cc56SAneesh Kumar K.V 			dump_stack();
261661628a3fSMingming Cao 			goto out_writepages;
261761628a3fSMingming Cao 		}
2618f63e6005STheodore Ts'o 
2619f63e6005STheodore Ts'o 		/*
2620f63e6005STheodore Ts'o 		 * Now call __mpage_da_writepage to find the next
2621f63e6005STheodore Ts'o 		 * contiguous region of logical blocks that need
2622f63e6005STheodore Ts'o 		 * blocks to be allocated by ext4.  We don't actually
2623f63e6005STheodore Ts'o 		 * submit the blocks for I/O here, even though
2624f63e6005STheodore Ts'o 		 * write_cache_pages thinks it will, and will set the
2625f63e6005STheodore Ts'o 		 * pages as clean for write before calling
2626f63e6005STheodore Ts'o 		 * __mpage_da_writepage().
2627f63e6005STheodore Ts'o 		 */
2628f63e6005STheodore Ts'o 		mpd.b_size = 0;
2629f63e6005STheodore Ts'o 		mpd.b_state = 0;
2630f63e6005STheodore Ts'o 		mpd.b_blocknr = 0;
2631f63e6005STheodore Ts'o 		mpd.first_page = 0;
2632f63e6005STheodore Ts'o 		mpd.next_page = 0;
2633f63e6005STheodore Ts'o 		mpd.io_done = 0;
2634f63e6005STheodore Ts'o 		mpd.pages_written = 0;
2635f63e6005STheodore Ts'o 		mpd.retval = 0;
2636f63e6005STheodore Ts'o 		ret = write_cache_pages(mapping, wbc, __mpage_da_writepage,
2637f63e6005STheodore Ts'o 					&mpd);
2638f63e6005STheodore Ts'o 		/*
2639f63e6005STheodore Ts'o 		 * If we have a contiguous extent of pages and we
2640f63e6005STheodore Ts'o 		 * haven't done the I/O yet, map the blocks and submit
2641f63e6005STheodore Ts'o 		 * them for I/O.
2642f63e6005STheodore Ts'o 		 */
2643f63e6005STheodore Ts'o 		if (!mpd.io_done && mpd.next_page != mpd.first_page) {
2644f63e6005STheodore Ts'o 			if (mpage_da_map_blocks(&mpd) == 0)
2645f63e6005STheodore Ts'o 				mpage_da_submit_io(&mpd);
2646f63e6005STheodore Ts'o 			mpd.io_done = 1;
2647f63e6005STheodore Ts'o 			ret = MPAGE_DA_EXTENT_TAIL;
2648f63e6005STheodore Ts'o 		}
2649f63e6005STheodore Ts'o 		wbc->nr_to_write -= mpd.pages_written;
2650df22291fSAneesh Kumar K.V 
265161628a3fSMingming Cao 		ext4_journal_stop(handle);
2652df22291fSAneesh Kumar K.V 
26538f64b32eSEric Sandeen 		if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
265422208dedSAneesh Kumar K.V 			/* commit the transaction which would
265522208dedSAneesh Kumar K.V 			 * free blocks released in the transaction
265622208dedSAneesh Kumar K.V 			 * and try again
265722208dedSAneesh Kumar K.V 			 */
2658df22291fSAneesh Kumar K.V 			jbd2_journal_force_commit_nested(sbi->s_journal);
265922208dedSAneesh Kumar K.V 			wbc->pages_skipped = pages_skipped;
266022208dedSAneesh Kumar K.V 			ret = 0;
266122208dedSAneesh Kumar K.V 		} else if (ret == MPAGE_DA_EXTENT_TAIL) {
2662a1d6cc56SAneesh Kumar K.V 			/*
2663a1d6cc56SAneesh Kumar K.V 			 * got one extent; now try with the
2664a1d6cc56SAneesh Kumar K.V 			 * rest of the pages
2665a1d6cc56SAneesh Kumar K.V 			 */
266622208dedSAneesh Kumar K.V 			pages_written += mpd.pages_written;
266722208dedSAneesh Kumar K.V 			wbc->pages_skipped = pages_skipped;
2668a1d6cc56SAneesh Kumar K.V 			ret = 0;
26692acf2c26SAneesh Kumar K.V 			io_done = 1;
267022208dedSAneesh Kumar K.V 		} else if (wbc->nr_to_write)
267161628a3fSMingming Cao 			/*
267261628a3fSMingming Cao 			 * There is no more writeout needed
267361628a3fSMingming Cao 			 * or we requested a non-blocking writeout
267461628a3fSMingming Cao 			 * and we found the device congested
267561628a3fSMingming Cao 			 */
267661628a3fSMingming Cao 			break;
267761628a3fSMingming Cao 	}
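	/*
	 * In range_cyclic mode we started scanning at writeback_index
	 * rather than at offset zero.  If nothing was written and we
	 * have not wrapped around yet, restart once from the beginning
	 * of the file up to where the first pass started.
	 */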
26782acf2c26SAneesh Kumar K.V 	if (!io_done && !cycled) {
26792acf2c26SAneesh Kumar K.V 		cycled = 1;
26802acf2c26SAneesh Kumar K.V 		index = 0;
26812acf2c26SAneesh Kumar K.V 		wbc->range_start = index << PAGE_CACHE_SHIFT;
26822acf2c26SAneesh Kumar K.V 		wbc->range_end  = mapping->writeback_index - 1;
26832acf2c26SAneesh Kumar K.V 		goto retry;
26842acf2c26SAneesh Kumar K.V 	}
268522208dedSAneesh Kumar K.V 	if (pages_skipped != wbc->pages_skipped)
268622208dedSAneesh Kumar K.V 		printk(KERN_EMERG "This should not happen leaving %s "
268722208dedSAneesh Kumar K.V 				"with nr_to_write = %ld ret = %d\n",
268822208dedSAneesh Kumar K.V 				__func__, wbc->nr_to_write, ret);
268961628a3fSMingming Cao 
269022208dedSAneesh Kumar K.V 	/* Update index */
269122208dedSAneesh Kumar K.V 	index += pages_written;
26922acf2c26SAneesh Kumar K.V 	wbc->range_cyclic = range_cyclic;
269322208dedSAneesh Kumar K.V 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
269422208dedSAneesh Kumar K.V 		/*
269522208dedSAneesh Kumar K.V 		 * set the writeback_index so that range_cyclic
269622208dedSAneesh Kumar K.V 		 * mode will write it back later
269722208dedSAneesh Kumar K.V 		 */
269822208dedSAneesh Kumar K.V 		mapping->writeback_index = index;
2699a1d6cc56SAneesh Kumar K.V 
270061628a3fSMingming Cao out_writepages:
270122208dedSAneesh Kumar K.V 	if (!no_nrwrite_index_update)
270222208dedSAneesh Kumar K.V 		wbc->no_nrwrite_index_update = 0;
270322208dedSAneesh Kumar K.V 	wbc->nr_to_write -= nr_to_writebump;
2704ba80b101STheodore Ts'o 	trace_mark(ext4_da_writepage_result,
2705ba80b101STheodore Ts'o 		   "dev %s ino %lu ret %d pages_written %d "
2706ba80b101STheodore Ts'o 		   "pages_skipped %ld congestion %d "
2707ba80b101STheodore Ts'o 		   "more_io %d no_nrwrite_index_update %d",
2708ba80b101STheodore Ts'o 		   inode->i_sb->s_id, inode->i_ino, ret,
2709ba80b101STheodore Ts'o 		   pages_written, wbc->pages_skipped,
2710ba80b101STheodore Ts'o 		   wbc->encountered_congestion, wbc->more_io,
2711ba80b101STheodore Ts'o 		   wbc->no_nrwrite_index_update);
271261628a3fSMingming Cao 	return ret;
271364769240SAlex Tomas }
271464769240SAlex Tomas 
271579f0be8dSAneesh Kumar K.V #define FALL_BACK_TO_NONDELALLOC 1
271679f0be8dSAneesh Kumar K.V static int ext4_nonda_switch(struct super_block *sb)
271779f0be8dSAneesh Kumar K.V {
271879f0be8dSAneesh Kumar K.V 	s64 free_blocks, dirty_blocks;
271979f0be8dSAneesh Kumar K.V 	struct ext4_sb_info *sbi = EXT4_SB(sb);
272079f0be8dSAneesh Kumar K.V 
272179f0be8dSAneesh Kumar K.V 	/*
272279f0be8dSAneesh Kumar K.V 	 * switch to non-delalloc mode if we are running low
272379f0be8dSAneesh Kumar K.V 	 * on free blocks. The free block accounting via percpu
2724179f7ebfSEric Dumazet 	 * counters can get slightly wrong with percpu_counter_batch getting
272579f0be8dSAneesh Kumar K.V 	 * accumulated on each CPU without updating the global counters.
272679f0be8dSAneesh Kumar K.V 	 * Delalloc needs an accurate free block accounting, so switch
272779f0be8dSAneesh Kumar K.V 	 * to non-delalloc when we are near the error range.
272879f0be8dSAneesh Kumar K.V 	 */
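	/*
	 * The first check below is equivalent to
	 * free_blocks < 1.5 * dirty_blocks: for example, with 3000
	 * dirty blocks we fall back to non-delalloc once fewer than
	 * 4500 blocks are free (or once free_blocks drops below
	 * dirty_blocks plus the watermark).
	 */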
272979f0be8dSAneesh Kumar K.V 	free_blocks  = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
273079f0be8dSAneesh Kumar K.V 	dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter);
273179f0be8dSAneesh Kumar K.V 	if (2 * free_blocks < 3 * dirty_blocks ||
273279f0be8dSAneesh Kumar K.V 		free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) {
273379f0be8dSAneesh Kumar K.V 		/*
273379f0be8dSAneesh Kumar K.V 		 * free block count is less than 150% of dirty blocks
273479f0be8dSAneesh Kumar K.V 		 * or free blocks are less than the watermark
273679f0be8dSAneesh Kumar K.V 		 */
273779f0be8dSAneesh Kumar K.V 		return 1;
273879f0be8dSAneesh Kumar K.V 	}
273979f0be8dSAneesh Kumar K.V 	return 0;
274079f0be8dSAneesh Kumar K.V }
274179f0be8dSAneesh Kumar K.V 
274264769240SAlex Tomas static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
274364769240SAlex Tomas 				loff_t pos, unsigned len, unsigned flags,
274464769240SAlex Tomas 				struct page **pagep, void **fsdata)
274564769240SAlex Tomas {
2746d2a17637SMingming Cao 	int ret, retries = 0;
274764769240SAlex Tomas 	struct page *page;
274864769240SAlex Tomas 	pgoff_t index;
274964769240SAlex Tomas 	unsigned from, to;
275064769240SAlex Tomas 	struct inode *inode = mapping->host;
275164769240SAlex Tomas 	handle_t *handle;
275264769240SAlex Tomas 
275364769240SAlex Tomas 	index = pos >> PAGE_CACHE_SHIFT;
275464769240SAlex Tomas 	from = pos & (PAGE_CACHE_SIZE - 1);
275564769240SAlex Tomas 	to = from + len;
275679f0be8dSAneesh Kumar K.V 
275779f0be8dSAneesh Kumar K.V 	if (ext4_nonda_switch(inode->i_sb)) {
275879f0be8dSAneesh Kumar K.V 		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
275979f0be8dSAneesh Kumar K.V 		return ext4_write_begin(file, mapping, pos,
276079f0be8dSAneesh Kumar K.V 					len, flags, pagep, fsdata);
276179f0be8dSAneesh Kumar K.V 	}
276279f0be8dSAneesh Kumar K.V 	*fsdata = (void *)0;
2763ba80b101STheodore Ts'o 
2764ba80b101STheodore Ts'o 	trace_mark(ext4_da_write_begin,
2765ba80b101STheodore Ts'o 		   "dev %s ino %lu pos %llu len %u flags %u",
2766ba80b101STheodore Ts'o 		   inode->i_sb->s_id, inode->i_ino,
2767ba80b101STheodore Ts'o 		   (unsigned long long) pos, len, flags);
2768d2a17637SMingming Cao retry:
276964769240SAlex Tomas 	/*
277064769240SAlex Tomas 	 * With delayed allocation, we don't log the i_disksize update
277164769240SAlex Tomas 	 * if there is delayed block allocation. But we still need
277264769240SAlex Tomas 	 * to journal the i_disksize update if we write to the end
277364769240SAlex Tomas 	 * of a file which has an already mapped buffer.
277464769240SAlex Tomas 	 */
277564769240SAlex Tomas 	handle = ext4_journal_start(inode, 1);
277664769240SAlex Tomas 	if (IS_ERR(handle)) {
277764769240SAlex Tomas 		ret = PTR_ERR(handle);
277864769240SAlex Tomas 		goto out;
277964769240SAlex Tomas 	}
2780ebd3610bSJan Kara 	/* We cannot recurse into the filesystem as the transaction is already
2781ebd3610bSJan Kara 	 * started */
2782ebd3610bSJan Kara 	flags |= AOP_FLAG_NOFS;
278364769240SAlex Tomas 
278454566b2cSNick Piggin 	page = grab_cache_page_write_begin(mapping, index, flags);
2785d5a0d4f7SEric Sandeen 	if (!page) {
2786d5a0d4f7SEric Sandeen 		ext4_journal_stop(handle);
2787d5a0d4f7SEric Sandeen 		ret = -ENOMEM;
2788d5a0d4f7SEric Sandeen 		goto out;
2789d5a0d4f7SEric Sandeen 	}
279064769240SAlex Tomas 	*pagep = page;
279164769240SAlex Tomas 
279264769240SAlex Tomas 	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
279364769240SAlex Tomas 							ext4_da_get_block_prep);
279464769240SAlex Tomas 	if (ret < 0) {
279564769240SAlex Tomas 		unlock_page(page);
279664769240SAlex Tomas 		ext4_journal_stop(handle);
279764769240SAlex Tomas 		page_cache_release(page);
2798ae4d5372SAneesh Kumar K.V 		/*
2799ae4d5372SAneesh Kumar K.V 		 * block_write_begin may have instantiated a few blocks
2800ae4d5372SAneesh Kumar K.V 		 * outside i_size.  Trim these off again. Don't need
2801ae4d5372SAneesh Kumar K.V 		 * i_size_read because we hold i_mutex.
2802ae4d5372SAneesh Kumar K.V 		 */
2803ae4d5372SAneesh Kumar K.V 		if (pos + len > inode->i_size)
2804ae4d5372SAneesh Kumar K.V 			vmtruncate(inode, inode->i_size);
280564769240SAlex Tomas 	}
280664769240SAlex Tomas 
2807d2a17637SMingming Cao 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
2808d2a17637SMingming Cao 		goto retry;
280964769240SAlex Tomas out:
281064769240SAlex Tomas 	return ret;
281164769240SAlex Tomas }
281264769240SAlex Tomas 
2813632eaeabSMingming Cao /*
2814632eaeabSMingming Cao  * Check if we should update i_disksize
2815632eaeabSMingming Cao  * when writing to the end of file without requiring block allocation
2816632eaeabSMingming Cao  */
2817632eaeabSMingming Cao static int ext4_da_should_update_i_disksize(struct page *page,
2818632eaeabSMingming Cao 					 unsigned long offset)
2819632eaeabSMingming Cao {
2820632eaeabSMingming Cao 	struct buffer_head *bh;
2821632eaeabSMingming Cao 	struct inode *inode = page->mapping->host;
2822632eaeabSMingming Cao 	unsigned int idx;
2823632eaeabSMingming Cao 	int i;
2824632eaeabSMingming Cao 
2825632eaeabSMingming Cao 	bh = page_buffers(page);
2826632eaeabSMingming Cao 	idx = offset >> inode->i_blkbits;
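	/*
	 * idx is the index of the buffer_head that covers 'offset'
	 * within the page: for example, with 1KiB blocks
	 * (i_blkbits == 10) an offset of 2500 bytes gives idx == 2, so
	 * we walk forward two buffer_heads before checking its state.
	 */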
2827632eaeabSMingming Cao 
2828632eaeabSMingming Cao 	for (i = 0; i < idx; i++)
2829632eaeabSMingming Cao 		bh = bh->b_this_page;
2830632eaeabSMingming Cao 
2831632eaeabSMingming Cao 	if (!buffer_mapped(bh) || (buffer_delay(bh)))
2832632eaeabSMingming Cao 		return 0;
2833632eaeabSMingming Cao 	return 1;
2834632eaeabSMingming Cao }
2835632eaeabSMingming Cao 
283664769240SAlex Tomas static int ext4_da_write_end(struct file *file,
283764769240SAlex Tomas 				struct address_space *mapping,
283864769240SAlex Tomas 				loff_t pos, unsigned len, unsigned copied,
283964769240SAlex Tomas 				struct page *page, void *fsdata)
284064769240SAlex Tomas {
284164769240SAlex Tomas 	struct inode *inode = mapping->host;
284264769240SAlex Tomas 	int ret = 0, ret2;
284364769240SAlex Tomas 	handle_t *handle = ext4_journal_current_handle();
284464769240SAlex Tomas 	loff_t new_i_size;
2845632eaeabSMingming Cao 	unsigned long start, end;
284679f0be8dSAneesh Kumar K.V 	int write_mode = (int)(unsigned long)fsdata;
284779f0be8dSAneesh Kumar K.V 
284879f0be8dSAneesh Kumar K.V 	if (write_mode == FALL_BACK_TO_NONDELALLOC) {
284979f0be8dSAneesh Kumar K.V 		if (ext4_should_order_data(inode)) {
285079f0be8dSAneesh Kumar K.V 			return ext4_ordered_write_end(file, mapping, pos,
285179f0be8dSAneesh Kumar K.V 					len, copied, page, fsdata);
285279f0be8dSAneesh Kumar K.V 		} else if (ext4_should_writeback_data(inode)) {
285379f0be8dSAneesh Kumar K.V 			return ext4_writeback_write_end(file, mapping, pos,
285479f0be8dSAneesh Kumar K.V 					len, copied, page, fsdata);
285579f0be8dSAneesh Kumar K.V 		} else {
285679f0be8dSAneesh Kumar K.V 			BUG();
285779f0be8dSAneesh Kumar K.V 		}
285879f0be8dSAneesh Kumar K.V 	}
2859632eaeabSMingming Cao 
2860ba80b101STheodore Ts'o 	trace_mark(ext4_da_write_end,
2861ba80b101STheodore Ts'o 		   "dev %s ino %lu pos %llu len %u copied %u",
2862ba80b101STheodore Ts'o 		   inode->i_sb->s_id, inode->i_ino,
2863ba80b101STheodore Ts'o 		   (unsigned long long) pos, len, copied);
2864632eaeabSMingming Cao 	start = pos & (PAGE_CACHE_SIZE - 1);
2865632eaeabSMingming Cao 	end = start + copied - 1;
286664769240SAlex Tomas 
286764769240SAlex Tomas 	/*
286864769240SAlex Tomas 	 * generic_write_end() will run mark_inode_dirty() if i_size
286964769240SAlex Tomas 	 * changes.  So let's piggyback the i_disksize mark_inode_dirty
287064769240SAlex Tomas 	 * into that.
287164769240SAlex Tomas 	 */
287264769240SAlex Tomas 
287364769240SAlex Tomas 	new_i_size = pos + copied;
2874632eaeabSMingming Cao 	if (new_i_size > EXT4_I(inode)->i_disksize) {
2875632eaeabSMingming Cao 		if (ext4_da_should_update_i_disksize(page, end)) {
2876632eaeabSMingming Cao 			down_write(&EXT4_I(inode)->i_data_sem);
2877632eaeabSMingming Cao 			if (new_i_size > EXT4_I(inode)->i_disksize) {
287864769240SAlex Tomas 				/*
2879632eaeabSMingming Cao 				 * Updating i_disksize when extending file
2880632eaeabSMingming Cao 				 * without needing block allocation
288164769240SAlex Tomas 				 */
288264769240SAlex Tomas 				if (ext4_should_order_data(inode))
2883632eaeabSMingming Cao 					ret = ext4_jbd2_file_inode(handle,
2884632eaeabSMingming Cao 								   inode);
288564769240SAlex Tomas 
288664769240SAlex Tomas 				EXT4_I(inode)->i_disksize = new_i_size;
288764769240SAlex Tomas 			}
2888632eaeabSMingming Cao 			up_write(&EXT4_I(inode)->i_data_sem);
2889cf17fea6SAneesh Kumar K.V 			/* We need to mark inode dirty even if
2890cf17fea6SAneesh Kumar K.V 			 * new_i_size is less than inode->i_size
2891cf17fea6SAneesh Kumar K.V 			 * but greater than i_disksize. (hint: delalloc)
2892cf17fea6SAneesh Kumar K.V 			 */
2893cf17fea6SAneesh Kumar K.V 			ext4_mark_inode_dirty(handle, inode);
2894632eaeabSMingming Cao 		}
2895632eaeabSMingming Cao 	}
289664769240SAlex Tomas 	ret2 = generic_write_end(file, mapping, pos, len, copied,
289764769240SAlex Tomas 							page, fsdata);
289864769240SAlex Tomas 	copied = ret2;
289964769240SAlex Tomas 	if (ret2 < 0)
290064769240SAlex Tomas 		ret = ret2;
290164769240SAlex Tomas 	ret2 = ext4_journal_stop(handle);
290264769240SAlex Tomas 	if (!ret)
290364769240SAlex Tomas 		ret = ret2;
290464769240SAlex Tomas 
290564769240SAlex Tomas 	return ret ? ret : copied;
290664769240SAlex Tomas }
290764769240SAlex Tomas 
290864769240SAlex Tomas static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
290964769240SAlex Tomas {
291064769240SAlex Tomas 	/*
291164769240SAlex Tomas 	 * Drop reserved blocks
291264769240SAlex Tomas 	 */
291364769240SAlex Tomas 	BUG_ON(!PageLocked(page));
291464769240SAlex Tomas 	if (!page_has_buffers(page))
291564769240SAlex Tomas 		goto out;
291664769240SAlex Tomas 
2917d2a17637SMingming Cao 	ext4_da_page_release_reservation(page, offset);
291864769240SAlex Tomas 
291964769240SAlex Tomas out:
292064769240SAlex Tomas 	ext4_invalidatepage(page, offset);
292164769240SAlex Tomas 
292264769240SAlex Tomas 	return;
292364769240SAlex Tomas }
292464769240SAlex Tomas 
2925ccd2506bSTheodore Ts'o /*
2926ccd2506bSTheodore Ts'o  * Force all delayed allocation blocks to be allocated for a given inode.
2927ccd2506bSTheodore Ts'o  */
2928ccd2506bSTheodore Ts'o int ext4_alloc_da_blocks(struct inode *inode)
2929ccd2506bSTheodore Ts'o {
2930ccd2506bSTheodore Ts'o 	if (!EXT4_I(inode)->i_reserved_data_blocks &&
2931ccd2506bSTheodore Ts'o 	    !EXT4_I(inode)->i_reserved_meta_blocks)
2932ccd2506bSTheodore Ts'o 		return 0;
2933ccd2506bSTheodore Ts'o 
2934ccd2506bSTheodore Ts'o 	/*
2935ccd2506bSTheodore Ts'o 	 * We do something simple for now.  The filemap_flush() will
2936ccd2506bSTheodore Ts'o 	 * also start triggering a write of the data blocks, which is
2937ccd2506bSTheodore Ts'o 	 * not strictly speaking necessary (and for users of
2938ccd2506bSTheodore Ts'o 	 * laptop_mode, not even desirable).  However, to do otherwise
2939ccd2506bSTheodore Ts'o 	 * would require replicating code paths in:
2940ccd2506bSTheodore Ts'o 	 *
2941ccd2506bSTheodore Ts'o 	 * ext4_da_writepages() ->
2942ccd2506bSTheodore Ts'o 	 *    write_cache_pages() ---> (via passed in callback function)
2943ccd2506bSTheodore Ts'o 	 *        __mpage_da_writepage() -->
2944ccd2506bSTheodore Ts'o 	 *           mpage_add_bh_to_extent()
2945ccd2506bSTheodore Ts'o 	 *           mpage_da_map_blocks()
2946ccd2506bSTheodore Ts'o 	 *
2947ccd2506bSTheodore Ts'o 	 * The problem is that write_cache_pages(), located in
2948ccd2506bSTheodore Ts'o 	 * mm/page-writeback.c, marks pages clean in preparation for
2949ccd2506bSTheodore Ts'o 	 * doing I/O, which is not desirable if we're not planning on
2950ccd2506bSTheodore Ts'o 	 * doing I/O at all.
2951ccd2506bSTheodore Ts'o 	 *
2952ccd2506bSTheodore Ts'o 	 * We could call write_cache_pages(), and then redirty all of
2953ccd2506bSTheodore Ts'o 	 * the pages by calling redirty_page_for_writepage() but that
2954ccd2506bSTheodore Ts'o 	 * would be ugly in the extreme.  So instead we would need to
2955ccd2506bSTheodore Ts'o 	 * replicate parts of the code in the above functions,
2956ccd2506bSTheodore Ts'o 	 * simplifying them because we wouldn't actually intend to
2957ccd2506bSTheodore Ts'o 	 * write out the pages, but rather only collect contiguous
2958ccd2506bSTheodore Ts'o 	 * logical block extents, call the multi-block allocator, and
2959ccd2506bSTheodore Ts'o 	 * then update the buffer heads with the block allocations.
2960ccd2506bSTheodore Ts'o 	 *
2961ccd2506bSTheodore Ts'o 	 * For now, though, we'll cheat by calling filemap_flush(),
2962ccd2506bSTheodore Ts'o 	 * which will map the blocks, and start the I/O, but not
2963ccd2506bSTheodore Ts'o 	 * actually wait for the I/O to complete.
2964ccd2506bSTheodore Ts'o 	 */
2965ccd2506bSTheodore Ts'o 	return filemap_flush(inode->i_mapping);
2966ccd2506bSTheodore Ts'o }
296764769240SAlex Tomas 
296864769240SAlex Tomas /*
2969ac27a0ecSDave Kleikamp  * bmap() is special.  It gets used by applications such as lilo and by
2970ac27a0ecSDave Kleikamp  * the swapper to find the on-disk block of a specific piece of data.
2971ac27a0ecSDave Kleikamp  *
2972ac27a0ecSDave Kleikamp  * Naturally, this is dangerous if the block concerned is still in the
2973617ba13bSMingming Cao  * journal.  If somebody makes a swapfile on an ext4 data-journaling
2974ac27a0ecSDave Kleikamp  * filesystem and enables swap, then they may get a nasty shock when the
2975ac27a0ecSDave Kleikamp  * data getting swapped to that swapfile suddenly gets overwritten by
2976ac27a0ecSDave Kleikamp  * the original zeros written out previously to the journal and
2977ac27a0ecSDave Kleikamp  * awaiting writeback in the kernel's buffer cache.
2978ac27a0ecSDave Kleikamp  *
2979ac27a0ecSDave Kleikamp  * So, if we see any bmap calls here on a modified, data-journaled file,
2980ac27a0ecSDave Kleikamp  * take extra steps to flush any blocks which might be in the cache.
2981ac27a0ecSDave Kleikamp  */
2982617ba13bSMingming Cao static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
2983ac27a0ecSDave Kleikamp {
2984ac27a0ecSDave Kleikamp 	struct inode *inode = mapping->host;
2985ac27a0ecSDave Kleikamp 	journal_t *journal;
2986ac27a0ecSDave Kleikamp 	int err;
2987ac27a0ecSDave Kleikamp 
298864769240SAlex Tomas 	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
298964769240SAlex Tomas 			test_opt(inode->i_sb, DELALLOC)) {
299064769240SAlex Tomas 		/*
299164769240SAlex Tomas 		 * With delalloc we want to sync the file
299264769240SAlex Tomas 		 * so that we can make sure we allocate
299364769240SAlex Tomas 		 * blocks for the file
299464769240SAlex Tomas 		 */
299564769240SAlex Tomas 		filemap_write_and_wait(mapping);
299664769240SAlex Tomas 	}
299764769240SAlex Tomas 
29980390131bSFrank Mayhar 	if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
2999ac27a0ecSDave Kleikamp 		/*
3000ac27a0ecSDave Kleikamp 		 * This is a REALLY heavyweight approach, but the use of
3001ac27a0ecSDave Kleikamp 		 * bmap on dirty files is expected to be extremely rare:
3002ac27a0ecSDave Kleikamp 		 * only if we run lilo or swapon on a freshly made file
3003ac27a0ecSDave Kleikamp 		 * do we expect this to happen.
3004ac27a0ecSDave Kleikamp 		 *
3005ac27a0ecSDave Kleikamp 		 * (bmap requires CAP_SYS_RAWIO so this does not
3006ac27a0ecSDave Kleikamp 		 * represent an unprivileged user DOS attack --- we'd be
3007ac27a0ecSDave Kleikamp 		 * in trouble if mortal users could trigger this path at
3008ac27a0ecSDave Kleikamp 		 * will.)
3009ac27a0ecSDave Kleikamp 		 *
3010617ba13bSMingming Cao 		 * NB. EXT4_STATE_JDATA is not set on files other than
3011ac27a0ecSDave Kleikamp 		 * regular files.  If somebody wants to bmap a directory
3012ac27a0ecSDave Kleikamp 		 * or symlink and gets confused because the buffer
3013ac27a0ecSDave Kleikamp 		 * hasn't yet been flushed to disk, they deserve
3014ac27a0ecSDave Kleikamp 		 * everything they get.
3015ac27a0ecSDave Kleikamp 		 */
3016ac27a0ecSDave Kleikamp 
3017617ba13bSMingming Cao 		EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
3018617ba13bSMingming Cao 		journal = EXT4_JOURNAL(inode);
3019dab291afSMingming Cao 		jbd2_journal_lock_updates(journal);
3020dab291afSMingming Cao 		err = jbd2_journal_flush(journal);
3021dab291afSMingming Cao 		jbd2_journal_unlock_updates(journal);
3022ac27a0ecSDave Kleikamp 
3023ac27a0ecSDave Kleikamp 		if (err)
3024ac27a0ecSDave Kleikamp 			return 0;
3025ac27a0ecSDave Kleikamp 	}
3026ac27a0ecSDave Kleikamp 
3027617ba13bSMingming Cao 	return generic_block_bmap(mapping, block, ext4_get_block);
3028ac27a0ecSDave Kleikamp }
3029ac27a0ecSDave Kleikamp 
3030ac27a0ecSDave Kleikamp static int bget_one(handle_t *handle, struct buffer_head *bh)
3031ac27a0ecSDave Kleikamp {
3032ac27a0ecSDave Kleikamp 	get_bh(bh);
3033ac27a0ecSDave Kleikamp 	return 0;
3034ac27a0ecSDave Kleikamp }
3035ac27a0ecSDave Kleikamp 
3036ac27a0ecSDave Kleikamp static int bput_one(handle_t *handle, struct buffer_head *bh)
3037ac27a0ecSDave Kleikamp {
3038ac27a0ecSDave Kleikamp 	put_bh(bh);
3039ac27a0ecSDave Kleikamp 	return 0;
3040ac27a0ecSDave Kleikamp }
3041ac27a0ecSDave Kleikamp 
3042ac27a0ecSDave Kleikamp /*
3043678aaf48SJan Kara  * Note that we don't need to start a transaction unless we're journaling data
3044678aaf48SJan Kara  * because we should have holes filled from ext4_page_mkwrite(). We even don't
3045678aaf48SJan Kara  * need to file the inode to the transaction's list in ordered mode because if
3046678aaf48SJan Kara  * we are writing back data added by write(), the inode is already there and if
3047678aaf48SJan Kara  * we are writing back data modified via mmap(), no one guarantees in which
3048678aaf48SJan Kara  * transaction the data will hit the disk. In case we are journaling data, we
3049678aaf48SJan Kara  * cannot start transaction directly because transaction start ranks above page
3050678aaf48SJan Kara  * lock so we have to do some magic.
3051ac27a0ecSDave Kleikamp  *
3052678aaf48SJan Kara  * In all journaling modes block_write_full_page() will start the I/O.
3053ac27a0ecSDave Kleikamp  *
3054ac27a0ecSDave Kleikamp  * Problem:
3055ac27a0ecSDave Kleikamp  *
3056617ba13bSMingming Cao  *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
3057617ba13bSMingming Cao  *		ext4_writepage()
3058ac27a0ecSDave Kleikamp  *
3059ac27a0ecSDave Kleikamp  * Similar for:
3060ac27a0ecSDave Kleikamp  *
3061617ba13bSMingming Cao  *	ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
3062ac27a0ecSDave Kleikamp  *
3063617ba13bSMingming Cao  * The same applies to ext4_get_block().  We will deadlock on various things like
30640e855ac8SAneesh Kumar K.V  * lock_journal and i_data_sem.
3065ac27a0ecSDave Kleikamp  *
3066ac27a0ecSDave Kleikamp  * Setting PF_MEMALLOC here doesn't work - too many internal memory
3067ac27a0ecSDave Kleikamp  * allocations fail.
3068ac27a0ecSDave Kleikamp  *
3069ac27a0ecSDave Kleikamp  * 16May01: If we're reentered then journal_current_handle() will be
3070ac27a0ecSDave Kleikamp  *	    non-zero. We simply *return*.
3071ac27a0ecSDave Kleikamp  *
3072ac27a0ecSDave Kleikamp  * 1 July 2001: @@@ FIXME:
3073ac27a0ecSDave Kleikamp  *   In journalled data mode, a data buffer may be metadata against the
3074ac27a0ecSDave Kleikamp  *   current transaction.  But the same file is part of a shared mapping
3075ac27a0ecSDave Kleikamp  *   and someone does a writepage() on it.
3076ac27a0ecSDave Kleikamp  *
3077ac27a0ecSDave Kleikamp  *   We will move the buffer onto the async_data list, but *after* it has
3078ac27a0ecSDave Kleikamp  *   been dirtied. So there's a small window where we have dirty data on
3079ac27a0ecSDave Kleikamp  *   BJ_Metadata.
3080ac27a0ecSDave Kleikamp  *
3081ac27a0ecSDave Kleikamp  *   Note that this only applies to the last partial page in the file: the
3082ac27a0ecSDave Kleikamp  *   part which block_write_full_page() uses prepare/commit for.  (That's
3083ac27a0ecSDave Kleikamp  *   broken code anyway: it's wrong for msync().)
3084ac27a0ecSDave Kleikamp  *
3085ac27a0ecSDave Kleikamp  *   It's a rare case: it affects the final partial page, for journalled data
3086ac27a0ecSDave Kleikamp  *   where the file is subject to both write() and writepage() in the same
3087ac27a0ecSDave Kleikamp  *   transaction.  To fix it we'll need a custom block_write_full_page().
3088ac27a0ecSDave Kleikamp  *   We'll probably need that anyway for journalling writepage() output.
3089ac27a0ecSDave Kleikamp  *
3090ac27a0ecSDave Kleikamp  * We don't honour synchronous mounts for writepage().  That would be
3091ac27a0ecSDave Kleikamp  * disastrous.  Any write() or metadata operation will sync the fs for
3092ac27a0ecSDave Kleikamp  * us.
3093ac27a0ecSDave Kleikamp  *
3094ac27a0ecSDave Kleikamp  */
3095678aaf48SJan Kara static int __ext4_normal_writepage(struct page *page,
3096cf108bcaSJan Kara 				struct writeback_control *wbc)
3097cf108bcaSJan Kara {
3098cf108bcaSJan Kara 	struct inode *inode = page->mapping->host;
3099cf108bcaSJan Kara 
3100cf108bcaSJan Kara 	if (test_opt(inode->i_sb, NOBH))
3101f0e6c985SAneesh Kumar K.V 		return nobh_writepage(page,
3102f0e6c985SAneesh Kumar K.V 					ext4_normal_get_block_write, wbc);
3103cf108bcaSJan Kara 	else
3104f0e6c985SAneesh Kumar K.V 		return block_write_full_page(page,
3105f0e6c985SAneesh Kumar K.V 						ext4_normal_get_block_write,
3106f0e6c985SAneesh Kumar K.V 						wbc);
3107cf108bcaSJan Kara }
3108cf108bcaSJan Kara 
3109678aaf48SJan Kara static int ext4_normal_writepage(struct page *page,
3110ac27a0ecSDave Kleikamp 				struct writeback_control *wbc)
3111ac27a0ecSDave Kleikamp {
3112ac27a0ecSDave Kleikamp 	struct inode *inode = page->mapping->host;
3113cf108bcaSJan Kara 	loff_t size = i_size_read(inode);
3114cf108bcaSJan Kara 	loff_t len;
3115cf108bcaSJan Kara 
3116ba80b101STheodore Ts'o 	trace_mark(ext4_normal_writepage,
3117ba80b101STheodore Ts'o 		   "dev %s ino %lu page_index %lu",
3118ba80b101STheodore Ts'o 		   inode->i_sb->s_id, inode->i_ino, page->index);
3119cf108bcaSJan Kara 	J_ASSERT(PageLocked(page));
3120cf108bcaSJan Kara 	if (page->index == size >> PAGE_CACHE_SHIFT)
3121cf108bcaSJan Kara 		len = size & ~PAGE_CACHE_MASK;
3122cf108bcaSJan Kara 	else
3123cf108bcaSJan Kara 		len = PAGE_CACHE_SIZE;
3124f0e6c985SAneesh Kumar K.V 
3125f0e6c985SAneesh Kumar K.V 	if (page_has_buffers(page)) {
3126f0e6c985SAneesh Kumar K.V 		/* If the page has buffers, they should all be mapped
3127f0e6c985SAneesh Kumar K.V 		 * and allocated. If there are no buffers attached
3128f0e6c985SAneesh Kumar K.V 		 * to the page, we know the page is dirty but it lost
3129f0e6c985SAneesh Kumar K.V 		 * its buffers. That means that at some moment in time
3130f0e6c985SAneesh Kumar K.V 		 * after write_begin() / write_end() was called,
3131f0e6c985SAneesh Kumar K.V 		 * all buffers were clean and thus they must have been
3132f0e6c985SAneesh Kumar K.V 		 * written at least once. So they are all mapped and we can
3133f0e6c985SAneesh Kumar K.V 		 * happily proceed with mapping them and writing the page.
3134f0e6c985SAneesh Kumar K.V 		 */
3135cf108bcaSJan Kara 		BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
3136cf108bcaSJan Kara 					ext4_bh_unmapped_or_delay));
3137f0e6c985SAneesh Kumar K.V 	}
3138cf108bcaSJan Kara 
3139cf108bcaSJan Kara 	if (!ext4_journal_current_handle())
3140678aaf48SJan Kara 		return __ext4_normal_writepage(page, wbc);
3141cf108bcaSJan Kara 
3142cf108bcaSJan Kara 	redirty_page_for_writepage(wbc, page);
3143cf108bcaSJan Kara 	unlock_page(page);
3144cf108bcaSJan Kara 	return 0;
3145cf108bcaSJan Kara }
3146cf108bcaSJan Kara 
3147cf108bcaSJan Kara static int __ext4_journalled_writepage(struct page *page,
3148cf108bcaSJan Kara 				struct writeback_control *wbc)
3149cf108bcaSJan Kara {
3150cf108bcaSJan Kara 	struct address_space *mapping = page->mapping;
3151cf108bcaSJan Kara 	struct inode *inode = mapping->host;
3152cf108bcaSJan Kara 	struct buffer_head *page_bufs;
3153ac27a0ecSDave Kleikamp 	handle_t *handle = NULL;
3154ac27a0ecSDave Kleikamp 	int ret = 0;
3155ac27a0ecSDave Kleikamp 	int err;
3156ac27a0ecSDave Kleikamp 
3157f0e6c985SAneesh Kumar K.V 	ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
3158f0e6c985SAneesh Kumar K.V 					ext4_normal_get_block_write);
3159cf108bcaSJan Kara 	if (ret != 0)
3160cf108bcaSJan Kara 		goto out_unlock;
3161cf108bcaSJan Kara 
3162cf108bcaSJan Kara 	page_bufs = page_buffers(page);
3163cf108bcaSJan Kara 	walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE, NULL,
3164cf108bcaSJan Kara 								bget_one);
3165cf108bcaSJan Kara 	/* As soon as we unlock the page, it can go away, but we have
3166cf108bcaSJan Kara 	 * references to buffers so we are safe */
3167cf108bcaSJan Kara 	unlock_page(page);
3168ac27a0ecSDave Kleikamp 
3169617ba13bSMingming Cao 	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
3170ac27a0ecSDave Kleikamp 	if (IS_ERR(handle)) {
3171ac27a0ecSDave Kleikamp 		ret = PTR_ERR(handle);
3172cf108bcaSJan Kara 		goto out;
3173ac27a0ecSDave Kleikamp 	}
3174ac27a0ecSDave Kleikamp 
3175cf108bcaSJan Kara 	ret = walk_page_buffers(handle, page_bufs, 0,
3176cf108bcaSJan Kara 			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
3177ac27a0ecSDave Kleikamp 
3178cf108bcaSJan Kara 	err = walk_page_buffers(handle, page_bufs, 0,
3179cf108bcaSJan Kara 				PAGE_CACHE_SIZE, NULL, write_end_fn);
3180cf108bcaSJan Kara 	if (ret == 0)
3181cf108bcaSJan Kara 		ret = err;
3182617ba13bSMingming Cao 	err = ext4_journal_stop(handle);
3183ac27a0ecSDave Kleikamp 	if (!ret)
3184ac27a0ecSDave Kleikamp 		ret = err;
3185ac27a0ecSDave Kleikamp 
3186cf108bcaSJan Kara 	walk_page_buffers(handle, page_bufs, 0,
3187cf108bcaSJan Kara 				PAGE_CACHE_SIZE, NULL, bput_one);
3188cf108bcaSJan Kara 	EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
3189cf108bcaSJan Kara 	goto out;
3190cf108bcaSJan Kara 
3191cf108bcaSJan Kara out_unlock:
3192ac27a0ecSDave Kleikamp 	unlock_page(page);
3193cf108bcaSJan Kara out:
3194ac27a0ecSDave Kleikamp 	return ret;
3195ac27a0ecSDave Kleikamp }
3196ac27a0ecSDave Kleikamp 
3197617ba13bSMingming Cao static int ext4_journalled_writepage(struct page *page,
3198ac27a0ecSDave Kleikamp 				struct writeback_control *wbc)
3199ac27a0ecSDave Kleikamp {
3200ac27a0ecSDave Kleikamp 	struct inode *inode = page->mapping->host;
3201cf108bcaSJan Kara 	loff_t size = i_size_read(inode);
3202cf108bcaSJan Kara 	loff_t len;
3203cf108bcaSJan Kara 
3204ba80b101STheodore Ts'o 	trace_mark(ext4_journalled_writepage,
3205ba80b101STheodore Ts'o 		   "dev %s ino %lu page_index %lu",
3206ba80b101STheodore Ts'o 		   inode->i_sb->s_id, inode->i_ino, page->index);
3207cf108bcaSJan Kara 	J_ASSERT(PageLocked(page));
3208cf108bcaSJan Kara 	if (page->index == size >> PAGE_CACHE_SHIFT)
3209cf108bcaSJan Kara 		len = size & ~PAGE_CACHE_MASK;
3210cf108bcaSJan Kara 	else
3211cf108bcaSJan Kara 		len = PAGE_CACHE_SIZE;
3212f0e6c985SAneesh Kumar K.V 
3213f0e6c985SAneesh Kumar K.V 	if (page_has_buffers(page)) {
3214f0e6c985SAneesh Kumar K.V 		/* If the page has buffers, they should all be mapped
3215f0e6c985SAneesh Kumar K.V 		 * and allocated. If there are no buffers attached
3216f0e6c985SAneesh Kumar K.V 		 * to the page, we know the page is dirty but it lost
3217f0e6c985SAneesh Kumar K.V 		 * its buffers. That means that at some moment in time
3218f0e6c985SAneesh Kumar K.V 		 * after write_begin() / write_end() was called,
3219f0e6c985SAneesh Kumar K.V 		 * all buffers were clean and thus they must have been
3220f0e6c985SAneesh Kumar K.V 		 * written at least once. So they are all mapped and we can
3221f0e6c985SAneesh Kumar K.V 		 * happily proceed with mapping them and writing the page.
3222f0e6c985SAneesh Kumar K.V 		 */
3223cf108bcaSJan Kara 		BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
3224cf108bcaSJan Kara 					ext4_bh_unmapped_or_delay));
3225f0e6c985SAneesh Kumar K.V 	}
3226ac27a0ecSDave Kleikamp 
3227617ba13bSMingming Cao 	if (ext4_journal_current_handle())
3228ac27a0ecSDave Kleikamp 		goto no_write;
3229ac27a0ecSDave Kleikamp 
3230cf108bcaSJan Kara 	if (PageChecked(page)) {
3231ac27a0ecSDave Kleikamp 		/*
3232ac27a0ecSDave Kleikamp 		 * It's mmapped pagecache.  Add buffers and journal it.  There
3233ac27a0ecSDave Kleikamp 		 * doesn't seem much point in redirtying the page here.
3234ac27a0ecSDave Kleikamp 		 */
3235ac27a0ecSDave Kleikamp 		ClearPageChecked(page);
3236cf108bcaSJan Kara 		return __ext4_journalled_writepage(page, wbc);
3237ac27a0ecSDave Kleikamp 	} else {
3238ac27a0ecSDave Kleikamp 		/*
3239ac27a0ecSDave Kleikamp 		 * It may be a page full of checkpoint-mode buffers.  We don't
3240ac27a0ecSDave Kleikamp 		 * really know unless we go poke around in the buffer_heads.
3241ac27a0ecSDave Kleikamp 		 * But block_write_full_page will do the right thing.
3242ac27a0ecSDave Kleikamp 		 */
3243f0e6c985SAneesh Kumar K.V 		return block_write_full_page(page,
3244f0e6c985SAneesh Kumar K.V 						ext4_normal_get_block_write,
3245f0e6c985SAneesh Kumar K.V 						wbc);
3246ac27a0ecSDave Kleikamp 	}
3247ac27a0ecSDave Kleikamp no_write:
3248ac27a0ecSDave Kleikamp 	redirty_page_for_writepage(wbc, page);
3249ac27a0ecSDave Kleikamp 	unlock_page(page);
3250cf108bcaSJan Kara 	return 0;
3251ac27a0ecSDave Kleikamp }
3252ac27a0ecSDave Kleikamp 
3253617ba13bSMingming Cao static int ext4_readpage(struct file *file, struct page *page)
3254ac27a0ecSDave Kleikamp {
3255617ba13bSMingming Cao 	return mpage_readpage(page, ext4_get_block);
3256ac27a0ecSDave Kleikamp }
3257ac27a0ecSDave Kleikamp 
3258ac27a0ecSDave Kleikamp static int
3259617ba13bSMingming Cao ext4_readpages(struct file *file, struct address_space *mapping,
3260ac27a0ecSDave Kleikamp 		struct list_head *pages, unsigned nr_pages)
3261ac27a0ecSDave Kleikamp {
3262617ba13bSMingming Cao 	return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
3263ac27a0ecSDave Kleikamp }
3264ac27a0ecSDave Kleikamp 
3265617ba13bSMingming Cao static void ext4_invalidatepage(struct page *page, unsigned long offset)
3266ac27a0ecSDave Kleikamp {
3267617ba13bSMingming Cao 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3268ac27a0ecSDave Kleikamp 
3269ac27a0ecSDave Kleikamp 	/*
3270ac27a0ecSDave Kleikamp 	 * If it's a full truncate we just forget about the pending dirtying
3271ac27a0ecSDave Kleikamp 	 */
3272ac27a0ecSDave Kleikamp 	if (offset == 0)
3273ac27a0ecSDave Kleikamp 		ClearPageChecked(page);
3274ac27a0ecSDave Kleikamp 
32750390131bSFrank Mayhar 	if (journal)
3276dab291afSMingming Cao 		jbd2_journal_invalidatepage(journal, page, offset);
32770390131bSFrank Mayhar 	else
32780390131bSFrank Mayhar 		block_invalidatepage(page, offset);
3279ac27a0ecSDave Kleikamp }
3280ac27a0ecSDave Kleikamp 
3281617ba13bSMingming Cao static int ext4_releasepage(struct page *page, gfp_t wait)
3282ac27a0ecSDave Kleikamp {
3283617ba13bSMingming Cao 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3284ac27a0ecSDave Kleikamp 
3285ac27a0ecSDave Kleikamp 	WARN_ON(PageChecked(page));
3286ac27a0ecSDave Kleikamp 	if (!page_has_buffers(page))
3287ac27a0ecSDave Kleikamp 		return 0;
32880390131bSFrank Mayhar 	if (journal)
3289dab291afSMingming Cao 		return jbd2_journal_try_to_free_buffers(journal, page, wait);
32900390131bSFrank Mayhar 	else
32910390131bSFrank Mayhar 		return try_to_free_buffers(page);
3292ac27a0ecSDave Kleikamp }
3293ac27a0ecSDave Kleikamp 
3294ac27a0ecSDave Kleikamp /*
3295ac27a0ecSDave Kleikamp  * If the O_DIRECT write will extend the file then add this inode to the
3296ac27a0ecSDave Kleikamp  * orphan list.  So recovery will truncate it back to the original size
3297ac27a0ecSDave Kleikamp  * if the machine crashes during the write.
3298ac27a0ecSDave Kleikamp  *
3299ac27a0ecSDave Kleikamp  * If the O_DIRECT write is instantiating holes inside i_size and the machine
33007fb5409dSJan Kara  * crashes then stale disk data _may_ be exposed inside the file. But the
33017fb5409dSJan Kara  * current VFS code falls back to the buffered path in that case, so we are safe.
3302ac27a0ecSDave Kleikamp  */
3303617ba13bSMingming Cao static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
3304ac27a0ecSDave Kleikamp 			const struct iovec *iov, loff_t offset,
3305ac27a0ecSDave Kleikamp 			unsigned long nr_segs)
3306ac27a0ecSDave Kleikamp {
3307ac27a0ecSDave Kleikamp 	struct file *file = iocb->ki_filp;
3308ac27a0ecSDave Kleikamp 	struct inode *inode = file->f_mapping->host;
3309617ba13bSMingming Cao 	struct ext4_inode_info *ei = EXT4_I(inode);
33107fb5409dSJan Kara 	handle_t *handle;
3311ac27a0ecSDave Kleikamp 	ssize_t ret;
3312ac27a0ecSDave Kleikamp 	int orphan = 0;
3313ac27a0ecSDave Kleikamp 	size_t count = iov_length(iov, nr_segs);
3314ac27a0ecSDave Kleikamp 
3315ac27a0ecSDave Kleikamp 	if (rw == WRITE) {
3316ac27a0ecSDave Kleikamp 		loff_t final_size = offset + count;
3317ac27a0ecSDave Kleikamp 
33187fb5409dSJan Kara 		if (final_size > inode->i_size) {
33197fb5409dSJan Kara 			/* Credits for sb + inode write */
33207fb5409dSJan Kara 			handle = ext4_journal_start(inode, 2);
3321ac27a0ecSDave Kleikamp 			if (IS_ERR(handle)) {
3322ac27a0ecSDave Kleikamp 				ret = PTR_ERR(handle);
3323ac27a0ecSDave Kleikamp 				goto out;
3324ac27a0ecSDave Kleikamp 			}
3325617ba13bSMingming Cao 			ret = ext4_orphan_add(handle, inode);
33267fb5409dSJan Kara 			if (ret) {
33277fb5409dSJan Kara 				ext4_journal_stop(handle);
33287fb5409dSJan Kara 				goto out;
33297fb5409dSJan Kara 			}
3330ac27a0ecSDave Kleikamp 			orphan = 1;
3331ac27a0ecSDave Kleikamp 			ei->i_disksize = inode->i_size;
33327fb5409dSJan Kara 			ext4_journal_stop(handle);
3333ac27a0ecSDave Kleikamp 		}
3334ac27a0ecSDave Kleikamp 	}
3335ac27a0ecSDave Kleikamp 
3336ac27a0ecSDave Kleikamp 	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
3337ac27a0ecSDave Kleikamp 				 offset, nr_segs,
3338617ba13bSMingming Cao 				 ext4_get_block, NULL);
3339ac27a0ecSDave Kleikamp 
33407fb5409dSJan Kara 	if (orphan) {
3341ac27a0ecSDave Kleikamp 		int err;
3342ac27a0ecSDave Kleikamp 
33437fb5409dSJan Kara 		/* Credits for sb + inode write */
33447fb5409dSJan Kara 		handle = ext4_journal_start(inode, 2);
33457fb5409dSJan Kara 		if (IS_ERR(handle)) {
33467fb5409dSJan Kara 			/* This is really bad luck. We've written the data
33477fb5409dSJan Kara 			 * but cannot extend i_size. Bail out and pretend
33487fb5409dSJan Kara 			 * the write failed... */
33497fb5409dSJan Kara 			ret = PTR_ERR(handle);
33507fb5409dSJan Kara 			goto out;
33517fb5409dSJan Kara 		}
33527fb5409dSJan Kara 		if (inode->i_nlink)
3353617ba13bSMingming Cao 			ext4_orphan_del(handle, inode);
33547fb5409dSJan Kara 		if (ret > 0) {
3355ac27a0ecSDave Kleikamp 			loff_t end = offset + ret;
3356ac27a0ecSDave Kleikamp 			if (end > inode->i_size) {
3357ac27a0ecSDave Kleikamp 				ei->i_disksize = end;
3358ac27a0ecSDave Kleikamp 				i_size_write(inode, end);
3359ac27a0ecSDave Kleikamp 				/*
3360ac27a0ecSDave Kleikamp 				 * We're going to return a positive `ret'
3361ac27a0ecSDave Kleikamp 				 * here due to non-zero-length I/O, so there's
3362ac27a0ecSDave Kleikamp 				 * no way of reporting error returns from
3363617ba13bSMingming Cao 				 * ext4_mark_inode_dirty() to userspace.  So
3364ac27a0ecSDave Kleikamp 				 * ignore it.
3365ac27a0ecSDave Kleikamp 				 */
3366617ba13bSMingming Cao 				ext4_mark_inode_dirty(handle, inode);
3367ac27a0ecSDave Kleikamp 			}
3368ac27a0ecSDave Kleikamp 		}
3369617ba13bSMingming Cao 		err = ext4_journal_stop(handle);
3370ac27a0ecSDave Kleikamp 		if (ret == 0)
3371ac27a0ecSDave Kleikamp 			ret = err;
3372ac27a0ecSDave Kleikamp 	}
3373ac27a0ecSDave Kleikamp out:
3374ac27a0ecSDave Kleikamp 	return ret;
3375ac27a0ecSDave Kleikamp }
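/*
 * For illustration only: a minimal userspace sketch (hypothetical file name,
 * assuming an ext4 mount and a 4KB logical block size) of the size-extending
 * O_DIRECT write that the orphan-list handling above protects:
 *
 *	#define _GNU_SOURCE		// for O_DIRECT
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		void *buf;
 *		int fd = open("dio_test", O_WRONLY | O_CREAT | O_TRUNC | O_DIRECT, 0644);
 *
 *		if (fd < 0 || posix_memalign(&buf, 4096, 4096))
 *			return 1;
 *		memset(buf, 0xab, 4096);
 *		// extends i_size from 0 to 4096, so the inode goes on the
 *		// orphan list before the direct I/O is issued
 *		if (write(fd, buf, 4096) != 4096)
 *			return 1;
 *		free(buf);
 *		close(fd);
 *		return 0;
 *	}
 */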
3376ac27a0ecSDave Kleikamp 
3377ac27a0ecSDave Kleikamp /*
3378617ba13bSMingming Cao  * Pages can be marked dirty completely asynchronously from ext4's journalling
3379ac27a0ecSDave Kleikamp  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
3380ac27a0ecSDave Kleikamp  * much here because ->set_page_dirty is called under VFS locks.  The page is
3381ac27a0ecSDave Kleikamp  * not necessarily locked.
3382ac27a0ecSDave Kleikamp  *
3383ac27a0ecSDave Kleikamp  * We cannot just dirty the page and leave attached buffers clean, because the
3384ac27a0ecSDave Kleikamp  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
3385ac27a0ecSDave Kleikamp  * or jbddirty because all the journalling code will explode.
3386ac27a0ecSDave Kleikamp  *
3387ac27a0ecSDave Kleikamp  * So what we do is to mark the page "pending dirty" and next time writepage
3388ac27a0ecSDave Kleikamp  * is called, propagate that into the buffers appropriately.
3389ac27a0ecSDave Kleikamp  */
3390617ba13bSMingming Cao static int ext4_journalled_set_page_dirty(struct page *page)
3391ac27a0ecSDave Kleikamp {
3392ac27a0ecSDave Kleikamp 	SetPageChecked(page);
3393ac27a0ecSDave Kleikamp 	return __set_page_dirty_nobuffers(page);
3394ac27a0ecSDave Kleikamp }
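/*
 * For illustration only: a minimal userspace sketch (hypothetical file name,
 * assuming a data=journal ext4 mount) of the mmap-dirtying case handled
 * above; the store through the mapping dirties the page without touching its
 * buffers, and writeback later finds PageChecked set:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char *p;
 *		int fd = open("mmap_test", O_RDWR | O_CREAT, 0644);
 *
 *		if (fd < 0 || ftruncate(fd, 4096))
 *			return 1;
 *		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *		if (p == MAP_FAILED)
 *			return 1;
 *		memcpy(p, "hello", 5);		// dirties the mmapped page
 *		msync(p, 4096, MS_SYNC);	// forces writepage on that page
 *		munmap(p, 4096);
 *		close(fd);
 *		return 0;
 *	}
 */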
3395ac27a0ecSDave Kleikamp 
3396617ba13bSMingming Cao static const struct address_space_operations ext4_ordered_aops = {
3397617ba13bSMingming Cao 	.readpage		= ext4_readpage,
3398617ba13bSMingming Cao 	.readpages		= ext4_readpages,
3399678aaf48SJan Kara 	.writepage		= ext4_normal_writepage,
3400ac27a0ecSDave Kleikamp 	.sync_page		= block_sync_page,
3401bfc1af65SNick Piggin 	.write_begin		= ext4_write_begin,
3402bfc1af65SNick Piggin 	.write_end		= ext4_ordered_write_end,
3403617ba13bSMingming Cao 	.bmap			= ext4_bmap,
3404617ba13bSMingming Cao 	.invalidatepage		= ext4_invalidatepage,
3405617ba13bSMingming Cao 	.releasepage		= ext4_releasepage,
3406617ba13bSMingming Cao 	.direct_IO		= ext4_direct_IO,
3407ac27a0ecSDave Kleikamp 	.migratepage		= buffer_migrate_page,
34088ab22b9aSHisashi Hifumi 	.is_partially_uptodate  = block_is_partially_uptodate,
3409ac27a0ecSDave Kleikamp };
3410ac27a0ecSDave Kleikamp 
3411617ba13bSMingming Cao static const struct address_space_operations ext4_writeback_aops = {
3412617ba13bSMingming Cao 	.readpage		= ext4_readpage,
3413617ba13bSMingming Cao 	.readpages		= ext4_readpages,
3414678aaf48SJan Kara 	.writepage		= ext4_normal_writepage,
3415ac27a0ecSDave Kleikamp 	.sync_page		= block_sync_page,
3416bfc1af65SNick Piggin 	.write_begin		= ext4_write_begin,
3417bfc1af65SNick Piggin 	.write_end		= ext4_writeback_write_end,
3418617ba13bSMingming Cao 	.bmap			= ext4_bmap,
3419617ba13bSMingming Cao 	.invalidatepage		= ext4_invalidatepage,
3420617ba13bSMingming Cao 	.releasepage		= ext4_releasepage,
3421617ba13bSMingming Cao 	.direct_IO		= ext4_direct_IO,
3422ac27a0ecSDave Kleikamp 	.migratepage		= buffer_migrate_page,
34238ab22b9aSHisashi Hifumi 	.is_partially_uptodate  = block_is_partially_uptodate,
3424ac27a0ecSDave Kleikamp };
3425ac27a0ecSDave Kleikamp 
3426617ba13bSMingming Cao static const struct address_space_operations ext4_journalled_aops = {
3427617ba13bSMingming Cao 	.readpage		= ext4_readpage,
3428617ba13bSMingming Cao 	.readpages		= ext4_readpages,
3429617ba13bSMingming Cao 	.writepage		= ext4_journalled_writepage,
3430ac27a0ecSDave Kleikamp 	.sync_page		= block_sync_page,
3431bfc1af65SNick Piggin 	.write_begin		= ext4_write_begin,
3432bfc1af65SNick Piggin 	.write_end		= ext4_journalled_write_end,
3433617ba13bSMingming Cao 	.set_page_dirty		= ext4_journalled_set_page_dirty,
3434617ba13bSMingming Cao 	.bmap			= ext4_bmap,
3435617ba13bSMingming Cao 	.invalidatepage		= ext4_invalidatepage,
3436617ba13bSMingming Cao 	.releasepage		= ext4_releasepage,
34378ab22b9aSHisashi Hifumi 	.is_partially_uptodate  = block_is_partially_uptodate,
3438ac27a0ecSDave Kleikamp };
3439ac27a0ecSDave Kleikamp 
344064769240SAlex Tomas static const struct address_space_operations ext4_da_aops = {
344164769240SAlex Tomas 	.readpage		= ext4_readpage,
344264769240SAlex Tomas 	.readpages		= ext4_readpages,
344364769240SAlex Tomas 	.writepage		= ext4_da_writepage,
344464769240SAlex Tomas 	.writepages		= ext4_da_writepages,
344564769240SAlex Tomas 	.sync_page		= block_sync_page,
344664769240SAlex Tomas 	.write_begin		= ext4_da_write_begin,
344764769240SAlex Tomas 	.write_end		= ext4_da_write_end,
344864769240SAlex Tomas 	.bmap			= ext4_bmap,
344964769240SAlex Tomas 	.invalidatepage		= ext4_da_invalidatepage,
345064769240SAlex Tomas 	.releasepage		= ext4_releasepage,
345164769240SAlex Tomas 	.direct_IO		= ext4_direct_IO,
345264769240SAlex Tomas 	.migratepage		= buffer_migrate_page,
34538ab22b9aSHisashi Hifumi 	.is_partially_uptodate  = block_is_partially_uptodate,
345464769240SAlex Tomas };
345564769240SAlex Tomas 
3456617ba13bSMingming Cao void ext4_set_aops(struct inode *inode)
3457ac27a0ecSDave Kleikamp {
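	/*
	 * A quick summary of the selection below:
	 *
	 *	data=ordered   + delalloc	-> ext4_da_aops
	 *	data=ordered			-> ext4_ordered_aops
	 *	data=writeback + delalloc	-> ext4_da_aops
	 *	data=writeback			-> ext4_writeback_aops
	 *	data=journal			-> ext4_journalled_aops
	 */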
3458cd1aac32SAneesh Kumar K.V 	if (ext4_should_order_data(inode) &&
3459cd1aac32SAneesh Kumar K.V 		test_opt(inode->i_sb, DELALLOC))
3460cd1aac32SAneesh Kumar K.V 		inode->i_mapping->a_ops = &ext4_da_aops;
3461cd1aac32SAneesh Kumar K.V 	else if (ext4_should_order_data(inode))
3462617ba13bSMingming Cao 		inode->i_mapping->a_ops = &ext4_ordered_aops;
346364769240SAlex Tomas 	else if (ext4_should_writeback_data(inode) &&
346464769240SAlex Tomas 		 test_opt(inode->i_sb, DELALLOC))
346564769240SAlex Tomas 		inode->i_mapping->a_ops = &ext4_da_aops;
3466617ba13bSMingming Cao 	else if (ext4_should_writeback_data(inode))
3467617ba13bSMingming Cao 		inode->i_mapping->a_ops = &ext4_writeback_aops;
3468ac27a0ecSDave Kleikamp 	else
3469617ba13bSMingming Cao 		inode->i_mapping->a_ops = &ext4_journalled_aops;
3470ac27a0ecSDave Kleikamp }
3471ac27a0ecSDave Kleikamp 
3472ac27a0ecSDave Kleikamp /*
3473617ba13bSMingming Cao  * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3474ac27a0ecSDave Kleikamp  * up to the end of the block which corresponds to `from'.
3475ac27a0ecSDave Kleikamp  * This is required during truncate. We need to physically zero the tail end
3476ac27a0ecSDave Kleikamp  * of that block so it doesn't yield old data if the file is later grown.
3477ac27a0ecSDave Kleikamp  */
3478cf108bcaSJan Kara int ext4_block_truncate_page(handle_t *handle,
3479ac27a0ecSDave Kleikamp 		struct address_space *mapping, loff_t from)
3480ac27a0ecSDave Kleikamp {
3481617ba13bSMingming Cao 	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
3482ac27a0ecSDave Kleikamp 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
3483725d26d3SAneesh Kumar K.V 	unsigned blocksize, length, pos;
3484725d26d3SAneesh Kumar K.V 	ext4_lblk_t iblock;
3485ac27a0ecSDave Kleikamp 	struct inode *inode = mapping->host;
3486ac27a0ecSDave Kleikamp 	struct buffer_head *bh;
3487cf108bcaSJan Kara 	struct page *page;
3488ac27a0ecSDave Kleikamp 	int err = 0;
3489ac27a0ecSDave Kleikamp 
3490cf108bcaSJan Kara 	page = grab_cache_page(mapping, from >> PAGE_CACHE_SHIFT);
3491cf108bcaSJan Kara 	if (!page)
3492cf108bcaSJan Kara 		return -EINVAL;
3493cf108bcaSJan Kara 
3494ac27a0ecSDave Kleikamp 	blocksize = inode->i_sb->s_blocksize;
3495ac27a0ecSDave Kleikamp 	length = blocksize - (offset & (blocksize - 1));
3496ac27a0ecSDave Kleikamp 	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
3497ac27a0ecSDave Kleikamp 
3498ac27a0ecSDave Kleikamp 	/*
3499ac27a0ecSDave Kleikamp 	 * For the "nobh" option, we can only work if we don't need to
3500ac27a0ecSDave Kleikamp 	 * read in the page - otherwise we create buffers to do the IO.
3501ac27a0ecSDave Kleikamp 	 */
3502ac27a0ecSDave Kleikamp 	if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
3503617ba13bSMingming Cao 	     ext4_should_writeback_data(inode) && PageUptodate(page)) {
3504eebd2aa3SChristoph Lameter 		zero_user(page, offset, length);
3505ac27a0ecSDave Kleikamp 		set_page_dirty(page);
3506ac27a0ecSDave Kleikamp 		goto unlock;
3507ac27a0ecSDave Kleikamp 	}
3508ac27a0ecSDave Kleikamp 
3509ac27a0ecSDave Kleikamp 	if (!page_has_buffers(page))
3510ac27a0ecSDave Kleikamp 		create_empty_buffers(page, blocksize, 0);
3511ac27a0ecSDave Kleikamp 
3512ac27a0ecSDave Kleikamp 	/* Find the buffer that contains "offset" */
3513ac27a0ecSDave Kleikamp 	bh = page_buffers(page);
3514ac27a0ecSDave Kleikamp 	pos = blocksize;
3515ac27a0ecSDave Kleikamp 	while (offset >= pos) {
3516ac27a0ecSDave Kleikamp 		bh = bh->b_this_page;
3517ac27a0ecSDave Kleikamp 		iblock++;
3518ac27a0ecSDave Kleikamp 		pos += blocksize;
3519ac27a0ecSDave Kleikamp 	}
3520ac27a0ecSDave Kleikamp 
3521ac27a0ecSDave Kleikamp 	err = 0;
3522ac27a0ecSDave Kleikamp 	if (buffer_freed(bh)) {
3523ac27a0ecSDave Kleikamp 		BUFFER_TRACE(bh, "freed: skip");
3524ac27a0ecSDave Kleikamp 		goto unlock;
3525ac27a0ecSDave Kleikamp 	}
3526ac27a0ecSDave Kleikamp 
3527ac27a0ecSDave Kleikamp 	if (!buffer_mapped(bh)) {
3528ac27a0ecSDave Kleikamp 		BUFFER_TRACE(bh, "unmapped");
3529617ba13bSMingming Cao 		ext4_get_block(inode, iblock, bh, 0);
3530ac27a0ecSDave Kleikamp 		/* unmapped? It's a hole - nothing to do */
3531ac27a0ecSDave Kleikamp 		if (!buffer_mapped(bh)) {
3532ac27a0ecSDave Kleikamp 			BUFFER_TRACE(bh, "still unmapped");
3533ac27a0ecSDave Kleikamp 			goto unlock;
3534ac27a0ecSDave Kleikamp 		}
3535ac27a0ecSDave Kleikamp 	}
3536ac27a0ecSDave Kleikamp 
3537ac27a0ecSDave Kleikamp 	/* Ok, it's mapped. Make sure it's up-to-date */
3538ac27a0ecSDave Kleikamp 	if (PageUptodate(page))
3539ac27a0ecSDave Kleikamp 		set_buffer_uptodate(bh);
3540ac27a0ecSDave Kleikamp 
3541ac27a0ecSDave Kleikamp 	if (!buffer_uptodate(bh)) {
3542ac27a0ecSDave Kleikamp 		err = -EIO;
3543ac27a0ecSDave Kleikamp 		ll_rw_block(READ, 1, &bh);
3544ac27a0ecSDave Kleikamp 		wait_on_buffer(bh);
3545ac27a0ecSDave Kleikamp 		/* Uhhuh. Read error. Complain and punt. */
3546ac27a0ecSDave Kleikamp 		if (!buffer_uptodate(bh))
3547ac27a0ecSDave Kleikamp 			goto unlock;
3548ac27a0ecSDave Kleikamp 	}
3549ac27a0ecSDave Kleikamp 
3550617ba13bSMingming Cao 	if (ext4_should_journal_data(inode)) {
3551ac27a0ecSDave Kleikamp 		BUFFER_TRACE(bh, "get write access");
3552617ba13bSMingming Cao 		err = ext4_journal_get_write_access(handle, bh);
3553ac27a0ecSDave Kleikamp 		if (err)
3554ac27a0ecSDave Kleikamp 			goto unlock;
3555ac27a0ecSDave Kleikamp 	}
3556ac27a0ecSDave Kleikamp 
3557eebd2aa3SChristoph Lameter 	zero_user(page, offset, length);
3558ac27a0ecSDave Kleikamp 
3559ac27a0ecSDave Kleikamp 	BUFFER_TRACE(bh, "zeroed end of block");
3560ac27a0ecSDave Kleikamp 
3561ac27a0ecSDave Kleikamp 	err = 0;
3562617ba13bSMingming Cao 	if (ext4_should_journal_data(inode)) {
35630390131bSFrank Mayhar 		err = ext4_handle_dirty_metadata(handle, inode, bh);
3564ac27a0ecSDave Kleikamp 	} else {
3565617ba13bSMingming Cao 		if (ext4_should_order_data(inode))
3566678aaf48SJan Kara 			err = ext4_jbd2_file_inode(handle, inode);
3567ac27a0ecSDave Kleikamp 		mark_buffer_dirty(bh);
3568ac27a0ecSDave Kleikamp 	}
3569ac27a0ecSDave Kleikamp 
3570ac27a0ecSDave Kleikamp unlock:
3571ac27a0ecSDave Kleikamp 	unlock_page(page);
3572ac27a0ecSDave Kleikamp 	page_cache_release(page);
3573ac27a0ecSDave Kleikamp 	return err;
3574ac27a0ecSDave Kleikamp }
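/*
 * For illustration only: a minimal userspace sketch (hypothetical file name,
 * assuming a 4KB block size) of the partial-block truncate that needs
 * ext4_block_truncate_page(); the new size is not block-aligned, so the tail
 * of the last block must be zeroed:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("trunc_test", O_RDWR | O_CREAT, 0644);
 *
 *		if (fd < 0)
 *			return 1;
 *		// grow the file, then cut it back to 100 bytes into a block
 *		if (ftruncate(fd, 8192) || ftruncate(fd, 4096 + 100))
 *			return 1;
 *		close(fd);
 *		return 0;
 *	}
 */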
3575ac27a0ecSDave Kleikamp 
3576ac27a0ecSDave Kleikamp /*
3577ac27a0ecSDave Kleikamp  * Probably it should be a library function... search for first non-zero word
3578ac27a0ecSDave Kleikamp  * or memcmp with zero_page, whatever is better for particular architecture.
3579ac27a0ecSDave Kleikamp  * or memcmp with zero_page, whatever is better for a particular architecture.
3580ac27a0ecSDave Kleikamp  */
3581ac27a0ecSDave Kleikamp static inline int all_zeroes(__le32 *p, __le32 *q)
3582ac27a0ecSDave Kleikamp {
3583ac27a0ecSDave Kleikamp 	while (p < q)
3584ac27a0ecSDave Kleikamp 		if (*p++)
3585ac27a0ecSDave Kleikamp 			return 0;
3586ac27a0ecSDave Kleikamp 	return 1;
3587ac27a0ecSDave Kleikamp }
3588ac27a0ecSDave Kleikamp 
3589ac27a0ecSDave Kleikamp /**
3590617ba13bSMingming Cao  *	ext4_find_shared - find the indirect blocks for partial truncation.
3591ac27a0ecSDave Kleikamp  *	@inode:	  inode in question
3592ac27a0ecSDave Kleikamp  *	@depth:	  depth of the affected branch
3593617ba13bSMingming Cao  *	@offsets: offsets of pointers in that branch (see ext4_block_to_path)
3594ac27a0ecSDave Kleikamp  *	@chain:	  place to store the pointers to partial indirect blocks
3595ac27a0ecSDave Kleikamp  *	@top:	  place to the (detached) top of branch
3596ac27a0ecSDave Kleikamp  *
3597617ba13bSMingming Cao  *	This is a helper function used by ext4_truncate().
3598ac27a0ecSDave Kleikamp  *
3599ac27a0ecSDave Kleikamp  *	When we do truncate() we may have to clean the ends of several
3600ac27a0ecSDave Kleikamp  *	indirect blocks but leave the blocks themselves alive. A block is
3601ac27a0ecSDave Kleikamp  *	partially truncated if some data below the new i_size is referenced
3602ac27a0ecSDave Kleikamp  *	from it (and it is on the path to the first completely truncated
3603ac27a0ecSDave Kleikamp  *	data block, indeed).  We have to free the top of that path along
3604ac27a0ecSDave Kleikamp  *	with everything to the right of the path. Since no allocation
3605617ba13bSMingming Cao  *	past the truncation point is possible until ext4_truncate()
3606ac27a0ecSDave Kleikamp  *	finishes, we may safely do the latter, but the top of the branch may
3607ac27a0ecSDave Kleikamp  *	require special attention - pageout below the truncation point
3608ac27a0ecSDave Kleikamp  *	might try to populate it.
3609ac27a0ecSDave Kleikamp  *
3610ac27a0ecSDave Kleikamp  *	We atomically detach the top of branch from the tree, store the
3611ac27a0ecSDave Kleikamp  *	block number of its root in *@top, pointers to buffer_heads of
3612ac27a0ecSDave Kleikamp  *	partially truncated blocks - in @chain[].bh and pointers to
3613ac27a0ecSDave Kleikamp  *	their last elements that should not be removed - in
3614ac27a0ecSDave Kleikamp  *	@chain[].p. Return value is the pointer to last filled element
3615ac27a0ecSDave Kleikamp  *	of @chain.
3616ac27a0ecSDave Kleikamp  *
3617ac27a0ecSDave Kleikamp  *	The work left to caller to do the actual freeing of subtrees:
3618ac27a0ecSDave Kleikamp  *		a) free the subtree starting from *@top
3619ac27a0ecSDave Kleikamp  *		b) free the subtrees whose roots are stored in
3620ac27a0ecSDave Kleikamp  *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
3621ac27a0ecSDave Kleikamp  *		c) free the subtrees growing from the inode past the @chain[0].
3622ac27a0ecSDave Kleikamp  *			(no partially truncated stuff there).  */
3623ac27a0ecSDave Kleikamp 
3624617ba13bSMingming Cao static Indirect *ext4_find_shared(struct inode *inode, int depth,
3625725d26d3SAneesh Kumar K.V 			ext4_lblk_t offsets[4], Indirect chain[4], __le32 *top)
3626ac27a0ecSDave Kleikamp {
3627ac27a0ecSDave Kleikamp 	Indirect *partial, *p;
3628ac27a0ecSDave Kleikamp 	int k, err;
3629ac27a0ecSDave Kleikamp 
3630ac27a0ecSDave Kleikamp 	*top = 0;
3631ac27a0ecSDave Kleikamp 	/* Make k index the deepest non-null offset + 1 */
3632ac27a0ecSDave Kleikamp 	for (k = depth; k > 1 && !offsets[k-1]; k--)
3633ac27a0ecSDave Kleikamp 		;
3634617ba13bSMingming Cao 	partial = ext4_get_branch(inode, k, offsets, chain, &err);
3635ac27a0ecSDave Kleikamp 	/* Writer: pointers */
3636ac27a0ecSDave Kleikamp 	if (!partial)
3637ac27a0ecSDave Kleikamp 		partial = chain + k-1;
3638ac27a0ecSDave Kleikamp 	/*
3639ac27a0ecSDave Kleikamp 	 * If the branch acquired continuation since we've looked at it -
3640ac27a0ecSDave Kleikamp 	 * fine, it should all survive and (new) top doesn't belong to us.
3641ac27a0ecSDave Kleikamp 	 */
3642ac27a0ecSDave Kleikamp 	if (!partial->key && *partial->p)
3643ac27a0ecSDave Kleikamp 		/* Writer: end */
3644ac27a0ecSDave Kleikamp 		goto no_top;
3645af5bc92dSTheodore Ts'o 	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
3646ac27a0ecSDave Kleikamp 		;
3647ac27a0ecSDave Kleikamp 	/*
3648ac27a0ecSDave Kleikamp 	 * OK, we've found the last block that must survive. The rest of our
3649ac27a0ecSDave Kleikamp 	 * branch should be detached before unlocking. However, if that rest
3650ac27a0ecSDave Kleikamp 	 * of branch is all ours and does not grow immediately from the inode
3651ac27a0ecSDave Kleikamp 	 * it's easier to cheat and just decrement partial->p.
3652ac27a0ecSDave Kleikamp 	 */
3653ac27a0ecSDave Kleikamp 	if (p == chain + k - 1 && p > chain) {
3654ac27a0ecSDave Kleikamp 		p->p--;
3655ac27a0ecSDave Kleikamp 	} else {
3656ac27a0ecSDave Kleikamp 		*top = *p->p;
3657617ba13bSMingming Cao 		/* Nope, don't do this in ext4.  Must leave the tree intact */
3658ac27a0ecSDave Kleikamp #if 0
3659ac27a0ecSDave Kleikamp 		*p->p = 0;
3660ac27a0ecSDave Kleikamp #endif
3661ac27a0ecSDave Kleikamp 	}
3662ac27a0ecSDave Kleikamp 	/* Writer: end */
3663ac27a0ecSDave Kleikamp 
3664ac27a0ecSDave Kleikamp 	while (partial > p) {
3665ac27a0ecSDave Kleikamp 		brelse(partial->bh);
3666ac27a0ecSDave Kleikamp 		partial--;
3667ac27a0ecSDave Kleikamp 	}
3668ac27a0ecSDave Kleikamp no_top:
3669ac27a0ecSDave Kleikamp 	return partial;
3670ac27a0ecSDave Kleikamp }
3671ac27a0ecSDave Kleikamp 
3672ac27a0ecSDave Kleikamp /*
3673ac27a0ecSDave Kleikamp  * Zero a number of block pointers in either an inode or an indirect block.
3674ac27a0ecSDave Kleikamp  * If we restart the transaction we must again get write access to the
3675ac27a0ecSDave Kleikamp  * indirect block for further modification.
3676ac27a0ecSDave Kleikamp  *
3677ac27a0ecSDave Kleikamp  * We release `count' blocks on disk, but (last - first) may be greater
3678ac27a0ecSDave Kleikamp  * than `count' because there can be holes in there.
3679ac27a0ecSDave Kleikamp  */
3680617ba13bSMingming Cao static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
3681617ba13bSMingming Cao 		struct buffer_head *bh, ext4_fsblk_t block_to_free,
3682ac27a0ecSDave Kleikamp 		unsigned long count, __le32 *first, __le32 *last)
3683ac27a0ecSDave Kleikamp {
3684ac27a0ecSDave Kleikamp 	__le32 *p;
3685ac27a0ecSDave Kleikamp 	if (try_to_extend_transaction(handle, inode)) {
3686ac27a0ecSDave Kleikamp 		if (bh) {
36870390131bSFrank Mayhar 			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
36880390131bSFrank Mayhar 			ext4_handle_dirty_metadata(handle, inode, bh);
3689ac27a0ecSDave Kleikamp 		}
3690617ba13bSMingming Cao 		ext4_mark_inode_dirty(handle, inode);
3691617ba13bSMingming Cao 		ext4_journal_test_restart(handle, inode);
3692ac27a0ecSDave Kleikamp 		if (bh) {
3693ac27a0ecSDave Kleikamp 			BUFFER_TRACE(bh, "retaking write access");
3694617ba13bSMingming Cao 			ext4_journal_get_write_access(handle, bh);
3695ac27a0ecSDave Kleikamp 		}
3696ac27a0ecSDave Kleikamp 	}
3697ac27a0ecSDave Kleikamp 
3698ac27a0ecSDave Kleikamp 	/*
3699ac27a0ecSDave Kleikamp 	 * Any buffers which are on the journal will be in memory. We find
3700dab291afSMingming Cao 	 * them on the hash table so jbd2_journal_revoke() will run jbd2_journal_forget()
3701ac27a0ecSDave Kleikamp 	 * on them.  We've already detached each block from the file, so
3702dab291afSMingming Cao 	 * bforget() in jbd2_journal_forget() should be safe.
3703ac27a0ecSDave Kleikamp 	 *
3704dab291afSMingming Cao 	 * AKPM: turn on bforget in jbd2_journal_forget()!!!
3705ac27a0ecSDave Kleikamp 	 */
3706ac27a0ecSDave Kleikamp 	for (p = first; p < last; p++) {
3707ac27a0ecSDave Kleikamp 		u32 nr = le32_to_cpu(*p);
3708ac27a0ecSDave Kleikamp 		if (nr) {
37091d03ec98SAneesh Kumar K.V 			struct buffer_head *tbh;
3710ac27a0ecSDave Kleikamp 
3711ac27a0ecSDave Kleikamp 			*p = 0;
37121d03ec98SAneesh Kumar K.V 			tbh = sb_find_get_block(inode->i_sb, nr);
37131d03ec98SAneesh Kumar K.V 			ext4_forget(handle, 0, inode, tbh, nr);
3714ac27a0ecSDave Kleikamp 		}
3715ac27a0ecSDave Kleikamp 	}
3716ac27a0ecSDave Kleikamp 
3717c9de560dSAlex Tomas 	ext4_free_blocks(handle, inode, block_to_free, count, 0);
3718ac27a0ecSDave Kleikamp }
3719ac27a0ecSDave Kleikamp 
3720ac27a0ecSDave Kleikamp /**
3721617ba13bSMingming Cao  * ext4_free_data - free a list of data blocks
3722ac27a0ecSDave Kleikamp  * @handle:	handle for this transaction
3723ac27a0ecSDave Kleikamp  * @inode:	inode we are dealing with
3724ac27a0ecSDave Kleikamp  * @this_bh:	indirect buffer_head which contains *@first and *@last
3725ac27a0ecSDave Kleikamp  * @first:	array of block numbers
3726ac27a0ecSDave Kleikamp  * @last:	points immediately past the end of array
3727ac27a0ecSDave Kleikamp  *
3728ac27a0ecSDave Kleikamp  * We are freeing all blocks referenced from that array (numbers are stored as
3729ac27a0ecSDave Kleikamp  * little-endian 32-bit) and updating @inode->i_blocks appropriately.
3730ac27a0ecSDave Kleikamp  *
3731ac27a0ecSDave Kleikamp  * We accumulate contiguous runs of blocks to free.  Conveniently, if these
3732ac27a0ecSDave Kleikamp  * blocks are contiguous then releasing them at one time will only affect one
3733ac27a0ecSDave Kleikamp  * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
3734ac27a0ecSDave Kleikamp  * actually use a lot of journal space.
3735ac27a0ecSDave Kleikamp  *
3736ac27a0ecSDave Kleikamp  * @this_bh will be %NULL if @first and @last point into the inode's direct
3737ac27a0ecSDave Kleikamp  * block pointers.
3738ac27a0ecSDave Kleikamp  */
3739617ba13bSMingming Cao static void ext4_free_data(handle_t *handle, struct inode *inode,
3740ac27a0ecSDave Kleikamp 			   struct buffer_head *this_bh,
3741ac27a0ecSDave Kleikamp 			   __le32 *first, __le32 *last)
3742ac27a0ecSDave Kleikamp {
3743617ba13bSMingming Cao 	ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
3744ac27a0ecSDave Kleikamp 	unsigned long count = 0;	    /* Number of blocks in the run */
3745ac27a0ecSDave Kleikamp 	__le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
3746ac27a0ecSDave Kleikamp 					       corresponding to
3747ac27a0ecSDave Kleikamp 					       block_to_free */
3748617ba13bSMingming Cao 	ext4_fsblk_t nr;		    /* Current block # */
3749ac27a0ecSDave Kleikamp 	__le32 *p;			    /* Pointer into inode/ind
3750ac27a0ecSDave Kleikamp 					       for current block */
3751ac27a0ecSDave Kleikamp 	int err;
3752ac27a0ecSDave Kleikamp 
3753ac27a0ecSDave Kleikamp 	if (this_bh) {				/* For indirect block */
3754ac27a0ecSDave Kleikamp 		BUFFER_TRACE(this_bh, "get_write_access");
3755617ba13bSMingming Cao 		err = ext4_journal_get_write_access(handle, this_bh);
3756ac27a0ecSDave Kleikamp 		/* Important: if we can't update the indirect pointers
3757ac27a0ecSDave Kleikamp 		 * to the blocks, we can't free them. */
3758ac27a0ecSDave Kleikamp 		if (err)
3759ac27a0ecSDave Kleikamp 			return;
3760ac27a0ecSDave Kleikamp 	}
3761ac27a0ecSDave Kleikamp 
3762ac27a0ecSDave Kleikamp 	for (p = first; p < last; p++) {
3763ac27a0ecSDave Kleikamp 		nr = le32_to_cpu(*p);
3764ac27a0ecSDave Kleikamp 		if (nr) {
3765ac27a0ecSDave Kleikamp 			/* accumulate blocks to free if they're contiguous */
3766ac27a0ecSDave Kleikamp 			if (count == 0) {
3767ac27a0ecSDave Kleikamp 				block_to_free = nr;
3768ac27a0ecSDave Kleikamp 				block_to_free_p = p;
3769ac27a0ecSDave Kleikamp 				count = 1;
3770ac27a0ecSDave Kleikamp 			} else if (nr == block_to_free + count) {
3771ac27a0ecSDave Kleikamp 				count++;
3772ac27a0ecSDave Kleikamp 			} else {
3773617ba13bSMingming Cao 				ext4_clear_blocks(handle, inode, this_bh,
3774ac27a0ecSDave Kleikamp 						  block_to_free,
3775ac27a0ecSDave Kleikamp 						  count, block_to_free_p, p);
3776ac27a0ecSDave Kleikamp 				block_to_free = nr;
3777ac27a0ecSDave Kleikamp 				block_to_free_p = p;
3778ac27a0ecSDave Kleikamp 				count = 1;
3779ac27a0ecSDave Kleikamp 			}
3780ac27a0ecSDave Kleikamp 		}
3781ac27a0ecSDave Kleikamp 	}
3782ac27a0ecSDave Kleikamp 
3783ac27a0ecSDave Kleikamp 	if (count > 0)
3784617ba13bSMingming Cao 		ext4_clear_blocks(handle, inode, this_bh, block_to_free,
3785ac27a0ecSDave Kleikamp 				  count, block_to_free_p, p);
3786ac27a0ecSDave Kleikamp 
3787ac27a0ecSDave Kleikamp 	if (this_bh) {
37880390131bSFrank Mayhar 		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
378971dc8fbcSDuane Griffin 
379071dc8fbcSDuane Griffin 		/*
379171dc8fbcSDuane Griffin 		 * The buffer head should have an attached journal head at this
379271dc8fbcSDuane Griffin 		 * point. However, if the data is corrupted and an indirect
379371dc8fbcSDuane Griffin 		 * block pointed to itself, it would have been detached when
379471dc8fbcSDuane Griffin 		 * the block was cleared. Check for this instead of OOPSing.
379571dc8fbcSDuane Griffin 		 */
3796e7f07968STheodore Ts'o 		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
37970390131bSFrank Mayhar 			ext4_handle_dirty_metadata(handle, inode, this_bh);
379871dc8fbcSDuane Griffin 		else
379971dc8fbcSDuane Griffin 			ext4_error(inode->i_sb, __func__,
380071dc8fbcSDuane Griffin 				   "circular indirect block detected, "
380171dc8fbcSDuane Griffin 				   "inode=%lu, block=%llu",
380271dc8fbcSDuane Griffin 				   inode->i_ino,
380371dc8fbcSDuane Griffin 				   (unsigned long long) this_bh->b_blocknr);
3804ac27a0ecSDave Kleikamp 	}
3805ac27a0ecSDave Kleikamp }
3806ac27a0ecSDave Kleikamp 
3807ac27a0ecSDave Kleikamp /**
3808617ba13bSMingming Cao  *	ext4_free_branches - free an array of branches
3809ac27a0ecSDave Kleikamp  *	@handle: JBD handle for this transaction
3810ac27a0ecSDave Kleikamp  *	@inode:	inode we are dealing with
3811ac27a0ecSDave Kleikamp  *	@parent_bh: the buffer_head which contains *@first and *@last
3812ac27a0ecSDave Kleikamp  *	@first:	array of block numbers
3813ac27a0ecSDave Kleikamp  *	@last:	pointer immediately past the end of array
3814ac27a0ecSDave Kleikamp  *	@depth:	depth of the branches to free
3815ac27a0ecSDave Kleikamp  *
3816ac27a0ecSDave Kleikamp  *	We are freeing all blocks referenced from these branches (numbers are
3817ac27a0ecSDave Kleikamp  *	stored as little-endian 32-bit) and updating @inode->i_blocks
3818ac27a0ecSDave Kleikamp  *	appropriately.
3819ac27a0ecSDave Kleikamp  */
3820617ba13bSMingming Cao static void ext4_free_branches(handle_t *handle, struct inode *inode,
3821ac27a0ecSDave Kleikamp 			       struct buffer_head *parent_bh,
3822ac27a0ecSDave Kleikamp 			       __le32 *first, __le32 *last, int depth)
3823ac27a0ecSDave Kleikamp {
3824617ba13bSMingming Cao 	ext4_fsblk_t nr;
3825ac27a0ecSDave Kleikamp 	__le32 *p;
3826ac27a0ecSDave Kleikamp 
38270390131bSFrank Mayhar 	if (ext4_handle_is_aborted(handle))
3828ac27a0ecSDave Kleikamp 		return;
3829ac27a0ecSDave Kleikamp 
3830ac27a0ecSDave Kleikamp 	if (depth--) {
3831ac27a0ecSDave Kleikamp 		struct buffer_head *bh;
3832617ba13bSMingming Cao 		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
3833ac27a0ecSDave Kleikamp 		p = last;
3834ac27a0ecSDave Kleikamp 		while (--p >= first) {
3835ac27a0ecSDave Kleikamp 			nr = le32_to_cpu(*p);
3836ac27a0ecSDave Kleikamp 			if (!nr)
3837ac27a0ecSDave Kleikamp 				continue;		/* A hole */
3838ac27a0ecSDave Kleikamp 
3839ac27a0ecSDave Kleikamp 			/* Go read the buffer for the next level down */
3840ac27a0ecSDave Kleikamp 			bh = sb_bread(inode->i_sb, nr);
3841ac27a0ecSDave Kleikamp 
3842ac27a0ecSDave Kleikamp 			/*
3843ac27a0ecSDave Kleikamp 			 * A read failure? Report error and clear slot
3844ac27a0ecSDave Kleikamp 			 * (should be rare).
3845ac27a0ecSDave Kleikamp 			 */
3846ac27a0ecSDave Kleikamp 			if (!bh) {
3847617ba13bSMingming Cao 				ext4_error(inode->i_sb, "ext4_free_branches",
38482ae02107SMingming Cao 					   "Read failure, inode=%lu, block=%llu",
3849ac27a0ecSDave Kleikamp 					   inode->i_ino, nr);
3850ac27a0ecSDave Kleikamp 				continue;
3851ac27a0ecSDave Kleikamp 			}
3852ac27a0ecSDave Kleikamp 
3853ac27a0ecSDave Kleikamp 			/* This zaps the entire block.  Bottom up. */
3854ac27a0ecSDave Kleikamp 			BUFFER_TRACE(bh, "free child branches");
3855617ba13bSMingming Cao 			ext4_free_branches(handle, inode, bh,
3856ac27a0ecSDave Kleikamp 					(__le32 *) bh->b_data,
3857ac27a0ecSDave Kleikamp 					(__le32 *) bh->b_data + addr_per_block,
3858ac27a0ecSDave Kleikamp 					depth);
3859ac27a0ecSDave Kleikamp 
3860ac27a0ecSDave Kleikamp 			/*
3861ac27a0ecSDave Kleikamp 			 * We've probably journalled the indirect block several
3862ac27a0ecSDave Kleikamp 			 * times during the truncate.  But it's no longer
3863ac27a0ecSDave Kleikamp 			 * needed and we now drop it from the transaction via
3864dab291afSMingming Cao 			 * jbd2_journal_revoke().
3865ac27a0ecSDave Kleikamp 			 *
3866ac27a0ecSDave Kleikamp 			 * That's easy if it's exclusively part of this
3867ac27a0ecSDave Kleikamp 			 * transaction.  But if it's part of the committing
3868dab291afSMingming Cao 			 * transaction then jbd2_journal_forget() will simply
3869ac27a0ecSDave Kleikamp 			 * brelse() it.  That means that if the underlying
3870617ba13bSMingming Cao 			 * block is reallocated in ext4_get_block(),
3871ac27a0ecSDave Kleikamp 			 * unmap_underlying_metadata() will find this block
3872ac27a0ecSDave Kleikamp 			 * and will try to get rid of it.  damn, damn.
3873ac27a0ecSDave Kleikamp 			 *
3874ac27a0ecSDave Kleikamp 			 * If this block has already been committed to the
3875ac27a0ecSDave Kleikamp 			 * journal, a revoke record will be written.  And
3876ac27a0ecSDave Kleikamp 			 * revoke records must be emitted *before* clearing
3877ac27a0ecSDave Kleikamp 			 * this block's bit in the bitmaps.
3878ac27a0ecSDave Kleikamp 			 */
3879617ba13bSMingming Cao 			ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
3880ac27a0ecSDave Kleikamp 
3881ac27a0ecSDave Kleikamp 			/*
3882ac27a0ecSDave Kleikamp 			 * Everything below this pointer has been
3883ac27a0ecSDave Kleikamp 			 * released.  Now let this top-of-subtree go.
3884ac27a0ecSDave Kleikamp 			 *
3885ac27a0ecSDave Kleikamp 			 * We want the freeing of this indirect block to be
3886ac27a0ecSDave Kleikamp 			 * atomic in the journal with the updating of the
3887ac27a0ecSDave Kleikamp 			 * bitmap block which owns it.  So make some room in
3888ac27a0ecSDave Kleikamp 			 * the journal.
3889ac27a0ecSDave Kleikamp 			 *
3890ac27a0ecSDave Kleikamp 			 * We zero the parent pointer *after* freeing its
3891ac27a0ecSDave Kleikamp 			 * pointee in the bitmaps, so if extend_transaction()
3892ac27a0ecSDave Kleikamp 			 * for some reason fails to put the bitmap changes and
3893ac27a0ecSDave Kleikamp 			 * the release into the same transaction, recovery
3894ac27a0ecSDave Kleikamp 			 * will merely complain about releasing a free block,
3895ac27a0ecSDave Kleikamp 			 * rather than leaking blocks.
3896ac27a0ecSDave Kleikamp 			 */
38970390131bSFrank Mayhar 			if (ext4_handle_is_aborted(handle))
3898ac27a0ecSDave Kleikamp 				return;
3899ac27a0ecSDave Kleikamp 			if (try_to_extend_transaction(handle, inode)) {
3900617ba13bSMingming Cao 				ext4_mark_inode_dirty(handle, inode);
3901617ba13bSMingming Cao 				ext4_journal_test_restart(handle, inode);
3902ac27a0ecSDave Kleikamp 			}
3903ac27a0ecSDave Kleikamp 
3904c9de560dSAlex Tomas 			ext4_free_blocks(handle, inode, nr, 1, 1);
3905ac27a0ecSDave Kleikamp 
3906ac27a0ecSDave Kleikamp 			if (parent_bh) {
3907ac27a0ecSDave Kleikamp 				/*
3908ac27a0ecSDave Kleikamp 				 * The block which we have just freed is
3909ac27a0ecSDave Kleikamp 				 * pointed to by an indirect block: journal it
3910ac27a0ecSDave Kleikamp 				 */
3911ac27a0ecSDave Kleikamp 				BUFFER_TRACE(parent_bh, "get_write_access");
3912617ba13bSMingming Cao 				if (!ext4_journal_get_write_access(handle,
3913ac27a0ecSDave Kleikamp 								   parent_bh)){
3914ac27a0ecSDave Kleikamp 					*p = 0;
3915ac27a0ecSDave Kleikamp 					BUFFER_TRACE(parent_bh,
39160390131bSFrank Mayhar 					"call ext4_handle_dirty_metadata");
39170390131bSFrank Mayhar 					ext4_handle_dirty_metadata(handle,
39180390131bSFrank Mayhar 								   inode,
3919ac27a0ecSDave Kleikamp 								   parent_bh);
3920ac27a0ecSDave Kleikamp 				}
3921ac27a0ecSDave Kleikamp 			}
3922ac27a0ecSDave Kleikamp 		}
3923ac27a0ecSDave Kleikamp 	} else {
3924ac27a0ecSDave Kleikamp 		/* We have reached the bottom of the tree. */
3925ac27a0ecSDave Kleikamp 		BUFFER_TRACE(parent_bh, "free data blocks");
3926617ba13bSMingming Cao 		ext4_free_data(handle, inode, parent_bh, first, last);
3927ac27a0ecSDave Kleikamp 	}
3928ac27a0ecSDave Kleikamp }
3929ac27a0ecSDave Kleikamp 
393091ef4cafSDuane Griffin int ext4_can_truncate(struct inode *inode)
393191ef4cafSDuane Griffin {
393291ef4cafSDuane Griffin 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
393391ef4cafSDuane Griffin 		return 0;
393491ef4cafSDuane Griffin 	if (S_ISREG(inode->i_mode))
393591ef4cafSDuane Griffin 		return 1;
393691ef4cafSDuane Griffin 	if (S_ISDIR(inode->i_mode))
393791ef4cafSDuane Griffin 		return 1;
393891ef4cafSDuane Griffin 	if (S_ISLNK(inode->i_mode))
393991ef4cafSDuane Griffin 		return !ext4_inode_is_fast_symlink(inode);
394091ef4cafSDuane Griffin 	return 0;
394191ef4cafSDuane Griffin }
394291ef4cafSDuane Griffin 
3943ac27a0ecSDave Kleikamp /*
3944617ba13bSMingming Cao  * ext4_truncate()
3945ac27a0ecSDave Kleikamp  *
3946617ba13bSMingming Cao  * We block out ext4_get_block() block instantiations across the entire
3947617ba13bSMingming Cao  * transaction, and VFS/VM ensures that ext4_truncate() cannot run
3948ac27a0ecSDave Kleikamp  * simultaneously on behalf of the same inode.
3949ac27a0ecSDave Kleikamp  *
3950ac27a0ecSDave Kleikamp  * As we work through the truncate and commit bits of it to the journal there
3951ac27a0ecSDave Kleikamp  * is one core, guiding principle: the file's tree must always be consistent on
3952ac27a0ecSDave Kleikamp  * disk.  We must be able to restart the truncate after a crash.
3953ac27a0ecSDave Kleikamp  *
3954ac27a0ecSDave Kleikamp  * The file's tree may be transiently inconsistent in memory (although it
3955ac27a0ecSDave Kleikamp  * probably isn't), but whenever we close off and commit a journal transaction,
3956ac27a0ecSDave Kleikamp  * the contents of (the filesystem + the journal) must be consistent and
3957ac27a0ecSDave Kleikamp  * restartable.  It's pretty simple, really: bottom up, right to left (although
3958ac27a0ecSDave Kleikamp  * left-to-right works OK too).
3959ac27a0ecSDave Kleikamp  *
3960ac27a0ecSDave Kleikamp  * Note that at recovery time, journal replay occurs *before* the restart of
3961ac27a0ecSDave Kleikamp  * truncate against the orphan inode list.
3962ac27a0ecSDave Kleikamp  *
3963ac27a0ecSDave Kleikamp  * The committed inode has the new, desired i_size (which is the same as
3964617ba13bSMingming Cao  * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
3965ac27a0ecSDave Kleikamp  * that this inode's truncate did not complete and it will again call
3966617ba13bSMingming Cao  * ext4_truncate() to have another go.  So there will be instantiated blocks
3967617ba13bSMingming Cao  * to the right of the truncation point in a crashed ext4 filesystem.  But
3968ac27a0ecSDave Kleikamp  * that's fine - as long as they are linked from the inode, the post-crash
3969617ba13bSMingming Cao  * ext4_truncate() run will find them and release them.
3970ac27a0ecSDave Kleikamp  */
3971617ba13bSMingming Cao void ext4_truncate(struct inode *inode)
3972ac27a0ecSDave Kleikamp {
3973ac27a0ecSDave Kleikamp 	handle_t *handle;
3974617ba13bSMingming Cao 	struct ext4_inode_info *ei = EXT4_I(inode);
3975ac27a0ecSDave Kleikamp 	__le32 *i_data = ei->i_data;
3976617ba13bSMingming Cao 	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
3977ac27a0ecSDave Kleikamp 	struct address_space *mapping = inode->i_mapping;
3978725d26d3SAneesh Kumar K.V 	ext4_lblk_t offsets[4];
3979ac27a0ecSDave Kleikamp 	Indirect chain[4];
3980ac27a0ecSDave Kleikamp 	Indirect *partial;
3981ac27a0ecSDave Kleikamp 	__le32 nr = 0;
3982ac27a0ecSDave Kleikamp 	int n;
3983725d26d3SAneesh Kumar K.V 	ext4_lblk_t last_block;
3984ac27a0ecSDave Kleikamp 	unsigned blocksize = inode->i_sb->s_blocksize;
3985ac27a0ecSDave Kleikamp 
398691ef4cafSDuane Griffin 	if (!ext4_can_truncate(inode))
3987ac27a0ecSDave Kleikamp 		return;
3988ac27a0ecSDave Kleikamp 
3989afd4672dSTheodore Ts'o 	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
39907d8f9f7dSTheodore Ts'o 		ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE;
39917d8f9f7dSTheodore Ts'o 
39921d03ec98SAneesh Kumar K.V 	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
3993cf108bcaSJan Kara 		ext4_ext_truncate(inode);
39941d03ec98SAneesh Kumar K.V 		return;
39951d03ec98SAneesh Kumar K.V 	}
3996a86c6181SAlex Tomas 
3997ac27a0ecSDave Kleikamp 	handle = start_transaction(inode);
3998cf108bcaSJan Kara 	if (IS_ERR(handle))
3999ac27a0ecSDave Kleikamp 		return;		/* AKPM: return what? */
4000ac27a0ecSDave Kleikamp 
4001ac27a0ecSDave Kleikamp 	last_block = (inode->i_size + blocksize-1)
4002617ba13bSMingming Cao 					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
4003ac27a0ecSDave Kleikamp 
4004cf108bcaSJan Kara 	if (inode->i_size & (blocksize - 1))
4005cf108bcaSJan Kara 		if (ext4_block_truncate_page(handle, mapping, inode->i_size))
4006cf108bcaSJan Kara 			goto out_stop;
4007ac27a0ecSDave Kleikamp 
4008617ba13bSMingming Cao 	n = ext4_block_to_path(inode, last_block, offsets, NULL);
4009ac27a0ecSDave Kleikamp 	if (n == 0)
4010ac27a0ecSDave Kleikamp 		goto out_stop;	/* error */
4011ac27a0ecSDave Kleikamp 
4012ac27a0ecSDave Kleikamp 	/*
4013ac27a0ecSDave Kleikamp 	 * OK.  This truncate is going to happen.  We add the inode to the
4014ac27a0ecSDave Kleikamp 	 * orphan list, so that if this truncate spans multiple transactions,
4015ac27a0ecSDave Kleikamp 	 * and we crash, we will resume the truncate when the filesystem
4016ac27a0ecSDave Kleikamp 	 * recovers.  It also marks the inode dirty, to catch the new size.
4017ac27a0ecSDave Kleikamp 	 *
4018ac27a0ecSDave Kleikamp 	 * Implication: the file must always be in a sane, consistent
4019ac27a0ecSDave Kleikamp 	 * truncatable state while each transaction commits.
4020ac27a0ecSDave Kleikamp 	 */
4021617ba13bSMingming Cao 	if (ext4_orphan_add(handle, inode))
4022ac27a0ecSDave Kleikamp 		goto out_stop;
4023ac27a0ecSDave Kleikamp 
4024ac27a0ecSDave Kleikamp 	/*
4025632eaeabSMingming Cao 	 * From here we block out all ext4_get_block() callers who want to
4026632eaeabSMingming Cao 	 * modify the block allocation tree.
4027632eaeabSMingming Cao 	 */
4028632eaeabSMingming Cao 	down_write(&ei->i_data_sem);
4029b4df2030STheodore Ts'o 
4030c2ea3fdeSTheodore Ts'o 	ext4_discard_preallocations(inode);
4031b4df2030STheodore Ts'o 
4032632eaeabSMingming Cao 	/*
4033ac27a0ecSDave Kleikamp 	 * The orphan list entry will now protect us from any crash which
4034ac27a0ecSDave Kleikamp 	 * occurs before the truncate completes, so it is now safe to propagate
4035ac27a0ecSDave Kleikamp 	 * the new, shorter inode size (held for now in i_size) into the
4036ac27a0ecSDave Kleikamp 	 * on-disk inode. We do this via i_disksize, which is the value which
4037617ba13bSMingming Cao 	 * ext4 *really* writes onto the disk inode.
4038ac27a0ecSDave Kleikamp 	 */
4039ac27a0ecSDave Kleikamp 	ei->i_disksize = inode->i_size;
4040ac27a0ecSDave Kleikamp 
4041ac27a0ecSDave Kleikamp 	if (n == 1) {		/* direct blocks */
4042617ba13bSMingming Cao 		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
4043617ba13bSMingming Cao 			       i_data + EXT4_NDIR_BLOCKS);
4044ac27a0ecSDave Kleikamp 		goto do_indirects;
4045ac27a0ecSDave Kleikamp 	}
4046ac27a0ecSDave Kleikamp 
4047617ba13bSMingming Cao 	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
4048ac27a0ecSDave Kleikamp 	/* Kill the top of shared branch (not detached) */
4049ac27a0ecSDave Kleikamp 	if (nr) {
4050ac27a0ecSDave Kleikamp 		if (partial == chain) {
4051ac27a0ecSDave Kleikamp 			/* Shared branch grows from the inode */
4052617ba13bSMingming Cao 			ext4_free_branches(handle, inode, NULL,
4053ac27a0ecSDave Kleikamp 					   &nr, &nr+1, (chain+n-1) - partial);
4054ac27a0ecSDave Kleikamp 			*partial->p = 0;
4055ac27a0ecSDave Kleikamp 			/*
4056ac27a0ecSDave Kleikamp 			 * We mark the inode dirty prior to restart,
4057ac27a0ecSDave Kleikamp 			 * and prior to stop.  No need for it here.
4058ac27a0ecSDave Kleikamp 			 */
4059ac27a0ecSDave Kleikamp 		} else {
4060ac27a0ecSDave Kleikamp 			/* Shared branch grows from an indirect block */
4061ac27a0ecSDave Kleikamp 			BUFFER_TRACE(partial->bh, "get_write_access");
4062617ba13bSMingming Cao 			ext4_free_branches(handle, inode, partial->bh,
4063ac27a0ecSDave Kleikamp 					partial->p,
4064ac27a0ecSDave Kleikamp 					partial->p+1, (chain+n-1) - partial);
4065ac27a0ecSDave Kleikamp 		}
4066ac27a0ecSDave Kleikamp 	}
4067ac27a0ecSDave Kleikamp 	/* Clear the ends of indirect blocks on the shared branch */
4068ac27a0ecSDave Kleikamp 	while (partial > chain) {
4069617ba13bSMingming Cao 		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
4070ac27a0ecSDave Kleikamp 				   (__le32*)partial->bh->b_data+addr_per_block,
4071ac27a0ecSDave Kleikamp 				   (chain+n-1) - partial);
4072ac27a0ecSDave Kleikamp 		BUFFER_TRACE(partial->bh, "call brelse");
4073ac27a0ecSDave Kleikamp 		brelse(partial->bh);
4074ac27a0ecSDave Kleikamp 		partial--;
4075ac27a0ecSDave Kleikamp 	}
4076ac27a0ecSDave Kleikamp do_indirects:
4077ac27a0ecSDave Kleikamp 	/* Kill the remaining (whole) subtrees */
4078ac27a0ecSDave Kleikamp 	switch (offsets[0]) {
4079ac27a0ecSDave Kleikamp 	default:
4080617ba13bSMingming Cao 		nr = i_data[EXT4_IND_BLOCK];
4081ac27a0ecSDave Kleikamp 		if (nr) {
4082617ba13bSMingming Cao 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
4083617ba13bSMingming Cao 			i_data[EXT4_IND_BLOCK] = 0;
4084ac27a0ecSDave Kleikamp 		}
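		/* fall through */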
4085617ba13bSMingming Cao 	case EXT4_IND_BLOCK:
4086617ba13bSMingming Cao 		nr = i_data[EXT4_DIND_BLOCK];
4087ac27a0ecSDave Kleikamp 		if (nr) {
4088617ba13bSMingming Cao 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
4089617ba13bSMingming Cao 			i_data[EXT4_DIND_BLOCK] = 0;
4090ac27a0ecSDave Kleikamp 		}
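		/* fall through */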
4091617ba13bSMingming Cao 	case EXT4_DIND_BLOCK:
4092617ba13bSMingming Cao 		nr = i_data[EXT4_TIND_BLOCK];
4093ac27a0ecSDave Kleikamp 		if (nr) {
4094617ba13bSMingming Cao 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
4095617ba13bSMingming Cao 			i_data[EXT4_TIND_BLOCK] = 0;
4096ac27a0ecSDave Kleikamp 		}
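		/* fall through */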
4097617ba13bSMingming Cao 	case EXT4_TIND_BLOCK:
4098ac27a0ecSDave Kleikamp 		;
4099ac27a0ecSDave Kleikamp 	}
4100ac27a0ecSDave Kleikamp 
41010e855ac8SAneesh Kumar K.V 	up_write(&ei->i_data_sem);
4102ef7f3835SKalpak Shah 	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4103617ba13bSMingming Cao 	ext4_mark_inode_dirty(handle, inode);
4104ac27a0ecSDave Kleikamp 
4105ac27a0ecSDave Kleikamp 	/*
4106ac27a0ecSDave Kleikamp 	 * In a multi-transaction truncate, we only make the final transaction
4107ac27a0ecSDave Kleikamp 	 * synchronous
4108ac27a0ecSDave Kleikamp 	 */
4109ac27a0ecSDave Kleikamp 	if (IS_SYNC(inode))
41100390131bSFrank Mayhar 		ext4_handle_sync(handle);
4111ac27a0ecSDave Kleikamp out_stop:
4112ac27a0ecSDave Kleikamp 	/*
4113ac27a0ecSDave Kleikamp 	 * If this was a simple ftruncate(), and the file will remain alive
4114ac27a0ecSDave Kleikamp 	 * then we need to clear up the orphan record which we created above.
4115ac27a0ecSDave Kleikamp 	 * However, if this was a real unlink then we were called by
4116617ba13bSMingming Cao 	 * ext4_delete_inode(), and we allow that function to clean up the
4117ac27a0ecSDave Kleikamp 	 * orphan info for us.
4118ac27a0ecSDave Kleikamp 	 */
4119ac27a0ecSDave Kleikamp 	if (inode->i_nlink)
4120617ba13bSMingming Cao 		ext4_orphan_del(handle, inode);
4121ac27a0ecSDave Kleikamp 
4122617ba13bSMingming Cao 	ext4_journal_stop(handle);
4123ac27a0ecSDave Kleikamp }
4124ac27a0ecSDave Kleikamp 
4125ac27a0ecSDave Kleikamp /*
4126617ba13bSMingming Cao  * ext4_get_inode_loc returns with an extra refcount against the inode's
4127ac27a0ecSDave Kleikamp  * underlying buffer_head on success. If 'in_mem' is true, we have all
4128ac27a0ecSDave Kleikamp  * data in memory that is needed to recreate the on-disk version of this
4129ac27a0ecSDave Kleikamp  * inode.
4130ac27a0ecSDave Kleikamp  */
4131617ba13bSMingming Cao static int __ext4_get_inode_loc(struct inode *inode,
4132617ba13bSMingming Cao 				struct ext4_iloc *iloc, int in_mem)
4133ac27a0ecSDave Kleikamp {
4134240799cdSTheodore Ts'o 	struct ext4_group_desc	*gdp;
4135ac27a0ecSDave Kleikamp 	struct buffer_head	*bh;
4136240799cdSTheodore Ts'o 	struct super_block	*sb = inode->i_sb;
4137240799cdSTheodore Ts'o 	ext4_fsblk_t		block;
4138240799cdSTheodore Ts'o 	int			inodes_per_block, inode_offset;
4139ac27a0ecSDave Kleikamp 
41403a06d778SAneesh Kumar K.V 	iloc->bh = NULL;
4141240799cdSTheodore Ts'o 	if (!ext4_valid_inum(sb, inode->i_ino))
4142ac27a0ecSDave Kleikamp 		return -EIO;
4143ac27a0ecSDave Kleikamp 
4144240799cdSTheodore Ts'o 	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
4145240799cdSTheodore Ts'o 	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
4146240799cdSTheodore Ts'o 	if (!gdp)
4147240799cdSTheodore Ts'o 		return -EIO;
4148240799cdSTheodore Ts'o 
4149240799cdSTheodore Ts'o 	/*
4150240799cdSTheodore Ts'o 	 * Figure out the offset within the block group inode table
4151240799cdSTheodore Ts'o 	 */
4152240799cdSTheodore Ts'o 	inodes_per_block = (EXT4_BLOCK_SIZE(sb) / EXT4_INODE_SIZE(sb));
4153240799cdSTheodore Ts'o 	inode_offset = ((inode->i_ino - 1) %
4154240799cdSTheodore Ts'o 			EXT4_INODES_PER_GROUP(sb));
4155240799cdSTheodore Ts'o 	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
4156240799cdSTheodore Ts'o 	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
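	/*
	 * Worked example (illustrative numbers, not read from this fs):
	 * with 4 KiB blocks and 256-byte inodes, inodes_per_block is 16.
	 * An inode whose offset within its group is 17 then lives in the
	 * second block of the group's inode table (block = table + 1), at
	 * byte offset (17 % 16) * 256 = 256 within that block.
	 */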
4157240799cdSTheodore Ts'o 
4158240799cdSTheodore Ts'o 	bh = sb_getblk(sb, block);
4159ac27a0ecSDave Kleikamp 	if (!bh) {
4160240799cdSTheodore Ts'o 		ext4_error(sb, "ext4_get_inode_loc", "unable to read "
4161240799cdSTheodore Ts'o 			   "inode block - inode=%lu, block=%llu",
4162ac27a0ecSDave Kleikamp 			   inode->i_ino, block);
4163ac27a0ecSDave Kleikamp 		return -EIO;
4164ac27a0ecSDave Kleikamp 	}
4165ac27a0ecSDave Kleikamp 	if (!buffer_uptodate(bh)) {
4166ac27a0ecSDave Kleikamp 		lock_buffer(bh);
41679c83a923SHidehiro Kawai 
41689c83a923SHidehiro Kawai 		/*
41699c83a923SHidehiro Kawai 		 * If the buffer has the write error flag, we have failed
41709c83a923SHidehiro Kawai 		 * to write out another inode in the same block.  In this
41719c83a923SHidehiro Kawai 		 * case, we don't have to read the block because we may
41729c83a923SHidehiro Kawai 		 * read the old inode data successfully.
41739c83a923SHidehiro Kawai 		 */
41749c83a923SHidehiro Kawai 		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
41759c83a923SHidehiro Kawai 			set_buffer_uptodate(bh);
41769c83a923SHidehiro Kawai 
4177ac27a0ecSDave Kleikamp 		if (buffer_uptodate(bh)) {
4178ac27a0ecSDave Kleikamp 			/* someone brought it uptodate while we waited */
4179ac27a0ecSDave Kleikamp 			unlock_buffer(bh);
4180ac27a0ecSDave Kleikamp 			goto has_buffer;
4181ac27a0ecSDave Kleikamp 		}
4182ac27a0ecSDave Kleikamp 
4183ac27a0ecSDave Kleikamp 		/*
4184ac27a0ecSDave Kleikamp 		 * If we have all information of the inode in memory and this
4185ac27a0ecSDave Kleikamp 		 * is the only valid inode in the block, we need not read the
4186ac27a0ecSDave Kleikamp 		 * block.
4187ac27a0ecSDave Kleikamp 		 */
4188ac27a0ecSDave Kleikamp 		if (in_mem) {
4189ac27a0ecSDave Kleikamp 			struct buffer_head *bitmap_bh;
4190240799cdSTheodore Ts'o 			int i, start;
4191ac27a0ecSDave Kleikamp 
4192240799cdSTheodore Ts'o 			start = inode_offset & ~(inodes_per_block - 1);
4193ac27a0ecSDave Kleikamp 
4194ac27a0ecSDave Kleikamp 			/* Is the inode bitmap in cache? */
4195240799cdSTheodore Ts'o 			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
4196ac27a0ecSDave Kleikamp 			if (!bitmap_bh)
4197ac27a0ecSDave Kleikamp 				goto make_io;
4198ac27a0ecSDave Kleikamp 
4199ac27a0ecSDave Kleikamp 			/*
4200ac27a0ecSDave Kleikamp 			 * If the inode bitmap isn't in cache then the
4201ac27a0ecSDave Kleikamp 			 * optimisation may end up performing two reads instead
4202ac27a0ecSDave Kleikamp 			 * of one, so skip it.
4203ac27a0ecSDave Kleikamp 			 */
4204ac27a0ecSDave Kleikamp 			if (!buffer_uptodate(bitmap_bh)) {
4205ac27a0ecSDave Kleikamp 				brelse(bitmap_bh);
4206ac27a0ecSDave Kleikamp 				goto make_io;
4207ac27a0ecSDave Kleikamp 			}
4208240799cdSTheodore Ts'o 			for (i = start; i < start + inodes_per_block; i++) {
4209ac27a0ecSDave Kleikamp 				if (i == inode_offset)
4210ac27a0ecSDave Kleikamp 					continue;
4211617ba13bSMingming Cao 				if (ext4_test_bit(i, bitmap_bh->b_data))
4212ac27a0ecSDave Kleikamp 					break;
4213ac27a0ecSDave Kleikamp 			}
4214ac27a0ecSDave Kleikamp 			brelse(bitmap_bh);
4215240799cdSTheodore Ts'o 			if (i == start + inodes_per_block) {
4216ac27a0ecSDave Kleikamp 				/* all other inodes are free, so skip I/O */
4217ac27a0ecSDave Kleikamp 				memset(bh->b_data, 0, bh->b_size);
4218ac27a0ecSDave Kleikamp 				set_buffer_uptodate(bh);
4219ac27a0ecSDave Kleikamp 				unlock_buffer(bh);
4220ac27a0ecSDave Kleikamp 				goto has_buffer;
4221ac27a0ecSDave Kleikamp 			}
4222ac27a0ecSDave Kleikamp 		}
4223ac27a0ecSDave Kleikamp 
4224ac27a0ecSDave Kleikamp make_io:
4225ac27a0ecSDave Kleikamp 		/*
4226240799cdSTheodore Ts'o 		 * If we need to do any I/O, try to pre-readahead extra
4227240799cdSTheodore Ts'o 		 * blocks from the inode table.
4228240799cdSTheodore Ts'o 		 */
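		/*
		 * Sketch of the window computed below (assuming, say,
		 * s_inode_readahead_blks == 32): b is the target block
		 * rounded down to a multiple of 32, clamped so it never
		 * starts before the inode table, and the window is trimmed
		 * so it stops at the last table block that can still hold
		 * in-use inodes (using the GDT_CSUM unused-inode count).
		 */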
4229240799cdSTheodore Ts'o 		if (EXT4_SB(sb)->s_inode_readahead_blks) {
4230240799cdSTheodore Ts'o 			ext4_fsblk_t b, end, table;
4231240799cdSTheodore Ts'o 			unsigned num;
4232240799cdSTheodore Ts'o 
4233240799cdSTheodore Ts'o 			table = ext4_inode_table(sb, gdp);
4234b713a5ecSTheodore Ts'o 			/* s_inode_readahead_blks is always a power of 2 */
4235240799cdSTheodore Ts'o 			b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
4236240799cdSTheodore Ts'o 			if (table > b)
4237240799cdSTheodore Ts'o 				b = table;
4238240799cdSTheodore Ts'o 			end = b + EXT4_SB(sb)->s_inode_readahead_blks;
4239240799cdSTheodore Ts'o 			num = EXT4_INODES_PER_GROUP(sb);
4240240799cdSTheodore Ts'o 			if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
4241240799cdSTheodore Ts'o 				       EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
4242560671a0SAneesh Kumar K.V 				num -= ext4_itable_unused_count(sb, gdp);
4243240799cdSTheodore Ts'o 			table += num / inodes_per_block;
4244240799cdSTheodore Ts'o 			if (end > table)
4245240799cdSTheodore Ts'o 				end = table;
4246240799cdSTheodore Ts'o 			while (b <= end)
4247240799cdSTheodore Ts'o 				sb_breadahead(sb, b++);
4248240799cdSTheodore Ts'o 		}
4249240799cdSTheodore Ts'o 
4250240799cdSTheodore Ts'o 		/*
4251ac27a0ecSDave Kleikamp 		 * There are other valid inodes in the buffer, this inode
4252ac27a0ecSDave Kleikamp 		 * has in-inode xattrs, or we don't have this inode in memory.
4253ac27a0ecSDave Kleikamp 		 * Read the block from disk.
4254ac27a0ecSDave Kleikamp 		 */
4255ac27a0ecSDave Kleikamp 		get_bh(bh);
4256ac27a0ecSDave Kleikamp 		bh->b_end_io = end_buffer_read_sync;
4257ac27a0ecSDave Kleikamp 		submit_bh(READ_META, bh);
4258ac27a0ecSDave Kleikamp 		wait_on_buffer(bh);
4259ac27a0ecSDave Kleikamp 		if (!buffer_uptodate(bh)) {
4260240799cdSTheodore Ts'o 			ext4_error(sb, __func__,
4261240799cdSTheodore Ts'o 				   "unable to read inode block - inode=%lu, "
4262240799cdSTheodore Ts'o 				   "block=%llu", inode->i_ino, block);
4263ac27a0ecSDave Kleikamp 			brelse(bh);
4264ac27a0ecSDave Kleikamp 			return -EIO;
4265ac27a0ecSDave Kleikamp 		}
4266ac27a0ecSDave Kleikamp 	}
4267ac27a0ecSDave Kleikamp has_buffer:
4268ac27a0ecSDave Kleikamp 	iloc->bh = bh;
4269ac27a0ecSDave Kleikamp 	return 0;
4270ac27a0ecSDave Kleikamp }
4271ac27a0ecSDave Kleikamp 
4272617ba13bSMingming Cao int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
4273ac27a0ecSDave Kleikamp {
4274ac27a0ecSDave Kleikamp 	/* We have all inode data except xattrs in memory here. */
4275617ba13bSMingming Cao 	return __ext4_get_inode_loc(inode, iloc,
4276617ba13bSMingming Cao 		!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
4277ac27a0ecSDave Kleikamp }
4278ac27a0ecSDave Kleikamp 
4279617ba13bSMingming Cao void ext4_set_inode_flags(struct inode *inode)
4280ac27a0ecSDave Kleikamp {
4281617ba13bSMingming Cao 	unsigned int flags = EXT4_I(inode)->i_flags;
4282ac27a0ecSDave Kleikamp 
4283ac27a0ecSDave Kleikamp 	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
4284617ba13bSMingming Cao 	if (flags & EXT4_SYNC_FL)
4285ac27a0ecSDave Kleikamp 		inode->i_flags |= S_SYNC;
4286617ba13bSMingming Cao 	if (flags & EXT4_APPEND_FL)
4287ac27a0ecSDave Kleikamp 		inode->i_flags |= S_APPEND;
4288617ba13bSMingming Cao 	if (flags & EXT4_IMMUTABLE_FL)
4289ac27a0ecSDave Kleikamp 		inode->i_flags |= S_IMMUTABLE;
4290617ba13bSMingming Cao 	if (flags & EXT4_NOATIME_FL)
4291ac27a0ecSDave Kleikamp 		inode->i_flags |= S_NOATIME;
4292617ba13bSMingming Cao 	if (flags & EXT4_DIRSYNC_FL)
4293ac27a0ecSDave Kleikamp 		inode->i_flags |= S_DIRSYNC;
4294ac27a0ecSDave Kleikamp }
4295ac27a0ecSDave Kleikamp 
4296ff9ddf7eSJan Kara /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
4297ff9ddf7eSJan Kara void ext4_get_inode_flags(struct ext4_inode_info *ei)
4298ff9ddf7eSJan Kara {
4299ff9ddf7eSJan Kara 	unsigned int flags = ei->vfs_inode.i_flags;
4300ff9ddf7eSJan Kara 
4301ff9ddf7eSJan Kara 	ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
4302ff9ddf7eSJan Kara 			EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
4303ff9ddf7eSJan Kara 	if (flags & S_SYNC)
4304ff9ddf7eSJan Kara 		ei->i_flags |= EXT4_SYNC_FL;
4305ff9ddf7eSJan Kara 	if (flags & S_APPEND)
4306ff9ddf7eSJan Kara 		ei->i_flags |= EXT4_APPEND_FL;
4307ff9ddf7eSJan Kara 	if (flags & S_IMMUTABLE)
4308ff9ddf7eSJan Kara 		ei->i_flags |= EXT4_IMMUTABLE_FL;
4309ff9ddf7eSJan Kara 	if (flags & S_NOATIME)
4310ff9ddf7eSJan Kara 		ei->i_flags |= EXT4_NOATIME_FL;
4311ff9ddf7eSJan Kara 	if (flags & S_DIRSYNC)
4312ff9ddf7eSJan Kara 		ei->i_flags |= EXT4_DIRSYNC_FL;
4313ff9ddf7eSJan Kara }
43140fc1b451SAneesh Kumar K.V static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
43150fc1b451SAneesh Kumar K.V 					struct ext4_inode_info *ei)
43160fc1b451SAneesh Kumar K.V {
43170fc1b451SAneesh Kumar K.V 	blkcnt_t i_blocks ;
43188180a562SAneesh Kumar K.V 	struct inode *inode = &(ei->vfs_inode);
43198180a562SAneesh Kumar K.V 	struct super_block *sb = inode->i_sb;
43200fc1b451SAneesh Kumar K.V 
43210fc1b451SAneesh Kumar K.V 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
43220fc1b451SAneesh Kumar K.V 				EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
43230fc1b451SAneesh Kumar K.V 		/* we are using combined 48 bit field */
43240fc1b451SAneesh Kumar K.V 		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
43250fc1b451SAneesh Kumar K.V 					le32_to_cpu(raw_inode->i_blocks_lo);
43268180a562SAneesh Kumar K.V 		if (ei->i_flags & EXT4_HUGE_FILE_FL) {
43278180a562SAneesh Kumar K.V 			/* i_blocks is counted in file system block size units */
43288180a562SAneesh Kumar K.V 			return i_blocks  << (inode->i_blkbits - 9);
43298180a562SAneesh Kumar K.V 		} else {
43300fc1b451SAneesh Kumar K.V 			return i_blocks;
43318180a562SAneesh Kumar K.V 		}
43320fc1b451SAneesh Kumar K.V 	} else {
43330fc1b451SAneesh Kumar K.V 		return le32_to_cpu(raw_inode->i_blocks_lo);
43340fc1b451SAneesh Kumar K.V 	}
43350fc1b451SAneesh Kumar K.V }
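/*
 * Decoding example (hypothetical values): with the HUGE_FILE feature and
 * EXT4_HUGE_FILE_FL set on a 4 KiB-block filesystem (i_blkbits == 12),
 * a raw 48-bit count of 10 means 10 filesystem blocks and is reported as
 * 10 << (12 - 9) == 80 units of 512 bytes.  Without the flag the raw
 * count is already in 512-byte units and is returned unchanged.
 */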
4336ff9ddf7eSJan Kara 
43371d1fe1eeSDavid Howells struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4338ac27a0ecSDave Kleikamp {
4339617ba13bSMingming Cao 	struct ext4_iloc iloc;
4340617ba13bSMingming Cao 	struct ext4_inode *raw_inode;
43411d1fe1eeSDavid Howells 	struct ext4_inode_info *ei;
4342ac27a0ecSDave Kleikamp 	struct buffer_head *bh;
43431d1fe1eeSDavid Howells 	struct inode *inode;
43441d1fe1eeSDavid Howells 	long ret;
4345ac27a0ecSDave Kleikamp 	int block;
4346ac27a0ecSDave Kleikamp 
43471d1fe1eeSDavid Howells 	inode = iget_locked(sb, ino);
43481d1fe1eeSDavid Howells 	if (!inode)
43491d1fe1eeSDavid Howells 		return ERR_PTR(-ENOMEM);
43501d1fe1eeSDavid Howells 	if (!(inode->i_state & I_NEW))
43511d1fe1eeSDavid Howells 		return inode;
43521d1fe1eeSDavid Howells 
43531d1fe1eeSDavid Howells 	ei = EXT4_I(inode);
435403010a33STheodore Ts'o #ifdef CONFIG_EXT4_FS_POSIX_ACL
4355617ba13bSMingming Cao 	ei->i_acl = EXT4_ACL_NOT_CACHED;
4356617ba13bSMingming Cao 	ei->i_default_acl = EXT4_ACL_NOT_CACHED;
4357ac27a0ecSDave Kleikamp #endif
4358ac27a0ecSDave Kleikamp 
43591d1fe1eeSDavid Howells 	ret = __ext4_get_inode_loc(inode, &iloc, 0);
43601d1fe1eeSDavid Howells 	if (ret < 0)
4361ac27a0ecSDave Kleikamp 		goto bad_inode;
4362ac27a0ecSDave Kleikamp 	bh = iloc.bh;
4363617ba13bSMingming Cao 	raw_inode = ext4_raw_inode(&iloc);
4364ac27a0ecSDave Kleikamp 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4365ac27a0ecSDave Kleikamp 	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
4366ac27a0ecSDave Kleikamp 	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
4367ac27a0ecSDave Kleikamp 	if (!(test_opt(inode->i_sb, NO_UID32))) {
4368ac27a0ecSDave Kleikamp 		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
4369ac27a0ecSDave Kleikamp 		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4370ac27a0ecSDave Kleikamp 	}
4371ac27a0ecSDave Kleikamp 	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
4372ac27a0ecSDave Kleikamp 
4373ac27a0ecSDave Kleikamp 	ei->i_state = 0;
4374ac27a0ecSDave Kleikamp 	ei->i_dir_start_lookup = 0;
4375ac27a0ecSDave Kleikamp 	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4376ac27a0ecSDave Kleikamp 	/* We now have enough fields to check if the inode was active or not.
4377ac27a0ecSDave Kleikamp 	 * This is needed because nfsd might try to access dead inodes
4378ac27a0ecSDave Kleikamp 	 * the test is the same one that e2fsck uses
4379ac27a0ecSDave Kleikamp 	 * NeilBrown 1999oct15
4380ac27a0ecSDave Kleikamp 	 */
4381ac27a0ecSDave Kleikamp 	if (inode->i_nlink == 0) {
4382ac27a0ecSDave Kleikamp 		if (inode->i_mode == 0 ||
4383617ba13bSMingming Cao 		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
4384ac27a0ecSDave Kleikamp 			/* this inode is deleted */
4385ac27a0ecSDave Kleikamp 			brelse(bh);
43861d1fe1eeSDavid Howells 			ret = -ESTALE;
4387ac27a0ecSDave Kleikamp 			goto bad_inode;
4388ac27a0ecSDave Kleikamp 		}
4389ac27a0ecSDave Kleikamp 		/* The only unlinked inodes we let through here have
4390ac27a0ecSDave Kleikamp 		 * valid i_mode and are being read by the orphan
4391ac27a0ecSDave Kleikamp 		 * recovery code: that's fine, we're about to complete
4392ac27a0ecSDave Kleikamp 		 * the process of deleting those. */
4393ac27a0ecSDave Kleikamp 	}
4394ac27a0ecSDave Kleikamp 	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
43950fc1b451SAneesh Kumar K.V 	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
43967973c0c1SAneesh Kumar K.V 	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4397a9e81742STheodore Ts'o 	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
4398a1ddeb7eSBadari Pulavarty 		ei->i_file_acl |=
4399a1ddeb7eSBadari Pulavarty 			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4400a48380f7SAneesh Kumar K.V 	inode->i_size = ext4_isize(raw_inode);
4401ac27a0ecSDave Kleikamp 	ei->i_disksize = inode->i_size;
4402ac27a0ecSDave Kleikamp 	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4403ac27a0ecSDave Kleikamp 	ei->i_block_group = iloc.block_group;
4404a4912123STheodore Ts'o 	ei->i_last_alloc_group = ~0;
4405ac27a0ecSDave Kleikamp 	/*
4406ac27a0ecSDave Kleikamp 	 * NOTE! The in-memory inode i_data array is in little-endian order
4407ac27a0ecSDave Kleikamp 	 * even on big-endian machines: we do NOT byteswap the block numbers!
4408ac27a0ecSDave Kleikamp 	 */
4409617ba13bSMingming Cao 	for (block = 0; block < EXT4_N_BLOCKS; block++)
4410ac27a0ecSDave Kleikamp 		ei->i_data[block] = raw_inode->i_block[block];
4411ac27a0ecSDave Kleikamp 	INIT_LIST_HEAD(&ei->i_orphan);
4412ac27a0ecSDave Kleikamp 
44130040d987SEric Sandeen 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4414ac27a0ecSDave Kleikamp 		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4415617ba13bSMingming Cao 		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4416e5d2861fSKirill Korotaev 		    EXT4_INODE_SIZE(inode->i_sb)) {
4417e5d2861fSKirill Korotaev 			brelse(bh);
44181d1fe1eeSDavid Howells 			ret = -EIO;
4419ac27a0ecSDave Kleikamp 			goto bad_inode;
4420e5d2861fSKirill Korotaev 		}
4421ac27a0ecSDave Kleikamp 		if (ei->i_extra_isize == 0) {
4422ac27a0ecSDave Kleikamp 			/* The extra space is currently unused. Use it. */
4423617ba13bSMingming Cao 			ei->i_extra_isize = sizeof(struct ext4_inode) -
4424617ba13bSMingming Cao 					    EXT4_GOOD_OLD_INODE_SIZE;
4425ac27a0ecSDave Kleikamp 		} else {
4426ac27a0ecSDave Kleikamp 			__le32 *magic = (void *)raw_inode +
4427617ba13bSMingming Cao 					EXT4_GOOD_OLD_INODE_SIZE +
4428ac27a0ecSDave Kleikamp 					ei->i_extra_isize;
4429617ba13bSMingming Cao 			if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
4430617ba13bSMingming Cao 				 ei->i_state |= EXT4_STATE_XATTR;
4431ac27a0ecSDave Kleikamp 		}
4432ac27a0ecSDave Kleikamp 	} else
4433ac27a0ecSDave Kleikamp 		ei->i_extra_isize = 0;
4434ac27a0ecSDave Kleikamp 
4435ef7f3835SKalpak Shah 	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
4436ef7f3835SKalpak Shah 	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
4437ef7f3835SKalpak Shah 	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
4438ef7f3835SKalpak Shah 	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
4439ef7f3835SKalpak Shah 
444025ec56b5SJean Noel Cordenner 	inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
444125ec56b5SJean Noel Cordenner 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
444225ec56b5SJean Noel Cordenner 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
444325ec56b5SJean Noel Cordenner 			inode->i_version |=
444425ec56b5SJean Noel Cordenner 			(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
444525ec56b5SJean Noel Cordenner 	}
444625ec56b5SJean Noel Cordenner 
4447c4b5a614STheodore Ts'o 	ret = 0;
4448485c26ecSTheodore Ts'o 	if (ei->i_file_acl &&
4449485c26ecSTheodore Ts'o 	    ((ei->i_file_acl <
4450485c26ecSTheodore Ts'o 	      (le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) +
4451485c26ecSTheodore Ts'o 	       EXT4_SB(sb)->s_gdb_count)) ||
4452485c26ecSTheodore Ts'o 	     (ei->i_file_acl >= ext4_blocks_count(EXT4_SB(sb)->s_es)))) {
4453485c26ecSTheodore Ts'o 		ext4_error(sb, __func__,
4454485c26ecSTheodore Ts'o 			   "bad extended attribute block %llu in inode #%lu",
4455485c26ecSTheodore Ts'o 			   ei->i_file_acl, inode->i_ino);
4456485c26ecSTheodore Ts'o 		ret = -EIO;
4457485c26ecSTheodore Ts'o 		goto bad_inode;
4458485c26ecSTheodore Ts'o 	} else if (ei->i_flags & EXT4_EXTENTS_FL) {
4459c4b5a614STheodore Ts'o 		if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4460c4b5a614STheodore Ts'o 		    (S_ISLNK(inode->i_mode) &&
4461c4b5a614STheodore Ts'o 		     !ext4_inode_is_fast_symlink(inode)))
44627a262f7cSAneesh Kumar K.V 			/* Validate extent which is part of inode */
44637a262f7cSAneesh Kumar K.V 			ret = ext4_ext_check_inode(inode);
4464fe2c8191SThiemo Nagel 	} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4465fe2c8191SThiemo Nagel 		   (S_ISLNK(inode->i_mode) &&
4466fe2c8191SThiemo Nagel 		    !ext4_inode_is_fast_symlink(inode))) {
4467fe2c8191SThiemo Nagel 		/* Validate block references which are part of inode */
4468fe2c8191SThiemo Nagel 		ret = ext4_check_inode_blockref(inode);
4469fe2c8191SThiemo Nagel 	}
44707a262f7cSAneesh Kumar K.V 	if (ret) {
44717a262f7cSAneesh Kumar K.V 		brelse(bh);
44727a262f7cSAneesh Kumar K.V 		goto bad_inode;
44737a262f7cSAneesh Kumar K.V 	}
44747a262f7cSAneesh Kumar K.V 
4475ac27a0ecSDave Kleikamp 	if (S_ISREG(inode->i_mode)) {
4476617ba13bSMingming Cao 		inode->i_op = &ext4_file_inode_operations;
4477617ba13bSMingming Cao 		inode->i_fop = &ext4_file_operations;
4478617ba13bSMingming Cao 		ext4_set_aops(inode);
4479ac27a0ecSDave Kleikamp 	} else if (S_ISDIR(inode->i_mode)) {
4480617ba13bSMingming Cao 		inode->i_op = &ext4_dir_inode_operations;
4481617ba13bSMingming Cao 		inode->i_fop = &ext4_dir_operations;
4482ac27a0ecSDave Kleikamp 	} else if (S_ISLNK(inode->i_mode)) {
4483e83c1397SDuane Griffin 		if (ext4_inode_is_fast_symlink(inode)) {
4484617ba13bSMingming Cao 			inode->i_op = &ext4_fast_symlink_inode_operations;
4485e83c1397SDuane Griffin 			nd_terminate_link(ei->i_data, inode->i_size,
4486e83c1397SDuane Griffin 				sizeof(ei->i_data) - 1);
4487e83c1397SDuane Griffin 		} else {
4488617ba13bSMingming Cao 			inode->i_op = &ext4_symlink_inode_operations;
4489617ba13bSMingming Cao 			ext4_set_aops(inode);
4490ac27a0ecSDave Kleikamp 		}
4491563bdd61STheodore Ts'o 	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
4492563bdd61STheodore Ts'o 	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
4493617ba13bSMingming Cao 		inode->i_op = &ext4_special_inode_operations;
4494ac27a0ecSDave Kleikamp 		if (raw_inode->i_block[0])
4495ac27a0ecSDave Kleikamp 			init_special_inode(inode, inode->i_mode,
4496ac27a0ecSDave Kleikamp 			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
4497ac27a0ecSDave Kleikamp 		else
4498ac27a0ecSDave Kleikamp 			init_special_inode(inode, inode->i_mode,
4499ac27a0ecSDave Kleikamp 			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
4500563bdd61STheodore Ts'o 	} else {
4501563bdd61STheodore Ts'o 		brelse(bh);
4502563bdd61STheodore Ts'o 		ret = -EIO;
4503563bdd61STheodore Ts'o 		ext4_error(inode->i_sb, __func__,
4504563bdd61STheodore Ts'o 			   "bogus i_mode (%o) for inode=%lu",
4505563bdd61STheodore Ts'o 			   inode->i_mode, inode->i_ino);
4506563bdd61STheodore Ts'o 		goto bad_inode;
4507ac27a0ecSDave Kleikamp 	}
4508ac27a0ecSDave Kleikamp 	brelse(iloc.bh);
4509617ba13bSMingming Cao 	ext4_set_inode_flags(inode);
45101d1fe1eeSDavid Howells 	unlock_new_inode(inode);
45111d1fe1eeSDavid Howells 	return inode;
4512ac27a0ecSDave Kleikamp 
4513ac27a0ecSDave Kleikamp bad_inode:
45141d1fe1eeSDavid Howells 	iget_failed(inode);
45151d1fe1eeSDavid Howells 	return ERR_PTR(ret);
4516ac27a0ecSDave Kleikamp }
4517ac27a0ecSDave Kleikamp 
45180fc1b451SAneesh Kumar K.V static int ext4_inode_blocks_set(handle_t *handle,
45190fc1b451SAneesh Kumar K.V 				struct ext4_inode *raw_inode,
45200fc1b451SAneesh Kumar K.V 				struct ext4_inode_info *ei)
45210fc1b451SAneesh Kumar K.V {
45220fc1b451SAneesh Kumar K.V 	struct inode *inode = &(ei->vfs_inode);
45230fc1b451SAneesh Kumar K.V 	u64 i_blocks = inode->i_blocks;
45240fc1b451SAneesh Kumar K.V 	struct super_block *sb = inode->i_sb;
45250fc1b451SAneesh Kumar K.V 
45260fc1b451SAneesh Kumar K.V 	if (i_blocks <= ~0U) {
45270fc1b451SAneesh Kumar K.V 		/*
45280fc1b451SAneesh Kumar K.V 		 * i_blocks can be represented in a 32 bit variable
45290fc1b451SAneesh Kumar K.V 		 * as a multiple of 512 bytes
45300fc1b451SAneesh Kumar K.V 		 */
45318180a562SAneesh Kumar K.V 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
45320fc1b451SAneesh Kumar K.V 		raw_inode->i_blocks_high = 0;
45338180a562SAneesh Kumar K.V 		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
4534f287a1a5STheodore Ts'o 		return 0;
4535f287a1a5STheodore Ts'o 	}
4536f287a1a5STheodore Ts'o 	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
4537f287a1a5STheodore Ts'o 		return -EFBIG;
4538f287a1a5STheodore Ts'o 
4539f287a1a5STheodore Ts'o 	if (i_blocks <= 0xffffffffffffULL) {
45400fc1b451SAneesh Kumar K.V 		/*
45410fc1b451SAneesh Kumar K.V 		 * i_blocks can be represented in a 48 bit variable
45420fc1b451SAneesh Kumar K.V 		 * as a multiple of 512 bytes
45430fc1b451SAneesh Kumar K.V 		 */
45448180a562SAneesh Kumar K.V 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
45450fc1b451SAneesh Kumar K.V 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
45468180a562SAneesh Kumar K.V 		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
45470fc1b451SAneesh Kumar K.V 	} else {
45488180a562SAneesh Kumar K.V 		ei->i_flags |= EXT4_HUGE_FILE_FL;
45498180a562SAneesh Kumar K.V 		/* i_blocks is stored in file system block size units */
45508180a562SAneesh Kumar K.V 		i_blocks = i_blocks >> (inode->i_blkbits - 9);
45518180a562SAneesh Kumar K.V 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
45528180a562SAneesh Kumar K.V 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
45530fc1b451SAneesh Kumar K.V 	}
4554f287a1a5STheodore Ts'o 	return 0;
45550fc1b451SAneesh Kumar K.V }
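/*
 * Encoding summary for ext4_inode_blocks_set(), mirroring the cases above:
 * a count that fits in 32 bits uses i_blocks_lo alone; up to 48 bits it
 * spills into i_blocks_high; beyond that EXT4_HUGE_FILE_FL is set and the
 * 48-bit field is reinterpreted as filesystem blocks, i.e. the 512-byte
 * count shifted right by (i_blkbits - 9).
 */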
45560fc1b451SAneesh Kumar K.V 
4557ac27a0ecSDave Kleikamp /*
4558ac27a0ecSDave Kleikamp  * Post the struct inode info into an on-disk inode location in the
4559ac27a0ecSDave Kleikamp  * buffer-cache.  This gobbles the caller's reference to the
4560ac27a0ecSDave Kleikamp  * buffer_head in the inode location struct.
4561ac27a0ecSDave Kleikamp  *
4562ac27a0ecSDave Kleikamp  * The caller must have write access to iloc->bh.
4563ac27a0ecSDave Kleikamp  */
4564617ba13bSMingming Cao static int ext4_do_update_inode(handle_t *handle,
4565ac27a0ecSDave Kleikamp 				struct inode *inode,
4566617ba13bSMingming Cao 				struct ext4_iloc *iloc)
4567ac27a0ecSDave Kleikamp {
4568617ba13bSMingming Cao 	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
4569617ba13bSMingming Cao 	struct ext4_inode_info *ei = EXT4_I(inode);
4570ac27a0ecSDave Kleikamp 	struct buffer_head *bh = iloc->bh;
4571ac27a0ecSDave Kleikamp 	int err = 0, rc, block;
4572ac27a0ecSDave Kleikamp 
4573ac27a0ecSDave Kleikamp 	/* For fields not tracked in the in-memory inode,
4574ac27a0ecSDave Kleikamp 	 * initialise them to zero for new inodes. */
4575617ba13bSMingming Cao 	if (ei->i_state & EXT4_STATE_NEW)
4576617ba13bSMingming Cao 		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
4577ac27a0ecSDave Kleikamp 
4578ff9ddf7eSJan Kara 	ext4_get_inode_flags(ei);
4579ac27a0ecSDave Kleikamp 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
4580ac27a0ecSDave Kleikamp 	if (!(test_opt(inode->i_sb, NO_UID32))) {
4581ac27a0ecSDave Kleikamp 		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
4582ac27a0ecSDave Kleikamp 		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
4583ac27a0ecSDave Kleikamp /*
4584ac27a0ecSDave Kleikamp  * Fix up interoperability with old kernels. Otherwise, old inodes get
4585ac27a0ecSDave Kleikamp  * re-used with the upper 16 bits of the uid/gid intact
4586ac27a0ecSDave Kleikamp  */
4587ac27a0ecSDave Kleikamp 		if (!ei->i_dtime) {
4588ac27a0ecSDave Kleikamp 			raw_inode->i_uid_high =
4589ac27a0ecSDave Kleikamp 				cpu_to_le16(high_16_bits(inode->i_uid));
4590ac27a0ecSDave Kleikamp 			raw_inode->i_gid_high =
4591ac27a0ecSDave Kleikamp 				cpu_to_le16(high_16_bits(inode->i_gid));
4592ac27a0ecSDave Kleikamp 		} else {
4593ac27a0ecSDave Kleikamp 			raw_inode->i_uid_high = 0;
4594ac27a0ecSDave Kleikamp 			raw_inode->i_gid_high = 0;
4595ac27a0ecSDave Kleikamp 		}
4596ac27a0ecSDave Kleikamp 	} else {
4597ac27a0ecSDave Kleikamp 		raw_inode->i_uid_low =
4598ac27a0ecSDave Kleikamp 			cpu_to_le16(fs_high2lowuid(inode->i_uid));
4599ac27a0ecSDave Kleikamp 		raw_inode->i_gid_low =
4600ac27a0ecSDave Kleikamp 			cpu_to_le16(fs_high2lowgid(inode->i_gid));
4601ac27a0ecSDave Kleikamp 		raw_inode->i_uid_high = 0;
4602ac27a0ecSDave Kleikamp 		raw_inode->i_gid_high = 0;
4603ac27a0ecSDave Kleikamp 	}
4604ac27a0ecSDave Kleikamp 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4605ef7f3835SKalpak Shah 
4606ef7f3835SKalpak Shah 	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
4607ef7f3835SKalpak Shah 	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4608ef7f3835SKalpak Shah 	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4609ef7f3835SKalpak Shah 	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4610ef7f3835SKalpak Shah 
46110fc1b451SAneesh Kumar K.V 	if (ext4_inode_blocks_set(handle, raw_inode, ei))
46120fc1b451SAneesh Kumar K.V 		goto out_brelse;
4613ac27a0ecSDave Kleikamp 	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4614267e4db9SAneesh Kumar K.V 	/* clear the migrate flag in the raw_inode */
4615267e4db9SAneesh Kumar K.V 	raw_inode->i_flags = cpu_to_le32(ei->i_flags & ~EXT4_EXT_MIGRATE);
46169b8f1f01SMingming Cao 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
46179b8f1f01SMingming Cao 	    cpu_to_le32(EXT4_OS_HURD))
4618a1ddeb7eSBadari Pulavarty 		raw_inode->i_file_acl_high =
4619a1ddeb7eSBadari Pulavarty 			cpu_to_le16(ei->i_file_acl >> 32);
46207973c0c1SAneesh Kumar K.V 	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4621a48380f7SAneesh Kumar K.V 	ext4_isize_set(raw_inode, ei->i_disksize);
4622ac27a0ecSDave Kleikamp 	if (ei->i_disksize > 0x7fffffffULL) {
4623ac27a0ecSDave Kleikamp 		struct super_block *sb = inode->i_sb;
4624617ba13bSMingming Cao 		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
4625617ba13bSMingming Cao 				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
4626617ba13bSMingming Cao 				EXT4_SB(sb)->s_es->s_rev_level ==
4627617ba13bSMingming Cao 				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
4628ac27a0ecSDave Kleikamp 			/* If this is the first large file
4629ac27a0ecSDave Kleikamp 			 * created, add a flag to the superblock.
4630ac27a0ecSDave Kleikamp 			 */
4631617ba13bSMingming Cao 			err = ext4_journal_get_write_access(handle,
4632617ba13bSMingming Cao 					EXT4_SB(sb)->s_sbh);
4633ac27a0ecSDave Kleikamp 			if (err)
4634ac27a0ecSDave Kleikamp 				goto out_brelse;
4635617ba13bSMingming Cao 			ext4_update_dynamic_rev(sb);
4636617ba13bSMingming Cao 			EXT4_SET_RO_COMPAT_FEATURE(sb,
4637617ba13bSMingming Cao 					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
4638ac27a0ecSDave Kleikamp 			sb->s_dirt = 1;
46390390131bSFrank Mayhar 			ext4_handle_sync(handle);
46400390131bSFrank Mayhar 			err = ext4_handle_dirty_metadata(handle, inode,
4641617ba13bSMingming Cao 					EXT4_SB(sb)->s_sbh);
4642ac27a0ecSDave Kleikamp 		}
4643ac27a0ecSDave Kleikamp 	}
4644ac27a0ecSDave Kleikamp 	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4645ac27a0ecSDave Kleikamp 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4646ac27a0ecSDave Kleikamp 		if (old_valid_dev(inode->i_rdev)) {
4647ac27a0ecSDave Kleikamp 			raw_inode->i_block[0] =
4648ac27a0ecSDave Kleikamp 				cpu_to_le32(old_encode_dev(inode->i_rdev));
4649ac27a0ecSDave Kleikamp 			raw_inode->i_block[1] = 0;
4650ac27a0ecSDave Kleikamp 		} else {
4651ac27a0ecSDave Kleikamp 			raw_inode->i_block[0] = 0;
4652ac27a0ecSDave Kleikamp 			raw_inode->i_block[1] =
4653ac27a0ecSDave Kleikamp 				cpu_to_le32(new_encode_dev(inode->i_rdev));
4654ac27a0ecSDave Kleikamp 			raw_inode->i_block[2] = 0;
4655ac27a0ecSDave Kleikamp 		}
4656617ba13bSMingming Cao 	} else for (block = 0; block < EXT4_N_BLOCKS; block++)
4657ac27a0ecSDave Kleikamp 		raw_inode->i_block[block] = ei->i_data[block];
4658ac27a0ecSDave Kleikamp 
465925ec56b5SJean Noel Cordenner 	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
466025ec56b5SJean Noel Cordenner 	if (ei->i_extra_isize) {
466125ec56b5SJean Noel Cordenner 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
466225ec56b5SJean Noel Cordenner 			raw_inode->i_version_hi =
466325ec56b5SJean Noel Cordenner 			cpu_to_le32(inode->i_version >> 32);
4664ac27a0ecSDave Kleikamp 		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
466525ec56b5SJean Noel Cordenner 	}
466625ec56b5SJean Noel Cordenner 
46670390131bSFrank Mayhar 	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
46680390131bSFrank Mayhar 	rc = ext4_handle_dirty_metadata(handle, inode, bh);
4669ac27a0ecSDave Kleikamp 	if (!err)
4670ac27a0ecSDave Kleikamp 		err = rc;
4671617ba13bSMingming Cao 	ei->i_state &= ~EXT4_STATE_NEW;
4672ac27a0ecSDave Kleikamp 
4673ac27a0ecSDave Kleikamp out_brelse:
4674ac27a0ecSDave Kleikamp 	brelse(bh);
4675617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
4676ac27a0ecSDave Kleikamp 	return err;
4677ac27a0ecSDave Kleikamp }
4678ac27a0ecSDave Kleikamp 
4679ac27a0ecSDave Kleikamp /*
4680617ba13bSMingming Cao  * ext4_write_inode()
4681ac27a0ecSDave Kleikamp  *
4682ac27a0ecSDave Kleikamp  * We are called from a few places:
4683ac27a0ecSDave Kleikamp  *
4684ac27a0ecSDave Kleikamp  * - Within generic_file_write() for O_SYNC files.
4685ac27a0ecSDave Kleikamp  *   Here, there will be no transaction running. We wait for any running
4686ac27a0ecSDave Kleikamp  *   transaction to commit.
4687ac27a0ecSDave Kleikamp  *
4688ac27a0ecSDave Kleikamp  * - Within sys_sync(), kupdate and such.
4689ac27a0ecSDave Kleikamp  *   We wait on commit, if told to.
4690ac27a0ecSDave Kleikamp  *
4691ac27a0ecSDave Kleikamp  * - Within prune_icache() (PF_MEMALLOC == true)
4692ac27a0ecSDave Kleikamp  *   Here we simply return.  We can't afford to block kswapd on the
4693ac27a0ecSDave Kleikamp  *   journal commit.
4694ac27a0ecSDave Kleikamp  *
4695ac27a0ecSDave Kleikamp  * In all cases it is actually safe for us to return without doing anything,
4696ac27a0ecSDave Kleikamp  * because the inode has been copied into a raw inode buffer in
4697617ba13bSMingming Cao  * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
4698ac27a0ecSDave Kleikamp  * knfsd.
4699ac27a0ecSDave Kleikamp  *
4700ac27a0ecSDave Kleikamp  * Note that we are absolutely dependent upon all inode dirtiers doing the
4701ac27a0ecSDave Kleikamp  * right thing: they *must* call mark_inode_dirty() after dirtying info in
4702ac27a0ecSDave Kleikamp  * which we are interested.
4703ac27a0ecSDave Kleikamp  *
4704ac27a0ecSDave Kleikamp  * It would be a bug for them to not do this.  The code:
4705ac27a0ecSDave Kleikamp  *
4706ac27a0ecSDave Kleikamp  *	mark_inode_dirty(inode)
4707ac27a0ecSDave Kleikamp  *	stuff();
4708ac27a0ecSDave Kleikamp  *	inode->i_size = expr;
4709ac27a0ecSDave Kleikamp  *
4710ac27a0ecSDave Kleikamp  * is in error because a kswapd-driven write_inode() could occur while
4711ac27a0ecSDave Kleikamp  * `stuff()' is running, and the new i_size will be lost.  Plus the inode
4712ac27a0ecSDave Kleikamp  * will no longer be on the superblock's dirty inode list.
4713ac27a0ecSDave Kleikamp  */
4714617ba13bSMingming Cao int ext4_write_inode(struct inode *inode, int wait)
4715ac27a0ecSDave Kleikamp {
4716ac27a0ecSDave Kleikamp 	if (current->flags & PF_MEMALLOC)
4717ac27a0ecSDave Kleikamp 		return 0;
4718ac27a0ecSDave Kleikamp 
4719617ba13bSMingming Cao 	if (ext4_journal_current_handle()) {
4720b38bd33aSMingming Cao 		jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
4721ac27a0ecSDave Kleikamp 		dump_stack();
4722ac27a0ecSDave Kleikamp 		return -EIO;
4723ac27a0ecSDave Kleikamp 	}
4724ac27a0ecSDave Kleikamp 
4725ac27a0ecSDave Kleikamp 	if (!wait)
4726ac27a0ecSDave Kleikamp 		return 0;
4727ac27a0ecSDave Kleikamp 
4728617ba13bSMingming Cao 	return ext4_force_commit(inode->i_sb);
4729ac27a0ecSDave Kleikamp }
4730ac27a0ecSDave Kleikamp 
47310390131bSFrank Mayhar int __ext4_write_dirty_metadata(struct inode *inode, struct buffer_head *bh)
47320390131bSFrank Mayhar {
47330390131bSFrank Mayhar 	int err = 0;
47340390131bSFrank Mayhar 
47350390131bSFrank Mayhar 	mark_buffer_dirty(bh);
47360390131bSFrank Mayhar 	if (inode && inode_needs_sync(inode)) {
47370390131bSFrank Mayhar 		sync_dirty_buffer(bh);
47380390131bSFrank Mayhar 		if (buffer_req(bh) && !buffer_uptodate(bh)) {
47390390131bSFrank Mayhar 			ext4_error(inode->i_sb, __func__,
47400390131bSFrank Mayhar 				   "IO error syncing inode, "
47410390131bSFrank Mayhar 				   "inode=%lu, block=%llu",
47420390131bSFrank Mayhar 				   inode->i_ino,
47430390131bSFrank Mayhar 				   (unsigned long long)bh->b_blocknr);
47440390131bSFrank Mayhar 			err = -EIO;
47450390131bSFrank Mayhar 		}
47460390131bSFrank Mayhar 	}
47470390131bSFrank Mayhar 	return err;
47480390131bSFrank Mayhar }
47490390131bSFrank Mayhar 
4750ac27a0ecSDave Kleikamp /*
4751617ba13bSMingming Cao  * ext4_setattr()
4752ac27a0ecSDave Kleikamp  *
4753ac27a0ecSDave Kleikamp  * Called from notify_change.
4754ac27a0ecSDave Kleikamp  *
4755ac27a0ecSDave Kleikamp  * We want to trap VFS attempts to truncate the file as soon as
4756ac27a0ecSDave Kleikamp  * possible.  In particular, we want to make sure that when the VFS
4757ac27a0ecSDave Kleikamp  * shrinks i_size, we put the inode on the orphan list and modify
4758ac27a0ecSDave Kleikamp  * i_disksize immediately, so that during the subsequent flushing of
4759ac27a0ecSDave Kleikamp  * dirty pages and freeing of disk blocks, we can guarantee that any
4760ac27a0ecSDave Kleikamp  * commit will leave the blocks being flushed in an unused state on
4761ac27a0ecSDave Kleikamp  * disk.  (On recovery, the inode will get truncated and the blocks will
4762ac27a0ecSDave Kleikamp  * be freed, so we have a strong guarantee that no future commit will
4763ac27a0ecSDave Kleikamp  * leave these blocks visible to the user.)
4764ac27a0ecSDave Kleikamp  *
4765678aaf48SJan Kara  * Another thing we have to assure is that if we are in ordered mode
4766678aaf48SJan Kara  * and inode is still attached to the committing transaction, we must
4767678aaf48SJan Kara  * start writeout of all the dirty pages which are being truncated.
4768678aaf48SJan Kara  * This way we are sure that all the data written in the previous
4769678aaf48SJan Kara  * transaction are already on disk (truncate waits for pages under
4770678aaf48SJan Kara  * writeback).
4771678aaf48SJan Kara  *
4772678aaf48SJan Kara  * Called with inode->i_mutex down.
4773ac27a0ecSDave Kleikamp  */
4774617ba13bSMingming Cao int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4775ac27a0ecSDave Kleikamp {
4776ac27a0ecSDave Kleikamp 	struct inode *inode = dentry->d_inode;
4777ac27a0ecSDave Kleikamp 	int error, rc = 0;
4778ac27a0ecSDave Kleikamp 	const unsigned int ia_valid = attr->ia_valid;
4779ac27a0ecSDave Kleikamp 
4780ac27a0ecSDave Kleikamp 	error = inode_change_ok(inode, attr);
4781ac27a0ecSDave Kleikamp 	if (error)
4782ac27a0ecSDave Kleikamp 		return error;
4783ac27a0ecSDave Kleikamp 
4784ac27a0ecSDave Kleikamp 	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
4785ac27a0ecSDave Kleikamp 		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
4786ac27a0ecSDave Kleikamp 		handle_t *handle;
4787ac27a0ecSDave Kleikamp 
4788ac27a0ecSDave Kleikamp 		/* (user+group)*(old+new) structure, inode write (sb,
4789ac27a0ecSDave Kleikamp 		 * inode block, ? - but truncate inode update has it) */
4790617ba13bSMingming Cao 		handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+
4791617ba13bSMingming Cao 					EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
4792ac27a0ecSDave Kleikamp 		if (IS_ERR(handle)) {
4793ac27a0ecSDave Kleikamp 			error = PTR_ERR(handle);
4794ac27a0ecSDave Kleikamp 			goto err_out;
4795ac27a0ecSDave Kleikamp 		}
4796a269eb18SJan Kara 		error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
4797ac27a0ecSDave Kleikamp 		if (error) {
4798617ba13bSMingming Cao 			ext4_journal_stop(handle);
4799ac27a0ecSDave Kleikamp 			return error;
4800ac27a0ecSDave Kleikamp 		}
4801ac27a0ecSDave Kleikamp 		/* Update corresponding info in inode so that everything is in
4802ac27a0ecSDave Kleikamp 		 * one transaction */
4803ac27a0ecSDave Kleikamp 		if (attr->ia_valid & ATTR_UID)
4804ac27a0ecSDave Kleikamp 			inode->i_uid = attr->ia_uid;
4805ac27a0ecSDave Kleikamp 		if (attr->ia_valid & ATTR_GID)
4806ac27a0ecSDave Kleikamp 			inode->i_gid = attr->ia_gid;
4807617ba13bSMingming Cao 		error = ext4_mark_inode_dirty(handle, inode);
4808617ba13bSMingming Cao 		ext4_journal_stop(handle);
4809ac27a0ecSDave Kleikamp 	}
4810ac27a0ecSDave Kleikamp 
4811e2b46574SEric Sandeen 	if (attr->ia_valid & ATTR_SIZE) {
4812e2b46574SEric Sandeen 		if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
4813e2b46574SEric Sandeen 			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4814e2b46574SEric Sandeen 
4815e2b46574SEric Sandeen 			if (attr->ia_size > sbi->s_bitmap_maxbytes) {
4816e2b46574SEric Sandeen 				error = -EFBIG;
4817e2b46574SEric Sandeen 				goto err_out;
4818e2b46574SEric Sandeen 			}
4819e2b46574SEric Sandeen 		}
4820e2b46574SEric Sandeen 	}
4821e2b46574SEric Sandeen 
4822ac27a0ecSDave Kleikamp 	if (S_ISREG(inode->i_mode) &&
4823ac27a0ecSDave Kleikamp 	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
4824ac27a0ecSDave Kleikamp 		handle_t *handle;
4825ac27a0ecSDave Kleikamp 
4826617ba13bSMingming Cao 		handle = ext4_journal_start(inode, 3);
4827ac27a0ecSDave Kleikamp 		if (IS_ERR(handle)) {
4828ac27a0ecSDave Kleikamp 			error = PTR_ERR(handle);
4829ac27a0ecSDave Kleikamp 			goto err_out;
4830ac27a0ecSDave Kleikamp 		}
4831ac27a0ecSDave Kleikamp 
4832617ba13bSMingming Cao 		error = ext4_orphan_add(handle, inode);
4833617ba13bSMingming Cao 		EXT4_I(inode)->i_disksize = attr->ia_size;
4834617ba13bSMingming Cao 		rc = ext4_mark_inode_dirty(handle, inode);
4835ac27a0ecSDave Kleikamp 		if (!error)
4836ac27a0ecSDave Kleikamp 			error = rc;
4837617ba13bSMingming Cao 		ext4_journal_stop(handle);
4838678aaf48SJan Kara 
4839678aaf48SJan Kara 		if (ext4_should_order_data(inode)) {
4840678aaf48SJan Kara 			error = ext4_begin_ordered_truncate(inode,
4841678aaf48SJan Kara 							    attr->ia_size);
4842678aaf48SJan Kara 			if (error) {
4843678aaf48SJan Kara 				/* Do as much error cleanup as possible */
4844678aaf48SJan Kara 				handle = ext4_journal_start(inode, 3);
4845678aaf48SJan Kara 				if (IS_ERR(handle)) {
4846678aaf48SJan Kara 					ext4_orphan_del(NULL, inode);
4847678aaf48SJan Kara 					goto err_out;
4848678aaf48SJan Kara 				}
4849678aaf48SJan Kara 				ext4_orphan_del(handle, inode);
4850678aaf48SJan Kara 				ext4_journal_stop(handle);
4851678aaf48SJan Kara 				goto err_out;
4852678aaf48SJan Kara 			}
4853678aaf48SJan Kara 		}
4854ac27a0ecSDave Kleikamp 	}
4855ac27a0ecSDave Kleikamp 
4856ac27a0ecSDave Kleikamp 	rc = inode_setattr(inode, attr);
4857ac27a0ecSDave Kleikamp 
4858617ba13bSMingming Cao 	/* If inode_setattr's call to ext4_truncate failed to get a
4859ac27a0ecSDave Kleikamp 	 * transaction handle at all, we need to clean up the in-core
4860ac27a0ecSDave Kleikamp 	 * orphan list manually. */
4861ac27a0ecSDave Kleikamp 	if (inode->i_nlink)
4862617ba13bSMingming Cao 		ext4_orphan_del(NULL, inode);
4863ac27a0ecSDave Kleikamp 
4864ac27a0ecSDave Kleikamp 	if (!rc && (ia_valid & ATTR_MODE))
4865617ba13bSMingming Cao 		rc = ext4_acl_chmod(inode);
4866ac27a0ecSDave Kleikamp 
4867ac27a0ecSDave Kleikamp err_out:
4868617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, error);
4869ac27a0ecSDave Kleikamp 	if (!error)
4870ac27a0ecSDave Kleikamp 		error = rc;
4871ac27a0ecSDave Kleikamp 	return error;
4872ac27a0ecSDave Kleikamp }
4873ac27a0ecSDave Kleikamp 
48743e3398a0SMingming Cao int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
48753e3398a0SMingming Cao 		 struct kstat *stat)
48763e3398a0SMingming Cao {
48773e3398a0SMingming Cao 	struct inode *inode;
48783e3398a0SMingming Cao 	unsigned long delalloc_blocks;
48793e3398a0SMingming Cao 
48803e3398a0SMingming Cao 	inode = dentry->d_inode;
48813e3398a0SMingming Cao 	generic_fillattr(inode, stat);
48823e3398a0SMingming Cao 
48833e3398a0SMingming Cao 	/*
48843e3398a0SMingming Cao 	 * We can't update i_blocks if the block allocation is delayed
48853e3398a0SMingming Cao 	 * otherwise in the case of system crash before the real block
48863e3398a0SMingming Cao 	 * allocation is done, we will have i_blocks inconsistent with
48873e3398a0SMingming Cao 	 * on-disk file blocks.
48883e3398a0SMingming Cao 	 * We always keep i_blocks updated together with real
48893e3398a0SMingming Cao 	 * allocation. But to not confuse with user, stat
48903e3398a0SMingming Cao 	 * allocation. But to avoid confusing userspace, stat
48913e3398a0SMingming Cao 	 * blocks for this file.
48923e3398a0SMingming Cao 	 */
48933e3398a0SMingming Cao 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
48943e3398a0SMingming Cao 	delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
48953e3398a0SMingming Cao 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
48963e3398a0SMingming Cao 
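	/*
	 * stat->blocks is in 512-byte units while delalloc_blocks counts
	 * filesystem blocks, hence the shift below; for example, with
	 * 4 KiB blocks every reserved-but-unwritten block adds 8 sectors.
	 */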
48973e3398a0SMingming Cao 	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
48983e3398a0SMingming Cao 	return 0;
48993e3398a0SMingming Cao }
4900ac27a0ecSDave Kleikamp 
4901a02908f1SMingming Cao static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
4902a02908f1SMingming Cao 				      int chunk)
4903ac27a0ecSDave Kleikamp {
4904a02908f1SMingming Cao 	int indirects;
4905ac27a0ecSDave Kleikamp 
4906a02908f1SMingming Cao 	/* if nrblocks are contiguous */
4907a02908f1SMingming Cao 	if (chunk) {
4908a02908f1SMingming Cao 		/*
4909a02908f1SMingming Cao 		 * With N contiguous data blocks, we need at most
4910a02908f1SMingming Cao 		 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks
4911a02908f1SMingming Cao 		 * 2 dindirect blocks
4912a02908f1SMingming Cao 		 * 1 tindirect block
4913a02908f1SMingming Cao 		 */
4914a02908f1SMingming Cao 		indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb);
4915a02908f1SMingming Cao 		return indirects + 3;
4916a02908f1SMingming Cao 	}
4917a02908f1SMingming Cao 	/*
4918a02908f1SMingming Cao 	 * If nrblocks are not contiguous then, in the worst case, each block
4919a02908f1SMingming Cao 	 * touches an indirect block, each indirect block touches a double
4920a02908f1SMingming Cao 	 * indirect block, plus one triple indirect block
4921a02908f1SMingming Cao 	 */
4922a02908f1SMingming Cao 	indirects = nrblocks * 2 + 1;
4923a02908f1SMingming Cao 	return indirects;
4924a02908f1SMingming Cao }
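/*
 * Worked example (hypothetical sizes): with 4 KiB blocks
 * EXT4_ADDR_PER_BLOCK is 1024, so a contiguous chunk of 1024 blocks
 * costs 1024/1024 + 3 == 4 indirect-tree credits, while 10 discontiguous
 * blocks are charged the pessimistic 10 * 2 + 1 == 21 credits.
 */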
4925a86c6181SAlex Tomas 
4926a02908f1SMingming Cao static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4927a02908f1SMingming Cao {
4928a02908f1SMingming Cao 	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
4929ac51d837STheodore Ts'o 		return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
4930ac51d837STheodore Ts'o 	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
4931a02908f1SMingming Cao }
4932ac51d837STheodore Ts'o 
4933a02908f1SMingming Cao /*
4934a02908f1SMingming Cao  * Account for index blocks, block group bitmaps and block group
4935a02908f1SMingming Cao  * descriptor blocks if we modify datablocks and index blocks.
4936a02908f1SMingming Cao  * In the worst case, the index blocks spread over different block groups.
4937a02908f1SMingming Cao  *
4938a02908f1SMingming Cao  * If datablocks are discontiguous, they may spread over different
4939a02908f1SMingming Cao  * block groups too. If they are contiguous, with flexbg, they could
4940a02908f1SMingming Cao  * still cross a block group boundary.
4941a02908f1SMingming Cao  *
4942a02908f1SMingming Cao  * Also account for superblock, inode, quota and xattr blocks
4943a02908f1SMingming Cao  */
4944a02908f1SMingming Cao int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4945a02908f1SMingming Cao {
4946a02908f1SMingming Cao 	int groups, gdpblocks;
4947a02908f1SMingming Cao 	int idxblocks;
4948a02908f1SMingming Cao 	int ret = 0;
4949a02908f1SMingming Cao 
4950a02908f1SMingming Cao 	/*
4951a02908f1SMingming Cao 	 * How many index blocks do we need to touch to modify nrblocks?
4952a02908f1SMingming Cao 	 * The "Chunk" flag indicates whether the nrblocks are
4953a02908f1SMingming Cao 	 * physically contiguous on disk
4954a02908f1SMingming Cao 	 *
4955a02908f1SMingming Cao 	 * Direct IO and fallocate call get_block to allocate
4956a02908f1SMingming Cao 	 * one single extent at a time, so they can set the "Chunk" flag
4957a02908f1SMingming Cao 	 */
4958a02908f1SMingming Cao 	idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
4959a02908f1SMingming Cao 
4960a02908f1SMingming Cao 	ret = idxblocks;
4961a02908f1SMingming Cao 
4962a02908f1SMingming Cao 	/*
4963a02908f1SMingming Cao 	 * Now let's see how many group bitmaps and group descriptors need
4964a02908f1SMingming Cao 	 * to be accounted for
4965a02908f1SMingming Cao 	 */
4966a02908f1SMingming Cao 	groups = idxblocks;
4967a02908f1SMingming Cao 	if (chunk)
4968a02908f1SMingming Cao 		groups += 1;
4969ac27a0ecSDave Kleikamp 	else
4970a02908f1SMingming Cao 		groups += nrblocks;
4971ac27a0ecSDave Kleikamp 
4972a02908f1SMingming Cao 	gdpblocks = groups;
4973a02908f1SMingming Cao 	if (groups > EXT4_SB(inode->i_sb)->s_groups_count)
4974a02908f1SMingming Cao 		groups = EXT4_SB(inode->i_sb)->s_groups_count;
4975a02908f1SMingming Cao 	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
4976a02908f1SMingming Cao 		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
4977a02908f1SMingming Cao 
4978a02908f1SMingming Cao 	/* bitmaps and block group descriptor blocks */
4979a02908f1SMingming Cao 	ret += groups + gdpblocks;
4980a02908f1SMingming Cao 
4981a02908f1SMingming Cao 	/* Blocks for super block, inode, quota and xattr blocks */
4982a02908f1SMingming Cao 	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
4983ac27a0ecSDave Kleikamp 
4984ac27a0ecSDave Kleikamp 	return ret;
4985ac27a0ecSDave Kleikamp }
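/*
 * Worked example (hypothetical: indirect-mapped inode, 4 KiB blocks,
 * plenty of block groups): a contiguous chunk of 64 blocks gives
 * idxblocks = 64/1024 + 3 = 3 and groups = idxblocks + 1 = 4, so the
 * result is 3 + 4 bitmap blocks + 4 descriptor blocks +
 * EXT4_META_TRANS_BLOCKS(sb).
 */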
4986ac27a0ecSDave Kleikamp 
4987ac27a0ecSDave Kleikamp /*
4988a02908f1SMingming Cao  * Calculate the total number of credits to reserve to fit
4989f3bd1f3fSMingming Cao  * the modification of a single page into a single transaction,
4990f3bd1f3fSMingming Cao  * which may include multiple chunks of block allocations.
4991a02908f1SMingming Cao  *
4992525f4ed8SMingming Cao  * This could be called via ext4_write_begin()
4993a02908f1SMingming Cao  *
4994525f4ed8SMingming Cao  * We need to consider the worst case, when
4995a02908f1SMingming Cao  * one new block is needed per extent.
4996a02908f1SMingming Cao  */
4997a02908f1SMingming Cao int ext4_writepage_trans_blocks(struct inode *inode)
4998a02908f1SMingming Cao {
4999a02908f1SMingming Cao 	int bpp = ext4_journal_blocks_per_page(inode);
5000a02908f1SMingming Cao 	int ret;
5001a02908f1SMingming Cao 
5002a02908f1SMingming Cao 	ret = ext4_meta_trans_blocks(inode, bpp, 0);
5003a02908f1SMingming Cao 
5004a02908f1SMingming Cao 	/* Account for data blocks for journalled mode */
5005a02908f1SMingming Cao 	if (ext4_should_journal_data(inode))
5006a02908f1SMingming Cao 		ret += bpp;
5007a02908f1SMingming Cao 	return ret;
5008a02908f1SMingming Cao }
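/*
 * For instance (assuming 4 KiB pages and 4 KiB blocks, i.e. one block per
 * page), this reserves ext4_meta_trans_blocks(inode, 1, 0) credits, plus
 * one extra data-block credit when the inode journals its data.
 */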
5009f3bd1f3fSMingming Cao 
5010f3bd1f3fSMingming Cao /*
5011f3bd1f3fSMingming Cao  * Calculate the journal credits for a chunk of data modification.
5012f3bd1f3fSMingming Cao  *
5013f3bd1f3fSMingming Cao  * This is called from DIO, fallocate or whoever calling
5014f3bd1f3fSMingming Cao  * ext4_get_blocks_wrap() to map/allocate a chunk of contiguous disk blocks.
5015f3bd1f3fSMingming Cao  *
5016f3bd1f3fSMingming Cao  * journal buffers for data blocks are not included here, as DIO
5017f3bd1f3fSMingming Cao  * and fallocate do not need to journal data buffers.
5018f3bd1f3fSMingming Cao  */
5019f3bd1f3fSMingming Cao int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
5020f3bd1f3fSMingming Cao {
5021f3bd1f3fSMingming Cao 	return ext4_meta_trans_blocks(inode, nrblocks, 1);
5022f3bd1f3fSMingming Cao }
5023f3bd1f3fSMingming Cao 
5024a02908f1SMingming Cao /*
5025617ba13bSMingming Cao  * The caller must have previously called ext4_reserve_inode_write().
5026ac27a0ecSDave Kleikamp  * Given this, we know that the caller already has write access to iloc->bh.
5027ac27a0ecSDave Kleikamp  */
5028617ba13bSMingming Cao int ext4_mark_iloc_dirty(handle_t *handle,
5029617ba13bSMingming Cao 		struct inode *inode, struct ext4_iloc *iloc)
5030ac27a0ecSDave Kleikamp {
5031ac27a0ecSDave Kleikamp 	int err = 0;
5032ac27a0ecSDave Kleikamp 
503325ec56b5SJean Noel Cordenner 	if (test_opt(inode->i_sb, I_VERSION))
503425ec56b5SJean Noel Cordenner 		inode_inc_iversion(inode);
503525ec56b5SJean Noel Cordenner 
5036ac27a0ecSDave Kleikamp 	/* the do_update_inode consumes one bh->b_count */
5037ac27a0ecSDave Kleikamp 	get_bh(iloc->bh);
5038ac27a0ecSDave Kleikamp 
5039dab291afSMingming Cao 	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
5040617ba13bSMingming Cao 	err = ext4_do_update_inode(handle, inode, iloc);
5041ac27a0ecSDave Kleikamp 	put_bh(iloc->bh);
5042ac27a0ecSDave Kleikamp 	return err;
5043ac27a0ecSDave Kleikamp }
5044ac27a0ecSDave Kleikamp 
5045ac27a0ecSDave Kleikamp /*
5046ac27a0ecSDave Kleikamp  * On success, we end up with an outstanding reference count against
5047ac27a0ecSDave Kleikamp  * iloc->bh.  This _must_ be cleaned up later.
5048ac27a0ecSDave Kleikamp  */
5049ac27a0ecSDave Kleikamp 
5050ac27a0ecSDave Kleikamp int
5051617ba13bSMingming Cao ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
5052617ba13bSMingming Cao 			 struct ext4_iloc *iloc)
5053ac27a0ecSDave Kleikamp {
50540390131bSFrank Mayhar 	int err;
50550390131bSFrank Mayhar 
5056617ba13bSMingming Cao 	err = ext4_get_inode_loc(inode, iloc);
5057ac27a0ecSDave Kleikamp 	if (!err) {
5058ac27a0ecSDave Kleikamp 		BUFFER_TRACE(iloc->bh, "get_write_access");
5059617ba13bSMingming Cao 		err = ext4_journal_get_write_access(handle, iloc->bh);
5060ac27a0ecSDave Kleikamp 		if (err) {
5061ac27a0ecSDave Kleikamp 			brelse(iloc->bh);
5062ac27a0ecSDave Kleikamp 			iloc->bh = NULL;
5063ac27a0ecSDave Kleikamp 		}
5064ac27a0ecSDave Kleikamp 	}
5065617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
5066ac27a0ecSDave Kleikamp 	return err;
5067ac27a0ecSDave Kleikamp }
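
#if 0
/*
 * Illustrative sketch (hypothetical helper) of the usual pairing:
 * ext4_reserve_inode_write() obtains the bh reference and write access,
 * the caller updates the in-core inode, and ext4_mark_iloc_dirty()
 * journals the change and drops the reference.  ext4_mark_inode_dirty()
 * below is the real in-tree user of this pattern.
 */
static int ext4_update_inode_sketch(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	int err;

	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		return err;

	/* ... modify in-core inode fields here ... */

	return ext4_mark_iloc_dirty(handle, inode, &iloc);
}
#endif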
5068ac27a0ecSDave Kleikamp 
5069ac27a0ecSDave Kleikamp /*
50706dd4ee7cSKalpak Shah  * Expand an inode by new_extra_isize bytes.
50716dd4ee7cSKalpak Shah  * Returns 0 on success or negative error number on failure.
50726dd4ee7cSKalpak Shah  */
50731d03ec98SAneesh Kumar K.V static int ext4_expand_extra_isize(struct inode *inode,
50741d03ec98SAneesh Kumar K.V 				   unsigned int new_extra_isize,
50751d03ec98SAneesh Kumar K.V 				   struct ext4_iloc iloc,
50761d03ec98SAneesh Kumar K.V 				   handle_t *handle)
50776dd4ee7cSKalpak Shah {
50786dd4ee7cSKalpak Shah 	struct ext4_inode *raw_inode;
50796dd4ee7cSKalpak Shah 	struct ext4_xattr_ibody_header *header;
50806dd4ee7cSKalpak Shah 	struct ext4_xattr_entry *entry;
50816dd4ee7cSKalpak Shah 
50826dd4ee7cSKalpak Shah 	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
50836dd4ee7cSKalpak Shah 		return 0;
50846dd4ee7cSKalpak Shah 
50856dd4ee7cSKalpak Shah 	raw_inode = ext4_raw_inode(&iloc);
50866dd4ee7cSKalpak Shah 
50876dd4ee7cSKalpak Shah 	header = IHDR(inode, raw_inode);
50886dd4ee7cSKalpak Shah 	entry = IFIRST(header);
50896dd4ee7cSKalpak Shah 
50906dd4ee7cSKalpak Shah 	/* No extended attributes present */
50916dd4ee7cSKalpak Shah 	if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
50926dd4ee7cSKalpak Shah 		header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
50936dd4ee7cSKalpak Shah 		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
50946dd4ee7cSKalpak Shah 			new_extra_isize);
50956dd4ee7cSKalpak Shah 		EXT4_I(inode)->i_extra_isize = new_extra_isize;
50966dd4ee7cSKalpak Shah 		return 0;
50976dd4ee7cSKalpak Shah 	}
50986dd4ee7cSKalpak Shah 
50996dd4ee7cSKalpak Shah 	/* try to expand with EAs present */
51006dd4ee7cSKalpak Shah 	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
51016dd4ee7cSKalpak Shah 					  raw_inode, handle);
51026dd4ee7cSKalpak Shah }
51036dd4ee7cSKalpak Shah 
51046dd4ee7cSKalpak Shah /*
5105ac27a0ecSDave Kleikamp  * What we do here is to mark the in-core inode as clean with respect to inode
5106ac27a0ecSDave Kleikamp  * dirtiness (it may still be data-dirty).
5107ac27a0ecSDave Kleikamp  * This means that the in-core inode may be reaped by prune_icache
5108ac27a0ecSDave Kleikamp  * without having to perform any I/O.  This is a very good thing,
5109ac27a0ecSDave Kleikamp  * because *any* task may call prune_icache - even ones which
5110ac27a0ecSDave Kleikamp  * have a transaction open against a different journal.
5111ac27a0ecSDave Kleikamp  *
5112ac27a0ecSDave Kleikamp  * Is this cheating?  Not really.  Sure, we haven't written the
5113ac27a0ecSDave Kleikamp  * inode out, but prune_icache isn't a user-visible syncing function.
5114ac27a0ecSDave Kleikamp  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
5115ac27a0ecSDave Kleikamp  * we start and wait on commits.
5116ac27a0ecSDave Kleikamp  *
5117ac27a0ecSDave Kleikamp  * Is this efficient/effective?  Well, we're being nice to the system
5118ac27a0ecSDave Kleikamp  * by cleaning up our inodes proactively so they can be reaped
5119ac27a0ecSDave Kleikamp  * without I/O.  But we are potentially leaving up to five seconds'
5120ac27a0ecSDave Kleikamp  * worth of inodes floating about which prune_icache wants us to
5121ac27a0ecSDave Kleikamp  * write out.  One way to fix that would be to get prune_icache()
5122ac27a0ecSDave Kleikamp  * to do a write_super() to free up some memory.  It has the desired
5123ac27a0ecSDave Kleikamp  * effect.
5124ac27a0ecSDave Kleikamp  */
5125617ba13bSMingming Cao int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
5126ac27a0ecSDave Kleikamp {
5127617ba13bSMingming Cao 	struct ext4_iloc iloc;
51286dd4ee7cSKalpak Shah 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
51296dd4ee7cSKalpak Shah 	static unsigned int mnt_count;
51306dd4ee7cSKalpak Shah 	int err, ret;
5131ac27a0ecSDave Kleikamp 
5132ac27a0ecSDave Kleikamp 	might_sleep();
5133617ba13bSMingming Cao 	err = ext4_reserve_inode_write(handle, inode, &iloc);
51340390131bSFrank Mayhar 	if (ext4_handle_valid(handle) &&
51350390131bSFrank Mayhar 	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
51366dd4ee7cSKalpak Shah 	    !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
51376dd4ee7cSKalpak Shah 		/*
51386dd4ee7cSKalpak Shah 		 * We need extra buffer credits since we may write into EA block
51396dd4ee7cSKalpak Shah 		 * with this same handle. If journal_extend fails, then it will
51406dd4ee7cSKalpak Shah 		 * only result in a minor loss of functionality for that inode.
51416dd4ee7cSKalpak Shah 		 * If this is felt to be critical, then e2fsck should be run to
51426dd4ee7cSKalpak Shah 		 * force a large enough s_min_extra_isize.
51436dd4ee7cSKalpak Shah 		 */
51446dd4ee7cSKalpak Shah 		if ((jbd2_journal_extend(handle,
51456dd4ee7cSKalpak Shah 			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
51466dd4ee7cSKalpak Shah 			ret = ext4_expand_extra_isize(inode,
51476dd4ee7cSKalpak Shah 						      sbi->s_want_extra_isize,
51486dd4ee7cSKalpak Shah 						      iloc, handle);
51496dd4ee7cSKalpak Shah 			if (ret) {
51506dd4ee7cSKalpak Shah 				EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
5151c1bddad9SAneesh Kumar K.V 				if (mnt_count !=
5152c1bddad9SAneesh Kumar K.V 					le16_to_cpu(sbi->s_es->s_mnt_count)) {
515346e665e9SHarvey Harrison 					ext4_warning(inode->i_sb, __func__,
51546dd4ee7cSKalpak Shah 					"Unable to expand inode %lu. Delete"
51556dd4ee7cSKalpak Shah 					" some EAs or run e2fsck.",
51566dd4ee7cSKalpak Shah 					inode->i_ino);
5157c1bddad9SAneesh Kumar K.V 					mnt_count =
5158c1bddad9SAneesh Kumar K.V 					  le16_to_cpu(sbi->s_es->s_mnt_count);
51596dd4ee7cSKalpak Shah 				}
51606dd4ee7cSKalpak Shah 			}
51616dd4ee7cSKalpak Shah 		}
51626dd4ee7cSKalpak Shah 	}
5163ac27a0ecSDave Kleikamp 	if (!err)
5164617ba13bSMingming Cao 		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
5165ac27a0ecSDave Kleikamp 	return err;
5166ac27a0ecSDave Kleikamp }
5167ac27a0ecSDave Kleikamp 
5168ac27a0ecSDave Kleikamp /*
5169617ba13bSMingming Cao  * ext4_dirty_inode() is called from __mark_inode_dirty()
5170ac27a0ecSDave Kleikamp  *
5171ac27a0ecSDave Kleikamp  * We're really interested in the case where a file is being extended.
5172ac27a0ecSDave Kleikamp  * i_size has been changed by generic_commit_write() and we thus need
5173ac27a0ecSDave Kleikamp  * to include the updated inode in the current transaction.
5174ac27a0ecSDave Kleikamp  *
5175a269eb18SJan Kara  * Also, vfs_dq_alloc_block() will always dirty the inode when blocks
5176ac27a0ecSDave Kleikamp  * are allocated to the file.
5177ac27a0ecSDave Kleikamp  *
5178ac27a0ecSDave Kleikamp  * If the inode is marked synchronous, we don't honour that here - doing
5179ac27a0ecSDave Kleikamp  * so would cause a commit on atime updates, which we don't bother doing.
5180ac27a0ecSDave Kleikamp  * We handle synchronous inodes at the highest possible level.
5181ac27a0ecSDave Kleikamp  */
5182617ba13bSMingming Cao void ext4_dirty_inode(struct inode *inode)
5183ac27a0ecSDave Kleikamp {
5184617ba13bSMingming Cao 	handle_t *current_handle = ext4_journal_current_handle();
5185ac27a0ecSDave Kleikamp 	handle_t *handle;
5186ac27a0ecSDave Kleikamp 
51870390131bSFrank Mayhar 	if (!ext4_handle_valid(current_handle)) {
51880390131bSFrank Mayhar 		ext4_mark_inode_dirty(current_handle, inode);
51890390131bSFrank Mayhar 		return;
51900390131bSFrank Mayhar 	}
51910390131bSFrank Mayhar 
5192617ba13bSMingming Cao 	handle = ext4_journal_start(inode, 2);
5193ac27a0ecSDave Kleikamp 	if (IS_ERR(handle))
5194ac27a0ecSDave Kleikamp 		goto out;
5195ac27a0ecSDave Kleikamp 	if (current_handle &&
5196ac27a0ecSDave Kleikamp 		current_handle->h_transaction != handle->h_transaction) {
5197ac27a0ecSDave Kleikamp 		/* This task has a transaction open against a different fs */
5198ac27a0ecSDave Kleikamp 		printk(KERN_EMERG "%s: transactions do not match!\n",
519946e665e9SHarvey Harrison 		       __func__);
5200ac27a0ecSDave Kleikamp 	} else {
5201ac27a0ecSDave Kleikamp 		jbd_debug(5, "marking dirty.  outer handle=%p\n",
5202ac27a0ecSDave Kleikamp 				current_handle);
5203617ba13bSMingming Cao 		ext4_mark_inode_dirty(handle, inode);
5204ac27a0ecSDave Kleikamp 	}
5205617ba13bSMingming Cao 	ext4_journal_stop(handle);
5206ac27a0ecSDave Kleikamp out:
5207ac27a0ecSDave Kleikamp 	return;
5208ac27a0ecSDave Kleikamp }
5209ac27a0ecSDave Kleikamp 
5210ac27a0ecSDave Kleikamp #if 0
5211ac27a0ecSDave Kleikamp /*
5212ac27a0ecSDave Kleikamp  * Bind an inode's backing buffer_head into this transaction, to prevent
5213ac27a0ecSDave Kleikamp  * it from being flushed to disk early.  Unlike
5214617ba13bSMingming Cao  * ext4_reserve_inode_write, this leaves behind no bh reference and
5215ac27a0ecSDave Kleikamp  * returns no iloc structure, so the caller needs to repeat the iloc
5216ac27a0ecSDave Kleikamp  * lookup to mark the inode dirty later.
5217ac27a0ecSDave Kleikamp  */
5218617ba13bSMingming Cao static int ext4_pin_inode(handle_t *handle, struct inode *inode)
5219ac27a0ecSDave Kleikamp {
5220617ba13bSMingming Cao 	struct ext4_iloc iloc;
5221ac27a0ecSDave Kleikamp 
5222ac27a0ecSDave Kleikamp 	int err = 0;
5223ac27a0ecSDave Kleikamp 	if (handle) {
5224617ba13bSMingming Cao 		err = ext4_get_inode_loc(inode, &iloc);
5225ac27a0ecSDave Kleikamp 		if (!err) {
5226ac27a0ecSDave Kleikamp 			BUFFER_TRACE(iloc.bh, "get_write_access");
5227dab291afSMingming Cao 			err = jbd2_journal_get_write_access(handle, iloc.bh);
5228ac27a0ecSDave Kleikamp 			if (!err)
52290390131bSFrank Mayhar 				err = ext4_handle_dirty_metadata(handle,
52300390131bSFrank Mayhar 								 inode,
5231ac27a0ecSDave Kleikamp 								 iloc.bh);
5232ac27a0ecSDave Kleikamp 			brelse(iloc.bh);
5233ac27a0ecSDave Kleikamp 		}
5234ac27a0ecSDave Kleikamp 	}
5235617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
5236ac27a0ecSDave Kleikamp 	return err;
5237ac27a0ecSDave Kleikamp }
5238ac27a0ecSDave Kleikamp #endif
5239ac27a0ecSDave Kleikamp 
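/*
 * Switch the inode's data-journalling mode.  The journal is flushed and
 * emptied before EXT4_JOURNAL_DATA_FL is flipped and the address_space
 * ops are switched, so no stale journalled data block is left that could
 * later need a revoke; see the comment in the function body.
 */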
5240617ba13bSMingming Cao int ext4_change_inode_journal_flag(struct inode *inode, int val)
5241ac27a0ecSDave Kleikamp {
5242ac27a0ecSDave Kleikamp 	journal_t *journal;
5243ac27a0ecSDave Kleikamp 	handle_t *handle;
5244ac27a0ecSDave Kleikamp 	int err;
5245ac27a0ecSDave Kleikamp 
5246ac27a0ecSDave Kleikamp 	/*
5247ac27a0ecSDave Kleikamp 	 * We have to be very careful here: changing a data block's
5248ac27a0ecSDave Kleikamp 	 * journaling status dynamically is dangerous.  If we write a
5249ac27a0ecSDave Kleikamp 	 * data block to the journal, change the status and then delete
5250ac27a0ecSDave Kleikamp 	 * that block, we risk forgetting to revoke the old log record
5251ac27a0ecSDave Kleikamp 	 * from the journal and so a subsequent replay can corrupt data.
5252ac27a0ecSDave Kleikamp 	 * So, first we make sure that the journal is empty and that
5253ac27a0ecSDave Kleikamp 	 * nobody is changing anything.
5254ac27a0ecSDave Kleikamp 	 */
5255ac27a0ecSDave Kleikamp 
5256617ba13bSMingming Cao 	journal = EXT4_JOURNAL(inode);
52570390131bSFrank Mayhar 	if (!journal)
52580390131bSFrank Mayhar 		return 0;
5259d699594dSDave Hansen 	if (is_journal_aborted(journal))
5260ac27a0ecSDave Kleikamp 		return -EROFS;
5261ac27a0ecSDave Kleikamp 
5262dab291afSMingming Cao 	jbd2_journal_lock_updates(journal);
5263dab291afSMingming Cao 	jbd2_journal_flush(journal);
5264ac27a0ecSDave Kleikamp 
5265ac27a0ecSDave Kleikamp 	/*
5266ac27a0ecSDave Kleikamp 	 * OK, there are no updates running now, and all cached data is
5267ac27a0ecSDave Kleikamp 	 * synced to disk.  We are now in a completely consistent state
5268ac27a0ecSDave Kleikamp 	 * which doesn't have anything in the journal, and we know that
5269ac27a0ecSDave Kleikamp 	 * no filesystem updates are running, so it is safe to modify
5270ac27a0ecSDave Kleikamp 	 * the inode's in-core data-journaling state flag now.
5271ac27a0ecSDave Kleikamp 	 */
5272ac27a0ecSDave Kleikamp 
5273ac27a0ecSDave Kleikamp 	if (val)
5274617ba13bSMingming Cao 		EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
5275ac27a0ecSDave Kleikamp 	else
5276617ba13bSMingming Cao 		EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
5277617ba13bSMingming Cao 	ext4_set_aops(inode);
5278ac27a0ecSDave Kleikamp 
5279dab291afSMingming Cao 	jbd2_journal_unlock_updates(journal);
5280ac27a0ecSDave Kleikamp 
5281ac27a0ecSDave Kleikamp 	/* Finally we can mark the inode as dirty. */
5282ac27a0ecSDave Kleikamp 
5283617ba13bSMingming Cao 	handle = ext4_journal_start(inode, 1);
5284ac27a0ecSDave Kleikamp 	if (IS_ERR(handle))
5285ac27a0ecSDave Kleikamp 		return PTR_ERR(handle);
5286ac27a0ecSDave Kleikamp 
5287617ba13bSMingming Cao 	err = ext4_mark_inode_dirty(handle, inode);
52880390131bSFrank Mayhar 	ext4_handle_sync(handle);
5289617ba13bSMingming Cao 	ext4_journal_stop(handle);
5290617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
5291ac27a0ecSDave Kleikamp 
5292ac27a0ecSDave Kleikamp 	return err;
5293ac27a0ecSDave Kleikamp }
52942e9ee850SAneesh Kumar K.V 
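/*
 * Callback for walk_page_buffers(): report whether a buffer is still
 * unmapped, so ext4_page_mkwrite() can tell whether block allocation is
 * needed for the faulted page.
 */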
52952e9ee850SAneesh Kumar K.V static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
52962e9ee850SAneesh Kumar K.V {
52972e9ee850SAneesh Kumar K.V 	return !buffer_mapped(bh);
52982e9ee850SAneesh Kumar K.V }
52992e9ee850SAneesh Kumar K.V 
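/*
 * Write-fault handler used as ext4's ->page_mkwrite callback (installed
 * from the mmap path in file.c): make sure the faulted page has its
 * blocks allocated before the page is allowed to become writable.
 */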
5300c2ec175cSNick Piggin int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
53012e9ee850SAneesh Kumar K.V {
5302c2ec175cSNick Piggin 	struct page *page = vmf->page;
53032e9ee850SAneesh Kumar K.V 	loff_t size;
53042e9ee850SAneesh Kumar K.V 	unsigned long len;
53052e9ee850SAneesh Kumar K.V 	int ret = -EINVAL;
530679f0be8dSAneesh Kumar K.V 	void *fsdata;
53072e9ee850SAneesh Kumar K.V 	struct file *file = vma->vm_file;
53082e9ee850SAneesh Kumar K.V 	struct inode *inode = file->f_path.dentry->d_inode;
53092e9ee850SAneesh Kumar K.V 	struct address_space *mapping = inode->i_mapping;
53102e9ee850SAneesh Kumar K.V 
53112e9ee850SAneesh Kumar K.V 	/*
53122e9ee850SAneesh Kumar K.V 	 * Get i_alloc_sem to stop truncates messing with the inode. We cannot
53132e9ee850SAneesh Kumar K.V 	 * get i_mutex because we are already holding mmap_sem.
53142e9ee850SAneesh Kumar K.V 	 */
53152e9ee850SAneesh Kumar K.V 	down_read(&inode->i_alloc_sem);
53162e9ee850SAneesh Kumar K.V 	size = i_size_read(inode);
53172e9ee850SAneesh Kumar K.V 	if (page->mapping != mapping || size <= page_offset(page)
53182e9ee850SAneesh Kumar K.V 	    || !PageUptodate(page)) {
53192e9ee850SAneesh Kumar K.V 		/* page got truncated from under us? */
53202e9ee850SAneesh Kumar K.V 		goto out_unlock;
53212e9ee850SAneesh Kumar K.V 	}
53222e9ee850SAneesh Kumar K.V 	ret = 0;
53232e9ee850SAneesh Kumar K.V 	if (PageMappedToDisk(page))
53242e9ee850SAneesh Kumar K.V 		goto out_unlock;
53252e9ee850SAneesh Kumar K.V 
53262e9ee850SAneesh Kumar K.V 	if (page->index == size >> PAGE_CACHE_SHIFT)
53272e9ee850SAneesh Kumar K.V 		len = size & ~PAGE_CACHE_MASK;
53282e9ee850SAneesh Kumar K.V 	else
53292e9ee850SAneesh Kumar K.V 		len = PAGE_CACHE_SIZE;
53302e9ee850SAneesh Kumar K.V 
53312e9ee850SAneesh Kumar K.V 	if (page_has_buffers(page)) {
53322e9ee850SAneesh Kumar K.V 		/* return if we have all the buffers mapped */
53332e9ee850SAneesh Kumar K.V 		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
53342e9ee850SAneesh Kumar K.V 				       ext4_bh_unmapped))
53352e9ee850SAneesh Kumar K.V 			goto out_unlock;
53362e9ee850SAneesh Kumar K.V 	}
53372e9ee850SAneesh Kumar K.V 	/*
53382e9ee850SAneesh Kumar K.V 	 * OK, we need to fill the hole... Do write_begin/write_end
53392e9ee850SAneesh Kumar K.V 	 * to do block allocation/reservation. We are not holding
53402e9ee850SAneesh Kumar K.V 	 * inode->i_mutex here, which allows parallel write_begin and
53412e9ee850SAneesh Kumar K.V 	 * write_end calls. lock_page prevents this from happening
53422e9ee850SAneesh Kumar K.V 	 * on the same page, though.
53432e9ee850SAneesh Kumar K.V 	 */
53442e9ee850SAneesh Kumar K.V 	ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
534579f0be8dSAneesh Kumar K.V 			len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
53462e9ee850SAneesh Kumar K.V 	if (ret < 0)
53472e9ee850SAneesh Kumar K.V 		goto out_unlock;
53482e9ee850SAneesh Kumar K.V 	ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
534979f0be8dSAneesh Kumar K.V 			len, len, page, fsdata);
53502e9ee850SAneesh Kumar K.V 	if (ret < 0)
53512e9ee850SAneesh Kumar K.V 		goto out_unlock;
53522e9ee850SAneesh Kumar K.V 	ret = 0;
53532e9ee850SAneesh Kumar K.V out_unlock:
5354c2ec175cSNick Piggin 	if (ret)
5355c2ec175cSNick Piggin 		ret = VM_FAULT_SIGBUS;
53562e9ee850SAneesh Kumar K.V 	up_read(&inode->i_alloc_sem);
53572e9ee850SAneesh Kumar K.V 	return ret;
53582e9ee850SAneesh Kumar K.V }
5359