xref: /openbmc/linux/fs/ext4/inode.c (revision bc965ab3f2b4b7bb898b11d61d25295c2053b8ac)
/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	return jbd2_journal_begin_ordered_truncate(&EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * The ext4 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (e.g. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 */
int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
			struct buffer_head *bh, ext4_fsblk_t blocknr)
{
	int err;

	might_sleep();

	BUFFER_TRACE(bh, "enter");

	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
		  "data mode %lx\n",
		  bh, is_metadata, inode->i_mode,
		  test_opt(inode->i_sb, DATA_FLAGS));

	/* Never use the revoke function if we are doing full data
	 * journaling: there is no need to, and a V1 superblock won't
	 * support it.  Otherwise, only skip the revoke on un-journaled
	 * data blocks. */

	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
	    (!is_metadata && !ext4_should_journal_data(inode))) {
		if (bh) {
			BUFFER_TRACE(bh, "call jbd2_journal_forget");
			return ext4_journal_forget(handle, bh);
		}
		return 0;
	}

	/*
	 * data!=journal && (is_metadata || should_journal_data(inode))
	 */
	BUFFER_TRACE(bh, "call ext4_journal_revoke");
	err = ext4_journal_revoke(handle, blocknr, bh);
	if (err)
		ext4_abort(inode->i_sb, __func__,
			   "error %d when attempting revoke", err);
	BUFFER_TRACE(bh, "exit");
	return err;
}

/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
	ext4_lblk_t needed;

	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

	/* Give ourselves just enough room to cope with inodes in which
	 * i_blocks is corrupt: we've seen disk corruptions in the past
	 * which resulted in random data in an inode which looked enough
	 * like a regular file for ext4 to try to delete it.  Things
	 * will go a bit crazy if that happens, but at least we should
	 * try not to panic the whole kernel. */
	if (needed < 2)
		needed = 2;

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT4_MAX_TRANS_DATA)
		needed = EXT4_MAX_TRANS_DATA;

	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext4_journal_start(inode, blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext4_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, we return 1 and the transaction must be restarted.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS)
		return 0;
	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
		return 0;
	return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
{
	jbd_debug(2, "restarting handle %p\n", handle);
	return ext4_journal_restart(handle, blocks_for_truncate(inode));
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_delete_inode (struct inode * inode)
{
	handle_t *handle;
	int err;

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		handle->h_sync = 1;
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb, __func__,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (handle->h_buffer_credits < 3) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb, __func__,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	clear_inode(inode);	/* We must guarantee clearing of inode... */
}

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

/**
 *	ext4_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: set this non-zero if the referred-to block is likely to be
 *	       followed (on disk) by an indirect block.
 *
 *	To store the locations of a file's data ext4 uses a data structure
 *	common for UNIX filesystems - a tree of pointers anchored in the
 *	inode, with data blocks at leaves and indirect blocks in intermediate
 *	nodes.  This function translates the block number into a path in that
 *	tree - the return value is the path length and @offsets[n] is the
 *	offset of the pointer to the (n+1)th node in the nth one. If @block
 *	is out of range (negative or too large) a warning is printed and zero
 *	is returned.
 *
 *	Note: function doesn't find node addresses, so no IO is needed. All
 *	we need to know is the capacity of indirect blocks (taken from the
 *	inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext4_block_to_path(struct inode *inode,
			ext4_lblk_t i_block,
			ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext4_warning (inode->i_sb, "ext4_block_to_path", "block < 0");
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "ext4_block_to_path",
				"block %lu > max",
				i_block + direct_blocks +
				indirect_blocks + double_blocks);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}

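/*
 * A concrete illustration of the mapping above, assuming a 4KB block size
 * (so EXT4_ADDR_PER_BLOCK == 1024) and EXT4_NDIR_BLOCKS == 12; the exact
 * thresholds scale with the block size:
 *
 *	i_block = 5       -> offsets = { 5 }                         depth 1
 *	i_block = 12      -> offsets = { EXT4_IND_BLOCK, 0 }         depth 2
 *	i_block = 1036    -> offsets = { EXT4_DIND_BLOCK, 0, 0 }     depth 3
 *	                     (1036 = 12 + 1024)
 *	i_block = 1049612 -> offsets = { EXT4_TIND_BLOCK, 0, 0, 0 }  depth 4
 *	                     (1049612 = 12 + 1024 + 1024^2)
 */
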
/**
 *	ext4_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Function fills the array of triples <key, p, bh> and returns %NULL
 *	if everything went OK or the pointer to the last filled triple
 *	(incomplete one) otherwise. Upon the return chain[i].key contains
 *	the number of (i+1)-th block in the chain (as it is stored in memory,
 *	i.e. little-endian 32-bit), chain[i].p contains the address of that
 *	number (it points into struct inode for i==0 and into the bh->b_data
 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *	block for i>0 and NULL for i==0. In other words, it holds the block
 *	numbers of the chain, addresses they were taken from (and where we can
 *	verify that chain did not change) and buffer_heads hosting these
 *	numbers.
 *
 *	Function stops when it stumbles upon zero pointer (absent block)
 *		(pointer to last triple returned, *@err == 0)
 *	or when it gets an IO error reading an indirect block
 *		(ditto, *@err == -EIO)
 *	or when it reads all @depth-1 indirect blocks successfully and finds
 *	the whole chain, all the way to the data (returns %NULL, *err == 0).
 *
 *      Need to be called with
 *      down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t  *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain (chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = -EIO;
no_block:
	return p;
}

/**
 *	ext4_find_near - find a place for allocation with sufficient locality
 *	@inode: owner
 *	@ind: descriptor of indirect block.
 *
 *	This function returns the preferred place for block allocation.
 *	It is used when the heuristic for sequential allocation fails.
 *	Rules are:
 *	  + if there is a block to the left of our position - allocate near it.
 *	  + if pointer will live in indirect block - allocate near that block.
 *	  + if pointer will live in inode - allocate in the same
 *	    cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.   The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 *	Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put it
	 * into the same cylinder group then.
	 */
	bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}

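/*
 * For illustration, assuming the common 32768 blocks per group: a caller
 * with pid 4103 gets colour = (4103 % 16) * (32768 / 16) = 7 * 2048 = 14336,
 * so the preferred goal becomes bg_start + 14336.
 */
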
/**
 *	ext4_find_goal - find a preferred place for allocation.
 *	@inode: owner
 *	@block:  block we want
 *	@partial: pointer to the last triple within a chain
 *
 *	Normally this function finds the preferred place for block allocation
 *	and returns it.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
		Indirect *partial)
{
	struct ext4_block_alloc_info *block_i;

	block_i =  EXT4_I(inode)->i_block_alloc_info;

	/*
	 * try the heuristic for sequential allocation,
	 * failing that at least try to get decent locality.
	 */
	if (block_i && (block == block_i->last_alloc_logical_block + 1)
		&& (block_i->last_alloc_physical_block != 0)) {
		return block_i->last_alloc_physical_block + 1;
	}

	return ext4_find_near(inode, partial);
}

/**
 *	ext4_blks_to_allocate: Look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 *	@branch: chain of indirect blocks
 *	@k: number of blocks needed for indirect blocks
 *	@blks: number of data blocks to be mapped.
 *	@blocks_to_boundary:  the offset in the indirect block
 *
 *	return the total number of blocks to be allocated, including the
 *	direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
		int blocks_to_boundary)
{
	unsigned long count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) have not been allocated
	 * yet, so it is clear the blocks on that path have not been
	 * allocated either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}

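/*
 * Example with illustrative numbers: with k == 2 missing indirect blocks,
 * blks == 8 and blocks_to_boundary == 5, the count is clamped at the
 * boundary and 6 direct blocks are requested.  With k == 0 and the next
 * mappings in branch[0] still zero, count likewise grows until it reaches
 * blks or passes the boundary, so at most blocks_to_boundary + 1 is
 * returned.
 */
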
541ac27a0ecSDave Kleikamp /**
542617ba13bSMingming Cao  *	ext4_alloc_blocks: multiple allocate blocks needed for a branch
543ac27a0ecSDave Kleikamp  *	@indirect_blks: the number of blocks need to allocate for indirect
544ac27a0ecSDave Kleikamp  *			blocks
545ac27a0ecSDave Kleikamp  *
546ac27a0ecSDave Kleikamp  *	@new_blocks: on return it will store the new block numbers for
547ac27a0ecSDave Kleikamp  *	the indirect blocks(if needed) and the first direct block,
548ac27a0ecSDave Kleikamp  *	@blks:	on return it will store the total number of allocated
549ac27a0ecSDave Kleikamp  *		direct blocks
550ac27a0ecSDave Kleikamp  */
551617ba13bSMingming Cao static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
5527061eba7SAneesh Kumar K.V 				ext4_lblk_t iblock, ext4_fsblk_t goal,
5537061eba7SAneesh Kumar K.V 				int indirect_blks, int blks,
554617ba13bSMingming Cao 				ext4_fsblk_t new_blocks[4], int *err)
555ac27a0ecSDave Kleikamp {
556ac27a0ecSDave Kleikamp 	int target, i;
5577061eba7SAneesh Kumar K.V 	unsigned long count = 0, blk_allocated = 0;
558ac27a0ecSDave Kleikamp 	int index = 0;
559617ba13bSMingming Cao 	ext4_fsblk_t current_block = 0;
560ac27a0ecSDave Kleikamp 	int ret = 0;
561ac27a0ecSDave Kleikamp 
562ac27a0ecSDave Kleikamp 	/*
563ac27a0ecSDave Kleikamp 	 * Here we try to allocate the requested multiple blocks at once,
564ac27a0ecSDave Kleikamp 	 * on a best-effort basis.
565ac27a0ecSDave Kleikamp 	 * To build a branch, we should allocate blocks for
566ac27a0ecSDave Kleikamp 	 * the indirect blocks(if not allocated yet), and at least
567ac27a0ecSDave Kleikamp 	 * the first direct block of this branch.  That's the
568ac27a0ecSDave Kleikamp 	 * minimum number of blocks need to allocate(required)
569ac27a0ecSDave Kleikamp 	 */
5707061eba7SAneesh Kumar K.V 	/* first we try to allocate the indirect blocks */
5717061eba7SAneesh Kumar K.V 	target = indirect_blks;
5727061eba7SAneesh Kumar K.V 	while (target > 0) {
573ac27a0ecSDave Kleikamp 		count = target;
574ac27a0ecSDave Kleikamp 		/* allocating blocks for indirect blocks and direct blocks */
5757061eba7SAneesh Kumar K.V 		current_block = ext4_new_meta_blocks(handle, inode,
5767061eba7SAneesh Kumar K.V 							goal, &count, err);
577ac27a0ecSDave Kleikamp 		if (*err)
578ac27a0ecSDave Kleikamp 			goto failed_out;
579ac27a0ecSDave Kleikamp 
580ac27a0ecSDave Kleikamp 		target -= count;
581ac27a0ecSDave Kleikamp 		/* allocate blocks for indirect blocks */
582ac27a0ecSDave Kleikamp 		while (index < indirect_blks && count) {
583ac27a0ecSDave Kleikamp 			new_blocks[index++] = current_block++;
584ac27a0ecSDave Kleikamp 			count--;
585ac27a0ecSDave Kleikamp 		}
5867061eba7SAneesh Kumar K.V 		if (count > 0) {
5877061eba7SAneesh Kumar K.V 			/*
5887061eba7SAneesh Kumar K.V 			 * save the new block number
5897061eba7SAneesh Kumar K.V 			 * for the first direct block
5907061eba7SAneesh Kumar K.V 			 */
5917061eba7SAneesh Kumar K.V 			new_blocks[index] = current_block;
5927061eba7SAneesh Kumar K.V 			printk(KERN_INFO "%s returned more blocks than "
5937061eba7SAneesh Kumar K.V 						"requested\n", __func__);
5947061eba7SAneesh Kumar K.V 			WARN_ON(1);
595ac27a0ecSDave Kleikamp 			break;
596ac27a0ecSDave Kleikamp 		}
5977061eba7SAneesh Kumar K.V 	}
598ac27a0ecSDave Kleikamp 
5997061eba7SAneesh Kumar K.V 	target = blks - count ;
6007061eba7SAneesh Kumar K.V 	blk_allocated = count;
6017061eba7SAneesh Kumar K.V 	if (!target)
6027061eba7SAneesh Kumar K.V 		goto allocated;
6037061eba7SAneesh Kumar K.V 	/* Now allocate data blocks */
6047061eba7SAneesh Kumar K.V 	count = target;
605654b4908SAneesh Kumar K.V 	/* allocating blocks for data blocks */
6067061eba7SAneesh Kumar K.V 	current_block = ext4_new_blocks(handle, inode, iblock,
6077061eba7SAneesh Kumar K.V 						goal, &count, err);
6087061eba7SAneesh Kumar K.V 	if (*err && (target == blks)) {
6097061eba7SAneesh Kumar K.V 		/*
6107061eba7SAneesh Kumar K.V 		 * if the allocation failed and we didn't allocate
6117061eba7SAneesh Kumar K.V 		 * any blocks before
6127061eba7SAneesh Kumar K.V 		 */
6137061eba7SAneesh Kumar K.V 		goto failed_out;
6147061eba7SAneesh Kumar K.V 	}
6157061eba7SAneesh Kumar K.V 	if (!*err) {
6167061eba7SAneesh Kumar K.V 		if (target == blks) {
6177061eba7SAneesh Kumar K.V 		/*
6187061eba7SAneesh Kumar K.V 		 * save the new block number
6197061eba7SAneesh Kumar K.V 		 * for the first direct block
6207061eba7SAneesh Kumar K.V 		 */
621ac27a0ecSDave Kleikamp 			new_blocks[index] = current_block;
6227061eba7SAneesh Kumar K.V 		}
6237061eba7SAneesh Kumar K.V 		blk_allocated += count;
6247061eba7SAneesh Kumar K.V 	}
6257061eba7SAneesh Kumar K.V allocated:
626ac27a0ecSDave Kleikamp 	/* total number of blocks allocated for direct blocks */
6277061eba7SAneesh Kumar K.V 	ret = blk_allocated;
628ac27a0ecSDave Kleikamp 	*err = 0;
629ac27a0ecSDave Kleikamp 	return ret;
630ac27a0ecSDave Kleikamp failed_out:
631ac27a0ecSDave Kleikamp 	for (i = 0; i <index; i++)
632c9de560dSAlex Tomas 		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
633ac27a0ecSDave Kleikamp 	return ret;
634ac27a0ecSDave Kleikamp }
635ac27a0ecSDave Kleikamp 
636ac27a0ecSDave Kleikamp /**
637617ba13bSMingming Cao  *	ext4_alloc_branch - allocate and set up a chain of blocks.
638ac27a0ecSDave Kleikamp  *	@inode: owner
639ac27a0ecSDave Kleikamp  *	@indirect_blks: number of allocated indirect blocks
640ac27a0ecSDave Kleikamp  *	@blks: number of allocated direct blocks
641ac27a0ecSDave Kleikamp  *	@offsets: offsets (in the blocks) to store the pointers to next.
642ac27a0ecSDave Kleikamp  *	@branch: place to store the chain in.
643ac27a0ecSDave Kleikamp  *
644ac27a0ecSDave Kleikamp  *	This function allocates blocks, zeroes out all but the last one,
645ac27a0ecSDave Kleikamp  *	links them into chain and (if we are synchronous) writes them to disk.
646ac27a0ecSDave Kleikamp  *	In other words, it prepares a branch that can be spliced onto the
647ac27a0ecSDave Kleikamp  *	inode. It stores the information about that chain in the branch[], in
648617ba13bSMingming Cao  *	the same format as ext4_get_branch() would do. We are calling it after
649ac27a0ecSDave Kleikamp  *	we had read the existing part of chain and partial points to the last
650ac27a0ecSDave Kleikamp  *	triple of that (one with zero ->key). Upon the exit we have the same
651617ba13bSMingming Cao  *	picture as after the successful ext4_get_block(), except that in one
652ac27a0ecSDave Kleikamp  *	place chain is disconnected - *branch->p is still zero (we did not
653ac27a0ecSDave Kleikamp  *	set the last link), but branch->key contains the number that should
654ac27a0ecSDave Kleikamp  *	be placed into *branch->p to fill that gap.
655ac27a0ecSDave Kleikamp  *
656ac27a0ecSDave Kleikamp  *	If allocation fails we free all blocks we've allocated (and forget
657ac27a0ecSDave Kleikamp  *	their buffer_heads) and return the error value the from failed
658617ba13bSMingming Cao  *	ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
659ac27a0ecSDave Kleikamp  *	as described above and return 0.
660ac27a0ecSDave Kleikamp  */
661617ba13bSMingming Cao static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
6627061eba7SAneesh Kumar K.V 				ext4_lblk_t iblock, int indirect_blks,
6637061eba7SAneesh Kumar K.V 				int *blks, ext4_fsblk_t goal,
664725d26d3SAneesh Kumar K.V 				ext4_lblk_t *offsets, Indirect *branch)
665ac27a0ecSDave Kleikamp {
666ac27a0ecSDave Kleikamp 	int blocksize = inode->i_sb->s_blocksize;
667ac27a0ecSDave Kleikamp 	int i, n = 0;
668ac27a0ecSDave Kleikamp 	int err = 0;
669ac27a0ecSDave Kleikamp 	struct buffer_head *bh;
670ac27a0ecSDave Kleikamp 	int num;
671617ba13bSMingming Cao 	ext4_fsblk_t new_blocks[4];
672617ba13bSMingming Cao 	ext4_fsblk_t current_block;
673ac27a0ecSDave Kleikamp 
6747061eba7SAneesh Kumar K.V 	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
675ac27a0ecSDave Kleikamp 				*blks, new_blocks, &err);
676ac27a0ecSDave Kleikamp 	if (err)
677ac27a0ecSDave Kleikamp 		return err;
678ac27a0ecSDave Kleikamp 
679ac27a0ecSDave Kleikamp 	branch[0].key = cpu_to_le32(new_blocks[0]);
680ac27a0ecSDave Kleikamp 	/*
681ac27a0ecSDave Kleikamp 	 * metadata blocks and data blocks are allocated.
682ac27a0ecSDave Kleikamp 	 */
683ac27a0ecSDave Kleikamp 	for (n = 1; n <= indirect_blks;  n++) {
684ac27a0ecSDave Kleikamp 		/*
685ac27a0ecSDave Kleikamp 		 * Get buffer_head for parent block, zero it out
686ac27a0ecSDave Kleikamp 		 * and set the pointer to new one, then send
687ac27a0ecSDave Kleikamp 		 * parent to disk.
688ac27a0ecSDave Kleikamp 		 */
689ac27a0ecSDave Kleikamp 		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
690ac27a0ecSDave Kleikamp 		branch[n].bh = bh;
691ac27a0ecSDave Kleikamp 		lock_buffer(bh);
692ac27a0ecSDave Kleikamp 		BUFFER_TRACE(bh, "call get_create_access");
693617ba13bSMingming Cao 		err = ext4_journal_get_create_access(handle, bh);
694ac27a0ecSDave Kleikamp 		if (err) {
695ac27a0ecSDave Kleikamp 			unlock_buffer(bh);
696ac27a0ecSDave Kleikamp 			brelse(bh);
697ac27a0ecSDave Kleikamp 			goto failed;
698ac27a0ecSDave Kleikamp 		}
699ac27a0ecSDave Kleikamp 
700ac27a0ecSDave Kleikamp 		memset(bh->b_data, 0, blocksize);
701ac27a0ecSDave Kleikamp 		branch[n].p = (__le32 *) bh->b_data + offsets[n];
702ac27a0ecSDave Kleikamp 		branch[n].key = cpu_to_le32(new_blocks[n]);
703ac27a0ecSDave Kleikamp 		*branch[n].p = branch[n].key;
704ac27a0ecSDave Kleikamp 		if ( n == indirect_blks) {
705ac27a0ecSDave Kleikamp 			current_block = new_blocks[n];
706ac27a0ecSDave Kleikamp 			/*
707ac27a0ecSDave Kleikamp 			 * End of chain, update the last new metablock of
708ac27a0ecSDave Kleikamp 			 * the chain to point to the new allocated
709ac27a0ecSDave Kleikamp 			 * data blocks numbers
710ac27a0ecSDave Kleikamp 			 */
711ac27a0ecSDave Kleikamp 			for (i=1; i < num; i++)
712ac27a0ecSDave Kleikamp 				*(branch[n].p + i) = cpu_to_le32(++current_block);
713ac27a0ecSDave Kleikamp 		}
714ac27a0ecSDave Kleikamp 		BUFFER_TRACE(bh, "marking uptodate");
715ac27a0ecSDave Kleikamp 		set_buffer_uptodate(bh);
716ac27a0ecSDave Kleikamp 		unlock_buffer(bh);
717ac27a0ecSDave Kleikamp 
718617ba13bSMingming Cao 		BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
719617ba13bSMingming Cao 		err = ext4_journal_dirty_metadata(handle, bh);
720ac27a0ecSDave Kleikamp 		if (err)
721ac27a0ecSDave Kleikamp 			goto failed;
722ac27a0ecSDave Kleikamp 	}
723ac27a0ecSDave Kleikamp 	*blks = num;
724ac27a0ecSDave Kleikamp 	return err;
725ac27a0ecSDave Kleikamp failed:
726ac27a0ecSDave Kleikamp 	/* Allocation failed, free what we already allocated */
727ac27a0ecSDave Kleikamp 	for (i = 1; i <= n ; i++) {
728dab291afSMingming Cao 		BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
729617ba13bSMingming Cao 		ext4_journal_forget(handle, branch[i].bh);
730ac27a0ecSDave Kleikamp 	}
731ac27a0ecSDave Kleikamp 	for (i = 0; i <indirect_blks; i++)
732c9de560dSAlex Tomas 		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
733ac27a0ecSDave Kleikamp 
734c9de560dSAlex Tomas 	ext4_free_blocks(handle, inode, new_blocks[i], num, 0);
735ac27a0ecSDave Kleikamp 
736ac27a0ecSDave Kleikamp 	return err;
737ac27a0ecSDave Kleikamp }
738ac27a0ecSDave Kleikamp 
/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext4_alloc_branch)
 * @where: location of missing link
 * @num:   number of indirect blocks we are adding
 * @blks:  number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to the new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			ext4_lblk_t block, Indirect *where, int num, int blks)
{
	int i;
	int err = 0;
	struct ext4_block_alloc_info *block_i;
	ext4_fsblk_t current_block;

	block_i = EXT4_I(inode)->i_block_alloc_info;
	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just-allocated
	 * direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i ) = cpu_to_le32(current_block++);
	}

	/*
	 * update the most recently allocated logical & physical block
	 * in i_block_alloc_info, to assist in finding the proper goal block
	 * for the next allocation
	 */
	if (block_i) {
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
				le32_to_cpu(where[num].key) + blks - 1;
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);

	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_journal_dirty_metadata");
		err = ext4_journal_dirty_metadata(handle, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 * Inode was dirtied above.
		 */
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
		ext4_journal_forget(handle, where[i].bh);
		ext4_free_blocks(handle, inode,
					le32_to_cpu(where[i-1].key), 1, 0);
	}
	ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 0);

	return err;
}

/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * Need to be called with
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
 */
int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
		ext4_lblk_t iblock, unsigned long maxblocks,
		struct buffer_head *bh_result,
		int create, int extend_disksize)
{
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int count = 0;
	ext4_fsblk_t first_block = 0;
	loff_t disksize;


	J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
	J_ASSERT(handle != NULL || create == 0);
	depth = ext4_block_to_path(inode, iblock, offsets,
					&blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result);
		count++;
		/*map more blocks*/
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.  Lazily initialize the block
	 * allocation info here if necessary
	*/
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext4_init_block_alloc_info(inode);

	goal = ext4_find_goal(inode, iblock, partial);

	/* the number of blocks we need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext4_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);
	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
					&count, goal,
					offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case.  --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, inode, iblock,
					partial, indirect_blks, count);
	/*
	 * i_disksize growing is protected by i_data_sem.  Don't forget to
	 * protect it if you're about to implement concurrent
	 * ext4_get_block() -bzzz
	*/
	if (!err && extend_disksize) {
		disksize = ((loff_t) iblock + count) << inode->i_blkbits;
		if (disksize > i_size_read(inode))
			disksize = i_size_read(inode);
		if (disksize > ei->i_disksize)
			ei->i_disksize = disksize;
	}
	if (err)
		goto cleanup;

	set_buffer_new(bh_result);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
	BUFFER_TRACE(bh_result, "returned");
out:
	return err;
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096
/*
 * Number of credits we need for writing DIO_MAX_BLOCKS:
 * We need sb + group descriptor + bitmap + inode -> 4
 * For B blocks with A block pointers per block we need:
 * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
 * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
 */
#define DIO_CREDITS 25
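/*
 * Spelled out for the numbers above: 4 (sb, group descriptor, bitmap, inode)
 * + 1 (triple ind.) + 2 (doubly ind., 4096/256/256 rounds down to 0, plus 2)
 * + 18 (indirect, 4096/256 + 2) = 25.
 */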
993ac27a0ecSDave Kleikamp 
994f5ab0d1fSMingming Cao 
995f5ab0d1fSMingming Cao /*
996f5ab0d1fSMingming Cao  *
997f5ab0d1fSMingming Cao  * ext4_get_blocks_wrap() - get_block() wrapper function
998f5ab0d1fSMingming Cao  *
999f5ab0d1fSMingming Cao  * It does a lookup first, and returns if the blocks are already mapped.
1000f5ab0d1fSMingming Cao  * Otherwise it takes the write lock of i_data_sem, allocates blocks,
1001f5ab0d1fSMingming Cao  * stores the allocated blocks in the result buffer head and marks it
1002f5ab0d1fSMingming Cao  * mapped.
1003f5ab0d1fSMingming Cao  *
1004f5ab0d1fSMingming Cao  * If the file is extent based, it calls ext4_ext_get_blocks();
1005f5ab0d1fSMingming Cao  * otherwise it calls ext4_get_blocks_handle() to handle
1006f5ab0d1fSMingming Cao  * indirect-mapped files.
1007f5ab0d1fSMingming Cao  *
1008f5ab0d1fSMingming Cao  * On success, it returns the number of blocks mapped or allocated.
1009f5ab0d1fSMingming Cao  * If create == 0 and the blocks are pre-allocated and uninitialized,
1010f5ab0d1fSMingming Cao  * the result buffer head is unmapped. If create == 1, it makes sure
1011f5ab0d1fSMingming Cao  * the buffer head is mapped.
1012f5ab0d1fSMingming Cao  *
1013f5ab0d1fSMingming Cao  * It returns 0 if a plain lookup failed (blocks have not been allocated);
1014f5ab0d1fSMingming Cao  * in that case the buffer head is unmapped.
1015f5ab0d1fSMingming Cao  *
1016f5ab0d1fSMingming Cao  * It returns the error in case of allocation failure.
1017f5ab0d1fSMingming Cao  */
10180e855ac8SAneesh Kumar K.V int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
10190e855ac8SAneesh Kumar K.V 			unsigned long max_blocks, struct buffer_head *bh,
1020d2a17637SMingming Cao 			int create, int extend_disksize, int flag)
10210e855ac8SAneesh Kumar K.V {
10220e855ac8SAneesh Kumar K.V 	int retval;
1023f5ab0d1fSMingming Cao 
1024f5ab0d1fSMingming Cao 	clear_buffer_mapped(bh);
1025f5ab0d1fSMingming Cao 
10264df3d265SAneesh Kumar K.V 	/*
10274df3d265SAneesh Kumar K.V 	 * Try to see if we can get the block without requesting
10284df3d265SAneesh Kumar K.V 	 * a new file system block.
10294df3d265SAneesh Kumar K.V 	 */
10300e855ac8SAneesh Kumar K.V 	down_read((&EXT4_I(inode)->i_data_sem));
10314df3d265SAneesh Kumar K.V 	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
10324df3d265SAneesh Kumar K.V 		retval =  ext4_ext_get_blocks(handle, inode, block, max_blocks,
10334df3d265SAneesh Kumar K.V 				bh, 0, 0);
10344df3d265SAneesh Kumar K.V 	} else {
10354df3d265SAneesh Kumar K.V 		retval = ext4_get_blocks_handle(handle,
10364df3d265SAneesh Kumar K.V 				inode, block, max_blocks, bh, 0, 0);
10370e855ac8SAneesh Kumar K.V 	}
10384df3d265SAneesh Kumar K.V 	up_read((&EXT4_I(inode)->i_data_sem));
1039f5ab0d1fSMingming Cao 
1040f5ab0d1fSMingming Cao 	/* If it is only a block(s) lookup */
1041f5ab0d1fSMingming Cao 	if (!create)
10424df3d265SAneesh Kumar K.V 		return retval;
10434df3d265SAneesh Kumar K.V 
10444df3d265SAneesh Kumar K.V 	/*
1045f5ab0d1fSMingming Cao 	 * Return if the blocks have already been allocated.
1046f5ab0d1fSMingming Cao 	 *
1047f5ab0d1fSMingming Cao 	 * Note that if blocks have been preallocated,
1048f5ab0d1fSMingming Cao 	 * ext4_ext_get_blocks() returns with create = 0
1049f5ab0d1fSMingming Cao 	 * and the buffer head unmapped.
1050f5ab0d1fSMingming Cao 	 */
1051f5ab0d1fSMingming Cao 	if (retval > 0 && buffer_mapped(bh))
1052f5ab0d1fSMingming Cao 		return retval;
1053f5ab0d1fSMingming Cao 
1054f5ab0d1fSMingming Cao 	/*
1055f5ab0d1fSMingming Cao 	 * New block allocation and/or writing to an uninitialized extent
1056f5ab0d1fSMingming Cao 	 * will possibly result in updating i_data, so we take
1057f5ab0d1fSMingming Cao 	 * the write lock of i_data_sem and call get_blocks()
1058f5ab0d1fSMingming Cao 	 * with the create == 1 flag.
10594df3d265SAneesh Kumar K.V 	 */
10604df3d265SAneesh Kumar K.V 	down_write((&EXT4_I(inode)->i_data_sem));
1061d2a17637SMingming Cao 
1062d2a17637SMingming Cao 	/*
1063d2a17637SMingming Cao 	 * If the caller is from the delayed allocation writeout path,
1064d2a17637SMingming Cao 	 * we have already reserved fs blocks for allocation;
1065d2a17637SMingming Cao 	 * let the underlying get_block() function know to
1066d2a17637SMingming Cao 	 * avoid double accounting.
1067d2a17637SMingming Cao 	 */
1068d2a17637SMingming Cao 	if (flag)
1069d2a17637SMingming Cao 		EXT4_I(inode)->i_delalloc_reserved_flag = 1;
10704df3d265SAneesh Kumar K.V 	/*
10714df3d265SAneesh Kumar K.V 	 * We need to check for EXT4 here because migrate
10724df3d265SAneesh Kumar K.V 	 * could have changed the inode type in between
10734df3d265SAneesh Kumar K.V 	 */
10740e855ac8SAneesh Kumar K.V 	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
10750e855ac8SAneesh Kumar K.V 		retval =  ext4_ext_get_blocks(handle, inode, block, max_blocks,
10760e855ac8SAneesh Kumar K.V 				bh, create, extend_disksize);
10770e855ac8SAneesh Kumar K.V 	} else {
10780e855ac8SAneesh Kumar K.V 		retval = ext4_get_blocks_handle(handle, inode, block,
10790e855ac8SAneesh Kumar K.V 				max_blocks, bh, create, extend_disksize);
1080267e4db9SAneesh Kumar K.V 
1081267e4db9SAneesh Kumar K.V 		if (retval > 0 && buffer_new(bh)) {
1082267e4db9SAneesh Kumar K.V 			/*
1083267e4db9SAneesh Kumar K.V 			 * We allocated new blocks which will result in
1084267e4db9SAneesh Kumar K.V 			 * i_data's format changing.  Force the migrate
1085267e4db9SAneesh Kumar K.V 			 * to fail by clearing migrate flags
1086267e4db9SAneesh Kumar K.V 			 */
1087267e4db9SAneesh Kumar K.V 			EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags &
1088267e4db9SAneesh Kumar K.V 							~EXT4_EXT_MIGRATE;
1089267e4db9SAneesh Kumar K.V 		}
10900e855ac8SAneesh Kumar K.V 	}
1091d2a17637SMingming Cao 
1092d2a17637SMingming Cao 	if (flag) {
1093d2a17637SMingming Cao 		EXT4_I(inode)->i_delalloc_reserved_flag = 0;
1094d2a17637SMingming Cao 		/*
1095d2a17637SMingming Cao 		 * Update reserved blocks/metadata blocks
1096d2a17637SMingming Cao 		 * after successful block allocation
1097d2a17637SMingming Cao 		 * which were deferred till now
1098d2a17637SMingming Cao 		 */
1099d2a17637SMingming Cao 		if ((retval > 0) && buffer_delay(bh))
1100d2a17637SMingming Cao 			ext4_da_release_space(inode, retval, 0);
1101d2a17637SMingming Cao 	}
1102d2a17637SMingming Cao 
11030e855ac8SAneesh Kumar K.V 	up_write((&EXT4_I(inode)->i_data_sem));
11040e855ac8SAneesh Kumar K.V 	return retval;
11050e855ac8SAneesh Kumar K.V }
11060e855ac8SAneesh Kumar K.V 
1107617ba13bSMingming Cao static int ext4_get_block(struct inode *inode, sector_t iblock,
1108ac27a0ecSDave Kleikamp 			struct buffer_head *bh_result, int create)
1109ac27a0ecSDave Kleikamp {
11103e4fdaf8SDmitriy Monakhov 	handle_t *handle = ext4_journal_current_handle();
11117fb5409dSJan Kara 	int ret = 0, started = 0;
1112ac27a0ecSDave Kleikamp 	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
1113ac27a0ecSDave Kleikamp 
11147fb5409dSJan Kara 	if (create && !handle) {
11157fb5409dSJan Kara 		/* Direct IO write... */
11167fb5409dSJan Kara 		if (max_blocks > DIO_MAX_BLOCKS)
11177fb5409dSJan Kara 			max_blocks = DIO_MAX_BLOCKS;
11187fb5409dSJan Kara 		handle = ext4_journal_start(inode, DIO_CREDITS +
11197fb5409dSJan Kara 			      2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb));
11207fb5409dSJan Kara 		if (IS_ERR(handle)) {
1121ac27a0ecSDave Kleikamp 			ret = PTR_ERR(handle);
11227fb5409dSJan Kara 			goto out;
11237fb5409dSJan Kara 		}
11247fb5409dSJan Kara 		started = 1;
1125ac27a0ecSDave Kleikamp 	}
1126ac27a0ecSDave Kleikamp 
1127a86c6181SAlex Tomas 	ret = ext4_get_blocks_wrap(handle, inode, iblock,
1128d2a17637SMingming Cao 					max_blocks, bh_result, create, 0, 0);
1129ac27a0ecSDave Kleikamp 	if (ret > 0) {
1130ac27a0ecSDave Kleikamp 		bh_result->b_size = (ret << inode->i_blkbits);
1131ac27a0ecSDave Kleikamp 		ret = 0;
1132ac27a0ecSDave Kleikamp 	}
11337fb5409dSJan Kara 	if (started)
11347fb5409dSJan Kara 		ext4_journal_stop(handle);
11357fb5409dSJan Kara out:
1136ac27a0ecSDave Kleikamp 	return ret;
1137ac27a0ecSDave Kleikamp }
1138ac27a0ecSDave Kleikamp 
1139ac27a0ecSDave Kleikamp /*
1140ac27a0ecSDave Kleikamp  * `handle' can be NULL if create is zero
1141ac27a0ecSDave Kleikamp  */
1142617ba13bSMingming Cao struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
1143725d26d3SAneesh Kumar K.V 				ext4_lblk_t block, int create, int *errp)
1144ac27a0ecSDave Kleikamp {
1145ac27a0ecSDave Kleikamp 	struct buffer_head dummy;
1146ac27a0ecSDave Kleikamp 	int fatal = 0, err;
1147ac27a0ecSDave Kleikamp 
1148ac27a0ecSDave Kleikamp 	J_ASSERT(handle != NULL || create == 0);
1149ac27a0ecSDave Kleikamp 
1150ac27a0ecSDave Kleikamp 	dummy.b_state = 0;
1151ac27a0ecSDave Kleikamp 	dummy.b_blocknr = -1000;
1152ac27a0ecSDave Kleikamp 	buffer_trace_init(&dummy.b_history);
1153a86c6181SAlex Tomas 	err = ext4_get_blocks_wrap(handle, inode, block, 1,
1154d2a17637SMingming Cao 					&dummy, create, 1, 0);
1155ac27a0ecSDave Kleikamp 	/*
1156617ba13bSMingming Cao 	 * ext4_get_blocks_handle() returns number of blocks
1157ac27a0ecSDave Kleikamp 	 * mapped. 0 in case of a HOLE.
1158ac27a0ecSDave Kleikamp 	 */
1159ac27a0ecSDave Kleikamp 	if (err > 0) {
1160ac27a0ecSDave Kleikamp 		if (err > 1)
1161ac27a0ecSDave Kleikamp 			WARN_ON(1);
1162ac27a0ecSDave Kleikamp 		err = 0;
1163ac27a0ecSDave Kleikamp 	}
1164ac27a0ecSDave Kleikamp 	*errp = err;
1165ac27a0ecSDave Kleikamp 	if (!err && buffer_mapped(&dummy)) {
1166ac27a0ecSDave Kleikamp 		struct buffer_head *bh;
1167ac27a0ecSDave Kleikamp 		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
1168ac27a0ecSDave Kleikamp 		if (!bh) {
1169ac27a0ecSDave Kleikamp 			*errp = -EIO;
1170ac27a0ecSDave Kleikamp 			goto err;
1171ac27a0ecSDave Kleikamp 		}
1172ac27a0ecSDave Kleikamp 		if (buffer_new(&dummy)) {
1173ac27a0ecSDave Kleikamp 			J_ASSERT(create != 0);
1174ac39849dSAneesh Kumar K.V 			J_ASSERT(handle != NULL);
1175ac27a0ecSDave Kleikamp 
1176ac27a0ecSDave Kleikamp 			/*
1177ac27a0ecSDave Kleikamp 			 * Now that we do not always journal data, we should
1178ac27a0ecSDave Kleikamp 			 * keep in mind whether this should always journal the
1179ac27a0ecSDave Kleikamp 			 * new buffer as metadata.  For now, regular file
1180617ba13bSMingming Cao 			 * writes use ext4_get_block instead, so it's not a
1181ac27a0ecSDave Kleikamp 			 * problem.
1182ac27a0ecSDave Kleikamp 			 */
1183ac27a0ecSDave Kleikamp 			lock_buffer(bh);
1184ac27a0ecSDave Kleikamp 			BUFFER_TRACE(bh, "call get_create_access");
1185617ba13bSMingming Cao 			fatal = ext4_journal_get_create_access(handle, bh);
1186ac27a0ecSDave Kleikamp 			if (!fatal && !buffer_uptodate(bh)) {
1187ac27a0ecSDave Kleikamp 				memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1188ac27a0ecSDave Kleikamp 				set_buffer_uptodate(bh);
1189ac27a0ecSDave Kleikamp 			}
1190ac27a0ecSDave Kleikamp 			unlock_buffer(bh);
1191617ba13bSMingming Cao 			BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
1192617ba13bSMingming Cao 			err = ext4_journal_dirty_metadata(handle, bh);
1193ac27a0ecSDave Kleikamp 			if (!fatal)
1194ac27a0ecSDave Kleikamp 				fatal = err;
1195ac27a0ecSDave Kleikamp 		} else {
1196ac27a0ecSDave Kleikamp 			BUFFER_TRACE(bh, "not a new buffer");
1197ac27a0ecSDave Kleikamp 		}
1198ac27a0ecSDave Kleikamp 		if (fatal) {
1199ac27a0ecSDave Kleikamp 			*errp = fatal;
1200ac27a0ecSDave Kleikamp 			brelse(bh);
1201ac27a0ecSDave Kleikamp 			bh = NULL;
1202ac27a0ecSDave Kleikamp 		}
1203ac27a0ecSDave Kleikamp 		return bh;
1204ac27a0ecSDave Kleikamp 	}
1205ac27a0ecSDave Kleikamp err:
1206ac27a0ecSDave Kleikamp 	return NULL;
1207ac27a0ecSDave Kleikamp }
1208ac27a0ecSDave Kleikamp 
1209617ba13bSMingming Cao struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
1210725d26d3SAneesh Kumar K.V 			       ext4_lblk_t block, int create, int *err)
1211ac27a0ecSDave Kleikamp {
1212ac27a0ecSDave Kleikamp 	struct buffer_head * bh;
1213ac27a0ecSDave Kleikamp 
1214617ba13bSMingming Cao 	bh = ext4_getblk(handle, inode, block, create, err);
1215ac27a0ecSDave Kleikamp 	if (!bh)
1216ac27a0ecSDave Kleikamp 		return bh;
1217ac27a0ecSDave Kleikamp 	if (buffer_uptodate(bh))
1218ac27a0ecSDave Kleikamp 		return bh;
1219ac27a0ecSDave Kleikamp 	ll_rw_block(READ_META, 1, &bh);
1220ac27a0ecSDave Kleikamp 	wait_on_buffer(bh);
1221ac27a0ecSDave Kleikamp 	if (buffer_uptodate(bh))
1222ac27a0ecSDave Kleikamp 		return bh;
1223ac27a0ecSDave Kleikamp 	put_bh(bh);
1224ac27a0ecSDave Kleikamp 	*err = -EIO;
1225ac27a0ecSDave Kleikamp 	return NULL;
1226ac27a0ecSDave Kleikamp }
1227ac27a0ecSDave Kleikamp 
1228ac27a0ecSDave Kleikamp static int walk_page_buffers(	handle_t *handle,
1229ac27a0ecSDave Kleikamp 				struct buffer_head *head,
1230ac27a0ecSDave Kleikamp 				unsigned from,
1231ac27a0ecSDave Kleikamp 				unsigned to,
1232ac27a0ecSDave Kleikamp 				int *partial,
1233ac27a0ecSDave Kleikamp 				int (*fn)(	handle_t *handle,
1234ac27a0ecSDave Kleikamp 						struct buffer_head *bh))
1235ac27a0ecSDave Kleikamp {
1236ac27a0ecSDave Kleikamp 	struct buffer_head *bh;
1237ac27a0ecSDave Kleikamp 	unsigned block_start, block_end;
1238ac27a0ecSDave Kleikamp 	unsigned blocksize = head->b_size;
1239ac27a0ecSDave Kleikamp 	int err, ret = 0;
1240ac27a0ecSDave Kleikamp 	struct buffer_head *next;
1241ac27a0ecSDave Kleikamp 
1242ac27a0ecSDave Kleikamp 	for (	bh = head, block_start = 0;
1243ac27a0ecSDave Kleikamp 		ret == 0 && (bh != head || !block_start);
1244ac27a0ecSDave Kleikamp 		block_start = block_end, bh = next)
1245ac27a0ecSDave Kleikamp 	{
1246ac27a0ecSDave Kleikamp 		next = bh->b_this_page;
1247ac27a0ecSDave Kleikamp 		block_end = block_start + blocksize;
1248ac27a0ecSDave Kleikamp 		if (block_end <= from || block_start >= to) {
1249ac27a0ecSDave Kleikamp 			if (partial && !buffer_uptodate(bh))
1250ac27a0ecSDave Kleikamp 				*partial = 1;
1251ac27a0ecSDave Kleikamp 			continue;
1252ac27a0ecSDave Kleikamp 		}
1253ac27a0ecSDave Kleikamp 		err = (*fn)(handle, bh);
1254ac27a0ecSDave Kleikamp 		if (!ret)
1255ac27a0ecSDave Kleikamp 			ret = err;
1256ac27a0ecSDave Kleikamp 	}
1257ac27a0ecSDave Kleikamp 	return ret;
1258ac27a0ecSDave Kleikamp }
1259ac27a0ecSDave Kleikamp 
1260ac27a0ecSDave Kleikamp /*
1261ac27a0ecSDave Kleikamp  * To preserve ordering, it is essential that the hole instantiation and
1262ac27a0ecSDave Kleikamp  * the data write be encapsulated in a single transaction.  We cannot
1263617ba13bSMingming Cao  * close off a transaction and start a new one between the ext4_get_block()
1264dab291afSMingming Cao  * and the write_end().  So doing the jbd2_journal_start at the start of
1265ac27a0ecSDave Kleikamp  * write_begin() is the right place.
1266ac27a0ecSDave Kleikamp  *
1267617ba13bSMingming Cao  * Also, this function can nest inside ext4_writepage() ->
1268617ba13bSMingming Cao  * block_write_full_page(). In that case, we *know* that ext4_writepage()
1269ac27a0ecSDave Kleikamp  * has generated enough buffer credits to do the whole page.  So we won't
1270ac27a0ecSDave Kleikamp  * block on the journal in that case, which is good, because the caller may
1271ac27a0ecSDave Kleikamp  * be PF_MEMALLOC.
1272ac27a0ecSDave Kleikamp  *
1273617ba13bSMingming Cao  * By accident, ext4 can be reentered when a transaction is open via
1274ac27a0ecSDave Kleikamp  * quota file writes.  If we were to commit the transaction while thus
1275ac27a0ecSDave Kleikamp  * reentered, there can be a deadlock - we would be holding a quota
1276ac27a0ecSDave Kleikamp  * lock, and the commit would never complete if another thread had a
1277ac27a0ecSDave Kleikamp  * transaction open and was blocking on the quota lock - a ranking
1278ac27a0ecSDave Kleikamp  * violation.
1279ac27a0ecSDave Kleikamp  *
1280dab291afSMingming Cao  * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
1281ac27a0ecSDave Kleikamp  * will _not_ run commit under these circumstances because handle->h_ref
1282ac27a0ecSDave Kleikamp  * is elevated.  We'll still have enough credits for the tiny quotafile
1283ac27a0ecSDave Kleikamp  * write.
1284ac27a0ecSDave Kleikamp  */
1285ac27a0ecSDave Kleikamp static int do_journal_get_write_access(handle_t *handle,
1286ac27a0ecSDave Kleikamp 					struct buffer_head *bh)
1287ac27a0ecSDave Kleikamp {
1288ac27a0ecSDave Kleikamp 	if (!buffer_mapped(bh) || buffer_freed(bh))
1289ac27a0ecSDave Kleikamp 		return 0;
1290617ba13bSMingming Cao 	return ext4_journal_get_write_access(handle, bh);
1291ac27a0ecSDave Kleikamp }
1292ac27a0ecSDave Kleikamp 
1293bfc1af65SNick Piggin static int ext4_write_begin(struct file *file, struct address_space *mapping,
1294bfc1af65SNick Piggin 				loff_t pos, unsigned len, unsigned flags,
1295bfc1af65SNick Piggin 				struct page **pagep, void **fsdata)
1296ac27a0ecSDave Kleikamp {
1297bfc1af65SNick Piggin  	struct inode *inode = mapping->host;
12987479d2b9SAndrew Morton 	int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
1299ac27a0ecSDave Kleikamp 	handle_t *handle;
1300ac27a0ecSDave Kleikamp 	int retries = 0;
1301bfc1af65SNick Piggin  	struct page *page;
1302bfc1af65SNick Piggin  	pgoff_t index;
1303bfc1af65SNick Piggin  	unsigned from, to;
1304bfc1af65SNick Piggin 
1305bfc1af65SNick Piggin  	index = pos >> PAGE_CACHE_SHIFT;
1306bfc1af65SNick Piggin  	from = pos & (PAGE_CACHE_SIZE - 1);
1307bfc1af65SNick Piggin  	to = from + len;
1308ac27a0ecSDave Kleikamp 
1309ac27a0ecSDave Kleikamp retry:
1310617ba13bSMingming Cao   	handle = ext4_journal_start(inode, needed_blocks);
13117479d2b9SAndrew Morton   	if (IS_ERR(handle)) {
13127479d2b9SAndrew Morton   		ret = PTR_ERR(handle);
13137479d2b9SAndrew Morton   		goto out;
13147479d2b9SAndrew Morton 	}
1315ac27a0ecSDave Kleikamp 
1316cf108bcaSJan Kara 	page = __grab_cache_page(mapping, index);
1317cf108bcaSJan Kara 	if (!page) {
1318cf108bcaSJan Kara 		ext4_journal_stop(handle);
1319cf108bcaSJan Kara 		ret = -ENOMEM;
1320cf108bcaSJan Kara 		goto out;
1321cf108bcaSJan Kara 	}
1322cf108bcaSJan Kara 	*pagep = page;
1323cf108bcaSJan Kara 
1324bfc1af65SNick Piggin 	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
1325bfc1af65SNick Piggin 							ext4_get_block);
1326bfc1af65SNick Piggin 
1327bfc1af65SNick Piggin 	if (!ret && ext4_should_journal_data(inode)) {
1328ac27a0ecSDave Kleikamp 		ret = walk_page_buffers(handle, page_buffers(page),
1329ac27a0ecSDave Kleikamp 				from, to, NULL, do_journal_get_write_access);
1330b46be050SAndrey Savochkin 	}
1331bfc1af65SNick Piggin 
1332bfc1af65SNick Piggin 	if (ret) {
1333bfc1af65SNick Piggin  		unlock_page(page);
1334cf108bcaSJan Kara 		ext4_journal_stop(handle);
1335bfc1af65SNick Piggin  		page_cache_release(page);
1336bfc1af65SNick Piggin 	}
1337bfc1af65SNick Piggin 
1338617ba13bSMingming Cao 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
1339ac27a0ecSDave Kleikamp 		goto retry;
13407479d2b9SAndrew Morton out:
1341ac27a0ecSDave Kleikamp 	return ret;
1342ac27a0ecSDave Kleikamp }
1343ac27a0ecSDave Kleikamp 
1344bfc1af65SNick Piggin /* For write_end() in data=journal mode */
1345bfc1af65SNick Piggin static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1346ac27a0ecSDave Kleikamp {
1347ac27a0ecSDave Kleikamp 	if (!buffer_mapped(bh) || buffer_freed(bh))
1348ac27a0ecSDave Kleikamp 		return 0;
1349ac27a0ecSDave Kleikamp 	set_buffer_uptodate(bh);
1350617ba13bSMingming Cao 	return ext4_journal_dirty_metadata(handle, bh);
1351ac27a0ecSDave Kleikamp }
1352ac27a0ecSDave Kleikamp 
1353ac27a0ecSDave Kleikamp /*
1354ac27a0ecSDave Kleikamp  * We need to pick up the new inode size which generic_commit_write gave us
1355ac27a0ecSDave Kleikamp  * We need to pick up the new inode size which generic_write_end gave us.
1356ac27a0ecSDave Kleikamp  *
1357617ba13bSMingming Cao  * ext4 never places buffers on inode->i_mapping->private_list.  metadata
1358ac27a0ecSDave Kleikamp  * buffers are managed internally.
1359ac27a0ecSDave Kleikamp  */
1360bfc1af65SNick Piggin static int ext4_ordered_write_end(struct file *file,
1361bfc1af65SNick Piggin 				struct address_space *mapping,
1362bfc1af65SNick Piggin 				loff_t pos, unsigned len, unsigned copied,
1363bfc1af65SNick Piggin 				struct page *page, void *fsdata)
1364ac27a0ecSDave Kleikamp {
1365617ba13bSMingming Cao 	handle_t *handle = ext4_journal_current_handle();
1366cf108bcaSJan Kara 	struct inode *inode = mapping->host;
1367bfc1af65SNick Piggin 	unsigned from, to;
1368ac27a0ecSDave Kleikamp 	int ret = 0, ret2;
1369ac27a0ecSDave Kleikamp 
1370bfc1af65SNick Piggin 	from = pos & (PAGE_CACHE_SIZE - 1);
1371bfc1af65SNick Piggin 	to = from + len;
1372bfc1af65SNick Piggin 
1373678aaf48SJan Kara 	ret = ext4_jbd2_file_inode(handle, inode);
1374ac27a0ecSDave Kleikamp 
1375ac27a0ecSDave Kleikamp 	if (ret == 0) {
1376ac27a0ecSDave Kleikamp 		/*
1377bfc1af65SNick Piggin 		 * generic_write_end() will run mark_inode_dirty() if i_size
1378ac27a0ecSDave Kleikamp 		 * changes.  So let's piggyback the i_disksize mark_inode_dirty
1379ac27a0ecSDave Kleikamp 		 * into that.
1380ac27a0ecSDave Kleikamp 		 */
1381ac27a0ecSDave Kleikamp 		loff_t new_i_size;
1382ac27a0ecSDave Kleikamp 
1383bfc1af65SNick Piggin 		new_i_size = pos + copied;
1384617ba13bSMingming Cao 		if (new_i_size > EXT4_I(inode)->i_disksize)
1385617ba13bSMingming Cao 			EXT4_I(inode)->i_disksize = new_i_size;
1386cf108bcaSJan Kara 		ret2 = generic_write_end(file, mapping, pos, len, copied,
1387bfc1af65SNick Piggin 							page, fsdata);
1388f8a87d89SRoel Kluin 		copied = ret2;
1389f8a87d89SRoel Kluin 		if (ret2 < 0)
1390f8a87d89SRoel Kluin 			ret = ret2;
1391ac27a0ecSDave Kleikamp 	}
1392617ba13bSMingming Cao 	ret2 = ext4_journal_stop(handle);
1393ac27a0ecSDave Kleikamp 	if (!ret)
1394ac27a0ecSDave Kleikamp 		ret = ret2;
1395bfc1af65SNick Piggin 
1396bfc1af65SNick Piggin 	return ret ? ret : copied;
1397ac27a0ecSDave Kleikamp }
1398ac27a0ecSDave Kleikamp 
1399bfc1af65SNick Piggin static int ext4_writeback_write_end(struct file *file,
1400bfc1af65SNick Piggin 				struct address_space *mapping,
1401bfc1af65SNick Piggin 				loff_t pos, unsigned len, unsigned copied,
1402bfc1af65SNick Piggin 				struct page *page, void *fsdata)
1403ac27a0ecSDave Kleikamp {
1404617ba13bSMingming Cao 	handle_t *handle = ext4_journal_current_handle();
1405cf108bcaSJan Kara 	struct inode *inode = mapping->host;
1406ac27a0ecSDave Kleikamp 	int ret = 0, ret2;
1407ac27a0ecSDave Kleikamp 	loff_t new_i_size;
1408ac27a0ecSDave Kleikamp 
1409bfc1af65SNick Piggin 	new_i_size = pos + copied;
1410617ba13bSMingming Cao 	if (new_i_size > EXT4_I(inode)->i_disksize)
1411617ba13bSMingming Cao 		EXT4_I(inode)->i_disksize = new_i_size;
1412ac27a0ecSDave Kleikamp 
1413cf108bcaSJan Kara 	ret2 = generic_write_end(file, mapping, pos, len, copied,
1414bfc1af65SNick Piggin 							page, fsdata);
1415f8a87d89SRoel Kluin 	copied = ret2;
1416f8a87d89SRoel Kluin 	if (ret2 < 0)
1417f8a87d89SRoel Kluin 		ret = ret2;
1418ac27a0ecSDave Kleikamp 
1419617ba13bSMingming Cao 	ret2 = ext4_journal_stop(handle);
1420ac27a0ecSDave Kleikamp 	if (!ret)
1421ac27a0ecSDave Kleikamp 		ret = ret2;
1422bfc1af65SNick Piggin 
1423bfc1af65SNick Piggin 	return ret ? ret : copied;
1424ac27a0ecSDave Kleikamp }
1425ac27a0ecSDave Kleikamp 
1426bfc1af65SNick Piggin static int ext4_journalled_write_end(struct file *file,
1427bfc1af65SNick Piggin 				struct address_space *mapping,
1428bfc1af65SNick Piggin 				loff_t pos, unsigned len, unsigned copied,
1429bfc1af65SNick Piggin 				struct page *page, void *fsdata)
1430ac27a0ecSDave Kleikamp {
1431617ba13bSMingming Cao 	handle_t *handle = ext4_journal_current_handle();
1432bfc1af65SNick Piggin 	struct inode *inode = mapping->host;
1433ac27a0ecSDave Kleikamp 	int ret = 0, ret2;
1434ac27a0ecSDave Kleikamp 	int partial = 0;
1435bfc1af65SNick Piggin 	unsigned from, to;
1436ac27a0ecSDave Kleikamp 
1437bfc1af65SNick Piggin 	from = pos & (PAGE_CACHE_SIZE - 1);
1438bfc1af65SNick Piggin 	to = from + len;
1439bfc1af65SNick Piggin 
1440bfc1af65SNick Piggin 	if (copied < len) {
1441bfc1af65SNick Piggin 		if (!PageUptodate(page))
1442bfc1af65SNick Piggin 			copied = 0;
1443bfc1af65SNick Piggin 		page_zero_new_buffers(page, from+copied, to);
1444bfc1af65SNick Piggin 	}
1445ac27a0ecSDave Kleikamp 
1446ac27a0ecSDave Kleikamp 	ret = walk_page_buffers(handle, page_buffers(page), from,
1447bfc1af65SNick Piggin 				to, &partial, write_end_fn);
1448ac27a0ecSDave Kleikamp 	if (!partial)
1449ac27a0ecSDave Kleikamp 		SetPageUptodate(page);
1450bfc1af65SNick Piggin 	if (pos+copied > inode->i_size)
1451bfc1af65SNick Piggin 		i_size_write(inode, pos+copied);
1452617ba13bSMingming Cao 	EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
1453617ba13bSMingming Cao 	if (inode->i_size > EXT4_I(inode)->i_disksize) {
1454617ba13bSMingming Cao 		EXT4_I(inode)->i_disksize = inode->i_size;
1455617ba13bSMingming Cao 		ret2 = ext4_mark_inode_dirty(handle, inode);
1456ac27a0ecSDave Kleikamp 		if (!ret)
1457ac27a0ecSDave Kleikamp 			ret = ret2;
1458ac27a0ecSDave Kleikamp 	}
1459bfc1af65SNick Piggin 
1460cf108bcaSJan Kara 	unlock_page(page);
1461617ba13bSMingming Cao 	ret2 = ext4_journal_stop(handle);
1462ac27a0ecSDave Kleikamp 	if (!ret)
1463ac27a0ecSDave Kleikamp 		ret = ret2;
1464bfc1af65SNick Piggin 	page_cache_release(page);
1465bfc1af65SNick Piggin 
1466bfc1af65SNick Piggin 	return ret ? ret : copied;
1467ac27a0ecSDave Kleikamp }
1468d2a17637SMingming Cao /*
1469d2a17637SMingming Cao  * Calculate the number of metadata blocks we need to reserve
1470d2a17637SMingming Cao  * in order to allocate @blocks for a non-extent-based file
1471d2a17637SMingming Cao  */
1472d2a17637SMingming Cao static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
1473d2a17637SMingming Cao {
1474d2a17637SMingming Cao 	int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
1475d2a17637SMingming Cao 	int ind_blks, dind_blks, tind_blks;
1476d2a17637SMingming Cao 
1477d2a17637SMingming Cao 	/* number of new indirect blocks needed */
1478d2a17637SMingming Cao 	ind_blks = (blocks + icap - 1) / icap;
1479d2a17637SMingming Cao 
1480d2a17637SMingming Cao 	dind_blks = (ind_blks + icap - 1) / icap;
1481d2a17637SMingming Cao 
1482d2a17637SMingming Cao 	tind_blks = 1;
1483d2a17637SMingming Cao 
1484d2a17637SMingming Cao 	return ind_blks + dind_blks + tind_blks;
1485d2a17637SMingming Cao }
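
/*
 * Worked example (illustrative, assuming a 4KB block size so that
 * icap = EXT4_ADDR_PER_BLOCK = 1024): for blocks = 2048 we get
 * ind_blks = (2048 + 1023) / 1024 = 2, dind_blks = (2 + 1023) / 1024 = 1
 * and tind_blks = 1, so 4 metadata blocks are accounted for.
 */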
1486d2a17637SMingming Cao 
1487d2a17637SMingming Cao /*
1488d2a17637SMingming Cao  * Calculate the number of metadata blocks we need to reserve
1489d2a17637SMingming Cao  * in order to allocate the given number of blocks
1490d2a17637SMingming Cao  */
1491d2a17637SMingming Cao static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
1492d2a17637SMingming Cao {
1493d2a17637SMingming Cao 	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
1494d2a17637SMingming Cao 		return ext4_ext_calc_metadata_amount(inode, blocks);
1495d2a17637SMingming Cao 
1496d2a17637SMingming Cao 	return ext4_indirect_calc_metadata_amount(inode, blocks);
1497d2a17637SMingming Cao }
1498d2a17637SMingming Cao 
1499d2a17637SMingming Cao static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
1500d2a17637SMingming Cao {
1501d2a17637SMingming Cao 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1502d2a17637SMingming Cao 	unsigned long md_needed, mdblocks, total = 0;
1503d2a17637SMingming Cao 
1504d2a17637SMingming Cao 	/*
1505d2a17637SMingming Cao 	 * Recalculate the amount of metadata blocks to reserve
1506d2a17637SMingming Cao 	 * in order to allocate nrblocks;
1507d2a17637SMingming Cao 	 * the worst case is one extent per block.
1508d2a17637SMingming Cao 	 */
1509d2a17637SMingming Cao 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1510d2a17637SMingming Cao 	total = EXT4_I(inode)->i_reserved_data_blocks + nrblocks;
1511d2a17637SMingming Cao 	mdblocks = ext4_calc_metadata_amount(inode, total);
1512d2a17637SMingming Cao 	BUG_ON(mdblocks < EXT4_I(inode)->i_reserved_meta_blocks);
1513d2a17637SMingming Cao 
1514d2a17637SMingming Cao 	md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
1515d2a17637SMingming Cao 	total = md_needed + nrblocks;
1516d2a17637SMingming Cao 
1517d2a17637SMingming Cao 	if (ext4_has_free_blocks(sbi, total) < total) {
1518d2a17637SMingming Cao 		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1519d2a17637SMingming Cao 		return -ENOSPC;
1520d2a17637SMingming Cao 	}
1521d2a17637SMingming Cao 
1522d2a17637SMingming Cao 	/* reduce fs free blocks counter */
1523d2a17637SMingming Cao 	percpu_counter_sub(&sbi->s_freeblocks_counter, total);
1524d2a17637SMingming Cao 
1525d2a17637SMingming Cao 	EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
1526d2a17637SMingming Cao 	EXT4_I(inode)->i_reserved_meta_blocks = mdblocks;
1527d2a17637SMingming Cao 
1528d2a17637SMingming Cao 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1529d2a17637SMingming Cao 	return 0;       /* success */
1530d2a17637SMingming Cao }
1531d2a17637SMingming Cao 
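
/*
 * Illustrative walk-through of the reservation bookkeeping above (the
 * numbers are hypothetical): with i_reserved_data_blocks = 10 and
 * i_reserved_meta_blocks = 3, a request for nrblocks = 5 recomputes
 * mdblocks for 15 data blocks; if that comes to 4, then md_needed = 1 and
 * 5 + 1 = 6 blocks are subtracted from s_freeblocks_counter, leaving the
 * inode with 15 reserved data blocks and 4 reserved metadata blocks.
 */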
1532d2a17637SMingming Cao void ext4_da_release_space(struct inode *inode, int used, int to_free)
1533d2a17637SMingming Cao {
1534d2a17637SMingming Cao 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1535d2a17637SMingming Cao 	int total, mdb, mdb_free, release;
1536d2a17637SMingming Cao 
1537d2a17637SMingming Cao 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1538d2a17637SMingming Cao 	/* recalculate the number of metablocks that still need to be reserved */
1539d2a17637SMingming Cao 	total = EXT4_I(inode)->i_reserved_data_blocks - used - to_free;
1540d2a17637SMingming Cao 	mdb = ext4_calc_metadata_amount(inode, total);
1541d2a17637SMingming Cao 
1542d2a17637SMingming Cao 	/* figure out how many metablocks to release */
1543d2a17637SMingming Cao 	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
1544d2a17637SMingming Cao 	mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;
1545d2a17637SMingming Cao 
1546d2a17637SMingming Cao 	/* Account for allocated meta_blocks */
1547d2a17637SMingming Cao 	mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;
1548d2a17637SMingming Cao 
1549d2a17637SMingming Cao 	release = to_free + mdb_free;
1550d2a17637SMingming Cao 
1551d2a17637SMingming Cao 	/* update fs free blocks counter for truncate case */
1552d2a17637SMingming Cao 	percpu_counter_add(&sbi->s_freeblocks_counter, release);
1553d2a17637SMingming Cao 
1554d2a17637SMingming Cao 	/* update per-inode reservations */
1555d2a17637SMingming Cao 	BUG_ON(used + to_free > EXT4_I(inode)->i_reserved_data_blocks);
1556d2a17637SMingming Cao 	EXT4_I(inode)->i_reserved_data_blocks -= (used + to_free);
1557d2a17637SMingming Cao 
1558d2a17637SMingming Cao 	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
1559d2a17637SMingming Cao 	EXT4_I(inode)->i_reserved_meta_blocks = mdb;
1560d2a17637SMingming Cao 	EXT4_I(inode)->i_allocated_meta_blocks = 0;
1561d2a17637SMingming Cao 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1562d2a17637SMingming Cao }
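
/*
 * Illustrative walk-through of the release path above (hypothetical
 * numbers, using the indirect metadata formula with a 4KB block size):
 * with 15 reserved data blocks, 4 reserved metadata blocks and 1 already
 * allocated metadata block, a call with used = 8 and to_free = 2 leaves
 * total = 5 data blocks still reserved, mdb = 3, mdb_free = 4 - 3 - 1 = 0,
 * so release = 2 blocks are returned to s_freeblocks_counter and the
 * counters drop to 5 reserved data blocks and 3 reserved metadata blocks.
 */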
1563d2a17637SMingming Cao 
1564d2a17637SMingming Cao static void ext4_da_page_release_reservation(struct page *page,
1565d2a17637SMingming Cao 						unsigned long offset)
1566d2a17637SMingming Cao {
1567d2a17637SMingming Cao 	int to_release = 0;
1568d2a17637SMingming Cao 	struct buffer_head *head, *bh;
1569d2a17637SMingming Cao 	unsigned int curr_off = 0;
1570d2a17637SMingming Cao 
1571d2a17637SMingming Cao 	head = page_buffers(page);
1572d2a17637SMingming Cao 	bh = head;
1573d2a17637SMingming Cao 	do {
1574d2a17637SMingming Cao 		unsigned int next_off = curr_off + bh->b_size;
1575d2a17637SMingming Cao 
1576d2a17637SMingming Cao 		if ((offset <= curr_off) && (buffer_delay(bh))) {
1577d2a17637SMingming Cao 			to_release++;
1578d2a17637SMingming Cao 			clear_buffer_delay(bh);
1579d2a17637SMingming Cao 		}
1580d2a17637SMingming Cao 		curr_off = next_off;
1581d2a17637SMingming Cao 	} while ((bh = bh->b_this_page) != head);
1582d2a17637SMingming Cao 	ext4_da_release_space(page->mapping->host, 0, to_release);
1583d2a17637SMingming Cao }
1584ac27a0ecSDave Kleikamp 
1585ac27a0ecSDave Kleikamp /*
158664769240SAlex Tomas  * Delayed allocation stuff
158764769240SAlex Tomas  */
158864769240SAlex Tomas 
158964769240SAlex Tomas struct mpage_da_data {
159064769240SAlex Tomas 	struct inode *inode;
159164769240SAlex Tomas 	struct buffer_head lbh;			/* extent of blocks */
159264769240SAlex Tomas 	unsigned long first_page, next_page;	/* extent of pages */
159364769240SAlex Tomas 	get_block_t *get_block;
159464769240SAlex Tomas 	struct writeback_control *wbc;
159564769240SAlex Tomas };
159664769240SAlex Tomas 
159764769240SAlex Tomas /*
159864769240SAlex Tomas  * mpage_da_submit_io - walks through the extent of pages and tries to write
159964769240SAlex Tomas  * them out with __mpage_writepage()
160064769240SAlex Tomas  *
160164769240SAlex Tomas  * @mpd->inode: inode
160264769240SAlex Tomas  * @mpd->first_page: first page of the extent
160364769240SAlex Tomas  * @mpd->next_page: page after the last page of the extent
160464769240SAlex Tomas  * @mpd->get_block: the filesystem's block mapper function
160564769240SAlex Tomas  *
160664769240SAlex Tomas  * By the time mpage_da_submit_io() is called we expect all blocks
160764769240SAlex Tomas  * to be allocated. This may be wrong if allocation failed.
160864769240SAlex Tomas  *
160964769240SAlex Tomas  * As pages are already locked by write_cache_pages(), we can't use it
161064769240SAlex Tomas  */
161164769240SAlex Tomas static int mpage_da_submit_io(struct mpage_da_data *mpd)
161264769240SAlex Tomas {
161364769240SAlex Tomas 	struct address_space *mapping = mpd->inode->i_mapping;
161464769240SAlex Tomas 	struct mpage_data mpd_pp = {
161564769240SAlex Tomas 		.bio = NULL,
161664769240SAlex Tomas 		.last_block_in_bio = 0,
161764769240SAlex Tomas 		.get_block = mpd->get_block,
161864769240SAlex Tomas 		.use_writepage = 1,
161964769240SAlex Tomas 	};
162064769240SAlex Tomas 	int ret = 0, err, nr_pages, i;
162164769240SAlex Tomas 	unsigned long index, end;
162264769240SAlex Tomas 	struct pagevec pvec;
162364769240SAlex Tomas 
162464769240SAlex Tomas 	BUG_ON(mpd->next_page <= mpd->first_page);
162564769240SAlex Tomas 
162664769240SAlex Tomas 	pagevec_init(&pvec, 0);
162764769240SAlex Tomas 	index = mpd->first_page;
162864769240SAlex Tomas 	end = mpd->next_page - 1;
162964769240SAlex Tomas 
163064769240SAlex Tomas 	while (index <= end) {
163164769240SAlex Tomas 		/* XXX: optimize tail */
163264769240SAlex Tomas 		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
163364769240SAlex Tomas 		if (nr_pages == 0)
163464769240SAlex Tomas 			break;
163564769240SAlex Tomas 		for (i = 0; i < nr_pages; i++) {
163664769240SAlex Tomas 			struct page *page = pvec.pages[i];
163764769240SAlex Tomas 
163864769240SAlex Tomas 			index = page->index;
163964769240SAlex Tomas 			if (index > end)
164064769240SAlex Tomas 				break;
164164769240SAlex Tomas 			index++;
164264769240SAlex Tomas 
164364769240SAlex Tomas 			err = __mpage_writepage(page, mpd->wbc, &mpd_pp);
164464769240SAlex Tomas 
164564769240SAlex Tomas 			/*
164664769240SAlex Tomas 			 * In error case, we have to continue because
164764769240SAlex Tomas 			 * remaining pages are still locked
164864769240SAlex Tomas 			 * XXX: unlock and re-dirty them?
164964769240SAlex Tomas 			 */
165064769240SAlex Tomas 			if (ret == 0)
165164769240SAlex Tomas 				ret = err;
165264769240SAlex Tomas 		}
165364769240SAlex Tomas 		pagevec_release(&pvec);
165464769240SAlex Tomas 	}
165564769240SAlex Tomas 	if (mpd_pp.bio)
165664769240SAlex Tomas 		mpage_bio_submit(WRITE, mpd_pp.bio);
165764769240SAlex Tomas 
165864769240SAlex Tomas 	return ret;
165964769240SAlex Tomas }
166064769240SAlex Tomas 
166164769240SAlex Tomas /*
166264769240SAlex Tomas  * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers
166364769240SAlex Tomas  *
166464769240SAlex Tomas  * @mpd->inode - inode to walk through
166564769240SAlex Tomas  * @exbh->b_blocknr - first block on a disk
166664769240SAlex Tomas  * @exbh->b_size - amount of space in bytes
166764769240SAlex Tomas  * @logical - first logical block to start assignment with
166864769240SAlex Tomas  *
166964769240SAlex Tomas  * The function goes through all the passed space and puts actual disk
167064769240SAlex Tomas  * block numbers into the buffer heads, dropping BH_Delay.
167164769240SAlex Tomas  */
167264769240SAlex Tomas static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical,
167364769240SAlex Tomas 				 struct buffer_head *exbh)
167464769240SAlex Tomas {
167564769240SAlex Tomas 	struct inode *inode = mpd->inode;
167664769240SAlex Tomas 	struct address_space *mapping = inode->i_mapping;
167764769240SAlex Tomas 	int blocks = exbh->b_size >> inode->i_blkbits;
167864769240SAlex Tomas 	sector_t pblock = exbh->b_blocknr, cur_logical;
167964769240SAlex Tomas 	struct buffer_head *head, *bh;
168064769240SAlex Tomas 	unsigned long index, end;
168164769240SAlex Tomas 	struct pagevec pvec;
168264769240SAlex Tomas 	int nr_pages, i;
168364769240SAlex Tomas 
168464769240SAlex Tomas 	index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
168564769240SAlex Tomas 	end = (logical + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
168664769240SAlex Tomas 	cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
168764769240SAlex Tomas 
168864769240SAlex Tomas 	pagevec_init(&pvec, 0);
168964769240SAlex Tomas 
169064769240SAlex Tomas 	while (index <= end) {
169164769240SAlex Tomas 		/* XXX: optimize tail */
169264769240SAlex Tomas 		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
169364769240SAlex Tomas 		if (nr_pages == 0)
169464769240SAlex Tomas 			break;
169564769240SAlex Tomas 		for (i = 0; i < nr_pages; i++) {
169664769240SAlex Tomas 			struct page *page = pvec.pages[i];
169764769240SAlex Tomas 
169864769240SAlex Tomas 			index = page->index;
169964769240SAlex Tomas 			if (index > end)
170064769240SAlex Tomas 				break;
170164769240SAlex Tomas 			index++;
170264769240SAlex Tomas 
170364769240SAlex Tomas 			BUG_ON(!PageLocked(page));
170464769240SAlex Tomas 			BUG_ON(PageWriteback(page));
170564769240SAlex Tomas 			BUG_ON(!page_has_buffers(page));
170664769240SAlex Tomas 
170764769240SAlex Tomas 			bh = page_buffers(page);
170864769240SAlex Tomas 			head = bh;
170964769240SAlex Tomas 
171064769240SAlex Tomas 			/* skip blocks out of the range */
171164769240SAlex Tomas 			do {
171264769240SAlex Tomas 				if (cur_logical >= logical)
171364769240SAlex Tomas 					break;
171464769240SAlex Tomas 				cur_logical++;
171564769240SAlex Tomas 			} while ((bh = bh->b_this_page) != head);
171664769240SAlex Tomas 
171764769240SAlex Tomas 			do {
171864769240SAlex Tomas 				if (cur_logical >= logical + blocks)
171964769240SAlex Tomas 					break;
172064769240SAlex Tomas 				if (buffer_delay(bh)) {
172164769240SAlex Tomas 					bh->b_blocknr = pblock;
172264769240SAlex Tomas 					clear_buffer_delay(bh);
172361628a3fSMingming Cao 				} else if (buffer_mapped(bh))
172464769240SAlex Tomas 					BUG_ON(bh->b_blocknr != pblock);
172564769240SAlex Tomas 
172664769240SAlex Tomas 				cur_logical++;
172764769240SAlex Tomas 				pblock++;
172864769240SAlex Tomas 			} while ((bh = bh->b_this_page) != head);
172964769240SAlex Tomas 		}
173064769240SAlex Tomas 		pagevec_release(&pvec);
173164769240SAlex Tomas 	}
173264769240SAlex Tomas }
173364769240SAlex Tomas 
173464769240SAlex Tomas 
173564769240SAlex Tomas /*
173664769240SAlex Tomas  * __unmap_underlying_blocks - just a helper function to unmap
173764769240SAlex Tomas  * a set of blocks described by @bh
173864769240SAlex Tomas  */
173964769240SAlex Tomas static inline void __unmap_underlying_blocks(struct inode *inode,
174064769240SAlex Tomas 					     struct buffer_head *bh)
174164769240SAlex Tomas {
174264769240SAlex Tomas 	struct block_device *bdev = inode->i_sb->s_bdev;
174364769240SAlex Tomas 	int blocks, i;
174464769240SAlex Tomas 
174564769240SAlex Tomas 	blocks = bh->b_size >> inode->i_blkbits;
174664769240SAlex Tomas 	for (i = 0; i < blocks; i++)
174764769240SAlex Tomas 		unmap_underlying_metadata(bdev, bh->b_blocknr + i);
174864769240SAlex Tomas }
174964769240SAlex Tomas 
175064769240SAlex Tomas /*
175164769240SAlex Tomas  * mpage_da_map_blocks - go through given space
175264769240SAlex Tomas  *
175364769240SAlex Tomas  * @mpd->lbh - bh describing space
175464769240SAlex Tomas  * @mpd->get_block - the filesystem's block mapper function
175564769240SAlex Tomas  *
175664769240SAlex Tomas  * The function skips space we know is already mapped to disk blocks.
175764769240SAlex Tomas  *
175864769240SAlex Tomas  * The function ignores errors that ->get_block() returns, so real
175964769240SAlex Tomas  * error handling is postponed to __mpage_writepage().
176064769240SAlex Tomas  */
176164769240SAlex Tomas static void mpage_da_map_blocks(struct mpage_da_data *mpd)
176264769240SAlex Tomas {
176364769240SAlex Tomas 	struct buffer_head *lbh = &mpd->lbh;
176464769240SAlex Tomas 	int err = 0, remain = lbh->b_size;
176564769240SAlex Tomas 	sector_t next = lbh->b_blocknr;
176664769240SAlex Tomas 	struct buffer_head new;
176764769240SAlex Tomas 
176864769240SAlex Tomas 	/*
176964769240SAlex Tomas 	 * We consider only non-mapped and non-allocated blocks
177064769240SAlex Tomas 	 */
177164769240SAlex Tomas 	if (buffer_mapped(lbh) && !buffer_delay(lbh))
177264769240SAlex Tomas 		return;
177364769240SAlex Tomas 
177464769240SAlex Tomas 	while (remain) {
177564769240SAlex Tomas 		new.b_state = lbh->b_state;
177664769240SAlex Tomas 		new.b_blocknr = 0;
177764769240SAlex Tomas 		new.b_size = remain;
177864769240SAlex Tomas 		err = mpd->get_block(mpd->inode, next, &new, 1);
177964769240SAlex Tomas 		if (err) {
178064769240SAlex Tomas 			/*
178164769240SAlex Tomas 			 * Rather than implement own error handling
178264769240SAlex Tomas 			 * here, we just leave remaining blocks
178364769240SAlex Tomas 			 * unallocated and try again with ->writepage()
178464769240SAlex Tomas 			 */
178564769240SAlex Tomas 			break;
178664769240SAlex Tomas 		}
178764769240SAlex Tomas 		BUG_ON(new.b_size == 0);
178864769240SAlex Tomas 
178964769240SAlex Tomas 		if (buffer_new(&new))
179064769240SAlex Tomas 			__unmap_underlying_blocks(mpd->inode, &new);
179164769240SAlex Tomas 
179264769240SAlex Tomas 		/*
179364769240SAlex Tomas 		 * If blocks are delayed marked, we need to
179464769240SAlex Tomas 		 * put actual blocknr and drop delayed bit
179564769240SAlex Tomas 		 */
179664769240SAlex Tomas 		if (buffer_delay(lbh))
179764769240SAlex Tomas 			mpage_put_bnr_to_bhs(mpd, next, &new);
179864769240SAlex Tomas 
179964769240SAlex Tomas 		/* go for the remaining blocks */
180064769240SAlex Tomas 		next += new.b_size >> mpd->inode->i_blkbits;
180164769240SAlex Tomas 		remain -= new.b_size;
180264769240SAlex Tomas 	}
180364769240SAlex Tomas }
180464769240SAlex Tomas 
180564769240SAlex Tomas #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | (1 << BH_Delay))
180664769240SAlex Tomas 
180764769240SAlex Tomas /*
180864769240SAlex Tomas  * mpage_add_bh_to_extent - try to add one more block to extent of blocks
180964769240SAlex Tomas  *
181064769240SAlex Tomas  * @mpd->lbh - extent of blocks
181164769240SAlex Tomas  * @logical - logical number of the block in the file
181264769240SAlex Tomas  * @bh - bh of the block (used to access block's state)
181364769240SAlex Tomas  *
181464769240SAlex Tomas  * The function is used to collect contiguous blocks in the same state.
181564769240SAlex Tomas  */
181664769240SAlex Tomas static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
181764769240SAlex Tomas 				   sector_t logical, struct buffer_head *bh)
181864769240SAlex Tomas {
181964769240SAlex Tomas 	struct buffer_head *lbh = &mpd->lbh;
182064769240SAlex Tomas 	sector_t next;
182164769240SAlex Tomas 
182264769240SAlex Tomas 	next = lbh->b_blocknr + (lbh->b_size >> mpd->inode->i_blkbits);
182364769240SAlex Tomas 
182464769240SAlex Tomas 	/*
182564769240SAlex Tomas 	 * First block in the extent
182664769240SAlex Tomas 	 */
182764769240SAlex Tomas 	if (lbh->b_size == 0) {
182864769240SAlex Tomas 		lbh->b_blocknr = logical;
182964769240SAlex Tomas 		lbh->b_size = bh->b_size;
183064769240SAlex Tomas 		lbh->b_state = bh->b_state & BH_FLAGS;
183164769240SAlex Tomas 		return;
183264769240SAlex Tomas 	}
183364769240SAlex Tomas 
183464769240SAlex Tomas 	/*
183564769240SAlex Tomas 	 * Can we merge the block to our big extent?
183664769240SAlex Tomas 	 */
183764769240SAlex Tomas 	if (logical == next && (bh->b_state & BH_FLAGS) == lbh->b_state) {
183864769240SAlex Tomas 		lbh->b_size += bh->b_size;
183964769240SAlex Tomas 		return;
184064769240SAlex Tomas 	}
184164769240SAlex Tomas 
184264769240SAlex Tomas 	/*
184364769240SAlex Tomas 	 * We couldn't merge the block into our extent, so we
184464769240SAlex Tomas 	 * need to flush the current extent and start a new one
184564769240SAlex Tomas 	 */
184664769240SAlex Tomas 	mpage_da_map_blocks(mpd);
184764769240SAlex Tomas 
184864769240SAlex Tomas 	/*
184964769240SAlex Tomas 	 * Now start a new extent
185064769240SAlex Tomas 	 */
185164769240SAlex Tomas 	lbh->b_size = bh->b_size;
185264769240SAlex Tomas 	lbh->b_state = bh->b_state & BH_FLAGS;
185364769240SAlex Tomas 	lbh->b_blocknr = logical;
185464769240SAlex Tomas }
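
/*
 * Illustrative example: with 4KB blocks, three dirty delayed buffers at
 * logical blocks 8, 9 and 10 are added in order.  Block 8 starts the
 * extent (b_blocknr = 8, b_size = 4096); blocks 9 and 10 each equal
 * `next' and share the same BH_FLAGS state, so they merge and lbh ends
 * up describing a single 12KB extent starting at logical block 8.
 */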
185564769240SAlex Tomas 
185664769240SAlex Tomas /*
185764769240SAlex Tomas  * __mpage_da_writepage - finds extent of pages and blocks
185864769240SAlex Tomas  *
185964769240SAlex Tomas  * @page: page to consider
186064769240SAlex Tomas  * @wbc: not used, we just follow rules
186164769240SAlex Tomas  * @data: context
186264769240SAlex Tomas  *
186364769240SAlex Tomas  * The function finds extents of pages and scan them for all blocks.
186464769240SAlex Tomas  * The function finds extents of pages and scans them for all blocks.
186564769240SAlex Tomas static int __mpage_da_writepage(struct page *page,
186664769240SAlex Tomas 				struct writeback_control *wbc, void *data)
186764769240SAlex Tomas {
186864769240SAlex Tomas 	struct mpage_da_data *mpd = data;
186964769240SAlex Tomas 	struct inode *inode = mpd->inode;
187064769240SAlex Tomas 	struct buffer_head *bh, *head, fake;
187164769240SAlex Tomas 	sector_t logical;
187264769240SAlex Tomas 
187364769240SAlex Tomas 	/*
187464769240SAlex Tomas 	 * Can we merge this page to current extent?
187564769240SAlex Tomas 	 */
187664769240SAlex Tomas 	if (mpd->next_page != page->index) {
187764769240SAlex Tomas 		/*
187864769240SAlex Tomas 		 * Nope, we can't. So, we map non-allocated blocks
187964769240SAlex Tomas 		 * and start IO on them using __mpage_writepage()
188064769240SAlex Tomas 		 */
188164769240SAlex Tomas 		if (mpd->next_page != mpd->first_page) {
188264769240SAlex Tomas 			mpage_da_map_blocks(mpd);
188364769240SAlex Tomas 			mpage_da_submit_io(mpd);
188464769240SAlex Tomas 		}
188564769240SAlex Tomas 
188664769240SAlex Tomas 		/*
188764769240SAlex Tomas 		 * Start next extent of pages ...
188864769240SAlex Tomas 		 */
188964769240SAlex Tomas 		mpd->first_page = page->index;
189064769240SAlex Tomas 
189164769240SAlex Tomas 		/*
189264769240SAlex Tomas 		 * ... and blocks
189364769240SAlex Tomas 		 */
189464769240SAlex Tomas 		mpd->lbh.b_size = 0;
189564769240SAlex Tomas 		mpd->lbh.b_state = 0;
189664769240SAlex Tomas 		mpd->lbh.b_blocknr = 0;
189764769240SAlex Tomas 	}
189864769240SAlex Tomas 
189964769240SAlex Tomas 	mpd->next_page = page->index + 1;
190064769240SAlex Tomas 	logical = (sector_t) page->index <<
190164769240SAlex Tomas 		  (PAGE_CACHE_SHIFT - inode->i_blkbits);
190264769240SAlex Tomas 
190364769240SAlex Tomas 	if (!page_has_buffers(page)) {
190464769240SAlex Tomas 		/*
190564769240SAlex Tomas 		 * There are no buffer heads attached yet (mmap?),
190664769240SAlex Tomas 		 * so we treat the page as full of dirty blocks
190764769240SAlex Tomas 		 */
190864769240SAlex Tomas 		bh = &fake;
190964769240SAlex Tomas 		bh->b_size = PAGE_CACHE_SIZE;
191064769240SAlex Tomas 		bh->b_state = 0;
191164769240SAlex Tomas 		set_buffer_dirty(bh);
191264769240SAlex Tomas 		set_buffer_uptodate(bh);
191364769240SAlex Tomas 		mpage_add_bh_to_extent(mpd, logical, bh);
191464769240SAlex Tomas 	} else {
191564769240SAlex Tomas 		/*
191664769240SAlex Tomas 		 * Page with regular buffer heads, just add all dirty ones
191764769240SAlex Tomas 		 */
191864769240SAlex Tomas 		head = page_buffers(page);
191964769240SAlex Tomas 		bh = head;
192064769240SAlex Tomas 		do {
192164769240SAlex Tomas 			BUG_ON(buffer_locked(bh));
192264769240SAlex Tomas 			if (buffer_dirty(bh))
192364769240SAlex Tomas 				mpage_add_bh_to_extent(mpd, logical, bh);
192464769240SAlex Tomas 			logical++;
192564769240SAlex Tomas 		} while ((bh = bh->b_this_page) != head);
192664769240SAlex Tomas 	}
192764769240SAlex Tomas 
192864769240SAlex Tomas 	return 0;
192964769240SAlex Tomas }
193064769240SAlex Tomas 
193164769240SAlex Tomas /*
193264769240SAlex Tomas  * mpage_da_writepages - walk the list of dirty pages of the given
193364769240SAlex Tomas  * address space, allocate non-allocated blocks, map newly-allocated
193464769240SAlex Tomas  * blocks to existing bhs and issue IO on them
193564769240SAlex Tomas  *
193664769240SAlex Tomas  * @mapping: address space structure to write
193764769240SAlex Tomas  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
193864769240SAlex Tomas  * @get_block: the filesystem's block mapper function.
193964769240SAlex Tomas  *
194064769240SAlex Tomas  * This is a library function, which implements the writepages()
194164769240SAlex Tomas  * address_space_operation.
194264769240SAlex Tomas  *
194364769240SAlex Tomas  * In order to avoid duplication of logic that deals with partial pages,
194464769240SAlex Tomas  * multiple bio per page, etc, we find non-allocated blocks, allocate
194564769240SAlex Tomas  * them with minimal calls to ->get_block() and re-use __mpage_writepage()
194664769240SAlex Tomas  *
194764769240SAlex Tomas  * It's important that we call __mpage_writepage() only once for each
194864769240SAlex Tomas  * involved page, otherwise we'd have to implement more complicated logic
194964769240SAlex Tomas  * to deal with pages w/o PG_lock or w/ PG_writeback and so on.
195064769240SAlex Tomas  *
195164769240SAlex Tomas  * See comments to mpage_writepages()
195264769240SAlex Tomas  */
195364769240SAlex Tomas static int mpage_da_writepages(struct address_space *mapping,
195464769240SAlex Tomas 			       struct writeback_control *wbc,
195564769240SAlex Tomas 			       get_block_t get_block)
195664769240SAlex Tomas {
195764769240SAlex Tomas 	struct mpage_da_data mpd;
195864769240SAlex Tomas 	int ret;
195964769240SAlex Tomas 
196064769240SAlex Tomas 	if (!get_block)
196164769240SAlex Tomas 		return generic_writepages(mapping, wbc);
196264769240SAlex Tomas 
196364769240SAlex Tomas 	mpd.wbc = wbc;
196464769240SAlex Tomas 	mpd.inode = mapping->host;
196564769240SAlex Tomas 	mpd.lbh.b_size = 0;
196664769240SAlex Tomas 	mpd.lbh.b_state = 0;
196764769240SAlex Tomas 	mpd.lbh.b_blocknr = 0;
196864769240SAlex Tomas 	mpd.first_page = 0;
196964769240SAlex Tomas 	mpd.next_page = 0;
197064769240SAlex Tomas 	mpd.get_block = get_block;
197164769240SAlex Tomas 
197264769240SAlex Tomas 	ret = write_cache_pages(mapping, wbc, __mpage_da_writepage, &mpd);
197364769240SAlex Tomas 
197464769240SAlex Tomas 	/*
197564769240SAlex Tomas 	 * Handle last extent of pages
197664769240SAlex Tomas 	 */
197764769240SAlex Tomas 	if (mpd.next_page != mpd.first_page) {
197864769240SAlex Tomas 		mpage_da_map_blocks(&mpd);
197964769240SAlex Tomas 		mpage_da_submit_io(&mpd);
198064769240SAlex Tomas 	}
198164769240SAlex Tomas 
198264769240SAlex Tomas 	return ret;
198364769240SAlex Tomas }
198464769240SAlex Tomas 
198564769240SAlex Tomas /*
198664769240SAlex Tomas  * This is a special callback for ->write_begin() only;
198764769240SAlex Tomas  * its intention is to return a mapped block or reserve space.
198864769240SAlex Tomas  */
198964769240SAlex Tomas static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
199064769240SAlex Tomas 				  struct buffer_head *bh_result, int create)
199164769240SAlex Tomas {
199264769240SAlex Tomas 	int ret = 0;
199364769240SAlex Tomas 
199464769240SAlex Tomas 	BUG_ON(create == 0);
199564769240SAlex Tomas 	BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
199664769240SAlex Tomas 
199764769240SAlex Tomas 	/*
199864769240SAlex Tomas 	 * First, we need to know whether the block is allocated already;
199964769240SAlex Tomas 	 * preallocated blocks are unmapped but should be treated
200064769240SAlex Tomas 	 * the same as allocated blocks.
200164769240SAlex Tomas 	 */
2002d2a17637SMingming Cao 	ret = ext4_get_blocks_wrap(NULL, inode, iblock, 1,  bh_result, 0, 0, 0);
2003d2a17637SMingming Cao 	if ((ret == 0) && !buffer_delay(bh_result)) {
2004d2a17637SMingming Cao 		/* the block isn't (pre)allocated yet, let's reserve space */
200564769240SAlex Tomas 		/*
200664769240SAlex Tomas 		 * XXX: __block_prepare_write() unmaps passed block,
200764769240SAlex Tomas 		 * is it OK?
200864769240SAlex Tomas 		 */
2009d2a17637SMingming Cao 		ret = ext4_da_reserve_space(inode, 1);
2010d2a17637SMingming Cao 		if (ret)
2011d2a17637SMingming Cao 			/* not enough space to reserve */
2012d2a17637SMingming Cao 			return ret;
2013d2a17637SMingming Cao 
201464769240SAlex Tomas 		map_bh(bh_result, inode->i_sb, 0);
201564769240SAlex Tomas 		set_buffer_new(bh_result);
201664769240SAlex Tomas 		set_buffer_delay(bh_result);
201764769240SAlex Tomas 	} else if (ret > 0) {
201864769240SAlex Tomas 		bh_result->b_size = (ret << inode->i_blkbits);
201964769240SAlex Tomas 		ret = 0;
202064769240SAlex Tomas 	}
202164769240SAlex Tomas 
202264769240SAlex Tomas 	return ret;
202364769240SAlex Tomas }
2024d2a17637SMingming Cao #define		EXT4_DELALLOC_RSVED	1
202564769240SAlex Tomas static int ext4_da_get_block_write(struct inode *inode, sector_t iblock,
202664769240SAlex Tomas 				   struct buffer_head *bh_result, int create)
202764769240SAlex Tomas {
202861628a3fSMingming Cao 	int ret;
202964769240SAlex Tomas 	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
203064769240SAlex Tomas 	loff_t disksize = EXT4_I(inode)->i_disksize;
203164769240SAlex Tomas 	handle_t *handle = NULL;
203264769240SAlex Tomas 
203361628a3fSMingming Cao 	handle = ext4_journal_current_handle();
2034f0e6c985SAneesh Kumar K.V 	if (!handle) {
2035f0e6c985SAneesh Kumar K.V 		ret = ext4_get_blocks_wrap(handle, inode, iblock, max_blocks,
2036f0e6c985SAneesh Kumar K.V 				   bh_result, 0, 0, 0);
2037f0e6c985SAneesh Kumar K.V 		BUG_ON(!ret);
2038f0e6c985SAneesh Kumar K.V 	} else {
203964769240SAlex Tomas 		ret = ext4_get_blocks_wrap(handle, inode, iblock, max_blocks,
2040d2a17637SMingming Cao 				   bh_result, create, 0, EXT4_DELALLOC_RSVED);
2041f0e6c985SAneesh Kumar K.V 	}
2042f0e6c985SAneesh Kumar K.V 
204364769240SAlex Tomas 	if (ret > 0) {
204464769240SAlex Tomas 		bh_result->b_size = (ret << inode->i_blkbits);
204564769240SAlex Tomas 
204664769240SAlex Tomas 		/*
204764769240SAlex Tomas 		 * Update on-disk size along with block allocation
204864769240SAlex Tomas 		 * we don't use 'extend_disksize' as size may change
204964769240SAlex Tomas 		 * within already allocated block -bzzz
205064769240SAlex Tomas 		 */
205164769240SAlex Tomas 		disksize = ((loff_t) iblock + ret) << inode->i_blkbits;
205264769240SAlex Tomas 		if (disksize > i_size_read(inode))
205364769240SAlex Tomas 			disksize = i_size_read(inode);
205464769240SAlex Tomas 		if (disksize > EXT4_I(inode)->i_disksize) {
205564769240SAlex Tomas 			/*
205664769240SAlex Tomas 			 * XXX: replace with spinlock if seen contended -bzzz
205764769240SAlex Tomas 			 */
205864769240SAlex Tomas 			down_write(&EXT4_I(inode)->i_data_sem);
205964769240SAlex Tomas 			if (disksize > EXT4_I(inode)->i_disksize)
206064769240SAlex Tomas 				EXT4_I(inode)->i_disksize = disksize;
206164769240SAlex Tomas 			up_write(&EXT4_I(inode)->i_data_sem);
206264769240SAlex Tomas 
206364769240SAlex Tomas 			if (EXT4_I(inode)->i_disksize == disksize) {
206461628a3fSMingming Cao 				ret = ext4_mark_inode_dirty(handle, inode);
206564769240SAlex Tomas 				return ret;
206664769240SAlex Tomas 			}
206761628a3fSMingming Cao 		}
206861628a3fSMingming Cao 		ret = 0;
206961628a3fSMingming Cao 	}
207061628a3fSMingming Cao 	return ret;
207161628a3fSMingming Cao }
207261628a3fSMingming Cao 
207361628a3fSMingming Cao static int ext4_bh_unmapped_or_delay(handle_t *handle, struct buffer_head *bh)
207461628a3fSMingming Cao {
2075f0e6c985SAneesh Kumar K.V 	/*
2076f0e6c985SAneesh Kumar K.V 	 * An unmapped buffer is possible for holes.
2077f0e6c985SAneesh Kumar K.V 	 * A delayed buffer is possible with delayed allocation.
2078f0e6c985SAneesh Kumar K.V 	 */
2079f0e6c985SAneesh Kumar K.V 	return ((!buffer_mapped(bh) || buffer_delay(bh)) && buffer_dirty(bh));
2080f0e6c985SAneesh Kumar K.V }
2081f0e6c985SAneesh Kumar K.V 
2082f0e6c985SAneesh Kumar K.V static int ext4_normal_get_block_write(struct inode *inode, sector_t iblock,
2083f0e6c985SAneesh Kumar K.V 				   struct buffer_head *bh_result, int create)
2084f0e6c985SAneesh Kumar K.V {
2085f0e6c985SAneesh Kumar K.V 	int ret = 0;
2086f0e6c985SAneesh Kumar K.V 	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
2087f0e6c985SAneesh Kumar K.V 
2088f0e6c985SAneesh Kumar K.V 	/*
2089f0e6c985SAneesh Kumar K.V 	 * We don't want to do block allocation in writepage,
2090f0e6c985SAneesh Kumar K.V 	 * so call ext4_get_blocks_wrap() with create = 0
2091f0e6c985SAneesh Kumar K.V 	 */
2092f0e6c985SAneesh Kumar K.V 	ret = ext4_get_blocks_wrap(NULL, inode, iblock, max_blocks,
2093f0e6c985SAneesh Kumar K.V 				   bh_result, 0, 0, 0);
2094f0e6c985SAneesh Kumar K.V 	if (ret > 0) {
2095f0e6c985SAneesh Kumar K.V 		bh_result->b_size = (ret << inode->i_blkbits);
2096f0e6c985SAneesh Kumar K.V 		ret = 0;
2097f0e6c985SAneesh Kumar K.V 	}
2098f0e6c985SAneesh Kumar K.V 	return ret;
209961628a3fSMingming Cao }
210061628a3fSMingming Cao 
210161628a3fSMingming Cao /*
2102f0e6c985SAneesh Kumar K.V  * get called via ext4_da_writepages after taking page lock (have journal handle)
2103f0e6c985SAneesh Kumar K.V  * get called via journal_submit_inode_data_buffers (no journal handle)
2104f0e6c985SAneesh Kumar K.V  * get called via shrink_page_list via pdflush (no journal handle)
2105f0e6c985SAneesh Kumar K.V  * or grab_page_cache when doing write_begin (have journal handle)
210661628a3fSMingming Cao  */
210764769240SAlex Tomas static int ext4_da_writepage(struct page *page,
210864769240SAlex Tomas 				struct writeback_control *wbc)
210964769240SAlex Tomas {
211064769240SAlex Tomas 	int ret = 0;
211161628a3fSMingming Cao 	loff_t size;
211261628a3fSMingming Cao 	unsigned long len;
211361628a3fSMingming Cao 	struct buffer_head *page_bufs;
211461628a3fSMingming Cao 	struct inode *inode = page->mapping->host;
211564769240SAlex Tomas 
211661628a3fSMingming Cao 	size = i_size_read(inode);
211761628a3fSMingming Cao 	if (page->index == size >> PAGE_CACHE_SHIFT)
211861628a3fSMingming Cao 		len = size & ~PAGE_CACHE_MASK;
211961628a3fSMingming Cao 	else
212061628a3fSMingming Cao 		len = PAGE_CACHE_SIZE;
212161628a3fSMingming Cao 
2122f0e6c985SAneesh Kumar K.V 	if (page_has_buffers(page)) {
2123f0e6c985SAneesh Kumar K.V 		page_bufs = page_buffers(page);
2124f0e6c985SAneesh Kumar K.V 		if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2125f0e6c985SAneesh Kumar K.V 					ext4_bh_unmapped_or_delay)) {
212661628a3fSMingming Cao 			/*
2127f0e6c985SAneesh Kumar K.V 			 * We don't want to do block allocation,
2128f0e6c985SAneesh Kumar K.V 			 * so redirty the page and return.
2129cd1aac32SAneesh Kumar K.V 			 * We may reach here when we do a journal commit
2130cd1aac32SAneesh Kumar K.V 			 * via journal_submit_inode_data_buffers.
2131cd1aac32SAneesh Kumar K.V 			 * If the blocks are not mapped yet we just ignore
2132f0e6c985SAneesh Kumar K.V 			 * them. We can also reach here via shrink_page_list.
2133f0e6c985SAneesh Kumar K.V 			 */
2134f0e6c985SAneesh Kumar K.V 			redirty_page_for_writepage(wbc, page);
2135f0e6c985SAneesh Kumar K.V 			unlock_page(page);
2136f0e6c985SAneesh Kumar K.V 			return 0;
2137f0e6c985SAneesh Kumar K.V 		}
2138f0e6c985SAneesh Kumar K.V 	} else {
2139f0e6c985SAneesh Kumar K.V 		/*
2140f0e6c985SAneesh Kumar K.V 		 * The test for page_has_buffers() is subtle:
2141f0e6c985SAneesh Kumar K.V 		 * We know the page is dirty but it lost buffers. That means
2142f0e6c985SAneesh Kumar K.V 		 * that at some moment in time after write_begin()/write_end()
2143f0e6c985SAneesh Kumar K.V 		 * has been called all buffers have been clean and thus they
2144f0e6c985SAneesh Kumar K.V 		 * must have been written at least once. So they are all
2145f0e6c985SAneesh Kumar K.V 		 * mapped and we can happily proceed with mapping them
2146f0e6c985SAneesh Kumar K.V 		 * and writing the page.
2147f0e6c985SAneesh Kumar K.V 		 *
2148f0e6c985SAneesh Kumar K.V 		 * Try to initialize the buffer_heads and check whether
2149f0e6c985SAneesh Kumar K.V 		 * all are mapped and not delayed. We don't want to
2150f0e6c985SAneesh Kumar K.V 		 * do block allocation here.
2151f0e6c985SAneesh Kumar K.V 		 */
2152f0e6c985SAneesh Kumar K.V 		ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
2153f0e6c985SAneesh Kumar K.V 						ext4_normal_get_block_write);
2154f0e6c985SAneesh Kumar K.V 		if (!ret) {
2155f0e6c985SAneesh Kumar K.V 			page_bufs = page_buffers(page);
2156f0e6c985SAneesh Kumar K.V 			/* check whether all are mapped and non delay */
2157f0e6c985SAneesh Kumar K.V 			if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2158f0e6c985SAneesh Kumar K.V 						ext4_bh_unmapped_or_delay)) {
2159f0e6c985SAneesh Kumar K.V 				redirty_page_for_writepage(wbc, page);
2160f0e6c985SAneesh Kumar K.V 				unlock_page(page);
2161f0e6c985SAneesh Kumar K.V 				return 0;
2162f0e6c985SAneesh Kumar K.V 			}
2163f0e6c985SAneesh Kumar K.V 		} else {
2164f0e6c985SAneesh Kumar K.V 			/*
2165f0e6c985SAneesh Kumar K.V 			 * We can't do block allocation here,
2166f0e6c985SAneesh Kumar K.V 			 * so just redirty the page, unlock it
2167f0e6c985SAneesh Kumar K.V 			 * and return.
216861628a3fSMingming Cao 			 */
216961628a3fSMingming Cao 			redirty_page_for_writepage(wbc, page);
217061628a3fSMingming Cao 			unlock_page(page);
217161628a3fSMingming Cao 			return 0;
217261628a3fSMingming Cao 		}
217364769240SAlex Tomas 	}
217464769240SAlex Tomas 
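	/*
	 * At this point every buffer in the range is mapped and not
	 * delayed, so the page can be written out without allocating
	 * any blocks.
	 */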
217564769240SAlex Tomas 	if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
2176f0e6c985SAneesh Kumar K.V 		ret = nobh_writepage(page, ext4_normal_get_block_write, wbc);
217764769240SAlex Tomas 	else
2178f0e6c985SAneesh Kumar K.V 		ret = block_write_full_page(page,
2179f0e6c985SAneesh Kumar K.V 						ext4_normal_get_block_write,
2180f0e6c985SAneesh Kumar K.V 						wbc);
218164769240SAlex Tomas 
218264769240SAlex Tomas 	return ret;
218364769240SAlex Tomas }
218464769240SAlex Tomas 
218561628a3fSMingming Cao /*
218661628a3fSMingming Cao  * For now just follow the DIO way to estimate the max credits
218761628a3fSMingming Cao  * needed to write out EXT4_MAX_WRITEBACK_PAGES.
218861628a3fSMingming Cao  * TODO: calculate the max credits needed for
218961628a3fSMingming Cao  * extent-based files; currently the DIO credits are based on
219061628a3fSMingming Cao  * the indirect-block mapping scheme.
219161628a3fSMingming Cao  *
219261628a3fSMingming Cao  * Probably should have a generic way to calculate credits
219361628a3fSMingming Cao  * for DIO, writepages, and truncate
219461628a3fSMingming Cao  */
219561628a3fSMingming Cao #define EXT4_MAX_WRITEBACK_PAGES      DIO_MAX_BLOCKS
219661628a3fSMingming Cao #define EXT4_MAX_WRITEBACK_CREDITS    DIO_CREDITS
219761628a3fSMingming Cao 
219864769240SAlex Tomas static int ext4_da_writepages(struct address_space *mapping,
219964769240SAlex Tomas 				struct writeback_control *wbc)
220064769240SAlex Tomas {
220161628a3fSMingming Cao 	struct inode *inode = mapping->host;
220261628a3fSMingming Cao 	handle_t *handle = NULL;
220361628a3fSMingming Cao 	int needed_blocks;
220461628a3fSMingming Cao 	int ret = 0;
220561628a3fSMingming Cao 	long to_write;
220661628a3fSMingming Cao 	loff_t range_start = 0;
220761628a3fSMingming Cao 
220861628a3fSMingming Cao 	/*
220961628a3fSMingming Cao 	 * No pages to write? This is mainly a kludge to avoid starting
221061628a3fSMingming Cao 	 * a transaction for special inodes like journal inode on last iput()
221161628a3fSMingming Cao 	 * because that could violate lock ordering on umount
221261628a3fSMingming Cao 	 */
221361628a3fSMingming Cao 	if (!mapping->nrpages)
221461628a3fSMingming Cao 		return 0;
221561628a3fSMingming Cao 
221661628a3fSMingming Cao 	/*
221761628a3fSMingming Cao 	 * Estimate the worst-case credits needed to write out
221861628a3fSMingming Cao 	 * EXT4_MAX_WRITEBACK_PAGES pages
221961628a3fSMingming Cao 	 */
222061628a3fSMingming Cao 	needed_blocks = EXT4_MAX_WRITEBACK_CREDITS;
222161628a3fSMingming Cao 
222261628a3fSMingming Cao 	to_write = wbc->nr_to_write;
222361628a3fSMingming Cao 	if (!wbc->range_cyclic) {
222461628a3fSMingming Cao 		/*
222561628a3fSMingming Cao 		 * If range_cyclic is not set, force range_cont
222661628a3fSMingming Cao 		 * and save the original range_start
222761628a3fSMingming Cao 		 */
222861628a3fSMingming Cao 		wbc->range_cont = 1;
222961628a3fSMingming Cao 		range_start =  wbc->range_start;
223061628a3fSMingming Cao 	}
223161628a3fSMingming Cao 
223261628a3fSMingming Cao 	while (!ret && to_write) {
223361628a3fSMingming Cao 		/* start a new transaction*/
223461628a3fSMingming Cao 		handle = ext4_journal_start(inode, needed_blocks);
223561628a3fSMingming Cao 		if (IS_ERR(handle)) {
223661628a3fSMingming Cao 			ret = PTR_ERR(handle);
223761628a3fSMingming Cao 			goto out_writepages;
223861628a3fSMingming Cao 		}
2239cd1aac32SAneesh Kumar K.V 		if (ext4_should_order_data(inode)) {
2240cd1aac32SAneesh Kumar K.V 			/*
2241cd1aac32SAneesh Kumar K.V 			 * With ordered mode we need to add
2242cd1aac32SAneesh Kumar K.V 			 * the inode to the journal handle
2243cd1aac32SAneesh Kumar K.V 			 * when we do block allocation.
2244cd1aac32SAneesh Kumar K.V 			 */
2245cd1aac32SAneesh Kumar K.V 			ret = ext4_jbd2_file_inode(handle, inode);
2246cd1aac32SAneesh Kumar K.V 			if (ret) {
2247cd1aac32SAneesh Kumar K.V 				ext4_journal_stop(handle);
2248cd1aac32SAneesh Kumar K.V 				goto out_writepages;
2249cd1aac32SAneesh Kumar K.V 			}
2250cd1aac32SAneesh Kumar K.V 
2251cd1aac32SAneesh Kumar K.V 		}
225261628a3fSMingming Cao 		/*
225361628a3fSMingming Cao 		 * Set the max number of dirty pages that can be written at a time
225461628a3fSMingming Cao 		 * to fit into the reserved transaction credits
225561628a3fSMingming Cao 		 */
225661628a3fSMingming Cao 		if (wbc->nr_to_write > EXT4_MAX_WRITEBACK_PAGES)
225761628a3fSMingming Cao 			wbc->nr_to_write = EXT4_MAX_WRITEBACK_PAGES;
225861628a3fSMingming Cao 
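		/*
		 * Give this round of mpage_da_writepages() up to
		 * wbc->nr_to_write pages; whatever is left in
		 * wbc->nr_to_write afterwards was not written and is
		 * added back to to_write below.
		 */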
225961628a3fSMingming Cao 		to_write -= wbc->nr_to_write;
226061628a3fSMingming Cao 		ret = mpage_da_writepages(mapping, wbc,
226161628a3fSMingming Cao 						ext4_da_get_block_write);
226261628a3fSMingming Cao 		ext4_journal_stop(handle);
226361628a3fSMingming Cao 		if (wbc->nr_to_write) {
226461628a3fSMingming Cao 			/*
226561628a3fSMingming Cao 			 * There is no more writeout needed,
226661628a3fSMingming Cao 			 * or we requested a non-blocking writeout
226761628a3fSMingming Cao 			 * and found the device congested
226861628a3fSMingming Cao 			 */
226961628a3fSMingming Cao 			to_write += wbc->nr_to_write;
227061628a3fSMingming Cao 			break;
227161628a3fSMingming Cao 		}
227261628a3fSMingming Cao 		wbc->nr_to_write = to_write;
227361628a3fSMingming Cao 	}
227461628a3fSMingming Cao 
227561628a3fSMingming Cao out_writepages:
227661628a3fSMingming Cao 	wbc->nr_to_write = to_write;
227761628a3fSMingming Cao 	if (range_start)
227861628a3fSMingming Cao 		wbc->range_start = range_start;
227961628a3fSMingming Cao 	return ret;
228064769240SAlex Tomas }
228164769240SAlex Tomas 
228264769240SAlex Tomas static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
228364769240SAlex Tomas 				loff_t pos, unsigned len, unsigned flags,
228464769240SAlex Tomas 				struct page **pagep, void **fsdata)
228564769240SAlex Tomas {
2286d2a17637SMingming Cao 	int ret, retries = 0;
228764769240SAlex Tomas 	struct page *page;
228864769240SAlex Tomas 	pgoff_t index;
228964769240SAlex Tomas 	unsigned from, to;
229064769240SAlex Tomas 	struct inode *inode = mapping->host;
229164769240SAlex Tomas 	handle_t *handle;
229264769240SAlex Tomas 
229364769240SAlex Tomas 	index = pos >> PAGE_CACHE_SHIFT;
229464769240SAlex Tomas 	from = pos & (PAGE_CACHE_SIZE - 1);
229564769240SAlex Tomas 	to = from + len;
229664769240SAlex Tomas 
2297d2a17637SMingming Cao retry:
229864769240SAlex Tomas 	/*
229964769240SAlex Tomas 	 * With delayed allocation, we don't log the i_disksize update
230064769240SAlex Tomas 	 * if there is delayed block allocation. But we still need
230164769240SAlex Tomas 	 * to journal the i_disksize update when a write to the end
230264769240SAlex Tomas 	 * of the file lands in an already mapped buffer.
230364769240SAlex Tomas 	 */
230464769240SAlex Tomas 	handle = ext4_journal_start(inode, 1);
230564769240SAlex Tomas 	if (IS_ERR(handle)) {
230664769240SAlex Tomas 		ret = PTR_ERR(handle);
230764769240SAlex Tomas 		goto out;
230864769240SAlex Tomas 	}
230964769240SAlex Tomas 
231064769240SAlex Tomas 	page = __grab_cache_page(mapping, index);
2311d5a0d4f7SEric Sandeen 	if (!page) {
2312d5a0d4f7SEric Sandeen 		ext4_journal_stop(handle);
2313d5a0d4f7SEric Sandeen 		ret = -ENOMEM;
2314d5a0d4f7SEric Sandeen 		goto out;
2315d5a0d4f7SEric Sandeen 	}
231664769240SAlex Tomas 	*pagep = page;
231764769240SAlex Tomas 
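	/*
	 * ext4_da_get_block_prep sets the buffers up for delayed
	 * allocation: space is only reserved here, real blocks are
	 * allocated later at writeback time.
	 */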
231864769240SAlex Tomas 	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
231964769240SAlex Tomas 							ext4_da_get_block_prep);
232064769240SAlex Tomas 	if (ret < 0) {
232164769240SAlex Tomas 		unlock_page(page);
232264769240SAlex Tomas 		ext4_journal_stop(handle);
232364769240SAlex Tomas 		page_cache_release(page);
232464769240SAlex Tomas 	}
232564769240SAlex Tomas 
2326d2a17637SMingming Cao 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
2327d2a17637SMingming Cao 		goto retry;
232864769240SAlex Tomas out:
232964769240SAlex Tomas 	return ret;
233064769240SAlex Tomas }
233164769240SAlex Tomas 
2332632eaeabSMingming Cao /*
2333632eaeabSMingming Cao  * Check whether we should update i_disksize when a write to the end
2334632eaeabSMingming Cao  * of the file does not require block allocation
2335632eaeabSMingming Cao  */
2336632eaeabSMingming Cao static int ext4_da_should_update_i_disksize(struct page *page,
2337632eaeabSMingming Cao 					 unsigned long offset)
2338632eaeabSMingming Cao {
2339632eaeabSMingming Cao 	struct buffer_head *bh;
2340632eaeabSMingming Cao 	struct inode *inode = page->mapping->host;
2341632eaeabSMingming Cao 	unsigned int idx;
2342632eaeabSMingming Cao 	int i;
2343632eaeabSMingming Cao 
2344632eaeabSMingming Cao 	bh = page_buffers(page);
2345632eaeabSMingming Cao 	idx = offset >> inode->i_blkbits;
2346632eaeabSMingming Cao 
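	/* walk the page's buffer list to the buffer covering 'offset' */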
2347632eaeabSMingming Cao 	for (i = 0; i < idx; i++)
2348632eaeabSMingming Cao 		bh = bh->b_this_page;
2349632eaeabSMingming Cao 
2350632eaeabSMingming Cao 	if (!buffer_mapped(bh) || (buffer_delay(bh)))
2351632eaeabSMingming Cao 		return 0;
2352632eaeabSMingming Cao 	return 1;
2353632eaeabSMingming Cao }
2354632eaeabSMingming Cao 
235564769240SAlex Tomas static int ext4_da_write_end(struct file *file,
235664769240SAlex Tomas 				struct address_space *mapping,
235764769240SAlex Tomas 				loff_t pos, unsigned len, unsigned copied,
235864769240SAlex Tomas 				struct page *page, void *fsdata)
235964769240SAlex Tomas {
236064769240SAlex Tomas 	struct inode *inode = mapping->host;
236164769240SAlex Tomas 	int ret = 0, ret2;
236264769240SAlex Tomas 	handle_t *handle = ext4_journal_current_handle();
236364769240SAlex Tomas 	loff_t new_i_size;
2364632eaeabSMingming Cao 	unsigned long start, end;
2365632eaeabSMingming Cao 
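	/* byte range within the page covered by this write */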
2366632eaeabSMingming Cao 	start = pos & (PAGE_CACHE_SIZE - 1);
2367632eaeabSMingming Cao 	end = start + copied - 1;
236864769240SAlex Tomas 
236964769240SAlex Tomas 	/*
237064769240SAlex Tomas 	 * generic_write_end() will run mark_inode_dirty() if i_size
237164769240SAlex Tomas 	 * changes.  So let's piggyback the i_disksize mark_inode_dirty
237264769240SAlex Tomas 	 * into that.
237364769240SAlex Tomas 	 */
237464769240SAlex Tomas 
237564769240SAlex Tomas 	new_i_size = pos + copied;
2376632eaeabSMingming Cao 	if (new_i_size > EXT4_I(inode)->i_disksize) {
2377632eaeabSMingming Cao 		if (ext4_da_should_update_i_disksize(page, end)) {
2378632eaeabSMingming Cao 			down_write(&EXT4_I(inode)->i_data_sem);
2379632eaeabSMingming Cao 			if (new_i_size > EXT4_I(inode)->i_disksize) {
238064769240SAlex Tomas 				/*
2381632eaeabSMingming Cao 				 * Updating i_disksize when extending file
2382632eaeabSMingming Cao 				 * without needing block allocation
238364769240SAlex Tomas 				 */
238464769240SAlex Tomas 				if (ext4_should_order_data(inode))
2385632eaeabSMingming Cao 					ret = ext4_jbd2_file_inode(handle,
2386632eaeabSMingming Cao 								   inode);
238764769240SAlex Tomas 
238864769240SAlex Tomas 				EXT4_I(inode)->i_disksize = new_i_size;
238964769240SAlex Tomas 			}
2390632eaeabSMingming Cao 			up_write(&EXT4_I(inode)->i_data_sem);
2391632eaeabSMingming Cao 		}
2392632eaeabSMingming Cao 	}
239364769240SAlex Tomas 	ret2 = generic_write_end(file, mapping, pos, len, copied,
239464769240SAlex Tomas 							page, fsdata);
239564769240SAlex Tomas 	copied = ret2;
239664769240SAlex Tomas 	if (ret2 < 0)
239764769240SAlex Tomas 		ret = ret2;
239864769240SAlex Tomas 	ret2 = ext4_journal_stop(handle);
239964769240SAlex Tomas 	if (!ret)
240064769240SAlex Tomas 		ret = ret2;
240164769240SAlex Tomas 
240264769240SAlex Tomas 	return ret ? ret : copied;
240364769240SAlex Tomas }
240464769240SAlex Tomas 
240564769240SAlex Tomas static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
240664769240SAlex Tomas {
240764769240SAlex Tomas 	/*
240864769240SAlex Tomas 	 * Drop reserved blocks
240964769240SAlex Tomas 	 */
241064769240SAlex Tomas 	BUG_ON(!PageLocked(page));
241164769240SAlex Tomas 	if (!page_has_buffers(page))
241264769240SAlex Tomas 		goto out;
241364769240SAlex Tomas 
2414d2a17637SMingming Cao 	ext4_da_page_release_reservation(page, offset);
241564769240SAlex Tomas 
241664769240SAlex Tomas out:
241764769240SAlex Tomas 	ext4_invalidatepage(page, offset);
241864769240SAlex Tomas 
241964769240SAlex Tomas 	return;
242064769240SAlex Tomas }
242164769240SAlex Tomas 
242264769240SAlex Tomas 
242364769240SAlex Tomas /*
2424ac27a0ecSDave Kleikamp  * bmap() is special.  It gets used by applications such as lilo and by
2425ac27a0ecSDave Kleikamp  * the swapper to find the on-disk block of a specific piece of data.
2426ac27a0ecSDave Kleikamp  *
2427ac27a0ecSDave Kleikamp  * Naturally, this is dangerous if the block concerned is still in the
2428617ba13bSMingming Cao  * journal.  If somebody makes a swapfile on an ext4 data-journaling
2429ac27a0ecSDave Kleikamp  * filesystem and enables swap, then they may get a nasty shock when the
2430ac27a0ecSDave Kleikamp  * data getting swapped to that swapfile suddenly gets overwritten by
2431ac27a0ecSDave Kleikamp  * the original zeros written out previously to the journal and
2432ac27a0ecSDave Kleikamp  * awaiting writeback in the kernel's buffer cache.
2433ac27a0ecSDave Kleikamp  *
2434ac27a0ecSDave Kleikamp  * So, if we see any bmap calls here on a modified, data-journaled file,
2435ac27a0ecSDave Kleikamp  * take extra steps to flush any blocks which might be in the cache.
2436ac27a0ecSDave Kleikamp  */
2437617ba13bSMingming Cao static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
2438ac27a0ecSDave Kleikamp {
2439ac27a0ecSDave Kleikamp 	struct inode *inode = mapping->host;
2440ac27a0ecSDave Kleikamp 	journal_t *journal;
2441ac27a0ecSDave Kleikamp 	int err;
2442ac27a0ecSDave Kleikamp 
244364769240SAlex Tomas 	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
244464769240SAlex Tomas 			test_opt(inode->i_sb, DELALLOC)) {
244564769240SAlex Tomas 		/*
244664769240SAlex Tomas 		 * With delalloc we want to sync the file
244764769240SAlex Tomas 		 * so that we can make sure we allocate
244864769240SAlex Tomas 		 * blocks for the file
244964769240SAlex Tomas 		 */
245064769240SAlex Tomas 		filemap_write_and_wait(mapping);
245164769240SAlex Tomas 	}
245264769240SAlex Tomas 
2453617ba13bSMingming Cao 	if (EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
2454ac27a0ecSDave Kleikamp 		/*
2455ac27a0ecSDave Kleikamp 		 * This is a REALLY heavyweight approach, but the use of
2456ac27a0ecSDave Kleikamp 		 * bmap on dirty files is expected to be extremely rare:
2457ac27a0ecSDave Kleikamp 		 * only if we run lilo or swapon on a freshly made file
2458ac27a0ecSDave Kleikamp 		 * do we expect this to happen.
2459ac27a0ecSDave Kleikamp 		 *
2460ac27a0ecSDave Kleikamp 		 * (bmap requires CAP_SYS_RAWIO so this does not
2461ac27a0ecSDave Kleikamp 		 * represent an unprivileged user DOS attack --- we'd be
2462ac27a0ecSDave Kleikamp 		 * in trouble if mortal users could trigger this path at
2463ac27a0ecSDave Kleikamp 		 * will.)
2464ac27a0ecSDave Kleikamp 		 *
2465617ba13bSMingming Cao 		 * NB. EXT4_STATE_JDATA is not set on files other than
2466ac27a0ecSDave Kleikamp 		 * regular files.  If somebody wants to bmap a directory
2467ac27a0ecSDave Kleikamp 		 * or symlink and gets confused because the buffer
2468ac27a0ecSDave Kleikamp 		 * hasn't yet been flushed to disk, they deserve
2469ac27a0ecSDave Kleikamp 		 * everything they get.
2470ac27a0ecSDave Kleikamp 		 */
2471ac27a0ecSDave Kleikamp 
2472617ba13bSMingming Cao 		EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
2473617ba13bSMingming Cao 		journal = EXT4_JOURNAL(inode);
2474dab291afSMingming Cao 		jbd2_journal_lock_updates(journal);
2475dab291afSMingming Cao 		err = jbd2_journal_flush(journal);
2476dab291afSMingming Cao 		jbd2_journal_unlock_updates(journal);
2477ac27a0ecSDave Kleikamp 
2478ac27a0ecSDave Kleikamp 		if (err)
2479ac27a0ecSDave Kleikamp 			return 0;
2480ac27a0ecSDave Kleikamp 	}
2481ac27a0ecSDave Kleikamp 
2482617ba13bSMingming Cao 	return generic_block_bmap(mapping, block, ext4_get_block);
2483ac27a0ecSDave Kleikamp }
2484ac27a0ecSDave Kleikamp 
2485ac27a0ecSDave Kleikamp static int bget_one(handle_t *handle, struct buffer_head *bh)
2486ac27a0ecSDave Kleikamp {
2487ac27a0ecSDave Kleikamp 	get_bh(bh);
2488ac27a0ecSDave Kleikamp 	return 0;
2489ac27a0ecSDave Kleikamp }
2490ac27a0ecSDave Kleikamp 
2491ac27a0ecSDave Kleikamp static int bput_one(handle_t *handle, struct buffer_head *bh)
2492ac27a0ecSDave Kleikamp {
2493ac27a0ecSDave Kleikamp 	put_bh(bh);
2494ac27a0ecSDave Kleikamp 	return 0;
2495ac27a0ecSDave Kleikamp }
2496ac27a0ecSDave Kleikamp 
2497ac27a0ecSDave Kleikamp /*
2498678aaf48SJan Kara  * Note that we don't need to start a transaction unless we're journaling data
2499678aaf48SJan Kara  * because we should have holes filled from ext4_page_mkwrite(). We don't even
2500678aaf48SJan Kara  * need to file the inode on the transaction's list in ordered mode because if
2501678aaf48SJan Kara  * we are writing back data added by write(), the inode is already there and if
2502678aaf48SJan Kara  * we are writing back data modified via mmap(), no one guarantees in which
2503678aaf48SJan Kara  * transaction the data will hit the disk. In case we are journaling data, we
2504678aaf48SJan Kara  * cannot start transaction directly because transaction start ranks above page
2505678aaf48SJan Kara  * lock so we have to do some magic.
2506ac27a0ecSDave Kleikamp  *
2507678aaf48SJan Kara  * In all journaling modes block_write_full_page() will start the I/O.
2508ac27a0ecSDave Kleikamp  *
2509ac27a0ecSDave Kleikamp  * Problem:
2510ac27a0ecSDave Kleikamp  *
2511617ba13bSMingming Cao  *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
2512617ba13bSMingming Cao  *		ext4_writepage()
2513ac27a0ecSDave Kleikamp  *
2514ac27a0ecSDave Kleikamp  * Similar for:
2515ac27a0ecSDave Kleikamp  *
2516617ba13bSMingming Cao  *	ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
2517ac27a0ecSDave Kleikamp  *
2518617ba13bSMingming Cao  * Same applies to ext4_get_block().  We will deadlock on various things like
25190e855ac8SAneesh Kumar K.V  * lock_journal and i_data_sem
2520ac27a0ecSDave Kleikamp  *
2521ac27a0ecSDave Kleikamp  * Setting PF_MEMALLOC here doesn't work - too many internal memory
2522ac27a0ecSDave Kleikamp  * allocations fail.
2523ac27a0ecSDave Kleikamp  *
2524ac27a0ecSDave Kleikamp  * 16May01: If we're reentered then journal_current_handle() will be
2525ac27a0ecSDave Kleikamp  *	    non-zero. We simply *return*.
2526ac27a0ecSDave Kleikamp  *
2527ac27a0ecSDave Kleikamp  * 1 July 2001: @@@ FIXME:
2528ac27a0ecSDave Kleikamp  *   In journalled data mode, a data buffer may be metadata against the
2529ac27a0ecSDave Kleikamp  *   current transaction.  But the same file is part of a shared mapping
2530ac27a0ecSDave Kleikamp  *   and someone does a writepage() on it.
2531ac27a0ecSDave Kleikamp  *
2532ac27a0ecSDave Kleikamp  *   We will move the buffer onto the async_data list, but *after* it has
2533ac27a0ecSDave Kleikamp  *   been dirtied. So there's a small window where we have dirty data on
2534ac27a0ecSDave Kleikamp  *   BJ_Metadata.
2535ac27a0ecSDave Kleikamp  *
2536ac27a0ecSDave Kleikamp  *   Note that this only applies to the last partial page in the file.  The
2537ac27a0ecSDave Kleikamp  *   bit which block_write_full_page() uses prepare/commit for.  (That's
2538ac27a0ecSDave Kleikamp  *   broken code anyway: it's wrong for msync()).
2539ac27a0ecSDave Kleikamp  *
2540ac27a0ecSDave Kleikamp  *   It's a rare case: affects the final partial page, for journalled data
2541ac27a0ecSDave Kleikamp  *   where the file is subject to both write() and writepage() in the same
2542ac27a0ecSDave Kleikamp  *   transaction.  To fix it we'll need a custom block_write_full_page().
2543ac27a0ecSDave Kleikamp  *   We'll probably need that anyway for journalling writepage() output.
2544ac27a0ecSDave Kleikamp  *
2545ac27a0ecSDave Kleikamp  * We don't honour synchronous mounts for writepage().  That would be
2546ac27a0ecSDave Kleikamp  * disastrous.  Any write() or metadata operation will sync the fs for
2547ac27a0ecSDave Kleikamp  * us.
2548ac27a0ecSDave Kleikamp  *
2549ac27a0ecSDave Kleikamp  */
2550678aaf48SJan Kara static int __ext4_normal_writepage(struct page *page,
2551cf108bcaSJan Kara 				struct writeback_control *wbc)
2552cf108bcaSJan Kara {
2553cf108bcaSJan Kara 	struct inode *inode = page->mapping->host;
2554cf108bcaSJan Kara 
2555cf108bcaSJan Kara 	if (test_opt(inode->i_sb, NOBH))
2556f0e6c985SAneesh Kumar K.V 		return nobh_writepage(page,
2557f0e6c985SAneesh Kumar K.V 					ext4_normal_get_block_write, wbc);
2558cf108bcaSJan Kara 	else
2559f0e6c985SAneesh Kumar K.V 		return block_write_full_page(page,
2560f0e6c985SAneesh Kumar K.V 						ext4_normal_get_block_write,
2561f0e6c985SAneesh Kumar K.V 						wbc);
2562cf108bcaSJan Kara }
2563cf108bcaSJan Kara 
2564678aaf48SJan Kara static int ext4_normal_writepage(struct page *page,
2565ac27a0ecSDave Kleikamp 				struct writeback_control *wbc)
2566ac27a0ecSDave Kleikamp {
2567ac27a0ecSDave Kleikamp 	struct inode *inode = page->mapping->host;
2568cf108bcaSJan Kara 	loff_t size = i_size_read(inode);
2569cf108bcaSJan Kara 	loff_t len;
2570cf108bcaSJan Kara 
2571cf108bcaSJan Kara 	J_ASSERT(PageLocked(page));
2572cf108bcaSJan Kara 	if (page->index == size >> PAGE_CACHE_SHIFT)
2573cf108bcaSJan Kara 		len = size & ~PAGE_CACHE_MASK;
2574cf108bcaSJan Kara 	else
2575cf108bcaSJan Kara 		len = PAGE_CACHE_SIZE;
2576f0e6c985SAneesh Kumar K.V 
2577f0e6c985SAneesh Kumar K.V 	if (page_has_buffers(page)) {
2578f0e6c985SAneesh Kumar K.V 		/* if the page has buffers they should all be mapped
2579f0e6c985SAneesh Kumar K.V 		 * and allocated. If there are no buffers attached
2580f0e6c985SAneesh Kumar K.V 		 * to the page we know the page is dirty but it lost
2581f0e6c985SAneesh Kumar K.V 		 * buffers. That means that at some moment in time
2582f0e6c985SAneesh Kumar K.V 		 * after write_begin() / write_end() has been called
2583f0e6c985SAneesh Kumar K.V 		 * all buffers have been clean and thus they must have been
2584f0e6c985SAneesh Kumar K.V 		 * written at least once. So they are all mapped and we can
2585f0e6c985SAneesh Kumar K.V 		 * happily proceed with mapping them and writing the page.
2586f0e6c985SAneesh Kumar K.V 		 */
2587cf108bcaSJan Kara 		BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
2588cf108bcaSJan Kara 					ext4_bh_unmapped_or_delay));
2589f0e6c985SAneesh Kumar K.V 	}
2590cf108bcaSJan Kara 
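	/*
	 * If we were re-entered with a transaction handle already open
	 * (see the reentrancy notes above), we cannot safely write the
	 * page now; redirty it and let a later writeback pass handle it.
	 */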
2591cf108bcaSJan Kara 	if (!ext4_journal_current_handle())
2592678aaf48SJan Kara 		return __ext4_normal_writepage(page, wbc);
2593cf108bcaSJan Kara 
2594cf108bcaSJan Kara 	redirty_page_for_writepage(wbc, page);
2595cf108bcaSJan Kara 	unlock_page(page);
2596cf108bcaSJan Kara 	return 0;
2597cf108bcaSJan Kara }
2598cf108bcaSJan Kara 
2599cf108bcaSJan Kara static int __ext4_journalled_writepage(struct page *page,
2600cf108bcaSJan Kara 				struct writeback_control *wbc)
2601cf108bcaSJan Kara {
2602cf108bcaSJan Kara 	struct address_space *mapping = page->mapping;
2603cf108bcaSJan Kara 	struct inode *inode = mapping->host;
2604cf108bcaSJan Kara 	struct buffer_head *page_bufs;
2605ac27a0ecSDave Kleikamp 	handle_t *handle = NULL;
2606ac27a0ecSDave Kleikamp 	int ret = 0;
2607ac27a0ecSDave Kleikamp 	int err;
2608ac27a0ecSDave Kleikamp 
2609f0e6c985SAneesh Kumar K.V 	ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
2610f0e6c985SAneesh Kumar K.V 					ext4_normal_get_block_write);
2611cf108bcaSJan Kara 	if (ret != 0)
2612cf108bcaSJan Kara 		goto out_unlock;
2613cf108bcaSJan Kara 
2614cf108bcaSJan Kara 	page_bufs = page_buffers(page);
2615cf108bcaSJan Kara 	walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE, NULL,
2616cf108bcaSJan Kara 								bget_one);
2617cf108bcaSJan Kara 	/* As soon as we unlock the page, it can go away, but we have
2618cf108bcaSJan Kara 	 * references to buffers so we are safe */
2619cf108bcaSJan Kara 	unlock_page(page);
2620ac27a0ecSDave Kleikamp 
2621617ba13bSMingming Cao 	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
2622ac27a0ecSDave Kleikamp 	if (IS_ERR(handle)) {
2623ac27a0ecSDave Kleikamp 		ret = PTR_ERR(handle);
2624cf108bcaSJan Kara 		goto out;
2625ac27a0ecSDave Kleikamp 	}
2626ac27a0ecSDave Kleikamp 
2627cf108bcaSJan Kara 	ret = walk_page_buffers(handle, page_bufs, 0,
2628cf108bcaSJan Kara 			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
2629ac27a0ecSDave Kleikamp 
2630cf108bcaSJan Kara 	err = walk_page_buffers(handle, page_bufs, 0,
2631cf108bcaSJan Kara 				PAGE_CACHE_SIZE, NULL, write_end_fn);
2632cf108bcaSJan Kara 	if (ret == 0)
2633cf108bcaSJan Kara 		ret = err;
2634617ba13bSMingming Cao 	err = ext4_journal_stop(handle);
2635ac27a0ecSDave Kleikamp 	if (!ret)
2636ac27a0ecSDave Kleikamp 		ret = err;
2637ac27a0ecSDave Kleikamp 
2638cf108bcaSJan Kara 	walk_page_buffers(handle, page_bufs, 0,
2639cf108bcaSJan Kara 				PAGE_CACHE_SIZE, NULL, bput_one);
2640cf108bcaSJan Kara 	EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
2641cf108bcaSJan Kara 	goto out;
2642cf108bcaSJan Kara 
2643cf108bcaSJan Kara out_unlock:
2644ac27a0ecSDave Kleikamp 	unlock_page(page);
2645cf108bcaSJan Kara out:
2646ac27a0ecSDave Kleikamp 	return ret;
2647ac27a0ecSDave Kleikamp }
2648ac27a0ecSDave Kleikamp 
2649617ba13bSMingming Cao static int ext4_journalled_writepage(struct page *page,
2650ac27a0ecSDave Kleikamp 				struct writeback_control *wbc)
2651ac27a0ecSDave Kleikamp {
2652ac27a0ecSDave Kleikamp 	struct inode *inode = page->mapping->host;
2653cf108bcaSJan Kara 	loff_t size = i_size_read(inode);
2654cf108bcaSJan Kara 	loff_t len;
2655cf108bcaSJan Kara 
2656cf108bcaSJan Kara 	J_ASSERT(PageLocked(page));
2657cf108bcaSJan Kara 	if (page->index == size >> PAGE_CACHE_SHIFT)
2658cf108bcaSJan Kara 		len = size & ~PAGE_CACHE_MASK;
2659cf108bcaSJan Kara 	else
2660cf108bcaSJan Kara 		len = PAGE_CACHE_SIZE;
2661f0e6c985SAneesh Kumar K.V 
2662f0e6c985SAneesh Kumar K.V 	if (page_has_buffers(page)) {
2663f0e6c985SAneesh Kumar K.V 		/* if the page has buffers they should all be mapped
2664f0e6c985SAneesh Kumar K.V 		 * and allocated. If there are no buffers attached
2665f0e6c985SAneesh Kumar K.V 		 * to the page we know the page is dirty but it lost
2666f0e6c985SAneesh Kumar K.V 		 * buffers. That means that at some moment in time
2667f0e6c985SAneesh Kumar K.V 		 * after write_begin() / write_end() has been called
2668f0e6c985SAneesh Kumar K.V 		 * all buffers have been clean and thus they must have been
2669f0e6c985SAneesh Kumar K.V 		 * written at least once. So they are all mapped and we can
2670f0e6c985SAneesh Kumar K.V 		 * happily proceed with mapping them and writing the page.
2671f0e6c985SAneesh Kumar K.V 		 */
2672cf108bcaSJan Kara 		BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
2673cf108bcaSJan Kara 					ext4_bh_unmapped_or_delay));
2674f0e6c985SAneesh Kumar K.V 	}
2675ac27a0ecSDave Kleikamp 
2676617ba13bSMingming Cao 	if (ext4_journal_current_handle())
2677ac27a0ecSDave Kleikamp 		goto no_write;
2678ac27a0ecSDave Kleikamp 
2679cf108bcaSJan Kara 	if (PageChecked(page)) {
2680ac27a0ecSDave Kleikamp 		/*
2681ac27a0ecSDave Kleikamp 		 * It's mmapped pagecache.  Add buffers and journal it.  There
2682ac27a0ecSDave Kleikamp 		 * doesn't seem much point in redirtying the page here.
2683ac27a0ecSDave Kleikamp 		 */
2684ac27a0ecSDave Kleikamp 		ClearPageChecked(page);
2685cf108bcaSJan Kara 		return __ext4_journalled_writepage(page, wbc);
2686ac27a0ecSDave Kleikamp 	} else {
2687ac27a0ecSDave Kleikamp 		/*
2688ac27a0ecSDave Kleikamp 		 * It may be a page full of checkpoint-mode buffers.  We don't
2689ac27a0ecSDave Kleikamp 		 * really know unless we go poke around in the buffer_heads.
2690ac27a0ecSDave Kleikamp 		 * But block_write_full_page will do the right thing.
2691ac27a0ecSDave Kleikamp 		 */
2692f0e6c985SAneesh Kumar K.V 		return block_write_full_page(page,
2693f0e6c985SAneesh Kumar K.V 						ext4_normal_get_block_write,
2694f0e6c985SAneesh Kumar K.V 						wbc);
2695ac27a0ecSDave Kleikamp 	}
2696ac27a0ecSDave Kleikamp no_write:
2697ac27a0ecSDave Kleikamp 	redirty_page_for_writepage(wbc, page);
2698ac27a0ecSDave Kleikamp 	unlock_page(page);
2699cf108bcaSJan Kara 	return 0;
2700ac27a0ecSDave Kleikamp }
2701ac27a0ecSDave Kleikamp 
2702617ba13bSMingming Cao static int ext4_readpage(struct file *file, struct page *page)
2703ac27a0ecSDave Kleikamp {
2704617ba13bSMingming Cao 	return mpage_readpage(page, ext4_get_block);
2705ac27a0ecSDave Kleikamp }
2706ac27a0ecSDave Kleikamp 
2707ac27a0ecSDave Kleikamp static int
2708617ba13bSMingming Cao ext4_readpages(struct file *file, struct address_space *mapping,
2709ac27a0ecSDave Kleikamp 		struct list_head *pages, unsigned nr_pages)
2710ac27a0ecSDave Kleikamp {
2711617ba13bSMingming Cao 	return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
2712ac27a0ecSDave Kleikamp }
2713ac27a0ecSDave Kleikamp 
2714617ba13bSMingming Cao static void ext4_invalidatepage(struct page *page, unsigned long offset)
2715ac27a0ecSDave Kleikamp {
2716617ba13bSMingming Cao 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2717ac27a0ecSDave Kleikamp 
2718ac27a0ecSDave Kleikamp 	/*
2719ac27a0ecSDave Kleikamp 	 * If it's a full truncate we just forget about the pending dirtying
2720ac27a0ecSDave Kleikamp 	 */
2721ac27a0ecSDave Kleikamp 	if (offset == 0)
2722ac27a0ecSDave Kleikamp 		ClearPageChecked(page);
2723ac27a0ecSDave Kleikamp 
2724dab291afSMingming Cao 	jbd2_journal_invalidatepage(journal, page, offset);
2725ac27a0ecSDave Kleikamp }
2726ac27a0ecSDave Kleikamp 
2727617ba13bSMingming Cao static int ext4_releasepage(struct page *page, gfp_t wait)
2728ac27a0ecSDave Kleikamp {
2729617ba13bSMingming Cao 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2730ac27a0ecSDave Kleikamp 
2731ac27a0ecSDave Kleikamp 	WARN_ON(PageChecked(page));
2732ac27a0ecSDave Kleikamp 	if (!page_has_buffers(page))
2733ac27a0ecSDave Kleikamp 		return 0;
2734dab291afSMingming Cao 	return jbd2_journal_try_to_free_buffers(journal, page, wait);
2735ac27a0ecSDave Kleikamp }
2736ac27a0ecSDave Kleikamp 
2737ac27a0ecSDave Kleikamp /*
2738ac27a0ecSDave Kleikamp  * If the O_DIRECT write will extend the file then add this inode to the
2739ac27a0ecSDave Kleikamp  * orphan list.  So recovery will truncate it back to the original size
2740ac27a0ecSDave Kleikamp  * if the machine crashes during the write.
2741ac27a0ecSDave Kleikamp  *
2742ac27a0ecSDave Kleikamp  * If the O_DIRECT write is instantiating holes inside i_size and the machine
27437fb5409dSJan Kara  * crashes then stale disk data _may_ be exposed inside the file. But current
27447fb5409dSJan Kara  * VFS code falls back to the buffered path in that case so we are safe.
2745ac27a0ecSDave Kleikamp  */
2746617ba13bSMingming Cao static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
2747ac27a0ecSDave Kleikamp 			const struct iovec *iov, loff_t offset,
2748ac27a0ecSDave Kleikamp 			unsigned long nr_segs)
2749ac27a0ecSDave Kleikamp {
2750ac27a0ecSDave Kleikamp 	struct file *file = iocb->ki_filp;
2751ac27a0ecSDave Kleikamp 	struct inode *inode = file->f_mapping->host;
2752617ba13bSMingming Cao 	struct ext4_inode_info *ei = EXT4_I(inode);
27537fb5409dSJan Kara 	handle_t *handle;
2754ac27a0ecSDave Kleikamp 	ssize_t ret;
2755ac27a0ecSDave Kleikamp 	int orphan = 0;
2756ac27a0ecSDave Kleikamp 	size_t count = iov_length(iov, nr_segs);
2757ac27a0ecSDave Kleikamp 
2758ac27a0ecSDave Kleikamp 	if (rw == WRITE) {
2759ac27a0ecSDave Kleikamp 		loff_t final_size = offset + count;
2760ac27a0ecSDave Kleikamp 
27617fb5409dSJan Kara 		if (final_size > inode->i_size) {
27627fb5409dSJan Kara 			/* Credits for sb + inode write */
27637fb5409dSJan Kara 			handle = ext4_journal_start(inode, 2);
2764ac27a0ecSDave Kleikamp 			if (IS_ERR(handle)) {
2765ac27a0ecSDave Kleikamp 				ret = PTR_ERR(handle);
2766ac27a0ecSDave Kleikamp 				goto out;
2767ac27a0ecSDave Kleikamp 			}
2768617ba13bSMingming Cao 			ret = ext4_orphan_add(handle, inode);
27697fb5409dSJan Kara 			if (ret) {
27707fb5409dSJan Kara 				ext4_journal_stop(handle);
27717fb5409dSJan Kara 				goto out;
27727fb5409dSJan Kara 			}
2773ac27a0ecSDave Kleikamp 			orphan = 1;
2774ac27a0ecSDave Kleikamp 			ei->i_disksize = inode->i_size;
27757fb5409dSJan Kara 			ext4_journal_stop(handle);
2776ac27a0ecSDave Kleikamp 		}
2777ac27a0ecSDave Kleikamp 	}
2778ac27a0ecSDave Kleikamp 
2779ac27a0ecSDave Kleikamp 	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
2780ac27a0ecSDave Kleikamp 				 offset, nr_segs,
2781617ba13bSMingming Cao 				 ext4_get_block, NULL);
2782ac27a0ecSDave Kleikamp 
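	/*
	 * For an extending write the inode was put on the orphan list
	 * above; now that the I/O has completed (or failed), take it off
	 * again and, if the file really grew, update i_size/i_disksize.
	 */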
27837fb5409dSJan Kara 	if (orphan) {
2784ac27a0ecSDave Kleikamp 		int err;
2785ac27a0ecSDave Kleikamp 
27867fb5409dSJan Kara 		/* Credits for sb + inode write */
27877fb5409dSJan Kara 		handle = ext4_journal_start(inode, 2);
27887fb5409dSJan Kara 		if (IS_ERR(handle)) {
27897fb5409dSJan Kara 			/* This is really bad luck. We've written the data
27907fb5409dSJan Kara 			 * but cannot extend i_size. Bail out and pretend
27917fb5409dSJan Kara 			 * the write failed... */
27927fb5409dSJan Kara 			ret = PTR_ERR(handle);
27937fb5409dSJan Kara 			goto out;
27947fb5409dSJan Kara 		}
27957fb5409dSJan Kara 		if (inode->i_nlink)
2796617ba13bSMingming Cao 			ext4_orphan_del(handle, inode);
27977fb5409dSJan Kara 		if (ret > 0) {
2798ac27a0ecSDave Kleikamp 			loff_t end = offset + ret;
2799ac27a0ecSDave Kleikamp 			if (end > inode->i_size) {
2800ac27a0ecSDave Kleikamp 				ei->i_disksize = end;
2801ac27a0ecSDave Kleikamp 				i_size_write(inode, end);
2802ac27a0ecSDave Kleikamp 				/*
2803ac27a0ecSDave Kleikamp 				 * We're going to return a positive `ret'
2804ac27a0ecSDave Kleikamp 				 * here due to non-zero-length I/O, so there's
2805ac27a0ecSDave Kleikamp 				 * no way of reporting error returns from
2806617ba13bSMingming Cao 				 * ext4_mark_inode_dirty() to userspace.  So
2807ac27a0ecSDave Kleikamp 				 * ignore it.
2808ac27a0ecSDave Kleikamp 				 */
2809617ba13bSMingming Cao 				ext4_mark_inode_dirty(handle, inode);
2810ac27a0ecSDave Kleikamp 			}
2811ac27a0ecSDave Kleikamp 		}
2812617ba13bSMingming Cao 		err = ext4_journal_stop(handle);
2813ac27a0ecSDave Kleikamp 		if (ret == 0)
2814ac27a0ecSDave Kleikamp 			ret = err;
2815ac27a0ecSDave Kleikamp 	}
2816ac27a0ecSDave Kleikamp out:
2817ac27a0ecSDave Kleikamp 	return ret;
2818ac27a0ecSDave Kleikamp }
2819ac27a0ecSDave Kleikamp 
2820ac27a0ecSDave Kleikamp /*
2821617ba13bSMingming Cao  * Pages can be marked dirty completely asynchronously from ext4's journalling
2822ac27a0ecSDave Kleikamp  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
2823ac27a0ecSDave Kleikamp  * much here because ->set_page_dirty is called under VFS locks.  The page is
2824ac27a0ecSDave Kleikamp  * not necessarily locked.
2825ac27a0ecSDave Kleikamp  *
2826ac27a0ecSDave Kleikamp  * We cannot just dirty the page and leave attached buffers clean, because the
2827ac27a0ecSDave Kleikamp  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
2828ac27a0ecSDave Kleikamp  * or jbddirty because all the journalling code will explode.
2829ac27a0ecSDave Kleikamp  *
2830ac27a0ecSDave Kleikamp  * So what we do is to mark the page "pending dirty" and next time writepage
2831ac27a0ecSDave Kleikamp  * is called, propagate that into the buffers appropriately.
2832ac27a0ecSDave Kleikamp  */
2833617ba13bSMingming Cao static int ext4_journalled_set_page_dirty(struct page *page)
2834ac27a0ecSDave Kleikamp {
2835ac27a0ecSDave Kleikamp 	SetPageChecked(page);
2836ac27a0ecSDave Kleikamp 	return __set_page_dirty_nobuffers(page);
2837ac27a0ecSDave Kleikamp }
2838ac27a0ecSDave Kleikamp 
2839617ba13bSMingming Cao static const struct address_space_operations ext4_ordered_aops = {
2840617ba13bSMingming Cao 	.readpage		= ext4_readpage,
2841617ba13bSMingming Cao 	.readpages		= ext4_readpages,
2842678aaf48SJan Kara 	.writepage		= ext4_normal_writepage,
2843ac27a0ecSDave Kleikamp 	.sync_page		= block_sync_page,
2844bfc1af65SNick Piggin 	.write_begin		= ext4_write_begin,
2845bfc1af65SNick Piggin 	.write_end		= ext4_ordered_write_end,
2846617ba13bSMingming Cao 	.bmap			= ext4_bmap,
2847617ba13bSMingming Cao 	.invalidatepage		= ext4_invalidatepage,
2848617ba13bSMingming Cao 	.releasepage		= ext4_releasepage,
2849617ba13bSMingming Cao 	.direct_IO		= ext4_direct_IO,
2850ac27a0ecSDave Kleikamp 	.migratepage		= buffer_migrate_page,
2851*8ab22b9aSHisashi Hifumi 	.is_partially_uptodate  = block_is_partially_uptodate,
2852ac27a0ecSDave Kleikamp };
2853ac27a0ecSDave Kleikamp 
2854617ba13bSMingming Cao static const struct address_space_operations ext4_writeback_aops = {
2855617ba13bSMingming Cao 	.readpage		= ext4_readpage,
2856617ba13bSMingming Cao 	.readpages		= ext4_readpages,
2857678aaf48SJan Kara 	.writepage		= ext4_normal_writepage,
2858ac27a0ecSDave Kleikamp 	.sync_page		= block_sync_page,
2859bfc1af65SNick Piggin 	.write_begin		= ext4_write_begin,
2860bfc1af65SNick Piggin 	.write_end		= ext4_writeback_write_end,
2861617ba13bSMingming Cao 	.bmap			= ext4_bmap,
2862617ba13bSMingming Cao 	.invalidatepage		= ext4_invalidatepage,
2863617ba13bSMingming Cao 	.releasepage		= ext4_releasepage,
2864617ba13bSMingming Cao 	.direct_IO		= ext4_direct_IO,
2865ac27a0ecSDave Kleikamp 	.migratepage		= buffer_migrate_page,
2866*8ab22b9aSHisashi Hifumi 	.is_partially_uptodate  = block_is_partially_uptodate,
2867ac27a0ecSDave Kleikamp };
2868ac27a0ecSDave Kleikamp 
2869617ba13bSMingming Cao static const struct address_space_operations ext4_journalled_aops = {
2870617ba13bSMingming Cao 	.readpage		= ext4_readpage,
2871617ba13bSMingming Cao 	.readpages		= ext4_readpages,
2872617ba13bSMingming Cao 	.writepage		= ext4_journalled_writepage,
2873ac27a0ecSDave Kleikamp 	.sync_page		= block_sync_page,
2874bfc1af65SNick Piggin 	.write_begin		= ext4_write_begin,
2875bfc1af65SNick Piggin 	.write_end		= ext4_journalled_write_end,
2876617ba13bSMingming Cao 	.set_page_dirty		= ext4_journalled_set_page_dirty,
2877617ba13bSMingming Cao 	.bmap			= ext4_bmap,
2878617ba13bSMingming Cao 	.invalidatepage		= ext4_invalidatepage,
2879617ba13bSMingming Cao 	.releasepage		= ext4_releasepage,
2880*8ab22b9aSHisashi Hifumi 	.is_partially_uptodate  = block_is_partially_uptodate,
2881ac27a0ecSDave Kleikamp };
2882ac27a0ecSDave Kleikamp 
288364769240SAlex Tomas static const struct address_space_operations ext4_da_aops = {
288464769240SAlex Tomas 	.readpage		= ext4_readpage,
288564769240SAlex Tomas 	.readpages		= ext4_readpages,
288664769240SAlex Tomas 	.writepage		= ext4_da_writepage,
288764769240SAlex Tomas 	.writepages		= ext4_da_writepages,
288864769240SAlex Tomas 	.sync_page		= block_sync_page,
288964769240SAlex Tomas 	.write_begin		= ext4_da_write_begin,
289064769240SAlex Tomas 	.write_end		= ext4_da_write_end,
289164769240SAlex Tomas 	.bmap			= ext4_bmap,
289264769240SAlex Tomas 	.invalidatepage		= ext4_da_invalidatepage,
289364769240SAlex Tomas 	.releasepage		= ext4_releasepage,
289464769240SAlex Tomas 	.direct_IO		= ext4_direct_IO,
289564769240SAlex Tomas 	.migratepage		= buffer_migrate_page,
2896*8ab22b9aSHisashi Hifumi 	.is_partially_uptodate  = block_is_partially_uptodate,
289764769240SAlex Tomas };
289864769240SAlex Tomas 
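/*
 * Pick the address_space operations for an inode based on its
 * journalling mode and whether delayed allocation is enabled:
 * delalloc mounts get ext4_da_aops, data=journal inodes get the
 * journalled aops, and everything else uses the ordered or
 * writeback aops.
 */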
2899617ba13bSMingming Cao void ext4_set_aops(struct inode *inode)
2900ac27a0ecSDave Kleikamp {
2901cd1aac32SAneesh Kumar K.V 	if (ext4_should_order_data(inode) &&
2902cd1aac32SAneesh Kumar K.V 		test_opt(inode->i_sb, DELALLOC))
2903cd1aac32SAneesh Kumar K.V 		inode->i_mapping->a_ops = &ext4_da_aops;
2904cd1aac32SAneesh Kumar K.V 	else if (ext4_should_order_data(inode))
2905617ba13bSMingming Cao 		inode->i_mapping->a_ops = &ext4_ordered_aops;
290664769240SAlex Tomas 	else if (ext4_should_writeback_data(inode) &&
290764769240SAlex Tomas 		 test_opt(inode->i_sb, DELALLOC))
290864769240SAlex Tomas 		inode->i_mapping->a_ops = &ext4_da_aops;
2909617ba13bSMingming Cao 	else if (ext4_should_writeback_data(inode))
2910617ba13bSMingming Cao 		inode->i_mapping->a_ops = &ext4_writeback_aops;
2911ac27a0ecSDave Kleikamp 	else
2912617ba13bSMingming Cao 		inode->i_mapping->a_ops = &ext4_journalled_aops;
2913ac27a0ecSDave Kleikamp }
2914ac27a0ecSDave Kleikamp 
2915ac27a0ecSDave Kleikamp /*
2916617ba13bSMingming Cao  * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
2917ac27a0ecSDave Kleikamp  * up to the end of the block which corresponds to `from'.
2918ac27a0ecSDave Kleikamp  * This required during truncate. We need to physically zero the tail end
2919ac27a0ecSDave Kleikamp  * This is required during truncate. We need to physically zero the tail end
2920ac27a0ecSDave Kleikamp  */
2921cf108bcaSJan Kara int ext4_block_truncate_page(handle_t *handle,
2922ac27a0ecSDave Kleikamp 		struct address_space *mapping, loff_t from)
2923ac27a0ecSDave Kleikamp {
2924617ba13bSMingming Cao 	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
2925ac27a0ecSDave Kleikamp 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2926725d26d3SAneesh Kumar K.V 	unsigned blocksize, length, pos;
2927725d26d3SAneesh Kumar K.V 	ext4_lblk_t iblock;
2928ac27a0ecSDave Kleikamp 	struct inode *inode = mapping->host;
2929ac27a0ecSDave Kleikamp 	struct buffer_head *bh;
2930cf108bcaSJan Kara 	struct page *page;
2931ac27a0ecSDave Kleikamp 	int err = 0;
2932ac27a0ecSDave Kleikamp 
2933cf108bcaSJan Kara 	page = grab_cache_page(mapping, from >> PAGE_CACHE_SHIFT);
2934cf108bcaSJan Kara 	if (!page)
2935cf108bcaSJan Kara 		return -EINVAL;
2936cf108bcaSJan Kara 
2937ac27a0ecSDave Kleikamp 	blocksize = inode->i_sb->s_blocksize;
2938ac27a0ecSDave Kleikamp 	length = blocksize - (offset & (blocksize - 1));
2939ac27a0ecSDave Kleikamp 	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
2940ac27a0ecSDave Kleikamp 
2941ac27a0ecSDave Kleikamp 	/*
2942ac27a0ecSDave Kleikamp 	 * For "nobh" option,  we can only work if we don't need to
2943ac27a0ecSDave Kleikamp 	 * read-in the page - otherwise we create buffers to do the IO.
2944ac27a0ecSDave Kleikamp 	 */
2945ac27a0ecSDave Kleikamp 	if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
2946617ba13bSMingming Cao 	     ext4_should_writeback_data(inode) && PageUptodate(page)) {
2947eebd2aa3SChristoph Lameter 		zero_user(page, offset, length);
2948ac27a0ecSDave Kleikamp 		set_page_dirty(page);
2949ac27a0ecSDave Kleikamp 		goto unlock;
2950ac27a0ecSDave Kleikamp 	}
2951ac27a0ecSDave Kleikamp 
2952ac27a0ecSDave Kleikamp 	if (!page_has_buffers(page))
2953ac27a0ecSDave Kleikamp 		create_empty_buffers(page, blocksize, 0);
2954ac27a0ecSDave Kleikamp 
2955ac27a0ecSDave Kleikamp 	/* Find the buffer that contains "offset" */
2956ac27a0ecSDave Kleikamp 	bh = page_buffers(page);
2957ac27a0ecSDave Kleikamp 	pos = blocksize;
2958ac27a0ecSDave Kleikamp 	while (offset >= pos) {
2959ac27a0ecSDave Kleikamp 		bh = bh->b_this_page;
2960ac27a0ecSDave Kleikamp 		iblock++;
2961ac27a0ecSDave Kleikamp 		pos += blocksize;
2962ac27a0ecSDave Kleikamp 	}
2963ac27a0ecSDave Kleikamp 
2964ac27a0ecSDave Kleikamp 	err = 0;
2965ac27a0ecSDave Kleikamp 	if (buffer_freed(bh)) {
2966ac27a0ecSDave Kleikamp 		BUFFER_TRACE(bh, "freed: skip");
2967ac27a0ecSDave Kleikamp 		goto unlock;
2968ac27a0ecSDave Kleikamp 	}
2969ac27a0ecSDave Kleikamp 
2970ac27a0ecSDave Kleikamp 	if (!buffer_mapped(bh)) {
2971ac27a0ecSDave Kleikamp 		BUFFER_TRACE(bh, "unmapped");
2972617ba13bSMingming Cao 		ext4_get_block(inode, iblock, bh, 0);
2973ac27a0ecSDave Kleikamp 		/* unmapped? It's a hole - nothing to do */
2974ac27a0ecSDave Kleikamp 		if (!buffer_mapped(bh)) {
2975ac27a0ecSDave Kleikamp 			BUFFER_TRACE(bh, "still unmapped");
2976ac27a0ecSDave Kleikamp 			goto unlock;
2977ac27a0ecSDave Kleikamp 		}
2978ac27a0ecSDave Kleikamp 	}
2979ac27a0ecSDave Kleikamp 
2980ac27a0ecSDave Kleikamp 	/* Ok, it's mapped. Make sure it's up-to-date */
2981ac27a0ecSDave Kleikamp 	if (PageUptodate(page))
2982ac27a0ecSDave Kleikamp 		set_buffer_uptodate(bh);
2983ac27a0ecSDave Kleikamp 
2984ac27a0ecSDave Kleikamp 	if (!buffer_uptodate(bh)) {
2985ac27a0ecSDave Kleikamp 		err = -EIO;
2986ac27a0ecSDave Kleikamp 		ll_rw_block(READ, 1, &bh);
2987ac27a0ecSDave Kleikamp 		wait_on_buffer(bh);
2988ac27a0ecSDave Kleikamp 		/* Uhhuh. Read error. Complain and punt. */
2989ac27a0ecSDave Kleikamp 		if (!buffer_uptodate(bh))
2990ac27a0ecSDave Kleikamp 			goto unlock;
2991ac27a0ecSDave Kleikamp 	}
2992ac27a0ecSDave Kleikamp 
2993617ba13bSMingming Cao 	if (ext4_should_journal_data(inode)) {
2994ac27a0ecSDave Kleikamp 		BUFFER_TRACE(bh, "get write access");
2995617ba13bSMingming Cao 		err = ext4_journal_get_write_access(handle, bh);
2996ac27a0ecSDave Kleikamp 		if (err)
2997ac27a0ecSDave Kleikamp 			goto unlock;
2998ac27a0ecSDave Kleikamp 	}
2999ac27a0ecSDave Kleikamp 
3000eebd2aa3SChristoph Lameter 	zero_user(page, offset, length);
3001ac27a0ecSDave Kleikamp 
3002ac27a0ecSDave Kleikamp 	BUFFER_TRACE(bh, "zeroed end of block");
3003ac27a0ecSDave Kleikamp 
3004ac27a0ecSDave Kleikamp 	err = 0;
3005617ba13bSMingming Cao 	if (ext4_should_journal_data(inode)) {
3006617ba13bSMingming Cao 		err = ext4_journal_dirty_metadata(handle, bh);
3007ac27a0ecSDave Kleikamp 	} else {
3008617ba13bSMingming Cao 		if (ext4_should_order_data(inode))
3009678aaf48SJan Kara 			err = ext4_jbd2_file_inode(handle, inode);
3010ac27a0ecSDave Kleikamp 		mark_buffer_dirty(bh);
3011ac27a0ecSDave Kleikamp 	}
3012ac27a0ecSDave Kleikamp 
3013ac27a0ecSDave Kleikamp unlock:
3014ac27a0ecSDave Kleikamp 	unlock_page(page);
3015ac27a0ecSDave Kleikamp 	page_cache_release(page);
3016ac27a0ecSDave Kleikamp 	return err;
3017ac27a0ecSDave Kleikamp }
3018ac27a0ecSDave Kleikamp 
3019ac27a0ecSDave Kleikamp /*
3020ac27a0ecSDave Kleikamp  * Probably it should be a library function... search for first non-zero word
3021ac27a0ecSDave Kleikamp  * or memcmp with zero_page, whatever is better for particular architecture.
3022ac27a0ecSDave Kleikamp  * Linus?
3023ac27a0ecSDave Kleikamp  */
3024ac27a0ecSDave Kleikamp static inline int all_zeroes(__le32 *p, __le32 *q)
3025ac27a0ecSDave Kleikamp {
3026ac27a0ecSDave Kleikamp 	while (p < q)
3027ac27a0ecSDave Kleikamp 		if (*p++)
3028ac27a0ecSDave Kleikamp 			return 0;
3029ac27a0ecSDave Kleikamp 	return 1;
3030ac27a0ecSDave Kleikamp }
3031ac27a0ecSDave Kleikamp 
3032ac27a0ecSDave Kleikamp /**
3033617ba13bSMingming Cao  *	ext4_find_shared - find the indirect blocks for partial truncation.
3034ac27a0ecSDave Kleikamp  *	@inode:	  inode in question
3035ac27a0ecSDave Kleikamp  *	@depth:	  depth of the affected branch
3036617ba13bSMingming Cao  *	@offsets: offsets of pointers in that branch (see ext4_block_to_path)
3037ac27a0ecSDave Kleikamp  *	@chain:	  place to store the pointers to partial indirect blocks
3038ac27a0ecSDave Kleikamp  *	@top:	  place to the (detached) top of branch
3039ac27a0ecSDave Kleikamp  *
3040617ba13bSMingming Cao  *	This is a helper function used by ext4_truncate().
3041ac27a0ecSDave Kleikamp  *
3042ac27a0ecSDave Kleikamp  *	When we do truncate() we may have to clean the ends of several
3043ac27a0ecSDave Kleikamp  *	indirect blocks but leave the blocks themselves alive. Block is
3044ac27a0ecSDave Kleikamp  *	partially truncated if some data below the new i_size is referenced
3045ac27a0ecSDave Kleikamp  *	from it (and it is on the path to the first completely truncated
3046ac27a0ecSDave Kleikamp  *	data block, indeed).  We have to free the top of that path along
3047ac27a0ecSDave Kleikamp  *	with everything to the right of the path. Since no allocation
3048617ba13bSMingming Cao  *	past the truncation point is possible until ext4_truncate()
3049ac27a0ecSDave Kleikamp  *	finishes, we may safely do the latter, but top of branch may
3050ac27a0ecSDave Kleikamp  *	require special attention - pageout below the truncation point
3051ac27a0ecSDave Kleikamp  *	might try to populate it.
3052ac27a0ecSDave Kleikamp  *
3053ac27a0ecSDave Kleikamp  *	We atomically detach the top of branch from the tree, store the
3054ac27a0ecSDave Kleikamp  *	block number of its root in *@top, pointers to buffer_heads of
3055ac27a0ecSDave Kleikamp  *	partially truncated blocks - in @chain[].bh and pointers to
3056ac27a0ecSDave Kleikamp  *	their last elements that should not be removed - in
3057ac27a0ecSDave Kleikamp  *	@chain[].p. Return value is the pointer to last filled element
3058ac27a0ecSDave Kleikamp  *	of @chain.
3059ac27a0ecSDave Kleikamp  *
3060ac27a0ecSDave Kleikamp  *	The work left to caller to do the actual freeing of subtrees:
3061ac27a0ecSDave Kleikamp  *		a) free the subtree starting from *@top
3062ac27a0ecSDave Kleikamp  *		b) free the subtrees whose roots are stored in
3063ac27a0ecSDave Kleikamp  *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
3064ac27a0ecSDave Kleikamp  *		c) free the subtrees growing from the inode past the @chain[0].
3065ac27a0ecSDave Kleikamp  *			(no partially truncated stuff there).  */
3066ac27a0ecSDave Kleikamp 
3067617ba13bSMingming Cao static Indirect *ext4_find_shared(struct inode *inode, int depth,
3068725d26d3SAneesh Kumar K.V 			ext4_lblk_t offsets[4], Indirect chain[4], __le32 *top)
3069ac27a0ecSDave Kleikamp {
3070ac27a0ecSDave Kleikamp 	Indirect *partial, *p;
3071ac27a0ecSDave Kleikamp 	int k, err;
3072ac27a0ecSDave Kleikamp 
3073ac27a0ecSDave Kleikamp 	*top = 0;
3074ac27a0ecSDave Kleikamp 	/* Make k index the deepest non-null offset + 1 */
3075ac27a0ecSDave Kleikamp 	for (k = depth; k > 1 && !offsets[k-1]; k--)
3076ac27a0ecSDave Kleikamp 		;
3077617ba13bSMingming Cao 	partial = ext4_get_branch(inode, k, offsets, chain, &err);
3078ac27a0ecSDave Kleikamp 	/* Writer: pointers */
3079ac27a0ecSDave Kleikamp 	if (!partial)
3080ac27a0ecSDave Kleikamp 		partial = chain + k-1;
3081ac27a0ecSDave Kleikamp 	/*
3082ac27a0ecSDave Kleikamp 	 * If the branch acquired continuation since we've looked at it -
3083ac27a0ecSDave Kleikamp 	 * fine, it should all survive and (new) top doesn't belong to us.
3084ac27a0ecSDave Kleikamp 	 */
3085ac27a0ecSDave Kleikamp 	if (!partial->key && *partial->p)
3086ac27a0ecSDave Kleikamp 		/* Writer: end */
3087ac27a0ecSDave Kleikamp 		goto no_top;
3088ac27a0ecSDave Kleikamp 	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
3089ac27a0ecSDave Kleikamp 		;
3090ac27a0ecSDave Kleikamp 	/*
3091ac27a0ecSDave Kleikamp 	 * OK, we've found the last block that must survive. The rest of our
3092ac27a0ecSDave Kleikamp 	 * branch should be detached before unlocking. However, if that rest
3093ac27a0ecSDave Kleikamp 	 * of branch is all ours and does not grow immediately from the inode
3094ac27a0ecSDave Kleikamp 	 * it's easier to cheat and just decrement partial->p.
3095ac27a0ecSDave Kleikamp 	 */
3096ac27a0ecSDave Kleikamp 	if (p == chain + k - 1 && p > chain) {
3097ac27a0ecSDave Kleikamp 		p->p--;
3098ac27a0ecSDave Kleikamp 	} else {
3099ac27a0ecSDave Kleikamp 		*top = *p->p;
3100617ba13bSMingming Cao 		/* Nope, don't do this in ext4.  Must leave the tree intact */
3101ac27a0ecSDave Kleikamp #if 0
3102ac27a0ecSDave Kleikamp 		*p->p = 0;
3103ac27a0ecSDave Kleikamp #endif
3104ac27a0ecSDave Kleikamp 	}
3105ac27a0ecSDave Kleikamp 	/* Writer: end */
3106ac27a0ecSDave Kleikamp 
3107ac27a0ecSDave Kleikamp 	while (partial > p) {
3108ac27a0ecSDave Kleikamp 		brelse(partial->bh);
3109ac27a0ecSDave Kleikamp 		partial--;
3110ac27a0ecSDave Kleikamp 	}
3111ac27a0ecSDave Kleikamp no_top:
3112ac27a0ecSDave Kleikamp 	return partial;
3113ac27a0ecSDave Kleikamp }
3114ac27a0ecSDave Kleikamp 
3115ac27a0ecSDave Kleikamp /*
3116ac27a0ecSDave Kleikamp  * Zero a number of block pointers in either an inode or an indirect block.
3117ac27a0ecSDave Kleikamp  * If we restart the transaction we must again get write access to the
3118ac27a0ecSDave Kleikamp  * indirect block for further modification.
3119ac27a0ecSDave Kleikamp  *
3120ac27a0ecSDave Kleikamp  * We release `count' blocks on disk, but (last - first) may be greater
3121ac27a0ecSDave Kleikamp  * than `count' because there can be holes in there.
3122ac27a0ecSDave Kleikamp  */
3123617ba13bSMingming Cao static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
3124617ba13bSMingming Cao 		struct buffer_head *bh, ext4_fsblk_t block_to_free,
3125ac27a0ecSDave Kleikamp 		unsigned long count, __le32 *first, __le32 *last)
3126ac27a0ecSDave Kleikamp {
3127ac27a0ecSDave Kleikamp 	__le32 *p;
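	/*
	 * If the handle is running low on journal credits, flush the
	 * metadata modified so far and restart the transaction before
	 * freeing more blocks.
	 */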
3128ac27a0ecSDave Kleikamp 	if (try_to_extend_transaction(handle, inode)) {
3129ac27a0ecSDave Kleikamp 		if (bh) {
3130617ba13bSMingming Cao 			BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
3131617ba13bSMingming Cao 			ext4_journal_dirty_metadata(handle, bh);
3132ac27a0ecSDave Kleikamp 		}
3133617ba13bSMingming Cao 		ext4_mark_inode_dirty(handle, inode);
3134617ba13bSMingming Cao 		ext4_journal_test_restart(handle, inode);
3135ac27a0ecSDave Kleikamp 		if (bh) {
3136ac27a0ecSDave Kleikamp 			BUFFER_TRACE(bh, "retaking write access");
3137617ba13bSMingming Cao 			ext4_journal_get_write_access(handle, bh);
3138ac27a0ecSDave Kleikamp 		}
3139ac27a0ecSDave Kleikamp 	}
3140ac27a0ecSDave Kleikamp 
3141ac27a0ecSDave Kleikamp 	/*
3142ac27a0ecSDave Kleikamp 	 * Any buffers which are on the journal will be in memory. We find
3143dab291afSMingming Cao 	 * them on the hash table so jbd2_journal_revoke() will run jbd2_journal_forget()
3144ac27a0ecSDave Kleikamp 	 * on them.  We've already detached each block from the file, so
3145dab291afSMingming Cao 	 * bforget() in jbd2_journal_forget() should be safe.
3146ac27a0ecSDave Kleikamp 	 *
3147dab291afSMingming Cao 	 * AKPM: turn on bforget in jbd2_journal_forget()!!!
3148ac27a0ecSDave Kleikamp 	 */
3149ac27a0ecSDave Kleikamp 	for (p = first; p < last; p++) {
3150ac27a0ecSDave Kleikamp 		u32 nr = le32_to_cpu(*p);
3151ac27a0ecSDave Kleikamp 		if (nr) {
31521d03ec98SAneesh Kumar K.V 			struct buffer_head *tbh;
3153ac27a0ecSDave Kleikamp 
3154ac27a0ecSDave Kleikamp 			*p = 0;
31551d03ec98SAneesh Kumar K.V 			tbh = sb_find_get_block(inode->i_sb, nr);
31561d03ec98SAneesh Kumar K.V 			ext4_forget(handle, 0, inode, tbh, nr);
3157ac27a0ecSDave Kleikamp 		}
3158ac27a0ecSDave Kleikamp 	}
3159ac27a0ecSDave Kleikamp 
3160c9de560dSAlex Tomas 	ext4_free_blocks(handle, inode, block_to_free, count, 0);
3161ac27a0ecSDave Kleikamp }
3162ac27a0ecSDave Kleikamp 
3163ac27a0ecSDave Kleikamp /**
3164617ba13bSMingming Cao  * ext4_free_data - free a list of data blocks
3165ac27a0ecSDave Kleikamp  * @handle:	handle for this transaction
3166ac27a0ecSDave Kleikamp  * @inode:	inode we are dealing with
3167ac27a0ecSDave Kleikamp  * @this_bh:	indirect buffer_head which contains *@first and *@last
3168ac27a0ecSDave Kleikamp  * @first:	array of block numbers
3169ac27a0ecSDave Kleikamp  * @last:	points immediately past the end of array
3170ac27a0ecSDave Kleikamp  *
3171ac27a0ecSDave Kleikamp  * We are freeing all blocks referred to from that array (numbers are stored as
3172ac27a0ecSDave Kleikamp  * little-endian 32-bit) and updating @inode->i_blocks appropriately.
3173ac27a0ecSDave Kleikamp  *
3174ac27a0ecSDave Kleikamp  * We accumulate contiguous runs of blocks to free.  Conveniently, if these
3175ac27a0ecSDave Kleikamp  * blocks are contiguous then releasing them at one time will only affect one
3176ac27a0ecSDave Kleikamp  * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
3177ac27a0ecSDave Kleikamp  * actually use a lot of journal space.
3178ac27a0ecSDave Kleikamp  *
3179ac27a0ecSDave Kleikamp  * @this_bh will be %NULL if @first and @last point into the inode's direct
3180ac27a0ecSDave Kleikamp  * block pointers.
3181ac27a0ecSDave Kleikamp  */
3182617ba13bSMingming Cao static void ext4_free_data(handle_t *handle, struct inode *inode,
3183ac27a0ecSDave Kleikamp 			   struct buffer_head *this_bh,
3184ac27a0ecSDave Kleikamp 			   __le32 *first, __le32 *last)
3185ac27a0ecSDave Kleikamp {
3186617ba13bSMingming Cao 	ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
3187ac27a0ecSDave Kleikamp 	unsigned long count = 0;	    /* Number of blocks in the run */
3188ac27a0ecSDave Kleikamp 	__le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
3189ac27a0ecSDave Kleikamp 					       corresponding to
3190ac27a0ecSDave Kleikamp 					       block_to_free */
3191617ba13bSMingming Cao 	ext4_fsblk_t nr;		    /* Current block # */
3192ac27a0ecSDave Kleikamp 	__le32 *p;			    /* Pointer into inode/ind
3193ac27a0ecSDave Kleikamp 					       for current block */
3194ac27a0ecSDave Kleikamp 	int err;
3195ac27a0ecSDave Kleikamp 
3196ac27a0ecSDave Kleikamp 	if (this_bh) {				/* For indirect block */
3197ac27a0ecSDave Kleikamp 		BUFFER_TRACE(this_bh, "get_write_access");
3198617ba13bSMingming Cao 		err = ext4_journal_get_write_access(handle, this_bh);
3199ac27a0ecSDave Kleikamp 		/* Important: if we can't update the indirect pointers
3200ac27a0ecSDave Kleikamp 		 * to the blocks, we can't free them. */
3201ac27a0ecSDave Kleikamp 		if (err)
3202ac27a0ecSDave Kleikamp 			return;
3203ac27a0ecSDave Kleikamp 	}
3204ac27a0ecSDave Kleikamp 
3205ac27a0ecSDave Kleikamp 	for (p = first; p < last; p++) {
3206ac27a0ecSDave Kleikamp 		nr = le32_to_cpu(*p);
3207ac27a0ecSDave Kleikamp 		if (nr) {
3208ac27a0ecSDave Kleikamp 			/* accumulate blocks to free if they're contiguous */
3209ac27a0ecSDave Kleikamp 			if (count == 0) {
3210ac27a0ecSDave Kleikamp 				block_to_free = nr;
3211ac27a0ecSDave Kleikamp 				block_to_free_p = p;
3212ac27a0ecSDave Kleikamp 				count = 1;
3213ac27a0ecSDave Kleikamp 			} else if (nr == block_to_free + count) {
3214ac27a0ecSDave Kleikamp 				count++;
3215ac27a0ecSDave Kleikamp 			} else {
3216617ba13bSMingming Cao 				ext4_clear_blocks(handle, inode, this_bh,
3217ac27a0ecSDave Kleikamp 						  block_to_free,
3218ac27a0ecSDave Kleikamp 						  count, block_to_free_p, p);
3219ac27a0ecSDave Kleikamp 				block_to_free = nr;
3220ac27a0ecSDave Kleikamp 				block_to_free_p = p;
3221ac27a0ecSDave Kleikamp 				count = 1;
3222ac27a0ecSDave Kleikamp 			}
3223ac27a0ecSDave Kleikamp 		}
3224ac27a0ecSDave Kleikamp 	}
3225ac27a0ecSDave Kleikamp 
3226ac27a0ecSDave Kleikamp 	if (count > 0)
3227617ba13bSMingming Cao 		ext4_clear_blocks(handle, inode, this_bh, block_to_free,
3228ac27a0ecSDave Kleikamp 				  count, block_to_free_p, p);
3229ac27a0ecSDave Kleikamp 
3230ac27a0ecSDave Kleikamp 	if (this_bh) {
3231617ba13bSMingming Cao 		BUFFER_TRACE(this_bh, "call ext4_journal_dirty_metadata");
323271dc8fbcSDuane Griffin 
323371dc8fbcSDuane Griffin 		/*
323471dc8fbcSDuane Griffin 		 * The buffer head should have an attached journal head at this
323571dc8fbcSDuane Griffin 		 * point. However, if the data is corrupted and an indirect
323671dc8fbcSDuane Griffin 		 * block pointed to itself, it would have been detached when
323771dc8fbcSDuane Griffin 		 * the block was cleared. Check for this instead of OOPSing.
323871dc8fbcSDuane Griffin 		 */
323971dc8fbcSDuane Griffin 		if (bh2jh(this_bh))
3240617ba13bSMingming Cao 			ext4_journal_dirty_metadata(handle, this_bh);
324171dc8fbcSDuane Griffin 		else
324271dc8fbcSDuane Griffin 			ext4_error(inode->i_sb, __func__,
324371dc8fbcSDuane Griffin 				   "circular indirect block detected, "
324471dc8fbcSDuane Griffin 				   "inode=%lu, block=%llu",
324571dc8fbcSDuane Griffin 				   inode->i_ino,
324671dc8fbcSDuane Griffin 				   (unsigned long long) this_bh->b_blocknr);
3247ac27a0ecSDave Kleikamp 	}
3248ac27a0ecSDave Kleikamp }
3249ac27a0ecSDave Kleikamp 
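/*
 * A minimal user-space sketch of the contiguous-run accumulation done by
 * ext4_free_data() above: a hypothetical array of block numbers (0 = hole)
 * is coalesced into runs, and each run is printed instead of being freed.
 */
#include <stdio.h>

static void free_run(unsigned long start, unsigned long count)
{
	printf("free %lu block(s) starting at %lu\n", count, start);
}

int main(void)
{
	unsigned long blocks[] = { 100, 101, 102, 0, 200, 201, 7 };	/* hypothetical */
	unsigned long block_to_free = 0, count = 0;
	size_t i;

	for (i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++) {
		unsigned long nr = blocks[i];

		if (!nr)
			continue;		/* a hole */
		if (count == 0) {
			block_to_free = nr;
			count = 1;
		} else if (nr == block_to_free + count) {
			count++;		/* extend the current contiguous run */
		} else {
			free_run(block_to_free, count);
			block_to_free = nr;
			count = 1;
		}
	}
	if (count > 0)
		free_run(block_to_free, count);
	return 0;
}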
3250ac27a0ecSDave Kleikamp /**
3251617ba13bSMingming Cao  *	ext4_free_branches - free an array of branches
3252ac27a0ecSDave Kleikamp  *	@handle: JBD handle for this transaction
3253ac27a0ecSDave Kleikamp  *	@inode:	inode we are dealing with
3254ac27a0ecSDave Kleikamp  *	@parent_bh: the buffer_head which contains *@first and *@last
3255ac27a0ecSDave Kleikamp  *	@first:	array of block numbers
3256ac27a0ecSDave Kleikamp  *	@last:	pointer immediately past the end of array
3257ac27a0ecSDave Kleikamp  *	@depth:	depth of the branches to free
3258ac27a0ecSDave Kleikamp  *
3259ac27a0ecSDave Kleikamp  *	We are freeing all blocks referred to from these branches (numbers are
3260ac27a0ecSDave Kleikamp  *	stored as little-endian 32-bit) and updating @inode->i_blocks
3261ac27a0ecSDave Kleikamp  *	appropriately.
3262ac27a0ecSDave Kleikamp  */
3263617ba13bSMingming Cao static void ext4_free_branches(handle_t *handle, struct inode *inode,
3264ac27a0ecSDave Kleikamp 			       struct buffer_head *parent_bh,
3265ac27a0ecSDave Kleikamp 			       __le32 *first, __le32 *last, int depth)
3266ac27a0ecSDave Kleikamp {
3267617ba13bSMingming Cao 	ext4_fsblk_t nr;
3268ac27a0ecSDave Kleikamp 	__le32 *p;
3269ac27a0ecSDave Kleikamp 
3270ac27a0ecSDave Kleikamp 	if (is_handle_aborted(handle))
3271ac27a0ecSDave Kleikamp 		return;
3272ac27a0ecSDave Kleikamp 
3273ac27a0ecSDave Kleikamp 	if (depth--) {
3274ac27a0ecSDave Kleikamp 		struct buffer_head *bh;
3275617ba13bSMingming Cao 		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
3276ac27a0ecSDave Kleikamp 		p = last;
3277ac27a0ecSDave Kleikamp 		while (--p >= first) {
3278ac27a0ecSDave Kleikamp 			nr = le32_to_cpu(*p);
3279ac27a0ecSDave Kleikamp 			if (!nr)
3280ac27a0ecSDave Kleikamp 				continue;		/* A hole */
3281ac27a0ecSDave Kleikamp 
3282ac27a0ecSDave Kleikamp 			/* Go read the buffer for the next level down */
3283ac27a0ecSDave Kleikamp 			bh = sb_bread(inode->i_sb, nr);
3284ac27a0ecSDave Kleikamp 
3285ac27a0ecSDave Kleikamp 			/*
3286ac27a0ecSDave Kleikamp 			 * A read failure? Report error and clear slot
3287ac27a0ecSDave Kleikamp 			 * (should be rare).
3288ac27a0ecSDave Kleikamp 			 */
3289ac27a0ecSDave Kleikamp 			if (!bh) {
3290617ba13bSMingming Cao 				ext4_error(inode->i_sb, "ext4_free_branches",
32912ae02107SMingming Cao 					   "Read failure, inode=%lu, block=%llu",
3292ac27a0ecSDave Kleikamp 					   inode->i_ino, nr);
3293ac27a0ecSDave Kleikamp 				continue;
3294ac27a0ecSDave Kleikamp 			}
3295ac27a0ecSDave Kleikamp 
3296ac27a0ecSDave Kleikamp 			/* This zaps the entire block.  Bottom up. */
3297ac27a0ecSDave Kleikamp 			BUFFER_TRACE(bh, "free child branches");
3298617ba13bSMingming Cao 			ext4_free_branches(handle, inode, bh,
3299ac27a0ecSDave Kleikamp 					   (__le32*)bh->b_data,
3300ac27a0ecSDave Kleikamp 					   (__le32*)bh->b_data + addr_per_block,
3301ac27a0ecSDave Kleikamp 					   depth);
3302ac27a0ecSDave Kleikamp 
3303ac27a0ecSDave Kleikamp 			/*
3304ac27a0ecSDave Kleikamp 			 * We've probably journalled the indirect block several
3305ac27a0ecSDave Kleikamp 			 * times during the truncate.  But it's no longer
3306ac27a0ecSDave Kleikamp 			 * needed and we now drop it from the transaction via
3307dab291afSMingming Cao 			 * jbd2_journal_revoke().
3308ac27a0ecSDave Kleikamp 			 *
3309ac27a0ecSDave Kleikamp 			 * That's easy if it's exclusively part of this
3310ac27a0ecSDave Kleikamp 			 * transaction.  But if it's part of the committing
3311dab291afSMingming Cao 			 * transaction then jbd2_journal_forget() will simply
3312ac27a0ecSDave Kleikamp 			 * brelse() it.  That means that if the underlying
3313617ba13bSMingming Cao 			 * block is reallocated in ext4_get_block(),
3314ac27a0ecSDave Kleikamp 			 * unmap_underlying_metadata() will find this block
3315ac27a0ecSDave Kleikamp 			 * and will try to get rid of it.  damn, damn.
3316ac27a0ecSDave Kleikamp 			 *
3317ac27a0ecSDave Kleikamp 			 * If this block has already been committed to the
3318ac27a0ecSDave Kleikamp 			 * journal, a revoke record will be written.  And
3319ac27a0ecSDave Kleikamp 			 * revoke records must be emitted *before* clearing
3320ac27a0ecSDave Kleikamp 			 * this block's bit in the bitmaps.
3321ac27a0ecSDave Kleikamp 			 */
3322617ba13bSMingming Cao 			ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
3323ac27a0ecSDave Kleikamp 
3324ac27a0ecSDave Kleikamp 			/*
3325ac27a0ecSDave Kleikamp 			 * Everything below this pointer has been
3326ac27a0ecSDave Kleikamp 			 * released.  Now let this top-of-subtree go.
3327ac27a0ecSDave Kleikamp 			 *
3328ac27a0ecSDave Kleikamp 			 * We want the freeing of this indirect block to be
3329ac27a0ecSDave Kleikamp 			 * atomic in the journal with the updating of the
3330ac27a0ecSDave Kleikamp 			 * bitmap block which owns it.  So make some room in
3331ac27a0ecSDave Kleikamp 			 * the journal.
3332ac27a0ecSDave Kleikamp 			 *
3333ac27a0ecSDave Kleikamp 			 * We zero the parent pointer *after* freeing its
3334ac27a0ecSDave Kleikamp 			 * pointee in the bitmaps, so if extend_transaction()
3335ac27a0ecSDave Kleikamp 			 * for some reason fails to put the bitmap changes and
3336ac27a0ecSDave Kleikamp 			 * the release into the same transaction, recovery
3337ac27a0ecSDave Kleikamp 			 * will merely complain about releasing a free block,
3338ac27a0ecSDave Kleikamp 			 * rather than leaking blocks.
3339ac27a0ecSDave Kleikamp 			 */
3340ac27a0ecSDave Kleikamp 			if (is_handle_aborted(handle))
3341ac27a0ecSDave Kleikamp 				return;
3342ac27a0ecSDave Kleikamp 			if (try_to_extend_transaction(handle, inode)) {
3343617ba13bSMingming Cao 				ext4_mark_inode_dirty(handle, inode);
3344617ba13bSMingming Cao 				ext4_journal_test_restart(handle, inode);
3345ac27a0ecSDave Kleikamp 			}
3346ac27a0ecSDave Kleikamp 
3347c9de560dSAlex Tomas 			ext4_free_blocks(handle, inode, nr, 1, 1);
3348ac27a0ecSDave Kleikamp 
3349ac27a0ecSDave Kleikamp 			if (parent_bh) {
3350ac27a0ecSDave Kleikamp 				/*
3351ac27a0ecSDave Kleikamp 				 * The block which we have just freed is
3352ac27a0ecSDave Kleikamp 				 * pointed to by an indirect block: journal it
3353ac27a0ecSDave Kleikamp 				 */
3354ac27a0ecSDave Kleikamp 				BUFFER_TRACE(parent_bh, "get_write_access");
3355617ba13bSMingming Cao 				if (!ext4_journal_get_write_access(handle,
3356ac27a0ecSDave Kleikamp 								   parent_bh)){
3357ac27a0ecSDave Kleikamp 					*p = 0;
3358ac27a0ecSDave Kleikamp 					BUFFER_TRACE(parent_bh,
3359617ba13bSMingming Cao 					"call ext4_journal_dirty_metadata");
3360617ba13bSMingming Cao 					ext4_journal_dirty_metadata(handle,
3361ac27a0ecSDave Kleikamp 								    parent_bh);
3362ac27a0ecSDave Kleikamp 				}
3363ac27a0ecSDave Kleikamp 			}
3364ac27a0ecSDave Kleikamp 		}
3365ac27a0ecSDave Kleikamp 	} else {
3366ac27a0ecSDave Kleikamp 		/* We have reached the bottom of the tree. */
3367ac27a0ecSDave Kleikamp 		BUFFER_TRACE(parent_bh, "free data blocks");
3368617ba13bSMingming Cao 		ext4_free_data(handle, inode, parent_bh, first, last);
3369ac27a0ecSDave Kleikamp 	}
3370ac27a0ecSDave Kleikamp }
3371ac27a0ecSDave Kleikamp 
337291ef4cafSDuane Griffin int ext4_can_truncate(struct inode *inode)
337391ef4cafSDuane Griffin {
337491ef4cafSDuane Griffin 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
337591ef4cafSDuane Griffin 		return 0;
337691ef4cafSDuane Griffin 	if (S_ISREG(inode->i_mode))
337791ef4cafSDuane Griffin 		return 1;
337891ef4cafSDuane Griffin 	if (S_ISDIR(inode->i_mode))
337991ef4cafSDuane Griffin 		return 1;
338091ef4cafSDuane Griffin 	if (S_ISLNK(inode->i_mode))
338191ef4cafSDuane Griffin 		return !ext4_inode_is_fast_symlink(inode);
338291ef4cafSDuane Griffin 	return 0;
338391ef4cafSDuane Griffin }
338491ef4cafSDuane Griffin 
3385ac27a0ecSDave Kleikamp /*
3386617ba13bSMingming Cao  * ext4_truncate()
3387ac27a0ecSDave Kleikamp  *
3388617ba13bSMingming Cao  * We block out ext4_get_block() block instantiations across the entire
3389617ba13bSMingming Cao  * transaction, and VFS/VM ensures that ext4_truncate() cannot run
3390ac27a0ecSDave Kleikamp  * simultaneously on behalf of the same inode.
3391ac27a0ecSDave Kleikamp  *
3392ac27a0ecSDave Kleikamp  * As we work through the truncate and commit bits of it to the journal there
3393ac27a0ecSDave Kleikamp  * is one core, guiding principle: the file's tree must always be consistent on
3394ac27a0ecSDave Kleikamp  * disk.  We must be able to restart the truncate after a crash.
3395ac27a0ecSDave Kleikamp  *
3396ac27a0ecSDave Kleikamp  * The file's tree may be transiently inconsistent in memory (although it
3397ac27a0ecSDave Kleikamp  * probably isn't), but whenever we close off and commit a journal transaction,
3398ac27a0ecSDave Kleikamp  * the contents of (the filesystem + the journal) must be consistent and
3399ac27a0ecSDave Kleikamp  * restartable.  It's pretty simple, really: bottom up, right to left (although
3400ac27a0ecSDave Kleikamp  * left-to-right works OK too).
3401ac27a0ecSDave Kleikamp  *
3402ac27a0ecSDave Kleikamp  * Note that at recovery time, journal replay occurs *before* the restart of
3403ac27a0ecSDave Kleikamp  * truncate against the orphan inode list.
3404ac27a0ecSDave Kleikamp  *
3405ac27a0ecSDave Kleikamp  * The committed inode has the new, desired i_size (which is the same as
3406617ba13bSMingming Cao  * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
3407ac27a0ecSDave Kleikamp  * that this inode's truncate did not complete and it will again call
3408617ba13bSMingming Cao  * ext4_truncate() to have another go.  So there will be instantiated blocks
3409617ba13bSMingming Cao  * to the right of the truncation point in a crashed ext4 filesystem.  But
3410ac27a0ecSDave Kleikamp  * that's fine - as long as they are linked from the inode, the post-crash
3411617ba13bSMingming Cao  * ext4_truncate() run will find them and release them.
3412ac27a0ecSDave Kleikamp  */
3413617ba13bSMingming Cao void ext4_truncate(struct inode *inode)
3414ac27a0ecSDave Kleikamp {
3415ac27a0ecSDave Kleikamp 	handle_t *handle;
3416617ba13bSMingming Cao 	struct ext4_inode_info *ei = EXT4_I(inode);
3417ac27a0ecSDave Kleikamp 	__le32 *i_data = ei->i_data;
3418617ba13bSMingming Cao 	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
3419ac27a0ecSDave Kleikamp 	struct address_space *mapping = inode->i_mapping;
3420725d26d3SAneesh Kumar K.V 	ext4_lblk_t offsets[4];
3421ac27a0ecSDave Kleikamp 	Indirect chain[4];
3422ac27a0ecSDave Kleikamp 	Indirect *partial;
3423ac27a0ecSDave Kleikamp 	__le32 nr = 0;
3424ac27a0ecSDave Kleikamp 	int n;
3425725d26d3SAneesh Kumar K.V 	ext4_lblk_t last_block;
3426ac27a0ecSDave Kleikamp 	unsigned blocksize = inode->i_sb->s_blocksize;
3427ac27a0ecSDave Kleikamp 
342891ef4cafSDuane Griffin 	if (!ext4_can_truncate(inode))
3429ac27a0ecSDave Kleikamp 		return;
3430ac27a0ecSDave Kleikamp 
34311d03ec98SAneesh Kumar K.V 	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
3432cf108bcaSJan Kara 		ext4_ext_truncate(inode);
34331d03ec98SAneesh Kumar K.V 		return;
34341d03ec98SAneesh Kumar K.V 	}
3435a86c6181SAlex Tomas 
3436ac27a0ecSDave Kleikamp 	handle = start_transaction(inode);
3437cf108bcaSJan Kara 	if (IS_ERR(handle))
3438ac27a0ecSDave Kleikamp 		return;		/* AKPM: return what? */
3439ac27a0ecSDave Kleikamp 
3440ac27a0ecSDave Kleikamp 	last_block = (inode->i_size + blocksize-1)
3441617ba13bSMingming Cao 					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
3442ac27a0ecSDave Kleikamp 
3443cf108bcaSJan Kara 	if (inode->i_size & (blocksize - 1))
3444cf108bcaSJan Kara 		if (ext4_block_truncate_page(handle, mapping, inode->i_size))
3445cf108bcaSJan Kara 			goto out_stop;
3446ac27a0ecSDave Kleikamp 
3447617ba13bSMingming Cao 	n = ext4_block_to_path(inode, last_block, offsets, NULL);
3448ac27a0ecSDave Kleikamp 	if (n == 0)
3449ac27a0ecSDave Kleikamp 		goto out_stop;	/* error */
3450ac27a0ecSDave Kleikamp 
3451ac27a0ecSDave Kleikamp 	/*
3452ac27a0ecSDave Kleikamp 	 * OK.  This truncate is going to happen.  We add the inode to the
3453ac27a0ecSDave Kleikamp 	 * orphan list, so that if this truncate spans multiple transactions,
3454ac27a0ecSDave Kleikamp 	 * and we crash, we will resume the truncate when the filesystem
3455ac27a0ecSDave Kleikamp 	 * recovers.  It also marks the inode dirty, to catch the new size.
3456ac27a0ecSDave Kleikamp 	 *
3457ac27a0ecSDave Kleikamp 	 * Implication: the file must always be in a sane, consistent
3458ac27a0ecSDave Kleikamp 	 * truncatable state while each transaction commits.
3459ac27a0ecSDave Kleikamp 	 */
3460617ba13bSMingming Cao 	if (ext4_orphan_add(handle, inode))
3461ac27a0ecSDave Kleikamp 		goto out_stop;
3462ac27a0ecSDave Kleikamp 
3463ac27a0ecSDave Kleikamp 	/*
3464632eaeabSMingming Cao 	 * From here we block out all ext4_get_block() callers who want to
3465632eaeabSMingming Cao 	 * modify the block allocation tree.
3466632eaeabSMingming Cao 	 */
3467632eaeabSMingming Cao 	down_write(&ei->i_data_sem);
3468632eaeabSMingming Cao 	/*
3469ac27a0ecSDave Kleikamp 	 * The orphan list entry will now protect us from any crash which
3470ac27a0ecSDave Kleikamp 	 * occurs before the truncate completes, so it is now safe to propagate
3471ac27a0ecSDave Kleikamp 	 * the new, shorter inode size (held for now in i_size) into the
3472ac27a0ecSDave Kleikamp 	 * on-disk inode. We do this via i_disksize, which is the value which
3473617ba13bSMingming Cao 	 * ext4 *really* writes onto the disk inode.
3474ac27a0ecSDave Kleikamp 	 */
3475ac27a0ecSDave Kleikamp 	ei->i_disksize = inode->i_size;
3476ac27a0ecSDave Kleikamp 
3477ac27a0ecSDave Kleikamp 	if (n == 1) {		/* direct blocks */
3478617ba13bSMingming Cao 		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
3479617ba13bSMingming Cao 			       i_data + EXT4_NDIR_BLOCKS);
3480ac27a0ecSDave Kleikamp 		goto do_indirects;
3481ac27a0ecSDave Kleikamp 	}
3482ac27a0ecSDave Kleikamp 
3483617ba13bSMingming Cao 	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
3484ac27a0ecSDave Kleikamp 	/* Kill the top of shared branch (not detached) */
3485ac27a0ecSDave Kleikamp 	if (nr) {
3486ac27a0ecSDave Kleikamp 		if (partial == chain) {
3487ac27a0ecSDave Kleikamp 			/* Shared branch grows from the inode */
3488617ba13bSMingming Cao 			ext4_free_branches(handle, inode, NULL,
3489ac27a0ecSDave Kleikamp 					   &nr, &nr+1, (chain+n-1) - partial);
3490ac27a0ecSDave Kleikamp 			*partial->p = 0;
3491ac27a0ecSDave Kleikamp 			/*
3492ac27a0ecSDave Kleikamp 			 * We mark the inode dirty prior to restart,
3493ac27a0ecSDave Kleikamp 			 * and prior to stop.  No need for it here.
3494ac27a0ecSDave Kleikamp 			 */
3495ac27a0ecSDave Kleikamp 		} else {
3496ac27a0ecSDave Kleikamp 			/* Shared branch grows from an indirect block */
3497ac27a0ecSDave Kleikamp 			BUFFER_TRACE(partial->bh, "get_write_access");
3498617ba13bSMingming Cao 			ext4_free_branches(handle, inode, partial->bh,
3499ac27a0ecSDave Kleikamp 					partial->p,
3500ac27a0ecSDave Kleikamp 					partial->p+1, (chain+n-1) - partial);
3501ac27a0ecSDave Kleikamp 		}
3502ac27a0ecSDave Kleikamp 	}
3503ac27a0ecSDave Kleikamp 	/* Clear the ends of indirect blocks on the shared branch */
3504ac27a0ecSDave Kleikamp 	while (partial > chain) {
3505617ba13bSMingming Cao 		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
3506ac27a0ecSDave Kleikamp 				   (__le32*)partial->bh->b_data+addr_per_block,
3507ac27a0ecSDave Kleikamp 				   (chain+n-1) - partial);
3508ac27a0ecSDave Kleikamp 		BUFFER_TRACE(partial->bh, "call brelse");
3509ac27a0ecSDave Kleikamp 		brelse (partial->bh);
3510ac27a0ecSDave Kleikamp 		partial--;
3511ac27a0ecSDave Kleikamp 	}
3512ac27a0ecSDave Kleikamp do_indirects:
3513ac27a0ecSDave Kleikamp 	/* Kill the remaining (whole) subtrees */
3514ac27a0ecSDave Kleikamp 	switch (offsets[0]) {
3515ac27a0ecSDave Kleikamp 	default:
3516617ba13bSMingming Cao 		nr = i_data[EXT4_IND_BLOCK];
3517ac27a0ecSDave Kleikamp 		if (nr) {
3518617ba13bSMingming Cao 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
3519617ba13bSMingming Cao 			i_data[EXT4_IND_BLOCK] = 0;
3520ac27a0ecSDave Kleikamp 		}
3521617ba13bSMingming Cao 	case EXT4_IND_BLOCK:
3522617ba13bSMingming Cao 		nr = i_data[EXT4_DIND_BLOCK];
3523ac27a0ecSDave Kleikamp 		if (nr) {
3524617ba13bSMingming Cao 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
3525617ba13bSMingming Cao 			i_data[EXT4_DIND_BLOCK] = 0;
3526ac27a0ecSDave Kleikamp 		}
3527617ba13bSMingming Cao 	case EXT4_DIND_BLOCK:
3528617ba13bSMingming Cao 		nr = i_data[EXT4_TIND_BLOCK];
3529ac27a0ecSDave Kleikamp 		if (nr) {
3530617ba13bSMingming Cao 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
3531617ba13bSMingming Cao 			i_data[EXT4_TIND_BLOCK] = 0;
3532ac27a0ecSDave Kleikamp 		}
3533617ba13bSMingming Cao 	case EXT4_TIND_BLOCK:
3534ac27a0ecSDave Kleikamp 		;
3535ac27a0ecSDave Kleikamp 	}
3536ac27a0ecSDave Kleikamp 
3537617ba13bSMingming Cao 	ext4_discard_reservation(inode);
3538ac27a0ecSDave Kleikamp 
35390e855ac8SAneesh Kumar K.V 	up_write(&ei->i_data_sem);
3540ef7f3835SKalpak Shah 	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
3541617ba13bSMingming Cao 	ext4_mark_inode_dirty(handle, inode);
3542ac27a0ecSDave Kleikamp 
3543ac27a0ecSDave Kleikamp 	/*
3544ac27a0ecSDave Kleikamp 	 * In a multi-transaction truncate, we only make the final transaction
3545ac27a0ecSDave Kleikamp 	 * synchronous
3546ac27a0ecSDave Kleikamp 	 */
3547ac27a0ecSDave Kleikamp 	if (IS_SYNC(inode))
3548ac27a0ecSDave Kleikamp 		handle->h_sync = 1;
3549ac27a0ecSDave Kleikamp out_stop:
3550ac27a0ecSDave Kleikamp 	/*
3551ac27a0ecSDave Kleikamp 	 * If this was a simple ftruncate(), and the file will remain alive
3552ac27a0ecSDave Kleikamp 	 * then we need to clear up the orphan record which we created above.
3553ac27a0ecSDave Kleikamp 	 * However, if this was a real unlink then we were called by
3554617ba13bSMingming Cao 	 * ext4_delete_inode(), and we allow that function to clean up the
3555ac27a0ecSDave Kleikamp 	 * orphan info for us.
3556ac27a0ecSDave Kleikamp 	 */
3557ac27a0ecSDave Kleikamp 	if (inode->i_nlink)
3558617ba13bSMingming Cao 		ext4_orphan_del(handle, inode);
3559ac27a0ecSDave Kleikamp 
3560617ba13bSMingming Cao 	ext4_journal_stop(handle);
3561ac27a0ecSDave Kleikamp }
3562ac27a0ecSDave Kleikamp 
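/*
 * A worked user-space sketch of the last_block rounding used at the top of
 * ext4_truncate() above, assuming a hypothetical 4 KiB block size and a few
 * hypothetical i_size values; it shows that any partial final block still
 * counts as a block.
 */
#include <stdio.h>

int main(void)
{
	unsigned int blocksize_bits = 12;			/* 4096-byte blocks */
	unsigned long long blocksize = 1ULL << blocksize_bits;
	unsigned long long sizes[] = { 0, 1, 4096, 4097, 12288 };	/* hypothetical */
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long long last_block =
			(sizes[i] + blocksize - 1) >> blocksize_bits;
		printf("i_size=%llu -> last_block=%llu\n", sizes[i], last_block);
	}
	return 0;
}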
3563617ba13bSMingming Cao static ext4_fsblk_t ext4_get_inode_block(struct super_block *sb,
3564617ba13bSMingming Cao 		unsigned long ino, struct ext4_iloc *iloc)
3565ac27a0ecSDave Kleikamp {
3566fd2d4291SAvantika Mathur 	ext4_group_t block_group;
3567ac27a0ecSDave Kleikamp 	unsigned long offset;
3568617ba13bSMingming Cao 	ext4_fsblk_t block;
3569617ba13bSMingming Cao 	struct ext4_group_desc *gdp;
3570ac27a0ecSDave Kleikamp 
3571617ba13bSMingming Cao 	if (!ext4_valid_inum(sb, ino)) {
3572ac27a0ecSDave Kleikamp 		/*
3573ac27a0ecSDave Kleikamp 		 * This error is already checked for in namei.c unless we are
3574ac27a0ecSDave Kleikamp 		 * looking at an NFS filehandle, in which case no error
3575ac27a0ecSDave Kleikamp 		 * report is needed
3576ac27a0ecSDave Kleikamp 		 */
3577ac27a0ecSDave Kleikamp 		return 0;
3578ac27a0ecSDave Kleikamp 	}
3579ac27a0ecSDave Kleikamp 
3580617ba13bSMingming Cao 	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
3581c0a4ef38SAkinobu Mita 	gdp = ext4_get_group_desc(sb, block_group, NULL);
3582c0a4ef38SAkinobu Mita 	if (!gdp)
3583ac27a0ecSDave Kleikamp 		return 0;
3584ac27a0ecSDave Kleikamp 
3585ac27a0ecSDave Kleikamp 	/*
3586ac27a0ecSDave Kleikamp 	 * Figure out the offset within the block group inode table
3587ac27a0ecSDave Kleikamp 	 */
3588617ba13bSMingming Cao 	offset = ((ino - 1) % EXT4_INODES_PER_GROUP(sb)) *
3589617ba13bSMingming Cao 		EXT4_INODE_SIZE(sb);
35908fadc143SAlexandre Ratchov 	block = ext4_inode_table(sb, gdp) +
35918fadc143SAlexandre Ratchov 		(offset >> EXT4_BLOCK_SIZE_BITS(sb));
3592ac27a0ecSDave Kleikamp 
3593ac27a0ecSDave Kleikamp 	iloc->block_group = block_group;
3594617ba13bSMingming Cao 	iloc->offset = offset & (EXT4_BLOCK_SIZE(sb) - 1);
3595ac27a0ecSDave Kleikamp 	return block;
3596ac27a0ecSDave Kleikamp }
3597ac27a0ecSDave Kleikamp 
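/*
 * A worked user-space sketch of the arithmetic in ext4_get_inode_block()
 * above. All parameters are hypothetical (256-byte inodes, 4 KiB blocks,
 * 8192 inodes per group, and that group's inode table starting at block
 * 1000); the real code reads them from the superblock and group descriptor.
 */
#include <stdio.h>

int main(void)
{
	unsigned long ino = 8200;			/* hypothetical inode number */
	unsigned long inodes_per_group = 8192;
	unsigned long inode_size = 256;
	unsigned int block_size_bits = 12;		/* 4096-byte blocks */
	unsigned long long inode_table = 1000;		/* hypothetical table start */

	unsigned long block_group = (ino - 1) / inodes_per_group;
	unsigned long offset = ((ino - 1) % inodes_per_group) * inode_size;
	unsigned long long block = inode_table + (offset >> block_size_bits);
	unsigned long offset_in_block = offset & ((1UL << block_size_bits) - 1);

	/* prints "group=1 block=1000 offset=1792" for the values above */
	printf("group=%lu block=%llu offset=%lu\n",
	       block_group, block, offset_in_block);
	return 0;
}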
3598ac27a0ecSDave Kleikamp /*
3599617ba13bSMingming Cao  * ext4_get_inode_loc returns with an extra refcount against the inode's
3600ac27a0ecSDave Kleikamp  * underlying buffer_head on success. If 'in_mem' is true, we have all
3601ac27a0ecSDave Kleikamp  * data in memory that is needed to recreate the on-disk version of this
3602ac27a0ecSDave Kleikamp  * inode.
3603ac27a0ecSDave Kleikamp  */
3604617ba13bSMingming Cao static int __ext4_get_inode_loc(struct inode *inode,
3605617ba13bSMingming Cao 				struct ext4_iloc *iloc, int in_mem)
3606ac27a0ecSDave Kleikamp {
3607617ba13bSMingming Cao 	ext4_fsblk_t block;
3608ac27a0ecSDave Kleikamp 	struct buffer_head *bh;
3609ac27a0ecSDave Kleikamp 
3610617ba13bSMingming Cao 	block = ext4_get_inode_block(inode->i_sb, inode->i_ino, iloc);
3611ac27a0ecSDave Kleikamp 	if (!block)
3612ac27a0ecSDave Kleikamp 		return -EIO;
3613ac27a0ecSDave Kleikamp 
3614ac27a0ecSDave Kleikamp 	bh = sb_getblk(inode->i_sb, block);
3615ac27a0ecSDave Kleikamp 	if (!bh) {
3616617ba13bSMingming Cao 		ext4_error (inode->i_sb, "ext4_get_inode_loc",
3617ac27a0ecSDave Kleikamp 				"unable to read inode block - "
36182ae02107SMingming Cao 				"inode=%lu, block=%llu",
3619ac27a0ecSDave Kleikamp 				 inode->i_ino, block);
3620ac27a0ecSDave Kleikamp 		return -EIO;
3621ac27a0ecSDave Kleikamp 	}
3622ac27a0ecSDave Kleikamp 	if (!buffer_uptodate(bh)) {
3623ac27a0ecSDave Kleikamp 		lock_buffer(bh);
36249c83a923SHidehiro Kawai 
36259c83a923SHidehiro Kawai 		/*
36269c83a923SHidehiro Kawai 		 * If the buffer has the write error flag, we have failed
36279c83a923SHidehiro Kawai 		 * to write out another inode in the same block.  In this
36289c83a923SHidehiro Kawai 		 * case, we don't have to read the block because we may
36299c83a923SHidehiro Kawai 		 * read the old inode data successfully.
36309c83a923SHidehiro Kawai 		 */
36319c83a923SHidehiro Kawai 		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
36329c83a923SHidehiro Kawai 			set_buffer_uptodate(bh);
36339c83a923SHidehiro Kawai 
3634ac27a0ecSDave Kleikamp 		if (buffer_uptodate(bh)) {
3635ac27a0ecSDave Kleikamp 			/* someone brought it uptodate while we waited */
3636ac27a0ecSDave Kleikamp 			unlock_buffer(bh);
3637ac27a0ecSDave Kleikamp 			goto has_buffer;
3638ac27a0ecSDave Kleikamp 		}
3639ac27a0ecSDave Kleikamp 
3640ac27a0ecSDave Kleikamp 		/*
3641ac27a0ecSDave Kleikamp 		 * If we have all information of the inode in memory and this
3642ac27a0ecSDave Kleikamp 		 * is the only valid inode in the block, we need not read the
3643ac27a0ecSDave Kleikamp 		 * block.
3644ac27a0ecSDave Kleikamp 		 */
3645ac27a0ecSDave Kleikamp 		if (in_mem) {
3646ac27a0ecSDave Kleikamp 			struct buffer_head *bitmap_bh;
3647617ba13bSMingming Cao 			struct ext4_group_desc *desc;
3648ac27a0ecSDave Kleikamp 			int inodes_per_buffer;
3649ac27a0ecSDave Kleikamp 			int inode_offset, i;
3650fd2d4291SAvantika Mathur 			ext4_group_t block_group;
3651ac27a0ecSDave Kleikamp 			int start;
3652ac27a0ecSDave Kleikamp 
3653ac27a0ecSDave Kleikamp 			block_group = (inode->i_ino - 1) /
3654617ba13bSMingming Cao 					EXT4_INODES_PER_GROUP(inode->i_sb);
3655ac27a0ecSDave Kleikamp 			inodes_per_buffer = bh->b_size /
3656617ba13bSMingming Cao 				EXT4_INODE_SIZE(inode->i_sb);
3657ac27a0ecSDave Kleikamp 			inode_offset = ((inode->i_ino - 1) %
3658617ba13bSMingming Cao 					EXT4_INODES_PER_GROUP(inode->i_sb));
3659ac27a0ecSDave Kleikamp 			start = inode_offset & ~(inodes_per_buffer - 1);
3660ac27a0ecSDave Kleikamp 
3661ac27a0ecSDave Kleikamp 			/* Is the inode bitmap in cache? */
3662617ba13bSMingming Cao 			desc = ext4_get_group_desc(inode->i_sb,
3663ac27a0ecSDave Kleikamp 						block_group, NULL);
3664ac27a0ecSDave Kleikamp 			if (!desc)
3665ac27a0ecSDave Kleikamp 				goto make_io;
3666ac27a0ecSDave Kleikamp 
3667ac27a0ecSDave Kleikamp 			bitmap_bh = sb_getblk(inode->i_sb,
36688fadc143SAlexandre Ratchov 				ext4_inode_bitmap(inode->i_sb, desc));
3669ac27a0ecSDave Kleikamp 			if (!bitmap_bh)
3670ac27a0ecSDave Kleikamp 				goto make_io;
3671ac27a0ecSDave Kleikamp 
3672ac27a0ecSDave Kleikamp 			/*
3673ac27a0ecSDave Kleikamp 			 * If the inode bitmap isn't in cache then the
3674ac27a0ecSDave Kleikamp 			 * optimisation may end up performing two reads instead
3675ac27a0ecSDave Kleikamp 			 * of one, so skip it.
3676ac27a0ecSDave Kleikamp 			 */
3677ac27a0ecSDave Kleikamp 			if (!buffer_uptodate(bitmap_bh)) {
3678ac27a0ecSDave Kleikamp 				brelse(bitmap_bh);
3679ac27a0ecSDave Kleikamp 				goto make_io;
3680ac27a0ecSDave Kleikamp 			}
3681ac27a0ecSDave Kleikamp 			for (i = start; i < start + inodes_per_buffer; i++) {
3682ac27a0ecSDave Kleikamp 				if (i == inode_offset)
3683ac27a0ecSDave Kleikamp 					continue;
3684617ba13bSMingming Cao 				if (ext4_test_bit(i, bitmap_bh->b_data))
3685ac27a0ecSDave Kleikamp 					break;
3686ac27a0ecSDave Kleikamp 			}
3687ac27a0ecSDave Kleikamp 			brelse(bitmap_bh);
3688ac27a0ecSDave Kleikamp 			if (i == start + inodes_per_buffer) {
3689ac27a0ecSDave Kleikamp 				/* all other inodes are free, so skip I/O */
3690ac27a0ecSDave Kleikamp 				memset(bh->b_data, 0, bh->b_size);
3691ac27a0ecSDave Kleikamp 				set_buffer_uptodate(bh);
3692ac27a0ecSDave Kleikamp 				unlock_buffer(bh);
3693ac27a0ecSDave Kleikamp 				goto has_buffer;
3694ac27a0ecSDave Kleikamp 			}
3695ac27a0ecSDave Kleikamp 		}
3696ac27a0ecSDave Kleikamp 
3697ac27a0ecSDave Kleikamp make_io:
3698ac27a0ecSDave Kleikamp 		/*
3699ac27a0ecSDave Kleikamp 		 * There are other valid inodes in the buffer, this inode
3700ac27a0ecSDave Kleikamp 		 * has in-inode xattrs, or we don't have this inode in memory.
3701ac27a0ecSDave Kleikamp 		 * Read the block from disk.
3702ac27a0ecSDave Kleikamp 		 */
3703ac27a0ecSDave Kleikamp 		get_bh(bh);
3704ac27a0ecSDave Kleikamp 		bh->b_end_io = end_buffer_read_sync;
3705ac27a0ecSDave Kleikamp 		submit_bh(READ_META, bh);
3706ac27a0ecSDave Kleikamp 		wait_on_buffer(bh);
3707ac27a0ecSDave Kleikamp 		if (!buffer_uptodate(bh)) {
3708617ba13bSMingming Cao 			ext4_error(inode->i_sb, "ext4_get_inode_loc",
3709ac27a0ecSDave Kleikamp 					"unable to read inode block - "
37102ae02107SMingming Cao 					"inode=%lu, block=%llu",
3711ac27a0ecSDave Kleikamp 					inode->i_ino, block);
3712ac27a0ecSDave Kleikamp 			brelse(bh);
3713ac27a0ecSDave Kleikamp 			return -EIO;
3714ac27a0ecSDave Kleikamp 		}
3715ac27a0ecSDave Kleikamp 	}
3716ac27a0ecSDave Kleikamp has_buffer:
3717ac27a0ecSDave Kleikamp 	iloc->bh = bh;
3718ac27a0ecSDave Kleikamp 	return 0;
3719ac27a0ecSDave Kleikamp }
3720ac27a0ecSDave Kleikamp 
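/*
 * A worked user-space sketch of the "start" alignment used by
 * __ext4_get_inode_loc() above when scanning the other inodes that share a
 * buffer, assuming hypothetical 256-byte inodes in a 4 KiB buffer
 * (16 inodes per buffer).
 */
#include <stdio.h>

int main(void)
{
	unsigned int inodes_per_buffer = 4096 / 256;	/* 16 */
	unsigned int inode_offset = 37;			/* hypothetical offset in group */
	unsigned int start = inode_offset & ~(inodes_per_buffer - 1);

	/* prints "inode_offset=37 start=32 end=48" */
	printf("inode_offset=%u start=%u end=%u\n",
	       inode_offset, start, start + inodes_per_buffer);
	return 0;
}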
3721617ba13bSMingming Cao int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
3722ac27a0ecSDave Kleikamp {
3723ac27a0ecSDave Kleikamp 	/* We have all inode data except xattrs in memory here. */
3724617ba13bSMingming Cao 	return __ext4_get_inode_loc(inode, iloc,
3725617ba13bSMingming Cao 		!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
3726ac27a0ecSDave Kleikamp }
3727ac27a0ecSDave Kleikamp 
3728617ba13bSMingming Cao void ext4_set_inode_flags(struct inode *inode)
3729ac27a0ecSDave Kleikamp {
3730617ba13bSMingming Cao 	unsigned int flags = EXT4_I(inode)->i_flags;
3731ac27a0ecSDave Kleikamp 
3732ac27a0ecSDave Kleikamp 	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
3733617ba13bSMingming Cao 	if (flags & EXT4_SYNC_FL)
3734ac27a0ecSDave Kleikamp 		inode->i_flags |= S_SYNC;
3735617ba13bSMingming Cao 	if (flags & EXT4_APPEND_FL)
3736ac27a0ecSDave Kleikamp 		inode->i_flags |= S_APPEND;
3737617ba13bSMingming Cao 	if (flags & EXT4_IMMUTABLE_FL)
3738ac27a0ecSDave Kleikamp 		inode->i_flags |= S_IMMUTABLE;
3739617ba13bSMingming Cao 	if (flags & EXT4_NOATIME_FL)
3740ac27a0ecSDave Kleikamp 		inode->i_flags |= S_NOATIME;
3741617ba13bSMingming Cao 	if (flags & EXT4_DIRSYNC_FL)
3742ac27a0ecSDave Kleikamp 		inode->i_flags |= S_DIRSYNC;
3743ac27a0ecSDave Kleikamp }
3744ac27a0ecSDave Kleikamp 
3745ff9ddf7eSJan Kara /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
3746ff9ddf7eSJan Kara void ext4_get_inode_flags(struct ext4_inode_info *ei)
3747ff9ddf7eSJan Kara {
3748ff9ddf7eSJan Kara 	unsigned int flags = ei->vfs_inode.i_flags;
3749ff9ddf7eSJan Kara 
3750ff9ddf7eSJan Kara 	ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
3751ff9ddf7eSJan Kara 			EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
3752ff9ddf7eSJan Kara 	if (flags & S_SYNC)
3753ff9ddf7eSJan Kara 		ei->i_flags |= EXT4_SYNC_FL;
3754ff9ddf7eSJan Kara 	if (flags & S_APPEND)
3755ff9ddf7eSJan Kara 		ei->i_flags |= EXT4_APPEND_FL;
3756ff9ddf7eSJan Kara 	if (flags & S_IMMUTABLE)
3757ff9ddf7eSJan Kara 		ei->i_flags |= EXT4_IMMUTABLE_FL;
3758ff9ddf7eSJan Kara 	if (flags & S_NOATIME)
3759ff9ddf7eSJan Kara 		ei->i_flags |= EXT4_NOATIME_FL;
3760ff9ddf7eSJan Kara 	if (flags & S_DIRSYNC)
3761ff9ddf7eSJan Kara 		ei->i_flags |= EXT4_DIRSYNC_FL;
3762ff9ddf7eSJan Kara }
37630fc1b451SAneesh Kumar K.V static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
37640fc1b451SAneesh Kumar K.V 					struct ext4_inode_info *ei)
37650fc1b451SAneesh Kumar K.V {
37660fc1b451SAneesh Kumar K.V 	blkcnt_t i_blocks ;
37678180a562SAneesh Kumar K.V 	struct inode *inode = &(ei->vfs_inode);
37688180a562SAneesh Kumar K.V 	struct super_block *sb = inode->i_sb;
37690fc1b451SAneesh Kumar K.V 
37700fc1b451SAneesh Kumar K.V 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
37710fc1b451SAneesh Kumar K.V 				EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
37720fc1b451SAneesh Kumar K.V 		/* we are using combined 48 bit field */
37730fc1b451SAneesh Kumar K.V 		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
37740fc1b451SAneesh Kumar K.V 					le32_to_cpu(raw_inode->i_blocks_lo);
37758180a562SAneesh Kumar K.V 		if (ei->i_flags & EXT4_HUGE_FILE_FL) {
37768180a562SAneesh Kumar K.V 			/* i_blocks represent file system block size */
37778180a562SAneesh Kumar K.V 			return i_blocks  << (inode->i_blkbits - 9);
37788180a562SAneesh Kumar K.V 		} else {
37790fc1b451SAneesh Kumar K.V 			return i_blocks;
37808180a562SAneesh Kumar K.V 		}
37810fc1b451SAneesh Kumar K.V 	} else {
37820fc1b451SAneesh Kumar K.V 		return le32_to_cpu(raw_inode->i_blocks_lo);
37830fc1b451SAneesh Kumar K.V 	}
37840fc1b451SAneesh Kumar K.V }
3785ff9ddf7eSJan Kara 
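/*
 * A minimal user-space sketch of the 48-bit i_blocks decode done by
 * ext4_inode_blocks() above, with hypothetical raw field values and 4 KiB
 * blocks; the huge-file case rescales from filesystem blocks to 512-byte
 * units, mirroring the << (i_blkbits - 9) shift.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t i_blocks_high = 0x0001;	/* hypothetical raw on-disk fields */
	uint32_t i_blocks_lo = 0x00000010;
	unsigned int blkbits = 12;		/* 4096-byte blocks */
	int huge_file = 1;			/* pretend EXT4_HUGE_FILE_FL is set */

	uint64_t i_blocks = ((uint64_t)i_blocks_high << 32) | i_blocks_lo;

	if (huge_file)
		i_blocks <<= blkbits - 9;	/* fs blocks -> 512-byte sectors */
	printf("i_blocks = %llu\n", (unsigned long long)i_blocks);
	return 0;
}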
37861d1fe1eeSDavid Howells struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
3787ac27a0ecSDave Kleikamp {
3788617ba13bSMingming Cao 	struct ext4_iloc iloc;
3789617ba13bSMingming Cao 	struct ext4_inode *raw_inode;
37901d1fe1eeSDavid Howells 	struct ext4_inode_info *ei;
3791ac27a0ecSDave Kleikamp 	struct buffer_head *bh;
37921d1fe1eeSDavid Howells 	struct inode *inode;
37931d1fe1eeSDavid Howells 	long ret;
3794ac27a0ecSDave Kleikamp 	int block;
3795ac27a0ecSDave Kleikamp 
37961d1fe1eeSDavid Howells 	inode = iget_locked(sb, ino);
37971d1fe1eeSDavid Howells 	if (!inode)
37981d1fe1eeSDavid Howells 		return ERR_PTR(-ENOMEM);
37991d1fe1eeSDavid Howells 	if (!(inode->i_state & I_NEW))
38001d1fe1eeSDavid Howells 		return inode;
38011d1fe1eeSDavid Howells 
38021d1fe1eeSDavid Howells 	ei = EXT4_I(inode);
3803617ba13bSMingming Cao #ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
3804617ba13bSMingming Cao 	ei->i_acl = EXT4_ACL_NOT_CACHED;
3805617ba13bSMingming Cao 	ei->i_default_acl = EXT4_ACL_NOT_CACHED;
3806ac27a0ecSDave Kleikamp #endif
3807ac27a0ecSDave Kleikamp 	ei->i_block_alloc_info = NULL;
3808ac27a0ecSDave Kleikamp 
38091d1fe1eeSDavid Howells 	ret = __ext4_get_inode_loc(inode, &iloc, 0);
38101d1fe1eeSDavid Howells 	if (ret < 0)
3811ac27a0ecSDave Kleikamp 		goto bad_inode;
3812ac27a0ecSDave Kleikamp 	bh = iloc.bh;
3813617ba13bSMingming Cao 	raw_inode = ext4_raw_inode(&iloc);
3814ac27a0ecSDave Kleikamp 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
3815ac27a0ecSDave Kleikamp 	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
3816ac27a0ecSDave Kleikamp 	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
3817ac27a0ecSDave Kleikamp 	if(!(test_opt (inode->i_sb, NO_UID32))) {
3818ac27a0ecSDave Kleikamp 		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
3819ac27a0ecSDave Kleikamp 		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
3820ac27a0ecSDave Kleikamp 	}
3821ac27a0ecSDave Kleikamp 	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
3822ac27a0ecSDave Kleikamp 
3823ac27a0ecSDave Kleikamp 	ei->i_state = 0;
3824ac27a0ecSDave Kleikamp 	ei->i_dir_start_lookup = 0;
3825ac27a0ecSDave Kleikamp 	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
3826ac27a0ecSDave Kleikamp 	/* We now have enough fields to check if the inode was active or not.
3827ac27a0ecSDave Kleikamp 	 * This is needed because nfsd might try to access dead inodes;
3828ac27a0ecSDave Kleikamp 	 * the test is the same one that e2fsck uses.
3829ac27a0ecSDave Kleikamp 	 * NeilBrown 1999oct15
3830ac27a0ecSDave Kleikamp 	 */
3831ac27a0ecSDave Kleikamp 	if (inode->i_nlink == 0) {
3832ac27a0ecSDave Kleikamp 		if (inode->i_mode == 0 ||
3833617ba13bSMingming Cao 		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
3834ac27a0ecSDave Kleikamp 			/* this inode is deleted */
3835ac27a0ecSDave Kleikamp 			brelse (bh);
38361d1fe1eeSDavid Howells 			ret = -ESTALE;
3837ac27a0ecSDave Kleikamp 			goto bad_inode;
3838ac27a0ecSDave Kleikamp 		}
3839ac27a0ecSDave Kleikamp 		/* The only unlinked inodes we let through here have
3840ac27a0ecSDave Kleikamp 		 * valid i_mode and are being read by the orphan
3841ac27a0ecSDave Kleikamp 		 * recovery code: that's fine, we're about to complete
3842ac27a0ecSDave Kleikamp 		 * the process of deleting those. */
3843ac27a0ecSDave Kleikamp 	}
3844ac27a0ecSDave Kleikamp 	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
38450fc1b451SAneesh Kumar K.V 	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
38467973c0c1SAneesh Kumar K.V 	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
38479b8f1f01SMingming Cao 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
3848a48380f7SAneesh Kumar K.V 	    cpu_to_le32(EXT4_OS_HURD)) {
3849a1ddeb7eSBadari Pulavarty 		ei->i_file_acl |=
3850a1ddeb7eSBadari Pulavarty 			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
3851ac27a0ecSDave Kleikamp 	}
3852a48380f7SAneesh Kumar K.V 	inode->i_size = ext4_isize(raw_inode);
3853ac27a0ecSDave Kleikamp 	ei->i_disksize = inode->i_size;
3854ac27a0ecSDave Kleikamp 	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
3855ac27a0ecSDave Kleikamp 	ei->i_block_group = iloc.block_group;
3856ac27a0ecSDave Kleikamp 	/*
3857ac27a0ecSDave Kleikamp 	 * NOTE! The in-memory inode i_data array is in little-endian order
3858ac27a0ecSDave Kleikamp 	 * even on big-endian machines: we do NOT byteswap the block numbers!
3859ac27a0ecSDave Kleikamp 	 */
3860617ba13bSMingming Cao 	for (block = 0; block < EXT4_N_BLOCKS; block++)
3861ac27a0ecSDave Kleikamp 		ei->i_data[block] = raw_inode->i_block[block];
3862ac27a0ecSDave Kleikamp 	INIT_LIST_HEAD(&ei->i_orphan);
3863ac27a0ecSDave Kleikamp 
38640040d987SEric Sandeen 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3865ac27a0ecSDave Kleikamp 		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
3866617ba13bSMingming Cao 		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
3867e5d2861fSKirill Korotaev 		    EXT4_INODE_SIZE(inode->i_sb)) {
3868e5d2861fSKirill Korotaev 			brelse (bh);
38691d1fe1eeSDavid Howells 			ret = -EIO;
3870ac27a0ecSDave Kleikamp 			goto bad_inode;
3871e5d2861fSKirill Korotaev 		}
3872ac27a0ecSDave Kleikamp 		if (ei->i_extra_isize == 0) {
3873ac27a0ecSDave Kleikamp 			/* The extra space is currently unused. Use it. */
3874617ba13bSMingming Cao 			ei->i_extra_isize = sizeof(struct ext4_inode) -
3875617ba13bSMingming Cao 					    EXT4_GOOD_OLD_INODE_SIZE;
3876ac27a0ecSDave Kleikamp 		} else {
3877ac27a0ecSDave Kleikamp 			__le32 *magic = (void *)raw_inode +
3878617ba13bSMingming Cao 					EXT4_GOOD_OLD_INODE_SIZE +
3879ac27a0ecSDave Kleikamp 					ei->i_extra_isize;
3880617ba13bSMingming Cao 			if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
3881617ba13bSMingming Cao 				 ei->i_state |= EXT4_STATE_XATTR;
3882ac27a0ecSDave Kleikamp 		}
3883ac27a0ecSDave Kleikamp 	} else
3884ac27a0ecSDave Kleikamp 		ei->i_extra_isize = 0;
3885ac27a0ecSDave Kleikamp 
3886ef7f3835SKalpak Shah 	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
3887ef7f3835SKalpak Shah 	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
3888ef7f3835SKalpak Shah 	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
3889ef7f3835SKalpak Shah 	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
3890ef7f3835SKalpak Shah 
389125ec56b5SJean Noel Cordenner 	inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
389225ec56b5SJean Noel Cordenner 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
389325ec56b5SJean Noel Cordenner 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
389425ec56b5SJean Noel Cordenner 			inode->i_version |=
389525ec56b5SJean Noel Cordenner 			(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
389625ec56b5SJean Noel Cordenner 	}
389725ec56b5SJean Noel Cordenner 
3898ac27a0ecSDave Kleikamp 	if (S_ISREG(inode->i_mode)) {
3899617ba13bSMingming Cao 		inode->i_op = &ext4_file_inode_operations;
3900617ba13bSMingming Cao 		inode->i_fop = &ext4_file_operations;
3901617ba13bSMingming Cao 		ext4_set_aops(inode);
3902ac27a0ecSDave Kleikamp 	} else if (S_ISDIR(inode->i_mode)) {
3903617ba13bSMingming Cao 		inode->i_op = &ext4_dir_inode_operations;
3904617ba13bSMingming Cao 		inode->i_fop = &ext4_dir_operations;
3905ac27a0ecSDave Kleikamp 	} else if (S_ISLNK(inode->i_mode)) {
3906617ba13bSMingming Cao 		if (ext4_inode_is_fast_symlink(inode))
3907617ba13bSMingming Cao 			inode->i_op = &ext4_fast_symlink_inode_operations;
3908ac27a0ecSDave Kleikamp 		else {
3909617ba13bSMingming Cao 			inode->i_op = &ext4_symlink_inode_operations;
3910617ba13bSMingming Cao 			ext4_set_aops(inode);
3911ac27a0ecSDave Kleikamp 		}
3912ac27a0ecSDave Kleikamp 	} else {
3913617ba13bSMingming Cao 		inode->i_op = &ext4_special_inode_operations;
3914ac27a0ecSDave Kleikamp 		if (raw_inode->i_block[0])
3915ac27a0ecSDave Kleikamp 			init_special_inode(inode, inode->i_mode,
3916ac27a0ecSDave Kleikamp 			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
3917ac27a0ecSDave Kleikamp 		else
3918ac27a0ecSDave Kleikamp 			init_special_inode(inode, inode->i_mode,
3919ac27a0ecSDave Kleikamp 			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
3920ac27a0ecSDave Kleikamp 	}
3921ac27a0ecSDave Kleikamp 	brelse (iloc.bh);
3922617ba13bSMingming Cao 	ext4_set_inode_flags(inode);
39231d1fe1eeSDavid Howells 	unlock_new_inode(inode);
39241d1fe1eeSDavid Howells 	return inode;
3925ac27a0ecSDave Kleikamp 
3926ac27a0ecSDave Kleikamp bad_inode:
39271d1fe1eeSDavid Howells 	iget_failed(inode);
39281d1fe1eeSDavid Howells 	return ERR_PTR(ret);
3929ac27a0ecSDave Kleikamp }
3930ac27a0ecSDave Kleikamp 
39310fc1b451SAneesh Kumar K.V static int ext4_inode_blocks_set(handle_t *handle,
39320fc1b451SAneesh Kumar K.V 				struct ext4_inode *raw_inode,
39330fc1b451SAneesh Kumar K.V 				struct ext4_inode_info *ei)
39340fc1b451SAneesh Kumar K.V {
39350fc1b451SAneesh Kumar K.V 	struct inode *inode = &(ei->vfs_inode);
39360fc1b451SAneesh Kumar K.V 	u64 i_blocks = inode->i_blocks;
39370fc1b451SAneesh Kumar K.V 	struct super_block *sb = inode->i_sb;
39380fc1b451SAneesh Kumar K.V 	int err = 0;
39390fc1b451SAneesh Kumar K.V 
39400fc1b451SAneesh Kumar K.V 	if (i_blocks <= ~0U) {
39410fc1b451SAneesh Kumar K.V 		/*
39420fc1b451SAneesh Kumar K.V 		 * i_blocks can be represented in a 32 bit variable
39430fc1b451SAneesh Kumar K.V 		 * as multiple of 512 bytes
39440fc1b451SAneesh Kumar K.V 		 */
39458180a562SAneesh Kumar K.V 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
39460fc1b451SAneesh Kumar K.V 		raw_inode->i_blocks_high = 0;
39478180a562SAneesh Kumar K.V 		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
39480fc1b451SAneesh Kumar K.V 	} else if (i_blocks <= 0xffffffffffffULL) {
39490fc1b451SAneesh Kumar K.V 		/*
39500fc1b451SAneesh Kumar K.V 		 * i_blocks can be represented in a 48 bit variable
39510fc1b451SAneesh Kumar K.V 		 * as multiple of 512 bytes
39520fc1b451SAneesh Kumar K.V 		 */
39530fc1b451SAneesh Kumar K.V 		err = ext4_update_rocompat_feature(handle, sb,
39540fc1b451SAneesh Kumar K.V 					    EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
39550fc1b451SAneesh Kumar K.V 		if (err)
39560fc1b451SAneesh Kumar K.V 			goto  err_out;
39570fc1b451SAneesh Kumar K.V 		/* i_block is stored in the split  48 bit fields */
39588180a562SAneesh Kumar K.V 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
39590fc1b451SAneesh Kumar K.V 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
39608180a562SAneesh Kumar K.V 		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
39610fc1b451SAneesh Kumar K.V 	} else {
39628180a562SAneesh Kumar K.V 		/*
39638180a562SAneesh Kumar K.V 		 * i_blocks should be represented in a 48 bit variable
39648180a562SAneesh Kumar K.V 		 * as multiple of  file system block size
39658180a562SAneesh Kumar K.V 		 */
39668180a562SAneesh Kumar K.V 		err = ext4_update_rocompat_feature(handle, sb,
39678180a562SAneesh Kumar K.V 					    EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
39688180a562SAneesh Kumar K.V 		if (err)
39698180a562SAneesh Kumar K.V 			goto  err_out;
39708180a562SAneesh Kumar K.V 		ei->i_flags |= EXT4_HUGE_FILE_FL;
39718180a562SAneesh Kumar K.V 		/* i_block is stored in file system block size */
39728180a562SAneesh Kumar K.V 		i_blocks = i_blocks >> (inode->i_blkbits - 9);
39738180a562SAneesh Kumar K.V 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
39748180a562SAneesh Kumar K.V 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
39750fc1b451SAneesh Kumar K.V 	}
39760fc1b451SAneesh Kumar K.V err_out:
39770fc1b451SAneesh Kumar K.V 	return err;
39780fc1b451SAneesh Kumar K.V }
39790fc1b451SAneesh Kumar K.V 
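/*
 * A minimal user-space sketch of the size-class choice made by
 * ext4_inode_blocks_set() above: 32-bit, 48-bit, or huge-file encoding.
 * The i_blocks value and 4 KiB block size are hypothetical, and no feature
 * flags or journal handling are mirrored here.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t i_blocks = 0x123456789ULL;	/* hypothetical, in 512-byte units */
	unsigned int blkbits = 12;
	uint32_t lo;
	uint16_t high;

	if (i_blocks <= 0xffffffffULL) {
		/* fits entirely in the low 32-bit field */
		lo = (uint32_t)i_blocks;
		high = 0;
		printf("32-bit: lo=%u high=%u\n", (unsigned)lo, (unsigned)high);
	} else if (i_blocks <= 0xffffffffffffULL) {
		/* split across the combined 48-bit fields */
		lo = (uint32_t)i_blocks;
		high = (uint16_t)(i_blocks >> 32);
		printf("48-bit: lo=%u high=%u\n", (unsigned)lo, (unsigned)high);
	} else {
		/* store in filesystem-block units (huge-file case) */
		i_blocks >>= blkbits - 9;
		lo = (uint32_t)i_blocks;
		high = (uint16_t)(i_blocks >> 32);
		printf("huge-file: lo=%u high=%u\n", (unsigned)lo, (unsigned)high);
	}
	return 0;
}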
3980ac27a0ecSDave Kleikamp /*
3981ac27a0ecSDave Kleikamp  * Post the struct inode info into an on-disk inode location in the
3982ac27a0ecSDave Kleikamp  * buffer-cache.  This gobbles the caller's reference to the
3983ac27a0ecSDave Kleikamp  * buffer_head in the inode location struct.
3984ac27a0ecSDave Kleikamp  *
3985ac27a0ecSDave Kleikamp  * The caller must have write access to iloc->bh.
3986ac27a0ecSDave Kleikamp  */
3987617ba13bSMingming Cao static int ext4_do_update_inode(handle_t *handle,
3988ac27a0ecSDave Kleikamp 				struct inode *inode,
3989617ba13bSMingming Cao 				struct ext4_iloc *iloc)
3990ac27a0ecSDave Kleikamp {
3991617ba13bSMingming Cao 	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
3992617ba13bSMingming Cao 	struct ext4_inode_info *ei = EXT4_I(inode);
3993ac27a0ecSDave Kleikamp 	struct buffer_head *bh = iloc->bh;
3994ac27a0ecSDave Kleikamp 	int err = 0, rc, block;
3995ac27a0ecSDave Kleikamp 
3996ac27a0ecSDave Kleikamp 	/* For fields not tracked in the in-memory inode,
3997ac27a0ecSDave Kleikamp 	 * initialise them to zero for new inodes. */
3998617ba13bSMingming Cao 	if (ei->i_state & EXT4_STATE_NEW)
3999617ba13bSMingming Cao 		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
4000ac27a0ecSDave Kleikamp 
4001ff9ddf7eSJan Kara 	ext4_get_inode_flags(ei);
4002ac27a0ecSDave Kleikamp 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
4003ac27a0ecSDave Kleikamp 	if(!(test_opt(inode->i_sb, NO_UID32))) {
4004ac27a0ecSDave Kleikamp 		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
4005ac27a0ecSDave Kleikamp 		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
4006ac27a0ecSDave Kleikamp /*
4007ac27a0ecSDave Kleikamp  * Fix up interoperability with old kernels. Otherwise, old inodes get
4008ac27a0ecSDave Kleikamp  * re-used with the upper 16 bits of the uid/gid intact
4009ac27a0ecSDave Kleikamp  */
4010ac27a0ecSDave Kleikamp 		if(!ei->i_dtime) {
4011ac27a0ecSDave Kleikamp 			raw_inode->i_uid_high =
4012ac27a0ecSDave Kleikamp 				cpu_to_le16(high_16_bits(inode->i_uid));
4013ac27a0ecSDave Kleikamp 			raw_inode->i_gid_high =
4014ac27a0ecSDave Kleikamp 				cpu_to_le16(high_16_bits(inode->i_gid));
4015ac27a0ecSDave Kleikamp 		} else {
4016ac27a0ecSDave Kleikamp 			raw_inode->i_uid_high = 0;
4017ac27a0ecSDave Kleikamp 			raw_inode->i_gid_high = 0;
4018ac27a0ecSDave Kleikamp 		}
4019ac27a0ecSDave Kleikamp 	} else {
4020ac27a0ecSDave Kleikamp 		raw_inode->i_uid_low =
4021ac27a0ecSDave Kleikamp 			cpu_to_le16(fs_high2lowuid(inode->i_uid));
4022ac27a0ecSDave Kleikamp 		raw_inode->i_gid_low =
4023ac27a0ecSDave Kleikamp 			cpu_to_le16(fs_high2lowgid(inode->i_gid));
4024ac27a0ecSDave Kleikamp 		raw_inode->i_uid_high = 0;
4025ac27a0ecSDave Kleikamp 		raw_inode->i_gid_high = 0;
4026ac27a0ecSDave Kleikamp 	}
4027ac27a0ecSDave Kleikamp 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4028ef7f3835SKalpak Shah 
4029ef7f3835SKalpak Shah 	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
4030ef7f3835SKalpak Shah 	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4031ef7f3835SKalpak Shah 	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4032ef7f3835SKalpak Shah 	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4033ef7f3835SKalpak Shah 
40340fc1b451SAneesh Kumar K.V 	if (ext4_inode_blocks_set(handle, raw_inode, ei))
40350fc1b451SAneesh Kumar K.V 		goto out_brelse;
4036ac27a0ecSDave Kleikamp 	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4037267e4db9SAneesh Kumar K.V 	/* clear the migrate flag in the raw_inode */
4038267e4db9SAneesh Kumar K.V 	raw_inode->i_flags = cpu_to_le32(ei->i_flags & ~EXT4_EXT_MIGRATE);
40399b8f1f01SMingming Cao 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
40409b8f1f01SMingming Cao 	    cpu_to_le32(EXT4_OS_HURD))
4041a1ddeb7eSBadari Pulavarty 		raw_inode->i_file_acl_high =
4042a1ddeb7eSBadari Pulavarty 			cpu_to_le16(ei->i_file_acl >> 32);
40437973c0c1SAneesh Kumar K.V 	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4044a48380f7SAneesh Kumar K.V 	ext4_isize_set(raw_inode, ei->i_disksize);
4045ac27a0ecSDave Kleikamp 	if (ei->i_disksize > 0x7fffffffULL) {
4046ac27a0ecSDave Kleikamp 		struct super_block *sb = inode->i_sb;
4047617ba13bSMingming Cao 		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
4048617ba13bSMingming Cao 				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
4049617ba13bSMingming Cao 				EXT4_SB(sb)->s_es->s_rev_level ==
4050617ba13bSMingming Cao 				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
4051ac27a0ecSDave Kleikamp 			/* If this is the first large file
4052ac27a0ecSDave Kleikamp 			 * created, add a flag to the superblock.
4053ac27a0ecSDave Kleikamp 			 */
4054617ba13bSMingming Cao 			err = ext4_journal_get_write_access(handle,
4055617ba13bSMingming Cao 					EXT4_SB(sb)->s_sbh);
4056ac27a0ecSDave Kleikamp 			if (err)
4057ac27a0ecSDave Kleikamp 				goto out_brelse;
4058617ba13bSMingming Cao 			ext4_update_dynamic_rev(sb);
4059617ba13bSMingming Cao 			EXT4_SET_RO_COMPAT_FEATURE(sb,
4060617ba13bSMingming Cao 					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
4061ac27a0ecSDave Kleikamp 			sb->s_dirt = 1;
4062ac27a0ecSDave Kleikamp 			handle->h_sync = 1;
4063617ba13bSMingming Cao 			err = ext4_journal_dirty_metadata(handle,
4064617ba13bSMingming Cao 					EXT4_SB(sb)->s_sbh);
4065ac27a0ecSDave Kleikamp 		}
4066ac27a0ecSDave Kleikamp 	}
4067ac27a0ecSDave Kleikamp 	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4068ac27a0ecSDave Kleikamp 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4069ac27a0ecSDave Kleikamp 		if (old_valid_dev(inode->i_rdev)) {
4070ac27a0ecSDave Kleikamp 			raw_inode->i_block[0] =
4071ac27a0ecSDave Kleikamp 				cpu_to_le32(old_encode_dev(inode->i_rdev));
4072ac27a0ecSDave Kleikamp 			raw_inode->i_block[1] = 0;
4073ac27a0ecSDave Kleikamp 		} else {
4074ac27a0ecSDave Kleikamp 			raw_inode->i_block[0] = 0;
4075ac27a0ecSDave Kleikamp 			raw_inode->i_block[1] =
4076ac27a0ecSDave Kleikamp 				cpu_to_le32(new_encode_dev(inode->i_rdev));
4077ac27a0ecSDave Kleikamp 			raw_inode->i_block[2] = 0;
4078ac27a0ecSDave Kleikamp 		}
4079617ba13bSMingming Cao 	} else for (block = 0; block < EXT4_N_BLOCKS; block++)
4080ac27a0ecSDave Kleikamp 		raw_inode->i_block[block] = ei->i_data[block];
4081ac27a0ecSDave Kleikamp 
408225ec56b5SJean Noel Cordenner 	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
408325ec56b5SJean Noel Cordenner 	if (ei->i_extra_isize) {
408425ec56b5SJean Noel Cordenner 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
408525ec56b5SJean Noel Cordenner 			raw_inode->i_version_hi =
408625ec56b5SJean Noel Cordenner 			cpu_to_le32(inode->i_version >> 32);
4087ac27a0ecSDave Kleikamp 		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
408825ec56b5SJean Noel Cordenner 	}
408925ec56b5SJean Noel Cordenner 
4090ac27a0ecSDave Kleikamp 
4091617ba13bSMingming Cao 	BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
4092617ba13bSMingming Cao 	rc = ext4_journal_dirty_metadata(handle, bh);
4093ac27a0ecSDave Kleikamp 	if (!err)
4094ac27a0ecSDave Kleikamp 		err = rc;
4095617ba13bSMingming Cao 	ei->i_state &= ~EXT4_STATE_NEW;
4096ac27a0ecSDave Kleikamp 
4097ac27a0ecSDave Kleikamp out_brelse:
4098ac27a0ecSDave Kleikamp 	brelse (bh);
4099617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
4100ac27a0ecSDave Kleikamp 	return err;
4101ac27a0ecSDave Kleikamp }
4102ac27a0ecSDave Kleikamp 
4103ac27a0ecSDave Kleikamp /*
4104617ba13bSMingming Cao  * ext4_write_inode()
4105ac27a0ecSDave Kleikamp  *
4106ac27a0ecSDave Kleikamp  * We are called from a few places:
4107ac27a0ecSDave Kleikamp  *
4108ac27a0ecSDave Kleikamp  * - Within generic_file_write() for O_SYNC files.
4109ac27a0ecSDave Kleikamp  *   Here, there will be no transaction running. We wait for any running
4110ac27a0ecSDave Kleikamp  *   transaction to commit.
4111ac27a0ecSDave Kleikamp  *
4112ac27a0ecSDave Kleikamp  * - Within sys_sync(), kupdate and such.
4113ac27a0ecSDave Kleikamp  *   We wait on commit, if told to.
4114ac27a0ecSDave Kleikamp  *
4115ac27a0ecSDave Kleikamp  * - Within prune_icache() (PF_MEMALLOC == true)
4116ac27a0ecSDave Kleikamp  *   Here we simply return.  We can't afford to block kswapd on the
4117ac27a0ecSDave Kleikamp  *   journal commit.
4118ac27a0ecSDave Kleikamp  *
4119ac27a0ecSDave Kleikamp  * In all cases it is actually safe for us to return without doing anything,
4120ac27a0ecSDave Kleikamp  * because the inode has been copied into a raw inode buffer in
4121617ba13bSMingming Cao  * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
4122ac27a0ecSDave Kleikamp  * knfsd.
4123ac27a0ecSDave Kleikamp  *
4124ac27a0ecSDave Kleikamp  * Note that we are absolutely dependent upon all inode dirtiers doing the
4125ac27a0ecSDave Kleikamp  * right thing: they *must* call mark_inode_dirty() after dirtying info in
4126ac27a0ecSDave Kleikamp  * which we are interested.
4127ac27a0ecSDave Kleikamp  *
4128ac27a0ecSDave Kleikamp  * It would be a bug for them to not do this.  The code:
4129ac27a0ecSDave Kleikamp  *
4130ac27a0ecSDave Kleikamp  *	mark_inode_dirty(inode)
4131ac27a0ecSDave Kleikamp  *	stuff();
4132ac27a0ecSDave Kleikamp  *	inode->i_size = expr;
4133ac27a0ecSDave Kleikamp  *
4134ac27a0ecSDave Kleikamp  * is in error because a kswapd-driven write_inode() could occur while
4135ac27a0ecSDave Kleikamp  * `stuff()' is running, and the new i_size will be lost.  Plus the inode
4136ac27a0ecSDave Kleikamp  * will no longer be on the superblock's dirty inode list.
4137ac27a0ecSDave Kleikamp  */
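/*
 * For contrast, a purely illustrative sketch (not lifted from any caller)
 * of the ordering that the rule above requires:
 *
 *	stuff();
 *	inode->i_size = expr;
 *	mark_inode_dirty(inode);
 *
 * i.e. every field we care about is updated before the inode is marked
 * dirty, so a kswapd-driven write_inode() can never observe a half-updated
 * inode.
 */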
4138617ba13bSMingming Cao int ext4_write_inode(struct inode *inode, int wait)
4139ac27a0ecSDave Kleikamp {
4140ac27a0ecSDave Kleikamp 	if (current->flags & PF_MEMALLOC)
4141ac27a0ecSDave Kleikamp 		return 0;
4142ac27a0ecSDave Kleikamp 
4143617ba13bSMingming Cao 	if (ext4_journal_current_handle()) {
4144b38bd33aSMingming Cao 		jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
4145ac27a0ecSDave Kleikamp 		dump_stack();
4146ac27a0ecSDave Kleikamp 		return -EIO;
4147ac27a0ecSDave Kleikamp 	}
4148ac27a0ecSDave Kleikamp 
4149ac27a0ecSDave Kleikamp 	if (!wait)
4150ac27a0ecSDave Kleikamp 		return 0;
4151ac27a0ecSDave Kleikamp 
4152617ba13bSMingming Cao 	return ext4_force_commit(inode->i_sb);
4153ac27a0ecSDave Kleikamp }
4154ac27a0ecSDave Kleikamp 
4155ac27a0ecSDave Kleikamp /*
4156617ba13bSMingming Cao  * ext4_setattr()
4157ac27a0ecSDave Kleikamp  *
4158ac27a0ecSDave Kleikamp  * Called from notify_change.
4159ac27a0ecSDave Kleikamp  *
4160ac27a0ecSDave Kleikamp  * We want to trap VFS attempts to truncate the file as soon as
4161ac27a0ecSDave Kleikamp  * possible.  In particular, we want to make sure that when the VFS
4162ac27a0ecSDave Kleikamp  * shrinks i_size, we put the inode on the orphan list and modify
4163ac27a0ecSDave Kleikamp  * i_disksize immediately, so that during the subsequent flushing of
4164ac27a0ecSDave Kleikamp  * dirty pages and freeing of disk blocks, we can guarantee that any
4165ac27a0ecSDave Kleikamp  * commit will leave the blocks being flushed in an unused state on
4166ac27a0ecSDave Kleikamp  * disk.  (On recovery, the inode will get truncated and the blocks will
4167ac27a0ecSDave Kleikamp  * be freed, so we have a strong guarantee that no future commit will
4168ac27a0ecSDave Kleikamp  * leave these blocks visible to the user.)
4169ac27a0ecSDave Kleikamp  *
4170678aaf48SJan Kara  * Another thing we have to assure is that if we are in ordered mode
4171678aaf48SJan Kara  * and the inode is still attached to the committing transaction, we must
4172678aaf48SJan Kara  * start writeout of all the dirty pages which are being truncated.
4173678aaf48SJan Kara  * This way we are sure that all the data written in the previous
4174678aaf48SJan Kara  * transaction are already on disk (truncate waits for pages under
4175678aaf48SJan Kara  * writeback).
4176678aaf48SJan Kara  *
4177678aaf48SJan Kara  * Called with inode->i_mutex down.
4178ac27a0ecSDave Kleikamp  */
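/*
 * Purely illustrative sketch of the size-shrinking path implemented below
 * (the real code interleaves uid/gid, quota and error handling):
 *
 *	handle = ext4_journal_start(inode, 3);
 *	ext4_orphan_add(handle, inode);
 *	EXT4_I(inode)->i_disksize = attr->ia_size;
 *	ext4_mark_inode_dirty(handle, inode);
 *	ext4_journal_stop(handle);
 *	if (ext4_should_order_data(inode))
 *		ext4_begin_ordered_truncate(inode, attr->ia_size);
 *	inode_setattr(inode, attr);		(this ends up in ext4_truncate())
 *	if (inode->i_nlink)
 *		ext4_orphan_del(NULL, inode);	(in-core orphan-list cleanup)
 */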
4179617ba13bSMingming Cao int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4180ac27a0ecSDave Kleikamp {
4181ac27a0ecSDave Kleikamp 	struct inode *inode = dentry->d_inode;
4182ac27a0ecSDave Kleikamp 	int error, rc = 0;
4183ac27a0ecSDave Kleikamp 	const unsigned int ia_valid = attr->ia_valid;
4184ac27a0ecSDave Kleikamp 
4185ac27a0ecSDave Kleikamp 	error = inode_change_ok(inode, attr);
4186ac27a0ecSDave Kleikamp 	if (error)
4187ac27a0ecSDave Kleikamp 		return error;
4188ac27a0ecSDave Kleikamp 
4189ac27a0ecSDave Kleikamp 	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
4190ac27a0ecSDave Kleikamp 		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
4191ac27a0ecSDave Kleikamp 		handle_t *handle;
4192ac27a0ecSDave Kleikamp 
4193ac27a0ecSDave Kleikamp 		/* (user+group)*(old+new) structure, inode write (sb,
4194ac27a0ecSDave Kleikamp 		 * inode block, ? - but truncate inode update has it) */
4195617ba13bSMingming Cao 		handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+
4196617ba13bSMingming Cao 					EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
4197ac27a0ecSDave Kleikamp 		if (IS_ERR(handle)) {
4198ac27a0ecSDave Kleikamp 			error = PTR_ERR(handle);
4199ac27a0ecSDave Kleikamp 			goto err_out;
4200ac27a0ecSDave Kleikamp 		}
4201ac27a0ecSDave Kleikamp 		error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
4202ac27a0ecSDave Kleikamp 		if (error) {
4203617ba13bSMingming Cao 			ext4_journal_stop(handle);
4204ac27a0ecSDave Kleikamp 			return error;
4205ac27a0ecSDave Kleikamp 		}
4206ac27a0ecSDave Kleikamp 		/* Update corresponding info in inode so that everything is in
4207ac27a0ecSDave Kleikamp 		 * one transaction */
4208ac27a0ecSDave Kleikamp 		if (attr->ia_valid & ATTR_UID)
4209ac27a0ecSDave Kleikamp 			inode->i_uid = attr->ia_uid;
4210ac27a0ecSDave Kleikamp 		if (attr->ia_valid & ATTR_GID)
4211ac27a0ecSDave Kleikamp 			inode->i_gid = attr->ia_gid;
4212617ba13bSMingming Cao 		error = ext4_mark_inode_dirty(handle, inode);
4213617ba13bSMingming Cao 		ext4_journal_stop(handle);
4214ac27a0ecSDave Kleikamp 	}
4215ac27a0ecSDave Kleikamp 
4216e2b46574SEric Sandeen 	if (attr->ia_valid & ATTR_SIZE) {
4217e2b46574SEric Sandeen 		if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
4218e2b46574SEric Sandeen 			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4219e2b46574SEric Sandeen 
4220e2b46574SEric Sandeen 			if (attr->ia_size > sbi->s_bitmap_maxbytes) {
4221e2b46574SEric Sandeen 				error = -EFBIG;
4222e2b46574SEric Sandeen 				goto err_out;
4223e2b46574SEric Sandeen 			}
4224e2b46574SEric Sandeen 		}
4225e2b46574SEric Sandeen 	}
4226e2b46574SEric Sandeen 
4227ac27a0ecSDave Kleikamp 	if (S_ISREG(inode->i_mode) &&
4228ac27a0ecSDave Kleikamp 	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
4229ac27a0ecSDave Kleikamp 		handle_t *handle;
4230ac27a0ecSDave Kleikamp 
4231617ba13bSMingming Cao 		handle = ext4_journal_start(inode, 3);
4232ac27a0ecSDave Kleikamp 		if (IS_ERR(handle)) {
4233ac27a0ecSDave Kleikamp 			error = PTR_ERR(handle);
4234ac27a0ecSDave Kleikamp 			goto err_out;
4235ac27a0ecSDave Kleikamp 		}
4236ac27a0ecSDave Kleikamp 
4237617ba13bSMingming Cao 		error = ext4_orphan_add(handle, inode);
4238617ba13bSMingming Cao 		EXT4_I(inode)->i_disksize = attr->ia_size;
4239617ba13bSMingming Cao 		rc = ext4_mark_inode_dirty(handle, inode);
4240ac27a0ecSDave Kleikamp 		if (!error)
4241ac27a0ecSDave Kleikamp 			error = rc;
4242617ba13bSMingming Cao 		ext4_journal_stop(handle);
4243678aaf48SJan Kara 
4244678aaf48SJan Kara 		if (ext4_should_order_data(inode)) {
4245678aaf48SJan Kara 			error = ext4_begin_ordered_truncate(inode,
4246678aaf48SJan Kara 							    attr->ia_size);
4247678aaf48SJan Kara 			if (error) {
4248678aaf48SJan Kara 				/* Do as much error cleanup as possible */
4249678aaf48SJan Kara 				handle = ext4_journal_start(inode, 3);
4250678aaf48SJan Kara 				if (IS_ERR(handle)) {
4251678aaf48SJan Kara 					ext4_orphan_del(NULL, inode);
4252678aaf48SJan Kara 					goto err_out;
4253678aaf48SJan Kara 				}
4254678aaf48SJan Kara 				ext4_orphan_del(handle, inode);
4255678aaf48SJan Kara 				ext4_journal_stop(handle);
4256678aaf48SJan Kara 				goto err_out;
4257678aaf48SJan Kara 			}
4258678aaf48SJan Kara 		}
4259ac27a0ecSDave Kleikamp 	}
4260ac27a0ecSDave Kleikamp 
4261ac27a0ecSDave Kleikamp 	rc = inode_setattr(inode, attr);
4262ac27a0ecSDave Kleikamp 
4263617ba13bSMingming Cao 	/* If inode_setattr's call to ext4_truncate failed to get a
4264ac27a0ecSDave Kleikamp 	 * transaction handle at all, we need to clean up the in-core
4265ac27a0ecSDave Kleikamp 	 * orphan list manually. */
4266ac27a0ecSDave Kleikamp 	if (inode->i_nlink)
4267617ba13bSMingming Cao 		ext4_orphan_del(NULL, inode);
4268ac27a0ecSDave Kleikamp 
4269ac27a0ecSDave Kleikamp 	if (!rc && (ia_valid & ATTR_MODE))
4270617ba13bSMingming Cao 		rc = ext4_acl_chmod(inode);
4271ac27a0ecSDave Kleikamp 
4272ac27a0ecSDave Kleikamp err_out:
4273617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, error);
4274ac27a0ecSDave Kleikamp 	if (!error)
4275ac27a0ecSDave Kleikamp 		error = rc;
4276ac27a0ecSDave Kleikamp 	return error;
4277ac27a0ecSDave Kleikamp }
4278ac27a0ecSDave Kleikamp 
42793e3398a0SMingming Cao int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
42803e3398a0SMingming Cao 		 struct kstat *stat)
42813e3398a0SMingming Cao {
42823e3398a0SMingming Cao 	struct inode *inode;
42833e3398a0SMingming Cao 	unsigned long delalloc_blocks;
42843e3398a0SMingming Cao 
42853e3398a0SMingming Cao 	inode = dentry->d_inode;
42863e3398a0SMingming Cao 	generic_fillattr(inode, stat);
42873e3398a0SMingming Cao 
42883e3398a0SMingming Cao 	/*
42893e3398a0SMingming Cao 	 * We can't update i_blocks if the block allocation is delayed;
42903e3398a0SMingming Cao 	 * otherwise, in the case of a system crash before the real block
42913e3398a0SMingming Cao 	 * allocation is done, we would have i_blocks inconsistent with
42923e3398a0SMingming Cao 	 * the on-disk file blocks.
42933e3398a0SMingming Cao 	 * We always keep i_blocks updated together with the real
42943e3398a0SMingming Cao 	 * allocation.  But so as not to confuse userspace, stat
42953e3398a0SMingming Cao 	 * returns blocks that include the delayed allocation
42963e3398a0SMingming Cao 	 * blocks for this file.
42973e3398a0SMingming Cao 	 */
42983e3398a0SMingming Cao 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
42993e3398a0SMingming Cao 	delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
43003e3398a0SMingming Cao 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
43013e3398a0SMingming Cao 
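	/*
	 * delalloc_blocks is counted in filesystem blocks; stat->blocks is
	 * reported in 512-byte sectors, hence the conversion below.
	 */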
43023e3398a0SMingming Cao 	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
43033e3398a0SMingming Cao 	return 0;
43043e3398a0SMingming Cao }
4305ac27a0ecSDave Kleikamp 
4306ac27a0ecSDave Kleikamp /*
4307ac27a0ecSDave Kleikamp  * How many blocks doth make a writepage()?
4308ac27a0ecSDave Kleikamp  *
4309ac27a0ecSDave Kleikamp  * With N blocks per page, it may be:
4310ac27a0ecSDave Kleikamp  * N data blocks
4311ac27a0ecSDave Kleikamp  * 2 indirect blocks
4312ac27a0ecSDave Kleikamp  * 2 dindirect
4313ac27a0ecSDave Kleikamp  * 1 tindirect
4314ac27a0ecSDave Kleikamp  * N+5 bitmap blocks (from the above)
4315ac27a0ecSDave Kleikamp  * N+5 group descriptor summary blocks
4316ac27a0ecSDave Kleikamp  * 1 inode block
4317ac27a0ecSDave Kleikamp  * 1 superblock.
4318617ba13bSMingming Cao  * 2 * EXT4_SINGLEDATA_TRANS_BLOCKS for the quota files
4319ac27a0ecSDave Kleikamp  *
4320617ba13bSMingming Cao  * 3 * (N + 5) + 2 + 2 * EXT4_SINGLEDATA_TRANS_BLOCKS
4321ac27a0ecSDave Kleikamp  *
4322ac27a0ecSDave Kleikamp  * With ordered or writeback data it's the same, less the N data blocks.
4323ac27a0ecSDave Kleikamp  *
4324ac27a0ecSDave Kleikamp  * If the inode's direct blocks can hold an integral number of pages then a
4325ac27a0ecSDave Kleikamp  * page cannot straddle two indirect blocks, and we can only touch one indirect
4326ac27a0ecSDave Kleikamp  * and dindirect block, and the "5" above becomes "3".
4327ac27a0ecSDave Kleikamp  *
4328ac27a0ecSDave Kleikamp  * This still overestimates under most circumstances.  If we were to pass the
4329ac27a0ecSDave Kleikamp  * start and end offsets in here as well we could do block_to_path() on each
4330ac27a0ecSDave Kleikamp  * block and work out the exact number of indirects which are touched.  Pah.
4331ac27a0ecSDave Kleikamp  */
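/*
 * Worked example (illustrative only, quota not included): assuming 4KiB
 * blocks and 4KiB pages for an indirect-block-mapped file, bpp is 1 and
 * EXT4_NDIR_BLOCKS % bpp is 0, so "indirects" below is 3:
 *
 *	data=journal:			3 * (1 + 3) + 2 = 14 credits
 *	data=ordered / writeback:	2 * (1 + 3) + 2 = 10 credits
 */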
4332ac27a0ecSDave Kleikamp 
4333a86c6181SAlex Tomas int ext4_writepage_trans_blocks(struct inode *inode)
4334ac27a0ecSDave Kleikamp {
4335617ba13bSMingming Cao 	int bpp = ext4_journal_blocks_per_page(inode);
4336617ba13bSMingming Cao 	int indirects = (EXT4_NDIR_BLOCKS % bpp) ? 5 : 3;
4337ac27a0ecSDave Kleikamp 	int ret;
4338ac27a0ecSDave Kleikamp 
4339a86c6181SAlex Tomas 	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
4340a86c6181SAlex Tomas 		return ext4_ext_writepage_trans_blocks(inode, bpp);
4341a86c6181SAlex Tomas 
4342617ba13bSMingming Cao 	if (ext4_should_journal_data(inode))
4343ac27a0ecSDave Kleikamp 		ret = 3 * (bpp + indirects) + 2;
4344ac27a0ecSDave Kleikamp 	else
4345ac27a0ecSDave Kleikamp 		ret = 2 * (bpp + indirects) + 2;
4346ac27a0ecSDave Kleikamp 
4347ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA
4348ac27a0ecSDave Kleikamp 	/* We know that structure was already allocated during DQUOT_INIT so
4349ac27a0ecSDave Kleikamp 	 * we will be updating only the data blocks + inodes */
4350617ba13bSMingming Cao 	ret += 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
4351ac27a0ecSDave Kleikamp #endif
4352ac27a0ecSDave Kleikamp 
4353ac27a0ecSDave Kleikamp 	return ret;
4354ac27a0ecSDave Kleikamp }
4355ac27a0ecSDave Kleikamp 
4356ac27a0ecSDave Kleikamp /*
4357617ba13bSMingming Cao  * The caller must have previously called ext4_reserve_inode_write().
4358ac27a0ecSDave Kleikamp  * Given this, we know that the caller already has write access to iloc->bh.
4359ac27a0ecSDave Kleikamp  */
4360617ba13bSMingming Cao int ext4_mark_iloc_dirty(handle_t *handle,
4361617ba13bSMingming Cao 		struct inode *inode, struct ext4_iloc *iloc)
4362ac27a0ecSDave Kleikamp {
4363ac27a0ecSDave Kleikamp 	int err = 0;
4364ac27a0ecSDave Kleikamp 
436525ec56b5SJean Noel Cordenner 	if (test_opt(inode->i_sb, I_VERSION))
436625ec56b5SJean Noel Cordenner 		inode_inc_iversion(inode);
436725ec56b5SJean Noel Cordenner 
4368ac27a0ecSDave Kleikamp 	/* the do_update_inode consumes one bh->b_count */
4369ac27a0ecSDave Kleikamp 	get_bh(iloc->bh);
4370ac27a0ecSDave Kleikamp 
4371dab291afSMingming Cao 	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
4372617ba13bSMingming Cao 	err = ext4_do_update_inode(handle, inode, iloc);
4373ac27a0ecSDave Kleikamp 	put_bh(iloc->bh);
4374ac27a0ecSDave Kleikamp 	return err;
4375ac27a0ecSDave Kleikamp }
4376ac27a0ecSDave Kleikamp 
4377ac27a0ecSDave Kleikamp /*
4378ac27a0ecSDave Kleikamp  * On success, we end up with an outstanding reference count against
4379ac27a0ecSDave Kleikamp  * iloc->bh.  This _must_ be cleaned up later.
4380ac27a0ecSDave Kleikamp  */
4381ac27a0ecSDave Kleikamp 
4382ac27a0ecSDave Kleikamp int
4383617ba13bSMingming Cao ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
4384617ba13bSMingming Cao 			 struct ext4_iloc *iloc)
4385ac27a0ecSDave Kleikamp {
4386ac27a0ecSDave Kleikamp 	int err = 0;
4387ac27a0ecSDave Kleikamp 	if (handle) {
4388617ba13bSMingming Cao 		err = ext4_get_inode_loc(inode, iloc);
4389ac27a0ecSDave Kleikamp 		if (!err) {
4390ac27a0ecSDave Kleikamp 			BUFFER_TRACE(iloc->bh, "get_write_access");
4391617ba13bSMingming Cao 			err = ext4_journal_get_write_access(handle, iloc->bh);
4392ac27a0ecSDave Kleikamp 			if (err) {
4393ac27a0ecSDave Kleikamp 				brelse(iloc->bh);
4394ac27a0ecSDave Kleikamp 				iloc->bh = NULL;
4395ac27a0ecSDave Kleikamp 			}
4396ac27a0ecSDave Kleikamp 		}
4397ac27a0ecSDave Kleikamp 	}
4398617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
4399ac27a0ecSDave Kleikamp 	return err;
4400ac27a0ecSDave Kleikamp }
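/*
 * A minimal usage sketch of the reserve/mark pairing (ext4_mark_inode_dirty()
 * below is the real in-tree user; "update fields" is a placeholder):
 *
 *	struct ext4_iloc iloc;
 *	int err = ext4_reserve_inode_write(handle, inode, &iloc);
 *	if (!err) {
 *		... update in-core inode / ext4_raw_inode(&iloc) fields ...
 *		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
 *	}
 *
 * ext4_mark_iloc_dirty() consumes the bh reference taken by
 * ext4_reserve_inode_write(), so the caller does not brelse() it again
 * on this path.
 */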
4401ac27a0ecSDave Kleikamp 
4402ac27a0ecSDave Kleikamp /*
44036dd4ee7cSKalpak Shah  * Expand an inode by new_extra_isize bytes.
44046dd4ee7cSKalpak Shah  * Returns 0 on success or negative error number on failure.
44056dd4ee7cSKalpak Shah  */
44061d03ec98SAneesh Kumar K.V static int ext4_expand_extra_isize(struct inode *inode,
44071d03ec98SAneesh Kumar K.V 				   unsigned int new_extra_isize,
44081d03ec98SAneesh Kumar K.V 				   struct ext4_iloc iloc,
44091d03ec98SAneesh Kumar K.V 				   handle_t *handle)
44106dd4ee7cSKalpak Shah {
44116dd4ee7cSKalpak Shah 	struct ext4_inode *raw_inode;
44126dd4ee7cSKalpak Shah 	struct ext4_xattr_ibody_header *header;
44136dd4ee7cSKalpak Shah 	struct ext4_xattr_entry *entry;
44146dd4ee7cSKalpak Shah 
44156dd4ee7cSKalpak Shah 	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
44166dd4ee7cSKalpak Shah 		return 0;
44176dd4ee7cSKalpak Shah 
44186dd4ee7cSKalpak Shah 	raw_inode = ext4_raw_inode(&iloc);
44196dd4ee7cSKalpak Shah 
44206dd4ee7cSKalpak Shah 	header = IHDR(inode, raw_inode);
44216dd4ee7cSKalpak Shah 	entry = IFIRST(header);
44226dd4ee7cSKalpak Shah 
44236dd4ee7cSKalpak Shah 	/* No extended attributes present */
44246dd4ee7cSKalpak Shah 	if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
44256dd4ee7cSKalpak Shah 		header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
44266dd4ee7cSKalpak Shah 		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
44276dd4ee7cSKalpak Shah 			new_extra_isize);
44286dd4ee7cSKalpak Shah 		EXT4_I(inode)->i_extra_isize = new_extra_isize;
44296dd4ee7cSKalpak Shah 		return 0;
44306dd4ee7cSKalpak Shah 	}
44316dd4ee7cSKalpak Shah 
44326dd4ee7cSKalpak Shah 	/* try to expand with EAs present */
44336dd4ee7cSKalpak Shah 	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
44346dd4ee7cSKalpak Shah 					  raw_inode, handle);
44356dd4ee7cSKalpak Shah }
44366dd4ee7cSKalpak Shah 
44376dd4ee7cSKalpak Shah /*
4438ac27a0ecSDave Kleikamp  * What we do here is to mark the in-core inode as clean with respect to inode
4439ac27a0ecSDave Kleikamp  * dirtiness (it may still be data-dirty).
4440ac27a0ecSDave Kleikamp  * This means that the in-core inode may be reaped by prune_icache
4441ac27a0ecSDave Kleikamp  * without having to perform any I/O.  This is a very good thing,
4442ac27a0ecSDave Kleikamp  * because *any* task may call prune_icache - even ones which
4443ac27a0ecSDave Kleikamp  * have a transaction open against a different journal.
4444ac27a0ecSDave Kleikamp  *
4445ac27a0ecSDave Kleikamp  * Is this cheating?  Not really.  Sure, we haven't written the
4446ac27a0ecSDave Kleikamp  * inode out, but prune_icache isn't a user-visible syncing function.
4447ac27a0ecSDave Kleikamp  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
4448ac27a0ecSDave Kleikamp  * we start and wait on commits.
4449ac27a0ecSDave Kleikamp  *
4450ac27a0ecSDave Kleikamp  * Is this efficient/effective?  Well, we're being nice to the system
4451ac27a0ecSDave Kleikamp  * by cleaning up our inodes proactively so they can be reaped
4452ac27a0ecSDave Kleikamp  * without I/O.  But we are potentially leaving up to five seconds'
4453ac27a0ecSDave Kleikamp  * worth of inodes floating about which prune_icache wants us to
4454ac27a0ecSDave Kleikamp  * write out.  One way to fix that would be to get prune_icache()
4455ac27a0ecSDave Kleikamp  * to do a write_super() to free up some memory.  It has the desired
4456ac27a0ecSDave Kleikamp  * effect.
4457ac27a0ecSDave Kleikamp  */
4458617ba13bSMingming Cao int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4459ac27a0ecSDave Kleikamp {
4460617ba13bSMingming Cao 	struct ext4_iloc iloc;
44616dd4ee7cSKalpak Shah 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
44626dd4ee7cSKalpak Shah 	static unsigned int mnt_count;
44636dd4ee7cSKalpak Shah 	int err, ret;
4464ac27a0ecSDave Kleikamp 
4465ac27a0ecSDave Kleikamp 	might_sleep();
4466617ba13bSMingming Cao 	err = ext4_reserve_inode_write(handle, inode, &iloc);
44676dd4ee7cSKalpak Shah 	if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
44686dd4ee7cSKalpak Shah 	    !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
44696dd4ee7cSKalpak Shah 		/*
44706dd4ee7cSKalpak Shah 		 * We need extra buffer credits since we may write into EA block
44716dd4ee7cSKalpak Shah 		 * with this same handle. If journal_extend fails, then it will
44726dd4ee7cSKalpak Shah 		 * only result in a minor loss of functionality for that inode.
44736dd4ee7cSKalpak Shah 		 * If this is felt to be critical, then e2fsck should be run to
44746dd4ee7cSKalpak Shah 		 * force a large enough s_min_extra_isize.
44756dd4ee7cSKalpak Shah 		 */
44766dd4ee7cSKalpak Shah 		if ((jbd2_journal_extend(handle,
44776dd4ee7cSKalpak Shah 			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
44786dd4ee7cSKalpak Shah 			ret = ext4_expand_extra_isize(inode,
44796dd4ee7cSKalpak Shah 						      sbi->s_want_extra_isize,
44806dd4ee7cSKalpak Shah 						      iloc, handle);
44816dd4ee7cSKalpak Shah 			if (ret) {
44826dd4ee7cSKalpak Shah 				EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
4483c1bddad9SAneesh Kumar K.V 				if (mnt_count !=
4484c1bddad9SAneesh Kumar K.V 					le16_to_cpu(sbi->s_es->s_mnt_count)) {
448546e665e9SHarvey Harrison 					ext4_warning(inode->i_sb, __func__,
44866dd4ee7cSKalpak Shah 					"Unable to expand inode %lu. Delete"
44876dd4ee7cSKalpak Shah 					" some EAs or run e2fsck.",
44886dd4ee7cSKalpak Shah 					inode->i_ino);
4489c1bddad9SAneesh Kumar K.V 					mnt_count =
4490c1bddad9SAneesh Kumar K.V 					  le16_to_cpu(sbi->s_es->s_mnt_count);
44916dd4ee7cSKalpak Shah 				}
44926dd4ee7cSKalpak Shah 			}
44936dd4ee7cSKalpak Shah 		}
44946dd4ee7cSKalpak Shah 	}
4495ac27a0ecSDave Kleikamp 	if (!err)
4496617ba13bSMingming Cao 		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
4497ac27a0ecSDave Kleikamp 	return err;
4498ac27a0ecSDave Kleikamp }
4499ac27a0ecSDave Kleikamp 
4500ac27a0ecSDave Kleikamp /*
4501617ba13bSMingming Cao  * ext4_dirty_inode() is called from __mark_inode_dirty()
4502ac27a0ecSDave Kleikamp  *
4503ac27a0ecSDave Kleikamp  * We're really interested in the case where a file is being extended.
4504ac27a0ecSDave Kleikamp  * i_size has been changed by generic_commit_write() and we thus need
4505ac27a0ecSDave Kleikamp  * to include the updated inode in the current transaction.
4506ac27a0ecSDave Kleikamp  *
4507ac27a0ecSDave Kleikamp  * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
4508ac27a0ecSDave Kleikamp  * are allocated to the file.
4509ac27a0ecSDave Kleikamp  *
4510ac27a0ecSDave Kleikamp  * If the inode is marked synchronous, we don't honour that here - doing
4511ac27a0ecSDave Kleikamp  * so would cause a commit on atime updates, which we don't bother doing.
4512ac27a0ecSDave Kleikamp  * We handle synchronous inodes at the highest possible level.
4513ac27a0ecSDave Kleikamp  */
4514617ba13bSMingming Cao void ext4_dirty_inode(struct inode *inode)
4515ac27a0ecSDave Kleikamp {
4516617ba13bSMingming Cao 	handle_t *current_handle = ext4_journal_current_handle();
4517ac27a0ecSDave Kleikamp 	handle_t *handle;
4518ac27a0ecSDave Kleikamp 
4519617ba13bSMingming Cao 	handle = ext4_journal_start(inode, 2);
4520ac27a0ecSDave Kleikamp 	if (IS_ERR(handle))
4521ac27a0ecSDave Kleikamp 		goto out;
4522ac27a0ecSDave Kleikamp 	if (current_handle &&
4523ac27a0ecSDave Kleikamp 		current_handle->h_transaction != handle->h_transaction) {
4524ac27a0ecSDave Kleikamp 		/* This task has a transaction open against a different fs */
4525ac27a0ecSDave Kleikamp 		printk(KERN_EMERG "%s: transactions do not match!\n",
452646e665e9SHarvey Harrison 		       __func__);
4527ac27a0ecSDave Kleikamp 	} else {
4528ac27a0ecSDave Kleikamp 		jbd_debug(5, "marking dirty.  outer handle=%p\n",
4529ac27a0ecSDave Kleikamp 				current_handle);
4530617ba13bSMingming Cao 		ext4_mark_inode_dirty(handle, inode);
4531ac27a0ecSDave Kleikamp 	}
4532617ba13bSMingming Cao 	ext4_journal_stop(handle);
4533ac27a0ecSDave Kleikamp out:
4534ac27a0ecSDave Kleikamp 	return;
4535ac27a0ecSDave Kleikamp }
4536ac27a0ecSDave Kleikamp 
4537ac27a0ecSDave Kleikamp #if 0
4538ac27a0ecSDave Kleikamp /*
4539ac27a0ecSDave Kleikamp  * Bind an inode's backing buffer_head into this transaction, to prevent
4540ac27a0ecSDave Kleikamp  * it from being flushed to disk early.  Unlike
4541617ba13bSMingming Cao  * ext4_reserve_inode_write, this leaves behind no bh reference and
4542ac27a0ecSDave Kleikamp  * returns no iloc structure, so the caller needs to repeat the iloc
4543ac27a0ecSDave Kleikamp  * lookup to mark the inode dirty later.
4544ac27a0ecSDave Kleikamp  */
4545617ba13bSMingming Cao static int ext4_pin_inode(handle_t *handle, struct inode *inode)
4546ac27a0ecSDave Kleikamp {
4547617ba13bSMingming Cao 	struct ext4_iloc iloc;
4548ac27a0ecSDave Kleikamp 
4549ac27a0ecSDave Kleikamp 	int err = 0;
4550ac27a0ecSDave Kleikamp 	if (handle) {
4551617ba13bSMingming Cao 		err = ext4_get_inode_loc(inode, &iloc);
4552ac27a0ecSDave Kleikamp 		if (!err) {
4553ac27a0ecSDave Kleikamp 			BUFFER_TRACE(iloc.bh, "get_write_access");
4554dab291afSMingming Cao 			err = jbd2_journal_get_write_access(handle, iloc.bh);
4555ac27a0ecSDave Kleikamp 			if (!err)
4556617ba13bSMingming Cao 				err = ext4_journal_dirty_metadata(handle,
4557ac27a0ecSDave Kleikamp 								  iloc.bh);
4558ac27a0ecSDave Kleikamp 			brelse(iloc.bh);
4559ac27a0ecSDave Kleikamp 		}
4560ac27a0ecSDave Kleikamp 	}
4561617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
4562ac27a0ecSDave Kleikamp 	return err;
4563ac27a0ecSDave Kleikamp }
4564ac27a0ecSDave Kleikamp #endif
4565ac27a0ecSDave Kleikamp 
4566617ba13bSMingming Cao int ext4_change_inode_journal_flag(struct inode *inode, int val)
4567ac27a0ecSDave Kleikamp {
4568ac27a0ecSDave Kleikamp 	journal_t *journal;
4569ac27a0ecSDave Kleikamp 	handle_t *handle;
4570ac27a0ecSDave Kleikamp 	int err;
4571ac27a0ecSDave Kleikamp 
4572ac27a0ecSDave Kleikamp 	/*
4573ac27a0ecSDave Kleikamp 	 * We have to be very careful here: changing a data block's
4574ac27a0ecSDave Kleikamp 	 * journaling status dynamically is dangerous.  If we write a
4575ac27a0ecSDave Kleikamp 	 * data block to the journal, change the status and then delete
4576ac27a0ecSDave Kleikamp 	 * that block, we risk forgetting to revoke the old log record
4577ac27a0ecSDave Kleikamp 	 * from the journal and so a subsequent replay can corrupt data.
4578ac27a0ecSDave Kleikamp 	 * So, first we make sure that the journal is empty and that
4579ac27a0ecSDave Kleikamp 	 * nobody is changing anything.
4580ac27a0ecSDave Kleikamp 	 */
4581ac27a0ecSDave Kleikamp 
4582617ba13bSMingming Cao 	journal = EXT4_JOURNAL(inode);
4583d699594dSDave Hansen 	if (is_journal_aborted(journal))
4584ac27a0ecSDave Kleikamp 		return -EROFS;
4585ac27a0ecSDave Kleikamp 
4586dab291afSMingming Cao 	jbd2_journal_lock_updates(journal);
4587dab291afSMingming Cao 	jbd2_journal_flush(journal);
4588ac27a0ecSDave Kleikamp 
4589ac27a0ecSDave Kleikamp 	/*
4590ac27a0ecSDave Kleikamp 	 * OK, there are no updates running now, and all cached data is
4591ac27a0ecSDave Kleikamp 	 * synced to disk.  We are now in a completely consistent state
4592ac27a0ecSDave Kleikamp 	 * which doesn't have anything in the journal, and we know that
4593ac27a0ecSDave Kleikamp 	 * no filesystem updates are running, so it is safe to modify
4594ac27a0ecSDave Kleikamp 	 * the inode's in-core data-journaling state flag now.
4595ac27a0ecSDave Kleikamp 	 */
4596ac27a0ecSDave Kleikamp 
4597ac27a0ecSDave Kleikamp 	if (val)
4598617ba13bSMingming Cao 		EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
4599ac27a0ecSDave Kleikamp 	else
4600617ba13bSMingming Cao 		EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
4601617ba13bSMingming Cao 	ext4_set_aops(inode);
4602ac27a0ecSDave Kleikamp 
4603dab291afSMingming Cao 	jbd2_journal_unlock_updates(journal);
4604ac27a0ecSDave Kleikamp 
4605ac27a0ecSDave Kleikamp 	/* Finally we can mark the inode as dirty. */
4606ac27a0ecSDave Kleikamp 
4607617ba13bSMingming Cao 	handle = ext4_journal_start(inode, 1);
4608ac27a0ecSDave Kleikamp 	if (IS_ERR(handle))
4609ac27a0ecSDave Kleikamp 		return PTR_ERR(handle);
4610ac27a0ecSDave Kleikamp 
4611617ba13bSMingming Cao 	err = ext4_mark_inode_dirty(handle, inode);
4612ac27a0ecSDave Kleikamp 	handle->h_sync = 1;
4613617ba13bSMingming Cao 	ext4_journal_stop(handle);
4614617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
4615ac27a0ecSDave Kleikamp 
4616ac27a0ecSDave Kleikamp 	return err;
4617ac27a0ecSDave Kleikamp }
46182e9ee850SAneesh Kumar K.V 
46192e9ee850SAneesh Kumar K.V static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
46202e9ee850SAneesh Kumar K.V {
46212e9ee850SAneesh Kumar K.V 	return !buffer_mapped(bh);
46222e9ee850SAneesh Kumar K.V }
46232e9ee850SAneesh Kumar K.V 
46242e9ee850SAneesh Kumar K.V int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page)
46252e9ee850SAneesh Kumar K.V {
46262e9ee850SAneesh Kumar K.V 	loff_t size;
46272e9ee850SAneesh Kumar K.V 	unsigned long len;
46282e9ee850SAneesh Kumar K.V 	int ret = -EINVAL;
46292e9ee850SAneesh Kumar K.V 	struct file *file = vma->vm_file;
46302e9ee850SAneesh Kumar K.V 	struct inode *inode = file->f_path.dentry->d_inode;
46312e9ee850SAneesh Kumar K.V 	struct address_space *mapping = inode->i_mapping;
46322e9ee850SAneesh Kumar K.V 
46332e9ee850SAneesh Kumar K.V 	/*
46342e9ee850SAneesh Kumar K.V 	 * Get i_alloc_sem to stop truncates messing with the inode. We cannot
46352e9ee850SAneesh Kumar K.V 	 * get i_mutex because we are already holding mmap_sem.
46362e9ee850SAneesh Kumar K.V 	 */
46372e9ee850SAneesh Kumar K.V 	down_read(&inode->i_alloc_sem);
46382e9ee850SAneesh Kumar K.V 	size = i_size_read(inode);
46392e9ee850SAneesh Kumar K.V 	if (page->mapping != mapping || size <= page_offset(page)
46402e9ee850SAneesh Kumar K.V 	    || !PageUptodate(page)) {
46412e9ee850SAneesh Kumar K.V 		/* page got truncated from under us? */
46422e9ee850SAneesh Kumar K.V 		goto out_unlock;
46432e9ee850SAneesh Kumar K.V 	}
46442e9ee850SAneesh Kumar K.V 	ret = 0;
46452e9ee850SAneesh Kumar K.V 	if (PageMappedToDisk(page))
46462e9ee850SAneesh Kumar K.V 		goto out_unlock;
46472e9ee850SAneesh Kumar K.V 
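	/*
	 * Only the part of the page that lies below i_size needs to be
	 * mapped: the page containing EOF gets a partial length, all
	 * other pages are handled in full.
	 */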
46482e9ee850SAneesh Kumar K.V 	if (page->index == size >> PAGE_CACHE_SHIFT)
46492e9ee850SAneesh Kumar K.V 		len = size & ~PAGE_CACHE_MASK;
46502e9ee850SAneesh Kumar K.V 	else
46512e9ee850SAneesh Kumar K.V 		len = PAGE_CACHE_SIZE;
46522e9ee850SAneesh Kumar K.V 
46532e9ee850SAneesh Kumar K.V 	if (page_has_buffers(page)) {
46542e9ee850SAneesh Kumar K.V 		/* return if we have all the buffers mapped */
46552e9ee850SAneesh Kumar K.V 		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
46562e9ee850SAneesh Kumar K.V 				       ext4_bh_unmapped))
46572e9ee850SAneesh Kumar K.V 			goto out_unlock;
46582e9ee850SAneesh Kumar K.V 	}
46592e9ee850SAneesh Kumar K.V 	/*
46602e9ee850SAneesh Kumar K.V 	 * OK, we need to fill the hole... Do write_begin write_end
46612e9ee850SAneesh Kumar K.V 	 * to do block allocation/reservation. We are not holding
46622e9ee850SAneesh Kumar K.V 	 * inode->i_mutex here. That allows parallel write_begin,
46632e9ee850SAneesh Kumar K.V 	 * write_end calls. lock_page prevents this from happening
46642e9ee850SAneesh Kumar K.V 	 * on the same page, though.
46652e9ee850SAneesh Kumar K.V 	 */
46662e9ee850SAneesh Kumar K.V 	ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
46672e9ee850SAneesh Kumar K.V 			len, AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
46682e9ee850SAneesh Kumar K.V 	if (ret < 0)
46692e9ee850SAneesh Kumar K.V 		goto out_unlock;
46702e9ee850SAneesh Kumar K.V 	ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
46712e9ee850SAneesh Kumar K.V 			len, len, page, NULL);
46722e9ee850SAneesh Kumar K.V 	if (ret < 0)
46732e9ee850SAneesh Kumar K.V 		goto out_unlock;
46742e9ee850SAneesh Kumar K.V 	ret = 0;
46752e9ee850SAneesh Kumar K.V out_unlock:
46762e9ee850SAneesh Kumar K.V 	up_read(&inode->i_alloc_sem);
46772e9ee850SAneesh Kumar K.V 	return ret;
46782e9ee850SAneesh Kumar K.V }
4679