xref: /openbmc/linux/fs/ext4/inode.c (revision 82ced6fd)
1 /*
2  *  linux/fs/ext4/inode.c
3  *
4  * Copyright (C) 1992, 1993, 1994, 1995
5  * Remy Card (card@masi.ibp.fr)
6  * Laboratoire MASI - Institut Blaise Pascal
7  * Universite Pierre et Marie Curie (Paris VI)
8  *
9  *  from
10  *
11  *  linux/fs/minix/inode.c
12  *
13  *  Copyright (C) 1991, 1992  Linus Torvalds
14  *
15  *  Goal-directed block allocation by Stephen Tweedie
16  *	(sct@redhat.com), 1993, 1998
17  *  Big-endian to little-endian byte-swapping/bitmaps by
18  *        David S. Miller (davem@caip.rutgers.edu), 1995
19  *  64-bit file support on 64-bit platforms by Jakub Jelinek
20  *	(jj@sunsite.ms.mff.cuni.cz)
21  *
22  *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
23  */
24 
25 #include <linux/module.h>
26 #include <linux/fs.h>
27 #include <linux/time.h>
28 #include <linux/jbd2.h>
29 #include <linux/highuid.h>
30 #include <linux/pagemap.h>
31 #include <linux/quotaops.h>
32 #include <linux/string.h>
33 #include <linux/buffer_head.h>
34 #include <linux/writeback.h>
35 #include <linux/pagevec.h>
36 #include <linux/mpage.h>
37 #include <linux/namei.h>
38 #include <linux/uio.h>
39 #include <linux/bio.h>
40 #include "ext4_jbd2.h"
41 #include "xattr.h"
42 #include "acl.h"
43 #include "ext4_extents.h"
44 
45 #define MPAGE_DA_EXTENT_TAIL 0x01
46 
47 static inline int ext4_begin_ordered_truncate(struct inode *inode,
48 					      loff_t new_size)
49 {
50 	return jbd2_journal_begin_ordered_truncate(
51 					EXT4_SB(inode->i_sb)->s_journal,
52 					&EXT4_I(inode)->jinode,
53 					new_size);
54 }
55 
56 static void ext4_invalidatepage(struct page *page, unsigned long offset);
57 
58 /*
59  * Test whether an inode is a fast symlink.
60  */
61 static int ext4_inode_is_fast_symlink(struct inode *inode)
62 {
63 	int ea_blocks = EXT4_I(inode)->i_file_acl ?
64 		(inode->i_sb->s_blocksize >> 9) : 0;
65 
66 	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
67 }
68 
69 /*
70  * The ext4 forget function must perform a revoke if we are freeing data
71  * which has been journaled.  Metadata (eg. indirect blocks) must be
72  * revoked in all cases.
73  *
74  * "bh" may be NULL: a metadata block may have been freed from memory
75  * but there may still be a record of it in the journal, and that record
76  * still needs to be revoked.
77  *
78  * If the handle isn't valid we're not journaling so there's nothing to do.
79  */
80 int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
81 			struct buffer_head *bh, ext4_fsblk_t blocknr)
82 {
83 	int err;
84 
85 	if (!ext4_handle_valid(handle))
86 		return 0;
87 
88 	might_sleep();
89 
90 	BUFFER_TRACE(bh, "enter");
91 
92 	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
93 		  "data mode %lx\n",
94 		  bh, is_metadata, inode->i_mode,
95 		  test_opt(inode->i_sb, DATA_FLAGS));
96 
97 	/* Never use the revoke function if we are doing full data
98 	 * journaling: there is no need to, and a V1 superblock won't
99 	 * support it.  Otherwise, only skip the revoke on un-journaled
100 	 * data blocks. */
101 
102 	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
103 	    (!is_metadata && !ext4_should_journal_data(inode))) {
104 		if (bh) {
105 			BUFFER_TRACE(bh, "call jbd2_journal_forget");
106 			return ext4_journal_forget(handle, bh);
107 		}
108 		return 0;
109 	}
110 
111 	/*
112 	 * data!=journal && (is_metadata || should_journal_data(inode))
113 	 */
114 	BUFFER_TRACE(bh, "call ext4_journal_revoke");
115 	err = ext4_journal_revoke(handle, blocknr, bh);
116 	if (err)
117 		ext4_abort(inode->i_sb, __func__,
118 			   "error %d when attempting revoke", err);
119 	BUFFER_TRACE(bh, "exit");
120 	return err;
121 }
122 
123 /*
124  * Work out how many blocks we need to proceed with the next chunk of a
125  * truncate transaction.
126  */
127 static unsigned long blocks_for_truncate(struct inode *inode)
128 {
129 	ext4_lblk_t needed;
130 
131 	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
132 
133 	/* Give ourselves just enough room to cope with inodes in which
134 	 * i_blocks is corrupt: we've seen disk corruptions in the past
135 	 * which resulted in random data in an inode which looked enough
136 	 * like a regular file for ext4 to try to delete it.  Things
137 	 * will go a bit crazy if that happens, but at least we should
138 	 * try not to panic the whole kernel. */
139 	if (needed < 2)
140 		needed = 2;
141 
142 	/* But we need to bound the transaction so we don't overflow the
143 	 * journal. */
144 	if (needed > EXT4_MAX_TRANS_DATA)
145 		needed = EXT4_MAX_TRANS_DATA;
146 
147 	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
148 }
149 
150 /*
151  * Truncate transactions can be complex and absolutely huge.  So we need to
152  * be able to restart the transaction at a conventient checkpoint to make
153  * sure we don't overflow the journal.
154  *
155  * start_transaction gets us a new handle for a truncate transaction,
156  * and extend_transaction tries to extend the existing one a bit.  If
157  * extend fails, we need to propagate the failure up and restart the
158  * transaction in the top-level truncate loop. --sct
159  */
160 static handle_t *start_transaction(struct inode *inode)
161 {
162 	handle_t *result;
163 
164 	result = ext4_journal_start(inode, blocks_for_truncate(inode));
165 	if (!IS_ERR(result))
166 		return result;
167 
168 	ext4_std_error(inode->i_sb, PTR_ERR(result));
169 	return result;
170 }
171 
172 /*
173  * Try to extend this transaction for the purposes of truncation.
174  *
175  * Returns 0 if we managed to create more room.  If we can't create more
176  * room, and the transaction must be restarted we return 1.
177  */
178 static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
179 {
180 	if (!ext4_handle_valid(handle))
181 		return 0;
182 	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
183 		return 0;
184 	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
185 		return 0;
186 	return 1;
187 }
188 
189 /*
190  * Restart the transaction associated with *handle.  This does a commit,
191  * so before we call here everything must be consistently dirtied against
192  * this transaction.
193  */
194 static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
195 {
196 	BUG_ON(EXT4_JOURNAL(inode) == NULL);
197 	jbd_debug(2, "restarting handle %p\n", handle);
198 	return ext4_journal_restart(handle, blocks_for_truncate(inode));
199 }
200 
201 /*
202  * Called at the last iput() if i_nlink is zero.
203  */
204 void ext4_delete_inode(struct inode *inode)
205 {
206 	handle_t *handle;
207 	int err;
208 
209 	if (ext4_should_order_data(inode))
210 		ext4_begin_ordered_truncate(inode, 0);
211 	truncate_inode_pages(&inode->i_data, 0);
212 
213 	if (is_bad_inode(inode))
214 		goto no_delete;
215 
216 	handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
217 	if (IS_ERR(handle)) {
218 		ext4_std_error(inode->i_sb, PTR_ERR(handle));
219 		/*
220 		 * If we're going to skip the normal cleanup, we still need to
221 		 * make sure that the in-core orphan linked list is properly
222 		 * cleaned up.
223 		 */
224 		ext4_orphan_del(NULL, inode);
225 		goto no_delete;
226 	}
227 
228 	if (IS_SYNC(inode))
229 		ext4_handle_sync(handle);
230 	inode->i_size = 0;
231 	err = ext4_mark_inode_dirty(handle, inode);
232 	if (err) {
233 		ext4_warning(inode->i_sb, __func__,
234 			     "couldn't mark inode dirty (err %d)", err);
235 		goto stop_handle;
236 	}
237 	if (inode->i_blocks)
238 		ext4_truncate(inode);
239 
240 	/*
241 	 * ext4_ext_truncate() doesn't reserve any slop when it
242 	 * restarts journal transactions; therefore there may not be
243 	 * enough credits left in the handle to remove the inode from
244 	 * the orphan list and set the dtime field.
245 	 */
246 	if (!ext4_handle_has_enough_credits(handle, 3)) {
247 		err = ext4_journal_extend(handle, 3);
248 		if (err > 0)
249 			err = ext4_journal_restart(handle, 3);
250 		if (err != 0) {
251 			ext4_warning(inode->i_sb, __func__,
252 				     "couldn't extend journal (err %d)", err);
253 		stop_handle:
254 			ext4_journal_stop(handle);
255 			goto no_delete;
256 		}
257 	}
258 
259 	/*
260 	 * Kill off the orphan record which ext4_truncate created.
261 	 * AKPM: I think this can be inside the above `if'.
262 	 * Note that ext4_orphan_del() has to be able to cope with the
263 	 * deletion of a non-existent orphan - this is because we don't
264 	 * know if ext4_truncate() actually created an orphan record.
265 	 * (Well, we could do this if we need to, but heck - it works)
266 	 */
267 	ext4_orphan_del(handle, inode);
268 	EXT4_I(inode)->i_dtime	= get_seconds();
269 
270 	/*
271 	 * One subtle ordering requirement: if anything has gone wrong
272 	 * (transaction abort, IO errors, whatever), then we can still
273 	 * do these next steps (the fs will already have been marked as
274 	 * having errors), but we can't free the inode if the mark_dirty
275 	 * fails.
276 	 */
277 	if (ext4_mark_inode_dirty(handle, inode))
278 		/* If that failed, just do the required in-core inode clear. */
279 		clear_inode(inode);
280 	else
281 		ext4_free_inode(handle, inode);
282 	ext4_journal_stop(handle);
283 	return;
284 no_delete:
285 	clear_inode(inode);	/* We must guarantee clearing of inode... */
286 }
287 
288 typedef struct {
289 	__le32	*p;
290 	__le32	key;
291 	struct buffer_head *bh;
292 } Indirect;
293 
294 static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
295 {
296 	p->key = *(p->p = v);
297 	p->bh = bh;
298 }
299 
300 /**
301  *	ext4_block_to_path - parse the block number into array of offsets
302  *	@inode: inode in question (we are only interested in its superblock)
303  *	@i_block: block number to be parsed
304  *	@offsets: array to store the offsets in
305  *	@boundary: set this non-zero if the referred-to block is likely to be
306  *	       followed (on disk) by an indirect block.
307  *
308  *	To store the locations of file's data ext4 uses a data structure common
309  *	for UNIX filesystems - tree of pointers anchored in the inode, with
310  *	data blocks at leaves and indirect blocks in intermediate nodes.
311  *	This function translates the block number into path in that tree -
312  *	return value is the path length and @offsets[n] is the offset of
313  *	pointer to (n+1)th node in the nth one. If @block is out of range
314  *	(negative or too large) warning is printed and zero returned.
315  *
316  *	Note: function doesn't find node addresses, so no IO is needed. All
317  *	we need to know is the capacity of indirect blocks (taken from the
318  *	inode->i_sb).
319  */
320 
321 /*
322  * Portability note: the last comparison (check that we fit into triple
323  * indirect block) is spelled differently, because otherwise on an
324  * architecture with 32-bit longs and 8Kb pages we might get into trouble
325  * if our filesystem had 8Kb blocks. We might use long long, but that would
326  * kill us on x86. Oh, well, at least the sign propagation does not matter -
327  * i_block would have to be negative in the very beginning, so we would not
328  * get there at all.
329  */
330 
331 static int ext4_block_to_path(struct inode *inode,
332 			ext4_lblk_t i_block,
333 			ext4_lblk_t offsets[4], int *boundary)
334 {
335 	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
336 	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
337 	const long direct_blocks = EXT4_NDIR_BLOCKS,
338 		indirect_blocks = ptrs,
339 		double_blocks = (1 << (ptrs_bits * 2));
340 	int n = 0;
341 	int final = 0;
342 
343 	if (i_block < 0) {
344 		ext4_warning(inode->i_sb, "ext4_block_to_path", "block < 0");
345 	} else if (i_block < direct_blocks) {
346 		offsets[n++] = i_block;
347 		final = direct_blocks;
348 	} else if ((i_block -= direct_blocks) < indirect_blocks) {
349 		offsets[n++] = EXT4_IND_BLOCK;
350 		offsets[n++] = i_block;
351 		final = ptrs;
352 	} else if ((i_block -= indirect_blocks) < double_blocks) {
353 		offsets[n++] = EXT4_DIND_BLOCK;
354 		offsets[n++] = i_block >> ptrs_bits;
355 		offsets[n++] = i_block & (ptrs - 1);
356 		final = ptrs;
357 	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
358 		offsets[n++] = EXT4_TIND_BLOCK;
359 		offsets[n++] = i_block >> (ptrs_bits * 2);
360 		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
361 		offsets[n++] = i_block & (ptrs - 1);
362 		final = ptrs;
363 	} else {
364 		ext4_warning(inode->i_sb, "ext4_block_to_path",
365 				"block %lu > max in inode %lu",
366 				i_block + direct_blocks +
367 				indirect_blocks + double_blocks, inode->i_ino);
368 	}
369 	if (boundary)
370 		*boundary = final - 1 - (i_block & (ptrs - 1));
371 	return n;
372 }
373 
374 static int __ext4_check_blockref(const char *function, struct inode *inode,
375 				 __le32 *p, unsigned int max) {
376 
377 	unsigned int maxblocks = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es);
378 	__le32 *bref = p;
379 	while (bref < p+max) {
380 		if (unlikely(le32_to_cpu(*bref) >= maxblocks)) {
381 			ext4_error(inode->i_sb, function,
382 				   "block reference %u >= max (%u) "
383 				   "in inode #%lu, offset=%d",
384 				   le32_to_cpu(*bref), maxblocks,
385 				   inode->i_ino, (int)(bref-p));
386  			return -EIO;
387  		}
388 		bref++;
389  	}
390  	return 0;
391 }
392 
393 
394 #define ext4_check_indirect_blockref(inode, bh)                         \
395         __ext4_check_blockref(__func__, inode, (__le32 *)(bh)->b_data,  \
396 			      EXT4_ADDR_PER_BLOCK((inode)->i_sb))
397 
398 #define ext4_check_inode_blockref(inode)                                \
399         __ext4_check_blockref(__func__, inode, EXT4_I(inode)->i_data,   \
400 			      EXT4_NDIR_BLOCKS)
401 
402 /**
403  *	ext4_get_branch - read the chain of indirect blocks leading to data
404  *	@inode: inode in question
405  *	@depth: depth of the chain (1 - direct pointer, etc.)
406  *	@offsets: offsets of pointers in inode/indirect blocks
407  *	@chain: place to store the result
408  *	@err: here we store the error value
409  *
410  *	Function fills the array of triples <key, p, bh> and returns %NULL
411  *	if everything went OK or the pointer to the last filled triple
412  *	(incomplete one) otherwise. Upon the return chain[i].key contains
413  *	the number of (i+1)-th block in the chain (as it is stored in memory,
414  *	i.e. little-endian 32-bit), chain[i].p contains the address of that
415  *	number (it points into struct inode for i==0 and into the bh->b_data
416  *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
417  *	block for i>0 and NULL for i==0. In other words, it holds the block
418  *	numbers of the chain, addresses they were taken from (and where we can
419  *	verify that chain did not change) and buffer_heads hosting these
420  *	numbers.
421  *
422  *	Function stops when it stumbles upon zero pointer (absent block)
423  *		(pointer to last triple returned, *@err == 0)
424  *	or when it gets an IO error reading an indirect block
425  *		(ditto, *@err == -EIO)
426  *	or when it reads all @depth-1 indirect blocks successfully and finds
427  *	the whole chain, all way to the data (returns %NULL, *err == 0).
428  *
429  *      Need to be called with
430  *      down_read(&EXT4_I(inode)->i_data_sem)
431  */
432 static Indirect *ext4_get_branch(struct inode *inode, int depth,
433 				 ext4_lblk_t  *offsets,
434 				 Indirect chain[4], int *err)
435 {
436 	struct super_block *sb = inode->i_sb;
437 	Indirect *p = chain;
438 	struct buffer_head *bh;
439 
440 	*err = 0;
441 	/* i_data is not going away, no lock needed */
442 	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
443 	if (!p->key)
444 		goto no_block;
445 	while (--depth) {
446 		bh = sb_getblk(sb, le32_to_cpu(p->key));
447 		if (unlikely(!bh))
448 			goto failure;
449 
450 		if (!bh_uptodate_or_lock(bh)) {
451 			if (bh_submit_read(bh) < 0) {
452 				put_bh(bh);
453 				goto failure;
454 			}
455 			/* validate block references */
456 			if (ext4_check_indirect_blockref(inode, bh)) {
457 				put_bh(bh);
458 				goto failure;
459 			}
460 		}
461 
462 		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
463 		/* Reader: end */
464 		if (!p->key)
465 			goto no_block;
466 	}
467 	return NULL;
468 
469 failure:
470 	*err = -EIO;
471 no_block:
472 	return p;
473 }
474 
475 /**
476  *	ext4_find_near - find a place for allocation with sufficient locality
477  *	@inode: owner
478  *	@ind: descriptor of indirect block.
479  *
480  *	This function returns the preferred place for block allocation.
481  *	It is used when heuristic for sequential allocation fails.
482  *	Rules are:
483  *	  + if there is a block to the left of our position - allocate near it.
484  *	  + if pointer will live in indirect block - allocate near that block.
485  *	  + if pointer will live in inode - allocate in the same
486  *	    cylinder group.
487  *
488  * In the latter case we colour the starting block by the callers PID to
489  * prevent it from clashing with concurrent allocations for a different inode
490  * in the same block group.   The PID is used here so that functionally related
491  * files will be close-by on-disk.
492  *
493  *	Caller must make sure that @ind is valid and will stay that way.
494  */
495 static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
496 {
497 	struct ext4_inode_info *ei = EXT4_I(inode);
498 	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
499 	__le32 *p;
500 	ext4_fsblk_t bg_start;
501 	ext4_fsblk_t last_block;
502 	ext4_grpblk_t colour;
503 	ext4_group_t block_group;
504 	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
505 
506 	/* Try to find previous block */
507 	for (p = ind->p - 1; p >= start; p--) {
508 		if (*p)
509 			return le32_to_cpu(*p);
510 	}
511 
512 	/* No such thing, so let's try location of indirect block */
513 	if (ind->bh)
514 		return ind->bh->b_blocknr;
515 
516 	/*
517 	 * It is going to be referred to from the inode itself? OK, just put it
518 	 * into the same cylinder group then.
519 	 */
520 	block_group = ei->i_block_group;
521 	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
522 		block_group &= ~(flex_size-1);
523 		if (S_ISREG(inode->i_mode))
524 			block_group++;
525 	}
526 	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
527 	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
528 
529 	/*
530 	 * If we are doing delayed allocation, we don't need take
531 	 * colour into account.
532 	 */
533 	if (test_opt(inode->i_sb, DELALLOC))
534 		return bg_start;
535 
536 	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
537 		colour = (current->pid % 16) *
538 			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
539 	else
540 		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
541 	return bg_start + colour;
542 }
543 
544 /**
545  *	ext4_find_goal - find a preferred place for allocation.
546  *	@inode: owner
547  *	@block:  block we want
548  *	@partial: pointer to the last triple within a chain
549  *
550  *	Normally this function find the preferred place for block allocation,
551  *	returns it.
552  */
553 static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
554 		Indirect *partial)
555 {
556 	/*
557 	 * XXX need to get goal block from mballoc's data structures
558 	 */
559 
560 	return ext4_find_near(inode, partial);
561 }
562 
563 /**
564  *	ext4_blks_to_allocate: Look up the block map and count the number
565  *	of direct blocks need to be allocated for the given branch.
566  *
567  *	@branch: chain of indirect blocks
568  *	@k: number of blocks need for indirect blocks
569  *	@blks: number of data blocks to be mapped.
570  *	@blocks_to_boundary:  the offset in the indirect block
571  *
572  *	return the total number of blocks to be allocate, including the
573  *	direct and indirect blocks.
574  */
575 static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
576 		int blocks_to_boundary)
577 {
578 	unsigned int count = 0;
579 
580 	/*
581 	 * Simple case, [t,d]Indirect block(s) has not allocated yet
582 	 * then it's clear blocks on that path have not allocated
583 	 */
584 	if (k > 0) {
585 		/* right now we don't handle cross boundary allocation */
586 		if (blks < blocks_to_boundary + 1)
587 			count += blks;
588 		else
589 			count += blocks_to_boundary + 1;
590 		return count;
591 	}
592 
593 	count++;
594 	while (count < blks && count <= blocks_to_boundary &&
595 		le32_to_cpu(*(branch[0].p + count)) == 0) {
596 		count++;
597 	}
598 	return count;
599 }
600 
601 /**
602  *	ext4_alloc_blocks: multiple allocate blocks needed for a branch
603  *	@indirect_blks: the number of blocks need to allocate for indirect
604  *			blocks
605  *
606  *	@new_blocks: on return it will store the new block numbers for
607  *	the indirect blocks(if needed) and the first direct block,
608  *	@blks:	on return it will store the total number of allocated
609  *		direct blocks
610  */
611 static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
612 				ext4_lblk_t iblock, ext4_fsblk_t goal,
613 				int indirect_blks, int blks,
614 				ext4_fsblk_t new_blocks[4], int *err)
615 {
616 	struct ext4_allocation_request ar;
617 	int target, i;
618 	unsigned long count = 0, blk_allocated = 0;
619 	int index = 0;
620 	ext4_fsblk_t current_block = 0;
621 	int ret = 0;
622 
623 	/*
624 	 * Here we try to allocate the requested multiple blocks at once,
625 	 * on a best-effort basis.
626 	 * To build a branch, we should allocate blocks for
627 	 * the indirect blocks(if not allocated yet), and at least
628 	 * the first direct block of this branch.  That's the
629 	 * minimum number of blocks need to allocate(required)
630 	 */
631 	/* first we try to allocate the indirect blocks */
632 	target = indirect_blks;
633 	while (target > 0) {
634 		count = target;
635 		/* allocating blocks for indirect blocks and direct blocks */
636 		current_block = ext4_new_meta_blocks(handle, inode,
637 							goal, &count, err);
638 		if (*err)
639 			goto failed_out;
640 
641 		target -= count;
642 		/* allocate blocks for indirect blocks */
643 		while (index < indirect_blks && count) {
644 			new_blocks[index++] = current_block++;
645 			count--;
646 		}
647 		if (count > 0) {
648 			/*
649 			 * save the new block number
650 			 * for the first direct block
651 			 */
652 			new_blocks[index] = current_block;
653 			printk(KERN_INFO "%s returned more blocks than "
654 						"requested\n", __func__);
655 			WARN_ON(1);
656 			break;
657 		}
658 	}
659 
660 	target = blks - count ;
661 	blk_allocated = count;
662 	if (!target)
663 		goto allocated;
664 	/* Now allocate data blocks */
665 	memset(&ar, 0, sizeof(ar));
666 	ar.inode = inode;
667 	ar.goal = goal;
668 	ar.len = target;
669 	ar.logical = iblock;
670 	if (S_ISREG(inode->i_mode))
671 		/* enable in-core preallocation only for regular files */
672 		ar.flags = EXT4_MB_HINT_DATA;
673 
674 	current_block = ext4_mb_new_blocks(handle, &ar, err);
675 
676 	if (*err && (target == blks)) {
677 		/*
678 		 * if the allocation failed and we didn't allocate
679 		 * any blocks before
680 		 */
681 		goto failed_out;
682 	}
683 	if (!*err) {
684 		if (target == blks) {
685 		/*
686 		 * save the new block number
687 		 * for the first direct block
688 		 */
689 			new_blocks[index] = current_block;
690 		}
691 		blk_allocated += ar.len;
692 	}
693 allocated:
694 	/* total number of blocks allocated for direct blocks */
695 	ret = blk_allocated;
696 	*err = 0;
697 	return ret;
698 failed_out:
699 	for (i = 0; i < index; i++)
700 		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
701 	return ret;
702 }
703 
704 /**
705  *	ext4_alloc_branch - allocate and set up a chain of blocks.
706  *	@inode: owner
707  *	@indirect_blks: number of allocated indirect blocks
708  *	@blks: number of allocated direct blocks
709  *	@offsets: offsets (in the blocks) to store the pointers to next.
710  *	@branch: place to store the chain in.
711  *
712  *	This function allocates blocks, zeroes out all but the last one,
713  *	links them into chain and (if we are synchronous) writes them to disk.
714  *	In other words, it prepares a branch that can be spliced onto the
715  *	inode. It stores the information about that chain in the branch[], in
716  *	the same format as ext4_get_branch() would do. We are calling it after
717  *	we had read the existing part of chain and partial points to the last
718  *	triple of that (one with zero ->key). Upon the exit we have the same
719  *	picture as after the successful ext4_get_block(), except that in one
720  *	place chain is disconnected - *branch->p is still zero (we did not
721  *	set the last link), but branch->key contains the number that should
722  *	be placed into *branch->p to fill that gap.
723  *
724  *	If allocation fails we free all blocks we've allocated (and forget
725  *	their buffer_heads) and return the error value the from failed
726  *	ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
727  *	as described above and return 0.
728  */
729 static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
730 				ext4_lblk_t iblock, int indirect_blks,
731 				int *blks, ext4_fsblk_t goal,
732 				ext4_lblk_t *offsets, Indirect *branch)
733 {
734 	int blocksize = inode->i_sb->s_blocksize;
735 	int i, n = 0;
736 	int err = 0;
737 	struct buffer_head *bh;
738 	int num;
739 	ext4_fsblk_t new_blocks[4];
740 	ext4_fsblk_t current_block;
741 
742 	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
743 				*blks, new_blocks, &err);
744 	if (err)
745 		return err;
746 
747 	branch[0].key = cpu_to_le32(new_blocks[0]);
748 	/*
749 	 * metadata blocks and data blocks are allocated.
750 	 */
751 	for (n = 1; n <= indirect_blks;  n++) {
752 		/*
753 		 * Get buffer_head for parent block, zero it out
754 		 * and set the pointer to new one, then send
755 		 * parent to disk.
756 		 */
757 		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
758 		branch[n].bh = bh;
759 		lock_buffer(bh);
760 		BUFFER_TRACE(bh, "call get_create_access");
761 		err = ext4_journal_get_create_access(handle, bh);
762 		if (err) {
763 			unlock_buffer(bh);
764 			brelse(bh);
765 			goto failed;
766 		}
767 
768 		memset(bh->b_data, 0, blocksize);
769 		branch[n].p = (__le32 *) bh->b_data + offsets[n];
770 		branch[n].key = cpu_to_le32(new_blocks[n]);
771 		*branch[n].p = branch[n].key;
772 		if (n == indirect_blks) {
773 			current_block = new_blocks[n];
774 			/*
775 			 * End of chain, update the last new metablock of
776 			 * the chain to point to the new allocated
777 			 * data blocks numbers
778 			 */
779 			for (i=1; i < num; i++)
780 				*(branch[n].p + i) = cpu_to_le32(++current_block);
781 		}
782 		BUFFER_TRACE(bh, "marking uptodate");
783 		set_buffer_uptodate(bh);
784 		unlock_buffer(bh);
785 
786 		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
787 		err = ext4_handle_dirty_metadata(handle, inode, bh);
788 		if (err)
789 			goto failed;
790 	}
791 	*blks = num;
792 	return err;
793 failed:
794 	/* Allocation failed, free what we already allocated */
795 	for (i = 1; i <= n ; i++) {
796 		BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
797 		ext4_journal_forget(handle, branch[i].bh);
798 	}
799 	for (i = 0; i < indirect_blks; i++)
800 		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
801 
802 	ext4_free_blocks(handle, inode, new_blocks[i], num, 0);
803 
804 	return err;
805 }
806 
807 /**
808  * ext4_splice_branch - splice the allocated branch onto inode.
809  * @inode: owner
810  * @block: (logical) number of block we are adding
811  * @chain: chain of indirect blocks (with a missing link - see
812  *	ext4_alloc_branch)
813  * @where: location of missing link
814  * @num:   number of indirect blocks we are adding
815  * @blks:  number of direct blocks we are adding
816  *
817  * This function fills the missing link and does all housekeeping needed in
818  * inode (->i_blocks, etc.). In case of success we end up with the full
819  * chain to new block and return 0.
820  */
821 static int ext4_splice_branch(handle_t *handle, struct inode *inode,
822 			ext4_lblk_t block, Indirect *where, int num, int blks)
823 {
824 	int i;
825 	int err = 0;
826 	ext4_fsblk_t current_block;
827 
828 	/*
829 	 * If we're splicing into a [td]indirect block (as opposed to the
830 	 * inode) then we need to get write access to the [td]indirect block
831 	 * before the splice.
832 	 */
833 	if (where->bh) {
834 		BUFFER_TRACE(where->bh, "get_write_access");
835 		err = ext4_journal_get_write_access(handle, where->bh);
836 		if (err)
837 			goto err_out;
838 	}
839 	/* That's it */
840 
841 	*where->p = where->key;
842 
843 	/*
844 	 * Update the host buffer_head or inode to point to more just allocated
845 	 * direct blocks blocks
846 	 */
847 	if (num == 0 && blks > 1) {
848 		current_block = le32_to_cpu(where->key) + 1;
849 		for (i = 1; i < blks; i++)
850 			*(where->p + i) = cpu_to_le32(current_block++);
851 	}
852 
853 	/* We are done with atomic stuff, now do the rest of housekeeping */
854 
855 	inode->i_ctime = ext4_current_time(inode);
856 	ext4_mark_inode_dirty(handle, inode);
857 
858 	/* had we spliced it onto indirect block? */
859 	if (where->bh) {
860 		/*
861 		 * If we spliced it onto an indirect block, we haven't
862 		 * altered the inode.  Note however that if it is being spliced
863 		 * onto an indirect block at the very end of the file (the
864 		 * file is growing) then we *will* alter the inode to reflect
865 		 * the new i_size.  But that is not done here - it is done in
866 		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
867 		 */
868 		jbd_debug(5, "splicing indirect only\n");
869 		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
870 		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
871 		if (err)
872 			goto err_out;
873 	} else {
874 		/*
875 		 * OK, we spliced it into the inode itself on a direct block.
876 		 * Inode was dirtied above.
877 		 */
878 		jbd_debug(5, "splicing direct\n");
879 	}
880 	return err;
881 
882 err_out:
883 	for (i = 1; i <= num; i++) {
884 		BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
885 		ext4_journal_forget(handle, where[i].bh);
886 		ext4_free_blocks(handle, inode,
887 					le32_to_cpu(where[i-1].key), 1, 0);
888 	}
889 	ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 0);
890 
891 	return err;
892 }
893 
894 /*
895  * Allocation strategy is simple: if we have to allocate something, we will
896  * have to go the whole way to leaf. So let's do it before attaching anything
897  * to tree, set linkage between the newborn blocks, write them if sync is
898  * required, recheck the path, free and repeat if check fails, otherwise
899  * set the last missing link (that will protect us from any truncate-generated
900  * removals - all blocks on the path are immune now) and possibly force the
901  * write on the parent block.
902  * That has a nice additional property: no special recovery from the failed
903  * allocations is needed - we simply release blocks and do not touch anything
904  * reachable from inode.
905  *
906  * `handle' can be NULL if create == 0.
907  *
908  * return > 0, # of blocks mapped or allocated.
909  * return = 0, if plain lookup failed.
910  * return < 0, error case.
911  *
912  *
913  * Need to be called with
914  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
915  * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
916  */
917 static int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
918 				  ext4_lblk_t iblock, unsigned int maxblocks,
919 				  struct buffer_head *bh_result,
920 				  int create, int extend_disksize)
921 {
922 	int err = -EIO;
923 	ext4_lblk_t offsets[4];
924 	Indirect chain[4];
925 	Indirect *partial;
926 	ext4_fsblk_t goal;
927 	int indirect_blks;
928 	int blocks_to_boundary = 0;
929 	int depth;
930 	struct ext4_inode_info *ei = EXT4_I(inode);
931 	int count = 0;
932 	ext4_fsblk_t first_block = 0;
933 	loff_t disksize;
934 
935 
936 	J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
937 	J_ASSERT(handle != NULL || create == 0);
938 	depth = ext4_block_to_path(inode, iblock, offsets,
939 					&blocks_to_boundary);
940 
941 	if (depth == 0)
942 		goto out;
943 
944 	partial = ext4_get_branch(inode, depth, offsets, chain, &err);
945 
946 	/* Simplest case - block found, no allocation needed */
947 	if (!partial) {
948 		first_block = le32_to_cpu(chain[depth - 1].key);
949 		clear_buffer_new(bh_result);
950 		count++;
951 		/*map more blocks*/
952 		while (count < maxblocks && count <= blocks_to_boundary) {
953 			ext4_fsblk_t blk;
954 
955 			blk = le32_to_cpu(*(chain[depth-1].p + count));
956 
957 			if (blk == first_block + count)
958 				count++;
959 			else
960 				break;
961 		}
962 		goto got_it;
963 	}
964 
965 	/* Next simple case - plain lookup or failed read of indirect block */
966 	if (!create || err == -EIO)
967 		goto cleanup;
968 
969 	/*
970 	 * Okay, we need to do block allocation.
971 	*/
972 	goal = ext4_find_goal(inode, iblock, partial);
973 
974 	/* the number of blocks need to allocate for [d,t]indirect blocks */
975 	indirect_blks = (chain + depth) - partial - 1;
976 
977 	/*
978 	 * Next look up the indirect map to count the totoal number of
979 	 * direct blocks to allocate for this branch.
980 	 */
981 	count = ext4_blks_to_allocate(partial, indirect_blks,
982 					maxblocks, blocks_to_boundary);
983 	/*
984 	 * Block out ext4_truncate while we alter the tree
985 	 */
986 	err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
987 					&count, goal,
988 					offsets + (partial - chain), partial);
989 
990 	/*
991 	 * The ext4_splice_branch call will free and forget any buffers
992 	 * on the new chain if there is a failure, but that risks using
993 	 * up transaction credits, especially for bitmaps where the
994 	 * credits cannot be returned.  Can we handle this somehow?  We
995 	 * may need to return -EAGAIN upwards in the worst case.  --sct
996 	 */
997 	if (!err)
998 		err = ext4_splice_branch(handle, inode, iblock,
999 					partial, indirect_blks, count);
1000 	/*
1001 	 * i_disksize growing is protected by i_data_sem.  Don't forget to
1002 	 * protect it if you're about to implement concurrent
1003 	 * ext4_get_block() -bzzz
1004 	*/
1005 	if (!err && extend_disksize) {
1006 		disksize = ((loff_t) iblock + count) << inode->i_blkbits;
1007 		if (disksize > i_size_read(inode))
1008 			disksize = i_size_read(inode);
1009 		if (disksize > ei->i_disksize)
1010 			ei->i_disksize = disksize;
1011 	}
1012 	if (err)
1013 		goto cleanup;
1014 
1015 	set_buffer_new(bh_result);
1016 got_it:
1017 	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
1018 	if (count > blocks_to_boundary)
1019 		set_buffer_boundary(bh_result);
1020 	err = count;
1021 	/* Clean up and exit */
1022 	partial = chain + depth - 1;	/* the whole chain */
1023 cleanup:
1024 	while (partial > chain) {
1025 		BUFFER_TRACE(partial->bh, "call brelse");
1026 		brelse(partial->bh);
1027 		partial--;
1028 	}
1029 	BUFFER_TRACE(bh_result, "returned");
1030 out:
1031 	return err;
1032 }
1033 
1034 qsize_t ext4_get_reserved_space(struct inode *inode)
1035 {
1036 	unsigned long long total;
1037 
1038 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1039 	total = EXT4_I(inode)->i_reserved_data_blocks +
1040 		EXT4_I(inode)->i_reserved_meta_blocks;
1041 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1042 
1043 	return total;
1044 }
1045 /*
1046  * Calculate the number of metadata blocks need to reserve
1047  * to allocate @blocks for non extent file based file
1048  */
1049 static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
1050 {
1051 	int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
1052 	int ind_blks, dind_blks, tind_blks;
1053 
1054 	/* number of new indirect blocks needed */
1055 	ind_blks = (blocks + icap - 1) / icap;
1056 
1057 	dind_blks = (ind_blks + icap - 1) / icap;
1058 
1059 	tind_blks = 1;
1060 
1061 	return ind_blks + dind_blks + tind_blks;
1062 }
1063 
1064 /*
1065  * Calculate the number of metadata blocks need to reserve
1066  * to allocate given number of blocks
1067  */
1068 static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
1069 {
1070 	if (!blocks)
1071 		return 0;
1072 
1073 	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
1074 		return ext4_ext_calc_metadata_amount(inode, blocks);
1075 
1076 	return ext4_indirect_calc_metadata_amount(inode, blocks);
1077 }
1078 
1079 static void ext4_da_update_reserve_space(struct inode *inode, int used)
1080 {
1081 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1082 	int total, mdb, mdb_free;
1083 
1084 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1085 	/* recalculate the number of metablocks still need to be reserved */
1086 	total = EXT4_I(inode)->i_reserved_data_blocks - used;
1087 	mdb = ext4_calc_metadata_amount(inode, total);
1088 
1089 	/* figure out how many metablocks to release */
1090 	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
1091 	mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;
1092 
1093 	if (mdb_free) {
1094 		/* Account for allocated meta_blocks */
1095 		mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;
1096 
1097 		/* update fs dirty blocks counter */
1098 		percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
1099 		EXT4_I(inode)->i_allocated_meta_blocks = 0;
1100 		EXT4_I(inode)->i_reserved_meta_blocks = mdb;
1101 	}
1102 
1103 	/* update per-inode reservations */
1104 	BUG_ON(used  > EXT4_I(inode)->i_reserved_data_blocks);
1105 	EXT4_I(inode)->i_reserved_data_blocks -= used;
1106 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1107 
1108 	/*
1109 	 * free those over-booking quota for metadata blocks
1110 	 */
1111 	if (mdb_free)
1112 		vfs_dq_release_reservation_block(inode, mdb_free);
1113 
1114 	/*
1115 	 * If we have done all the pending block allocations and if
1116 	 * there aren't any writers on the inode, we can discard the
1117 	 * inode's preallocations.
1118 	 */
1119 	if (!total && (atomic_read(&inode->i_writecount) == 0))
1120 		ext4_discard_preallocations(inode);
1121 }
1122 
1123 /*
1124  * The ext4_get_blocks_wrap() function try to look up the requested blocks,
1125  * and returns if the blocks are already mapped.
1126  *
1127  * Otherwise it takes the write lock of the i_data_sem and allocate blocks
1128  * and store the allocated blocks in the result buffer head and mark it
1129  * mapped.
1130  *
1131  * If file type is extents based, it will call ext4_ext_get_blocks(),
1132  * Otherwise, call with ext4_get_blocks_handle() to handle indirect mapping
1133  * based files
1134  *
1135  * On success, it returns the number of blocks being mapped or allocate.
1136  * if create==0 and the blocks are pre-allocated and uninitialized block,
1137  * the result buffer head is unmapped. If the create ==1, it will make sure
1138  * the buffer head is mapped.
1139  *
1140  * It returns 0 if plain look up failed (blocks have not been allocated), in
1141  * that casem, buffer head is unmapped
1142  *
1143  * It returns the error in case of allocation failure.
1144  */
1145 int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
1146 			unsigned int max_blocks, struct buffer_head *bh,
1147 			int create, int extend_disksize, int flag)
1148 {
1149 	int retval;
1150 
1151 	clear_buffer_mapped(bh);
1152 	clear_buffer_unwritten(bh);
1153 
1154 	/*
1155 	 * Try to see if we can get  the block without requesting
1156 	 * for new file system block.
1157 	 */
1158 	down_read((&EXT4_I(inode)->i_data_sem));
1159 	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
1160 		retval =  ext4_ext_get_blocks(handle, inode, block, max_blocks,
1161 				bh, 0, 0);
1162 	} else {
1163 		retval = ext4_get_blocks_handle(handle,
1164 				inode, block, max_blocks, bh, 0, 0);
1165 	}
1166 	up_read((&EXT4_I(inode)->i_data_sem));
1167 
1168 	/* If it is only a block(s) look up */
1169 	if (!create)
1170 		return retval;
1171 
1172 	/*
1173 	 * Returns if the blocks have already allocated
1174 	 *
1175 	 * Note that if blocks have been preallocated
1176 	 * ext4_ext_get_block() returns th create = 0
1177 	 * with buffer head unmapped.
1178 	 */
1179 	if (retval > 0 && buffer_mapped(bh))
1180 		return retval;
1181 
1182 	/*
1183 	 * When we call get_blocks without the create flag, the
1184 	 * BH_Unwritten flag could have gotten set if the blocks
1185 	 * requested were part of a uninitialized extent.  We need to
1186 	 * clear this flag now that we are committed to convert all or
1187 	 * part of the uninitialized extent to be an initialized
1188 	 * extent.  This is because we need to avoid the combination
1189 	 * of BH_Unwritten and BH_Mapped flags being simultaneously
1190 	 * set on the buffer_head.
1191 	 */
1192 	clear_buffer_unwritten(bh);
1193 
1194 	/*
1195 	 * New blocks allocate and/or writing to uninitialized extent
1196 	 * will possibly result in updating i_data, so we take
1197 	 * the write lock of i_data_sem, and call get_blocks()
1198 	 * with create == 1 flag.
1199 	 */
1200 	down_write((&EXT4_I(inode)->i_data_sem));
1201 
1202 	/*
1203 	 * if the caller is from delayed allocation writeout path
1204 	 * we have already reserved fs blocks for allocation
1205 	 * let the underlying get_block() function know to
1206 	 * avoid double accounting
1207 	 */
1208 	if (flag)
1209 		EXT4_I(inode)->i_delalloc_reserved_flag = 1;
1210 	/*
1211 	 * We need to check for EXT4 here because migrate
1212 	 * could have changed the inode type in between
1213 	 */
1214 	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
1215 		retval =  ext4_ext_get_blocks(handle, inode, block, max_blocks,
1216 				bh, create, extend_disksize);
1217 	} else {
1218 		retval = ext4_get_blocks_handle(handle, inode, block,
1219 				max_blocks, bh, create, extend_disksize);
1220 
1221 		if (retval > 0 && buffer_new(bh)) {
1222 			/*
1223 			 * We allocated new blocks which will result in
1224 			 * i_data's format changing.  Force the migrate
1225 			 * to fail by clearing migrate flags
1226 			 */
1227 			EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags &
1228 							~EXT4_EXT_MIGRATE;
1229 		}
1230 	}
1231 
1232 	if (flag) {
1233 		EXT4_I(inode)->i_delalloc_reserved_flag = 0;
1234 		/*
1235 		 * Update reserved blocks/metadata blocks
1236 		 * after successful block allocation
1237 		 * which were deferred till now
1238 		 */
1239 		if ((retval > 0) && buffer_delay(bh))
1240 			ext4_da_update_reserve_space(inode, retval);
1241 	}
1242 
1243 	up_write((&EXT4_I(inode)->i_data_sem));
1244 	return retval;
1245 }
1246 
1247 /* Maximum number of blocks we map for direct IO at once. */
1248 #define DIO_MAX_BLOCKS 4096
1249 
1250 int ext4_get_block(struct inode *inode, sector_t iblock,
1251 		   struct buffer_head *bh_result, int create)
1252 {
1253 	handle_t *handle = ext4_journal_current_handle();
1254 	int ret = 0, started = 0;
1255 	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
1256 	int dio_credits;
1257 
1258 	if (create && !handle) {
1259 		/* Direct IO write... */
1260 		if (max_blocks > DIO_MAX_BLOCKS)
1261 			max_blocks = DIO_MAX_BLOCKS;
1262 		dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
1263 		handle = ext4_journal_start(inode, dio_credits);
1264 		if (IS_ERR(handle)) {
1265 			ret = PTR_ERR(handle);
1266 			goto out;
1267 		}
1268 		started = 1;
1269 	}
1270 
1271 	ret = ext4_get_blocks_wrap(handle, inode, iblock,
1272 					max_blocks, bh_result, create, 0, 0);
1273 	if (ret > 0) {
1274 		bh_result->b_size = (ret << inode->i_blkbits);
1275 		ret = 0;
1276 	}
1277 	if (started)
1278 		ext4_journal_stop(handle);
1279 out:
1280 	return ret;
1281 }
1282 
1283 /*
1284  * `handle' can be NULL if create is zero
1285  */
1286 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
1287 				ext4_lblk_t block, int create, int *errp)
1288 {
1289 	struct buffer_head dummy;
1290 	int fatal = 0, err;
1291 
1292 	J_ASSERT(handle != NULL || create == 0);
1293 
1294 	dummy.b_state = 0;
1295 	dummy.b_blocknr = -1000;
1296 	buffer_trace_init(&dummy.b_history);
1297 	err = ext4_get_blocks_wrap(handle, inode, block, 1,
1298 					&dummy, create, 1, 0);
1299 	/*
1300 	 * ext4_get_blocks_handle() returns number of blocks
1301 	 * mapped. 0 in case of a HOLE.
1302 	 */
1303 	if (err > 0) {
1304 		if (err > 1)
1305 			WARN_ON(1);
1306 		err = 0;
1307 	}
1308 	*errp = err;
1309 	if (!err && buffer_mapped(&dummy)) {
1310 		struct buffer_head *bh;
1311 		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
1312 		if (!bh) {
1313 			*errp = -EIO;
1314 			goto err;
1315 		}
1316 		if (buffer_new(&dummy)) {
1317 			J_ASSERT(create != 0);
1318 			J_ASSERT(handle != NULL);
1319 
1320 			/*
1321 			 * Now that we do not always journal data, we should
1322 			 * keep in mind whether this should always journal the
1323 			 * new buffer as metadata.  For now, regular file
1324 			 * writes use ext4_get_block instead, so it's not a
1325 			 * problem.
1326 			 */
1327 			lock_buffer(bh);
1328 			BUFFER_TRACE(bh, "call get_create_access");
1329 			fatal = ext4_journal_get_create_access(handle, bh);
1330 			if (!fatal && !buffer_uptodate(bh)) {
1331 				memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1332 				set_buffer_uptodate(bh);
1333 			}
1334 			unlock_buffer(bh);
1335 			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
1336 			err = ext4_handle_dirty_metadata(handle, inode, bh);
1337 			if (!fatal)
1338 				fatal = err;
1339 		} else {
1340 			BUFFER_TRACE(bh, "not a new buffer");
1341 		}
1342 		if (fatal) {
1343 			*errp = fatal;
1344 			brelse(bh);
1345 			bh = NULL;
1346 		}
1347 		return bh;
1348 	}
1349 err:
1350 	return NULL;
1351 }
1352 
1353 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
1354 			       ext4_lblk_t block, int create, int *err)
1355 {
1356 	struct buffer_head *bh;
1357 
1358 	bh = ext4_getblk(handle, inode, block, create, err);
1359 	if (!bh)
1360 		return bh;
1361 	if (buffer_uptodate(bh))
1362 		return bh;
1363 	ll_rw_block(READ_META, 1, &bh);
1364 	wait_on_buffer(bh);
1365 	if (buffer_uptodate(bh))
1366 		return bh;
1367 	put_bh(bh);
1368 	*err = -EIO;
1369 	return NULL;
1370 }
1371 
1372 static int walk_page_buffers(handle_t *handle,
1373 			     struct buffer_head *head,
1374 			     unsigned from,
1375 			     unsigned to,
1376 			     int *partial,
1377 			     int (*fn)(handle_t *handle,
1378 				       struct buffer_head *bh))
1379 {
1380 	struct buffer_head *bh;
1381 	unsigned block_start, block_end;
1382 	unsigned blocksize = head->b_size;
1383 	int err, ret = 0;
1384 	struct buffer_head *next;
1385 
1386 	for (bh = head, block_start = 0;
1387 	     ret == 0 && (bh != head || !block_start);
1388 	     block_start = block_end, bh = next)
1389 	{
1390 		next = bh->b_this_page;
1391 		block_end = block_start + blocksize;
1392 		if (block_end <= from || block_start >= to) {
1393 			if (partial && !buffer_uptodate(bh))
1394 				*partial = 1;
1395 			continue;
1396 		}
1397 		err = (*fn)(handle, bh);
1398 		if (!ret)
1399 			ret = err;
1400 	}
1401 	return ret;
1402 }
1403 
1404 /*
1405  * To preserve ordering, it is essential that the hole instantiation and
1406  * the data write be encapsulated in a single transaction.  We cannot
1407  * close off a transaction and start a new one between the ext4_get_block()
1408  * and the commit_write().  So doing the jbd2_journal_start at the start of
1409  * prepare_write() is the right place.
1410  *
1411  * Also, this function can nest inside ext4_writepage() ->
1412  * block_write_full_page(). In that case, we *know* that ext4_writepage()
1413  * has generated enough buffer credits to do the whole page.  So we won't
1414  * block on the journal in that case, which is good, because the caller may
1415  * be PF_MEMALLOC.
1416  *
1417  * By accident, ext4 can be reentered when a transaction is open via
1418  * quota file writes.  If we were to commit the transaction while thus
1419  * reentered, there can be a deadlock - we would be holding a quota
1420  * lock, and the commit would never complete if another thread had a
1421  * transaction open and was blocking on the quota lock - a ranking
1422  * violation.
1423  *
1424  * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
1425  * will _not_ run commit under these circumstances because handle->h_ref
1426  * is elevated.  We'll still have enough credits for the tiny quotafile
1427  * write.
1428  */
1429 static int do_journal_get_write_access(handle_t *handle,
1430 					struct buffer_head *bh)
1431 {
1432 	if (!buffer_mapped(bh) || buffer_freed(bh))
1433 		return 0;
1434 	return ext4_journal_get_write_access(handle, bh);
1435 }
1436 
1437 static int ext4_write_begin(struct file *file, struct address_space *mapping,
1438 				loff_t pos, unsigned len, unsigned flags,
1439 				struct page **pagep, void **fsdata)
1440 {
1441 	struct inode *inode = mapping->host;
1442 	int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
1443 	handle_t *handle;
1444 	int retries = 0;
1445 	struct page *page;
1446  	pgoff_t index;
1447 	unsigned from, to;
1448 
1449 	trace_mark(ext4_write_begin,
1450 		   "dev %s ino %lu pos %llu len %u flags %u",
1451 		   inode->i_sb->s_id, inode->i_ino,
1452 		   (unsigned long long) pos, len, flags);
1453  	index = pos >> PAGE_CACHE_SHIFT;
1454 	from = pos & (PAGE_CACHE_SIZE - 1);
1455 	to = from + len;
1456 
1457 retry:
1458 	handle = ext4_journal_start(inode, needed_blocks);
1459 	if (IS_ERR(handle)) {
1460 		ret = PTR_ERR(handle);
1461 		goto out;
1462 	}
1463 
1464 	/* We cannot recurse into the filesystem as the transaction is already
1465 	 * started */
1466 	flags |= AOP_FLAG_NOFS;
1467 
1468 	page = grab_cache_page_write_begin(mapping, index, flags);
1469 	if (!page) {
1470 		ext4_journal_stop(handle);
1471 		ret = -ENOMEM;
1472 		goto out;
1473 	}
1474 	*pagep = page;
1475 
1476 	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
1477 				ext4_get_block);
1478 
1479 	if (!ret && ext4_should_journal_data(inode)) {
1480 		ret = walk_page_buffers(handle, page_buffers(page),
1481 				from, to, NULL, do_journal_get_write_access);
1482 	}
1483 
1484 	if (ret) {
1485 		unlock_page(page);
1486 		ext4_journal_stop(handle);
1487 		page_cache_release(page);
1488 		/*
1489 		 * block_write_begin may have instantiated a few blocks
1490 		 * outside i_size.  Trim these off again. Don't need
1491 		 * i_size_read because we hold i_mutex.
1492 		 */
1493 		if (pos + len > inode->i_size)
1494 			vmtruncate(inode, inode->i_size);
1495 	}
1496 
1497 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
1498 		goto retry;
1499 out:
1500 	return ret;
1501 }
1502 
1503 /* For write_end() in data=journal mode */
1504 static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1505 {
1506 	if (!buffer_mapped(bh) || buffer_freed(bh))
1507 		return 0;
1508 	set_buffer_uptodate(bh);
1509 	return ext4_handle_dirty_metadata(handle, NULL, bh);
1510 }
1511 
1512 /*
1513  * We need to pick up the new inode size which generic_commit_write gave us
1514  * `file' can be NULL - eg, when called from page_symlink().
1515  *
1516  * ext4 never places buffers on inode->i_mapping->private_list.  metadata
1517  * buffers are managed internally.
1518  */
1519 static int ext4_ordered_write_end(struct file *file,
1520 				struct address_space *mapping,
1521 				loff_t pos, unsigned len, unsigned copied,
1522 				struct page *page, void *fsdata)
1523 {
1524 	handle_t *handle = ext4_journal_current_handle();
1525 	struct inode *inode = mapping->host;
1526 	int ret = 0, ret2;
1527 
1528 	trace_mark(ext4_ordered_write_end,
1529 		   "dev %s ino %lu pos %llu len %u copied %u",
1530 		   inode->i_sb->s_id, inode->i_ino,
1531 		   (unsigned long long) pos, len, copied);
1532 	ret = ext4_jbd2_file_inode(handle, inode);
1533 
1534 	if (ret == 0) {
1535 		loff_t new_i_size;
1536 
1537 		new_i_size = pos + copied;
1538 		if (new_i_size > EXT4_I(inode)->i_disksize) {
1539 			ext4_update_i_disksize(inode, new_i_size);
1540 			/* We need to mark inode dirty even if
1541 			 * new_i_size is less that inode->i_size
1542 			 * bu greater than i_disksize.(hint delalloc)
1543 			 */
1544 			ext4_mark_inode_dirty(handle, inode);
1545 		}
1546 
1547 		ret2 = generic_write_end(file, mapping, pos, len, copied,
1548 							page, fsdata);
1549 		copied = ret2;
1550 		if (ret2 < 0)
1551 			ret = ret2;
1552 	}
1553 	ret2 = ext4_journal_stop(handle);
1554 	if (!ret)
1555 		ret = ret2;
1556 
1557 	return ret ? ret : copied;
1558 }
1559 
1560 static int ext4_writeback_write_end(struct file *file,
1561 				struct address_space *mapping,
1562 				loff_t pos, unsigned len, unsigned copied,
1563 				struct page *page, void *fsdata)
1564 {
1565 	handle_t *handle = ext4_journal_current_handle();
1566 	struct inode *inode = mapping->host;
1567 	int ret = 0, ret2;
1568 	loff_t new_i_size;
1569 
1570 	trace_mark(ext4_writeback_write_end,
1571 		   "dev %s ino %lu pos %llu len %u copied %u",
1572 		   inode->i_sb->s_id, inode->i_ino,
1573 		   (unsigned long long) pos, len, copied);
1574 	new_i_size = pos + copied;
1575 	if (new_i_size > EXT4_I(inode)->i_disksize) {
1576 		ext4_update_i_disksize(inode, new_i_size);
1577 		/* We need to mark inode dirty even if
1578 		 * new_i_size is less that inode->i_size
1579 		 * bu greater than i_disksize.(hint delalloc)
1580 		 */
1581 		ext4_mark_inode_dirty(handle, inode);
1582 	}
1583 
1584 	ret2 = generic_write_end(file, mapping, pos, len, copied,
1585 							page, fsdata);
1586 	copied = ret2;
1587 	if (ret2 < 0)
1588 		ret = ret2;
1589 
1590 	ret2 = ext4_journal_stop(handle);
1591 	if (!ret)
1592 		ret = ret2;
1593 
1594 	return ret ? ret : copied;
1595 }
1596 
1597 static int ext4_journalled_write_end(struct file *file,
1598 				struct address_space *mapping,
1599 				loff_t pos, unsigned len, unsigned copied,
1600 				struct page *page, void *fsdata)
1601 {
1602 	handle_t *handle = ext4_journal_current_handle();
1603 	struct inode *inode = mapping->host;
1604 	int ret = 0, ret2;
1605 	int partial = 0;
1606 	unsigned from, to;
1607 	loff_t new_i_size;
1608 
1609 	trace_mark(ext4_journalled_write_end,
1610 		   "dev %s ino %lu pos %llu len %u copied %u",
1611 		   inode->i_sb->s_id, inode->i_ino,
1612 		   (unsigned long long) pos, len, copied);
1613 	from = pos & (PAGE_CACHE_SIZE - 1);
1614 	to = from + len;
1615 
1616 	if (copied < len) {
1617 		if (!PageUptodate(page))
1618 			copied = 0;
1619 		page_zero_new_buffers(page, from+copied, to);
1620 	}
1621 
1622 	ret = walk_page_buffers(handle, page_buffers(page), from,
1623 				to, &partial, write_end_fn);
1624 	if (!partial)
1625 		SetPageUptodate(page);
1626 	new_i_size = pos + copied;
1627 	if (new_i_size > inode->i_size)
1628 		i_size_write(inode, pos+copied);
1629 	EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
1630 	if (new_i_size > EXT4_I(inode)->i_disksize) {
1631 		ext4_update_i_disksize(inode, new_i_size);
1632 		ret2 = ext4_mark_inode_dirty(handle, inode);
1633 		if (!ret)
1634 			ret = ret2;
1635 	}
1636 
1637 	unlock_page(page);
1638 	ret2 = ext4_journal_stop(handle);
1639 	if (!ret)
1640 		ret = ret2;
1641 	page_cache_release(page);
1642 
1643 	return ret ? ret : copied;
1644 }
1645 
1646 static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
1647 {
1648 	int retries = 0;
1649 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1650 	unsigned long md_needed, mdblocks, total = 0;
1651 
1652 	/*
1653 	 * recalculate the amount of metadata blocks to reserve
1654 	 * in order to allocate nrblocks
1655 	 * worse case is one extent per block
1656 	 */
1657 repeat:
1658 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1659 	total = EXT4_I(inode)->i_reserved_data_blocks + nrblocks;
1660 	mdblocks = ext4_calc_metadata_amount(inode, total);
1661 	BUG_ON(mdblocks < EXT4_I(inode)->i_reserved_meta_blocks);
1662 
1663 	md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
1664 	total = md_needed + nrblocks;
1665 
1666 	/*
1667 	 * Make quota reservation here to prevent quota overflow
1668 	 * later. Real quota accounting is done at pages writeout
1669 	 * time.
1670 	 */
1671 	if (vfs_dq_reserve_block(inode, total)) {
1672 		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1673 		return -EDQUOT;
1674 	}
1675 
1676 	if (ext4_claim_free_blocks(sbi, total)) {
1677 		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1678 		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1679 			yield();
1680 			goto repeat;
1681 		}
1682 		vfs_dq_release_reservation_block(inode, total);
1683 		return -ENOSPC;
1684 	}
1685 	EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
1686 	EXT4_I(inode)->i_reserved_meta_blocks = mdblocks;
1687 
1688 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1689 	return 0;       /* success */
1690 }
1691 
1692 static void ext4_da_release_space(struct inode *inode, int to_free)
1693 {
1694 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1695 	int total, mdb, mdb_free, release;
1696 
1697 	if (!to_free)
1698 		return;		/* Nothing to release, exit */
1699 
1700 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1701 
1702 	if (!EXT4_I(inode)->i_reserved_data_blocks) {
1703 		/*
1704 		 * if there is no reserved blocks, but we try to free some
1705 		 * then the counter is messed up somewhere.
1706 		 * but since this function is called from invalidate
1707 		 * page, it's harmless to return without any action
1708 		 */
1709 		printk(KERN_INFO "ext4 delalloc try to release %d reserved "
1710 			    "blocks for inode %lu, but there is no reserved "
1711 			    "data blocks\n", to_free, inode->i_ino);
1712 		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1713 		return;
1714 	}
1715 
1716 	/* recalculate the number of metablocks still need to be reserved */
1717 	total = EXT4_I(inode)->i_reserved_data_blocks - to_free;
1718 	mdb = ext4_calc_metadata_amount(inode, total);
1719 
1720 	/* figure out how many metablocks to release */
1721 	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
1722 	mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;
1723 
1724 	release = to_free + mdb_free;
1725 
1726 	/* update fs dirty blocks counter for truncate case */
1727 	percpu_counter_sub(&sbi->s_dirtyblocks_counter, release);
1728 
1729 	/* update per-inode reservations */
1730 	BUG_ON(to_free > EXT4_I(inode)->i_reserved_data_blocks);
1731 	EXT4_I(inode)->i_reserved_data_blocks -= to_free;
1732 
1733 	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
1734 	EXT4_I(inode)->i_reserved_meta_blocks = mdb;
1735 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1736 
1737 	vfs_dq_release_reservation_block(inode, release);
1738 }
1739 
1740 static void ext4_da_page_release_reservation(struct page *page,
1741 						unsigned long offset)
1742 {
1743 	int to_release = 0;
1744 	struct buffer_head *head, *bh;
1745 	unsigned int curr_off = 0;
1746 
1747 	head = page_buffers(page);
1748 	bh = head;
1749 	do {
1750 		unsigned int next_off = curr_off + bh->b_size;
1751 
1752 		if ((offset <= curr_off) && (buffer_delay(bh))) {
1753 			to_release++;
1754 			clear_buffer_delay(bh);
1755 		}
1756 		curr_off = next_off;
1757 	} while ((bh = bh->b_this_page) != head);
1758 	ext4_da_release_space(page->mapping->host, to_release);
1759 }
1760 
1761 /*
1762  * Delayed allocation stuff
1763  */
1764 
1765 struct mpage_da_data {
1766 	struct inode *inode;
1767 	sector_t b_blocknr;		/* start block number of extent */
1768 	size_t b_size;			/* size of extent */
1769 	unsigned long b_state;		/* state of the extent */
1770 	unsigned long first_page, next_page;	/* extent of pages */
1771 	struct writeback_control *wbc;
1772 	int io_done;
1773 	int pages_written;
1774 	int retval;
1775 };
1776 
1777 /*
1778  * mpage_da_submit_io - walks through extent of pages and try to write
1779  * them with writepage() call back
1780  *
1781  * @mpd->inode: inode
1782  * @mpd->first_page: first page of the extent
1783  * @mpd->next_page: page after the last page of the extent
1784  *
1785  * By the time mpage_da_submit_io() is called we expect all blocks
1786  * to be allocated. this may be wrong if allocation failed.
1787  *
1788  * As pages are already locked by write_cache_pages(), we can't use it
1789  */
1790 static int mpage_da_submit_io(struct mpage_da_data *mpd)
1791 {
1792 	long pages_skipped;
1793 	struct pagevec pvec;
1794 	unsigned long index, end;
1795 	int ret = 0, err, nr_pages, i;
1796 	struct inode *inode = mpd->inode;
1797 	struct address_space *mapping = inode->i_mapping;
1798 
1799 	BUG_ON(mpd->next_page <= mpd->first_page);
1800 	/*
1801 	 * We need to start from the first_page to the next_page - 1
1802 	 * to make sure we also write the mapped dirty buffer_heads.
1803 	 * If we look at mpd->b_blocknr we would only be looking
1804 	 * at the currently mapped buffer_heads.
1805 	 */
1806 	index = mpd->first_page;
1807 	end = mpd->next_page - 1;
1808 
1809 	pagevec_init(&pvec, 0);
1810 	while (index <= end) {
1811 		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
1812 		if (nr_pages == 0)
1813 			break;
1814 		for (i = 0; i < nr_pages; i++) {
1815 			struct page *page = pvec.pages[i];
1816 
1817 			index = page->index;
1818 			if (index > end)
1819 				break;
1820 			index++;
1821 
1822 			BUG_ON(!PageLocked(page));
1823 			BUG_ON(PageWriteback(page));
1824 
1825 			pages_skipped = mpd->wbc->pages_skipped;
1826 			err = mapping->a_ops->writepage(page, mpd->wbc);
1827 			if (!err && (pages_skipped == mpd->wbc->pages_skipped))
1828 				/*
1829 				 * have successfully written the page
1830 				 * without skipping the same
1831 				 */
1832 				mpd->pages_written++;
1833 			/*
1834 			 * In error case, we have to continue because
1835 			 * remaining pages are still locked
1836 			 * XXX: unlock and re-dirty them?
1837 			 */
1838 			if (ret == 0)
1839 				ret = err;
1840 		}
1841 		pagevec_release(&pvec);
1842 	}
1843 	return ret;
1844 }
1845 
1846 /*
1847  * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers
1848  *
1849  * @mpd->inode - inode to walk through
1850  * @exbh->b_blocknr - first block on a disk
1851  * @exbh->b_size - amount of space in bytes
1852  * @logical - first logical block to start assignment with
1853  *
1854  * the function goes through all passed space and put actual disk
1855  * block numbers into buffer heads, dropping BH_Delay
1856  */
1857 static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical,
1858 				 struct buffer_head *exbh)
1859 {
1860 	struct inode *inode = mpd->inode;
1861 	struct address_space *mapping = inode->i_mapping;
1862 	int blocks = exbh->b_size >> inode->i_blkbits;
1863 	sector_t pblock = exbh->b_blocknr, cur_logical;
1864 	struct buffer_head *head, *bh;
1865 	pgoff_t index, end;
1866 	struct pagevec pvec;
1867 	int nr_pages, i;
1868 
1869 	index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
1870 	end = (logical + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
1871 	cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1872 
1873 	pagevec_init(&pvec, 0);
1874 
1875 	while (index <= end) {
1876 		/* XXX: optimize tail */
1877 		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
1878 		if (nr_pages == 0)
1879 			break;
1880 		for (i = 0; i < nr_pages; i++) {
1881 			struct page *page = pvec.pages[i];
1882 
1883 			index = page->index;
1884 			if (index > end)
1885 				break;
1886 			index++;
1887 
1888 			BUG_ON(!PageLocked(page));
1889 			BUG_ON(PageWriteback(page));
1890 			BUG_ON(!page_has_buffers(page));
1891 
1892 			bh = page_buffers(page);
1893 			head = bh;
1894 
1895 			/* skip blocks out of the range */
1896 			do {
1897 				if (cur_logical >= logical)
1898 					break;
1899 				cur_logical++;
1900 			} while ((bh = bh->b_this_page) != head);
1901 
1902 			do {
1903 				if (cur_logical >= logical + blocks)
1904 					break;
1905 				if (buffer_delay(bh)) {
1906 					bh->b_blocknr = pblock;
1907 					clear_buffer_delay(bh);
1908 					bh->b_bdev = inode->i_sb->s_bdev;
1909 				} else if (buffer_unwritten(bh)) {
1910 					bh->b_blocknr = pblock;
1911 					clear_buffer_unwritten(bh);
1912 					set_buffer_mapped(bh);
1913 					set_buffer_new(bh);
1914 					bh->b_bdev = inode->i_sb->s_bdev;
1915 				} else if (buffer_mapped(bh))
1916 					BUG_ON(bh->b_blocknr != pblock);
1917 
1918 				cur_logical++;
1919 				pblock++;
1920 			} while ((bh = bh->b_this_page) != head);
1921 		}
1922 		pagevec_release(&pvec);
1923 	}
1924 }
1925 
1926 
1927 /*
1928  * __unmap_underlying_blocks - just a helper function to unmap
1929  * set of blocks described by @bh
1930  */
1931 static inline void __unmap_underlying_blocks(struct inode *inode,
1932 					     struct buffer_head *bh)
1933 {
1934 	struct block_device *bdev = inode->i_sb->s_bdev;
1935 	int blocks, i;
1936 
1937 	blocks = bh->b_size >> inode->i_blkbits;
1938 	for (i = 0; i < blocks; i++)
1939 		unmap_underlying_metadata(bdev, bh->b_blocknr + i);
1940 }
1941 
1942 static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
1943 					sector_t logical, long blk_cnt)
1944 {
1945 	int nr_pages, i;
1946 	pgoff_t index, end;
1947 	struct pagevec pvec;
1948 	struct inode *inode = mpd->inode;
1949 	struct address_space *mapping = inode->i_mapping;
1950 
1951 	index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
1952 	end   = (logical + blk_cnt - 1) >>
1953 				(PAGE_CACHE_SHIFT - inode->i_blkbits);
1954 	while (index <= end) {
1955 		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
1956 		if (nr_pages == 0)
1957 			break;
1958 		for (i = 0; i < nr_pages; i++) {
1959 			struct page *page = pvec.pages[i];
1960 			index = page->index;
1961 			if (index > end)
1962 				break;
1963 			index++;
1964 
1965 			BUG_ON(!PageLocked(page));
1966 			BUG_ON(PageWriteback(page));
1967 			block_invalidatepage(page, 0);
1968 			ClearPageUptodate(page);
1969 			unlock_page(page);
1970 		}
1971 	}
1972 	return;
1973 }
1974 
1975 static void ext4_print_free_blocks(struct inode *inode)
1976 {
1977 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1978 	printk(KERN_EMERG "Total free blocks count %lld\n",
1979 			ext4_count_free_blocks(inode->i_sb));
1980 	printk(KERN_EMERG "Free/Dirty block details\n");
1981 	printk(KERN_EMERG "free_blocks=%lld\n",
1982 			(long long)percpu_counter_sum(&sbi->s_freeblocks_counter));
1983 	printk(KERN_EMERG "dirty_blocks=%lld\n",
1984 			(long long)percpu_counter_sum(&sbi->s_dirtyblocks_counter));
1985 	printk(KERN_EMERG "Block reservation details\n");
1986 	printk(KERN_EMERG "i_reserved_data_blocks=%u\n",
1987 			EXT4_I(inode)->i_reserved_data_blocks);
1988 	printk(KERN_EMERG "i_reserved_meta_blocks=%u\n",
1989 			EXT4_I(inode)->i_reserved_meta_blocks);
1990 	return;
1991 }
1992 
1993 #define		EXT4_DELALLOC_RSVED	1
1994 static int ext4_da_get_block_write(struct inode *inode, sector_t iblock,
1995 				   struct buffer_head *bh_result, int create)
1996 {
1997 	int ret;
1998 	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
1999 	loff_t disksize = EXT4_I(inode)->i_disksize;
2000 	handle_t *handle = NULL;
2001 
2002 	handle = ext4_journal_current_handle();
2003 	BUG_ON(!handle);
2004 	ret = ext4_get_blocks_wrap(handle, inode, iblock, max_blocks,
2005 				   bh_result, create, 0, EXT4_DELALLOC_RSVED);
2006 	if (ret <= 0)
2007 		return ret;
2008 
2009 	bh_result->b_size = (ret << inode->i_blkbits);
2010 
2011 	if (ext4_should_order_data(inode)) {
2012 		int retval;
2013 		retval = ext4_jbd2_file_inode(handle, inode);
2014 		if (retval)
2015 			/*
2016 			 * Failed to add inode for ordered mode. Don't
2017 			 * update file size
2018 			 */
2019 			return retval;
2020 	}
2021 
2022 	/*
2023 	 * Update on-disk size along with block allocation we don't
2024 	 * use 'extend_disksize' as size may change within already
2025 	 * allocated block -bzzz
2026 	 */
2027 	disksize = ((loff_t) iblock + ret) << inode->i_blkbits;
2028 	if (disksize > i_size_read(inode))
2029 		disksize = i_size_read(inode);
2030 	if (disksize > EXT4_I(inode)->i_disksize) {
2031 		ext4_update_i_disksize(inode, disksize);
2032 		ret = ext4_mark_inode_dirty(handle, inode);
2033 		return ret;
2034 	}
2035 	return 0;
2036 }
2037 
2038 /*
2039  * mpage_da_map_blocks - go through given space
2040  *
2041  * @mpd - bh describing space
2042  *
2043  * The function skips space we know is already mapped to disk blocks.
2044  *
2045  */
2046 static int mpage_da_map_blocks(struct mpage_da_data *mpd)
2047 {
2048 	int err = 0;
2049 	struct buffer_head new;
2050 	sector_t next;
2051 
2052 	/*
2053 	 * We consider only non-mapped and non-allocated blocks
2054 	 */
2055 	if ((mpd->b_state  & (1 << BH_Mapped)) &&
2056 	    !(mpd->b_state & (1 << BH_Delay)))
2057 		return 0;
2058 	new.b_state = mpd->b_state;
2059 	new.b_blocknr = 0;
2060 	new.b_size = mpd->b_size;
2061 	next = mpd->b_blocknr;
2062 	/*
2063 	 * If we didn't accumulate anything
2064 	 * to write simply return
2065 	 */
2066 	if (!new.b_size)
2067 		return 0;
2068 
2069 	err = ext4_da_get_block_write(mpd->inode, next, &new, 1);
2070 	if (err) {
2071 		/*
2072 		 * If get block returns with error we simply
2073 		 * return. Later writepage will redirty the page and
2074 		 * writepages will find the dirty page again
2075 		 */
2076 		if (err == -EAGAIN)
2077 			return 0;
2078 
2079 		if (err == -ENOSPC &&
2080 		    ext4_count_free_blocks(mpd->inode->i_sb)) {
2081 			mpd->retval = err;
2082 			return 0;
2083 		}
2084 
2085 		/*
2086 		 * get block failure will cause us to loop in
2087 		 * writepages, because a_ops->writepage won't be able
2088 		 * to make progress. The page will be redirtied by
2089 		 * writepage and writepages will again try to write
2090 		 * the same.
2091 		 */
2092 		printk(KERN_EMERG "%s block allocation failed for inode %lu "
2093 				  "at logical offset %llu with max blocks "
2094 				  "%zd with error %d\n",
2095 				  __func__, mpd->inode->i_ino,
2096 				  (unsigned long long)next,
2097 				  mpd->b_size >> mpd->inode->i_blkbits, err);
2098 		printk(KERN_EMERG "This should not happen.!! "
2099 					"Data will be lost\n");
2100 		if (err == -ENOSPC) {
2101 			ext4_print_free_blocks(mpd->inode);
2102 		}
2103 		/* invlaidate all the pages */
2104 		ext4_da_block_invalidatepages(mpd, next,
2105 				mpd->b_size >> mpd->inode->i_blkbits);
2106 		return err;
2107 	}
2108 	BUG_ON(new.b_size == 0);
2109 
2110 	if (buffer_new(&new))
2111 		__unmap_underlying_blocks(mpd->inode, &new);
2112 
2113 	/*
2114 	 * If blocks are delayed marked, we need to
2115 	 * put actual blocknr and drop delayed bit
2116 	 */
2117 	if ((mpd->b_state & (1 << BH_Delay)) ||
2118 	    (mpd->b_state & (1 << BH_Unwritten)))
2119 		mpage_put_bnr_to_bhs(mpd, next, &new);
2120 
2121 	return 0;
2122 }
2123 
2124 #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
2125 		(1 << BH_Delay) | (1 << BH_Unwritten))
2126 
2127 /*
2128  * mpage_add_bh_to_extent - try to add one more block to extent of blocks
2129  *
2130  * @mpd->lbh - extent of blocks
2131  * @logical - logical number of the block in the file
2132  * @bh - bh of the block (used to access block's state)
2133  *
2134  * the function is used to collect contig. blocks in same state
2135  */
2136 static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
2137 				   sector_t logical, size_t b_size,
2138 				   unsigned long b_state)
2139 {
2140 	sector_t next;
2141 	int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
2142 
2143 	/* check if thereserved journal credits might overflow */
2144 	if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) {
2145 		if (nrblocks >= EXT4_MAX_TRANS_DATA) {
2146 			/*
2147 			 * With non-extent format we are limited by the journal
2148 			 * credit available.  Total credit needed to insert
2149 			 * nrblocks contiguous blocks is dependent on the
2150 			 * nrblocks.  So limit nrblocks.
2151 			 */
2152 			goto flush_it;
2153 		} else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
2154 				EXT4_MAX_TRANS_DATA) {
2155 			/*
2156 			 * Adding the new buffer_head would make it cross the
2157 			 * allowed limit for which we have journal credit
2158 			 * reserved. So limit the new bh->b_size
2159 			 */
2160 			b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
2161 						mpd->inode->i_blkbits;
2162 			/* we will do mpage_da_submit_io in the next loop */
2163 		}
2164 	}
2165 	/*
2166 	 * First block in the extent
2167 	 */
2168 	if (mpd->b_size == 0) {
2169 		mpd->b_blocknr = logical;
2170 		mpd->b_size = b_size;
2171 		mpd->b_state = b_state & BH_FLAGS;
2172 		return;
2173 	}
2174 
2175 	next = mpd->b_blocknr + nrblocks;
2176 	/*
2177 	 * Can we merge the block to our big extent?
2178 	 */
2179 	if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
2180 		mpd->b_size += b_size;
2181 		return;
2182 	}
2183 
2184 flush_it:
2185 	/*
2186 	 * We couldn't merge the block to our extent, so we
2187 	 * need to flush current  extent and start new one
2188 	 */
2189 	if (mpage_da_map_blocks(mpd) == 0)
2190 		mpage_da_submit_io(mpd);
2191 	mpd->io_done = 1;
2192 	return;
2193 }
2194 
2195 /*
2196  * __mpage_da_writepage - finds extent of pages and blocks
2197  *
2198  * @page: page to consider
2199  * @wbc: not used, we just follow rules
2200  * @data: context
2201  *
2202  * The function finds extents of pages and scan them for all blocks.
2203  */
2204 static int __mpage_da_writepage(struct page *page,
2205 				struct writeback_control *wbc, void *data)
2206 {
2207 	struct mpage_da_data *mpd = data;
2208 	struct inode *inode = mpd->inode;
2209 	struct buffer_head *bh, *head;
2210 	sector_t logical;
2211 
2212 	if (mpd->io_done) {
2213 		/*
2214 		 * Rest of the page in the page_vec
2215 		 * redirty then and skip then. We will
2216 		 * try to to write them again after
2217 		 * starting a new transaction
2218 		 */
2219 		redirty_page_for_writepage(wbc, page);
2220 		unlock_page(page);
2221 		return MPAGE_DA_EXTENT_TAIL;
2222 	}
2223 	/*
2224 	 * Can we merge this page to current extent?
2225 	 */
2226 	if (mpd->next_page != page->index) {
2227 		/*
2228 		 * Nope, we can't. So, we map non-allocated blocks
2229 		 * and start IO on them using writepage()
2230 		 */
2231 		if (mpd->next_page != mpd->first_page) {
2232 			if (mpage_da_map_blocks(mpd) == 0)
2233 				mpage_da_submit_io(mpd);
2234 			/*
2235 			 * skip rest of the page in the page_vec
2236 			 */
2237 			mpd->io_done = 1;
2238 			redirty_page_for_writepage(wbc, page);
2239 			unlock_page(page);
2240 			return MPAGE_DA_EXTENT_TAIL;
2241 		}
2242 
2243 		/*
2244 		 * Start next extent of pages ...
2245 		 */
2246 		mpd->first_page = page->index;
2247 
2248 		/*
2249 		 * ... and blocks
2250 		 */
2251 		mpd->b_size = 0;
2252 		mpd->b_state = 0;
2253 		mpd->b_blocknr = 0;
2254 	}
2255 
2256 	mpd->next_page = page->index + 1;
2257 	logical = (sector_t) page->index <<
2258 		  (PAGE_CACHE_SHIFT - inode->i_blkbits);
2259 
2260 	if (!page_has_buffers(page)) {
2261 		mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE,
2262 				       (1 << BH_Dirty) | (1 << BH_Uptodate));
2263 		if (mpd->io_done)
2264 			return MPAGE_DA_EXTENT_TAIL;
2265 	} else {
2266 		/*
2267 		 * Page with regular buffer heads, just add all dirty ones
2268 		 */
2269 		head = page_buffers(page);
2270 		bh = head;
2271 		do {
2272 			BUG_ON(buffer_locked(bh));
2273 			/*
2274 			 * We need to try to allocate
2275 			 * unmapped blocks in the same page.
2276 			 * Otherwise we won't make progress
2277 			 * with the page in ext4_da_writepage
2278 			 */
2279 			if (buffer_dirty(bh) &&
2280 			    (!buffer_mapped(bh) || buffer_delay(bh))) {
2281 				mpage_add_bh_to_extent(mpd, logical,
2282 						       bh->b_size,
2283 						       bh->b_state);
2284 				if (mpd->io_done)
2285 					return MPAGE_DA_EXTENT_TAIL;
2286 			} else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
2287 				/*
2288 				 * mapped dirty buffer. We need to update
2289 				 * the b_state because we look at
2290 				 * b_state in mpage_da_map_blocks. We don't
2291 				 * update b_size because if we find an
2292 				 * unmapped buffer_head later we need to
2293 				 * use the b_state flag of that buffer_head.
2294 				 */
2295 				if (mpd->b_size == 0)
2296 					mpd->b_state = bh->b_state & BH_FLAGS;
2297 			}
2298 			logical++;
2299 		} while ((bh = bh->b_this_page) != head);
2300 	}
2301 
2302 	return 0;
2303 }
2304 
2305 /*
2306  * this is a special callback for ->write_begin() only
2307  * it's intention is to return mapped block or reserve space
2308  */
2309 static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
2310 				  struct buffer_head *bh_result, int create)
2311 {
2312 	int ret = 0;
2313 	sector_t invalid_block = ~((sector_t) 0xffff);
2314 
2315 	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
2316 		invalid_block = ~0;
2317 
2318 	BUG_ON(create == 0);
2319 	BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
2320 
2321 	/*
2322 	 * first, we need to know whether the block is allocated already
2323 	 * preallocated blocks are unmapped but should treated
2324 	 * the same as allocated blocks.
2325 	 */
2326 	ret = ext4_get_blocks_wrap(NULL, inode, iblock, 1,  bh_result, 0, 0, 0);
2327 	if ((ret == 0) && !buffer_delay(bh_result)) {
2328 		/* the block isn't (pre)allocated yet, let's reserve space */
2329 		/*
2330 		 * XXX: __block_prepare_write() unmaps passed block,
2331 		 * is it OK?
2332 		 */
2333 		ret = ext4_da_reserve_space(inode, 1);
2334 		if (ret)
2335 			/* not enough space to reserve */
2336 			return ret;
2337 
2338 		map_bh(bh_result, inode->i_sb, invalid_block);
2339 		set_buffer_new(bh_result);
2340 		set_buffer_delay(bh_result);
2341 	} else if (ret > 0) {
2342 		bh_result->b_size = (ret << inode->i_blkbits);
2343 		/*
2344 		 * With sub-block writes into unwritten extents
2345 		 * we also need to mark the buffer as new so that
2346 		 * the unwritten parts of the buffer gets correctly zeroed.
2347 		 */
2348 		if (buffer_unwritten(bh_result))
2349 			set_buffer_new(bh_result);
2350 		ret = 0;
2351 	}
2352 
2353 	return ret;
2354 }
2355 
2356 static int ext4_bh_unmapped_or_delay(handle_t *handle, struct buffer_head *bh)
2357 {
2358 	/*
2359 	 * unmapped buffer is possible for holes.
2360 	 * delay buffer is possible with delayed allocation
2361 	 */
2362 	return ((!buffer_mapped(bh) || buffer_delay(bh)) && buffer_dirty(bh));
2363 }
2364 
2365 static int ext4_normal_get_block_write(struct inode *inode, sector_t iblock,
2366 				   struct buffer_head *bh_result, int create)
2367 {
2368 	int ret = 0;
2369 	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
2370 
2371 	/*
2372 	 * we don't want to do block allocation in writepage
2373 	 * so call get_block_wrap with create = 0
2374 	 */
2375 	ret = ext4_get_blocks_wrap(NULL, inode, iblock, max_blocks,
2376 				   bh_result, 0, 0, 0);
2377 	if (ret > 0) {
2378 		bh_result->b_size = (ret << inode->i_blkbits);
2379 		ret = 0;
2380 	}
2381 	return ret;
2382 }
2383 
2384 /*
2385  * get called vi ext4_da_writepages after taking page lock (have journal handle)
2386  * get called via journal_submit_inode_data_buffers (no journal handle)
2387  * get called via shrink_page_list via pdflush (no journal handle)
2388  * or grab_page_cache when doing write_begin (have journal handle)
2389  */
2390 static int ext4_da_writepage(struct page *page,
2391 				struct writeback_control *wbc)
2392 {
2393 	int ret = 0;
2394 	loff_t size;
2395 	unsigned int len;
2396 	struct buffer_head *page_bufs;
2397 	struct inode *inode = page->mapping->host;
2398 
2399 	trace_mark(ext4_da_writepage,
2400 		   "dev %s ino %lu page_index %lu",
2401 		   inode->i_sb->s_id, inode->i_ino, page->index);
2402 	size = i_size_read(inode);
2403 	if (page->index == size >> PAGE_CACHE_SHIFT)
2404 		len = size & ~PAGE_CACHE_MASK;
2405 	else
2406 		len = PAGE_CACHE_SIZE;
2407 
2408 	if (page_has_buffers(page)) {
2409 		page_bufs = page_buffers(page);
2410 		if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2411 					ext4_bh_unmapped_or_delay)) {
2412 			/*
2413 			 * We don't want to do  block allocation
2414 			 * So redirty the page and return
2415 			 * We may reach here when we do a journal commit
2416 			 * via journal_submit_inode_data_buffers.
2417 			 * If we don't have mapping block we just ignore
2418 			 * them. We can also reach here via shrink_page_list
2419 			 */
2420 			redirty_page_for_writepage(wbc, page);
2421 			unlock_page(page);
2422 			return 0;
2423 		}
2424 	} else {
2425 		/*
2426 		 * The test for page_has_buffers() is subtle:
2427 		 * We know the page is dirty but it lost buffers. That means
2428 		 * that at some moment in time after write_begin()/write_end()
2429 		 * has been called all buffers have been clean and thus they
2430 		 * must have been written at least once. So they are all
2431 		 * mapped and we can happily proceed with mapping them
2432 		 * and writing the page.
2433 		 *
2434 		 * Try to initialize the buffer_heads and check whether
2435 		 * all are mapped and non delay. We don't want to
2436 		 * do block allocation here.
2437 		 */
2438 		ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
2439 						ext4_normal_get_block_write);
2440 		if (!ret) {
2441 			page_bufs = page_buffers(page);
2442 			/* check whether all are mapped and non delay */
2443 			if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2444 						ext4_bh_unmapped_or_delay)) {
2445 				redirty_page_for_writepage(wbc, page);
2446 				unlock_page(page);
2447 				return 0;
2448 			}
2449 		} else {
2450 			/*
2451 			 * We can't do block allocation here
2452 			 * so just redity the page and unlock
2453 			 * and return
2454 			 */
2455 			redirty_page_for_writepage(wbc, page);
2456 			unlock_page(page);
2457 			return 0;
2458 		}
2459 		/* now mark the buffer_heads as dirty and uptodate */
2460 		block_commit_write(page, 0, PAGE_CACHE_SIZE);
2461 	}
2462 
2463 	if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
2464 		ret = nobh_writepage(page, ext4_normal_get_block_write, wbc);
2465 	else
2466 		ret = block_write_full_page(page,
2467 						ext4_normal_get_block_write,
2468 						wbc);
2469 
2470 	return ret;
2471 }
2472 
2473 /*
2474  * This is called via ext4_da_writepages() to
2475  * calulate the total number of credits to reserve to fit
2476  * a single extent allocation into a single transaction,
2477  * ext4_da_writpeages() will loop calling this before
2478  * the block allocation.
2479  */
2480 
2481 static int ext4_da_writepages_trans_blocks(struct inode *inode)
2482 {
2483 	int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
2484 
2485 	/*
2486 	 * With non-extent format the journal credit needed to
2487 	 * insert nrblocks contiguous block is dependent on
2488 	 * number of contiguous block. So we will limit
2489 	 * number of contiguous block to a sane value
2490 	 */
2491 	if (!(inode->i_flags & EXT4_EXTENTS_FL) &&
2492 	    (max_blocks > EXT4_MAX_TRANS_DATA))
2493 		max_blocks = EXT4_MAX_TRANS_DATA;
2494 
2495 	return ext4_chunk_trans_blocks(inode, max_blocks);
2496 }
2497 
2498 static int ext4_da_writepages(struct address_space *mapping,
2499 			      struct writeback_control *wbc)
2500 {
2501 	pgoff_t	index;
2502 	int range_whole = 0;
2503 	handle_t *handle = NULL;
2504 	struct mpage_da_data mpd;
2505 	struct inode *inode = mapping->host;
2506 	int no_nrwrite_index_update;
2507 	int pages_written = 0;
2508 	long pages_skipped;
2509 	int range_cyclic, cycled = 1, io_done = 0;
2510 	int needed_blocks, ret = 0, nr_to_writebump = 0;
2511 	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2512 
2513 	trace_mark(ext4_da_writepages,
2514 		   "dev %s ino %lu nr_t_write %ld "
2515 		   "pages_skipped %ld range_start %llu "
2516 		   "range_end %llu nonblocking %d "
2517 		   "for_kupdate %d for_reclaim %d "
2518 		   "for_writepages %d range_cyclic %d",
2519 		   inode->i_sb->s_id, inode->i_ino,
2520 		   wbc->nr_to_write, wbc->pages_skipped,
2521 		   (unsigned long long) wbc->range_start,
2522 		   (unsigned long long) wbc->range_end,
2523 		   wbc->nonblocking, wbc->for_kupdate,
2524 		   wbc->for_reclaim, wbc->for_writepages,
2525 		   wbc->range_cyclic);
2526 
2527 	/*
2528 	 * No pages to write? This is mainly a kludge to avoid starting
2529 	 * a transaction for special inodes like journal inode on last iput()
2530 	 * because that could violate lock ordering on umount
2531 	 */
2532 	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2533 		return 0;
2534 
2535 	/*
2536 	 * If the filesystem has aborted, it is read-only, so return
2537 	 * right away instead of dumping stack traces later on that
2538 	 * will obscure the real source of the problem.  We test
2539 	 * EXT4_MOUNT_ABORT instead of sb->s_flag's MS_RDONLY because
2540 	 * the latter could be true if the filesystem is mounted
2541 	 * read-only, and in that case, ext4_da_writepages should
2542 	 * *never* be called, so if that ever happens, we would want
2543 	 * the stack trace.
2544 	 */
2545 	if (unlikely(sbi->s_mount_opt & EXT4_MOUNT_ABORT))
2546 		return -EROFS;
2547 
2548 	/*
2549 	 * Make sure nr_to_write is >= sbi->s_mb_stream_request
2550 	 * This make sure small files blocks are allocated in
2551 	 * single attempt. This ensure that small files
2552 	 * get less fragmented.
2553 	 */
2554 	if (wbc->nr_to_write < sbi->s_mb_stream_request) {
2555 		nr_to_writebump = sbi->s_mb_stream_request - wbc->nr_to_write;
2556 		wbc->nr_to_write = sbi->s_mb_stream_request;
2557 	}
2558 	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2559 		range_whole = 1;
2560 
2561 	range_cyclic = wbc->range_cyclic;
2562 	if (wbc->range_cyclic) {
2563 		index = mapping->writeback_index;
2564 		if (index)
2565 			cycled = 0;
2566 		wbc->range_start = index << PAGE_CACHE_SHIFT;
2567 		wbc->range_end  = LLONG_MAX;
2568 		wbc->range_cyclic = 0;
2569 	} else
2570 		index = wbc->range_start >> PAGE_CACHE_SHIFT;
2571 
2572 	mpd.wbc = wbc;
2573 	mpd.inode = mapping->host;
2574 
2575 	/*
2576 	 * we don't want write_cache_pages to update
2577 	 * nr_to_write and writeback_index
2578 	 */
2579 	no_nrwrite_index_update = wbc->no_nrwrite_index_update;
2580 	wbc->no_nrwrite_index_update = 1;
2581 	pages_skipped = wbc->pages_skipped;
2582 
2583 retry:
2584 	while (!ret && wbc->nr_to_write > 0) {
2585 
2586 		/*
2587 		 * we  insert one extent at a time. So we need
2588 		 * credit needed for single extent allocation.
2589 		 * journalled mode is currently not supported
2590 		 * by delalloc
2591 		 */
2592 		BUG_ON(ext4_should_journal_data(inode));
2593 		needed_blocks = ext4_da_writepages_trans_blocks(inode);
2594 
2595 		/* start a new transaction*/
2596 		handle = ext4_journal_start(inode, needed_blocks);
2597 		if (IS_ERR(handle)) {
2598 			ret = PTR_ERR(handle);
2599 			printk(KERN_CRIT "%s: jbd2_start: "
2600 			       "%ld pages, ino %lu; err %d\n", __func__,
2601 				wbc->nr_to_write, inode->i_ino, ret);
2602 			dump_stack();
2603 			goto out_writepages;
2604 		}
2605 
2606 		/*
2607 		 * Now call __mpage_da_writepage to find the next
2608 		 * contiguous region of logical blocks that need
2609 		 * blocks to be allocated by ext4.  We don't actually
2610 		 * submit the blocks for I/O here, even though
2611 		 * write_cache_pages thinks it will, and will set the
2612 		 * pages as clean for write before calling
2613 		 * __mpage_da_writepage().
2614 		 */
2615 		mpd.b_size = 0;
2616 		mpd.b_state = 0;
2617 		mpd.b_blocknr = 0;
2618 		mpd.first_page = 0;
2619 		mpd.next_page = 0;
2620 		mpd.io_done = 0;
2621 		mpd.pages_written = 0;
2622 		mpd.retval = 0;
2623 		ret = write_cache_pages(mapping, wbc, __mpage_da_writepage,
2624 					&mpd);
2625 		/*
2626 		 * If we have a contigous extent of pages and we
2627 		 * haven't done the I/O yet, map the blocks and submit
2628 		 * them for I/O.
2629 		 */
2630 		if (!mpd.io_done && mpd.next_page != mpd.first_page) {
2631 			if (mpage_da_map_blocks(&mpd) == 0)
2632 				mpage_da_submit_io(&mpd);
2633 			mpd.io_done = 1;
2634 			ret = MPAGE_DA_EXTENT_TAIL;
2635 		}
2636 		wbc->nr_to_write -= mpd.pages_written;
2637 
2638 		ext4_journal_stop(handle);
2639 
2640 		if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
2641 			/* commit the transaction which would
2642 			 * free blocks released in the transaction
2643 			 * and try again
2644 			 */
2645 			jbd2_journal_force_commit_nested(sbi->s_journal);
2646 			wbc->pages_skipped = pages_skipped;
2647 			ret = 0;
2648 		} else if (ret == MPAGE_DA_EXTENT_TAIL) {
2649 			/*
2650 			 * got one extent now try with
2651 			 * rest of the pages
2652 			 */
2653 			pages_written += mpd.pages_written;
2654 			wbc->pages_skipped = pages_skipped;
2655 			ret = 0;
2656 			io_done = 1;
2657 		} else if (wbc->nr_to_write)
2658 			/*
2659 			 * There is no more writeout needed
2660 			 * or we requested for a noblocking writeout
2661 			 * and we found the device congested
2662 			 */
2663 			break;
2664 	}
2665 	if (!io_done && !cycled) {
2666 		cycled = 1;
2667 		index = 0;
2668 		wbc->range_start = index << PAGE_CACHE_SHIFT;
2669 		wbc->range_end  = mapping->writeback_index - 1;
2670 		goto retry;
2671 	}
2672 	if (pages_skipped != wbc->pages_skipped)
2673 		printk(KERN_EMERG "This should not happen leaving %s "
2674 				"with nr_to_write = %ld ret = %d\n",
2675 				__func__, wbc->nr_to_write, ret);
2676 
2677 	/* Update index */
2678 	index += pages_written;
2679 	wbc->range_cyclic = range_cyclic;
2680 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2681 		/*
2682 		 * set the writeback_index so that range_cyclic
2683 		 * mode will write it back later
2684 		 */
2685 		mapping->writeback_index = index;
2686 
2687 out_writepages:
2688 	if (!no_nrwrite_index_update)
2689 		wbc->no_nrwrite_index_update = 0;
2690 	wbc->nr_to_write -= nr_to_writebump;
2691 	trace_mark(ext4_da_writepage_result,
2692 		   "dev %s ino %lu ret %d pages_written %d "
2693 		   "pages_skipped %ld congestion %d "
2694 		   "more_io %d no_nrwrite_index_update %d",
2695 		   inode->i_sb->s_id, inode->i_ino, ret,
2696 		   pages_written, wbc->pages_skipped,
2697 		   wbc->encountered_congestion, wbc->more_io,
2698 		   wbc->no_nrwrite_index_update);
2699 	return ret;
2700 }
2701 
2702 #define FALL_BACK_TO_NONDELALLOC 1
2703 static int ext4_nonda_switch(struct super_block *sb)
2704 {
2705 	s64 free_blocks, dirty_blocks;
2706 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2707 
2708 	/*
2709 	 * switch to non delalloc mode if we are running low
2710 	 * on free block. The free block accounting via percpu
2711 	 * counters can get slightly wrong with percpu_counter_batch getting
2712 	 * accumulated on each CPU without updating global counters
2713 	 * Delalloc need an accurate free block accounting. So switch
2714 	 * to non delalloc when we are near to error range.
2715 	 */
2716 	free_blocks  = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
2717 	dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter);
2718 	if (2 * free_blocks < 3 * dirty_blocks ||
2719 		free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) {
2720 		/*
2721 		 * free block count is less that 150% of dirty blocks
2722 		 * or free blocks is less that watermark
2723 		 */
2724 		return 1;
2725 	}
2726 	return 0;
2727 }
2728 
2729 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2730 				loff_t pos, unsigned len, unsigned flags,
2731 				struct page **pagep, void **fsdata)
2732 {
2733 	int ret, retries = 0;
2734 	struct page *page;
2735 	pgoff_t index;
2736 	unsigned from, to;
2737 	struct inode *inode = mapping->host;
2738 	handle_t *handle;
2739 
2740 	index = pos >> PAGE_CACHE_SHIFT;
2741 	from = pos & (PAGE_CACHE_SIZE - 1);
2742 	to = from + len;
2743 
2744 	if (ext4_nonda_switch(inode->i_sb)) {
2745 		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2746 		return ext4_write_begin(file, mapping, pos,
2747 					len, flags, pagep, fsdata);
2748 	}
2749 	*fsdata = (void *)0;
2750 
2751 	trace_mark(ext4_da_write_begin,
2752 		   "dev %s ino %lu pos %llu len %u flags %u",
2753 		   inode->i_sb->s_id, inode->i_ino,
2754 		   (unsigned long long) pos, len, flags);
2755 retry:
2756 	/*
2757 	 * With delayed allocation, we don't log the i_disksize update
2758 	 * if there is delayed block allocation. But we still need
2759 	 * to journalling the i_disksize update if writes to the end
2760 	 * of file which has an already mapped buffer.
2761 	 */
2762 	handle = ext4_journal_start(inode, 1);
2763 	if (IS_ERR(handle)) {
2764 		ret = PTR_ERR(handle);
2765 		goto out;
2766 	}
2767 	/* We cannot recurse into the filesystem as the transaction is already
2768 	 * started */
2769 	flags |= AOP_FLAG_NOFS;
2770 
2771 	page = grab_cache_page_write_begin(mapping, index, flags);
2772 	if (!page) {
2773 		ext4_journal_stop(handle);
2774 		ret = -ENOMEM;
2775 		goto out;
2776 	}
2777 	*pagep = page;
2778 
2779 	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
2780 							ext4_da_get_block_prep);
2781 	if (ret < 0) {
2782 		unlock_page(page);
2783 		ext4_journal_stop(handle);
2784 		page_cache_release(page);
2785 		/*
2786 		 * block_write_begin may have instantiated a few blocks
2787 		 * outside i_size.  Trim these off again. Don't need
2788 		 * i_size_read because we hold i_mutex.
2789 		 */
2790 		if (pos + len > inode->i_size)
2791 			vmtruncate(inode, inode->i_size);
2792 	}
2793 
2794 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
2795 		goto retry;
2796 out:
2797 	return ret;
2798 }
2799 
2800 /*
2801  * Check if we should update i_disksize
2802  * when write to the end of file but not require block allocation
2803  */
2804 static int ext4_da_should_update_i_disksize(struct page *page,
2805 					 unsigned long offset)
2806 {
2807 	struct buffer_head *bh;
2808 	struct inode *inode = page->mapping->host;
2809 	unsigned int idx;
2810 	int i;
2811 
2812 	bh = page_buffers(page);
2813 	idx = offset >> inode->i_blkbits;
2814 
2815 	for (i = 0; i < idx; i++)
2816 		bh = bh->b_this_page;
2817 
2818 	if (!buffer_mapped(bh) || (buffer_delay(bh)))
2819 		return 0;
2820 	return 1;
2821 }
2822 
2823 static int ext4_da_write_end(struct file *file,
2824 				struct address_space *mapping,
2825 				loff_t pos, unsigned len, unsigned copied,
2826 				struct page *page, void *fsdata)
2827 {
2828 	struct inode *inode = mapping->host;
2829 	int ret = 0, ret2;
2830 	handle_t *handle = ext4_journal_current_handle();
2831 	loff_t new_i_size;
2832 	unsigned long start, end;
2833 	int write_mode = (int)(unsigned long)fsdata;
2834 
2835 	if (write_mode == FALL_BACK_TO_NONDELALLOC) {
2836 		if (ext4_should_order_data(inode)) {
2837 			return ext4_ordered_write_end(file, mapping, pos,
2838 					len, copied, page, fsdata);
2839 		} else if (ext4_should_writeback_data(inode)) {
2840 			return ext4_writeback_write_end(file, mapping, pos,
2841 					len, copied, page, fsdata);
2842 		} else {
2843 			BUG();
2844 		}
2845 	}
2846 
2847 	trace_mark(ext4_da_write_end,
2848 		   "dev %s ino %lu pos %llu len %u copied %u",
2849 		   inode->i_sb->s_id, inode->i_ino,
2850 		   (unsigned long long) pos, len, copied);
2851 	start = pos & (PAGE_CACHE_SIZE - 1);
2852 	end = start + copied - 1;
2853 
2854 	/*
2855 	 * generic_write_end() will run mark_inode_dirty() if i_size
2856 	 * changes.  So let's piggyback the i_disksize mark_inode_dirty
2857 	 * into that.
2858 	 */
2859 
2860 	new_i_size = pos + copied;
2861 	if (new_i_size > EXT4_I(inode)->i_disksize) {
2862 		if (ext4_da_should_update_i_disksize(page, end)) {
2863 			down_write(&EXT4_I(inode)->i_data_sem);
2864 			if (new_i_size > EXT4_I(inode)->i_disksize) {
2865 				/*
2866 				 * Updating i_disksize when extending file
2867 				 * without needing block allocation
2868 				 */
2869 				if (ext4_should_order_data(inode))
2870 					ret = ext4_jbd2_file_inode(handle,
2871 								   inode);
2872 
2873 				EXT4_I(inode)->i_disksize = new_i_size;
2874 			}
2875 			up_write(&EXT4_I(inode)->i_data_sem);
2876 			/* We need to mark inode dirty even if
2877 			 * new_i_size is less that inode->i_size
2878 			 * bu greater than i_disksize.(hint delalloc)
2879 			 */
2880 			ext4_mark_inode_dirty(handle, inode);
2881 		}
2882 	}
2883 	ret2 = generic_write_end(file, mapping, pos, len, copied,
2884 							page, fsdata);
2885 	copied = ret2;
2886 	if (ret2 < 0)
2887 		ret = ret2;
2888 	ret2 = ext4_journal_stop(handle);
2889 	if (!ret)
2890 		ret = ret2;
2891 
2892 	return ret ? ret : copied;
2893 }
2894 
2895 static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
2896 {
2897 	/*
2898 	 * Drop reserved blocks
2899 	 */
2900 	BUG_ON(!PageLocked(page));
2901 	if (!page_has_buffers(page))
2902 		goto out;
2903 
2904 	ext4_da_page_release_reservation(page, offset);
2905 
2906 out:
2907 	ext4_invalidatepage(page, offset);
2908 
2909 	return;
2910 }
2911 
2912 /*
2913  * Force all delayed allocation blocks to be allocated for a given inode.
2914  */
2915 int ext4_alloc_da_blocks(struct inode *inode)
2916 {
2917 	if (!EXT4_I(inode)->i_reserved_data_blocks &&
2918 	    !EXT4_I(inode)->i_reserved_meta_blocks)
2919 		return 0;
2920 
2921 	/*
2922 	 * We do something simple for now.  The filemap_flush() will
2923 	 * also start triggering a write of the data blocks, which is
2924 	 * not strictly speaking necessary (and for users of
2925 	 * laptop_mode, not even desirable).  However, to do otherwise
2926 	 * would require replicating code paths in:
2927 	 *
2928 	 * ext4_da_writepages() ->
2929 	 *    write_cache_pages() ---> (via passed in callback function)
2930 	 *        __mpage_da_writepage() -->
2931 	 *           mpage_add_bh_to_extent()
2932 	 *           mpage_da_map_blocks()
2933 	 *
2934 	 * The problem is that write_cache_pages(), located in
2935 	 * mm/page-writeback.c, marks pages clean in preparation for
2936 	 * doing I/O, which is not desirable if we're not planning on
2937 	 * doing I/O at all.
2938 	 *
2939 	 * We could call write_cache_pages(), and then redirty all of
2940 	 * the pages by calling redirty_page_for_writeback() but that
2941 	 * would be ugly in the extreme.  So instead we would need to
2942 	 * replicate parts of the code in the above functions,
2943 	 * simplifying them becuase we wouldn't actually intend to
2944 	 * write out the pages, but rather only collect contiguous
2945 	 * logical block extents, call the multi-block allocator, and
2946 	 * then update the buffer heads with the block allocations.
2947 	 *
2948 	 * For now, though, we'll cheat by calling filemap_flush(),
2949 	 * which will map the blocks, and start the I/O, but not
2950 	 * actually wait for the I/O to complete.
2951 	 */
2952 	return filemap_flush(inode->i_mapping);
2953 }
2954 
2955 /*
2956  * bmap() is special.  It gets used by applications such as lilo and by
2957  * the swapper to find the on-disk block of a specific piece of data.
2958  *
2959  * Naturally, this is dangerous if the block concerned is still in the
2960  * journal.  If somebody makes a swapfile on an ext4 data-journaling
2961  * filesystem and enables swap, then they may get a nasty shock when the
2962  * data getting swapped to that swapfile suddenly gets overwritten by
2963  * the original zero's written out previously to the journal and
2964  * awaiting writeback in the kernel's buffer cache.
2965  *
2966  * So, if we see any bmap calls here on a modified, data-journaled file,
2967  * take extra steps to flush any blocks which might be in the cache.
2968  */
2969 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
2970 {
2971 	struct inode *inode = mapping->host;
2972 	journal_t *journal;
2973 	int err;
2974 
2975 	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
2976 			test_opt(inode->i_sb, DELALLOC)) {
2977 		/*
2978 		 * With delalloc we want to sync the file
2979 		 * so that we can make sure we allocate
2980 		 * blocks for file
2981 		 */
2982 		filemap_write_and_wait(mapping);
2983 	}
2984 
2985 	if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
2986 		/*
2987 		 * This is a REALLY heavyweight approach, but the use of
2988 		 * bmap on dirty files is expected to be extremely rare:
2989 		 * only if we run lilo or swapon on a freshly made file
2990 		 * do we expect this to happen.
2991 		 *
2992 		 * (bmap requires CAP_SYS_RAWIO so this does not
2993 		 * represent an unprivileged user DOS attack --- we'd be
2994 		 * in trouble if mortal users could trigger this path at
2995 		 * will.)
2996 		 *
2997 		 * NB. EXT4_STATE_JDATA is not set on files other than
2998 		 * regular files.  If somebody wants to bmap a directory
2999 		 * or symlink and gets confused because the buffer
3000 		 * hasn't yet been flushed to disk, they deserve
3001 		 * everything they get.
3002 		 */
3003 
3004 		EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
3005 		journal = EXT4_JOURNAL(inode);
3006 		jbd2_journal_lock_updates(journal);
3007 		err = jbd2_journal_flush(journal);
3008 		jbd2_journal_unlock_updates(journal);
3009 
3010 		if (err)
3011 			return 0;
3012 	}
3013 
3014 	return generic_block_bmap(mapping, block, ext4_get_block);
3015 }
3016 
3017 static int bget_one(handle_t *handle, struct buffer_head *bh)
3018 {
3019 	get_bh(bh);
3020 	return 0;
3021 }
3022 
3023 static int bput_one(handle_t *handle, struct buffer_head *bh)
3024 {
3025 	put_bh(bh);
3026 	return 0;
3027 }
3028 
3029 /*
3030  * Note that we don't need to start a transaction unless we're journaling data
3031  * because we should have holes filled from ext4_page_mkwrite(). We even don't
3032  * need to file the inode to the transaction's list in ordered mode because if
3033  * we are writing back data added by write(), the inode is already there and if
3034  * we are writing back data modified via mmap(), noone guarantees in which
3035  * transaction the data will hit the disk. In case we are journaling data, we
3036  * cannot start transaction directly because transaction start ranks above page
3037  * lock so we have to do some magic.
3038  *
3039  * In all journaling modes block_write_full_page() will start the I/O.
3040  *
3041  * Problem:
3042  *
3043  *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
3044  *		ext4_writepage()
3045  *
3046  * Similar for:
3047  *
3048  *	ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
3049  *
3050  * Same applies to ext4_get_block().  We will deadlock on various things like
3051  * lock_journal and i_data_sem
3052  *
3053  * Setting PF_MEMALLOC here doesn't work - too many internal memory
3054  * allocations fail.
3055  *
3056  * 16May01: If we're reentered then journal_current_handle() will be
3057  *	    non-zero. We simply *return*.
3058  *
3059  * 1 July 2001: @@@ FIXME:
3060  *   In journalled data mode, a data buffer may be metadata against the
3061  *   current transaction.  But the same file is part of a shared mapping
3062  *   and someone does a writepage() on it.
3063  *
3064  *   We will move the buffer onto the async_data list, but *after* it has
3065  *   been dirtied. So there's a small window where we have dirty data on
3066  *   BJ_Metadata.
3067  *
3068  *   Note that this only applies to the last partial page in the file.  The
3069  *   bit which block_write_full_page() uses prepare/commit for.  (That's
3070  *   broken code anyway: it's wrong for msync()).
3071  *
3072  *   It's a rare case: affects the final partial page, for journalled data
3073  *   where the file is subject to bith write() and writepage() in the same
3074  *   transction.  To fix it we'll need a custom block_write_full_page().
3075  *   We'll probably need that anyway for journalling writepage() output.
3076  *
3077  * We don't honour synchronous mounts for writepage().  That would be
3078  * disastrous.  Any write() or metadata operation will sync the fs for
3079  * us.
3080  *
3081  */
3082 static int __ext4_normal_writepage(struct page *page,
3083 				struct writeback_control *wbc)
3084 {
3085 	struct inode *inode = page->mapping->host;
3086 
3087 	if (test_opt(inode->i_sb, NOBH))
3088 		return nobh_writepage(page,
3089 					ext4_normal_get_block_write, wbc);
3090 	else
3091 		return block_write_full_page(page,
3092 						ext4_normal_get_block_write,
3093 						wbc);
3094 }
3095 
3096 static int ext4_normal_writepage(struct page *page,
3097 				struct writeback_control *wbc)
3098 {
3099 	struct inode *inode = page->mapping->host;
3100 	loff_t size = i_size_read(inode);
3101 	loff_t len;
3102 
3103 	trace_mark(ext4_normal_writepage,
3104 		   "dev %s ino %lu page_index %lu",
3105 		   inode->i_sb->s_id, inode->i_ino, page->index);
3106 	J_ASSERT(PageLocked(page));
3107 	if (page->index == size >> PAGE_CACHE_SHIFT)
3108 		len = size & ~PAGE_CACHE_MASK;
3109 	else
3110 		len = PAGE_CACHE_SIZE;
3111 
3112 	if (page_has_buffers(page)) {
3113 		/* if page has buffers it should all be mapped
3114 		 * and allocated. If there are not buffers attached
3115 		 * to the page we know the page is dirty but it lost
3116 		 * buffers. That means that at some moment in time
3117 		 * after write_begin() / write_end() has been called
3118 		 * all buffers have been clean and thus they must have been
3119 		 * written at least once. So they are all mapped and we can
3120 		 * happily proceed with mapping them and writing the page.
3121 		 */
3122 		BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
3123 					ext4_bh_unmapped_or_delay));
3124 	}
3125 
3126 	if (!ext4_journal_current_handle())
3127 		return __ext4_normal_writepage(page, wbc);
3128 
3129 	redirty_page_for_writepage(wbc, page);
3130 	unlock_page(page);
3131 	return 0;
3132 }
3133 
3134 static int __ext4_journalled_writepage(struct page *page,
3135 				struct writeback_control *wbc)
3136 {
3137 	struct address_space *mapping = page->mapping;
3138 	struct inode *inode = mapping->host;
3139 	struct buffer_head *page_bufs;
3140 	handle_t *handle = NULL;
3141 	int ret = 0;
3142 	int err;
3143 
3144 	ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
3145 					ext4_normal_get_block_write);
3146 	if (ret != 0)
3147 		goto out_unlock;
3148 
3149 	page_bufs = page_buffers(page);
3150 	walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE, NULL,
3151 								bget_one);
3152 	/* As soon as we unlock the page, it can go away, but we have
3153 	 * references to buffers so we are safe */
3154 	unlock_page(page);
3155 
3156 	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
3157 	if (IS_ERR(handle)) {
3158 		ret = PTR_ERR(handle);
3159 		goto out;
3160 	}
3161 
3162 	ret = walk_page_buffers(handle, page_bufs, 0,
3163 			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
3164 
3165 	err = walk_page_buffers(handle, page_bufs, 0,
3166 				PAGE_CACHE_SIZE, NULL, write_end_fn);
3167 	if (ret == 0)
3168 		ret = err;
3169 	err = ext4_journal_stop(handle);
3170 	if (!ret)
3171 		ret = err;
3172 
3173 	walk_page_buffers(handle, page_bufs, 0,
3174 				PAGE_CACHE_SIZE, NULL, bput_one);
3175 	EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
3176 	goto out;
3177 
3178 out_unlock:
3179 	unlock_page(page);
3180 out:
3181 	return ret;
3182 }
3183 
3184 static int ext4_journalled_writepage(struct page *page,
3185 				struct writeback_control *wbc)
3186 {
3187 	struct inode *inode = page->mapping->host;
3188 	loff_t size = i_size_read(inode);
3189 	loff_t len;
3190 
3191 	trace_mark(ext4_journalled_writepage,
3192 		   "dev %s ino %lu page_index %lu",
3193 		   inode->i_sb->s_id, inode->i_ino, page->index);
3194 	J_ASSERT(PageLocked(page));
3195 	if (page->index == size >> PAGE_CACHE_SHIFT)
3196 		len = size & ~PAGE_CACHE_MASK;
3197 	else
3198 		len = PAGE_CACHE_SIZE;
3199 
3200 	if (page_has_buffers(page)) {
3201 		/* if page has buffers it should all be mapped
3202 		 * and allocated. If there are not buffers attached
3203 		 * to the page we know the page is dirty but it lost
3204 		 * buffers. That means that at some moment in time
3205 		 * after write_begin() / write_end() has been called
3206 		 * all buffers have been clean and thus they must have been
3207 		 * written at least once. So they are all mapped and we can
3208 		 * happily proceed with mapping them and writing the page.
3209 		 */
3210 		BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
3211 					ext4_bh_unmapped_or_delay));
3212 	}
3213 
3214 	if (ext4_journal_current_handle())
3215 		goto no_write;
3216 
3217 	if (PageChecked(page)) {
3218 		/*
3219 		 * It's mmapped pagecache.  Add buffers and journal it.  There
3220 		 * doesn't seem much point in redirtying the page here.
3221 		 */
3222 		ClearPageChecked(page);
3223 		return __ext4_journalled_writepage(page, wbc);
3224 	} else {
3225 		/*
3226 		 * It may be a page full of checkpoint-mode buffers.  We don't
3227 		 * really know unless we go poke around in the buffer_heads.
3228 		 * But block_write_full_page will do the right thing.
3229 		 */
3230 		return block_write_full_page(page,
3231 						ext4_normal_get_block_write,
3232 						wbc);
3233 	}
3234 no_write:
3235 	redirty_page_for_writepage(wbc, page);
3236 	unlock_page(page);
3237 	return 0;
3238 }
3239 
3240 static int ext4_readpage(struct file *file, struct page *page)
3241 {
3242 	return mpage_readpage(page, ext4_get_block);
3243 }
3244 
3245 static int
3246 ext4_readpages(struct file *file, struct address_space *mapping,
3247 		struct list_head *pages, unsigned nr_pages)
3248 {
3249 	return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
3250 }
3251 
3252 static void ext4_invalidatepage(struct page *page, unsigned long offset)
3253 {
3254 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3255 
3256 	/*
3257 	 * If it's a full truncate we just forget about the pending dirtying
3258 	 */
3259 	if (offset == 0)
3260 		ClearPageChecked(page);
3261 
3262 	if (journal)
3263 		jbd2_journal_invalidatepage(journal, page, offset);
3264 	else
3265 		block_invalidatepage(page, offset);
3266 }
3267 
3268 static int ext4_releasepage(struct page *page, gfp_t wait)
3269 {
3270 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3271 
3272 	WARN_ON(PageChecked(page));
3273 	if (!page_has_buffers(page))
3274 		return 0;
3275 	if (journal)
3276 		return jbd2_journal_try_to_free_buffers(journal, page, wait);
3277 	else
3278 		return try_to_free_buffers(page);
3279 }
3280 
3281 /*
3282  * If the O_DIRECT write will extend the file then add this inode to the
3283  * orphan list.  So recovery will truncate it back to the original size
3284  * if the machine crashes during the write.
3285  *
3286  * If the O_DIRECT write is intantiating holes inside i_size and the machine
3287  * crashes then stale disk data _may_ be exposed inside the file. But current
3288  * VFS code falls back into buffered path in that case so we are safe.
3289  */
3290 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
3291 			const struct iovec *iov, loff_t offset,
3292 			unsigned long nr_segs)
3293 {
3294 	struct file *file = iocb->ki_filp;
3295 	struct inode *inode = file->f_mapping->host;
3296 	struct ext4_inode_info *ei = EXT4_I(inode);
3297 	handle_t *handle;
3298 	ssize_t ret;
3299 	int orphan = 0;
3300 	size_t count = iov_length(iov, nr_segs);
3301 
3302 	if (rw == WRITE) {
3303 		loff_t final_size = offset + count;
3304 
3305 		if (final_size > inode->i_size) {
3306 			/* Credits for sb + inode write */
3307 			handle = ext4_journal_start(inode, 2);
3308 			if (IS_ERR(handle)) {
3309 				ret = PTR_ERR(handle);
3310 				goto out;
3311 			}
3312 			ret = ext4_orphan_add(handle, inode);
3313 			if (ret) {
3314 				ext4_journal_stop(handle);
3315 				goto out;
3316 			}
3317 			orphan = 1;
3318 			ei->i_disksize = inode->i_size;
3319 			ext4_journal_stop(handle);
3320 		}
3321 	}
3322 
3323 	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
3324 				 offset, nr_segs,
3325 				 ext4_get_block, NULL);
3326 
3327 	if (orphan) {
3328 		int err;
3329 
3330 		/* Credits for sb + inode write */
3331 		handle = ext4_journal_start(inode, 2);
3332 		if (IS_ERR(handle)) {
3333 			/* This is really bad luck. We've written the data
3334 			 * but cannot extend i_size. Bail out and pretend
3335 			 * the write failed... */
3336 			ret = PTR_ERR(handle);
3337 			goto out;
3338 		}
3339 		if (inode->i_nlink)
3340 			ext4_orphan_del(handle, inode);
3341 		if (ret > 0) {
3342 			loff_t end = offset + ret;
3343 			if (end > inode->i_size) {
3344 				ei->i_disksize = end;
3345 				i_size_write(inode, end);
3346 				/*
3347 				 * We're going to return a positive `ret'
3348 				 * here due to non-zero-length I/O, so there's
3349 				 * no way of reporting error returns from
3350 				 * ext4_mark_inode_dirty() to userspace.  So
3351 				 * ignore it.
3352 				 */
3353 				ext4_mark_inode_dirty(handle, inode);
3354 			}
3355 		}
3356 		err = ext4_journal_stop(handle);
3357 		if (ret == 0)
3358 			ret = err;
3359 	}
3360 out:
3361 	return ret;
3362 }
3363 
3364 /*
3365  * Pages can be marked dirty completely asynchronously from ext4's journalling
3366  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
3367  * much here because ->set_page_dirty is called under VFS locks.  The page is
3368  * not necessarily locked.
3369  *
3370  * We cannot just dirty the page and leave attached buffers clean, because the
3371  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
3372  * or jbddirty because all the journalling code will explode.
3373  *
3374  * So what we do is to mark the page "pending dirty" and next time writepage
3375  * is called, propagate that into the buffers appropriately.
3376  */
3377 static int ext4_journalled_set_page_dirty(struct page *page)
3378 {
3379 	SetPageChecked(page);
3380 	return __set_page_dirty_nobuffers(page);
3381 }
3382 
3383 static const struct address_space_operations ext4_ordered_aops = {
3384 	.readpage		= ext4_readpage,
3385 	.readpages		= ext4_readpages,
3386 	.writepage		= ext4_normal_writepage,
3387 	.sync_page		= block_sync_page,
3388 	.write_begin		= ext4_write_begin,
3389 	.write_end		= ext4_ordered_write_end,
3390 	.bmap			= ext4_bmap,
3391 	.invalidatepage		= ext4_invalidatepage,
3392 	.releasepage		= ext4_releasepage,
3393 	.direct_IO		= ext4_direct_IO,
3394 	.migratepage		= buffer_migrate_page,
3395 	.is_partially_uptodate  = block_is_partially_uptodate,
3396 };
3397 
3398 static const struct address_space_operations ext4_writeback_aops = {
3399 	.readpage		= ext4_readpage,
3400 	.readpages		= ext4_readpages,
3401 	.writepage		= ext4_normal_writepage,
3402 	.sync_page		= block_sync_page,
3403 	.write_begin		= ext4_write_begin,
3404 	.write_end		= ext4_writeback_write_end,
3405 	.bmap			= ext4_bmap,
3406 	.invalidatepage		= ext4_invalidatepage,
3407 	.releasepage		= ext4_releasepage,
3408 	.direct_IO		= ext4_direct_IO,
3409 	.migratepage		= buffer_migrate_page,
3410 	.is_partially_uptodate  = block_is_partially_uptodate,
3411 };
3412 
3413 static const struct address_space_operations ext4_journalled_aops = {
3414 	.readpage		= ext4_readpage,
3415 	.readpages		= ext4_readpages,
3416 	.writepage		= ext4_journalled_writepage,
3417 	.sync_page		= block_sync_page,
3418 	.write_begin		= ext4_write_begin,
3419 	.write_end		= ext4_journalled_write_end,
3420 	.set_page_dirty		= ext4_journalled_set_page_dirty,
3421 	.bmap			= ext4_bmap,
3422 	.invalidatepage		= ext4_invalidatepage,
3423 	.releasepage		= ext4_releasepage,
3424 	.is_partially_uptodate  = block_is_partially_uptodate,
3425 };
3426 
3427 static const struct address_space_operations ext4_da_aops = {
3428 	.readpage		= ext4_readpage,
3429 	.readpages		= ext4_readpages,
3430 	.writepage		= ext4_da_writepage,
3431 	.writepages		= ext4_da_writepages,
3432 	.sync_page		= block_sync_page,
3433 	.write_begin		= ext4_da_write_begin,
3434 	.write_end		= ext4_da_write_end,
3435 	.bmap			= ext4_bmap,
3436 	.invalidatepage		= ext4_da_invalidatepage,
3437 	.releasepage		= ext4_releasepage,
3438 	.direct_IO		= ext4_direct_IO,
3439 	.migratepage		= buffer_migrate_page,
3440 	.is_partially_uptodate  = block_is_partially_uptodate,
3441 };
3442 
3443 void ext4_set_aops(struct inode *inode)
3444 {
3445 	if (ext4_should_order_data(inode) &&
3446 		test_opt(inode->i_sb, DELALLOC))
3447 		inode->i_mapping->a_ops = &ext4_da_aops;
3448 	else if (ext4_should_order_data(inode))
3449 		inode->i_mapping->a_ops = &ext4_ordered_aops;
3450 	else if (ext4_should_writeback_data(inode) &&
3451 		 test_opt(inode->i_sb, DELALLOC))
3452 		inode->i_mapping->a_ops = &ext4_da_aops;
3453 	else if (ext4_should_writeback_data(inode))
3454 		inode->i_mapping->a_ops = &ext4_writeback_aops;
3455 	else
3456 		inode->i_mapping->a_ops = &ext4_journalled_aops;
3457 }
3458 
3459 /*
3460  * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3461  * up to the end of the block which corresponds to `from'.
3462  * This required during truncate. We need to physically zero the tail end
3463  * of that block so it doesn't yield old data if the file is later grown.
3464  */
3465 int ext4_block_truncate_page(handle_t *handle,
3466 		struct address_space *mapping, loff_t from)
3467 {
3468 	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
3469 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
3470 	unsigned blocksize, length, pos;
3471 	ext4_lblk_t iblock;
3472 	struct inode *inode = mapping->host;
3473 	struct buffer_head *bh;
3474 	struct page *page;
3475 	int err = 0;
3476 
3477 	page = grab_cache_page(mapping, from >> PAGE_CACHE_SHIFT);
3478 	if (!page)
3479 		return -EINVAL;
3480 
3481 	blocksize = inode->i_sb->s_blocksize;
3482 	length = blocksize - (offset & (blocksize - 1));
3483 	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
3484 
3485 	/*
3486 	 * For "nobh" option,  we can only work if we don't need to
3487 	 * read-in the page - otherwise we create buffers to do the IO.
3488 	 */
3489 	if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
3490 	     ext4_should_writeback_data(inode) && PageUptodate(page)) {
3491 		zero_user(page, offset, length);
3492 		set_page_dirty(page);
3493 		goto unlock;
3494 	}
3495 
3496 	if (!page_has_buffers(page))
3497 		create_empty_buffers(page, blocksize, 0);
3498 
3499 	/* Find the buffer that contains "offset" */
3500 	bh = page_buffers(page);
3501 	pos = blocksize;
3502 	while (offset >= pos) {
3503 		bh = bh->b_this_page;
3504 		iblock++;
3505 		pos += blocksize;
3506 	}
3507 
3508 	err = 0;
3509 	if (buffer_freed(bh)) {
3510 		BUFFER_TRACE(bh, "freed: skip");
3511 		goto unlock;
3512 	}
3513 
3514 	if (!buffer_mapped(bh)) {
3515 		BUFFER_TRACE(bh, "unmapped");
3516 		ext4_get_block(inode, iblock, bh, 0);
3517 		/* unmapped? It's a hole - nothing to do */
3518 		if (!buffer_mapped(bh)) {
3519 			BUFFER_TRACE(bh, "still unmapped");
3520 			goto unlock;
3521 		}
3522 	}
3523 
3524 	/* Ok, it's mapped. Make sure it's up-to-date */
3525 	if (PageUptodate(page))
3526 		set_buffer_uptodate(bh);
3527 
3528 	if (!buffer_uptodate(bh)) {
3529 		err = -EIO;
3530 		ll_rw_block(READ, 1, &bh);
3531 		wait_on_buffer(bh);
3532 		/* Uhhuh. Read error. Complain and punt. */
3533 		if (!buffer_uptodate(bh))
3534 			goto unlock;
3535 	}
3536 
3537 	if (ext4_should_journal_data(inode)) {
3538 		BUFFER_TRACE(bh, "get write access");
3539 		err = ext4_journal_get_write_access(handle, bh);
3540 		if (err)
3541 			goto unlock;
3542 	}
3543 
3544 	zero_user(page, offset, length);
3545 
3546 	BUFFER_TRACE(bh, "zeroed end of block");
3547 
3548 	err = 0;
3549 	if (ext4_should_journal_data(inode)) {
3550 		err = ext4_handle_dirty_metadata(handle, inode, bh);
3551 	} else {
3552 		if (ext4_should_order_data(inode))
3553 			err = ext4_jbd2_file_inode(handle, inode);
3554 		mark_buffer_dirty(bh);
3555 	}
3556 
3557 unlock:
3558 	unlock_page(page);
3559 	page_cache_release(page);
3560 	return err;
3561 }
3562 
3563 /*
3564  * Probably it should be a library function... search for first non-zero word
3565  * or memcmp with zero_page, whatever is better for particular architecture.
3566  * Linus?
3567  */
3568 static inline int all_zeroes(__le32 *p, __le32 *q)
3569 {
3570 	while (p < q)
3571 		if (*p++)
3572 			return 0;
3573 	return 1;
3574 }
3575 
3576 /**
3577  *	ext4_find_shared - find the indirect blocks for partial truncation.
3578  *	@inode:	  inode in question
3579  *	@depth:	  depth of the affected branch
3580  *	@offsets: offsets of pointers in that branch (see ext4_block_to_path)
3581  *	@chain:	  place to store the pointers to partial indirect blocks
3582  *	@top:	  place to the (detached) top of branch
3583  *
3584  *	This is a helper function used by ext4_truncate().
3585  *
3586  *	When we do truncate() we may have to clean the ends of several
3587  *	indirect blocks but leave the blocks themselves alive. Block is
3588  *	partially truncated if some data below the new i_size is refered
3589  *	from it (and it is on the path to the first completely truncated
3590  *	data block, indeed).  We have to free the top of that path along
3591  *	with everything to the right of the path. Since no allocation
3592  *	past the truncation point is possible until ext4_truncate()
3593  *	finishes, we may safely do the latter, but top of branch may
3594  *	require special attention - pageout below the truncation point
3595  *	might try to populate it.
3596  *
3597  *	We atomically detach the top of branch from the tree, store the
3598  *	block number of its root in *@top, pointers to buffer_heads of
3599  *	partially truncated blocks - in @chain[].bh and pointers to
3600  *	their last elements that should not be removed - in
3601  *	@chain[].p. Return value is the pointer to last filled element
3602  *	of @chain.
3603  *
3604  *	The work left to caller to do the actual freeing of subtrees:
3605  *		a) free the subtree starting from *@top
3606  *		b) free the subtrees whose roots are stored in
3607  *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
3608  *		c) free the subtrees growing from the inode past the @chain[0].
3609  *			(no partially truncated stuff there).  */
3610 
3611 static Indirect *ext4_find_shared(struct inode *inode, int depth,
3612 			ext4_lblk_t offsets[4], Indirect chain[4], __le32 *top)
3613 {
3614 	Indirect *partial, *p;
3615 	int k, err;
3616 
3617 	*top = 0;
3618 	/* Make k index the deepest non-null offest + 1 */
3619 	for (k = depth; k > 1 && !offsets[k-1]; k--)
3620 		;
3621 	partial = ext4_get_branch(inode, k, offsets, chain, &err);
3622 	/* Writer: pointers */
3623 	if (!partial)
3624 		partial = chain + k-1;
3625 	/*
3626 	 * If the branch acquired continuation since we've looked at it -
3627 	 * fine, it should all survive and (new) top doesn't belong to us.
3628 	 */
3629 	if (!partial->key && *partial->p)
3630 		/* Writer: end */
3631 		goto no_top;
3632 	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
3633 		;
3634 	/*
3635 	 * OK, we've found the last block that must survive. The rest of our
3636 	 * branch should be detached before unlocking. However, if that rest
3637 	 * of branch is all ours and does not grow immediately from the inode
3638 	 * it's easier to cheat and just decrement partial->p.
3639 	 */
3640 	if (p == chain + k - 1 && p > chain) {
3641 		p->p--;
3642 	} else {
3643 		*top = *p->p;
3644 		/* Nope, don't do this in ext4.  Must leave the tree intact */
3645 #if 0
3646 		*p->p = 0;
3647 #endif
3648 	}
3649 	/* Writer: end */
3650 
3651 	while (partial > p) {
3652 		brelse(partial->bh);
3653 		partial--;
3654 	}
3655 no_top:
3656 	return partial;
3657 }
3658 
3659 /*
3660  * Zero a number of block pointers in either an inode or an indirect block.
3661  * If we restart the transaction we must again get write access to the
3662  * indirect block for further modification.
3663  *
3664  * We release `count' blocks on disk, but (last - first) may be greater
3665  * than `count' because there can be holes in there.
3666  */
3667 static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
3668 		struct buffer_head *bh, ext4_fsblk_t block_to_free,
3669 		unsigned long count, __le32 *first, __le32 *last)
3670 {
3671 	__le32 *p;
3672 	if (try_to_extend_transaction(handle, inode)) {
3673 		if (bh) {
3674 			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
3675 			ext4_handle_dirty_metadata(handle, inode, bh);
3676 		}
3677 		ext4_mark_inode_dirty(handle, inode);
3678 		ext4_journal_test_restart(handle, inode);
3679 		if (bh) {
3680 			BUFFER_TRACE(bh, "retaking write access");
3681 			ext4_journal_get_write_access(handle, bh);
3682 		}
3683 	}
3684 
3685 	/*
3686 	 * Any buffers which are on the journal will be in memory. We find
3687 	 * them on the hash table so jbd2_journal_revoke() will run jbd2_journal_forget()
3688 	 * on them.  We've already detached each block from the file, so
3689 	 * bforget() in jbd2_journal_forget() should be safe.
3690 	 *
3691 	 * AKPM: turn on bforget in jbd2_journal_forget()!!!
3692 	 */
3693 	for (p = first; p < last; p++) {
3694 		u32 nr = le32_to_cpu(*p);
3695 		if (nr) {
3696 			struct buffer_head *tbh;
3697 
3698 			*p = 0;
3699 			tbh = sb_find_get_block(inode->i_sb, nr);
3700 			ext4_forget(handle, 0, inode, tbh, nr);
3701 		}
3702 	}
3703 
3704 	ext4_free_blocks(handle, inode, block_to_free, count, 0);
3705 }
3706 
3707 /**
3708  * ext4_free_data - free a list of data blocks
3709  * @handle:	handle for this transaction
3710  * @inode:	inode we are dealing with
3711  * @this_bh:	indirect buffer_head which contains *@first and *@last
3712  * @first:	array of block numbers
3713  * @last:	points immediately past the end of array
3714  *
3715  * We are freeing all blocks refered from that array (numbers are stored as
3716  * little-endian 32-bit) and updating @inode->i_blocks appropriately.
3717  *
3718  * We accumulate contiguous runs of blocks to free.  Conveniently, if these
3719  * blocks are contiguous then releasing them at one time will only affect one
3720  * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
3721  * actually use a lot of journal space.
3722  *
3723  * @this_bh will be %NULL if @first and @last point into the inode's direct
3724  * block pointers.
3725  */
3726 static void ext4_free_data(handle_t *handle, struct inode *inode,
3727 			   struct buffer_head *this_bh,
3728 			   __le32 *first, __le32 *last)
3729 {
3730 	ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
3731 	unsigned long count = 0;	    /* Number of blocks in the run */
3732 	__le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
3733 					       corresponding to
3734 					       block_to_free */
3735 	ext4_fsblk_t nr;		    /* Current block # */
3736 	__le32 *p;			    /* Pointer into inode/ind
3737 					       for current block */
3738 	int err;
3739 
3740 	if (this_bh) {				/* For indirect block */
3741 		BUFFER_TRACE(this_bh, "get_write_access");
3742 		err = ext4_journal_get_write_access(handle, this_bh);
3743 		/* Important: if we can't update the indirect pointers
3744 		 * to the blocks, we can't free them. */
3745 		if (err)
3746 			return;
3747 	}
3748 
3749 	for (p = first; p < last; p++) {
3750 		nr = le32_to_cpu(*p);
3751 		if (nr) {
3752 			/* accumulate blocks to free if they're contiguous */
3753 			if (count == 0) {
3754 				block_to_free = nr;
3755 				block_to_free_p = p;
3756 				count = 1;
3757 			} else if (nr == block_to_free + count) {
3758 				count++;
3759 			} else {
3760 				ext4_clear_blocks(handle, inode, this_bh,
3761 						  block_to_free,
3762 						  count, block_to_free_p, p);
3763 				block_to_free = nr;
3764 				block_to_free_p = p;
3765 				count = 1;
3766 			}
3767 		}
3768 	}
3769 
3770 	if (count > 0)
3771 		ext4_clear_blocks(handle, inode, this_bh, block_to_free,
3772 				  count, block_to_free_p, p);
3773 
3774 	if (this_bh) {
3775 		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
3776 
3777 		/*
3778 		 * The buffer head should have an attached journal head at this
3779 		 * point. However, if the data is corrupted and an indirect
3780 		 * block pointed to itself, it would have been detached when
3781 		 * the block was cleared. Check for this instead of OOPSing.
3782 		 */
3783 		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
3784 			ext4_handle_dirty_metadata(handle, inode, this_bh);
3785 		else
3786 			ext4_error(inode->i_sb, __func__,
3787 				   "circular indirect block detected, "
3788 				   "inode=%lu, block=%llu",
3789 				   inode->i_ino,
3790 				   (unsigned long long) this_bh->b_blocknr);
3791 	}
3792 }
3793 
3794 /**
3795  *	ext4_free_branches - free an array of branches
3796  *	@handle: JBD handle for this transaction
3797  *	@inode:	inode we are dealing with
3798  *	@parent_bh: the buffer_head which contains *@first and *@last
3799  *	@first:	array of block numbers
3800  *	@last:	pointer immediately past the end of array
3801  *	@depth:	depth of the branches to free
3802  *
3803  *	We are freeing all blocks refered from these branches (numbers are
3804  *	stored as little-endian 32-bit) and updating @inode->i_blocks
3805  *	appropriately.
3806  */
3807 static void ext4_free_branches(handle_t *handle, struct inode *inode,
3808 			       struct buffer_head *parent_bh,
3809 			       __le32 *first, __le32 *last, int depth)
3810 {
3811 	ext4_fsblk_t nr;
3812 	__le32 *p;
3813 
3814 	if (ext4_handle_is_aborted(handle))
3815 		return;
3816 
3817 	if (depth--) {
3818 		struct buffer_head *bh;
3819 		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
3820 		p = last;
3821 		while (--p >= first) {
3822 			nr = le32_to_cpu(*p);
3823 			if (!nr)
3824 				continue;		/* A hole */
3825 
3826 			/* Go read the buffer for the next level down */
3827 			bh = sb_bread(inode->i_sb, nr);
3828 
3829 			/*
3830 			 * A read failure? Report error and clear slot
3831 			 * (should be rare).
3832 			 */
3833 			if (!bh) {
3834 				ext4_error(inode->i_sb, "ext4_free_branches",
3835 					   "Read failure, inode=%lu, block=%llu",
3836 					   inode->i_ino, nr);
3837 				continue;
3838 			}
3839 
3840 			/* This zaps the entire block.  Bottom up. */
3841 			BUFFER_TRACE(bh, "free child branches");
3842 			ext4_free_branches(handle, inode, bh,
3843 					(__le32 *) bh->b_data,
3844 					(__le32 *) bh->b_data + addr_per_block,
3845 					depth);
3846 
3847 			/*
3848 			 * We've probably journalled the indirect block several
3849 			 * times during the truncate.  But it's no longer
3850 			 * needed and we now drop it from the transaction via
3851 			 * jbd2_journal_revoke().
3852 			 *
3853 			 * That's easy if it's exclusively part of this
3854 			 * transaction.  But if it's part of the committing
3855 			 * transaction then jbd2_journal_forget() will simply
3856 			 * brelse() it.  That means that if the underlying
3857 			 * block is reallocated in ext4_get_block(),
3858 			 * unmap_underlying_metadata() will find this block
3859 			 * and will try to get rid of it.  damn, damn.
3860 			 *
3861 			 * If this block has already been committed to the
3862 			 * journal, a revoke record will be written.  And
3863 			 * revoke records must be emitted *before* clearing
3864 			 * this block's bit in the bitmaps.
3865 			 */
3866 			ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
3867 
3868 			/*
3869 			 * Everything below this this pointer has been
3870 			 * released.  Now let this top-of-subtree go.
3871 			 *
3872 			 * We want the freeing of this indirect block to be
3873 			 * atomic in the journal with the updating of the
3874 			 * bitmap block which owns it.  So make some room in
3875 			 * the journal.
3876 			 *
3877 			 * We zero the parent pointer *after* freeing its
3878 			 * pointee in the bitmaps, so if extend_transaction()
3879 			 * for some reason fails to put the bitmap changes and
3880 			 * the release into the same transaction, recovery
3881 			 * will merely complain about releasing a free block,
3882 			 * rather than leaking blocks.
3883 			 */
3884 			if (ext4_handle_is_aborted(handle))
3885 				return;
3886 			if (try_to_extend_transaction(handle, inode)) {
3887 				ext4_mark_inode_dirty(handle, inode);
3888 				ext4_journal_test_restart(handle, inode);
3889 			}
3890 
3891 			ext4_free_blocks(handle, inode, nr, 1, 1);
3892 
3893 			if (parent_bh) {
3894 				/*
3895 				 * The block which we have just freed is
3896 				 * pointed to by an indirect block: journal it
3897 				 */
3898 				BUFFER_TRACE(parent_bh, "get_write_access");
3899 				if (!ext4_journal_get_write_access(handle,
3900 								   parent_bh)){
3901 					*p = 0;
3902 					BUFFER_TRACE(parent_bh,
3903 					"call ext4_handle_dirty_metadata");
3904 					ext4_handle_dirty_metadata(handle,
3905 								   inode,
3906 								   parent_bh);
3907 				}
3908 			}
3909 		}
3910 	} else {
3911 		/* We have reached the bottom of the tree. */
3912 		BUFFER_TRACE(parent_bh, "free data blocks");
3913 		ext4_free_data(handle, inode, parent_bh, first, last);
3914 	}
3915 }
3916 
3917 int ext4_can_truncate(struct inode *inode)
3918 {
3919 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
3920 		return 0;
3921 	if (S_ISREG(inode->i_mode))
3922 		return 1;
3923 	if (S_ISDIR(inode->i_mode))
3924 		return 1;
3925 	if (S_ISLNK(inode->i_mode))
3926 		return !ext4_inode_is_fast_symlink(inode);
3927 	return 0;
3928 }
3929 
3930 /*
3931  * ext4_truncate()
3932  *
3933  * We block out ext4_get_block() block instantiations across the entire
3934  * transaction, and VFS/VM ensures that ext4_truncate() cannot run
3935  * simultaneously on behalf of the same inode.
3936  *
3937  * As we work through the truncate and commmit bits of it to the journal there
3938  * is one core, guiding principle: the file's tree must always be consistent on
3939  * disk.  We must be able to restart the truncate after a crash.
3940  *
3941  * The file's tree may be transiently inconsistent in memory (although it
3942  * probably isn't), but whenever we close off and commit a journal transaction,
3943  * the contents of (the filesystem + the journal) must be consistent and
3944  * restartable.  It's pretty simple, really: bottom up, right to left (although
3945  * left-to-right works OK too).
3946  *
3947  * Note that at recovery time, journal replay occurs *before* the restart of
3948  * truncate against the orphan inode list.
3949  *
3950  * The committed inode has the new, desired i_size (which is the same as
3951  * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
3952  * that this inode's truncate did not complete and it will again call
3953  * ext4_truncate() to have another go.  So there will be instantiated blocks
3954  * to the right of the truncation point in a crashed ext4 filesystem.  But
3955  * that's fine - as long as they are linked from the inode, the post-crash
3956  * ext4_truncate() run will find them and release them.
3957  */
3958 void ext4_truncate(struct inode *inode)
3959 {
3960 	handle_t *handle;
3961 	struct ext4_inode_info *ei = EXT4_I(inode);
3962 	__le32 *i_data = ei->i_data;
3963 	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
3964 	struct address_space *mapping = inode->i_mapping;
3965 	ext4_lblk_t offsets[4];
3966 	Indirect chain[4];
3967 	Indirect *partial;
3968 	__le32 nr = 0;
3969 	int n;
3970 	ext4_lblk_t last_block;
3971 	unsigned blocksize = inode->i_sb->s_blocksize;
3972 
3973 	if (!ext4_can_truncate(inode))
3974 		return;
3975 
3976 	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
3977 		ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE;
3978 
3979 	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
3980 		ext4_ext_truncate(inode);
3981 		return;
3982 	}
3983 
3984 	handle = start_transaction(inode);
3985 	if (IS_ERR(handle))
3986 		return;		/* AKPM: return what? */
3987 
3988 	last_block = (inode->i_size + blocksize-1)
3989 					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
3990 
3991 	if (inode->i_size & (blocksize - 1))
3992 		if (ext4_block_truncate_page(handle, mapping, inode->i_size))
3993 			goto out_stop;
3994 
3995 	n = ext4_block_to_path(inode, last_block, offsets, NULL);
3996 	if (n == 0)
3997 		goto out_stop;	/* error */
3998 
3999 	/*
4000 	 * OK.  This truncate is going to happen.  We add the inode to the
4001 	 * orphan list, so that if this truncate spans multiple transactions,
4002 	 * and we crash, we will resume the truncate when the filesystem
4003 	 * recovers.  It also marks the inode dirty, to catch the new size.
4004 	 *
4005 	 * Implication: the file must always be in a sane, consistent
4006 	 * truncatable state while each transaction commits.
4007 	 */
4008 	if (ext4_orphan_add(handle, inode))
4009 		goto out_stop;
4010 
4011 	/*
4012 	 * From here we block out all ext4_get_block() callers who want to
4013 	 * modify the block allocation tree.
4014 	 */
4015 	down_write(&ei->i_data_sem);
4016 
4017 	ext4_discard_preallocations(inode);
4018 
4019 	/*
4020 	 * The orphan list entry will now protect us from any crash which
4021 	 * occurs before the truncate completes, so it is now safe to propagate
4022 	 * the new, shorter inode size (held for now in i_size) into the
4023 	 * on-disk inode. We do this via i_disksize, which is the value which
4024 	 * ext4 *really* writes onto the disk inode.
4025 	 */
4026 	ei->i_disksize = inode->i_size;
4027 
4028 	if (n == 1) {		/* direct blocks */
4029 		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
4030 			       i_data + EXT4_NDIR_BLOCKS);
4031 		goto do_indirects;
4032 	}
4033 
4034 	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
4035 	/* Kill the top of shared branch (not detached) */
4036 	if (nr) {
4037 		if (partial == chain) {
4038 			/* Shared branch grows from the inode */
4039 			ext4_free_branches(handle, inode, NULL,
4040 					   &nr, &nr+1, (chain+n-1) - partial);
4041 			*partial->p = 0;
4042 			/*
4043 			 * We mark the inode dirty prior to restart,
4044 			 * and prior to stop.  No need for it here.
4045 			 */
4046 		} else {
4047 			/* Shared branch grows from an indirect block */
4048 			BUFFER_TRACE(partial->bh, "get_write_access");
4049 			ext4_free_branches(handle, inode, partial->bh,
4050 					partial->p,
4051 					partial->p+1, (chain+n-1) - partial);
4052 		}
4053 	}
4054 	/* Clear the ends of indirect blocks on the shared branch */
4055 	while (partial > chain) {
4056 		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
4057 				   (__le32*)partial->bh->b_data+addr_per_block,
4058 				   (chain+n-1) - partial);
4059 		BUFFER_TRACE(partial->bh, "call brelse");
4060 		brelse (partial->bh);
4061 		partial--;
4062 	}
4063 do_indirects:
4064 	/* Kill the remaining (whole) subtrees */
4065 	switch (offsets[0]) {
4066 	default:
4067 		nr = i_data[EXT4_IND_BLOCK];
4068 		if (nr) {
4069 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
4070 			i_data[EXT4_IND_BLOCK] = 0;
4071 		}
4072 	case EXT4_IND_BLOCK:
4073 		nr = i_data[EXT4_DIND_BLOCK];
4074 		if (nr) {
4075 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
4076 			i_data[EXT4_DIND_BLOCK] = 0;
4077 		}
4078 	case EXT4_DIND_BLOCK:
4079 		nr = i_data[EXT4_TIND_BLOCK];
4080 		if (nr) {
4081 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
4082 			i_data[EXT4_TIND_BLOCK] = 0;
4083 		}
4084 	case EXT4_TIND_BLOCK:
4085 		;
4086 	}
4087 
4088 	up_write(&ei->i_data_sem);
4089 	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4090 	ext4_mark_inode_dirty(handle, inode);
4091 
4092 	/*
4093 	 * In a multi-transaction truncate, we only make the final transaction
4094 	 * synchronous
4095 	 */
4096 	if (IS_SYNC(inode))
4097 		ext4_handle_sync(handle);
4098 out_stop:
4099 	/*
4100 	 * If this was a simple ftruncate(), and the file will remain alive
4101 	 * then we need to clear up the orphan record which we created above.
4102 	 * However, if this was a real unlink then we were called by
4103 	 * ext4_delete_inode(), and we allow that function to clean up the
4104 	 * orphan info for us.
4105 	 */
4106 	if (inode->i_nlink)
4107 		ext4_orphan_del(handle, inode);
4108 
4109 	ext4_journal_stop(handle);
4110 }
4111 
4112 /*
4113  * ext4_get_inode_loc returns with an extra refcount against the inode's
4114  * underlying buffer_head on success. If 'in_mem' is true, we have all
4115  * data in memory that is needed to recreate the on-disk version of this
4116  * inode.
4117  */
4118 static int __ext4_get_inode_loc(struct inode *inode,
4119 				struct ext4_iloc *iloc, int in_mem)
4120 {
4121 	struct ext4_group_desc	*gdp;
4122 	struct buffer_head	*bh;
4123 	struct super_block	*sb = inode->i_sb;
4124 	ext4_fsblk_t		block;
4125 	int			inodes_per_block, inode_offset;
4126 
4127 	iloc->bh = NULL;
4128 	if (!ext4_valid_inum(sb, inode->i_ino))
4129 		return -EIO;
4130 
4131 	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
4132 	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
4133 	if (!gdp)
4134 		return -EIO;
4135 
4136 	/*
4137 	 * Figure out the offset within the block group inode table
4138 	 */
4139 	inodes_per_block = (EXT4_BLOCK_SIZE(sb) / EXT4_INODE_SIZE(sb));
4140 	inode_offset = ((inode->i_ino - 1) %
4141 			EXT4_INODES_PER_GROUP(sb));
4142 	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
4143 	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
4144 
4145 	bh = sb_getblk(sb, block);
4146 	if (!bh) {
4147 		ext4_error(sb, "ext4_get_inode_loc", "unable to read "
4148 			   "inode block - inode=%lu, block=%llu",
4149 			   inode->i_ino, block);
4150 		return -EIO;
4151 	}
4152 	if (!buffer_uptodate(bh)) {
4153 		lock_buffer(bh);
4154 
4155 		/*
4156 		 * If the buffer has the write error flag, we have failed
4157 		 * to write out another inode in the same block.  In this
4158 		 * case, we don't have to read the block because we may
4159 		 * read the old inode data successfully.
4160 		 */
4161 		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
4162 			set_buffer_uptodate(bh);
4163 
4164 		if (buffer_uptodate(bh)) {
4165 			/* someone brought it uptodate while we waited */
4166 			unlock_buffer(bh);
4167 			goto has_buffer;
4168 		}
4169 
4170 		/*
4171 		 * If we have all information of the inode in memory and this
4172 		 * is the only valid inode in the block, we need not read the
4173 		 * block.
4174 		 */
4175 		if (in_mem) {
4176 			struct buffer_head *bitmap_bh;
4177 			int i, start;
4178 
4179 			start = inode_offset & ~(inodes_per_block - 1);
4180 
4181 			/* Is the inode bitmap in cache? */
4182 			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
4183 			if (!bitmap_bh)
4184 				goto make_io;
4185 
4186 			/*
4187 			 * If the inode bitmap isn't in cache then the
4188 			 * optimisation may end up performing two reads instead
4189 			 * of one, so skip it.
4190 			 */
4191 			if (!buffer_uptodate(bitmap_bh)) {
4192 				brelse(bitmap_bh);
4193 				goto make_io;
4194 			}
4195 			for (i = start; i < start + inodes_per_block; i++) {
4196 				if (i == inode_offset)
4197 					continue;
4198 				if (ext4_test_bit(i, bitmap_bh->b_data))
4199 					break;
4200 			}
4201 			brelse(bitmap_bh);
4202 			if (i == start + inodes_per_block) {
4203 				/* all other inodes are free, so skip I/O */
4204 				memset(bh->b_data, 0, bh->b_size);
4205 				set_buffer_uptodate(bh);
4206 				unlock_buffer(bh);
4207 				goto has_buffer;
4208 			}
4209 		}
4210 
4211 make_io:
4212 		/*
4213 		 * If we need to do any I/O, try to pre-readahead extra
4214 		 * blocks from the inode table.
4215 		 */
4216 		if (EXT4_SB(sb)->s_inode_readahead_blks) {
4217 			ext4_fsblk_t b, end, table;
4218 			unsigned num;
4219 
4220 			table = ext4_inode_table(sb, gdp);
4221 			/* s_inode_readahead_blks is always a power of 2 */
4222 			b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
4223 			if (table > b)
4224 				b = table;
4225 			end = b + EXT4_SB(sb)->s_inode_readahead_blks;
4226 			num = EXT4_INODES_PER_GROUP(sb);
4227 			if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
4228 				       EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
4229 				num -= ext4_itable_unused_count(sb, gdp);
4230 			table += num / inodes_per_block;
4231 			if (end > table)
4232 				end = table;
4233 			while (b <= end)
4234 				sb_breadahead(sb, b++);
4235 		}
4236 
4237 		/*
4238 		 * There are other valid inodes in the buffer, this inode
4239 		 * has in-inode xattrs, or we don't have this inode in memory.
4240 		 * Read the block from disk.
4241 		 */
4242 		get_bh(bh);
4243 		bh->b_end_io = end_buffer_read_sync;
4244 		submit_bh(READ_META, bh);
4245 		wait_on_buffer(bh);
4246 		if (!buffer_uptodate(bh)) {
4247 			ext4_error(sb, __func__,
4248 				   "unable to read inode block - inode=%lu, "
4249 				   "block=%llu", inode->i_ino, block);
4250 			brelse(bh);
4251 			return -EIO;
4252 		}
4253 	}
4254 has_buffer:
4255 	iloc->bh = bh;
4256 	return 0;
4257 }
4258 
4259 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
4260 {
4261 	/* We have all inode data except xattrs in memory here. */
4262 	return __ext4_get_inode_loc(inode, iloc,
4263 		!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
4264 }
4265 
4266 void ext4_set_inode_flags(struct inode *inode)
4267 {
4268 	unsigned int flags = EXT4_I(inode)->i_flags;
4269 
4270 	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
4271 	if (flags & EXT4_SYNC_FL)
4272 		inode->i_flags |= S_SYNC;
4273 	if (flags & EXT4_APPEND_FL)
4274 		inode->i_flags |= S_APPEND;
4275 	if (flags & EXT4_IMMUTABLE_FL)
4276 		inode->i_flags |= S_IMMUTABLE;
4277 	if (flags & EXT4_NOATIME_FL)
4278 		inode->i_flags |= S_NOATIME;
4279 	if (flags & EXT4_DIRSYNC_FL)
4280 		inode->i_flags |= S_DIRSYNC;
4281 }
4282 
4283 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
4284 void ext4_get_inode_flags(struct ext4_inode_info *ei)
4285 {
4286 	unsigned int flags = ei->vfs_inode.i_flags;
4287 
4288 	ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
4289 			EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
4290 	if (flags & S_SYNC)
4291 		ei->i_flags |= EXT4_SYNC_FL;
4292 	if (flags & S_APPEND)
4293 		ei->i_flags |= EXT4_APPEND_FL;
4294 	if (flags & S_IMMUTABLE)
4295 		ei->i_flags |= EXT4_IMMUTABLE_FL;
4296 	if (flags & S_NOATIME)
4297 		ei->i_flags |= EXT4_NOATIME_FL;
4298 	if (flags & S_DIRSYNC)
4299 		ei->i_flags |= EXT4_DIRSYNC_FL;
4300 }
4301 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
4302 					struct ext4_inode_info *ei)
4303 {
4304 	blkcnt_t i_blocks ;
4305 	struct inode *inode = &(ei->vfs_inode);
4306 	struct super_block *sb = inode->i_sb;
4307 
4308 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
4309 				EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
4310 		/* we are using combined 48 bit field */
4311 		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
4312 					le32_to_cpu(raw_inode->i_blocks_lo);
4313 		if (ei->i_flags & EXT4_HUGE_FILE_FL) {
4314 			/* i_blocks represent file system block size */
4315 			return i_blocks  << (inode->i_blkbits - 9);
4316 		} else {
4317 			return i_blocks;
4318 		}
4319 	} else {
4320 		return le32_to_cpu(raw_inode->i_blocks_lo);
4321 	}
4322 }
4323 
4324 struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4325 {
4326 	struct ext4_iloc iloc;
4327 	struct ext4_inode *raw_inode;
4328 	struct ext4_inode_info *ei;
4329 	struct buffer_head *bh;
4330 	struct inode *inode;
4331 	long ret;
4332 	int block;
4333 
4334 	inode = iget_locked(sb, ino);
4335 	if (!inode)
4336 		return ERR_PTR(-ENOMEM);
4337 	if (!(inode->i_state & I_NEW))
4338 		return inode;
4339 
4340 	ei = EXT4_I(inode);
4341 #ifdef CONFIG_EXT4_FS_POSIX_ACL
4342 	ei->i_acl = EXT4_ACL_NOT_CACHED;
4343 	ei->i_default_acl = EXT4_ACL_NOT_CACHED;
4344 #endif
4345 
4346 	ret = __ext4_get_inode_loc(inode, &iloc, 0);
4347 	if (ret < 0)
4348 		goto bad_inode;
4349 	bh = iloc.bh;
4350 	raw_inode = ext4_raw_inode(&iloc);
4351 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4352 	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
4353 	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
4354 	if (!(test_opt(inode->i_sb, NO_UID32))) {
4355 		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
4356 		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4357 	}
4358 	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
4359 
4360 	ei->i_state = 0;
4361 	ei->i_dir_start_lookup = 0;
4362 	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4363 	/* We now have enough fields to check if the inode was active or not.
4364 	 * This is needed because nfsd might try to access dead inodes
4365 	 * the test is that same one that e2fsck uses
4366 	 * NeilBrown 1999oct15
4367 	 */
4368 	if (inode->i_nlink == 0) {
4369 		if (inode->i_mode == 0 ||
4370 		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
4371 			/* this inode is deleted */
4372 			brelse(bh);
4373 			ret = -ESTALE;
4374 			goto bad_inode;
4375 		}
4376 		/* The only unlinked inodes we let through here have
4377 		 * valid i_mode and are being read by the orphan
4378 		 * recovery code: that's fine, we're about to complete
4379 		 * the process of deleting those. */
4380 	}
4381 	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
4382 	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4383 	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4384 	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
4385 		ei->i_file_acl |=
4386 			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4387 	inode->i_size = ext4_isize(raw_inode);
4388 	ei->i_disksize = inode->i_size;
4389 	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4390 	ei->i_block_group = iloc.block_group;
4391 	ei->i_last_alloc_group = ~0;
4392 	/*
4393 	 * NOTE! The in-memory inode i_data array is in little-endian order
4394 	 * even on big-endian machines: we do NOT byteswap the block numbers!
4395 	 */
4396 	for (block = 0; block < EXT4_N_BLOCKS; block++)
4397 		ei->i_data[block] = raw_inode->i_block[block];
4398 	INIT_LIST_HEAD(&ei->i_orphan);
4399 
4400 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4401 		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4402 		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4403 		    EXT4_INODE_SIZE(inode->i_sb)) {
4404 			brelse(bh);
4405 			ret = -EIO;
4406 			goto bad_inode;
4407 		}
4408 		if (ei->i_extra_isize == 0) {
4409 			/* The extra space is currently unused. Use it. */
4410 			ei->i_extra_isize = sizeof(struct ext4_inode) -
4411 					    EXT4_GOOD_OLD_INODE_SIZE;
4412 		} else {
4413 			__le32 *magic = (void *)raw_inode +
4414 					EXT4_GOOD_OLD_INODE_SIZE +
4415 					ei->i_extra_isize;
4416 			if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
4417 				 ei->i_state |= EXT4_STATE_XATTR;
4418 		}
4419 	} else
4420 		ei->i_extra_isize = 0;
4421 
4422 	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
4423 	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
4424 	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
4425 	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
4426 
4427 	inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
4428 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4429 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4430 			inode->i_version |=
4431 			(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
4432 	}
4433 
4434 	ret = 0;
4435 	if (ei->i_file_acl &&
4436 	    ((ei->i_file_acl <
4437 	      (le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) +
4438 	       EXT4_SB(sb)->s_gdb_count)) ||
4439 	     (ei->i_file_acl >= ext4_blocks_count(EXT4_SB(sb)->s_es)))) {
4440 		ext4_error(sb, __func__,
4441 			   "bad extended attribute block %llu in inode #%lu",
4442 			   ei->i_file_acl, inode->i_ino);
4443 		ret = -EIO;
4444 		goto bad_inode;
4445 	} else if (ei->i_flags & EXT4_EXTENTS_FL) {
4446 		if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4447 		    (S_ISLNK(inode->i_mode) &&
4448 		     !ext4_inode_is_fast_symlink(inode)))
4449 			/* Validate extent which is part of inode */
4450 			ret = ext4_ext_check_inode(inode);
4451  	} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4452 		   (S_ISLNK(inode->i_mode) &&
4453 		    !ext4_inode_is_fast_symlink(inode))) {
4454 	 	/* Validate block references which are part of inode */
4455 		ret = ext4_check_inode_blockref(inode);
4456 	}
4457 	if (ret) {
4458  		brelse(bh);
4459  		goto bad_inode;
4460 	}
4461 
4462 	if (S_ISREG(inode->i_mode)) {
4463 		inode->i_op = &ext4_file_inode_operations;
4464 		inode->i_fop = &ext4_file_operations;
4465 		ext4_set_aops(inode);
4466 	} else if (S_ISDIR(inode->i_mode)) {
4467 		inode->i_op = &ext4_dir_inode_operations;
4468 		inode->i_fop = &ext4_dir_operations;
4469 	} else if (S_ISLNK(inode->i_mode)) {
4470 		if (ext4_inode_is_fast_symlink(inode)) {
4471 			inode->i_op = &ext4_fast_symlink_inode_operations;
4472 			nd_terminate_link(ei->i_data, inode->i_size,
4473 				sizeof(ei->i_data) - 1);
4474 		} else {
4475 			inode->i_op = &ext4_symlink_inode_operations;
4476 			ext4_set_aops(inode);
4477 		}
4478 	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
4479 	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
4480 		inode->i_op = &ext4_special_inode_operations;
4481 		if (raw_inode->i_block[0])
4482 			init_special_inode(inode, inode->i_mode,
4483 			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
4484 		else
4485 			init_special_inode(inode, inode->i_mode,
4486 			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
4487 	} else {
4488 		brelse(bh);
4489 		ret = -EIO;
4490 		ext4_error(inode->i_sb, __func__,
4491 			   "bogus i_mode (%o) for inode=%lu",
4492 			   inode->i_mode, inode->i_ino);
4493 		goto bad_inode;
4494 	}
4495 	brelse(iloc.bh);
4496 	ext4_set_inode_flags(inode);
4497 	unlock_new_inode(inode);
4498 	return inode;
4499 
4500 bad_inode:
4501 	iget_failed(inode);
4502 	return ERR_PTR(ret);
4503 }
4504 
4505 static int ext4_inode_blocks_set(handle_t *handle,
4506 				struct ext4_inode *raw_inode,
4507 				struct ext4_inode_info *ei)
4508 {
4509 	struct inode *inode = &(ei->vfs_inode);
4510 	u64 i_blocks = inode->i_blocks;
4511 	struct super_block *sb = inode->i_sb;
4512 
4513 	if (i_blocks <= ~0U) {
4514 		/*
4515 		 * i_blocks can be represnted in a 32 bit variable
4516 		 * as multiple of 512 bytes
4517 		 */
4518 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
4519 		raw_inode->i_blocks_high = 0;
4520 		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
4521 		return 0;
4522 	}
4523 	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
4524 		return -EFBIG;
4525 
4526 	if (i_blocks <= 0xffffffffffffULL) {
4527 		/*
4528 		 * i_blocks can be represented in a 48 bit variable
4529 		 * as multiple of 512 bytes
4530 		 */
4531 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
4532 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4533 		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
4534 	} else {
4535 		ei->i_flags |= EXT4_HUGE_FILE_FL;
4536 		/* i_block is stored in file system block size */
4537 		i_blocks = i_blocks >> (inode->i_blkbits - 9);
4538 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
4539 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4540 	}
4541 	return 0;
4542 }
4543 
4544 /*
4545  * Post the struct inode info into an on-disk inode location in the
4546  * buffer-cache.  This gobbles the caller's reference to the
4547  * buffer_head in the inode location struct.
4548  *
4549  * The caller must have write access to iloc->bh.
4550  */
4551 static int ext4_do_update_inode(handle_t *handle,
4552 				struct inode *inode,
4553 				struct ext4_iloc *iloc)
4554 {
4555 	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
4556 	struct ext4_inode_info *ei = EXT4_I(inode);
4557 	struct buffer_head *bh = iloc->bh;
4558 	int err = 0, rc, block;
4559 
4560 	/* For fields not not tracking in the in-memory inode,
4561 	 * initialise them to zero for new inodes. */
4562 	if (ei->i_state & EXT4_STATE_NEW)
4563 		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
4564 
4565 	ext4_get_inode_flags(ei);
4566 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
4567 	if (!(test_opt(inode->i_sb, NO_UID32))) {
4568 		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
4569 		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
4570 /*
4571  * Fix up interoperability with old kernels. Otherwise, old inodes get
4572  * re-used with the upper 16 bits of the uid/gid intact
4573  */
4574 		if (!ei->i_dtime) {
4575 			raw_inode->i_uid_high =
4576 				cpu_to_le16(high_16_bits(inode->i_uid));
4577 			raw_inode->i_gid_high =
4578 				cpu_to_le16(high_16_bits(inode->i_gid));
4579 		} else {
4580 			raw_inode->i_uid_high = 0;
4581 			raw_inode->i_gid_high = 0;
4582 		}
4583 	} else {
4584 		raw_inode->i_uid_low =
4585 			cpu_to_le16(fs_high2lowuid(inode->i_uid));
4586 		raw_inode->i_gid_low =
4587 			cpu_to_le16(fs_high2lowgid(inode->i_gid));
4588 		raw_inode->i_uid_high = 0;
4589 		raw_inode->i_gid_high = 0;
4590 	}
4591 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4592 
4593 	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
4594 	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4595 	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4596 	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4597 
4598 	if (ext4_inode_blocks_set(handle, raw_inode, ei))
4599 		goto out_brelse;
4600 	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4601 	/* clear the migrate flag in the raw_inode */
4602 	raw_inode->i_flags = cpu_to_le32(ei->i_flags & ~EXT4_EXT_MIGRATE);
4603 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
4604 	    cpu_to_le32(EXT4_OS_HURD))
4605 		raw_inode->i_file_acl_high =
4606 			cpu_to_le16(ei->i_file_acl >> 32);
4607 	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4608 	ext4_isize_set(raw_inode, ei->i_disksize);
4609 	if (ei->i_disksize > 0x7fffffffULL) {
4610 		struct super_block *sb = inode->i_sb;
4611 		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
4612 				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
4613 				EXT4_SB(sb)->s_es->s_rev_level ==
4614 				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
4615 			/* If this is the first large file
4616 			 * created, add a flag to the superblock.
4617 			 */
4618 			err = ext4_journal_get_write_access(handle,
4619 					EXT4_SB(sb)->s_sbh);
4620 			if (err)
4621 				goto out_brelse;
4622 			ext4_update_dynamic_rev(sb);
4623 			EXT4_SET_RO_COMPAT_FEATURE(sb,
4624 					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
4625 			sb->s_dirt = 1;
4626 			ext4_handle_sync(handle);
4627 			err = ext4_handle_dirty_metadata(handle, inode,
4628 					EXT4_SB(sb)->s_sbh);
4629 		}
4630 	}
4631 	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4632 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4633 		if (old_valid_dev(inode->i_rdev)) {
4634 			raw_inode->i_block[0] =
4635 				cpu_to_le32(old_encode_dev(inode->i_rdev));
4636 			raw_inode->i_block[1] = 0;
4637 		} else {
4638 			raw_inode->i_block[0] = 0;
4639 			raw_inode->i_block[1] =
4640 				cpu_to_le32(new_encode_dev(inode->i_rdev));
4641 			raw_inode->i_block[2] = 0;
4642 		}
4643 	} else for (block = 0; block < EXT4_N_BLOCKS; block++)
4644 		raw_inode->i_block[block] = ei->i_data[block];
4645 
4646 	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
4647 	if (ei->i_extra_isize) {
4648 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4649 			raw_inode->i_version_hi =
4650 			cpu_to_le32(inode->i_version >> 32);
4651 		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
4652 	}
4653 
4654 	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
4655 	rc = ext4_handle_dirty_metadata(handle, inode, bh);
4656 	if (!err)
4657 		err = rc;
4658 	ei->i_state &= ~EXT4_STATE_NEW;
4659 
4660 out_brelse:
4661 	brelse(bh);
4662 	ext4_std_error(inode->i_sb, err);
4663 	return err;
4664 }
4665 
4666 /*
4667  * ext4_write_inode()
4668  *
4669  * We are called from a few places:
4670  *
4671  * - Within generic_file_write() for O_SYNC files.
4672  *   Here, there will be no transaction running. We wait for any running
4673  *   trasnaction to commit.
4674  *
4675  * - Within sys_sync(), kupdate and such.
4676  *   We wait on commit, if tol to.
4677  *
4678  * - Within prune_icache() (PF_MEMALLOC == true)
4679  *   Here we simply return.  We can't afford to block kswapd on the
4680  *   journal commit.
4681  *
4682  * In all cases it is actually safe for us to return without doing anything,
4683  * because the inode has been copied into a raw inode buffer in
4684  * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
4685  * knfsd.
4686  *
4687  * Note that we are absolutely dependent upon all inode dirtiers doing the
4688  * right thing: they *must* call mark_inode_dirty() after dirtying info in
4689  * which we are interested.
4690  *
4691  * It would be a bug for them to not do this.  The code:
4692  *
4693  *	mark_inode_dirty(inode)
4694  *	stuff();
4695  *	inode->i_size = expr;
4696  *
4697  * is in error because a kswapd-driven write_inode() could occur while
4698  * `stuff()' is running, and the new i_size will be lost.  Plus the inode
4699  * will no longer be on the superblock's dirty inode list.
4700  */
4701 int ext4_write_inode(struct inode *inode, int wait)
4702 {
4703 	if (current->flags & PF_MEMALLOC)
4704 		return 0;
4705 
4706 	if (ext4_journal_current_handle()) {
4707 		jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
4708 		dump_stack();
4709 		return -EIO;
4710 	}
4711 
4712 	if (!wait)
4713 		return 0;
4714 
4715 	return ext4_force_commit(inode->i_sb);
4716 }
4717 
4718 int __ext4_write_dirty_metadata(struct inode *inode, struct buffer_head *bh)
4719 {
4720 	int err = 0;
4721 
4722 	mark_buffer_dirty(bh);
4723 	if (inode && inode_needs_sync(inode)) {
4724 		sync_dirty_buffer(bh);
4725 		if (buffer_req(bh) && !buffer_uptodate(bh)) {
4726 			ext4_error(inode->i_sb, __func__,
4727 				   "IO error syncing inode, "
4728 				   "inode=%lu, block=%llu",
4729 				   inode->i_ino,
4730 				   (unsigned long long)bh->b_blocknr);
4731 			err = -EIO;
4732 		}
4733 	}
4734 	return err;
4735 }
4736 
4737 /*
4738  * ext4_setattr()
4739  *
4740  * Called from notify_change.
4741  *
4742  * We want to trap VFS attempts to truncate the file as soon as
4743  * possible.  In particular, we want to make sure that when the VFS
4744  * shrinks i_size, we put the inode on the orphan list and modify
4745  * i_disksize immediately, so that during the subsequent flushing of
4746  * dirty pages and freeing of disk blocks, we can guarantee that any
4747  * commit will leave the blocks being flushed in an unused state on
4748  * disk.  (On recovery, the inode will get truncated and the blocks will
4749  * be freed, so we have a strong guarantee that no future commit will
4750  * leave these blocks visible to the user.)
4751  *
4752  * Another thing we have to assure is that if we are in ordered mode
4753  * and inode is still attached to the committing transaction, we must
4754  * we start writeout of all the dirty pages which are being truncated.
4755  * This way we are sure that all the data written in the previous
4756  * transaction are already on disk (truncate waits for pages under
4757  * writeback).
4758  *
4759  * Called with inode->i_mutex down.
4760  */
4761 int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4762 {
4763 	struct inode *inode = dentry->d_inode;
4764 	int error, rc = 0;
4765 	const unsigned int ia_valid = attr->ia_valid;
4766 
4767 	error = inode_change_ok(inode, attr);
4768 	if (error)
4769 		return error;
4770 
4771 	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
4772 		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
4773 		handle_t *handle;
4774 
4775 		/* (user+group)*(old+new) structure, inode write (sb,
4776 		 * inode block, ? - but truncate inode update has it) */
4777 		handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+
4778 					EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
4779 		if (IS_ERR(handle)) {
4780 			error = PTR_ERR(handle);
4781 			goto err_out;
4782 		}
4783 		error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
4784 		if (error) {
4785 			ext4_journal_stop(handle);
4786 			return error;
4787 		}
4788 		/* Update corresponding info in inode so that everything is in
4789 		 * one transaction */
4790 		if (attr->ia_valid & ATTR_UID)
4791 			inode->i_uid = attr->ia_uid;
4792 		if (attr->ia_valid & ATTR_GID)
4793 			inode->i_gid = attr->ia_gid;
4794 		error = ext4_mark_inode_dirty(handle, inode);
4795 		ext4_journal_stop(handle);
4796 	}
4797 
4798 	if (attr->ia_valid & ATTR_SIZE) {
4799 		if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
4800 			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4801 
4802 			if (attr->ia_size > sbi->s_bitmap_maxbytes) {
4803 				error = -EFBIG;
4804 				goto err_out;
4805 			}
4806 		}
4807 	}
4808 
4809 	if (S_ISREG(inode->i_mode) &&
4810 	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
4811 		handle_t *handle;
4812 
4813 		handle = ext4_journal_start(inode, 3);
4814 		if (IS_ERR(handle)) {
4815 			error = PTR_ERR(handle);
4816 			goto err_out;
4817 		}
4818 
4819 		error = ext4_orphan_add(handle, inode);
4820 		EXT4_I(inode)->i_disksize = attr->ia_size;
4821 		rc = ext4_mark_inode_dirty(handle, inode);
4822 		if (!error)
4823 			error = rc;
4824 		ext4_journal_stop(handle);
4825 
4826 		if (ext4_should_order_data(inode)) {
4827 			error = ext4_begin_ordered_truncate(inode,
4828 							    attr->ia_size);
4829 			if (error) {
4830 				/* Do as much error cleanup as possible */
4831 				handle = ext4_journal_start(inode, 3);
4832 				if (IS_ERR(handle)) {
4833 					ext4_orphan_del(NULL, inode);
4834 					goto err_out;
4835 				}
4836 				ext4_orphan_del(handle, inode);
4837 				ext4_journal_stop(handle);
4838 				goto err_out;
4839 			}
4840 		}
4841 	}
4842 
4843 	rc = inode_setattr(inode, attr);
4844 
4845 	/* If inode_setattr's call to ext4_truncate failed to get a
4846 	 * transaction handle at all, we need to clean up the in-core
4847 	 * orphan list manually. */
4848 	if (inode->i_nlink)
4849 		ext4_orphan_del(NULL, inode);
4850 
4851 	if (!rc && (ia_valid & ATTR_MODE))
4852 		rc = ext4_acl_chmod(inode);
4853 
4854 err_out:
4855 	ext4_std_error(inode->i_sb, error);
4856 	if (!error)
4857 		error = rc;
4858 	return error;
4859 }
4860 
4861 int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
4862 		 struct kstat *stat)
4863 {
4864 	struct inode *inode;
4865 	unsigned long delalloc_blocks;
4866 
4867 	inode = dentry->d_inode;
4868 	generic_fillattr(inode, stat);
4869 
4870 	/*
4871 	 * We can't update i_blocks if the block allocation is delayed
4872 	 * otherwise in the case of system crash before the real block
4873 	 * allocation is done, we will have i_blocks inconsistent with
4874 	 * on-disk file blocks.
4875 	 * We always keep i_blocks updated together with real
4876 	 * allocation. But to not confuse with user, stat
4877 	 * will return the blocks that include the delayed allocation
4878 	 * blocks for this file.
4879 	 */
4880 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
4881 	delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
4882 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
4883 
4884 	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
4885 	return 0;
4886 }
4887 
4888 static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
4889 				      int chunk)
4890 {
4891 	int indirects;
4892 
4893 	/* if nrblocks are contiguous */
4894 	if (chunk) {
4895 		/*
4896 		 * With N contiguous data blocks, it need at most
4897 		 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks
4898 		 * 2 dindirect blocks
4899 		 * 1 tindirect block
4900 		 */
4901 		indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb);
4902 		return indirects + 3;
4903 	}
4904 	/*
4905 	 * if nrblocks are not contiguous, worse case, each block touch
4906 	 * a indirect block, and each indirect block touch a double indirect
4907 	 * block, plus a triple indirect block
4908 	 */
4909 	indirects = nrblocks * 2 + 1;
4910 	return indirects;
4911 }
4912 
4913 static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4914 {
4915 	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
4916 		return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
4917 	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
4918 }
4919 
4920 /*
4921  * Account for index blocks, block groups bitmaps and block group
4922  * descriptor blocks if modify datablocks and index blocks
4923  * worse case, the indexs blocks spread over different block groups
4924  *
4925  * If datablocks are discontiguous, they are possible to spread over
4926  * different block groups too. If they are contiugous, with flexbg,
4927  * they could still across block group boundary.
4928  *
4929  * Also account for superblock, inode, quota and xattr blocks
4930  */
4931 int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4932 {
4933 	int groups, gdpblocks;
4934 	int idxblocks;
4935 	int ret = 0;
4936 
4937 	/*
4938 	 * How many index blocks need to touch to modify nrblocks?
4939 	 * The "Chunk" flag indicating whether the nrblocks is
4940 	 * physically contiguous on disk
4941 	 *
4942 	 * For Direct IO and fallocate, they calls get_block to allocate
4943 	 * one single extent at a time, so they could set the "Chunk" flag
4944 	 */
4945 	idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
4946 
4947 	ret = idxblocks;
4948 
4949 	/*
4950 	 * Now let's see how many group bitmaps and group descriptors need
4951 	 * to account
4952 	 */
4953 	groups = idxblocks;
4954 	if (chunk)
4955 		groups += 1;
4956 	else
4957 		groups += nrblocks;
4958 
4959 	gdpblocks = groups;
4960 	if (groups > EXT4_SB(inode->i_sb)->s_groups_count)
4961 		groups = EXT4_SB(inode->i_sb)->s_groups_count;
4962 	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
4963 		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
4964 
4965 	/* bitmaps and block group descriptor blocks */
4966 	ret += groups + gdpblocks;
4967 
4968 	/* Blocks for super block, inode, quota and xattr blocks */
4969 	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
4970 
4971 	return ret;
4972 }
4973 
4974 /*
4975  * Calulate the total number of credits to reserve to fit
4976  * the modification of a single pages into a single transaction,
4977  * which may include multiple chunks of block allocations.
4978  *
4979  * This could be called via ext4_write_begin()
4980  *
4981  * We need to consider the worse case, when
4982  * one new block per extent.
4983  */
4984 int ext4_writepage_trans_blocks(struct inode *inode)
4985 {
4986 	int bpp = ext4_journal_blocks_per_page(inode);
4987 	int ret;
4988 
4989 	ret = ext4_meta_trans_blocks(inode, bpp, 0);
4990 
4991 	/* Account for data blocks for journalled mode */
4992 	if (ext4_should_journal_data(inode))
4993 		ret += bpp;
4994 	return ret;
4995 }
4996 
4997 /*
4998  * Calculate the journal credits for a chunk of data modification.
4999  *
5000  * This is called from DIO, fallocate or whoever calling
5001  * ext4_get_blocks_wrap() to map/allocate a chunk of contigous disk blocks.
5002  *
5003  * journal buffers for data blocks are not included here, as DIO
5004  * and fallocate do no need to journal data buffers.
5005  */
5006 int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
5007 {
5008 	return ext4_meta_trans_blocks(inode, nrblocks, 1);
5009 }
5010 
5011 /*
5012  * The caller must have previously called ext4_reserve_inode_write().
5013  * Give this, we know that the caller already has write access to iloc->bh.
5014  */
5015 int ext4_mark_iloc_dirty(handle_t *handle,
5016 		struct inode *inode, struct ext4_iloc *iloc)
5017 {
5018 	int err = 0;
5019 
5020 	if (test_opt(inode->i_sb, I_VERSION))
5021 		inode_inc_iversion(inode);
5022 
5023 	/* the do_update_inode consumes one bh->b_count */
5024 	get_bh(iloc->bh);
5025 
5026 	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
5027 	err = ext4_do_update_inode(handle, inode, iloc);
5028 	put_bh(iloc->bh);
5029 	return err;
5030 }
5031 
5032 /*
5033  * On success, We end up with an outstanding reference count against
5034  * iloc->bh.  This _must_ be cleaned up later.
5035  */
5036 
5037 int
5038 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
5039 			 struct ext4_iloc *iloc)
5040 {
5041 	int err;
5042 
5043 	err = ext4_get_inode_loc(inode, iloc);
5044 	if (!err) {
5045 		BUFFER_TRACE(iloc->bh, "get_write_access");
5046 		err = ext4_journal_get_write_access(handle, iloc->bh);
5047 		if (err) {
5048 			brelse(iloc->bh);
5049 			iloc->bh = NULL;
5050 		}
5051 	}
5052 	ext4_std_error(inode->i_sb, err);
5053 	return err;
5054 }
5055 
5056 /*
5057  * Expand an inode by new_extra_isize bytes.
5058  * Returns 0 on success or negative error number on failure.
5059  */
5060 static int ext4_expand_extra_isize(struct inode *inode,
5061 				   unsigned int new_extra_isize,
5062 				   struct ext4_iloc iloc,
5063 				   handle_t *handle)
5064 {
5065 	struct ext4_inode *raw_inode;
5066 	struct ext4_xattr_ibody_header *header;
5067 	struct ext4_xattr_entry *entry;
5068 
5069 	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
5070 		return 0;
5071 
5072 	raw_inode = ext4_raw_inode(&iloc);
5073 
5074 	header = IHDR(inode, raw_inode);
5075 	entry = IFIRST(header);
5076 
5077 	/* No extended attributes present */
5078 	if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
5079 		header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
5080 		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
5081 			new_extra_isize);
5082 		EXT4_I(inode)->i_extra_isize = new_extra_isize;
5083 		return 0;
5084 	}
5085 
5086 	/* try to expand with EAs present */
5087 	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
5088 					  raw_inode, handle);
5089 }
5090 
5091 /*
5092  * What we do here is to mark the in-core inode as clean with respect to inode
5093  * dirtiness (it may still be data-dirty).
5094  * This means that the in-core inode may be reaped by prune_icache
5095  * without having to perform any I/O.  This is a very good thing,
5096  * because *any* task may call prune_icache - even ones which
5097  * have a transaction open against a different journal.
5098  *
5099  * Is this cheating?  Not really.  Sure, we haven't written the
5100  * inode out, but prune_icache isn't a user-visible syncing function.
5101  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
5102  * we start and wait on commits.
5103  *
5104  * Is this efficient/effective?  Well, we're being nice to the system
5105  * by cleaning up our inodes proactively so they can be reaped
5106  * without I/O.  But we are potentially leaving up to five seconds'
5107  * worth of inodes floating about which prune_icache wants us to
5108  * write out.  One way to fix that would be to get prune_icache()
5109  * to do a write_super() to free up some memory.  It has the desired
5110  * effect.
5111  */
5112 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
5113 {
5114 	struct ext4_iloc iloc;
5115 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5116 	static unsigned int mnt_count;
5117 	int err, ret;
5118 
5119 	might_sleep();
5120 	err = ext4_reserve_inode_write(handle, inode, &iloc);
5121 	if (ext4_handle_valid(handle) &&
5122 	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
5123 	    !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
5124 		/*
5125 		 * We need extra buffer credits since we may write into EA block
5126 		 * with this same handle. If journal_extend fails, then it will
5127 		 * only result in a minor loss of functionality for that inode.
5128 		 * If this is felt to be critical, then e2fsck should be run to
5129 		 * force a large enough s_min_extra_isize.
5130 		 */
5131 		if ((jbd2_journal_extend(handle,
5132 			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
5133 			ret = ext4_expand_extra_isize(inode,
5134 						      sbi->s_want_extra_isize,
5135 						      iloc, handle);
5136 			if (ret) {
5137 				EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
5138 				if (mnt_count !=
5139 					le16_to_cpu(sbi->s_es->s_mnt_count)) {
5140 					ext4_warning(inode->i_sb, __func__,
5141 					"Unable to expand inode %lu. Delete"
5142 					" some EAs or run e2fsck.",
5143 					inode->i_ino);
5144 					mnt_count =
5145 					  le16_to_cpu(sbi->s_es->s_mnt_count);
5146 				}
5147 			}
5148 		}
5149 	}
5150 	if (!err)
5151 		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
5152 	return err;
5153 }
5154 
5155 /*
5156  * ext4_dirty_inode() is called from __mark_inode_dirty()
5157  *
5158  * We're really interested in the case where a file is being extended.
5159  * i_size has been changed by generic_commit_write() and we thus need
5160  * to include the updated inode in the current transaction.
5161  *
5162  * Also, vfs_dq_alloc_block() will always dirty the inode when blocks
5163  * are allocated to the file.
5164  *
5165  * If the inode is marked synchronous, we don't honour that here - doing
5166  * so would cause a commit on atime updates, which we don't bother doing.
5167  * We handle synchronous inodes at the highest possible level.
5168  */
5169 void ext4_dirty_inode(struct inode *inode)
5170 {
5171 	handle_t *current_handle = ext4_journal_current_handle();
5172 	handle_t *handle;
5173 
5174 	if (!ext4_handle_valid(current_handle)) {
5175 		ext4_mark_inode_dirty(current_handle, inode);
5176 		return;
5177 	}
5178 
5179 	handle = ext4_journal_start(inode, 2);
5180 	if (IS_ERR(handle))
5181 		goto out;
5182 	if (current_handle &&
5183 		current_handle->h_transaction != handle->h_transaction) {
5184 		/* This task has a transaction open against a different fs */
5185 		printk(KERN_EMERG "%s: transactions do not match!\n",
5186 		       __func__);
5187 	} else {
5188 		jbd_debug(5, "marking dirty.  outer handle=%p\n",
5189 				current_handle);
5190 		ext4_mark_inode_dirty(handle, inode);
5191 	}
5192 	ext4_journal_stop(handle);
5193 out:
5194 	return;
5195 }
5196 
5197 #if 0
5198 /*
5199  * Bind an inode's backing buffer_head into this transaction, to prevent
5200  * it from being flushed to disk early.  Unlike
5201  * ext4_reserve_inode_write, this leaves behind no bh reference and
5202  * returns no iloc structure, so the caller needs to repeat the iloc
5203  * lookup to mark the inode dirty later.
5204  */
5205 static int ext4_pin_inode(handle_t *handle, struct inode *inode)
5206 {
5207 	struct ext4_iloc iloc;
5208 
5209 	int err = 0;
5210 	if (handle) {
5211 		err = ext4_get_inode_loc(inode, &iloc);
5212 		if (!err) {
5213 			BUFFER_TRACE(iloc.bh, "get_write_access");
5214 			err = jbd2_journal_get_write_access(handle, iloc.bh);
5215 			if (!err)
5216 				err = ext4_handle_dirty_metadata(handle,
5217 								 inode,
5218 								 iloc.bh);
5219 			brelse(iloc.bh);
5220 		}
5221 	}
5222 	ext4_std_error(inode->i_sb, err);
5223 	return err;
5224 }
5225 #endif
5226 
5227 int ext4_change_inode_journal_flag(struct inode *inode, int val)
5228 {
5229 	journal_t *journal;
5230 	handle_t *handle;
5231 	int err;
5232 
5233 	/*
5234 	 * We have to be very careful here: changing a data block's
5235 	 * journaling status dynamically is dangerous.  If we write a
5236 	 * data block to the journal, change the status and then delete
5237 	 * that block, we risk forgetting to revoke the old log record
5238 	 * from the journal and so a subsequent replay can corrupt data.
5239 	 * So, first we make sure that the journal is empty and that
5240 	 * nobody is changing anything.
5241 	 */
5242 
5243 	journal = EXT4_JOURNAL(inode);
5244 	if (!journal)
5245 		return 0;
5246 	if (is_journal_aborted(journal))
5247 		return -EROFS;
5248 
5249 	jbd2_journal_lock_updates(journal);
5250 	jbd2_journal_flush(journal);
5251 
5252 	/*
5253 	 * OK, there are no updates running now, and all cached data is
5254 	 * synced to disk.  We are now in a completely consistent state
5255 	 * which doesn't have anything in the journal, and we know that
5256 	 * no filesystem updates are running, so it is safe to modify
5257 	 * the inode's in-core data-journaling state flag now.
5258 	 */
5259 
5260 	if (val)
5261 		EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
5262 	else
5263 		EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
5264 	ext4_set_aops(inode);
5265 
5266 	jbd2_journal_unlock_updates(journal);
5267 
5268 	/* Finally we can mark the inode as dirty. */
5269 
5270 	handle = ext4_journal_start(inode, 1);
5271 	if (IS_ERR(handle))
5272 		return PTR_ERR(handle);
5273 
5274 	err = ext4_mark_inode_dirty(handle, inode);
5275 	ext4_handle_sync(handle);
5276 	ext4_journal_stop(handle);
5277 	ext4_std_error(inode->i_sb, err);
5278 
5279 	return err;
5280 }
5281 
5282 static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
5283 {
5284 	return !buffer_mapped(bh);
5285 }
5286 
5287 int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5288 {
5289 	struct page *page = vmf->page;
5290 	loff_t size;
5291 	unsigned long len;
5292 	int ret = -EINVAL;
5293 	void *fsdata;
5294 	struct file *file = vma->vm_file;
5295 	struct inode *inode = file->f_path.dentry->d_inode;
5296 	struct address_space *mapping = inode->i_mapping;
5297 
5298 	/*
5299 	 * Get i_alloc_sem to stop truncates messing with the inode. We cannot
5300 	 * get i_mutex because we are already holding mmap_sem.
5301 	 */
5302 	down_read(&inode->i_alloc_sem);
5303 	size = i_size_read(inode);
5304 	if (page->mapping != mapping || size <= page_offset(page)
5305 	    || !PageUptodate(page)) {
5306 		/* page got truncated from under us? */
5307 		goto out_unlock;
5308 	}
5309 	ret = 0;
5310 	if (PageMappedToDisk(page))
5311 		goto out_unlock;
5312 
5313 	if (page->index == size >> PAGE_CACHE_SHIFT)
5314 		len = size & ~PAGE_CACHE_MASK;
5315 	else
5316 		len = PAGE_CACHE_SIZE;
5317 
5318 	if (page_has_buffers(page)) {
5319 		/* return if we have all the buffers mapped */
5320 		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
5321 				       ext4_bh_unmapped))
5322 			goto out_unlock;
5323 	}
5324 	/*
5325 	 * OK, we need to fill the hole... Do write_begin write_end
5326 	 * to do block allocation/reservation.We are not holding
5327 	 * inode.i__mutex here. That allow * parallel write_begin,
5328 	 * write_end call. lock_page prevent this from happening
5329 	 * on the same page though
5330 	 */
5331 	ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
5332 			len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
5333 	if (ret < 0)
5334 		goto out_unlock;
5335 	ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
5336 			len, len, page, fsdata);
5337 	if (ret < 0)
5338 		goto out_unlock;
5339 	ret = 0;
5340 out_unlock:
5341 	if (ret)
5342 		ret = VM_FAULT_SIGBUS;
5343 	up_read(&inode->i_alloc_sem);
5344 	return ret;
5345 }
5346