xref: /openbmc/linux/fs/ext4/inode.c (revision 92ed1a76)
1 /*
2  *  linux/fs/ext4/inode.c
3  *
4  * Copyright (C) 1992, 1993, 1994, 1995
5  * Remy Card (card@masi.ibp.fr)
6  * Laboratoire MASI - Institut Blaise Pascal
7  * Universite Pierre et Marie Curie (Paris VI)
8  *
9  *  from
10  *
11  *  linux/fs/minix/inode.c
12  *
13  *  Copyright (C) 1991, 1992  Linus Torvalds
14  *
15  *  Goal-directed block allocation by Stephen Tweedie
16  *	(sct@redhat.com), 1993, 1998
17  *  Big-endian to little-endian byte-swapping/bitmaps by
18  *        David S. Miller (davem@caip.rutgers.edu), 1995
19  *  64-bit file support on 64-bit platforms by Jakub Jelinek
20  *	(jj@sunsite.ms.mff.cuni.cz)
21  *
22  *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
23  */
24 
25 #include <linux/module.h>
26 #include <linux/fs.h>
27 #include <linux/time.h>
28 #include <linux/jbd2.h>
29 #include <linux/highuid.h>
30 #include <linux/pagemap.h>
31 #include <linux/quotaops.h>
32 #include <linux/string.h>
33 #include <linux/buffer_head.h>
34 #include <linux/writeback.h>
35 #include <linux/pagevec.h>
36 #include <linux/mpage.h>
37 #include <linux/namei.h>
38 #include <linux/uio.h>
39 #include <linux/bio.h>
40 #include <linux/workqueue.h>
41 #include <linux/kernel.h>
42 #include <linux/slab.h>
43 
44 #include "ext4_jbd2.h"
45 #include "xattr.h"
46 #include "acl.h"
47 #include "ext4_extents.h"
48 
49 #include <trace/events/ext4.h>
50 
51 #define MPAGE_DA_EXTENT_TAIL 0x01
52 
53 static inline int ext4_begin_ordered_truncate(struct inode *inode,
54 					      loff_t new_size)
55 {
56 	trace_ext4_begin_ordered_truncate(inode, new_size);
57 	return jbd2_journal_begin_ordered_truncate(
58 					EXT4_SB(inode->i_sb)->s_journal,
59 					&EXT4_I(inode)->jinode,
60 					new_size);
61 }
62 
63 static void ext4_invalidatepage(struct page *page, unsigned long offset);
64 static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
65 				   struct buffer_head *bh_result, int create);
66 static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
67 static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
68 static int __ext4_journalled_writepage(struct page *page, unsigned int len);
69 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
70 
71 /*
72  * Test whether an inode is a fast symlink.
73  */
74 static int ext4_inode_is_fast_symlink(struct inode *inode)
75 {
76 	int ea_blocks = EXT4_I(inode)->i_file_acl ?
77 		(inode->i_sb->s_blocksize >> 9) : 0;
78 
79 	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
80 }
81 
82 /*
83  * Work out how many blocks we need to proceed with the next chunk of a
84  * truncate transaction.
85  */
86 static unsigned long blocks_for_truncate(struct inode *inode)
87 {
88 	ext4_lblk_t needed;
89 
90 	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
91 
92 	/* Give ourselves just enough room to cope with inodes in which
93 	 * i_blocks is corrupt: we've seen disk corruptions in the past
94 	 * which resulted in random data in an inode which looked enough
95 	 * like a regular file for ext4 to try to delete it.  Things
96 	 * will go a bit crazy if that happens, but at least we should
97 	 * try not to panic the whole kernel. */
98 	if (needed < 2)
99 		needed = 2;
100 
101 	/* But we need to bound the transaction so we don't overflow the
102 	 * journal. */
103 	if (needed > EXT4_MAX_TRANS_DATA)
104 		needed = EXT4_MAX_TRANS_DATA;
105 
106 	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
107 }
108 
109 /*
110  * Truncate transactions can be complex and absolutely huge.  So we need to
111  * be able to restart the transaction at a conventient checkpoint to make
112  * sure we don't overflow the journal.
113  *
114  * start_transaction gets us a new handle for a truncate transaction,
115  * and extend_transaction tries to extend the existing one a bit.  If
116  * extend fails, we need to propagate the failure up and restart the
117  * transaction in the top-level truncate loop. --sct
118  */
119 static handle_t *start_transaction(struct inode *inode)
120 {
121 	handle_t *result;
122 
123 	result = ext4_journal_start(inode, blocks_for_truncate(inode));
124 	if (!IS_ERR(result))
125 		return result;
126 
127 	ext4_std_error(inode->i_sb, PTR_ERR(result));
128 	return result;
129 }
130 
131 /*
132  * Try to extend this transaction for the purposes of truncation.
133  *
134  * Returns 0 if we managed to create more room.  If we can't create more
135  * room, and the transaction must be restarted we return 1.
136  */
137 static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
138 {
139 	if (!ext4_handle_valid(handle))
140 		return 0;
141 	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
142 		return 0;
143 	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
144 		return 0;
145 	return 1;
146 }
147 
148 /*
149  * Restart the transaction associated with *handle.  This does a commit,
150  * so before we call here everything must be consistently dirtied against
151  * this transaction.
152  */
153 int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
154 				 int nblocks)
155 {
156 	int ret;
157 
158 	/*
159 	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
160 	 * moment, get_block can be called only for blocks inside i_size since
161 	 * page cache has been already dropped and writes are blocked by
162 	 * i_mutex. So we can safely drop the i_data_sem here.
163 	 */
164 	BUG_ON(EXT4_JOURNAL(inode) == NULL);
165 	jbd_debug(2, "restarting handle %p\n", handle);
166 	up_write(&EXT4_I(inode)->i_data_sem);
167 	ret = ext4_journal_restart(handle, blocks_for_truncate(inode));
168 	down_write(&EXT4_I(inode)->i_data_sem);
169 	ext4_discard_preallocations(inode);
170 
171 	return ret;
172 }
173 
174 /*
175  * Called at the last iput() if i_nlink is zero.
176  */
177 void ext4_evict_inode(struct inode *inode)
178 {
179 	handle_t *handle;
180 	int err;
181 
182 	trace_ext4_evict_inode(inode);
183 	if (inode->i_nlink) {
184 		truncate_inode_pages(&inode->i_data, 0);
185 		goto no_delete;
186 	}
187 
188 	if (!is_bad_inode(inode))
189 		dquot_initialize(inode);
190 
191 	if (ext4_should_order_data(inode))
192 		ext4_begin_ordered_truncate(inode, 0);
193 	truncate_inode_pages(&inode->i_data, 0);
194 
195 	if (is_bad_inode(inode))
196 		goto no_delete;
197 
198 	handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
199 	if (IS_ERR(handle)) {
200 		ext4_std_error(inode->i_sb, PTR_ERR(handle));
201 		/*
202 		 * If we're going to skip the normal cleanup, we still need to
203 		 * make sure that the in-core orphan linked list is properly
204 		 * cleaned up.
205 		 */
206 		ext4_orphan_del(NULL, inode);
207 		goto no_delete;
208 	}
209 
210 	if (IS_SYNC(inode))
211 		ext4_handle_sync(handle);
212 	inode->i_size = 0;
213 	err = ext4_mark_inode_dirty(handle, inode);
214 	if (err) {
215 		ext4_warning(inode->i_sb,
216 			     "couldn't mark inode dirty (err %d)", err);
217 		goto stop_handle;
218 	}
219 	if (inode->i_blocks)
220 		ext4_truncate(inode);
221 
222 	/*
223 	 * ext4_ext_truncate() doesn't reserve any slop when it
224 	 * restarts journal transactions; therefore there may not be
225 	 * enough credits left in the handle to remove the inode from
226 	 * the orphan list and set the dtime field.
227 	 */
228 	if (!ext4_handle_has_enough_credits(handle, 3)) {
229 		err = ext4_journal_extend(handle, 3);
230 		if (err > 0)
231 			err = ext4_journal_restart(handle, 3);
232 		if (err != 0) {
233 			ext4_warning(inode->i_sb,
234 				     "couldn't extend journal (err %d)", err);
235 		stop_handle:
236 			ext4_journal_stop(handle);
237 			ext4_orphan_del(NULL, inode);
238 			goto no_delete;
239 		}
240 	}
241 
242 	/*
243 	 * Kill off the orphan record which ext4_truncate created.
244 	 * AKPM: I think this can be inside the above `if'.
245 	 * Note that ext4_orphan_del() has to be able to cope with the
246 	 * deletion of a non-existent orphan - this is because we don't
247 	 * know if ext4_truncate() actually created an orphan record.
248 	 * (Well, we could do this if we need to, but heck - it works)
249 	 */
250 	ext4_orphan_del(handle, inode);
251 	EXT4_I(inode)->i_dtime	= get_seconds();
252 
253 	/*
254 	 * One subtle ordering requirement: if anything has gone wrong
255 	 * (transaction abort, IO errors, whatever), then we can still
256 	 * do these next steps (the fs will already have been marked as
257 	 * having errors), but we can't free the inode if the mark_dirty
258 	 * fails.
259 	 */
260 	if (ext4_mark_inode_dirty(handle, inode))
261 		/* If that failed, just do the required in-core inode clear. */
262 		ext4_clear_inode(inode);
263 	else
264 		ext4_free_inode(handle, inode);
265 	ext4_journal_stop(handle);
266 	return;
267 no_delete:
268 	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
269 }
270 
271 typedef struct {
272 	__le32	*p;
273 	__le32	key;
274 	struct buffer_head *bh;
275 } Indirect;
276 
277 static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
278 {
279 	p->key = *(p->p = v);
280 	p->bh = bh;
281 }
282 
283 /**
284  *	ext4_block_to_path - parse the block number into array of offsets
285  *	@inode: inode in question (we are only interested in its superblock)
286  *	@i_block: block number to be parsed
287  *	@offsets: array to store the offsets in
288  *	@boundary: set this non-zero if the referred-to block is likely to be
289  *	       followed (on disk) by an indirect block.
290  *
291  *	To store the locations of file's data ext4 uses a data structure common
292  *	for UNIX filesystems - tree of pointers anchored in the inode, with
293  *	data blocks at leaves and indirect blocks in intermediate nodes.
294  *	This function translates the block number into path in that tree -
295  *	return value is the path length and @offsets[n] is the offset of
296  *	pointer to (n+1)th node in the nth one. If @block is out of range
297  *	(negative or too large) warning is printed and zero returned.
298  *
299  *	Note: function doesn't find node addresses, so no IO is needed. All
300  *	we need to know is the capacity of indirect blocks (taken from the
301  *	inode->i_sb).
302  */
303 
304 /*
305  * Portability note: the last comparison (check that we fit into triple
306  * indirect block) is spelled differently, because otherwise on an
307  * architecture with 32-bit longs and 8Kb pages we might get into trouble
308  * if our filesystem had 8Kb blocks. We might use long long, but that would
309  * kill us on x86. Oh, well, at least the sign propagation does not matter -
310  * i_block would have to be negative in the very beginning, so we would not
311  * get there at all.
312  */
313 
314 static int ext4_block_to_path(struct inode *inode,
315 			      ext4_lblk_t i_block,
316 			      ext4_lblk_t offsets[4], int *boundary)
317 {
318 	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
319 	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
320 	const long direct_blocks = EXT4_NDIR_BLOCKS,
321 		indirect_blocks = ptrs,
322 		double_blocks = (1 << (ptrs_bits * 2));
323 	int n = 0;
324 	int final = 0;
325 
326 	if (i_block < direct_blocks) {
327 		offsets[n++] = i_block;
328 		final = direct_blocks;
329 	} else if ((i_block -= direct_blocks) < indirect_blocks) {
330 		offsets[n++] = EXT4_IND_BLOCK;
331 		offsets[n++] = i_block;
332 		final = ptrs;
333 	} else if ((i_block -= indirect_blocks) < double_blocks) {
334 		offsets[n++] = EXT4_DIND_BLOCK;
335 		offsets[n++] = i_block >> ptrs_bits;
336 		offsets[n++] = i_block & (ptrs - 1);
337 		final = ptrs;
338 	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
339 		offsets[n++] = EXT4_TIND_BLOCK;
340 		offsets[n++] = i_block >> (ptrs_bits * 2);
341 		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
342 		offsets[n++] = i_block & (ptrs - 1);
343 		final = ptrs;
344 	} else {
345 		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
346 			     i_block + direct_blocks +
347 			     indirect_blocks + double_blocks, inode->i_ino);
348 	}
349 	if (boundary)
350 		*boundary = final - 1 - (i_block & (ptrs - 1));
351 	return n;
352 }
353 
354 static int __ext4_check_blockref(const char *function, unsigned int line,
355 				 struct inode *inode,
356 				 __le32 *p, unsigned int max)
357 {
358 	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
359 	__le32 *bref = p;
360 	unsigned int blk;
361 
362 	while (bref < p+max) {
363 		blk = le32_to_cpu(*bref++);
364 		if (blk &&
365 		    unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
366 						    blk, 1))) {
367 			es->s_last_error_block = cpu_to_le64(blk);
368 			ext4_error_inode(inode, function, line, blk,
369 					 "invalid block");
370 			return -EIO;
371 		}
372 	}
373 	return 0;
374 }
375 
376 
377 #define ext4_check_indirect_blockref(inode, bh)                         \
378 	__ext4_check_blockref(__func__, __LINE__, inode,		\
379 			      (__le32 *)(bh)->b_data,			\
380 			      EXT4_ADDR_PER_BLOCK((inode)->i_sb))
381 
382 #define ext4_check_inode_blockref(inode)                                \
383 	__ext4_check_blockref(__func__, __LINE__, inode,		\
384 			      EXT4_I(inode)->i_data,			\
385 			      EXT4_NDIR_BLOCKS)
386 
387 /**
388  *	ext4_get_branch - read the chain of indirect blocks leading to data
389  *	@inode: inode in question
390  *	@depth: depth of the chain (1 - direct pointer, etc.)
391  *	@offsets: offsets of pointers in inode/indirect blocks
392  *	@chain: place to store the result
393  *	@err: here we store the error value
394  *
395  *	Function fills the array of triples <key, p, bh> and returns %NULL
396  *	if everything went OK or the pointer to the last filled triple
397  *	(incomplete one) otherwise. Upon the return chain[i].key contains
398  *	the number of (i+1)-th block in the chain (as it is stored in memory,
399  *	i.e. little-endian 32-bit), chain[i].p contains the address of that
400  *	number (it points into struct inode for i==0 and into the bh->b_data
401  *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
402  *	block for i>0 and NULL for i==0. In other words, it holds the block
403  *	numbers of the chain, addresses they were taken from (and where we can
404  *	verify that chain did not change) and buffer_heads hosting these
405  *	numbers.
406  *
407  *	Function stops when it stumbles upon zero pointer (absent block)
408  *		(pointer to last triple returned, *@err == 0)
409  *	or when it gets an IO error reading an indirect block
410  *		(ditto, *@err == -EIO)
411  *	or when it reads all @depth-1 indirect blocks successfully and finds
412  *	the whole chain, all way to the data (returns %NULL, *err == 0).
413  *
414  *      Need to be called with
415  *      down_read(&EXT4_I(inode)->i_data_sem)
416  */
417 static Indirect *ext4_get_branch(struct inode *inode, int depth,
418 				 ext4_lblk_t  *offsets,
419 				 Indirect chain[4], int *err)
420 {
421 	struct super_block *sb = inode->i_sb;
422 	Indirect *p = chain;
423 	struct buffer_head *bh;
424 
425 	*err = 0;
426 	/* i_data is not going away, no lock needed */
427 	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
428 	if (!p->key)
429 		goto no_block;
430 	while (--depth) {
431 		bh = sb_getblk(sb, le32_to_cpu(p->key));
432 		if (unlikely(!bh))
433 			goto failure;
434 
435 		if (!bh_uptodate_or_lock(bh)) {
436 			if (bh_submit_read(bh) < 0) {
437 				put_bh(bh);
438 				goto failure;
439 			}
440 			/* validate block references */
441 			if (ext4_check_indirect_blockref(inode, bh)) {
442 				put_bh(bh);
443 				goto failure;
444 			}
445 		}
446 
447 		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
448 		/* Reader: end */
449 		if (!p->key)
450 			goto no_block;
451 	}
452 	return NULL;
453 
454 failure:
455 	*err = -EIO;
456 no_block:
457 	return p;
458 }
459 
460 /**
461  *	ext4_find_near - find a place for allocation with sufficient locality
462  *	@inode: owner
463  *	@ind: descriptor of indirect block.
464  *
465  *	This function returns the preferred place for block allocation.
466  *	It is used when heuristic for sequential allocation fails.
467  *	Rules are:
468  *	  + if there is a block to the left of our position - allocate near it.
469  *	  + if pointer will live in indirect block - allocate near that block.
470  *	  + if pointer will live in inode - allocate in the same
471  *	    cylinder group.
472  *
473  * In the latter case we colour the starting block by the callers PID to
474  * prevent it from clashing with concurrent allocations for a different inode
475  * in the same block group.   The PID is used here so that functionally related
476  * files will be close-by on-disk.
477  *
478  *	Caller must make sure that @ind is valid and will stay that way.
479  */
480 static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
481 {
482 	struct ext4_inode_info *ei = EXT4_I(inode);
483 	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
484 	__le32 *p;
485 	ext4_fsblk_t bg_start;
486 	ext4_fsblk_t last_block;
487 	ext4_grpblk_t colour;
488 	ext4_group_t block_group;
489 	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
490 
491 	/* Try to find previous block */
492 	for (p = ind->p - 1; p >= start; p--) {
493 		if (*p)
494 			return le32_to_cpu(*p);
495 	}
496 
497 	/* No such thing, so let's try location of indirect block */
498 	if (ind->bh)
499 		return ind->bh->b_blocknr;
500 
501 	/*
502 	 * It is going to be referred to from the inode itself? OK, just put it
503 	 * into the same cylinder group then.
504 	 */
505 	block_group = ei->i_block_group;
506 	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
507 		block_group &= ~(flex_size-1);
508 		if (S_ISREG(inode->i_mode))
509 			block_group++;
510 	}
511 	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
512 	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
513 
514 	/*
515 	 * If we are doing delayed allocation, we don't need take
516 	 * colour into account.
517 	 */
518 	if (test_opt(inode->i_sb, DELALLOC))
519 		return bg_start;
520 
521 	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
522 		colour = (current->pid % 16) *
523 			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
524 	else
525 		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
526 	return bg_start + colour;
527 }
528 
529 /**
530  *	ext4_find_goal - find a preferred place for allocation.
531  *	@inode: owner
532  *	@block:  block we want
533  *	@partial: pointer to the last triple within a chain
534  *
535  *	Normally this function find the preferred place for block allocation,
536  *	returns it.
537  *	Because this is only used for non-extent files, we limit the block nr
538  *	to 32 bits.
539  */
540 static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
541 				   Indirect *partial)
542 {
543 	ext4_fsblk_t goal;
544 
545 	/*
546 	 * XXX need to get goal block from mballoc's data structures
547 	 */
548 
549 	goal = ext4_find_near(inode, partial);
550 	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
551 	return goal;
552 }
553 
554 /**
555  *	ext4_blks_to_allocate: Look up the block map and count the number
556  *	of direct blocks need to be allocated for the given branch.
557  *
558  *	@branch: chain of indirect blocks
559  *	@k: number of blocks need for indirect blocks
560  *	@blks: number of data blocks to be mapped.
561  *	@blocks_to_boundary:  the offset in the indirect block
562  *
563  *	return the total number of blocks to be allocate, including the
564  *	direct and indirect blocks.
565  */
566 static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
567 				 int blocks_to_boundary)
568 {
569 	unsigned int count = 0;
570 
571 	/*
572 	 * Simple case, [t,d]Indirect block(s) has not allocated yet
573 	 * then it's clear blocks on that path have not allocated
574 	 */
575 	if (k > 0) {
576 		/* right now we don't handle cross boundary allocation */
577 		if (blks < blocks_to_boundary + 1)
578 			count += blks;
579 		else
580 			count += blocks_to_boundary + 1;
581 		return count;
582 	}
583 
584 	count++;
585 	while (count < blks && count <= blocks_to_boundary &&
586 		le32_to_cpu(*(branch[0].p + count)) == 0) {
587 		count++;
588 	}
589 	return count;
590 }
591 
592 /**
593  *	ext4_alloc_blocks: multiple allocate blocks needed for a branch
594  *	@indirect_blks: the number of blocks need to allocate for indirect
595  *			blocks
596  *
597  *	@new_blocks: on return it will store the new block numbers for
598  *	the indirect blocks(if needed) and the first direct block,
599  *	@blks:	on return it will store the total number of allocated
600  *		direct blocks
601  */
602 static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
603 			     ext4_lblk_t iblock, ext4_fsblk_t goal,
604 			     int indirect_blks, int blks,
605 			     ext4_fsblk_t new_blocks[4], int *err)
606 {
607 	struct ext4_allocation_request ar;
608 	int target, i;
609 	unsigned long count = 0, blk_allocated = 0;
610 	int index = 0;
611 	ext4_fsblk_t current_block = 0;
612 	int ret = 0;
613 
614 	/*
615 	 * Here we try to allocate the requested multiple blocks at once,
616 	 * on a best-effort basis.
617 	 * To build a branch, we should allocate blocks for
618 	 * the indirect blocks(if not allocated yet), and at least
619 	 * the first direct block of this branch.  That's the
620 	 * minimum number of blocks need to allocate(required)
621 	 */
622 	/* first we try to allocate the indirect blocks */
623 	target = indirect_blks;
624 	while (target > 0) {
625 		count = target;
626 		/* allocating blocks for indirect blocks and direct blocks */
627 		current_block = ext4_new_meta_blocks(handle, inode,
628 							goal, &count, err);
629 		if (*err)
630 			goto failed_out;
631 
632 		if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) {
633 			EXT4_ERROR_INODE(inode,
634 					 "current_block %llu + count %lu > %d!",
635 					 current_block, count,
636 					 EXT4_MAX_BLOCK_FILE_PHYS);
637 			*err = -EIO;
638 			goto failed_out;
639 		}
640 
641 		target -= count;
642 		/* allocate blocks for indirect blocks */
643 		while (index < indirect_blks && count) {
644 			new_blocks[index++] = current_block++;
645 			count--;
646 		}
647 		if (count > 0) {
648 			/*
649 			 * save the new block number
650 			 * for the first direct block
651 			 */
652 			new_blocks[index] = current_block;
653 			printk(KERN_INFO "%s returned more blocks than "
654 						"requested\n", __func__);
655 			WARN_ON(1);
656 			break;
657 		}
658 	}
659 
660 	target = blks - count ;
661 	blk_allocated = count;
662 	if (!target)
663 		goto allocated;
664 	/* Now allocate data blocks */
665 	memset(&ar, 0, sizeof(ar));
666 	ar.inode = inode;
667 	ar.goal = goal;
668 	ar.len = target;
669 	ar.logical = iblock;
670 	if (S_ISREG(inode->i_mode))
671 		/* enable in-core preallocation only for regular files */
672 		ar.flags = EXT4_MB_HINT_DATA;
673 
674 	current_block = ext4_mb_new_blocks(handle, &ar, err);
675 	if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) {
676 		EXT4_ERROR_INODE(inode,
677 				 "current_block %llu + ar.len %d > %d!",
678 				 current_block, ar.len,
679 				 EXT4_MAX_BLOCK_FILE_PHYS);
680 		*err = -EIO;
681 		goto failed_out;
682 	}
683 
684 	if (*err && (target == blks)) {
685 		/*
686 		 * if the allocation failed and we didn't allocate
687 		 * any blocks before
688 		 */
689 		goto failed_out;
690 	}
691 	if (!*err) {
692 		if (target == blks) {
693 			/*
694 			 * save the new block number
695 			 * for the first direct block
696 			 */
697 			new_blocks[index] = current_block;
698 		}
699 		blk_allocated += ar.len;
700 	}
701 allocated:
702 	/* total number of blocks allocated for direct blocks */
703 	ret = blk_allocated;
704 	*err = 0;
705 	return ret;
706 failed_out:
707 	for (i = 0; i < index; i++)
708 		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
709 	return ret;
710 }
711 
712 /**
713  *	ext4_alloc_branch - allocate and set up a chain of blocks.
714  *	@inode: owner
715  *	@indirect_blks: number of allocated indirect blocks
716  *	@blks: number of allocated direct blocks
717  *	@offsets: offsets (in the blocks) to store the pointers to next.
718  *	@branch: place to store the chain in.
719  *
720  *	This function allocates blocks, zeroes out all but the last one,
721  *	links them into chain and (if we are synchronous) writes them to disk.
722  *	In other words, it prepares a branch that can be spliced onto the
723  *	inode. It stores the information about that chain in the branch[], in
724  *	the same format as ext4_get_branch() would do. We are calling it after
725  *	we had read the existing part of chain and partial points to the last
726  *	triple of that (one with zero ->key). Upon the exit we have the same
727  *	picture as after the successful ext4_get_block(), except that in one
728  *	place chain is disconnected - *branch->p is still zero (we did not
729  *	set the last link), but branch->key contains the number that should
730  *	be placed into *branch->p to fill that gap.
731  *
732  *	If allocation fails we free all blocks we've allocated (and forget
733  *	their buffer_heads) and return the error value the from failed
734  *	ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
735  *	as described above and return 0.
736  */
737 static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
738 			     ext4_lblk_t iblock, int indirect_blks,
739 			     int *blks, ext4_fsblk_t goal,
740 			     ext4_lblk_t *offsets, Indirect *branch)
741 {
742 	int blocksize = inode->i_sb->s_blocksize;
743 	int i, n = 0;
744 	int err = 0;
745 	struct buffer_head *bh;
746 	int num;
747 	ext4_fsblk_t new_blocks[4];
748 	ext4_fsblk_t current_block;
749 
750 	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
751 				*blks, new_blocks, &err);
752 	if (err)
753 		return err;
754 
755 	branch[0].key = cpu_to_le32(new_blocks[0]);
756 	/*
757 	 * metadata blocks and data blocks are allocated.
758 	 */
759 	for (n = 1; n <= indirect_blks;  n++) {
760 		/*
761 		 * Get buffer_head for parent block, zero it out
762 		 * and set the pointer to new one, then send
763 		 * parent to disk.
764 		 */
765 		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
766 		if (unlikely(!bh)) {
767 			err = -EIO;
768 			goto failed;
769 		}
770 
771 		branch[n].bh = bh;
772 		lock_buffer(bh);
773 		BUFFER_TRACE(bh, "call get_create_access");
774 		err = ext4_journal_get_create_access(handle, bh);
775 		if (err) {
776 			/* Don't brelse(bh) here; it's done in
777 			 * ext4_journal_forget() below */
778 			unlock_buffer(bh);
779 			goto failed;
780 		}
781 
782 		memset(bh->b_data, 0, blocksize);
783 		branch[n].p = (__le32 *) bh->b_data + offsets[n];
784 		branch[n].key = cpu_to_le32(new_blocks[n]);
785 		*branch[n].p = branch[n].key;
786 		if (n == indirect_blks) {
787 			current_block = new_blocks[n];
788 			/*
789 			 * End of chain, update the last new metablock of
790 			 * the chain to point to the new allocated
791 			 * data blocks numbers
792 			 */
793 			for (i = 1; i < num; i++)
794 				*(branch[n].p + i) = cpu_to_le32(++current_block);
795 		}
796 		BUFFER_TRACE(bh, "marking uptodate");
797 		set_buffer_uptodate(bh);
798 		unlock_buffer(bh);
799 
800 		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
801 		err = ext4_handle_dirty_metadata(handle, inode, bh);
802 		if (err)
803 			goto failed;
804 	}
805 	*blks = num;
806 	return err;
807 failed:
808 	/* Allocation failed, free what we already allocated */
809 	ext4_free_blocks(handle, inode, 0, new_blocks[0], 1, 0);
810 	for (i = 1; i <= n ; i++) {
811 		/*
812 		 * branch[i].bh is newly allocated, so there is no
813 		 * need to revoke the block, which is why we don't
814 		 * need to set EXT4_FREE_BLOCKS_METADATA.
815 		 */
816 		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1,
817 				 EXT4_FREE_BLOCKS_FORGET);
818 	}
819 	for (i = n+1; i < indirect_blks; i++)
820 		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
821 
822 	ext4_free_blocks(handle, inode, 0, new_blocks[i], num, 0);
823 
824 	return err;
825 }
826 
827 /**
828  * ext4_splice_branch - splice the allocated branch onto inode.
829  * @inode: owner
830  * @block: (logical) number of block we are adding
831  * @chain: chain of indirect blocks (with a missing link - see
832  *	ext4_alloc_branch)
833  * @where: location of missing link
834  * @num:   number of indirect blocks we are adding
835  * @blks:  number of direct blocks we are adding
836  *
837  * This function fills the missing link and does all housekeeping needed in
838  * inode (->i_blocks, etc.). In case of success we end up with the full
839  * chain to new block and return 0.
840  */
841 static int ext4_splice_branch(handle_t *handle, struct inode *inode,
842 			      ext4_lblk_t block, Indirect *where, int num,
843 			      int blks)
844 {
845 	int i;
846 	int err = 0;
847 	ext4_fsblk_t current_block;
848 
849 	/*
850 	 * If we're splicing into a [td]indirect block (as opposed to the
851 	 * inode) then we need to get write access to the [td]indirect block
852 	 * before the splice.
853 	 */
854 	if (where->bh) {
855 		BUFFER_TRACE(where->bh, "get_write_access");
856 		err = ext4_journal_get_write_access(handle, where->bh);
857 		if (err)
858 			goto err_out;
859 	}
860 	/* That's it */
861 
862 	*where->p = where->key;
863 
864 	/*
865 	 * Update the host buffer_head or inode to point to more just allocated
866 	 * direct blocks blocks
867 	 */
868 	if (num == 0 && blks > 1) {
869 		current_block = le32_to_cpu(where->key) + 1;
870 		for (i = 1; i < blks; i++)
871 			*(where->p + i) = cpu_to_le32(current_block++);
872 	}
873 
874 	/* We are done with atomic stuff, now do the rest of housekeeping */
875 	/* had we spliced it onto indirect block? */
876 	if (where->bh) {
877 		/*
878 		 * If we spliced it onto an indirect block, we haven't
879 		 * altered the inode.  Note however that if it is being spliced
880 		 * onto an indirect block at the very end of the file (the
881 		 * file is growing) then we *will* alter the inode to reflect
882 		 * the new i_size.  But that is not done here - it is done in
883 		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
884 		 */
885 		jbd_debug(5, "splicing indirect only\n");
886 		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
887 		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
888 		if (err)
889 			goto err_out;
890 	} else {
891 		/*
892 		 * OK, we spliced it into the inode itself on a direct block.
893 		 */
894 		ext4_mark_inode_dirty(handle, inode);
895 		jbd_debug(5, "splicing direct\n");
896 	}
897 	return err;
898 
899 err_out:
900 	for (i = 1; i <= num; i++) {
901 		/*
902 		 * branch[i].bh is newly allocated, so there is no
903 		 * need to revoke the block, which is why we don't
904 		 * need to set EXT4_FREE_BLOCKS_METADATA.
905 		 */
906 		ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
907 				 EXT4_FREE_BLOCKS_FORGET);
908 	}
909 	ext4_free_blocks(handle, inode, 0, le32_to_cpu(where[num].key),
910 			 blks, 0);
911 
912 	return err;
913 }
914 
915 /*
916  * The ext4_ind_map_blocks() function handles non-extents inodes
917  * (i.e., using the traditional indirect/double-indirect i_blocks
918  * scheme) for ext4_map_blocks().
919  *
920  * Allocation strategy is simple: if we have to allocate something, we will
921  * have to go the whole way to leaf. So let's do it before attaching anything
922  * to tree, set linkage between the newborn blocks, write them if sync is
923  * required, recheck the path, free and repeat if check fails, otherwise
924  * set the last missing link (that will protect us from any truncate-generated
925  * removals - all blocks on the path are immune now) and possibly force the
926  * write on the parent block.
927  * That has a nice additional property: no special recovery from the failed
928  * allocations is needed - we simply release blocks and do not touch anything
929  * reachable from inode.
930  *
931  * `handle' can be NULL if create == 0.
932  *
933  * return > 0, # of blocks mapped or allocated.
934  * return = 0, if plain lookup failed.
935  * return < 0, error case.
936  *
937  * The ext4_ind_get_blocks() function should be called with
938  * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
939  * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
940  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
941  * blocks.
942  */
943 static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
944 			       struct ext4_map_blocks *map,
945 			       int flags)
946 {
947 	int err = -EIO;
948 	ext4_lblk_t offsets[4];
949 	Indirect chain[4];
950 	Indirect *partial;
951 	ext4_fsblk_t goal;
952 	int indirect_blks;
953 	int blocks_to_boundary = 0;
954 	int depth;
955 	int count = 0;
956 	ext4_fsblk_t first_block = 0;
957 
958 	J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
959 	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
960 	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
961 				   &blocks_to_boundary);
962 
963 	if (depth == 0)
964 		goto out;
965 
966 	partial = ext4_get_branch(inode, depth, offsets, chain, &err);
967 
968 	/* Simplest case - block found, no allocation needed */
969 	if (!partial) {
970 		first_block = le32_to_cpu(chain[depth - 1].key);
971 		count++;
972 		/*map more blocks*/
973 		while (count < map->m_len && count <= blocks_to_boundary) {
974 			ext4_fsblk_t blk;
975 
976 			blk = le32_to_cpu(*(chain[depth-1].p + count));
977 
978 			if (blk == first_block + count)
979 				count++;
980 			else
981 				break;
982 		}
983 		goto got_it;
984 	}
985 
986 	/* Next simple case - plain lookup or failed read of indirect block */
987 	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
988 		goto cleanup;
989 
990 	/*
991 	 * Okay, we need to do block allocation.
992 	*/
993 	goal = ext4_find_goal(inode, map->m_lblk, partial);
994 
995 	/* the number of blocks need to allocate for [d,t]indirect blocks */
996 	indirect_blks = (chain + depth) - partial - 1;
997 
998 	/*
999 	 * Next look up the indirect map to count the totoal number of
1000 	 * direct blocks to allocate for this branch.
1001 	 */
1002 	count = ext4_blks_to_allocate(partial, indirect_blks,
1003 				      map->m_len, blocks_to_boundary);
1004 	/*
1005 	 * Block out ext4_truncate while we alter the tree
1006 	 */
1007 	err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
1008 				&count, goal,
1009 				offsets + (partial - chain), partial);
1010 
1011 	/*
1012 	 * The ext4_splice_branch call will free and forget any buffers
1013 	 * on the new chain if there is a failure, but that risks using
1014 	 * up transaction credits, especially for bitmaps where the
1015 	 * credits cannot be returned.  Can we handle this somehow?  We
1016 	 * may need to return -EAGAIN upwards in the worst case.  --sct
1017 	 */
1018 	if (!err)
1019 		err = ext4_splice_branch(handle, inode, map->m_lblk,
1020 					 partial, indirect_blks, count);
1021 	if (err)
1022 		goto cleanup;
1023 
1024 	map->m_flags |= EXT4_MAP_NEW;
1025 
1026 	ext4_update_inode_fsync_trans(handle, inode, 1);
1027 got_it:
1028 	map->m_flags |= EXT4_MAP_MAPPED;
1029 	map->m_pblk = le32_to_cpu(chain[depth-1].key);
1030 	map->m_len = count;
1031 	if (count > blocks_to_boundary)
1032 		map->m_flags |= EXT4_MAP_BOUNDARY;
1033 	err = count;
1034 	/* Clean up and exit */
1035 	partial = chain + depth - 1;	/* the whole chain */
1036 cleanup:
1037 	while (partial > chain) {
1038 		BUFFER_TRACE(partial->bh, "call brelse");
1039 		brelse(partial->bh);
1040 		partial--;
1041 	}
1042 out:
1043 	return err;
1044 }
1045 
1046 #ifdef CONFIG_QUOTA
1047 qsize_t *ext4_get_reserved_space(struct inode *inode)
1048 {
1049 	return &EXT4_I(inode)->i_reserved_quota;
1050 }
1051 #endif
1052 
1053 /*
1054  * Calculate the number of metadata blocks need to reserve
1055  * to allocate a new block at @lblocks for non extent file based file
1056  */
1057 static int ext4_indirect_calc_metadata_amount(struct inode *inode,
1058 					      sector_t lblock)
1059 {
1060 	struct ext4_inode_info *ei = EXT4_I(inode);
1061 	sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
1062 	int blk_bits;
1063 
1064 	if (lblock < EXT4_NDIR_BLOCKS)
1065 		return 0;
1066 
1067 	lblock -= EXT4_NDIR_BLOCKS;
1068 
1069 	if (ei->i_da_metadata_calc_len &&
1070 	    (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
1071 		ei->i_da_metadata_calc_len++;
1072 		return 0;
1073 	}
1074 	ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
1075 	ei->i_da_metadata_calc_len = 1;
1076 	blk_bits = order_base_2(lblock);
1077 	return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
1078 }
1079 
1080 /*
1081  * Calculate the number of metadata blocks need to reserve
1082  * to allocate a block located at @lblock
1083  */
1084 static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock)
1085 {
1086 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1087 		return ext4_ext_calc_metadata_amount(inode, lblock);
1088 
1089 	return ext4_indirect_calc_metadata_amount(inode, lblock);
1090 }
1091 
1092 /*
1093  * Called with i_data_sem down, which is important since we can call
1094  * ext4_discard_preallocations() from here.
1095  */
1096 void ext4_da_update_reserve_space(struct inode *inode,
1097 					int used, int quota_claim)
1098 {
1099 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1100 	struct ext4_inode_info *ei = EXT4_I(inode);
1101 
1102 	spin_lock(&ei->i_block_reservation_lock);
1103 	trace_ext4_da_update_reserve_space(inode, used);
1104 	if (unlikely(used > ei->i_reserved_data_blocks)) {
1105 		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
1106 			 "with only %d reserved data blocks\n",
1107 			 __func__, inode->i_ino, used,
1108 			 ei->i_reserved_data_blocks);
1109 		WARN_ON(1);
1110 		used = ei->i_reserved_data_blocks;
1111 	}
1112 
1113 	/* Update per-inode reservations */
1114 	ei->i_reserved_data_blocks -= used;
1115 	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
1116 	percpu_counter_sub(&sbi->s_dirtyblocks_counter,
1117 			   used + ei->i_allocated_meta_blocks);
1118 	ei->i_allocated_meta_blocks = 0;
1119 
1120 	if (ei->i_reserved_data_blocks == 0) {
1121 		/*
1122 		 * We can release all of the reserved metadata blocks
1123 		 * only when we have written all of the delayed
1124 		 * allocation blocks.
1125 		 */
1126 		percpu_counter_sub(&sbi->s_dirtyblocks_counter,
1127 				   ei->i_reserved_meta_blocks);
1128 		ei->i_reserved_meta_blocks = 0;
1129 		ei->i_da_metadata_calc_len = 0;
1130 	}
1131 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1132 
1133 	/* Update quota subsystem for data blocks */
1134 	if (quota_claim)
1135 		dquot_claim_block(inode, used);
1136 	else {
1137 		/*
1138 		 * We did fallocate with an offset that is already delayed
1139 		 * allocated. So on delayed allocated writeback we should
1140 		 * not re-claim the quota for fallocated blocks.
1141 		 */
1142 		dquot_release_reservation_block(inode, used);
1143 	}
1144 
1145 	/*
1146 	 * If we have done all the pending block allocations and if
1147 	 * there aren't any writers on the inode, we can discard the
1148 	 * inode's preallocations.
1149 	 */
1150 	if ((ei->i_reserved_data_blocks == 0) &&
1151 	    (atomic_read(&inode->i_writecount) == 0))
1152 		ext4_discard_preallocations(inode);
1153 }
1154 
1155 static int __check_block_validity(struct inode *inode, const char *func,
1156 				unsigned int line,
1157 				struct ext4_map_blocks *map)
1158 {
1159 	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
1160 				   map->m_len)) {
1161 		ext4_error_inode(inode, func, line, map->m_pblk,
1162 				 "lblock %lu mapped to illegal pblock "
1163 				 "(length %d)", (unsigned long) map->m_lblk,
1164 				 map->m_len);
1165 		return -EIO;
1166 	}
1167 	return 0;
1168 }
1169 
1170 #define check_block_validity(inode, map)	\
1171 	__check_block_validity((inode), __func__, __LINE__, (map))
1172 
1173 /*
1174  * Return the number of contiguous dirty pages in a given inode
1175  * starting at page frame idx.
1176  */
1177 static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
1178 				    unsigned int max_pages)
1179 {
1180 	struct address_space *mapping = inode->i_mapping;
1181 	pgoff_t	index;
1182 	struct pagevec pvec;
1183 	pgoff_t num = 0;
1184 	int i, nr_pages, done = 0;
1185 
1186 	if (max_pages == 0)
1187 		return 0;
1188 	pagevec_init(&pvec, 0);
1189 	while (!done) {
1190 		index = idx;
1191 		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1192 					      PAGECACHE_TAG_DIRTY,
1193 					      (pgoff_t)PAGEVEC_SIZE);
1194 		if (nr_pages == 0)
1195 			break;
1196 		for (i = 0; i < nr_pages; i++) {
1197 			struct page *page = pvec.pages[i];
1198 			struct buffer_head *bh, *head;
1199 
1200 			lock_page(page);
1201 			if (unlikely(page->mapping != mapping) ||
1202 			    !PageDirty(page) ||
1203 			    PageWriteback(page) ||
1204 			    page->index != idx) {
1205 				done = 1;
1206 				unlock_page(page);
1207 				break;
1208 			}
1209 			if (page_has_buffers(page)) {
1210 				bh = head = page_buffers(page);
1211 				do {
1212 					if (!buffer_delay(bh) &&
1213 					    !buffer_unwritten(bh))
1214 						done = 1;
1215 					bh = bh->b_this_page;
1216 				} while (!done && (bh != head));
1217 			}
1218 			unlock_page(page);
1219 			if (done)
1220 				break;
1221 			idx++;
1222 			num++;
1223 			if (num >= max_pages) {
1224 				done = 1;
1225 				break;
1226 			}
1227 		}
1228 		pagevec_release(&pvec);
1229 	}
1230 	return num;
1231 }
1232 
1233 /*
1234  * The ext4_map_blocks() function tries to look up the requested blocks,
1235  * and returns if the blocks are already mapped.
1236  *
1237  * Otherwise it takes the write lock of the i_data_sem and allocate blocks
1238  * and store the allocated blocks in the result buffer head and mark it
1239  * mapped.
1240  *
1241  * If file type is extents based, it will call ext4_ext_map_blocks(),
1242  * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
1243  * based files
1244  *
1245  * On success, it returns the number of blocks being mapped or allocate.
1246  * if create==0 and the blocks are pre-allocated and uninitialized block,
1247  * the result buffer head is unmapped. If the create ==1, it will make sure
1248  * the buffer head is mapped.
1249  *
1250  * It returns 0 if plain look up failed (blocks have not been allocated), in
1251  * that casem, buffer head is unmapped
1252  *
1253  * It returns the error in case of allocation failure.
1254  */
1255 int ext4_map_blocks(handle_t *handle, struct inode *inode,
1256 		    struct ext4_map_blocks *map, int flags)
1257 {
1258 	int retval;
1259 
1260 	map->m_flags = 0;
1261 	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
1262 		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
1263 		  (unsigned long) map->m_lblk);
1264 	/*
1265 	 * Try to see if we can get the block without requesting a new
1266 	 * file system block.
1267 	 */
1268 	down_read((&EXT4_I(inode)->i_data_sem));
1269 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
1270 		retval = ext4_ext_map_blocks(handle, inode, map, 0);
1271 	} else {
1272 		retval = ext4_ind_map_blocks(handle, inode, map, 0);
1273 	}
1274 	up_read((&EXT4_I(inode)->i_data_sem));
1275 
1276 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
1277 		int ret = check_block_validity(inode, map);
1278 		if (ret != 0)
1279 			return ret;
1280 	}
1281 
1282 	/* If it is only a block(s) look up */
1283 	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
1284 		return retval;
1285 
1286 	/*
1287 	 * Returns if the blocks have already allocated
1288 	 *
1289 	 * Note that if blocks have been preallocated
1290 	 * ext4_ext_get_block() returns th create = 0
1291 	 * with buffer head unmapped.
1292 	 */
1293 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
1294 		return retval;
1295 
1296 	/*
1297 	 * When we call get_blocks without the create flag, the
1298 	 * BH_Unwritten flag could have gotten set if the blocks
1299 	 * requested were part of a uninitialized extent.  We need to
1300 	 * clear this flag now that we are committed to convert all or
1301 	 * part of the uninitialized extent to be an initialized
1302 	 * extent.  This is because we need to avoid the combination
1303 	 * of BH_Unwritten and BH_Mapped flags being simultaneously
1304 	 * set on the buffer_head.
1305 	 */
1306 	map->m_flags &= ~EXT4_MAP_UNWRITTEN;
1307 
1308 	/*
1309 	 * New blocks allocate and/or writing to uninitialized extent
1310 	 * will possibly result in updating i_data, so we take
1311 	 * the write lock of i_data_sem, and call get_blocks()
1312 	 * with create == 1 flag.
1313 	 */
1314 	down_write((&EXT4_I(inode)->i_data_sem));
1315 
1316 	/*
1317 	 * if the caller is from delayed allocation writeout path
1318 	 * we have already reserved fs blocks for allocation
1319 	 * let the underlying get_block() function know to
1320 	 * avoid double accounting
1321 	 */
1322 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
1323 		EXT4_I(inode)->i_delalloc_reserved_flag = 1;
1324 	/*
1325 	 * We need to check for EXT4 here because migrate
1326 	 * could have changed the inode type in between
1327 	 */
1328 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
1329 		retval = ext4_ext_map_blocks(handle, inode, map, flags);
1330 	} else {
1331 		retval = ext4_ind_map_blocks(handle, inode, map, flags);
1332 
1333 		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
1334 			/*
1335 			 * We allocated new blocks which will result in
1336 			 * i_data's format changing.  Force the migrate
1337 			 * to fail by clearing migrate flags
1338 			 */
1339 			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
1340 		}
1341 
1342 		/*
1343 		 * Update reserved blocks/metadata blocks after successful
1344 		 * block allocation which had been deferred till now. We don't
1345 		 * support fallocate for non extent files. So we can update
1346 		 * reserve space here.
1347 		 */
1348 		if ((retval > 0) &&
1349 			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
1350 			ext4_da_update_reserve_space(inode, retval, 1);
1351 	}
1352 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
1353 		EXT4_I(inode)->i_delalloc_reserved_flag = 0;
1354 
1355 	up_write((&EXT4_I(inode)->i_data_sem));
1356 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
1357 		int ret = check_block_validity(inode, map);
1358 		if (ret != 0)
1359 			return ret;
1360 	}
1361 	return retval;
1362 }
1363 
1364 /* Maximum number of blocks we map for direct IO at once. */
1365 #define DIO_MAX_BLOCKS 4096
1366 
1367 static int _ext4_get_block(struct inode *inode, sector_t iblock,
1368 			   struct buffer_head *bh, int flags)
1369 {
1370 	handle_t *handle = ext4_journal_current_handle();
1371 	struct ext4_map_blocks map;
1372 	int ret = 0, started = 0;
1373 	int dio_credits;
1374 
1375 	map.m_lblk = iblock;
1376 	map.m_len = bh->b_size >> inode->i_blkbits;
1377 
1378 	if (flags && !handle) {
1379 		/* Direct IO write... */
1380 		if (map.m_len > DIO_MAX_BLOCKS)
1381 			map.m_len = DIO_MAX_BLOCKS;
1382 		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
1383 		handle = ext4_journal_start(inode, dio_credits);
1384 		if (IS_ERR(handle)) {
1385 			ret = PTR_ERR(handle);
1386 			return ret;
1387 		}
1388 		started = 1;
1389 	}
1390 
1391 	ret = ext4_map_blocks(handle, inode, &map, flags);
1392 	if (ret > 0) {
1393 		map_bh(bh, inode->i_sb, map.m_pblk);
1394 		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
1395 		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
1396 		ret = 0;
1397 	}
1398 	if (started)
1399 		ext4_journal_stop(handle);
1400 	return ret;
1401 }
1402 
1403 int ext4_get_block(struct inode *inode, sector_t iblock,
1404 		   struct buffer_head *bh, int create)
1405 {
1406 	return _ext4_get_block(inode, iblock, bh,
1407 			       create ? EXT4_GET_BLOCKS_CREATE : 0);
1408 }
1409 
1410 /*
1411  * `handle' can be NULL if create is zero
1412  */
1413 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
1414 				ext4_lblk_t block, int create, int *errp)
1415 {
1416 	struct ext4_map_blocks map;
1417 	struct buffer_head *bh;
1418 	int fatal = 0, err;
1419 
1420 	J_ASSERT(handle != NULL || create == 0);
1421 
1422 	map.m_lblk = block;
1423 	map.m_len = 1;
1424 	err = ext4_map_blocks(handle, inode, &map,
1425 			      create ? EXT4_GET_BLOCKS_CREATE : 0);
1426 
1427 	if (err < 0)
1428 		*errp = err;
1429 	if (err <= 0)
1430 		return NULL;
1431 	*errp = 0;
1432 
1433 	bh = sb_getblk(inode->i_sb, map.m_pblk);
1434 	if (!bh) {
1435 		*errp = -EIO;
1436 		return NULL;
1437 	}
1438 	if (map.m_flags & EXT4_MAP_NEW) {
1439 		J_ASSERT(create != 0);
1440 		J_ASSERT(handle != NULL);
1441 
1442 		/*
1443 		 * Now that we do not always journal data, we should
1444 		 * keep in mind whether this should always journal the
1445 		 * new buffer as metadata.  For now, regular file
1446 		 * writes use ext4_get_block instead, so it's not a
1447 		 * problem.
1448 		 */
1449 		lock_buffer(bh);
1450 		BUFFER_TRACE(bh, "call get_create_access");
1451 		fatal = ext4_journal_get_create_access(handle, bh);
1452 		if (!fatal && !buffer_uptodate(bh)) {
1453 			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1454 			set_buffer_uptodate(bh);
1455 		}
1456 		unlock_buffer(bh);
1457 		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
1458 		err = ext4_handle_dirty_metadata(handle, inode, bh);
1459 		if (!fatal)
1460 			fatal = err;
1461 	} else {
1462 		BUFFER_TRACE(bh, "not a new buffer");
1463 	}
1464 	if (fatal) {
1465 		*errp = fatal;
1466 		brelse(bh);
1467 		bh = NULL;
1468 	}
1469 	return bh;
1470 }
1471 
1472 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
1473 			       ext4_lblk_t block, int create, int *err)
1474 {
1475 	struct buffer_head *bh;
1476 
1477 	bh = ext4_getblk(handle, inode, block, create, err);
1478 	if (!bh)
1479 		return bh;
1480 	if (buffer_uptodate(bh))
1481 		return bh;
1482 	ll_rw_block(READ_META, 1, &bh);
1483 	wait_on_buffer(bh);
1484 	if (buffer_uptodate(bh))
1485 		return bh;
1486 	put_bh(bh);
1487 	*err = -EIO;
1488 	return NULL;
1489 }
1490 
1491 static int walk_page_buffers(handle_t *handle,
1492 			     struct buffer_head *head,
1493 			     unsigned from,
1494 			     unsigned to,
1495 			     int *partial,
1496 			     int (*fn)(handle_t *handle,
1497 				       struct buffer_head *bh))
1498 {
1499 	struct buffer_head *bh;
1500 	unsigned block_start, block_end;
1501 	unsigned blocksize = head->b_size;
1502 	int err, ret = 0;
1503 	struct buffer_head *next;
1504 
1505 	for (bh = head, block_start = 0;
1506 	     ret == 0 && (bh != head || !block_start);
1507 	     block_start = block_end, bh = next) {
1508 		next = bh->b_this_page;
1509 		block_end = block_start + blocksize;
1510 		if (block_end <= from || block_start >= to) {
1511 			if (partial && !buffer_uptodate(bh))
1512 				*partial = 1;
1513 			continue;
1514 		}
1515 		err = (*fn)(handle, bh);
1516 		if (!ret)
1517 			ret = err;
1518 	}
1519 	return ret;
1520 }
1521 
1522 /*
1523  * To preserve ordering, it is essential that the hole instantiation and
1524  * the data write be encapsulated in a single transaction.  We cannot
1525  * close off a transaction and start a new one between the ext4_get_block()
1526  * and the commit_write().  So doing the jbd2_journal_start at the start of
1527  * prepare_write() is the right place.
1528  *
1529  * Also, this function can nest inside ext4_writepage() ->
1530  * block_write_full_page(). In that case, we *know* that ext4_writepage()
1531  * has generated enough buffer credits to do the whole page.  So we won't
1532  * block on the journal in that case, which is good, because the caller may
1533  * be PF_MEMALLOC.
1534  *
1535  * By accident, ext4 can be reentered when a transaction is open via
1536  * quota file writes.  If we were to commit the transaction while thus
1537  * reentered, there can be a deadlock - we would be holding a quota
1538  * lock, and the commit would never complete if another thread had a
1539  * transaction open and was blocking on the quota lock - a ranking
1540  * violation.
1541  *
1542  * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
1543  * will _not_ run commit under these circumstances because handle->h_ref
1544  * is elevated.  We'll still have enough credits for the tiny quotafile
1545  * write.
1546  */
1547 static int do_journal_get_write_access(handle_t *handle,
1548 				       struct buffer_head *bh)
1549 {
1550 	int dirty = buffer_dirty(bh);
1551 	int ret;
1552 
1553 	if (!buffer_mapped(bh) || buffer_freed(bh))
1554 		return 0;
1555 	/*
1556 	 * __block_write_begin() could have dirtied some buffers. Clean
1557 	 * the dirty bit as jbd2_journal_get_write_access() could complain
1558 	 * otherwise about fs integrity issues. Setting of the dirty bit
1559 	 * by __block_write_begin() isn't a real problem here as we clear
1560 	 * the bit before releasing a page lock and thus writeback cannot
1561 	 * ever write the buffer.
1562 	 */
1563 	if (dirty)
1564 		clear_buffer_dirty(bh);
1565 	ret = ext4_journal_get_write_access(handle, bh);
1566 	if (!ret && dirty)
1567 		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
1568 	return ret;
1569 }
1570 
1571 /*
1572  * Truncate blocks that were not used by write. We have to truncate the
1573  * pagecache as well so that corresponding buffers get properly unmapped.
1574  */
1575 static void ext4_truncate_failed_write(struct inode *inode)
1576 {
1577 	truncate_inode_pages(inode->i_mapping, inode->i_size);
1578 	ext4_truncate(inode);
1579 }
1580 
1581 static int ext4_get_block_write(struct inode *inode, sector_t iblock,
1582 		   struct buffer_head *bh_result, int create);
1583 static int ext4_write_begin(struct file *file, struct address_space *mapping,
1584 			    loff_t pos, unsigned len, unsigned flags,
1585 			    struct page **pagep, void **fsdata)
1586 {
1587 	struct inode *inode = mapping->host;
1588 	int ret, needed_blocks;
1589 	handle_t *handle;
1590 	int retries = 0;
1591 	struct page *page;
1592 	pgoff_t index;
1593 	unsigned from, to;
1594 
1595 	trace_ext4_write_begin(inode, pos, len, flags);
1596 	/*
1597 	 * Reserve one block more for addition to orphan list in case
1598 	 * we allocate blocks but write fails for some reason
1599 	 */
1600 	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
1601 	index = pos >> PAGE_CACHE_SHIFT;
1602 	from = pos & (PAGE_CACHE_SIZE - 1);
1603 	to = from + len;
1604 
1605 retry:
1606 	handle = ext4_journal_start(inode, needed_blocks);
1607 	if (IS_ERR(handle)) {
1608 		ret = PTR_ERR(handle);
1609 		goto out;
1610 	}
1611 
1612 	/* We cannot recurse into the filesystem as the transaction is already
1613 	 * started */
1614 	flags |= AOP_FLAG_NOFS;
1615 
1616 	page = grab_cache_page_write_begin(mapping, index, flags);
1617 	if (!page) {
1618 		ext4_journal_stop(handle);
1619 		ret = -ENOMEM;
1620 		goto out;
1621 	}
1622 	*pagep = page;
1623 
1624 	if (ext4_should_dioread_nolock(inode))
1625 		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
1626 	else
1627 		ret = __block_write_begin(page, pos, len, ext4_get_block);
1628 
1629 	if (!ret && ext4_should_journal_data(inode)) {
1630 		ret = walk_page_buffers(handle, page_buffers(page),
1631 				from, to, NULL, do_journal_get_write_access);
1632 	}
1633 
1634 	if (ret) {
1635 		unlock_page(page);
1636 		page_cache_release(page);
1637 		/*
1638 		 * __block_write_begin may have instantiated a few blocks
1639 		 * outside i_size.  Trim these off again. Don't need
1640 		 * i_size_read because we hold i_mutex.
1641 		 *
1642 		 * Add inode to orphan list in case we crash before
1643 		 * truncate finishes
1644 		 */
1645 		if (pos + len > inode->i_size && ext4_can_truncate(inode))
1646 			ext4_orphan_add(handle, inode);
1647 
1648 		ext4_journal_stop(handle);
1649 		if (pos + len > inode->i_size) {
1650 			ext4_truncate_failed_write(inode);
1651 			/*
1652 			 * If truncate failed early the inode might
1653 			 * still be on the orphan list; we need to
1654 			 * make sure the inode is removed from the
1655 			 * orphan list in that case.
1656 			 */
1657 			if (inode->i_nlink)
1658 				ext4_orphan_del(NULL, inode);
1659 		}
1660 	}
1661 
1662 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
1663 		goto retry;
1664 out:
1665 	return ret;
1666 }
1667 
1668 /* For write_end() in data=journal mode */
1669 static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1670 {
1671 	if (!buffer_mapped(bh) || buffer_freed(bh))
1672 		return 0;
1673 	set_buffer_uptodate(bh);
1674 	return ext4_handle_dirty_metadata(handle, NULL, bh);
1675 }
1676 
1677 static int ext4_generic_write_end(struct file *file,
1678 				  struct address_space *mapping,
1679 				  loff_t pos, unsigned len, unsigned copied,
1680 				  struct page *page, void *fsdata)
1681 {
1682 	int i_size_changed = 0;
1683 	struct inode *inode = mapping->host;
1684 	handle_t *handle = ext4_journal_current_handle();
1685 
1686 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1687 
1688 	/*
1689 	 * No need to use i_size_read() here, the i_size
1690 	 * cannot change under us because we hold i_mutex.
1691 	 *
1692 	 * But it's important to update i_size while still holding page lock:
1693 	 * page writeout could otherwise come in and zero beyond i_size.
1694 	 */
1695 	if (pos + copied > inode->i_size) {
1696 		i_size_write(inode, pos + copied);
1697 		i_size_changed = 1;
1698 	}
1699 
1700 	if (pos + copied >  EXT4_I(inode)->i_disksize) {
1701 		/* We need to mark inode dirty even if
1702 		 * new_i_size is less that inode->i_size
1703 		 * bu greater than i_disksize.(hint delalloc)
1704 		 */
1705 		ext4_update_i_disksize(inode, (pos + copied));
1706 		i_size_changed = 1;
1707 	}
1708 	unlock_page(page);
1709 	page_cache_release(page);
1710 
1711 	/*
1712 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
1713 	 * makes the holding time of page lock longer. Second, it forces lock
1714 	 * ordering of page lock and transaction start for journaling
1715 	 * filesystems.
1716 	 */
1717 	if (i_size_changed)
1718 		ext4_mark_inode_dirty(handle, inode);
1719 
1720 	return copied;
1721 }
1722 
1723 /*
1724  * We need to pick up the new inode size which generic_commit_write gave us
1725  * `file' can be NULL - eg, when called from page_symlink().
1726  *
1727  * ext4 never places buffers on inode->i_mapping->private_list.  metadata
1728  * buffers are managed internally.
1729  */
1730 static int ext4_ordered_write_end(struct file *file,
1731 				  struct address_space *mapping,
1732 				  loff_t pos, unsigned len, unsigned copied,
1733 				  struct page *page, void *fsdata)
1734 {
1735 	handle_t *handle = ext4_journal_current_handle();
1736 	struct inode *inode = mapping->host;
1737 	int ret = 0, ret2;
1738 
1739 	trace_ext4_ordered_write_end(inode, pos, len, copied);
1740 	ret = ext4_jbd2_file_inode(handle, inode);
1741 
1742 	if (ret == 0) {
1743 		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
1744 							page, fsdata);
1745 		copied = ret2;
1746 		if (pos + len > inode->i_size && ext4_can_truncate(inode))
1747 			/* if we have allocated more blocks and copied
1748 			 * less. We will have blocks allocated outside
1749 			 * inode->i_size. So truncate them
1750 			 */
1751 			ext4_orphan_add(handle, inode);
1752 		if (ret2 < 0)
1753 			ret = ret2;
1754 	}
1755 	ret2 = ext4_journal_stop(handle);
1756 	if (!ret)
1757 		ret = ret2;
1758 
1759 	if (pos + len > inode->i_size) {
1760 		ext4_truncate_failed_write(inode);
1761 		/*
1762 		 * If truncate failed early the inode might still be
1763 		 * on the orphan list; we need to make sure the inode
1764 		 * is removed from the orphan list in that case.
1765 		 */
1766 		if (inode->i_nlink)
1767 			ext4_orphan_del(NULL, inode);
1768 	}
1769 
1770 
1771 	return ret ? ret : copied;
1772 }
1773 
1774 static int ext4_writeback_write_end(struct file *file,
1775 				    struct address_space *mapping,
1776 				    loff_t pos, unsigned len, unsigned copied,
1777 				    struct page *page, void *fsdata)
1778 {
1779 	handle_t *handle = ext4_journal_current_handle();
1780 	struct inode *inode = mapping->host;
1781 	int ret = 0, ret2;
1782 
1783 	trace_ext4_writeback_write_end(inode, pos, len, copied);
1784 	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
1785 							page, fsdata);
1786 	copied = ret2;
1787 	if (pos + len > inode->i_size && ext4_can_truncate(inode))
1788 		/* if we have allocated more blocks and copied
1789 		 * less. We will have blocks allocated outside
1790 		 * inode->i_size. So truncate them
1791 		 */
1792 		ext4_orphan_add(handle, inode);
1793 
1794 	if (ret2 < 0)
1795 		ret = ret2;
1796 
1797 	ret2 = ext4_journal_stop(handle);
1798 	if (!ret)
1799 		ret = ret2;
1800 
1801 	if (pos + len > inode->i_size) {
1802 		ext4_truncate_failed_write(inode);
1803 		/*
1804 		 * If truncate failed early the inode might still be
1805 		 * on the orphan list; we need to make sure the inode
1806 		 * is removed from the orphan list in that case.
1807 		 */
1808 		if (inode->i_nlink)
1809 			ext4_orphan_del(NULL, inode);
1810 	}
1811 
1812 	return ret ? ret : copied;
1813 }
1814 
1815 static int ext4_journalled_write_end(struct file *file,
1816 				     struct address_space *mapping,
1817 				     loff_t pos, unsigned len, unsigned copied,
1818 				     struct page *page, void *fsdata)
1819 {
1820 	handle_t *handle = ext4_journal_current_handle();
1821 	struct inode *inode = mapping->host;
1822 	int ret = 0, ret2;
1823 	int partial = 0;
1824 	unsigned from, to;
1825 	loff_t new_i_size;
1826 
1827 	trace_ext4_journalled_write_end(inode, pos, len, copied);
1828 	from = pos & (PAGE_CACHE_SIZE - 1);
1829 	to = from + len;
1830 
1831 	if (copied < len) {
1832 		if (!PageUptodate(page))
1833 			copied = 0;
1834 		page_zero_new_buffers(page, from+copied, to);
1835 	}
1836 
1837 	ret = walk_page_buffers(handle, page_buffers(page), from,
1838 				to, &partial, write_end_fn);
1839 	if (!partial)
1840 		SetPageUptodate(page);
1841 	new_i_size = pos + copied;
1842 	if (new_i_size > inode->i_size)
1843 		i_size_write(inode, pos+copied);
1844 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1845 	if (new_i_size > EXT4_I(inode)->i_disksize) {
1846 		ext4_update_i_disksize(inode, new_i_size);
1847 		ret2 = ext4_mark_inode_dirty(handle, inode);
1848 		if (!ret)
1849 			ret = ret2;
1850 	}
1851 
1852 	unlock_page(page);
1853 	page_cache_release(page);
1854 	if (pos + len > inode->i_size && ext4_can_truncate(inode))
1855 		/* if we have allocated more blocks and copied
1856 		 * less. We will have blocks allocated outside
1857 		 * inode->i_size. So truncate them
1858 		 */
1859 		ext4_orphan_add(handle, inode);
1860 
1861 	ret2 = ext4_journal_stop(handle);
1862 	if (!ret)
1863 		ret = ret2;
1864 	if (pos + len > inode->i_size) {
1865 		ext4_truncate_failed_write(inode);
1866 		/*
1867 		 * If truncate failed early the inode might still be
1868 		 * on the orphan list; we need to make sure the inode
1869 		 * is removed from the orphan list in that case.
1870 		 */
1871 		if (inode->i_nlink)
1872 			ext4_orphan_del(NULL, inode);
1873 	}
1874 
1875 	return ret ? ret : copied;
1876 }
1877 
1878 /*
1879  * Reserve a single block located at lblock
1880  */
1881 static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
1882 {
1883 	int retries = 0;
1884 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1885 	struct ext4_inode_info *ei = EXT4_I(inode);
1886 	unsigned long md_needed;
1887 	int ret;
1888 
1889 	/*
1890 	 * recalculate the amount of metadata blocks to reserve
1891 	 * in order to allocate nrblocks
1892 	 * worse case is one extent per block
1893 	 */
1894 repeat:
1895 	spin_lock(&ei->i_block_reservation_lock);
1896 	md_needed = ext4_calc_metadata_amount(inode, lblock);
1897 	trace_ext4_da_reserve_space(inode, md_needed);
1898 	spin_unlock(&ei->i_block_reservation_lock);
1899 
1900 	/*
1901 	 * We will charge metadata quota at writeout time; this saves
1902 	 * us from metadata over-estimation, though we may go over by
1903 	 * a small amount in the end.  Here we just reserve for data.
1904 	 */
1905 	ret = dquot_reserve_block(inode, 1);
1906 	if (ret)
1907 		return ret;
1908 	/*
1909 	 * We do still charge estimated metadata to the sb though;
1910 	 * we cannot afford to run out of free blocks.
1911 	 */
1912 	if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
1913 		dquot_release_reservation_block(inode, 1);
1914 		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1915 			yield();
1916 			goto repeat;
1917 		}
1918 		return -ENOSPC;
1919 	}
1920 	spin_lock(&ei->i_block_reservation_lock);
1921 	ei->i_reserved_data_blocks++;
1922 	ei->i_reserved_meta_blocks += md_needed;
1923 	spin_unlock(&ei->i_block_reservation_lock);
1924 
1925 	return 0;       /* success */
1926 }
1927 
1928 static void ext4_da_release_space(struct inode *inode, int to_free)
1929 {
1930 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1931 	struct ext4_inode_info *ei = EXT4_I(inode);
1932 
1933 	if (!to_free)
1934 		return;		/* Nothing to release, exit */
1935 
1936 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1937 
1938 	trace_ext4_da_release_space(inode, to_free);
1939 	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
1940 		/*
1941 		 * if there aren't enough reserved blocks, then the
1942 		 * counter is messed up somewhere.  Since this
1943 		 * function is called from invalidate page, it's
1944 		 * harmless to return without any action.
1945 		 */
1946 		ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
1947 			 "ino %lu, to_free %d with only %d reserved "
1948 			 "data blocks\n", inode->i_ino, to_free,
1949 			 ei->i_reserved_data_blocks);
1950 		WARN_ON(1);
1951 		to_free = ei->i_reserved_data_blocks;
1952 	}
1953 	ei->i_reserved_data_blocks -= to_free;
1954 
1955 	if (ei->i_reserved_data_blocks == 0) {
1956 		/*
1957 		 * We can release all of the reserved metadata blocks
1958 		 * only when we have written all of the delayed
1959 		 * allocation blocks.
1960 		 */
1961 		percpu_counter_sub(&sbi->s_dirtyblocks_counter,
1962 				   ei->i_reserved_meta_blocks);
1963 		ei->i_reserved_meta_blocks = 0;
1964 		ei->i_da_metadata_calc_len = 0;
1965 	}
1966 
1967 	/* update fs dirty data blocks counter */
1968 	percpu_counter_sub(&sbi->s_dirtyblocks_counter, to_free);
1969 
1970 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1971 
1972 	dquot_release_reservation_block(inode, to_free);
1973 }
1974 
1975 static void ext4_da_page_release_reservation(struct page *page,
1976 					     unsigned long offset)
1977 {
1978 	int to_release = 0;
1979 	struct buffer_head *head, *bh;
1980 	unsigned int curr_off = 0;
1981 
1982 	head = page_buffers(page);
1983 	bh = head;
1984 	do {
1985 		unsigned int next_off = curr_off + bh->b_size;
1986 
1987 		if ((offset <= curr_off) && (buffer_delay(bh))) {
1988 			to_release++;
1989 			clear_buffer_delay(bh);
1990 		}
1991 		curr_off = next_off;
1992 	} while ((bh = bh->b_this_page) != head);
1993 	ext4_da_release_space(page->mapping->host, to_release);
1994 }
1995 
1996 /*
1997  * Delayed allocation stuff
1998  */
1999 
2000 /*
2001  * mpage_da_submit_io - walks through extent of pages and try to write
2002  * them with writepage() call back
2003  *
2004  * @mpd->inode: inode
2005  * @mpd->first_page: first page of the extent
2006  * @mpd->next_page: page after the last page of the extent
2007  *
2008  * By the time mpage_da_submit_io() is called we expect all blocks
2009  * to be allocated. this may be wrong if allocation failed.
2010  *
2011  * As pages are already locked by write_cache_pages(), we can't use it
2012  */
2013 static int mpage_da_submit_io(struct mpage_da_data *mpd,
2014 			      struct ext4_map_blocks *map)
2015 {
2016 	struct pagevec pvec;
2017 	unsigned long index, end;
2018 	int ret = 0, err, nr_pages, i;
2019 	struct inode *inode = mpd->inode;
2020 	struct address_space *mapping = inode->i_mapping;
2021 	loff_t size = i_size_read(inode);
2022 	unsigned int len, block_start;
2023 	struct buffer_head *bh, *page_bufs = NULL;
2024 	int journal_data = ext4_should_journal_data(inode);
2025 	sector_t pblock = 0, cur_logical = 0;
2026 	struct ext4_io_submit io_submit;
2027 
2028 	BUG_ON(mpd->next_page <= mpd->first_page);
2029 	memset(&io_submit, 0, sizeof(io_submit));
2030 	/*
2031 	 * We need to start from the first_page to the next_page - 1
2032 	 * to make sure we also write the mapped dirty buffer_heads.
2033 	 * If we look at mpd->b_blocknr we would only be looking
2034 	 * at the currently mapped buffer_heads.
2035 	 */
2036 	index = mpd->first_page;
2037 	end = mpd->next_page - 1;
2038 
2039 	pagevec_init(&pvec, 0);
2040 	while (index <= end) {
2041 		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
2042 		if (nr_pages == 0)
2043 			break;
2044 		for (i = 0; i < nr_pages; i++) {
2045 			int commit_write = 0, redirty_page = 0;
2046 			struct page *page = pvec.pages[i];
2047 
2048 			index = page->index;
2049 			if (index > end)
2050 				break;
2051 
2052 			if (index == size >> PAGE_CACHE_SHIFT)
2053 				len = size & ~PAGE_CACHE_MASK;
2054 			else
2055 				len = PAGE_CACHE_SIZE;
2056 			if (map) {
2057 				cur_logical = index << (PAGE_CACHE_SHIFT -
2058 							inode->i_blkbits);
2059 				pblock = map->m_pblk + (cur_logical -
2060 							map->m_lblk);
2061 			}
2062 			index++;
2063 
2064 			BUG_ON(!PageLocked(page));
2065 			BUG_ON(PageWriteback(page));
2066 
2067 			/*
2068 			 * If the page does not have buffers (for
2069 			 * whatever reason), try to create them using
2070 			 * __block_write_begin.  If this fails,
2071 			 * redirty the page and move on.
2072 			 */
2073 			if (!page_has_buffers(page)) {
2074 				if (__block_write_begin(page, 0, len,
2075 						noalloc_get_block_write)) {
2076 				redirty_page:
2077 					redirty_page_for_writepage(mpd->wbc,
2078 								   page);
2079 					unlock_page(page);
2080 					continue;
2081 				}
2082 				commit_write = 1;
2083 			}
2084 
2085 			bh = page_bufs = page_buffers(page);
2086 			block_start = 0;
2087 			do {
2088 				if (!bh)
2089 					goto redirty_page;
2090 				if (map && (cur_logical >= map->m_lblk) &&
2091 				    (cur_logical <= (map->m_lblk +
2092 						     (map->m_len - 1)))) {
2093 					if (buffer_delay(bh)) {
2094 						clear_buffer_delay(bh);
2095 						bh->b_blocknr = pblock;
2096 					}
2097 					if (buffer_unwritten(bh) ||
2098 					    buffer_mapped(bh))
2099 						BUG_ON(bh->b_blocknr != pblock);
2100 					if (map->m_flags & EXT4_MAP_UNINIT)
2101 						set_buffer_uninit(bh);
2102 					clear_buffer_unwritten(bh);
2103 				}
2104 
2105 				/* redirty page if block allocation undone */
2106 				if (buffer_delay(bh) || buffer_unwritten(bh))
2107 					redirty_page = 1;
2108 				bh = bh->b_this_page;
2109 				block_start += bh->b_size;
2110 				cur_logical++;
2111 				pblock++;
2112 			} while (bh != page_bufs);
2113 
2114 			if (redirty_page)
2115 				goto redirty_page;
2116 
2117 			if (commit_write)
2118 				/* mark the buffer_heads as dirty & uptodate */
2119 				block_commit_write(page, 0, len);
2120 
2121 			/*
2122 			 * Delalloc doesn't support data journalling,
2123 			 * but eventually maybe we'll lift this
2124 			 * restriction.
2125 			 */
2126 			if (unlikely(journal_data && PageChecked(page)))
2127 				err = __ext4_journalled_writepage(page, len);
2128 			else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT))
2129 				err = ext4_bio_write_page(&io_submit, page,
2130 							  len, mpd->wbc);
2131 			else
2132 				err = block_write_full_page(page,
2133 					noalloc_get_block_write, mpd->wbc);
2134 
2135 			if (!err)
2136 				mpd->pages_written++;
2137 			/*
2138 			 * In error case, we have to continue because
2139 			 * remaining pages are still locked
2140 			 */
2141 			if (ret == 0)
2142 				ret = err;
2143 		}
2144 		pagevec_release(&pvec);
2145 	}
2146 	ext4_io_submit(&io_submit);
2147 	return ret;
2148 }
2149 
2150 static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
2151 					sector_t logical, long blk_cnt)
2152 {
2153 	int nr_pages, i;
2154 	pgoff_t index, end;
2155 	struct pagevec pvec;
2156 	struct inode *inode = mpd->inode;
2157 	struct address_space *mapping = inode->i_mapping;
2158 
2159 	index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
2160 	end   = (logical + blk_cnt - 1) >>
2161 				(PAGE_CACHE_SHIFT - inode->i_blkbits);
2162 	while (index <= end) {
2163 		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
2164 		if (nr_pages == 0)
2165 			break;
2166 		for (i = 0; i < nr_pages; i++) {
2167 			struct page *page = pvec.pages[i];
2168 			if (page->index > end)
2169 				break;
2170 			BUG_ON(!PageLocked(page));
2171 			BUG_ON(PageWriteback(page));
2172 			block_invalidatepage(page, 0);
2173 			ClearPageUptodate(page);
2174 			unlock_page(page);
2175 		}
2176 		index = pvec.pages[nr_pages - 1]->index + 1;
2177 		pagevec_release(&pvec);
2178 	}
2179 	return;
2180 }
2181 
2182 static void ext4_print_free_blocks(struct inode *inode)
2183 {
2184 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2185 	printk(KERN_CRIT "Total free blocks count %lld\n",
2186 	       ext4_count_free_blocks(inode->i_sb));
2187 	printk(KERN_CRIT "Free/Dirty block details\n");
2188 	printk(KERN_CRIT "free_blocks=%lld\n",
2189 	       (long long) percpu_counter_sum(&sbi->s_freeblocks_counter));
2190 	printk(KERN_CRIT "dirty_blocks=%lld\n",
2191 	       (long long) percpu_counter_sum(&sbi->s_dirtyblocks_counter));
2192 	printk(KERN_CRIT "Block reservation details\n");
2193 	printk(KERN_CRIT "i_reserved_data_blocks=%u\n",
2194 	       EXT4_I(inode)->i_reserved_data_blocks);
2195 	printk(KERN_CRIT "i_reserved_meta_blocks=%u\n",
2196 	       EXT4_I(inode)->i_reserved_meta_blocks);
2197 	return;
2198 }
2199 
2200 /*
2201  * mpage_da_map_and_submit - go through given space, map them
2202  *       if necessary, and then submit them for I/O
2203  *
2204  * @mpd - bh describing space
2205  *
2206  * The function skips space we know is already mapped to disk blocks.
2207  *
2208  */
2209 static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
2210 {
2211 	int err, blks, get_blocks_flags;
2212 	struct ext4_map_blocks map, *mapp = NULL;
2213 	sector_t next = mpd->b_blocknr;
2214 	unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
2215 	loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
2216 	handle_t *handle = NULL;
2217 
2218 	/*
2219 	 * If the blocks are mapped already, or we couldn't accumulate
2220 	 * any blocks, then proceed immediately to the submission stage.
2221 	 */
2222 	if ((mpd->b_size == 0) ||
2223 	    ((mpd->b_state  & (1 << BH_Mapped)) &&
2224 	     !(mpd->b_state & (1 << BH_Delay)) &&
2225 	     !(mpd->b_state & (1 << BH_Unwritten))))
2226 		goto submit_io;
2227 
2228 	handle = ext4_journal_current_handle();
2229 	BUG_ON(!handle);
2230 
2231 	/*
2232 	 * Call ext4_map_blocks() to allocate any delayed allocation
2233 	 * blocks, or to convert an uninitialized extent to be
2234 	 * initialized (in the case where we have written into
2235 	 * one or more preallocated blocks).
2236 	 *
2237 	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
2238 	 * indicate that we are on the delayed allocation path.  This
2239 	 * affects functions in many different parts of the allocation
2240 	 * call path.  This flag exists primarily because we don't
2241 	 * want to change *many* call functions, so ext4_map_blocks()
2242 	 * will set the magic i_delalloc_reserved_flag once the
2243 	 * inode's allocation semaphore is taken.
2244 	 *
2245 	 * If the blocks in questions were delalloc blocks, set
2246 	 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
2247 	 * variables are updated after the blocks have been allocated.
2248 	 */
2249 	map.m_lblk = next;
2250 	map.m_len = max_blocks;
2251 	get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
2252 	if (ext4_should_dioread_nolock(mpd->inode))
2253 		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2254 	if (mpd->b_state & (1 << BH_Delay))
2255 		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2256 
2257 	blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
2258 	if (blks < 0) {
2259 		struct super_block *sb = mpd->inode->i_sb;
2260 
2261 		err = blks;
2262 		/*
2263 		 * If get block returns EAGAIN or ENOSPC and there
2264 		 * appears to be free blocks we will call
2265 		 * ext4_writepage() for all of the pages which will
2266 		 * just redirty the pages.
2267 		 */
2268 		if (err == -EAGAIN)
2269 			goto submit_io;
2270 
2271 		if (err == -ENOSPC &&
2272 		    ext4_count_free_blocks(sb)) {
2273 			mpd->retval = err;
2274 			goto submit_io;
2275 		}
2276 
2277 		/*
2278 		 * get block failure will cause us to loop in
2279 		 * writepages, because a_ops->writepage won't be able
2280 		 * to make progress. The page will be redirtied by
2281 		 * writepage and writepages will again try to write
2282 		 * the same.
2283 		 */
2284 		if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) {
2285 			ext4_msg(sb, KERN_CRIT,
2286 				 "delayed block allocation failed for inode %lu "
2287 				 "at logical offset %llu with max blocks %zd "
2288 				 "with error %d", mpd->inode->i_ino,
2289 				 (unsigned long long) next,
2290 				 mpd->b_size >> mpd->inode->i_blkbits, err);
2291 			ext4_msg(sb, KERN_CRIT,
2292 				"This should not happen!! Data will be lost\n");
2293 			if (err == -ENOSPC)
2294 				ext4_print_free_blocks(mpd->inode);
2295 		}
2296 		/* invalidate all the pages */
2297 		ext4_da_block_invalidatepages(mpd, next,
2298 				mpd->b_size >> mpd->inode->i_blkbits);
2299 		return;
2300 	}
2301 	BUG_ON(blks == 0);
2302 
2303 	mapp = &map;
2304 	if (map.m_flags & EXT4_MAP_NEW) {
2305 		struct block_device *bdev = mpd->inode->i_sb->s_bdev;
2306 		int i;
2307 
2308 		for (i = 0; i < map.m_len; i++)
2309 			unmap_underlying_metadata(bdev, map.m_pblk + i);
2310 	}
2311 
2312 	if (ext4_should_order_data(mpd->inode)) {
2313 		err = ext4_jbd2_file_inode(handle, mpd->inode);
2314 		if (err)
2315 			/* This only happens if the journal is aborted */
2316 			return;
2317 	}
2318 
2319 	/*
2320 	 * Update on-disk size along with block allocation.
2321 	 */
2322 	disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
2323 	if (disksize > i_size_read(mpd->inode))
2324 		disksize = i_size_read(mpd->inode);
2325 	if (disksize > EXT4_I(mpd->inode)->i_disksize) {
2326 		ext4_update_i_disksize(mpd->inode, disksize);
2327 		err = ext4_mark_inode_dirty(handle, mpd->inode);
2328 		if (err)
2329 			ext4_error(mpd->inode->i_sb,
2330 				   "Failed to mark inode %lu dirty",
2331 				   mpd->inode->i_ino);
2332 	}
2333 
2334 submit_io:
2335 	mpage_da_submit_io(mpd, mapp);
2336 	mpd->io_done = 1;
2337 }
2338 
2339 #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
2340 		(1 << BH_Delay) | (1 << BH_Unwritten))
2341 
2342 /*
2343  * mpage_add_bh_to_extent - try to add one more block to extent of blocks
2344  *
2345  * @mpd->lbh - extent of blocks
2346  * @logical - logical number of the block in the file
2347  * @bh - bh of the block (used to access block's state)
2348  *
2349  * the function is used to collect contig. blocks in same state
2350  */
2351 static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
2352 				   sector_t logical, size_t b_size,
2353 				   unsigned long b_state)
2354 {
2355 	sector_t next;
2356 	int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
2357 
2358 	/*
2359 	 * XXX Don't go larger than mballoc is willing to allocate
2360 	 * This is a stopgap solution.  We eventually need to fold
2361 	 * mpage_da_submit_io() into this function and then call
2362 	 * ext4_map_blocks() multiple times in a loop
2363 	 */
2364 	if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
2365 		goto flush_it;
2366 
2367 	/* check if thereserved journal credits might overflow */
2368 	if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
2369 		if (nrblocks >= EXT4_MAX_TRANS_DATA) {
2370 			/*
2371 			 * With non-extent format we are limited by the journal
2372 			 * credit available.  Total credit needed to insert
2373 			 * nrblocks contiguous blocks is dependent on the
2374 			 * nrblocks.  So limit nrblocks.
2375 			 */
2376 			goto flush_it;
2377 		} else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
2378 				EXT4_MAX_TRANS_DATA) {
2379 			/*
2380 			 * Adding the new buffer_head would make it cross the
2381 			 * allowed limit for which we have journal credit
2382 			 * reserved. So limit the new bh->b_size
2383 			 */
2384 			b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
2385 						mpd->inode->i_blkbits;
2386 			/* we will do mpage_da_submit_io in the next loop */
2387 		}
2388 	}
2389 	/*
2390 	 * First block in the extent
2391 	 */
2392 	if (mpd->b_size == 0) {
2393 		mpd->b_blocknr = logical;
2394 		mpd->b_size = b_size;
2395 		mpd->b_state = b_state & BH_FLAGS;
2396 		return;
2397 	}
2398 
2399 	next = mpd->b_blocknr + nrblocks;
2400 	/*
2401 	 * Can we merge the block to our big extent?
2402 	 */
2403 	if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
2404 		mpd->b_size += b_size;
2405 		return;
2406 	}
2407 
2408 flush_it:
2409 	/*
2410 	 * We couldn't merge the block to our extent, so we
2411 	 * need to flush current  extent and start new one
2412 	 */
2413 	mpage_da_map_and_submit(mpd);
2414 	return;
2415 }
2416 
2417 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
2418 {
2419 	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
2420 }
2421 
2422 /*
2423  * __mpage_da_writepage - finds extent of pages and blocks
2424  *
2425  * @page: page to consider
2426  * @wbc: not used, we just follow rules
2427  * @data: context
2428  *
2429  * The function finds extents of pages and scan them for all blocks.
2430  */
2431 static int __mpage_da_writepage(struct page *page,
2432 				struct writeback_control *wbc,
2433 				struct mpage_da_data *mpd)
2434 {
2435 	struct inode *inode = mpd->inode;
2436 	struct buffer_head *bh, *head;
2437 	sector_t logical;
2438 
2439 	/*
2440 	 * Can we merge this page to current extent?
2441 	 */
2442 	if (mpd->next_page != page->index) {
2443 		/*
2444 		 * Nope, we can't. So, we map non-allocated blocks
2445 		 * and start IO on them
2446 		 */
2447 		if (mpd->next_page != mpd->first_page) {
2448 			mpage_da_map_and_submit(mpd);
2449 			/*
2450 			 * skip rest of the page in the page_vec
2451 			 */
2452 			redirty_page_for_writepage(wbc, page);
2453 			unlock_page(page);
2454 			return MPAGE_DA_EXTENT_TAIL;
2455 		}
2456 
2457 		/*
2458 		 * Start next extent of pages ...
2459 		 */
2460 		mpd->first_page = page->index;
2461 
2462 		/*
2463 		 * ... and blocks
2464 		 */
2465 		mpd->b_size = 0;
2466 		mpd->b_state = 0;
2467 		mpd->b_blocknr = 0;
2468 	}
2469 
2470 	mpd->next_page = page->index + 1;
2471 	logical = (sector_t) page->index <<
2472 		  (PAGE_CACHE_SHIFT - inode->i_blkbits);
2473 
2474 	if (!page_has_buffers(page)) {
2475 		mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE,
2476 				       (1 << BH_Dirty) | (1 << BH_Uptodate));
2477 		if (mpd->io_done)
2478 			return MPAGE_DA_EXTENT_TAIL;
2479 	} else {
2480 		/*
2481 		 * Page with regular buffer heads, just add all dirty ones
2482 		 */
2483 		head = page_buffers(page);
2484 		bh = head;
2485 		do {
2486 			BUG_ON(buffer_locked(bh));
2487 			/*
2488 			 * We need to try to allocate
2489 			 * unmapped blocks in the same page.
2490 			 * Otherwise we won't make progress
2491 			 * with the page in ext4_writepage
2492 			 */
2493 			if (ext4_bh_delay_or_unwritten(NULL, bh)) {
2494 				mpage_add_bh_to_extent(mpd, logical,
2495 						       bh->b_size,
2496 						       bh->b_state);
2497 				if (mpd->io_done)
2498 					return MPAGE_DA_EXTENT_TAIL;
2499 			} else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
2500 				/*
2501 				 * mapped dirty buffer. We need to update
2502 				 * the b_state because we look at
2503 				 * b_state in mpage_da_map_blocks. We don't
2504 				 * update b_size because if we find an
2505 				 * unmapped buffer_head later we need to
2506 				 * use the b_state flag of that buffer_head.
2507 				 */
2508 				if (mpd->b_size == 0)
2509 					mpd->b_state = bh->b_state & BH_FLAGS;
2510 			}
2511 			logical++;
2512 		} while ((bh = bh->b_this_page) != head);
2513 	}
2514 
2515 	return 0;
2516 }
2517 
2518 /*
2519  * This is a special get_blocks_t callback which is used by
2520  * ext4_da_write_begin().  It will either return mapped block or
2521  * reserve space for a single block.
2522  *
2523  * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
2524  * We also have b_blocknr = -1 and b_bdev initialized properly
2525  *
2526  * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
2527  * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
2528  * initialized properly.
2529  */
2530 static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
2531 				  struct buffer_head *bh, int create)
2532 {
2533 	struct ext4_map_blocks map;
2534 	int ret = 0;
2535 	sector_t invalid_block = ~((sector_t) 0xffff);
2536 
2537 	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
2538 		invalid_block = ~0;
2539 
2540 	BUG_ON(create == 0);
2541 	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
2542 
2543 	map.m_lblk = iblock;
2544 	map.m_len = 1;
2545 
2546 	/*
2547 	 * first, we need to know whether the block is allocated already
2548 	 * preallocated blocks are unmapped but should treated
2549 	 * the same as allocated blocks.
2550 	 */
2551 	ret = ext4_map_blocks(NULL, inode, &map, 0);
2552 	if (ret < 0)
2553 		return ret;
2554 	if (ret == 0) {
2555 		if (buffer_delay(bh))
2556 			return 0; /* Not sure this could or should happen */
2557 		/*
2558 		 * XXX: __block_write_begin() unmaps passed block, is it OK?
2559 		 */
2560 		ret = ext4_da_reserve_space(inode, iblock);
2561 		if (ret)
2562 			/* not enough space to reserve */
2563 			return ret;
2564 
2565 		map_bh(bh, inode->i_sb, invalid_block);
2566 		set_buffer_new(bh);
2567 		set_buffer_delay(bh);
2568 		return 0;
2569 	}
2570 
2571 	map_bh(bh, inode->i_sb, map.m_pblk);
2572 	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
2573 
2574 	if (buffer_unwritten(bh)) {
2575 		/* A delayed write to unwritten bh should be marked
2576 		 * new and mapped.  Mapped ensures that we don't do
2577 		 * get_block multiple times when we write to the same
2578 		 * offset and new ensures that we do proper zero out
2579 		 * for partial write.
2580 		 */
2581 		set_buffer_new(bh);
2582 		set_buffer_mapped(bh);
2583 	}
2584 	return 0;
2585 }
2586 
2587 /*
2588  * This function is used as a standard get_block_t calback function
2589  * when there is no desire to allocate any blocks.  It is used as a
2590  * callback function for block_write_begin() and block_write_full_page().
2591  * These functions should only try to map a single block at a time.
2592  *
2593  * Since this function doesn't do block allocations even if the caller
2594  * requests it by passing in create=1, it is critically important that
2595  * any caller checks to make sure that any buffer heads are returned
2596  * by this function are either all already mapped or marked for
2597  * delayed allocation before calling  block_write_full_page().  Otherwise,
2598  * b_blocknr could be left unitialized, and the page write functions will
2599  * be taken by surprise.
2600  */
2601 static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
2602 				   struct buffer_head *bh_result, int create)
2603 {
2604 	BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
2605 	return _ext4_get_block(inode, iblock, bh_result, 0);
2606 }
2607 
2608 static int bget_one(handle_t *handle, struct buffer_head *bh)
2609 {
2610 	get_bh(bh);
2611 	return 0;
2612 }
2613 
2614 static int bput_one(handle_t *handle, struct buffer_head *bh)
2615 {
2616 	put_bh(bh);
2617 	return 0;
2618 }
2619 
2620 static int __ext4_journalled_writepage(struct page *page,
2621 				       unsigned int len)
2622 {
2623 	struct address_space *mapping = page->mapping;
2624 	struct inode *inode = mapping->host;
2625 	struct buffer_head *page_bufs;
2626 	handle_t *handle = NULL;
2627 	int ret = 0;
2628 	int err;
2629 
2630 	ClearPageChecked(page);
2631 	page_bufs = page_buffers(page);
2632 	BUG_ON(!page_bufs);
2633 	walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
2634 	/* As soon as we unlock the page, it can go away, but we have
2635 	 * references to buffers so we are safe */
2636 	unlock_page(page);
2637 
2638 	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
2639 	if (IS_ERR(handle)) {
2640 		ret = PTR_ERR(handle);
2641 		goto out;
2642 	}
2643 
2644 	ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
2645 				do_journal_get_write_access);
2646 
2647 	err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
2648 				write_end_fn);
2649 	if (ret == 0)
2650 		ret = err;
2651 	err = ext4_journal_stop(handle);
2652 	if (!ret)
2653 		ret = err;
2654 
2655 	walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
2656 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
2657 out:
2658 	return ret;
2659 }
2660 
2661 static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
2662 static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
2663 
2664 /*
2665  * Note that we don't need to start a transaction unless we're journaling data
2666  * because we should have holes filled from ext4_page_mkwrite(). We even don't
2667  * need to file the inode to the transaction's list in ordered mode because if
2668  * we are writing back data added by write(), the inode is already there and if
2669  * we are writing back data modified via mmap(), noone guarantees in which
2670  * transaction the data will hit the disk. In case we are journaling data, we
2671  * cannot start transaction directly because transaction start ranks above page
2672  * lock so we have to do some magic.
2673  *
2674  * This function can get called via...
2675  *   - ext4_da_writepages after taking page lock (have journal handle)
2676  *   - journal_submit_inode_data_buffers (no journal handle)
2677  *   - shrink_page_list via pdflush (no journal handle)
2678  *   - grab_page_cache when doing write_begin (have journal handle)
2679  *
2680  * We don't do any block allocation in this function. If we have page with
2681  * multiple blocks we need to write those buffer_heads that are mapped. This
2682  * is important for mmaped based write. So if we do with blocksize 1K
2683  * truncate(f, 1024);
2684  * a = mmap(f, 0, 4096);
2685  * a[0] = 'a';
2686  * truncate(f, 4096);
2687  * we have in the page first buffer_head mapped via page_mkwrite call back
2688  * but other bufer_heads would be unmapped but dirty(dirty done via the
2689  * do_wp_page). So writepage should write the first block. If we modify
2690  * the mmap area beyond 1024 we will again get a page_fault and the
2691  * page_mkwrite callback will do the block allocation and mark the
2692  * buffer_heads mapped.
2693  *
2694  * We redirty the page if we have any buffer_heads that is either delay or
2695  * unwritten in the page.
2696  *
2697  * We can get recursively called as show below.
2698  *
2699  *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
2700  *		ext4_writepage()
2701  *
2702  * But since we don't do any block allocation we should not deadlock.
2703  * Page also have the dirty flag cleared so we don't get recurive page_lock.
2704  */
2705 static int ext4_writepage(struct page *page,
2706 			  struct writeback_control *wbc)
2707 {
2708 	int ret = 0, commit_write = 0;
2709 	loff_t size;
2710 	unsigned int len;
2711 	struct buffer_head *page_bufs = NULL;
2712 	struct inode *inode = page->mapping->host;
2713 
2714 	trace_ext4_writepage(inode, page);
2715 	size = i_size_read(inode);
2716 	if (page->index == size >> PAGE_CACHE_SHIFT)
2717 		len = size & ~PAGE_CACHE_MASK;
2718 	else
2719 		len = PAGE_CACHE_SIZE;
2720 
2721 	/*
2722 	 * If the page does not have buffers (for whatever reason),
2723 	 * try to create them using __block_write_begin.  If this
2724 	 * fails, redirty the page and move on.
2725 	 */
2726 	if (!page_has_buffers(page)) {
2727 		if (__block_write_begin(page, 0, len,
2728 					noalloc_get_block_write)) {
2729 		redirty_page:
2730 			redirty_page_for_writepage(wbc, page);
2731 			unlock_page(page);
2732 			return 0;
2733 		}
2734 		commit_write = 1;
2735 	}
2736 	page_bufs = page_buffers(page);
2737 	if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2738 			      ext4_bh_delay_or_unwritten)) {
2739 		/*
2740 		 * We don't want to do block allocation, so redirty
2741 		 * the page and return.  We may reach here when we do
2742 		 * a journal commit via journal_submit_inode_data_buffers.
2743 		 * We can also reach here via shrink_page_list
2744 		 */
2745 		goto redirty_page;
2746 	}
2747 	if (commit_write)
2748 		/* now mark the buffer_heads as dirty and uptodate */
2749 		block_commit_write(page, 0, len);
2750 
2751 	if (PageChecked(page) && ext4_should_journal_data(inode))
2752 		/*
2753 		 * It's mmapped pagecache.  Add buffers and journal it.  There
2754 		 * doesn't seem much point in redirtying the page here.
2755 		 */
2756 		return __ext4_journalled_writepage(page, len);
2757 
2758 	if (buffer_uninit(page_bufs)) {
2759 		ext4_set_bh_endio(page_bufs, inode);
2760 		ret = block_write_full_page_endio(page, noalloc_get_block_write,
2761 					    wbc, ext4_end_io_buffer_write);
2762 	} else
2763 		ret = block_write_full_page(page, noalloc_get_block_write,
2764 					    wbc);
2765 
2766 	return ret;
2767 }
2768 
2769 /*
2770  * This is called via ext4_da_writepages() to
2771  * calulate the total number of credits to reserve to fit
2772  * a single extent allocation into a single transaction,
2773  * ext4_da_writpeages() will loop calling this before
2774  * the block allocation.
2775  */
2776 
2777 static int ext4_da_writepages_trans_blocks(struct inode *inode)
2778 {
2779 	int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
2780 
2781 	/*
2782 	 * With non-extent format the journal credit needed to
2783 	 * insert nrblocks contiguous block is dependent on
2784 	 * number of contiguous block. So we will limit
2785 	 * number of contiguous block to a sane value
2786 	 */
2787 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
2788 	    (max_blocks > EXT4_MAX_TRANS_DATA))
2789 		max_blocks = EXT4_MAX_TRANS_DATA;
2790 
2791 	return ext4_chunk_trans_blocks(inode, max_blocks);
2792 }
2793 
2794 /*
2795  * write_cache_pages_da - walk the list of dirty pages of the given
2796  * address space and call the callback function (which usually writes
2797  * the pages).
2798  *
2799  * This is a forked version of write_cache_pages().  Differences:
2800  *	Range cyclic is ignored.
2801  *	no_nrwrite_index_update is always presumed true
2802  */
2803 static int write_cache_pages_da(struct address_space *mapping,
2804 				struct writeback_control *wbc,
2805 				struct mpage_da_data *mpd,
2806 				pgoff_t *done_index)
2807 {
2808 	int ret = 0;
2809 	int done = 0;
2810 	struct pagevec pvec;
2811 	unsigned nr_pages;
2812 	pgoff_t index;
2813 	pgoff_t end;		/* Inclusive */
2814 	long nr_to_write = wbc->nr_to_write;
2815 	int tag;
2816 
2817 	pagevec_init(&pvec, 0);
2818 	index = wbc->range_start >> PAGE_CACHE_SHIFT;
2819 	end = wbc->range_end >> PAGE_CACHE_SHIFT;
2820 
2821 	if (wbc->sync_mode == WB_SYNC_ALL)
2822 		tag = PAGECACHE_TAG_TOWRITE;
2823 	else
2824 		tag = PAGECACHE_TAG_DIRTY;
2825 
2826 	*done_index = index;
2827 	while (!done && (index <= end)) {
2828 		int i;
2829 
2830 		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2831 			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
2832 		if (nr_pages == 0)
2833 			break;
2834 
2835 		for (i = 0; i < nr_pages; i++) {
2836 			struct page *page = pvec.pages[i];
2837 
2838 			/*
2839 			 * At this point, the page may be truncated or
2840 			 * invalidated (changing page->mapping to NULL), or
2841 			 * even swizzled back from swapper_space to tmpfs file
2842 			 * mapping. However, page->index will not change
2843 			 * because we have a reference on the page.
2844 			 */
2845 			if (page->index > end) {
2846 				done = 1;
2847 				break;
2848 			}
2849 
2850 			*done_index = page->index + 1;
2851 
2852 			lock_page(page);
2853 
2854 			/*
2855 			 * Page truncated or invalidated. We can freely skip it
2856 			 * then, even for data integrity operations: the page
2857 			 * has disappeared concurrently, so there could be no
2858 			 * real expectation of this data interity operation
2859 			 * even if there is now a new, dirty page at the same
2860 			 * pagecache address.
2861 			 */
2862 			if (unlikely(page->mapping != mapping)) {
2863 continue_unlock:
2864 				unlock_page(page);
2865 				continue;
2866 			}
2867 
2868 			if (!PageDirty(page)) {
2869 				/* someone wrote it for us */
2870 				goto continue_unlock;
2871 			}
2872 
2873 			if (PageWriteback(page)) {
2874 				if (wbc->sync_mode != WB_SYNC_NONE)
2875 					wait_on_page_writeback(page);
2876 				else
2877 					goto continue_unlock;
2878 			}
2879 
2880 			BUG_ON(PageWriteback(page));
2881 			if (!clear_page_dirty_for_io(page))
2882 				goto continue_unlock;
2883 
2884 			ret = __mpage_da_writepage(page, wbc, mpd);
2885 			if (unlikely(ret)) {
2886 				if (ret == AOP_WRITEPAGE_ACTIVATE) {
2887 					unlock_page(page);
2888 					ret = 0;
2889 				} else {
2890 					done = 1;
2891 					break;
2892 				}
2893 			}
2894 
2895 			if (nr_to_write > 0) {
2896 				nr_to_write--;
2897 				if (nr_to_write == 0 &&
2898 				    wbc->sync_mode == WB_SYNC_NONE) {
2899 					/*
2900 					 * We stop writing back only if we are
2901 					 * not doing integrity sync. In case of
2902 					 * integrity sync we have to keep going
2903 					 * because someone may be concurrently
2904 					 * dirtying pages, and we might have
2905 					 * synced a lot of newly appeared dirty
2906 					 * pages, but have not synced all of the
2907 					 * old dirty pages.
2908 					 */
2909 					done = 1;
2910 					break;
2911 				}
2912 			}
2913 		}
2914 		pagevec_release(&pvec);
2915 		cond_resched();
2916 	}
2917 	return ret;
2918 }
2919 
2920 
2921 static int ext4_da_writepages(struct address_space *mapping,
2922 			      struct writeback_control *wbc)
2923 {
2924 	pgoff_t	index;
2925 	int range_whole = 0;
2926 	handle_t *handle = NULL;
2927 	struct mpage_da_data mpd;
2928 	struct inode *inode = mapping->host;
2929 	int pages_written = 0;
2930 	long pages_skipped;
2931 	unsigned int max_pages;
2932 	int range_cyclic, cycled = 1, io_done = 0;
2933 	int needed_blocks, ret = 0;
2934 	long desired_nr_to_write, nr_to_writebump = 0;
2935 	loff_t range_start = wbc->range_start;
2936 	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2937 	pgoff_t done_index = 0;
2938 	pgoff_t end;
2939 
2940 	trace_ext4_da_writepages(inode, wbc);
2941 
2942 	/*
2943 	 * No pages to write? This is mainly a kludge to avoid starting
2944 	 * a transaction for special inodes like journal inode on last iput()
2945 	 * because that could violate lock ordering on umount
2946 	 */
2947 	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2948 		return 0;
2949 
2950 	/*
2951 	 * If the filesystem has aborted, it is read-only, so return
2952 	 * right away instead of dumping stack traces later on that
2953 	 * will obscure the real source of the problem.  We test
2954 	 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
2955 	 * the latter could be true if the filesystem is mounted
2956 	 * read-only, and in that case, ext4_da_writepages should
2957 	 * *never* be called, so if that ever happens, we would want
2958 	 * the stack trace.
2959 	 */
2960 	if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
2961 		return -EROFS;
2962 
2963 	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2964 		range_whole = 1;
2965 
2966 	range_cyclic = wbc->range_cyclic;
2967 	if (wbc->range_cyclic) {
2968 		index = mapping->writeback_index;
2969 		if (index)
2970 			cycled = 0;
2971 		wbc->range_start = index << PAGE_CACHE_SHIFT;
2972 		wbc->range_end  = LLONG_MAX;
2973 		wbc->range_cyclic = 0;
2974 		end = -1;
2975 	} else {
2976 		index = wbc->range_start >> PAGE_CACHE_SHIFT;
2977 		end = wbc->range_end >> PAGE_CACHE_SHIFT;
2978 	}
2979 
2980 	/*
2981 	 * This works around two forms of stupidity.  The first is in
2982 	 * the writeback code, which caps the maximum number of pages
2983 	 * written to be 1024 pages.  This is wrong on multiple
2984 	 * levels; different architectues have a different page size,
2985 	 * which changes the maximum amount of data which gets
2986 	 * written.  Secondly, 4 megabytes is way too small.  XFS
2987 	 * forces this value to be 16 megabytes by multiplying
2988 	 * nr_to_write parameter by four, and then relies on its
2989 	 * allocator to allocate larger extents to make them
2990 	 * contiguous.  Unfortunately this brings us to the second
2991 	 * stupidity, which is that ext4's mballoc code only allocates
2992 	 * at most 2048 blocks.  So we force contiguous writes up to
2993 	 * the number of dirty blocks in the inode, or
2994 	 * sbi->max_writeback_mb_bump whichever is smaller.
2995 	 */
2996 	max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
2997 	if (!range_cyclic && range_whole) {
2998 		if (wbc->nr_to_write == LONG_MAX)
2999 			desired_nr_to_write = wbc->nr_to_write;
3000 		else
3001 			desired_nr_to_write = wbc->nr_to_write * 8;
3002 	} else
3003 		desired_nr_to_write = ext4_num_dirty_pages(inode, index,
3004 							   max_pages);
3005 	if (desired_nr_to_write > max_pages)
3006 		desired_nr_to_write = max_pages;
3007 
3008 	if (wbc->nr_to_write < desired_nr_to_write) {
3009 		nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
3010 		wbc->nr_to_write = desired_nr_to_write;
3011 	}
3012 
3013 	mpd.wbc = wbc;
3014 	mpd.inode = mapping->host;
3015 
3016 	pages_skipped = wbc->pages_skipped;
3017 
3018 retry:
3019 	if (wbc->sync_mode == WB_SYNC_ALL)
3020 		tag_pages_for_writeback(mapping, index, end);
3021 
3022 	while (!ret && wbc->nr_to_write > 0) {
3023 
3024 		/*
3025 		 * we  insert one extent at a time. So we need
3026 		 * credit needed for single extent allocation.
3027 		 * journalled mode is currently not supported
3028 		 * by delalloc
3029 		 */
3030 		BUG_ON(ext4_should_journal_data(inode));
3031 		needed_blocks = ext4_da_writepages_trans_blocks(inode);
3032 
3033 		/* start a new transaction*/
3034 		handle = ext4_journal_start(inode, needed_blocks);
3035 		if (IS_ERR(handle)) {
3036 			ret = PTR_ERR(handle);
3037 			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
3038 			       "%ld pages, ino %lu; err %d", __func__,
3039 				wbc->nr_to_write, inode->i_ino, ret);
3040 			goto out_writepages;
3041 		}
3042 
3043 		/*
3044 		 * Now call __mpage_da_writepage to find the next
3045 		 * contiguous region of logical blocks that need
3046 		 * blocks to be allocated by ext4.  We don't actually
3047 		 * submit the blocks for I/O here, even though
3048 		 * write_cache_pages thinks it will, and will set the
3049 		 * pages as clean for write before calling
3050 		 * __mpage_da_writepage().
3051 		 */
3052 		mpd.b_size = 0;
3053 		mpd.b_state = 0;
3054 		mpd.b_blocknr = 0;
3055 		mpd.first_page = 0;
3056 		mpd.next_page = 0;
3057 		mpd.io_done = 0;
3058 		mpd.pages_written = 0;
3059 		mpd.retval = 0;
3060 		ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
3061 		/*
3062 		 * If we have a contiguous extent of pages and we
3063 		 * haven't done the I/O yet, map the blocks and submit
3064 		 * them for I/O.
3065 		 */
3066 		if (!mpd.io_done && mpd.next_page != mpd.first_page) {
3067 			mpage_da_map_and_submit(&mpd);
3068 			ret = MPAGE_DA_EXTENT_TAIL;
3069 		}
3070 		trace_ext4_da_write_pages(inode, &mpd);
3071 		wbc->nr_to_write -= mpd.pages_written;
3072 
3073 		ext4_journal_stop(handle);
3074 
3075 		if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
3076 			/* commit the transaction which would
3077 			 * free blocks released in the transaction
3078 			 * and try again
3079 			 */
3080 			jbd2_journal_force_commit_nested(sbi->s_journal);
3081 			wbc->pages_skipped = pages_skipped;
3082 			ret = 0;
3083 		} else if (ret == MPAGE_DA_EXTENT_TAIL) {
3084 			/*
3085 			 * got one extent now try with
3086 			 * rest of the pages
3087 			 */
3088 			pages_written += mpd.pages_written;
3089 			wbc->pages_skipped = pages_skipped;
3090 			ret = 0;
3091 			io_done = 1;
3092 		} else if (wbc->nr_to_write)
3093 			/*
3094 			 * There is no more writeout needed
3095 			 * or we requested for a noblocking writeout
3096 			 * and we found the device congested
3097 			 */
3098 			break;
3099 	}
3100 	if (!io_done && !cycled) {
3101 		cycled = 1;
3102 		index = 0;
3103 		wbc->range_start = index << PAGE_CACHE_SHIFT;
3104 		wbc->range_end  = mapping->writeback_index - 1;
3105 		goto retry;
3106 	}
3107 	if (pages_skipped != wbc->pages_skipped)
3108 		ext4_msg(inode->i_sb, KERN_CRIT,
3109 			 "This should not happen leaving %s "
3110 			 "with nr_to_write = %ld ret = %d",
3111 			 __func__, wbc->nr_to_write, ret);
3112 
3113 	/* Update index */
3114 	wbc->range_cyclic = range_cyclic;
3115 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
3116 		/*
3117 		 * set the writeback_index so that range_cyclic
3118 		 * mode will write it back later
3119 		 */
3120 		mapping->writeback_index = done_index;
3121 
3122 out_writepages:
3123 	wbc->nr_to_write -= nr_to_writebump;
3124 	wbc->range_start = range_start;
3125 	trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
3126 	return ret;
3127 }
3128 
3129 #define FALL_BACK_TO_NONDELALLOC 1
3130 static int ext4_nonda_switch(struct super_block *sb)
3131 {
3132 	s64 free_blocks, dirty_blocks;
3133 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3134 
3135 	/*
3136 	 * switch to non delalloc mode if we are running low
3137 	 * on free block. The free block accounting via percpu
3138 	 * counters can get slightly wrong with percpu_counter_batch getting
3139 	 * accumulated on each CPU without updating global counters
3140 	 * Delalloc need an accurate free block accounting. So switch
3141 	 * to non delalloc when we are near to error range.
3142 	 */
3143 	free_blocks  = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
3144 	dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter);
3145 	if (2 * free_blocks < 3 * dirty_blocks ||
3146 		free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) {
3147 		/*
3148 		 * free block count is less than 150% of dirty blocks
3149 		 * or free blocks is less than watermark
3150 		 */
3151 		return 1;
3152 	}
3153 	/*
3154 	 * Even if we don't switch but are nearing capacity,
3155 	 * start pushing delalloc when 1/2 of free blocks are dirty.
3156 	 */
3157 	if (free_blocks < 2 * dirty_blocks)
3158 		writeback_inodes_sb_if_idle(sb);
3159 
3160 	return 0;
3161 }
3162 
3163 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
3164 			       loff_t pos, unsigned len, unsigned flags,
3165 			       struct page **pagep, void **fsdata)
3166 {
3167 	int ret, retries = 0;
3168 	struct page *page;
3169 	pgoff_t index;
3170 	struct inode *inode = mapping->host;
3171 	handle_t *handle;
3172 
3173 	index = pos >> PAGE_CACHE_SHIFT;
3174 
3175 	if (ext4_nonda_switch(inode->i_sb)) {
3176 		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
3177 		return ext4_write_begin(file, mapping, pos,
3178 					len, flags, pagep, fsdata);
3179 	}
3180 	*fsdata = (void *)0;
3181 	trace_ext4_da_write_begin(inode, pos, len, flags);
3182 retry:
3183 	/*
3184 	 * With delayed allocation, we don't log the i_disksize update
3185 	 * if there is delayed block allocation. But we still need
3186 	 * to journalling the i_disksize update if writes to the end
3187 	 * of file which has an already mapped buffer.
3188 	 */
3189 	handle = ext4_journal_start(inode, 1);
3190 	if (IS_ERR(handle)) {
3191 		ret = PTR_ERR(handle);
3192 		goto out;
3193 	}
3194 	/* We cannot recurse into the filesystem as the transaction is already
3195 	 * started */
3196 	flags |= AOP_FLAG_NOFS;
3197 
3198 	page = grab_cache_page_write_begin(mapping, index, flags);
3199 	if (!page) {
3200 		ext4_journal_stop(handle);
3201 		ret = -ENOMEM;
3202 		goto out;
3203 	}
3204 	*pagep = page;
3205 
3206 	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
3207 	if (ret < 0) {
3208 		unlock_page(page);
3209 		ext4_journal_stop(handle);
3210 		page_cache_release(page);
3211 		/*
3212 		 * block_write_begin may have instantiated a few blocks
3213 		 * outside i_size.  Trim these off again. Don't need
3214 		 * i_size_read because we hold i_mutex.
3215 		 */
3216 		if (pos + len > inode->i_size)
3217 			ext4_truncate_failed_write(inode);
3218 	}
3219 
3220 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
3221 		goto retry;
3222 out:
3223 	return ret;
3224 }
3225 
3226 /*
3227  * Check if we should update i_disksize
3228  * when write to the end of file but not require block allocation
3229  */
3230 static int ext4_da_should_update_i_disksize(struct page *page,
3231 					    unsigned long offset)
3232 {
3233 	struct buffer_head *bh;
3234 	struct inode *inode = page->mapping->host;
3235 	unsigned int idx;
3236 	int i;
3237 
3238 	bh = page_buffers(page);
3239 	idx = offset >> inode->i_blkbits;
3240 
3241 	for (i = 0; i < idx; i++)
3242 		bh = bh->b_this_page;
3243 
3244 	if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
3245 		return 0;
3246 	return 1;
3247 }
3248 
3249 static int ext4_da_write_end(struct file *file,
3250 			     struct address_space *mapping,
3251 			     loff_t pos, unsigned len, unsigned copied,
3252 			     struct page *page, void *fsdata)
3253 {
3254 	struct inode *inode = mapping->host;
3255 	int ret = 0, ret2;
3256 	handle_t *handle = ext4_journal_current_handle();
3257 	loff_t new_i_size;
3258 	unsigned long start, end;
3259 	int write_mode = (int)(unsigned long)fsdata;
3260 
3261 	if (write_mode == FALL_BACK_TO_NONDELALLOC) {
3262 		if (ext4_should_order_data(inode)) {
3263 			return ext4_ordered_write_end(file, mapping, pos,
3264 					len, copied, page, fsdata);
3265 		} else if (ext4_should_writeback_data(inode)) {
3266 			return ext4_writeback_write_end(file, mapping, pos,
3267 					len, copied, page, fsdata);
3268 		} else {
3269 			BUG();
3270 		}
3271 	}
3272 
3273 	trace_ext4_da_write_end(inode, pos, len, copied);
3274 	start = pos & (PAGE_CACHE_SIZE - 1);
3275 	end = start + copied - 1;
3276 
3277 	/*
3278 	 * generic_write_end() will run mark_inode_dirty() if i_size
3279 	 * changes.  So let's piggyback the i_disksize mark_inode_dirty
3280 	 * into that.
3281 	 */
3282 
3283 	new_i_size = pos + copied;
3284 	if (new_i_size > EXT4_I(inode)->i_disksize) {
3285 		if (ext4_da_should_update_i_disksize(page, end)) {
3286 			down_write(&EXT4_I(inode)->i_data_sem);
3287 			if (new_i_size > EXT4_I(inode)->i_disksize) {
3288 				/*
3289 				 * Updating i_disksize when extending file
3290 				 * without needing block allocation
3291 				 */
3292 				if (ext4_should_order_data(inode))
3293 					ret = ext4_jbd2_file_inode(handle,
3294 								   inode);
3295 
3296 				EXT4_I(inode)->i_disksize = new_i_size;
3297 			}
3298 			up_write(&EXT4_I(inode)->i_data_sem);
3299 			/* We need to mark inode dirty even if
3300 			 * new_i_size is less that inode->i_size
3301 			 * bu greater than i_disksize.(hint delalloc)
3302 			 */
3303 			ext4_mark_inode_dirty(handle, inode);
3304 		}
3305 	}
3306 	ret2 = generic_write_end(file, mapping, pos, len, copied,
3307 							page, fsdata);
3308 	copied = ret2;
3309 	if (ret2 < 0)
3310 		ret = ret2;
3311 	ret2 = ext4_journal_stop(handle);
3312 	if (!ret)
3313 		ret = ret2;
3314 
3315 	return ret ? ret : copied;
3316 }
3317 
3318 static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
3319 {
3320 	/*
3321 	 * Drop reserved blocks
3322 	 */
3323 	BUG_ON(!PageLocked(page));
3324 	if (!page_has_buffers(page))
3325 		goto out;
3326 
3327 	ext4_da_page_release_reservation(page, offset);
3328 
3329 out:
3330 	ext4_invalidatepage(page, offset);
3331 
3332 	return;
3333 }
3334 
3335 /*
3336  * Force all delayed allocation blocks to be allocated for a given inode.
3337  */
3338 int ext4_alloc_da_blocks(struct inode *inode)
3339 {
3340 	trace_ext4_alloc_da_blocks(inode);
3341 
3342 	if (!EXT4_I(inode)->i_reserved_data_blocks &&
3343 	    !EXT4_I(inode)->i_reserved_meta_blocks)
3344 		return 0;
3345 
3346 	/*
3347 	 * We do something simple for now.  The filemap_flush() will
3348 	 * also start triggering a write of the data blocks, which is
3349 	 * not strictly speaking necessary (and for users of
3350 	 * laptop_mode, not even desirable).  However, to do otherwise
3351 	 * would require replicating code paths in:
3352 	 *
3353 	 * ext4_da_writepages() ->
3354 	 *    write_cache_pages() ---> (via passed in callback function)
3355 	 *        __mpage_da_writepage() -->
3356 	 *           mpage_add_bh_to_extent()
3357 	 *           mpage_da_map_blocks()
3358 	 *
3359 	 * The problem is that write_cache_pages(), located in
3360 	 * mm/page-writeback.c, marks pages clean in preparation for
3361 	 * doing I/O, which is not desirable if we're not planning on
3362 	 * doing I/O at all.
3363 	 *
3364 	 * We could call write_cache_pages(), and then redirty all of
3365 	 * the pages by calling redirty_page_for_writeback() but that
3366 	 * would be ugly in the extreme.  So instead we would need to
3367 	 * replicate parts of the code in the above functions,
3368 	 * simplifying them becuase we wouldn't actually intend to
3369 	 * write out the pages, but rather only collect contiguous
3370 	 * logical block extents, call the multi-block allocator, and
3371 	 * then update the buffer heads with the block allocations.
3372 	 *
3373 	 * For now, though, we'll cheat by calling filemap_flush(),
3374 	 * which will map the blocks, and start the I/O, but not
3375 	 * actually wait for the I/O to complete.
3376 	 */
3377 	return filemap_flush(inode->i_mapping);
3378 }
3379 
3380 /*
3381  * bmap() is special.  It gets used by applications such as lilo and by
3382  * the swapper to find the on-disk block of a specific piece of data.
3383  *
3384  * Naturally, this is dangerous if the block concerned is still in the
3385  * journal.  If somebody makes a swapfile on an ext4 data-journaling
3386  * filesystem and enables swap, then they may get a nasty shock when the
3387  * data getting swapped to that swapfile suddenly gets overwritten by
3388  * the original zero's written out previously to the journal and
3389  * awaiting writeback in the kernel's buffer cache.
3390  *
3391  * So, if we see any bmap calls here on a modified, data-journaled file,
3392  * take extra steps to flush any blocks which might be in the cache.
3393  */
3394 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
3395 {
3396 	struct inode *inode = mapping->host;
3397 	journal_t *journal;
3398 	int err;
3399 
3400 	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
3401 			test_opt(inode->i_sb, DELALLOC)) {
3402 		/*
3403 		 * With delalloc we want to sync the file
3404 		 * so that we can make sure we allocate
3405 		 * blocks for file
3406 		 */
3407 		filemap_write_and_wait(mapping);
3408 	}
3409 
3410 	if (EXT4_JOURNAL(inode) &&
3411 	    ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
3412 		/*
3413 		 * This is a REALLY heavyweight approach, but the use of
3414 		 * bmap on dirty files is expected to be extremely rare:
3415 		 * only if we run lilo or swapon on a freshly made file
3416 		 * do we expect this to happen.
3417 		 *
3418 		 * (bmap requires CAP_SYS_RAWIO so this does not
3419 		 * represent an unprivileged user DOS attack --- we'd be
3420 		 * in trouble if mortal users could trigger this path at
3421 		 * will.)
3422 		 *
3423 		 * NB. EXT4_STATE_JDATA is not set on files other than
3424 		 * regular files.  If somebody wants to bmap a directory
3425 		 * or symlink and gets confused because the buffer
3426 		 * hasn't yet been flushed to disk, they deserve
3427 		 * everything they get.
3428 		 */
3429 
3430 		ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
3431 		journal = EXT4_JOURNAL(inode);
3432 		jbd2_journal_lock_updates(journal);
3433 		err = jbd2_journal_flush(journal);
3434 		jbd2_journal_unlock_updates(journal);
3435 
3436 		if (err)
3437 			return 0;
3438 	}
3439 
3440 	return generic_block_bmap(mapping, block, ext4_get_block);
3441 }
3442 
3443 static int ext4_readpage(struct file *file, struct page *page)
3444 {
3445 	return mpage_readpage(page, ext4_get_block);
3446 }
3447 
3448 static int
3449 ext4_readpages(struct file *file, struct address_space *mapping,
3450 		struct list_head *pages, unsigned nr_pages)
3451 {
3452 	return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
3453 }
3454 
3455 static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset)
3456 {
3457 	struct buffer_head *head, *bh;
3458 	unsigned int curr_off = 0;
3459 
3460 	if (!page_has_buffers(page))
3461 		return;
3462 	head = bh = page_buffers(page);
3463 	do {
3464 		if (offset <= curr_off && test_clear_buffer_uninit(bh)
3465 					&& bh->b_private) {
3466 			ext4_free_io_end(bh->b_private);
3467 			bh->b_private = NULL;
3468 			bh->b_end_io = NULL;
3469 		}
3470 		curr_off = curr_off + bh->b_size;
3471 		bh = bh->b_this_page;
3472 	} while (bh != head);
3473 }
3474 
3475 static void ext4_invalidatepage(struct page *page, unsigned long offset)
3476 {
3477 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3478 
3479 	/*
3480 	 * free any io_end structure allocated for buffers to be discarded
3481 	 */
3482 	if (ext4_should_dioread_nolock(page->mapping->host))
3483 		ext4_invalidatepage_free_endio(page, offset);
3484 	/*
3485 	 * If it's a full truncate we just forget about the pending dirtying
3486 	 */
3487 	if (offset == 0)
3488 		ClearPageChecked(page);
3489 
3490 	if (journal)
3491 		jbd2_journal_invalidatepage(journal, page, offset);
3492 	else
3493 		block_invalidatepage(page, offset);
3494 }
3495 
3496 static int ext4_releasepage(struct page *page, gfp_t wait)
3497 {
3498 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3499 
3500 	WARN_ON(PageChecked(page));
3501 	if (!page_has_buffers(page))
3502 		return 0;
3503 	if (journal)
3504 		return jbd2_journal_try_to_free_buffers(journal, page, wait);
3505 	else
3506 		return try_to_free_buffers(page);
3507 }
3508 
3509 /*
3510  * O_DIRECT for ext3 (or indirect map) based files
3511  *
3512  * If the O_DIRECT write will extend the file then add this inode to the
3513  * orphan list.  So recovery will truncate it back to the original size
3514  * if the machine crashes during the write.
3515  *
3516  * If the O_DIRECT write is intantiating holes inside i_size and the machine
3517  * crashes then stale disk data _may_ be exposed inside the file. But current
3518  * VFS code falls back into buffered path in that case so we are safe.
3519  */
3520 static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
3521 			      const struct iovec *iov, loff_t offset,
3522 			      unsigned long nr_segs)
3523 {
3524 	struct file *file = iocb->ki_filp;
3525 	struct inode *inode = file->f_mapping->host;
3526 	struct ext4_inode_info *ei = EXT4_I(inode);
3527 	handle_t *handle;
3528 	ssize_t ret;
3529 	int orphan = 0;
3530 	size_t count = iov_length(iov, nr_segs);
3531 	int retries = 0;
3532 
3533 	if (rw == WRITE) {
3534 		loff_t final_size = offset + count;
3535 
3536 		if (final_size > inode->i_size) {
3537 			/* Credits for sb + inode write */
3538 			handle = ext4_journal_start(inode, 2);
3539 			if (IS_ERR(handle)) {
3540 				ret = PTR_ERR(handle);
3541 				goto out;
3542 			}
3543 			ret = ext4_orphan_add(handle, inode);
3544 			if (ret) {
3545 				ext4_journal_stop(handle);
3546 				goto out;
3547 			}
3548 			orphan = 1;
3549 			ei->i_disksize = inode->i_size;
3550 			ext4_journal_stop(handle);
3551 		}
3552 	}
3553 
3554 retry:
3555 	if (rw == READ && ext4_should_dioread_nolock(inode))
3556 		ret = __blockdev_direct_IO(rw, iocb, inode,
3557 				 inode->i_sb->s_bdev, iov,
3558 				 offset, nr_segs,
3559 				 ext4_get_block, NULL, NULL, 0);
3560 	else {
3561 		ret = blockdev_direct_IO(rw, iocb, inode,
3562 				 inode->i_sb->s_bdev, iov,
3563 				 offset, nr_segs,
3564 				 ext4_get_block, NULL);
3565 
3566 		if (unlikely((rw & WRITE) && ret < 0)) {
3567 			loff_t isize = i_size_read(inode);
3568 			loff_t end = offset + iov_length(iov, nr_segs);
3569 
3570 			if (end > isize)
3571 				vmtruncate(inode, isize);
3572 		}
3573 	}
3574 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
3575 		goto retry;
3576 
3577 	if (orphan) {
3578 		int err;
3579 
3580 		/* Credits for sb + inode write */
3581 		handle = ext4_journal_start(inode, 2);
3582 		if (IS_ERR(handle)) {
3583 			/* This is really bad luck. We've written the data
3584 			 * but cannot extend i_size. Bail out and pretend
3585 			 * the write failed... */
3586 			ret = PTR_ERR(handle);
3587 			if (inode->i_nlink)
3588 				ext4_orphan_del(NULL, inode);
3589 
3590 			goto out;
3591 		}
3592 		if (inode->i_nlink)
3593 			ext4_orphan_del(handle, inode);
3594 		if (ret > 0) {
3595 			loff_t end = offset + ret;
3596 			if (end > inode->i_size) {
3597 				ei->i_disksize = end;
3598 				i_size_write(inode, end);
3599 				/*
3600 				 * We're going to return a positive `ret'
3601 				 * here due to non-zero-length I/O, so there's
3602 				 * no way of reporting error returns from
3603 				 * ext4_mark_inode_dirty() to userspace.  So
3604 				 * ignore it.
3605 				 */
3606 				ext4_mark_inode_dirty(handle, inode);
3607 			}
3608 		}
3609 		err = ext4_journal_stop(handle);
3610 		if (ret == 0)
3611 			ret = err;
3612 	}
3613 out:
3614 	return ret;
3615 }
3616 
3617 /*
3618  * ext4_get_block used when preparing for a DIO write or buffer write.
3619  * We allocate an uinitialized extent if blocks haven't been allocated.
3620  * The extent will be converted to initialized after the IO is complete.
3621  */
3622 static int ext4_get_block_write(struct inode *inode, sector_t iblock,
3623 		   struct buffer_head *bh_result, int create)
3624 {
3625 	ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
3626 		   inode->i_ino, create);
3627 	return _ext4_get_block(inode, iblock, bh_result,
3628 			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
3629 }
3630 
3631 static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
3632 			    ssize_t size, void *private, int ret,
3633 			    bool is_async)
3634 {
3635         ext4_io_end_t *io_end = iocb->private;
3636 	struct workqueue_struct *wq;
3637 	unsigned long flags;
3638 	struct ext4_inode_info *ei;
3639 
3640 	/* if not async direct IO or dio with 0 bytes write, just return */
3641 	if (!io_end || !size)
3642 		goto out;
3643 
3644 	ext_debug("ext4_end_io_dio(): io_end 0x%p"
3645 		  "for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
3646  		  iocb->private, io_end->inode->i_ino, iocb, offset,
3647 		  size);
3648 
3649 	/* if not aio dio with unwritten extents, just free io and return */
3650 	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
3651 		ext4_free_io_end(io_end);
3652 		iocb->private = NULL;
3653 out:
3654 		if (is_async)
3655 			aio_complete(iocb, ret, 0);
3656 		return;
3657 	}
3658 
3659 	io_end->offset = offset;
3660 	io_end->size = size;
3661 	if (is_async) {
3662 		io_end->iocb = iocb;
3663 		io_end->result = ret;
3664 	}
3665 	wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
3666 
3667 	/* Add the io_end to per-inode completed aio dio list*/
3668 	ei = EXT4_I(io_end->inode);
3669 	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
3670 	list_add_tail(&io_end->list, &ei->i_completed_io_list);
3671 	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
3672 
3673 	/* queue the work to convert unwritten extents to written */
3674 	queue_work(wq, &io_end->work);
3675 	iocb->private = NULL;
3676 }
3677 
3678 static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
3679 {
3680 	ext4_io_end_t *io_end = bh->b_private;
3681 	struct workqueue_struct *wq;
3682 	struct inode *inode;
3683 	unsigned long flags;
3684 
3685 	if (!test_clear_buffer_uninit(bh) || !io_end)
3686 		goto out;
3687 
3688 	if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) {
3689 		printk("sb umounted, discard end_io request for inode %lu\n",
3690 			io_end->inode->i_ino);
3691 		ext4_free_io_end(io_end);
3692 		goto out;
3693 	}
3694 
3695 	io_end->flag = EXT4_IO_END_UNWRITTEN;
3696 	inode = io_end->inode;
3697 
3698 	/* Add the io_end to per-inode completed io list*/
3699 	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
3700 	list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
3701 	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
3702 
3703 	wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
3704 	/* queue the work to convert unwritten extents to written */
3705 	queue_work(wq, &io_end->work);
3706 out:
3707 	bh->b_private = NULL;
3708 	bh->b_end_io = NULL;
3709 	clear_buffer_uninit(bh);
3710 	end_buffer_async_write(bh, uptodate);
3711 }
3712 
3713 static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
3714 {
3715 	ext4_io_end_t *io_end;
3716 	struct page *page = bh->b_page;
3717 	loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT;
3718 	size_t size = bh->b_size;
3719 
3720 retry:
3721 	io_end = ext4_init_io_end(inode, GFP_ATOMIC);
3722 	if (!io_end) {
3723 		if (printk_ratelimit())
3724 			printk(KERN_WARNING "%s: allocation fail\n", __func__);
3725 		schedule();
3726 		goto retry;
3727 	}
3728 	io_end->offset = offset;
3729 	io_end->size = size;
3730 	/*
3731 	 * We need to hold a reference to the page to make sure it
3732 	 * doesn't get evicted before ext4_end_io_work() has a chance
3733 	 * to convert the extent from written to unwritten.
3734 	 */
3735 	io_end->page = page;
3736 	get_page(io_end->page);
3737 
3738 	bh->b_private = io_end;
3739 	bh->b_end_io = ext4_end_io_buffer_write;
3740 	return 0;
3741 }
3742 
3743 /*
3744  * For ext4 extent files, ext4 will do direct-io write to holes,
3745  * preallocated extents, and those write extend the file, no need to
3746  * fall back to buffered IO.
3747  *
3748  * For holes, we fallocate those blocks, mark them as unintialized
3749  * If those blocks were preallocated, we mark sure they are splited, but
3750  * still keep the range to write as unintialized.
3751  *
3752  * The unwrritten extents will be converted to written when DIO is completed.
3753  * For async direct IO, since the IO may still pending when return, we
3754  * set up an end_io call back function, which will do the convertion
3755  * when async direct IO completed.
3756  *
3757  * If the O_DIRECT write will extend the file then add this inode to the
3758  * orphan list.  So recovery will truncate it back to the original size
3759  * if the machine crashes during the write.
3760  *
3761  */
3762 static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
3763 			      const struct iovec *iov, loff_t offset,
3764 			      unsigned long nr_segs)
3765 {
3766 	struct file *file = iocb->ki_filp;
3767 	struct inode *inode = file->f_mapping->host;
3768 	ssize_t ret;
3769 	size_t count = iov_length(iov, nr_segs);
3770 
3771 	loff_t final_size = offset + count;
3772 	if (rw == WRITE && final_size <= inode->i_size) {
3773 		/*
3774  		 * We could direct write to holes and fallocate.
3775 		 *
3776  		 * Allocated blocks to fill the hole are marked as uninitialized
3777  		 * to prevent paralel buffered read to expose the stale data
3778  		 * before DIO complete the data IO.
3779 		 *
3780  		 * As to previously fallocated extents, ext4 get_block
3781  		 * will just simply mark the buffer mapped but still
3782  		 * keep the extents uninitialized.
3783  		 *
3784 		 * for non AIO case, we will convert those unwritten extents
3785 		 * to written after return back from blockdev_direct_IO.
3786 		 *
3787 		 * for async DIO, the conversion needs to be defered when
3788 		 * the IO is completed. The ext4 end_io callback function
3789 		 * will be called to take care of the conversion work.
3790 		 * Here for async case, we allocate an io_end structure to
3791 		 * hook to the iocb.
3792  		 */
3793 		iocb->private = NULL;
3794 		EXT4_I(inode)->cur_aio_dio = NULL;
3795 		if (!is_sync_kiocb(iocb)) {
3796 			iocb->private = ext4_init_io_end(inode, GFP_NOFS);
3797 			if (!iocb->private)
3798 				return -ENOMEM;
3799 			/*
3800 			 * we save the io structure for current async
3801 			 * direct IO, so that later ext4_map_blocks()
3802 			 * could flag the io structure whether there
3803 			 * is a unwritten extents needs to be converted
3804 			 * when IO is completed.
3805 			 */
3806 			EXT4_I(inode)->cur_aio_dio = iocb->private;
3807 		}
3808 
3809 		ret = blockdev_direct_IO(rw, iocb, inode,
3810 					 inode->i_sb->s_bdev, iov,
3811 					 offset, nr_segs,
3812 					 ext4_get_block_write,
3813 					 ext4_end_io_dio);
3814 		if (iocb->private)
3815 			EXT4_I(inode)->cur_aio_dio = NULL;
3816 		/*
3817 		 * The io_end structure takes a reference to the inode,
3818 		 * that structure needs to be destroyed and the
3819 		 * reference to the inode need to be dropped, when IO is
3820 		 * complete, even with 0 byte write, or failed.
3821 		 *
3822 		 * In the successful AIO DIO case, the io_end structure will be
3823 		 * desctroyed and the reference to the inode will be dropped
3824 		 * after the end_io call back function is called.
3825 		 *
3826 		 * In the case there is 0 byte write, or error case, since
3827 		 * VFS direct IO won't invoke the end_io call back function,
3828 		 * we need to free the end_io structure here.
3829 		 */
3830 		if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
3831 			ext4_free_io_end(iocb->private);
3832 			iocb->private = NULL;
3833 		} else if (ret > 0 && ext4_test_inode_state(inode,
3834 						EXT4_STATE_DIO_UNWRITTEN)) {
3835 			int err;
3836 			/*
3837 			 * for non AIO case, since the IO is already
3838 			 * completed, we could do the convertion right here
3839 			 */
3840 			err = ext4_convert_unwritten_extents(inode,
3841 							     offset, ret);
3842 			if (err < 0)
3843 				ret = err;
3844 			ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3845 		}
3846 		return ret;
3847 	}
3848 
3849 	/* for write the the end of file case, we fall back to old way */
3850 	return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3851 }
3852 
3853 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
3854 			      const struct iovec *iov, loff_t offset,
3855 			      unsigned long nr_segs)
3856 {
3857 	struct file *file = iocb->ki_filp;
3858 	struct inode *inode = file->f_mapping->host;
3859 
3860 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3861 		return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
3862 
3863 	return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3864 }
3865 
3866 /*
3867  * Pages can be marked dirty completely asynchronously from ext4's journalling
3868  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
3869  * much here because ->set_page_dirty is called under VFS locks.  The page is
3870  * not necessarily locked.
3871  *
3872  * We cannot just dirty the page and leave attached buffers clean, because the
3873  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
3874  * or jbddirty because all the journalling code will explode.
3875  *
3876  * So what we do is to mark the page "pending dirty" and next time writepage
3877  * is called, propagate that into the buffers appropriately.
3878  */
3879 static int ext4_journalled_set_page_dirty(struct page *page)
3880 {
3881 	SetPageChecked(page);
3882 	return __set_page_dirty_nobuffers(page);
3883 }
3884 
3885 static const struct address_space_operations ext4_ordered_aops = {
3886 	.readpage		= ext4_readpage,
3887 	.readpages		= ext4_readpages,
3888 	.writepage		= ext4_writepage,
3889 	.sync_page		= block_sync_page,
3890 	.write_begin		= ext4_write_begin,
3891 	.write_end		= ext4_ordered_write_end,
3892 	.bmap			= ext4_bmap,
3893 	.invalidatepage		= ext4_invalidatepage,
3894 	.releasepage		= ext4_releasepage,
3895 	.direct_IO		= ext4_direct_IO,
3896 	.migratepage		= buffer_migrate_page,
3897 	.is_partially_uptodate  = block_is_partially_uptodate,
3898 	.error_remove_page	= generic_error_remove_page,
3899 };
3900 
3901 static const struct address_space_operations ext4_writeback_aops = {
3902 	.readpage		= ext4_readpage,
3903 	.readpages		= ext4_readpages,
3904 	.writepage		= ext4_writepage,
3905 	.sync_page		= block_sync_page,
3906 	.write_begin		= ext4_write_begin,
3907 	.write_end		= ext4_writeback_write_end,
3908 	.bmap			= ext4_bmap,
3909 	.invalidatepage		= ext4_invalidatepage,
3910 	.releasepage		= ext4_releasepage,
3911 	.direct_IO		= ext4_direct_IO,
3912 	.migratepage		= buffer_migrate_page,
3913 	.is_partially_uptodate  = block_is_partially_uptodate,
3914 	.error_remove_page	= generic_error_remove_page,
3915 };
3916 
3917 static const struct address_space_operations ext4_journalled_aops = {
3918 	.readpage		= ext4_readpage,
3919 	.readpages		= ext4_readpages,
3920 	.writepage		= ext4_writepage,
3921 	.sync_page		= block_sync_page,
3922 	.write_begin		= ext4_write_begin,
3923 	.write_end		= ext4_journalled_write_end,
3924 	.set_page_dirty		= ext4_journalled_set_page_dirty,
3925 	.bmap			= ext4_bmap,
3926 	.invalidatepage		= ext4_invalidatepage,
3927 	.releasepage		= ext4_releasepage,
3928 	.is_partially_uptodate  = block_is_partially_uptodate,
3929 	.error_remove_page	= generic_error_remove_page,
3930 };
3931 
3932 static const struct address_space_operations ext4_da_aops = {
3933 	.readpage		= ext4_readpage,
3934 	.readpages		= ext4_readpages,
3935 	.writepage		= ext4_writepage,
3936 	.writepages		= ext4_da_writepages,
3937 	.sync_page		= block_sync_page,
3938 	.write_begin		= ext4_da_write_begin,
3939 	.write_end		= ext4_da_write_end,
3940 	.bmap			= ext4_bmap,
3941 	.invalidatepage		= ext4_da_invalidatepage,
3942 	.releasepage		= ext4_releasepage,
3943 	.direct_IO		= ext4_direct_IO,
3944 	.migratepage		= buffer_migrate_page,
3945 	.is_partially_uptodate  = block_is_partially_uptodate,
3946 	.error_remove_page	= generic_error_remove_page,
3947 };
3948 
3949 void ext4_set_aops(struct inode *inode)
3950 {
3951 	if (ext4_should_order_data(inode) &&
3952 		test_opt(inode->i_sb, DELALLOC))
3953 		inode->i_mapping->a_ops = &ext4_da_aops;
3954 	else if (ext4_should_order_data(inode))
3955 		inode->i_mapping->a_ops = &ext4_ordered_aops;
3956 	else if (ext4_should_writeback_data(inode) &&
3957 		 test_opt(inode->i_sb, DELALLOC))
3958 		inode->i_mapping->a_ops = &ext4_da_aops;
3959 	else if (ext4_should_writeback_data(inode))
3960 		inode->i_mapping->a_ops = &ext4_writeback_aops;
3961 	else
3962 		inode->i_mapping->a_ops = &ext4_journalled_aops;
3963 }
3964 
3965 /*
3966  * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3967  * up to the end of the block which corresponds to `from'.
3968  * This required during truncate. We need to physically zero the tail end
3969  * of that block so it doesn't yield old data if the file is later grown.
3970  */
3971 int ext4_block_truncate_page(handle_t *handle,
3972 		struct address_space *mapping, loff_t from)
3973 {
3974 	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
3975 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
3976 	unsigned blocksize, length, pos;
3977 	ext4_lblk_t iblock;
3978 	struct inode *inode = mapping->host;
3979 	struct buffer_head *bh;
3980 	struct page *page;
3981 	int err = 0;
3982 
3983 	page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
3984 				   mapping_gfp_mask(mapping) & ~__GFP_FS);
3985 	if (!page)
3986 		return -EINVAL;
3987 
3988 	blocksize = inode->i_sb->s_blocksize;
3989 	length = blocksize - (offset & (blocksize - 1));
3990 	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
3991 
3992 	if (!page_has_buffers(page))
3993 		create_empty_buffers(page, blocksize, 0);
3994 
3995 	/* Find the buffer that contains "offset" */
3996 	bh = page_buffers(page);
3997 	pos = blocksize;
3998 	while (offset >= pos) {
3999 		bh = bh->b_this_page;
4000 		iblock++;
4001 		pos += blocksize;
4002 	}
4003 
4004 	err = 0;
4005 	if (buffer_freed(bh)) {
4006 		BUFFER_TRACE(bh, "freed: skip");
4007 		goto unlock;
4008 	}
4009 
4010 	if (!buffer_mapped(bh)) {
4011 		BUFFER_TRACE(bh, "unmapped");
4012 		ext4_get_block(inode, iblock, bh, 0);
4013 		/* unmapped? It's a hole - nothing to do */
4014 		if (!buffer_mapped(bh)) {
4015 			BUFFER_TRACE(bh, "still unmapped");
4016 			goto unlock;
4017 		}
4018 	}
4019 
4020 	/* Ok, it's mapped. Make sure it's up-to-date */
4021 	if (PageUptodate(page))
4022 		set_buffer_uptodate(bh);
4023 
4024 	if (!buffer_uptodate(bh)) {
4025 		err = -EIO;
4026 		ll_rw_block(READ, 1, &bh);
4027 		wait_on_buffer(bh);
4028 		/* Uhhuh. Read error. Complain and punt. */
4029 		if (!buffer_uptodate(bh))
4030 			goto unlock;
4031 	}
4032 
4033 	if (ext4_should_journal_data(inode)) {
4034 		BUFFER_TRACE(bh, "get write access");
4035 		err = ext4_journal_get_write_access(handle, bh);
4036 		if (err)
4037 			goto unlock;
4038 	}
4039 
4040 	zero_user(page, offset, length);
4041 
4042 	BUFFER_TRACE(bh, "zeroed end of block");
4043 
4044 	err = 0;
4045 	if (ext4_should_journal_data(inode)) {
4046 		err = ext4_handle_dirty_metadata(handle, inode, bh);
4047 	} else {
4048 		if (ext4_should_order_data(inode))
4049 			err = ext4_jbd2_file_inode(handle, inode);
4050 		mark_buffer_dirty(bh);
4051 	}
4052 
4053 unlock:
4054 	unlock_page(page);
4055 	page_cache_release(page);
4056 	return err;
4057 }
4058 
4059 /*
4060  * Probably it should be a library function... search for first non-zero word
4061  * or memcmp with zero_page, whatever is better for particular architecture.
4062  * Linus?
4063  */
4064 static inline int all_zeroes(__le32 *p, __le32 *q)
4065 {
4066 	while (p < q)
4067 		if (*p++)
4068 			return 0;
4069 	return 1;
4070 }
4071 
4072 /**
4073  *	ext4_find_shared - find the indirect blocks for partial truncation.
4074  *	@inode:	  inode in question
4075  *	@depth:	  depth of the affected branch
4076  *	@offsets: offsets of pointers in that branch (see ext4_block_to_path)
4077  *	@chain:	  place to store the pointers to partial indirect blocks
4078  *	@top:	  place to the (detached) top of branch
4079  *
4080  *	This is a helper function used by ext4_truncate().
4081  *
4082  *	When we do truncate() we may have to clean the ends of several
4083  *	indirect blocks but leave the blocks themselves alive. Block is
4084  *	partially truncated if some data below the new i_size is refered
4085  *	from it (and it is on the path to the first completely truncated
4086  *	data block, indeed).  We have to free the top of that path along
4087  *	with everything to the right of the path. Since no allocation
4088  *	past the truncation point is possible until ext4_truncate()
4089  *	finishes, we may safely do the latter, but top of branch may
4090  *	require special attention - pageout below the truncation point
4091  *	might try to populate it.
4092  *
4093  *	We atomically detach the top of branch from the tree, store the
4094  *	block number of its root in *@top, pointers to buffer_heads of
4095  *	partially truncated blocks - in @chain[].bh and pointers to
4096  *	their last elements that should not be removed - in
4097  *	@chain[].p. Return value is the pointer to last filled element
4098  *	of @chain.
4099  *
4100  *	The work left to caller to do the actual freeing of subtrees:
4101  *		a) free the subtree starting from *@top
4102  *		b) free the subtrees whose roots are stored in
4103  *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
4104  *		c) free the subtrees growing from the inode past the @chain[0].
4105  *			(no partially truncated stuff there).  */
4106 
4107 static Indirect *ext4_find_shared(struct inode *inode, int depth,
4108 				  ext4_lblk_t offsets[4], Indirect chain[4],
4109 				  __le32 *top)
4110 {
4111 	Indirect *partial, *p;
4112 	int k, err;
4113 
4114 	*top = 0;
4115 	/* Make k index the deepest non-null offset + 1 */
4116 	for (k = depth; k > 1 && !offsets[k-1]; k--)
4117 		;
4118 	partial = ext4_get_branch(inode, k, offsets, chain, &err);
4119 	/* Writer: pointers */
4120 	if (!partial)
4121 		partial = chain + k-1;
4122 	/*
4123 	 * If the branch acquired continuation since we've looked at it -
4124 	 * fine, it should all survive and (new) top doesn't belong to us.
4125 	 */
4126 	if (!partial->key && *partial->p)
4127 		/* Writer: end */
4128 		goto no_top;
4129 	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
4130 		;
4131 	/*
4132 	 * OK, we've found the last block that must survive. The rest of our
4133 	 * branch should be detached before unlocking. However, if that rest
4134 	 * of branch is all ours and does not grow immediately from the inode
4135 	 * it's easier to cheat and just decrement partial->p.
4136 	 */
4137 	if (p == chain + k - 1 && p > chain) {
4138 		p->p--;
4139 	} else {
4140 		*top = *p->p;
4141 		/* Nope, don't do this in ext4.  Must leave the tree intact */
4142 #if 0
4143 		*p->p = 0;
4144 #endif
4145 	}
4146 	/* Writer: end */
4147 
4148 	while (partial > p) {
4149 		brelse(partial->bh);
4150 		partial--;
4151 	}
4152 no_top:
4153 	return partial;
4154 }
4155 
4156 /*
4157  * Zero a number of block pointers in either an inode or an indirect block.
4158  * If we restart the transaction we must again get write access to the
4159  * indirect block for further modification.
4160  *
4161  * We release `count' blocks on disk, but (last - first) may be greater
4162  * than `count' because there can be holes in there.
4163  */
4164 static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
4165 			     struct buffer_head *bh,
4166 			     ext4_fsblk_t block_to_free,
4167 			     unsigned long count, __le32 *first,
4168 			     __le32 *last)
4169 {
4170 	__le32 *p;
4171 	int	flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
4172 
4173 	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
4174 		flags |= EXT4_FREE_BLOCKS_METADATA;
4175 
4176 	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
4177 				   count)) {
4178 		EXT4_ERROR_INODE(inode, "attempt to clear invalid "
4179 				 "blocks %llu len %lu",
4180 				 (unsigned long long) block_to_free, count);
4181 		return 1;
4182 	}
4183 
4184 	if (try_to_extend_transaction(handle, inode)) {
4185 		if (bh) {
4186 			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
4187 			ext4_handle_dirty_metadata(handle, inode, bh);
4188 		}
4189 		ext4_mark_inode_dirty(handle, inode);
4190 		ext4_truncate_restart_trans(handle, inode,
4191 					    blocks_for_truncate(inode));
4192 		if (bh) {
4193 			BUFFER_TRACE(bh, "retaking write access");
4194 			ext4_journal_get_write_access(handle, bh);
4195 		}
4196 	}
4197 
4198 	for (p = first; p < last; p++)
4199 		*p = 0;
4200 
4201 	ext4_free_blocks(handle, inode, 0, block_to_free, count, flags);
4202 	return 0;
4203 }
4204 
4205 /**
4206  * ext4_free_data - free a list of data blocks
4207  * @handle:	handle for this transaction
4208  * @inode:	inode we are dealing with
4209  * @this_bh:	indirect buffer_head which contains *@first and *@last
4210  * @first:	array of block numbers
4211  * @last:	points immediately past the end of array
4212  *
4213  * We are freeing all blocks refered from that array (numbers are stored as
4214  * little-endian 32-bit) and updating @inode->i_blocks appropriately.
4215  *
4216  * We accumulate contiguous runs of blocks to free.  Conveniently, if these
4217  * blocks are contiguous then releasing them at one time will only affect one
4218  * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
4219  * actually use a lot of journal space.
4220  *
4221  * @this_bh will be %NULL if @first and @last point into the inode's direct
4222  * block pointers.
4223  */
4224 static void ext4_free_data(handle_t *handle, struct inode *inode,
4225 			   struct buffer_head *this_bh,
4226 			   __le32 *first, __le32 *last)
4227 {
4228 	ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
4229 	unsigned long count = 0;	    /* Number of blocks in the run */
4230 	__le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
4231 					       corresponding to
4232 					       block_to_free */
4233 	ext4_fsblk_t nr;		    /* Current block # */
4234 	__le32 *p;			    /* Pointer into inode/ind
4235 					       for current block */
4236 	int err;
4237 
4238 	if (this_bh) {				/* For indirect block */
4239 		BUFFER_TRACE(this_bh, "get_write_access");
4240 		err = ext4_journal_get_write_access(handle, this_bh);
4241 		/* Important: if we can't update the indirect pointers
4242 		 * to the blocks, we can't free them. */
4243 		if (err)
4244 			return;
4245 	}
4246 
4247 	for (p = first; p < last; p++) {
4248 		nr = le32_to_cpu(*p);
4249 		if (nr) {
4250 			/* accumulate blocks to free if they're contiguous */
4251 			if (count == 0) {
4252 				block_to_free = nr;
4253 				block_to_free_p = p;
4254 				count = 1;
4255 			} else if (nr == block_to_free + count) {
4256 				count++;
4257 			} else {
4258 				if (ext4_clear_blocks(handle, inode, this_bh,
4259 						      block_to_free, count,
4260 						      block_to_free_p, p))
4261 					break;
4262 				block_to_free = nr;
4263 				block_to_free_p = p;
4264 				count = 1;
4265 			}
4266 		}
4267 	}
4268 
4269 	if (count > 0)
4270 		ext4_clear_blocks(handle, inode, this_bh, block_to_free,
4271 				  count, block_to_free_p, p);
4272 
4273 	if (this_bh) {
4274 		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
4275 
4276 		/*
4277 		 * The buffer head should have an attached journal head at this
4278 		 * point. However, if the data is corrupted and an indirect
4279 		 * block pointed to itself, it would have been detached when
4280 		 * the block was cleared. Check for this instead of OOPSing.
4281 		 */
4282 		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
4283 			ext4_handle_dirty_metadata(handle, inode, this_bh);
4284 		else
4285 			EXT4_ERROR_INODE(inode,
4286 					 "circular indirect block detected at "
4287 					 "block %llu",
4288 				(unsigned long long) this_bh->b_blocknr);
4289 	}
4290 }
4291 
4292 /**
4293  *	ext4_free_branches - free an array of branches
4294  *	@handle: JBD handle for this transaction
4295  *	@inode:	inode we are dealing with
4296  *	@parent_bh: the buffer_head which contains *@first and *@last
4297  *	@first:	array of block numbers
4298  *	@last:	pointer immediately past the end of array
4299  *	@depth:	depth of the branches to free
4300  *
4301  *	We are freeing all blocks refered from these branches (numbers are
4302  *	stored as little-endian 32-bit) and updating @inode->i_blocks
4303  *	appropriately.
4304  */
4305 static void ext4_free_branches(handle_t *handle, struct inode *inode,
4306 			       struct buffer_head *parent_bh,
4307 			       __le32 *first, __le32 *last, int depth)
4308 {
4309 	ext4_fsblk_t nr;
4310 	__le32 *p;
4311 
4312 	if (ext4_handle_is_aborted(handle))
4313 		return;
4314 
4315 	if (depth--) {
4316 		struct buffer_head *bh;
4317 		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
4318 		p = last;
4319 		while (--p >= first) {
4320 			nr = le32_to_cpu(*p);
4321 			if (!nr)
4322 				continue;		/* A hole */
4323 
4324 			if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
4325 						   nr, 1)) {
4326 				EXT4_ERROR_INODE(inode,
4327 						 "invalid indirect mapped "
4328 						 "block %lu (level %d)",
4329 						 (unsigned long) nr, depth);
4330 				break;
4331 			}
4332 
4333 			/* Go read the buffer for the next level down */
4334 			bh = sb_bread(inode->i_sb, nr);
4335 
4336 			/*
4337 			 * A read failure? Report error and clear slot
4338 			 * (should be rare).
4339 			 */
4340 			if (!bh) {
4341 				EXT4_ERROR_INODE_BLOCK(inode, nr,
4342 						       "Read failure");
4343 				continue;
4344 			}
4345 
4346 			/* This zaps the entire block.  Bottom up. */
4347 			BUFFER_TRACE(bh, "free child branches");
4348 			ext4_free_branches(handle, inode, bh,
4349 					(__le32 *) bh->b_data,
4350 					(__le32 *) bh->b_data + addr_per_block,
4351 					depth);
4352 
4353 			/*
4354 			 * Everything below this this pointer has been
4355 			 * released.  Now let this top-of-subtree go.
4356 			 *
4357 			 * We want the freeing of this indirect block to be
4358 			 * atomic in the journal with the updating of the
4359 			 * bitmap block which owns it.  So make some room in
4360 			 * the journal.
4361 			 *
4362 			 * We zero the parent pointer *after* freeing its
4363 			 * pointee in the bitmaps, so if extend_transaction()
4364 			 * for some reason fails to put the bitmap changes and
4365 			 * the release into the same transaction, recovery
4366 			 * will merely complain about releasing a free block,
4367 			 * rather than leaking blocks.
4368 			 */
4369 			if (ext4_handle_is_aborted(handle))
4370 				return;
4371 			if (try_to_extend_transaction(handle, inode)) {
4372 				ext4_mark_inode_dirty(handle, inode);
4373 				ext4_truncate_restart_trans(handle, inode,
4374 					    blocks_for_truncate(inode));
4375 			}
4376 
4377 			/*
4378 			 * The forget flag here is critical because if
4379 			 * we are journaling (and not doing data
4380 			 * journaling), we have to make sure a revoke
4381 			 * record is written to prevent the journal
4382 			 * replay from overwriting the (former)
4383 			 * indirect block if it gets reallocated as a
4384 			 * data block.  This must happen in the same
4385 			 * transaction where the data blocks are
4386 			 * actually freed.
4387 			 */
4388 			ext4_free_blocks(handle, inode, 0, nr, 1,
4389 					 EXT4_FREE_BLOCKS_METADATA|
4390 					 EXT4_FREE_BLOCKS_FORGET);
4391 
4392 			if (parent_bh) {
4393 				/*
4394 				 * The block which we have just freed is
4395 				 * pointed to by an indirect block: journal it
4396 				 */
4397 				BUFFER_TRACE(parent_bh, "get_write_access");
4398 				if (!ext4_journal_get_write_access(handle,
4399 								   parent_bh)){
4400 					*p = 0;
4401 					BUFFER_TRACE(parent_bh,
4402 					"call ext4_handle_dirty_metadata");
4403 					ext4_handle_dirty_metadata(handle,
4404 								   inode,
4405 								   parent_bh);
4406 				}
4407 			}
4408 		}
4409 	} else {
4410 		/* We have reached the bottom of the tree. */
4411 		BUFFER_TRACE(parent_bh, "free data blocks");
4412 		ext4_free_data(handle, inode, parent_bh, first, last);
4413 	}
4414 }
4415 
4416 int ext4_can_truncate(struct inode *inode)
4417 {
4418 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
4419 		return 0;
4420 	if (S_ISREG(inode->i_mode))
4421 		return 1;
4422 	if (S_ISDIR(inode->i_mode))
4423 		return 1;
4424 	if (S_ISLNK(inode->i_mode))
4425 		return !ext4_inode_is_fast_symlink(inode);
4426 	return 0;
4427 }
4428 
4429 /*
4430  * ext4_truncate()
4431  *
4432  * We block out ext4_get_block() block instantiations across the entire
4433  * transaction, and VFS/VM ensures that ext4_truncate() cannot run
4434  * simultaneously on behalf of the same inode.
4435  *
4436  * As we work through the truncate and commmit bits of it to the journal there
4437  * is one core, guiding principle: the file's tree must always be consistent on
4438  * disk.  We must be able to restart the truncate after a crash.
4439  *
4440  * The file's tree may be transiently inconsistent in memory (although it
4441  * probably isn't), but whenever we close off and commit a journal transaction,
4442  * the contents of (the filesystem + the journal) must be consistent and
4443  * restartable.  It's pretty simple, really: bottom up, right to left (although
4444  * left-to-right works OK too).
4445  *
4446  * Note that at recovery time, journal replay occurs *before* the restart of
4447  * truncate against the orphan inode list.
4448  *
4449  * The committed inode has the new, desired i_size (which is the same as
4450  * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
4451  * that this inode's truncate did not complete and it will again call
4452  * ext4_truncate() to have another go.  So there will be instantiated blocks
4453  * to the right of the truncation point in a crashed ext4 filesystem.  But
4454  * that's fine - as long as they are linked from the inode, the post-crash
4455  * ext4_truncate() run will find them and release them.
4456  */
4457 void ext4_truncate(struct inode *inode)
4458 {
4459 	handle_t *handle;
4460 	struct ext4_inode_info *ei = EXT4_I(inode);
4461 	__le32 *i_data = ei->i_data;
4462 	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
4463 	struct address_space *mapping = inode->i_mapping;
4464 	ext4_lblk_t offsets[4];
4465 	Indirect chain[4];
4466 	Indirect *partial;
4467 	__le32 nr = 0;
4468 	int n;
4469 	ext4_lblk_t last_block;
4470 	unsigned blocksize = inode->i_sb->s_blocksize;
4471 
4472 	if (!ext4_can_truncate(inode))
4473 		return;
4474 
4475 	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4476 
4477 	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
4478 		ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
4479 
4480 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
4481 		ext4_ext_truncate(inode);
4482 		return;
4483 	}
4484 
4485 	handle = start_transaction(inode);
4486 	if (IS_ERR(handle))
4487 		return;		/* AKPM: return what? */
4488 
4489 	last_block = (inode->i_size + blocksize-1)
4490 					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
4491 
4492 	if (inode->i_size & (blocksize - 1))
4493 		if (ext4_block_truncate_page(handle, mapping, inode->i_size))
4494 			goto out_stop;
4495 
4496 	n = ext4_block_to_path(inode, last_block, offsets, NULL);
4497 	if (n == 0)
4498 		goto out_stop;	/* error */
4499 
4500 	/*
4501 	 * OK.  This truncate is going to happen.  We add the inode to the
4502 	 * orphan list, so that if this truncate spans multiple transactions,
4503 	 * and we crash, we will resume the truncate when the filesystem
4504 	 * recovers.  It also marks the inode dirty, to catch the new size.
4505 	 *
4506 	 * Implication: the file must always be in a sane, consistent
4507 	 * truncatable state while each transaction commits.
4508 	 */
4509 	if (ext4_orphan_add(handle, inode))
4510 		goto out_stop;
4511 
4512 	/*
4513 	 * From here we block out all ext4_get_block() callers who want to
4514 	 * modify the block allocation tree.
4515 	 */
4516 	down_write(&ei->i_data_sem);
4517 
4518 	ext4_discard_preallocations(inode);
4519 
4520 	/*
4521 	 * The orphan list entry will now protect us from any crash which
4522 	 * occurs before the truncate completes, so it is now safe to propagate
4523 	 * the new, shorter inode size (held for now in i_size) into the
4524 	 * on-disk inode. We do this via i_disksize, which is the value which
4525 	 * ext4 *really* writes onto the disk inode.
4526 	 */
4527 	ei->i_disksize = inode->i_size;
4528 
4529 	if (n == 1) {		/* direct blocks */
4530 		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
4531 			       i_data + EXT4_NDIR_BLOCKS);
4532 		goto do_indirects;
4533 	}
4534 
4535 	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
4536 	/* Kill the top of shared branch (not detached) */
4537 	if (nr) {
4538 		if (partial == chain) {
4539 			/* Shared branch grows from the inode */
4540 			ext4_free_branches(handle, inode, NULL,
4541 					   &nr, &nr+1, (chain+n-1) - partial);
4542 			*partial->p = 0;
4543 			/*
4544 			 * We mark the inode dirty prior to restart,
4545 			 * and prior to stop.  No need for it here.
4546 			 */
4547 		} else {
4548 			/* Shared branch grows from an indirect block */
4549 			BUFFER_TRACE(partial->bh, "get_write_access");
4550 			ext4_free_branches(handle, inode, partial->bh,
4551 					partial->p,
4552 					partial->p+1, (chain+n-1) - partial);
4553 		}
4554 	}
4555 	/* Clear the ends of indirect blocks on the shared branch */
4556 	while (partial > chain) {
4557 		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
4558 				   (__le32*)partial->bh->b_data+addr_per_block,
4559 				   (chain+n-1) - partial);
4560 		BUFFER_TRACE(partial->bh, "call brelse");
4561 		brelse(partial->bh);
4562 		partial--;
4563 	}
4564 do_indirects:
4565 	/* Kill the remaining (whole) subtrees */
4566 	switch (offsets[0]) {
4567 	default:
4568 		nr = i_data[EXT4_IND_BLOCK];
4569 		if (nr) {
4570 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
4571 			i_data[EXT4_IND_BLOCK] = 0;
4572 		}
4573 	case EXT4_IND_BLOCK:
4574 		nr = i_data[EXT4_DIND_BLOCK];
4575 		if (nr) {
4576 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
4577 			i_data[EXT4_DIND_BLOCK] = 0;
4578 		}
4579 	case EXT4_DIND_BLOCK:
4580 		nr = i_data[EXT4_TIND_BLOCK];
4581 		if (nr) {
4582 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
4583 			i_data[EXT4_TIND_BLOCK] = 0;
4584 		}
4585 	case EXT4_TIND_BLOCK:
4586 		;
4587 	}
4588 
4589 	up_write(&ei->i_data_sem);
4590 	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4591 	ext4_mark_inode_dirty(handle, inode);
4592 
4593 	/*
4594 	 * In a multi-transaction truncate, we only make the final transaction
4595 	 * synchronous
4596 	 */
4597 	if (IS_SYNC(inode))
4598 		ext4_handle_sync(handle);
4599 out_stop:
4600 	/*
4601 	 * If this was a simple ftruncate(), and the file will remain alive
4602 	 * then we need to clear up the orphan record which we created above.
4603 	 * However, if this was a real unlink then we were called by
4604 	 * ext4_delete_inode(), and we allow that function to clean up the
4605 	 * orphan info for us.
4606 	 */
4607 	if (inode->i_nlink)
4608 		ext4_orphan_del(handle, inode);
4609 
4610 	ext4_journal_stop(handle);
4611 }
4612 
4613 /*
4614  * ext4_get_inode_loc returns with an extra refcount against the inode's
4615  * underlying buffer_head on success. If 'in_mem' is true, we have all
4616  * data in memory that is needed to recreate the on-disk version of this
4617  * inode.
4618  */
4619 static int __ext4_get_inode_loc(struct inode *inode,
4620 				struct ext4_iloc *iloc, int in_mem)
4621 {
4622 	struct ext4_group_desc	*gdp;
4623 	struct buffer_head	*bh;
4624 	struct super_block	*sb = inode->i_sb;
4625 	ext4_fsblk_t		block;
4626 	int			inodes_per_block, inode_offset;
4627 
4628 	iloc->bh = NULL;
4629 	if (!ext4_valid_inum(sb, inode->i_ino))
4630 		return -EIO;
4631 
4632 	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
4633 	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
4634 	if (!gdp)
4635 		return -EIO;
4636 
4637 	/*
4638 	 * Figure out the offset within the block group inode table
4639 	 */
4640 	inodes_per_block = (EXT4_BLOCK_SIZE(sb) / EXT4_INODE_SIZE(sb));
4641 	inode_offset = ((inode->i_ino - 1) %
4642 			EXT4_INODES_PER_GROUP(sb));
4643 	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
4644 	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
4645 
4646 	bh = sb_getblk(sb, block);
4647 	if (!bh) {
4648 		EXT4_ERROR_INODE_BLOCK(inode, block,
4649 				       "unable to read itable block");
4650 		return -EIO;
4651 	}
4652 	if (!buffer_uptodate(bh)) {
4653 		lock_buffer(bh);
4654 
4655 		/*
4656 		 * If the buffer has the write error flag, we have failed
4657 		 * to write out another inode in the same block.  In this
4658 		 * case, we don't have to read the block because we may
4659 		 * read the old inode data successfully.
4660 		 */
4661 		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
4662 			set_buffer_uptodate(bh);
4663 
4664 		if (buffer_uptodate(bh)) {
4665 			/* someone brought it uptodate while we waited */
4666 			unlock_buffer(bh);
4667 			goto has_buffer;
4668 		}
4669 
4670 		/*
4671 		 * If we have all information of the inode in memory and this
4672 		 * is the only valid inode in the block, we need not read the
4673 		 * block.
4674 		 */
4675 		if (in_mem) {
4676 			struct buffer_head *bitmap_bh;
4677 			int i, start;
4678 
4679 			start = inode_offset & ~(inodes_per_block - 1);
4680 
4681 			/* Is the inode bitmap in cache? */
4682 			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
4683 			if (!bitmap_bh)
4684 				goto make_io;
4685 
4686 			/*
4687 			 * If the inode bitmap isn't in cache then the
4688 			 * optimisation may end up performing two reads instead
4689 			 * of one, so skip it.
4690 			 */
4691 			if (!buffer_uptodate(bitmap_bh)) {
4692 				brelse(bitmap_bh);
4693 				goto make_io;
4694 			}
4695 			for (i = start; i < start + inodes_per_block; i++) {
4696 				if (i == inode_offset)
4697 					continue;
4698 				if (ext4_test_bit(i, bitmap_bh->b_data))
4699 					break;
4700 			}
4701 			brelse(bitmap_bh);
4702 			if (i == start + inodes_per_block) {
4703 				/* all other inodes are free, so skip I/O */
4704 				memset(bh->b_data, 0, bh->b_size);
4705 				set_buffer_uptodate(bh);
4706 				unlock_buffer(bh);
4707 				goto has_buffer;
4708 			}
4709 		}
4710 
4711 make_io:
4712 		/*
4713 		 * If we need to do any I/O, try to pre-readahead extra
4714 		 * blocks from the inode table.
4715 		 */
4716 		if (EXT4_SB(sb)->s_inode_readahead_blks) {
4717 			ext4_fsblk_t b, end, table;
4718 			unsigned num;
4719 
4720 			table = ext4_inode_table(sb, gdp);
4721 			/* s_inode_readahead_blks is always a power of 2 */
4722 			b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
4723 			if (table > b)
4724 				b = table;
4725 			end = b + EXT4_SB(sb)->s_inode_readahead_blks;
4726 			num = EXT4_INODES_PER_GROUP(sb);
4727 			if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
4728 				       EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
4729 				num -= ext4_itable_unused_count(sb, gdp);
4730 			table += num / inodes_per_block;
4731 			if (end > table)
4732 				end = table;
4733 			while (b <= end)
4734 				sb_breadahead(sb, b++);
4735 		}
4736 
4737 		/*
4738 		 * There are other valid inodes in the buffer, this inode
4739 		 * has in-inode xattrs, or we don't have this inode in memory.
4740 		 * Read the block from disk.
4741 		 */
4742 		get_bh(bh);
4743 		bh->b_end_io = end_buffer_read_sync;
4744 		submit_bh(READ_META, bh);
4745 		wait_on_buffer(bh);
4746 		if (!buffer_uptodate(bh)) {
4747 			EXT4_ERROR_INODE_BLOCK(inode, block,
4748 					       "unable to read itable block");
4749 			brelse(bh);
4750 			return -EIO;
4751 		}
4752 	}
4753 has_buffer:
4754 	iloc->bh = bh;
4755 	return 0;
4756 }
4757 
4758 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
4759 {
4760 	/* We have all inode data except xattrs in memory here. */
4761 	return __ext4_get_inode_loc(inode, iloc,
4762 		!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
4763 }
4764 
4765 void ext4_set_inode_flags(struct inode *inode)
4766 {
4767 	unsigned int flags = EXT4_I(inode)->i_flags;
4768 
4769 	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
4770 	if (flags & EXT4_SYNC_FL)
4771 		inode->i_flags |= S_SYNC;
4772 	if (flags & EXT4_APPEND_FL)
4773 		inode->i_flags |= S_APPEND;
4774 	if (flags & EXT4_IMMUTABLE_FL)
4775 		inode->i_flags |= S_IMMUTABLE;
4776 	if (flags & EXT4_NOATIME_FL)
4777 		inode->i_flags |= S_NOATIME;
4778 	if (flags & EXT4_DIRSYNC_FL)
4779 		inode->i_flags |= S_DIRSYNC;
4780 }
4781 
4782 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
4783 void ext4_get_inode_flags(struct ext4_inode_info *ei)
4784 {
4785 	unsigned int vfs_fl;
4786 	unsigned long old_fl, new_fl;
4787 
4788 	do {
4789 		vfs_fl = ei->vfs_inode.i_flags;
4790 		old_fl = ei->i_flags;
4791 		new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
4792 				EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
4793 				EXT4_DIRSYNC_FL);
4794 		if (vfs_fl & S_SYNC)
4795 			new_fl |= EXT4_SYNC_FL;
4796 		if (vfs_fl & S_APPEND)
4797 			new_fl |= EXT4_APPEND_FL;
4798 		if (vfs_fl & S_IMMUTABLE)
4799 			new_fl |= EXT4_IMMUTABLE_FL;
4800 		if (vfs_fl & S_NOATIME)
4801 			new_fl |= EXT4_NOATIME_FL;
4802 		if (vfs_fl & S_DIRSYNC)
4803 			new_fl |= EXT4_DIRSYNC_FL;
4804 	} while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
4805 }
4806 
4807 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
4808 				  struct ext4_inode_info *ei)
4809 {
4810 	blkcnt_t i_blocks ;
4811 	struct inode *inode = &(ei->vfs_inode);
4812 	struct super_block *sb = inode->i_sb;
4813 
4814 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
4815 				EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
4816 		/* we are using combined 48 bit field */
4817 		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
4818 					le32_to_cpu(raw_inode->i_blocks_lo);
4819 		if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
4820 			/* i_blocks represent file system block size */
4821 			return i_blocks  << (inode->i_blkbits - 9);
4822 		} else {
4823 			return i_blocks;
4824 		}
4825 	} else {
4826 		return le32_to_cpu(raw_inode->i_blocks_lo);
4827 	}
4828 }
4829 
4830 struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4831 {
4832 	struct ext4_iloc iloc;
4833 	struct ext4_inode *raw_inode;
4834 	struct ext4_inode_info *ei;
4835 	struct inode *inode;
4836 	journal_t *journal = EXT4_SB(sb)->s_journal;
4837 	long ret;
4838 	int block;
4839 
4840 	inode = iget_locked(sb, ino);
4841 	if (!inode)
4842 		return ERR_PTR(-ENOMEM);
4843 	if (!(inode->i_state & I_NEW))
4844 		return inode;
4845 
4846 	ei = EXT4_I(inode);
4847 	iloc.bh = 0;
4848 
4849 	ret = __ext4_get_inode_loc(inode, &iloc, 0);
4850 	if (ret < 0)
4851 		goto bad_inode;
4852 	raw_inode = ext4_raw_inode(&iloc);
4853 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4854 	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
4855 	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
4856 	if (!(test_opt(inode->i_sb, NO_UID32))) {
4857 		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
4858 		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4859 	}
4860 	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
4861 
4862 	ei->i_state_flags = 0;
4863 	ei->i_dir_start_lookup = 0;
4864 	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4865 	/* We now have enough fields to check if the inode was active or not.
4866 	 * This is needed because nfsd might try to access dead inodes
4867 	 * the test is that same one that e2fsck uses
4868 	 * NeilBrown 1999oct15
4869 	 */
4870 	if (inode->i_nlink == 0) {
4871 		if (inode->i_mode == 0 ||
4872 		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
4873 			/* this inode is deleted */
4874 			ret = -ESTALE;
4875 			goto bad_inode;
4876 		}
4877 		/* The only unlinked inodes we let through here have
4878 		 * valid i_mode and are being read by the orphan
4879 		 * recovery code: that's fine, we're about to complete
4880 		 * the process of deleting those. */
4881 	}
4882 	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
4883 	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4884 	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4885 	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
4886 		ei->i_file_acl |=
4887 			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4888 	inode->i_size = ext4_isize(raw_inode);
4889 	ei->i_disksize = inode->i_size;
4890 #ifdef CONFIG_QUOTA
4891 	ei->i_reserved_quota = 0;
4892 #endif
4893 	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4894 	ei->i_block_group = iloc.block_group;
4895 	ei->i_last_alloc_group = ~0;
4896 	/*
4897 	 * NOTE! The in-memory inode i_data array is in little-endian order
4898 	 * even on big-endian machines: we do NOT byteswap the block numbers!
4899 	 */
4900 	for (block = 0; block < EXT4_N_BLOCKS; block++)
4901 		ei->i_data[block] = raw_inode->i_block[block];
4902 	INIT_LIST_HEAD(&ei->i_orphan);
4903 
4904 	/*
4905 	 * Set transaction id's of transactions that have to be committed
4906 	 * to finish f[data]sync. We set them to currently running transaction
4907 	 * as we cannot be sure that the inode or some of its metadata isn't
4908 	 * part of the transaction - the inode could have been reclaimed and
4909 	 * now it is reread from disk.
4910 	 */
4911 	if (journal) {
4912 		transaction_t *transaction;
4913 		tid_t tid;
4914 
4915 		read_lock(&journal->j_state_lock);
4916 		if (journal->j_running_transaction)
4917 			transaction = journal->j_running_transaction;
4918 		else
4919 			transaction = journal->j_committing_transaction;
4920 		if (transaction)
4921 			tid = transaction->t_tid;
4922 		else
4923 			tid = journal->j_commit_sequence;
4924 		read_unlock(&journal->j_state_lock);
4925 		ei->i_sync_tid = tid;
4926 		ei->i_datasync_tid = tid;
4927 	}
4928 
4929 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4930 		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4931 		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4932 		    EXT4_INODE_SIZE(inode->i_sb)) {
4933 			ret = -EIO;
4934 			goto bad_inode;
4935 		}
4936 		if (ei->i_extra_isize == 0) {
4937 			/* The extra space is currently unused. Use it. */
4938 			ei->i_extra_isize = sizeof(struct ext4_inode) -
4939 					    EXT4_GOOD_OLD_INODE_SIZE;
4940 		} else {
4941 			__le32 *magic = (void *)raw_inode +
4942 					EXT4_GOOD_OLD_INODE_SIZE +
4943 					ei->i_extra_isize;
4944 			if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
4945 				ext4_set_inode_state(inode, EXT4_STATE_XATTR);
4946 		}
4947 	} else
4948 		ei->i_extra_isize = 0;
4949 
4950 	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
4951 	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
4952 	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
4953 	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
4954 
4955 	inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
4956 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4957 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4958 			inode->i_version |=
4959 			(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
4960 	}
4961 
4962 	ret = 0;
4963 	if (ei->i_file_acl &&
4964 	    !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
4965 		EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
4966 				 ei->i_file_acl);
4967 		ret = -EIO;
4968 		goto bad_inode;
4969 	} else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
4970 		if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4971 		    (S_ISLNK(inode->i_mode) &&
4972 		     !ext4_inode_is_fast_symlink(inode)))
4973 			/* Validate extent which is part of inode */
4974 			ret = ext4_ext_check_inode(inode);
4975 	} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4976 		   (S_ISLNK(inode->i_mode) &&
4977 		    !ext4_inode_is_fast_symlink(inode))) {
4978 		/* Validate block references which are part of inode */
4979 		ret = ext4_check_inode_blockref(inode);
4980 	}
4981 	if (ret)
4982 		goto bad_inode;
4983 
4984 	if (S_ISREG(inode->i_mode)) {
4985 		inode->i_op = &ext4_file_inode_operations;
4986 		inode->i_fop = &ext4_file_operations;
4987 		ext4_set_aops(inode);
4988 	} else if (S_ISDIR(inode->i_mode)) {
4989 		inode->i_op = &ext4_dir_inode_operations;
4990 		inode->i_fop = &ext4_dir_operations;
4991 	} else if (S_ISLNK(inode->i_mode)) {
4992 		if (ext4_inode_is_fast_symlink(inode)) {
4993 			inode->i_op = &ext4_fast_symlink_inode_operations;
4994 			nd_terminate_link(ei->i_data, inode->i_size,
4995 				sizeof(ei->i_data) - 1);
4996 		} else {
4997 			inode->i_op = &ext4_symlink_inode_operations;
4998 			ext4_set_aops(inode);
4999 		}
5000 	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
5001 	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
5002 		inode->i_op = &ext4_special_inode_operations;
5003 		if (raw_inode->i_block[0])
5004 			init_special_inode(inode, inode->i_mode,
5005 			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
5006 		else
5007 			init_special_inode(inode, inode->i_mode,
5008 			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
5009 	} else {
5010 		ret = -EIO;
5011 		EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
5012 		goto bad_inode;
5013 	}
5014 	brelse(iloc.bh);
5015 	ext4_set_inode_flags(inode);
5016 	unlock_new_inode(inode);
5017 	return inode;
5018 
5019 bad_inode:
5020 	brelse(iloc.bh);
5021 	iget_failed(inode);
5022 	return ERR_PTR(ret);
5023 }
5024 
5025 static int ext4_inode_blocks_set(handle_t *handle,
5026 				struct ext4_inode *raw_inode,
5027 				struct ext4_inode_info *ei)
5028 {
5029 	struct inode *inode = &(ei->vfs_inode);
5030 	u64 i_blocks = inode->i_blocks;
5031 	struct super_block *sb = inode->i_sb;
5032 
5033 	if (i_blocks <= ~0U) {
5034 		/*
5035 		 * i_blocks can be represnted in a 32 bit variable
5036 		 * as multiple of 512 bytes
5037 		 */
5038 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
5039 		raw_inode->i_blocks_high = 0;
5040 		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
5041 		return 0;
5042 	}
5043 	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
5044 		return -EFBIG;
5045 
5046 	if (i_blocks <= 0xffffffffffffULL) {
5047 		/*
5048 		 * i_blocks can be represented in a 48 bit variable
5049 		 * as multiple of 512 bytes
5050 		 */
5051 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
5052 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
5053 		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
5054 	} else {
5055 		ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
5056 		/* i_block is stored in file system block size */
5057 		i_blocks = i_blocks >> (inode->i_blkbits - 9);
5058 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
5059 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
5060 	}
5061 	return 0;
5062 }
5063 
5064 /*
5065  * Post the struct inode info into an on-disk inode location in the
5066  * buffer-cache.  This gobbles the caller's reference to the
5067  * buffer_head in the inode location struct.
5068  *
5069  * The caller must have write access to iloc->bh.
5070  */
5071 static int ext4_do_update_inode(handle_t *handle,
5072 				struct inode *inode,
5073 				struct ext4_iloc *iloc)
5074 {
5075 	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
5076 	struct ext4_inode_info *ei = EXT4_I(inode);
5077 	struct buffer_head *bh = iloc->bh;
5078 	int err = 0, rc, block;
5079 
5080 	/* For fields not not tracking in the in-memory inode,
5081 	 * initialise them to zero for new inodes. */
5082 	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
5083 		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
5084 
5085 	ext4_get_inode_flags(ei);
5086 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
5087 	if (!(test_opt(inode->i_sb, NO_UID32))) {
5088 		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
5089 		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
5090 /*
5091  * Fix up interoperability with old kernels. Otherwise, old inodes get
5092  * re-used with the upper 16 bits of the uid/gid intact
5093  */
5094 		if (!ei->i_dtime) {
5095 			raw_inode->i_uid_high =
5096 				cpu_to_le16(high_16_bits(inode->i_uid));
5097 			raw_inode->i_gid_high =
5098 				cpu_to_le16(high_16_bits(inode->i_gid));
5099 		} else {
5100 			raw_inode->i_uid_high = 0;
5101 			raw_inode->i_gid_high = 0;
5102 		}
5103 	} else {
5104 		raw_inode->i_uid_low =
5105 			cpu_to_le16(fs_high2lowuid(inode->i_uid));
5106 		raw_inode->i_gid_low =
5107 			cpu_to_le16(fs_high2lowgid(inode->i_gid));
5108 		raw_inode->i_uid_high = 0;
5109 		raw_inode->i_gid_high = 0;
5110 	}
5111 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
5112 
5113 	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
5114 	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
5115 	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
5116 	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
5117 
5118 	if (ext4_inode_blocks_set(handle, raw_inode, ei))
5119 		goto out_brelse;
5120 	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
5121 	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
5122 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
5123 	    cpu_to_le32(EXT4_OS_HURD))
5124 		raw_inode->i_file_acl_high =
5125 			cpu_to_le16(ei->i_file_acl >> 32);
5126 	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
5127 	ext4_isize_set(raw_inode, ei->i_disksize);
5128 	if (ei->i_disksize > 0x7fffffffULL) {
5129 		struct super_block *sb = inode->i_sb;
5130 		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
5131 				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
5132 				EXT4_SB(sb)->s_es->s_rev_level ==
5133 				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
5134 			/* If this is the first large file
5135 			 * created, add a flag to the superblock.
5136 			 */
5137 			err = ext4_journal_get_write_access(handle,
5138 					EXT4_SB(sb)->s_sbh);
5139 			if (err)
5140 				goto out_brelse;
5141 			ext4_update_dynamic_rev(sb);
5142 			EXT4_SET_RO_COMPAT_FEATURE(sb,
5143 					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
5144 			sb->s_dirt = 1;
5145 			ext4_handle_sync(handle);
5146 			err = ext4_handle_dirty_metadata(handle, NULL,
5147 					EXT4_SB(sb)->s_sbh);
5148 		}
5149 	}
5150 	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
5151 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
5152 		if (old_valid_dev(inode->i_rdev)) {
5153 			raw_inode->i_block[0] =
5154 				cpu_to_le32(old_encode_dev(inode->i_rdev));
5155 			raw_inode->i_block[1] = 0;
5156 		} else {
5157 			raw_inode->i_block[0] = 0;
5158 			raw_inode->i_block[1] =
5159 				cpu_to_le32(new_encode_dev(inode->i_rdev));
5160 			raw_inode->i_block[2] = 0;
5161 		}
5162 	} else
5163 		for (block = 0; block < EXT4_N_BLOCKS; block++)
5164 			raw_inode->i_block[block] = ei->i_data[block];
5165 
5166 	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
5167 	if (ei->i_extra_isize) {
5168 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
5169 			raw_inode->i_version_hi =
5170 			cpu_to_le32(inode->i_version >> 32);
5171 		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
5172 	}
5173 
5174 	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
5175 	rc = ext4_handle_dirty_metadata(handle, NULL, bh);
5176 	if (!err)
5177 		err = rc;
5178 	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
5179 
5180 	ext4_update_inode_fsync_trans(handle, inode, 0);
5181 out_brelse:
5182 	brelse(bh);
5183 	ext4_std_error(inode->i_sb, err);
5184 	return err;
5185 }
5186 
5187 /*
5188  * ext4_write_inode()
5189  *
5190  * We are called from a few places:
5191  *
5192  * - Within generic_file_write() for O_SYNC files.
5193  *   Here, there will be no transaction running. We wait for any running
5194  *   trasnaction to commit.
5195  *
5196  * - Within sys_sync(), kupdate and such.
5197  *   We wait on commit, if tol to.
5198  *
5199  * - Within prune_icache() (PF_MEMALLOC == true)
5200  *   Here we simply return.  We can't afford to block kswapd on the
5201  *   journal commit.
5202  *
5203  * In all cases it is actually safe for us to return without doing anything,
5204  * because the inode has been copied into a raw inode buffer in
5205  * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
5206  * knfsd.
5207  *
5208  * Note that we are absolutely dependent upon all inode dirtiers doing the
5209  * right thing: they *must* call mark_inode_dirty() after dirtying info in
5210  * which we are interested.
5211  *
5212  * It would be a bug for them to not do this.  The code:
5213  *
5214  *	mark_inode_dirty(inode)
5215  *	stuff();
5216  *	inode->i_size = expr;
5217  *
5218  * is in error because a kswapd-driven write_inode() could occur while
5219  * `stuff()' is running, and the new i_size will be lost.  Plus the inode
5220  * will no longer be on the superblock's dirty inode list.
5221  */
5222 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
5223 {
5224 	int err;
5225 
5226 	if (current->flags & PF_MEMALLOC)
5227 		return 0;
5228 
5229 	if (EXT4_SB(inode->i_sb)->s_journal) {
5230 		if (ext4_journal_current_handle()) {
5231 			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
5232 			dump_stack();
5233 			return -EIO;
5234 		}
5235 
5236 		if (wbc->sync_mode != WB_SYNC_ALL)
5237 			return 0;
5238 
5239 		err = ext4_force_commit(inode->i_sb);
5240 	} else {
5241 		struct ext4_iloc iloc;
5242 
5243 		err = __ext4_get_inode_loc(inode, &iloc, 0);
5244 		if (err)
5245 			return err;
5246 		if (wbc->sync_mode == WB_SYNC_ALL)
5247 			sync_dirty_buffer(iloc.bh);
5248 		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
5249 			EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
5250 					 "IO error syncing inode");
5251 			err = -EIO;
5252 		}
5253 		brelse(iloc.bh);
5254 	}
5255 	return err;
5256 }
5257 
5258 /*
5259  * ext4_setattr()
5260  *
5261  * Called from notify_change.
5262  *
5263  * We want to trap VFS attempts to truncate the file as soon as
5264  * possible.  In particular, we want to make sure that when the VFS
5265  * shrinks i_size, we put the inode on the orphan list and modify
5266  * i_disksize immediately, so that during the subsequent flushing of
5267  * dirty pages and freeing of disk blocks, we can guarantee that any
5268  * commit will leave the blocks being flushed in an unused state on
5269  * disk.  (On recovery, the inode will get truncated and the blocks will
5270  * be freed, so we have a strong guarantee that no future commit will
5271  * leave these blocks visible to the user.)
5272  *
5273  * Another thing we have to assure is that if we are in ordered mode
5274  * and inode is still attached to the committing transaction, we must
5275  * we start writeout of all the dirty pages which are being truncated.
5276  * This way we are sure that all the data written in the previous
5277  * transaction are already on disk (truncate waits for pages under
5278  * writeback).
5279  *
5280  * Called with inode->i_mutex down.
5281  */
5282 int ext4_setattr(struct dentry *dentry, struct iattr *attr)
5283 {
5284 	struct inode *inode = dentry->d_inode;
5285 	int error, rc = 0;
5286 	int orphan = 0;
5287 	const unsigned int ia_valid = attr->ia_valid;
5288 
5289 	error = inode_change_ok(inode, attr);
5290 	if (error)
5291 		return error;
5292 
5293 	if (is_quota_modification(inode, attr))
5294 		dquot_initialize(inode);
5295 	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
5296 		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
5297 		handle_t *handle;
5298 
5299 		/* (user+group)*(old+new) structure, inode write (sb,
5300 		 * inode block, ? - but truncate inode update has it) */
5301 		handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
5302 					EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
5303 		if (IS_ERR(handle)) {
5304 			error = PTR_ERR(handle);
5305 			goto err_out;
5306 		}
5307 		error = dquot_transfer(inode, attr);
5308 		if (error) {
5309 			ext4_journal_stop(handle);
5310 			return error;
5311 		}
5312 		/* Update corresponding info in inode so that everything is in
5313 		 * one transaction */
5314 		if (attr->ia_valid & ATTR_UID)
5315 			inode->i_uid = attr->ia_uid;
5316 		if (attr->ia_valid & ATTR_GID)
5317 			inode->i_gid = attr->ia_gid;
5318 		error = ext4_mark_inode_dirty(handle, inode);
5319 		ext4_journal_stop(handle);
5320 	}
5321 
5322 	if (attr->ia_valid & ATTR_SIZE) {
5323 		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
5324 			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5325 
5326 			if (attr->ia_size > sbi->s_bitmap_maxbytes)
5327 				return -EFBIG;
5328 		}
5329 	}
5330 
5331 	if (S_ISREG(inode->i_mode) &&
5332 	    attr->ia_valid & ATTR_SIZE &&
5333 	    (attr->ia_size < inode->i_size ||
5334 	     (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)))) {
5335 		handle_t *handle;
5336 
5337 		handle = ext4_journal_start(inode, 3);
5338 		if (IS_ERR(handle)) {
5339 			error = PTR_ERR(handle);
5340 			goto err_out;
5341 		}
5342 		if (ext4_handle_valid(handle)) {
5343 			error = ext4_orphan_add(handle, inode);
5344 			orphan = 1;
5345 		}
5346 		EXT4_I(inode)->i_disksize = attr->ia_size;
5347 		rc = ext4_mark_inode_dirty(handle, inode);
5348 		if (!error)
5349 			error = rc;
5350 		ext4_journal_stop(handle);
5351 
5352 		if (ext4_should_order_data(inode)) {
5353 			error = ext4_begin_ordered_truncate(inode,
5354 							    attr->ia_size);
5355 			if (error) {
5356 				/* Do as much error cleanup as possible */
5357 				handle = ext4_journal_start(inode, 3);
5358 				if (IS_ERR(handle)) {
5359 					ext4_orphan_del(NULL, inode);
5360 					goto err_out;
5361 				}
5362 				ext4_orphan_del(handle, inode);
5363 				orphan = 0;
5364 				ext4_journal_stop(handle);
5365 				goto err_out;
5366 			}
5367 		}
5368 		/* ext4_truncate will clear the flag */
5369 		if ((ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)))
5370 			ext4_truncate(inode);
5371 	}
5372 
5373 	if ((attr->ia_valid & ATTR_SIZE) &&
5374 	    attr->ia_size != i_size_read(inode))
5375 		rc = vmtruncate(inode, attr->ia_size);
5376 
5377 	if (!rc) {
5378 		setattr_copy(inode, attr);
5379 		mark_inode_dirty(inode);
5380 	}
5381 
5382 	/*
5383 	 * If the call to ext4_truncate failed to get a transaction handle at
5384 	 * all, we need to clean up the in-core orphan list manually.
5385 	 */
5386 	if (orphan && inode->i_nlink)
5387 		ext4_orphan_del(NULL, inode);
5388 
5389 	if (!rc && (ia_valid & ATTR_MODE))
5390 		rc = ext4_acl_chmod(inode);
5391 
5392 err_out:
5393 	ext4_std_error(inode->i_sb, error);
5394 	if (!error)
5395 		error = rc;
5396 	return error;
5397 }
5398 
5399 int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
5400 		 struct kstat *stat)
5401 {
5402 	struct inode *inode;
5403 	unsigned long delalloc_blocks;
5404 
5405 	inode = dentry->d_inode;
5406 	generic_fillattr(inode, stat);
5407 
5408 	/*
5409 	 * We can't update i_blocks if the block allocation is delayed
5410 	 * otherwise in the case of system crash before the real block
5411 	 * allocation is done, we will have i_blocks inconsistent with
5412 	 * on-disk file blocks.
5413 	 * We always keep i_blocks updated together with real
5414 	 * allocation. But to not confuse with user, stat
5415 	 * will return the blocks that include the delayed allocation
5416 	 * blocks for this file.
5417 	 */
5418 	delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
5419 
5420 	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
5421 	return 0;
5422 }
5423 
5424 static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
5425 				      int chunk)
5426 {
5427 	int indirects;
5428 
5429 	/* if nrblocks are contiguous */
5430 	if (chunk) {
5431 		/*
5432 		 * With N contiguous data blocks, it need at most
5433 		 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks
5434 		 * 2 dindirect blocks
5435 		 * 1 tindirect block
5436 		 */
5437 		indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb);
5438 		return indirects + 3;
5439 	}
5440 	/*
5441 	 * if nrblocks are not contiguous, worse case, each block touch
5442 	 * a indirect block, and each indirect block touch a double indirect
5443 	 * block, plus a triple indirect block
5444 	 */
5445 	indirects = nrblocks * 2 + 1;
5446 	return indirects;
5447 }
5448 
5449 static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
5450 {
5451 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5452 		return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
5453 	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
5454 }
5455 
5456 /*
5457  * Account for index blocks, block groups bitmaps and block group
5458  * descriptor blocks if modify datablocks and index blocks
5459  * worse case, the indexs blocks spread over different block groups
5460  *
5461  * If datablocks are discontiguous, they are possible to spread over
5462  * different block groups too. If they are contiuguous, with flexbg,
5463  * they could still across block group boundary.
5464  *
5465  * Also account for superblock, inode, quota and xattr blocks
5466  */
5467 static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
5468 {
5469 	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
5470 	int gdpblocks;
5471 	int idxblocks;
5472 	int ret = 0;
5473 
5474 	/*
5475 	 * How many index blocks need to touch to modify nrblocks?
5476 	 * The "Chunk" flag indicating whether the nrblocks is
5477 	 * physically contiguous on disk
5478 	 *
5479 	 * For Direct IO and fallocate, they calls get_block to allocate
5480 	 * one single extent at a time, so they could set the "Chunk" flag
5481 	 */
5482 	idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
5483 
5484 	ret = idxblocks;
5485 
5486 	/*
5487 	 * Now let's see how many group bitmaps and group descriptors need
5488 	 * to account
5489 	 */
5490 	groups = idxblocks;
5491 	if (chunk)
5492 		groups += 1;
5493 	else
5494 		groups += nrblocks;
5495 
5496 	gdpblocks = groups;
5497 	if (groups > ngroups)
5498 		groups = ngroups;
5499 	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
5500 		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
5501 
5502 	/* bitmaps and block group descriptor blocks */
5503 	ret += groups + gdpblocks;
5504 
5505 	/* Blocks for super block, inode, quota and xattr blocks */
5506 	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
5507 
5508 	return ret;
5509 }
5510 
5511 /*
5512  * Calulate the total number of credits to reserve to fit
5513  * the modification of a single pages into a single transaction,
5514  * which may include multiple chunks of block allocations.
5515  *
5516  * This could be called via ext4_write_begin()
5517  *
5518  * We need to consider the worse case, when
5519  * one new block per extent.
5520  */
5521 int ext4_writepage_trans_blocks(struct inode *inode)
5522 {
5523 	int bpp = ext4_journal_blocks_per_page(inode);
5524 	int ret;
5525 
5526 	ret = ext4_meta_trans_blocks(inode, bpp, 0);
5527 
5528 	/* Account for data blocks for journalled mode */
5529 	if (ext4_should_journal_data(inode))
5530 		ret += bpp;
5531 	return ret;
5532 }
5533 
5534 /*
5535  * Calculate the journal credits for a chunk of data modification.
5536  *
5537  * This is called from DIO, fallocate or whoever calling
5538  * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
5539  *
5540  * journal buffers for data blocks are not included here, as DIO
5541  * and fallocate do no need to journal data buffers.
5542  */
5543 int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
5544 {
5545 	return ext4_meta_trans_blocks(inode, nrblocks, 1);
5546 }
5547 
5548 /*
5549  * The caller must have previously called ext4_reserve_inode_write().
5550  * Give this, we know that the caller already has write access to iloc->bh.
5551  */
5552 int ext4_mark_iloc_dirty(handle_t *handle,
5553 			 struct inode *inode, struct ext4_iloc *iloc)
5554 {
5555 	int err = 0;
5556 
5557 	if (test_opt(inode->i_sb, I_VERSION))
5558 		inode_inc_iversion(inode);
5559 
5560 	/* the do_update_inode consumes one bh->b_count */
5561 	get_bh(iloc->bh);
5562 
5563 	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
5564 	err = ext4_do_update_inode(handle, inode, iloc);
5565 	put_bh(iloc->bh);
5566 	return err;
5567 }
5568 
5569 /*
5570  * On success, We end up with an outstanding reference count against
5571  * iloc->bh.  This _must_ be cleaned up later.
5572  */
5573 
5574 int
5575 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
5576 			 struct ext4_iloc *iloc)
5577 {
5578 	int err;
5579 
5580 	err = ext4_get_inode_loc(inode, iloc);
5581 	if (!err) {
5582 		BUFFER_TRACE(iloc->bh, "get_write_access");
5583 		err = ext4_journal_get_write_access(handle, iloc->bh);
5584 		if (err) {
5585 			brelse(iloc->bh);
5586 			iloc->bh = NULL;
5587 		}
5588 	}
5589 	ext4_std_error(inode->i_sb, err);
5590 	return err;
5591 }
5592 
5593 /*
5594  * Expand an inode by new_extra_isize bytes.
5595  * Returns 0 on success or negative error number on failure.
5596  */
5597 static int ext4_expand_extra_isize(struct inode *inode,
5598 				   unsigned int new_extra_isize,
5599 				   struct ext4_iloc iloc,
5600 				   handle_t *handle)
5601 {
5602 	struct ext4_inode *raw_inode;
5603 	struct ext4_xattr_ibody_header *header;
5604 
5605 	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
5606 		return 0;
5607 
5608 	raw_inode = ext4_raw_inode(&iloc);
5609 
5610 	header = IHDR(inode, raw_inode);
5611 
5612 	/* No extended attributes present */
5613 	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
5614 	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
5615 		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
5616 			new_extra_isize);
5617 		EXT4_I(inode)->i_extra_isize = new_extra_isize;
5618 		return 0;
5619 	}
5620 
5621 	/* try to expand with EAs present */
5622 	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
5623 					  raw_inode, handle);
5624 }
5625 
5626 /*
5627  * What we do here is to mark the in-core inode as clean with respect to inode
5628  * dirtiness (it may still be data-dirty).
5629  * This means that the in-core inode may be reaped by prune_icache
5630  * without having to perform any I/O.  This is a very good thing,
5631  * because *any* task may call prune_icache - even ones which
5632  * have a transaction open against a different journal.
5633  *
5634  * Is this cheating?  Not really.  Sure, we haven't written the
5635  * inode out, but prune_icache isn't a user-visible syncing function.
5636  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
5637  * we start and wait on commits.
5638  *
5639  * Is this efficient/effective?  Well, we're being nice to the system
5640  * by cleaning up our inodes proactively so they can be reaped
5641  * without I/O.  But we are potentially leaving up to five seconds'
5642  * worth of inodes floating about which prune_icache wants us to
5643  * write out.  One way to fix that would be to get prune_icache()
5644  * to do a write_super() to free up some memory.  It has the desired
5645  * effect.
5646  */
5647 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
5648 {
5649 	struct ext4_iloc iloc;
5650 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5651 	static unsigned int mnt_count;
5652 	int err, ret;
5653 
5654 	might_sleep();
5655 	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
5656 	err = ext4_reserve_inode_write(handle, inode, &iloc);
5657 	if (ext4_handle_valid(handle) &&
5658 	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
5659 	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
5660 		/*
5661 		 * We need extra buffer credits since we may write into EA block
5662 		 * with this same handle. If journal_extend fails, then it will
5663 		 * only result in a minor loss of functionality for that inode.
5664 		 * If this is felt to be critical, then e2fsck should be run to
5665 		 * force a large enough s_min_extra_isize.
5666 		 */
5667 		if ((jbd2_journal_extend(handle,
5668 			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
5669 			ret = ext4_expand_extra_isize(inode,
5670 						      sbi->s_want_extra_isize,
5671 						      iloc, handle);
5672 			if (ret) {
5673 				ext4_set_inode_state(inode,
5674 						     EXT4_STATE_NO_EXPAND);
5675 				if (mnt_count !=
5676 					le16_to_cpu(sbi->s_es->s_mnt_count)) {
5677 					ext4_warning(inode->i_sb,
5678 					"Unable to expand inode %lu. Delete"
5679 					" some EAs or run e2fsck.",
5680 					inode->i_ino);
5681 					mnt_count =
5682 					  le16_to_cpu(sbi->s_es->s_mnt_count);
5683 				}
5684 			}
5685 		}
5686 	}
5687 	if (!err)
5688 		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
5689 	return err;
5690 }
5691 
5692 /*
5693  * ext4_dirty_inode() is called from __mark_inode_dirty()
5694  *
5695  * We're really interested in the case where a file is being extended.
5696  * i_size has been changed by generic_commit_write() and we thus need
5697  * to include the updated inode in the current transaction.
5698  *
5699  * Also, dquot_alloc_block() will always dirty the inode when blocks
5700  * are allocated to the file.
5701  *
5702  * If the inode is marked synchronous, we don't honour that here - doing
5703  * so would cause a commit on atime updates, which we don't bother doing.
5704  * We handle synchronous inodes at the highest possible level.
5705  */
5706 void ext4_dirty_inode(struct inode *inode)
5707 {
5708 	handle_t *handle;
5709 
5710 	handle = ext4_journal_start(inode, 2);
5711 	if (IS_ERR(handle))
5712 		goto out;
5713 
5714 	ext4_mark_inode_dirty(handle, inode);
5715 
5716 	ext4_journal_stop(handle);
5717 out:
5718 	return;
5719 }
5720 
5721 #if 0
5722 /*
5723  * Bind an inode's backing buffer_head into this transaction, to prevent
5724  * it from being flushed to disk early.  Unlike
5725  * ext4_reserve_inode_write, this leaves behind no bh reference and
5726  * returns no iloc structure, so the caller needs to repeat the iloc
5727  * lookup to mark the inode dirty later.
5728  */
5729 static int ext4_pin_inode(handle_t *handle, struct inode *inode)
5730 {
5731 	struct ext4_iloc iloc;
5732 
5733 	int err = 0;
5734 	if (handle) {
5735 		err = ext4_get_inode_loc(inode, &iloc);
5736 		if (!err) {
5737 			BUFFER_TRACE(iloc.bh, "get_write_access");
5738 			err = jbd2_journal_get_write_access(handle, iloc.bh);
5739 			if (!err)
5740 				err = ext4_handle_dirty_metadata(handle,
5741 								 NULL,
5742 								 iloc.bh);
5743 			brelse(iloc.bh);
5744 		}
5745 	}
5746 	ext4_std_error(inode->i_sb, err);
5747 	return err;
5748 }
5749 #endif
5750 
5751 int ext4_change_inode_journal_flag(struct inode *inode, int val)
5752 {
5753 	journal_t *journal;
5754 	handle_t *handle;
5755 	int err;
5756 
5757 	/*
5758 	 * We have to be very careful here: changing a data block's
5759 	 * journaling status dynamically is dangerous.  If we write a
5760 	 * data block to the journal, change the status and then delete
5761 	 * that block, we risk forgetting to revoke the old log record
5762 	 * from the journal and so a subsequent replay can corrupt data.
5763 	 * So, first we make sure that the journal is empty and that
5764 	 * nobody is changing anything.
5765 	 */
5766 
5767 	journal = EXT4_JOURNAL(inode);
5768 	if (!journal)
5769 		return 0;
5770 	if (is_journal_aborted(journal))
5771 		return -EROFS;
5772 
5773 	jbd2_journal_lock_updates(journal);
5774 	jbd2_journal_flush(journal);
5775 
5776 	/*
5777 	 * OK, there are no updates running now, and all cached data is
5778 	 * synced to disk.  We are now in a completely consistent state
5779 	 * which doesn't have anything in the journal, and we know that
5780 	 * no filesystem updates are running, so it is safe to modify
5781 	 * the inode's in-core data-journaling state flag now.
5782 	 */
5783 
5784 	if (val)
5785 		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
5786 	else
5787 		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
5788 	ext4_set_aops(inode);
5789 
5790 	jbd2_journal_unlock_updates(journal);
5791 
5792 	/* Finally we can mark the inode as dirty. */
5793 
5794 	handle = ext4_journal_start(inode, 1);
5795 	if (IS_ERR(handle))
5796 		return PTR_ERR(handle);
5797 
5798 	err = ext4_mark_inode_dirty(handle, inode);
5799 	ext4_handle_sync(handle);
5800 	ext4_journal_stop(handle);
5801 	ext4_std_error(inode->i_sb, err);
5802 
5803 	return err;
5804 }
5805 
5806 static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
5807 {
5808 	return !buffer_mapped(bh);
5809 }
5810 
5811 int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5812 {
5813 	struct page *page = vmf->page;
5814 	loff_t size;
5815 	unsigned long len;
5816 	int ret = -EINVAL;
5817 	void *fsdata;
5818 	struct file *file = vma->vm_file;
5819 	struct inode *inode = file->f_path.dentry->d_inode;
5820 	struct address_space *mapping = inode->i_mapping;
5821 
5822 	/*
5823 	 * Get i_alloc_sem to stop truncates messing with the inode. We cannot
5824 	 * get i_mutex because we are already holding mmap_sem.
5825 	 */
5826 	down_read(&inode->i_alloc_sem);
5827 	size = i_size_read(inode);
5828 	if (page->mapping != mapping || size <= page_offset(page)
5829 	    || !PageUptodate(page)) {
5830 		/* page got truncated from under us? */
5831 		goto out_unlock;
5832 	}
5833 	ret = 0;
5834 	if (PageMappedToDisk(page))
5835 		goto out_unlock;
5836 
5837 	if (page->index == size >> PAGE_CACHE_SHIFT)
5838 		len = size & ~PAGE_CACHE_MASK;
5839 	else
5840 		len = PAGE_CACHE_SIZE;
5841 
5842 	lock_page(page);
5843 	/*
5844 	 * return if we have all the buffers mapped. This avoid
5845 	 * the need to call write_begin/write_end which does a
5846 	 * journal_start/journal_stop which can block and take
5847 	 * long time
5848 	 */
5849 	if (page_has_buffers(page)) {
5850 		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
5851 					ext4_bh_unmapped)) {
5852 			unlock_page(page);
5853 			goto out_unlock;
5854 		}
5855 	}
5856 	unlock_page(page);
5857 	/*
5858 	 * OK, we need to fill the hole... Do write_begin write_end
5859 	 * to do block allocation/reservation.We are not holding
5860 	 * inode.i__mutex here. That allow * parallel write_begin,
5861 	 * write_end call. lock_page prevent this from happening
5862 	 * on the same page though
5863 	 */
5864 	ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
5865 			len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
5866 	if (ret < 0)
5867 		goto out_unlock;
5868 	ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
5869 			len, len, page, fsdata);
5870 	if (ret < 0)
5871 		goto out_unlock;
5872 	ret = 0;
5873 out_unlock:
5874 	if (ret)
5875 		ret = VM_FAULT_SIGBUS;
5876 	up_read(&inode->i_alloc_sem);
5877 	return ret;
5878 }
5879