xref: /openbmc/linux/fs/ext4/inode.c (revision 1fa6ac37)
1 /*
2  *  linux/fs/ext4/inode.c
3  *
4  * Copyright (C) 1992, 1993, 1994, 1995
5  * Remy Card (card@masi.ibp.fr)
6  * Laboratoire MASI - Institut Blaise Pascal
7  * Universite Pierre et Marie Curie (Paris VI)
8  *
9  *  from
10  *
11  *  linux/fs/minix/inode.c
12  *
13  *  Copyright (C) 1991, 1992  Linus Torvalds
14  *
15  *  Goal-directed block allocation by Stephen Tweedie
16  *	(sct@redhat.com), 1993, 1998
17  *  Big-endian to little-endian byte-swapping/bitmaps by
18  *        David S. Miller (davem@caip.rutgers.edu), 1995
19  *  64-bit file support on 64-bit platforms by Jakub Jelinek
20  *	(jj@sunsite.ms.mff.cuni.cz)
21  *
22  *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
23  */
24 
25 #include <linux/module.h>
26 #include <linux/fs.h>
27 #include <linux/time.h>
28 #include <linux/jbd2.h>
29 #include <linux/highuid.h>
30 #include <linux/pagemap.h>
31 #include <linux/quotaops.h>
32 #include <linux/string.h>
33 #include <linux/buffer_head.h>
34 #include <linux/writeback.h>
35 #include <linux/pagevec.h>
36 #include <linux/mpage.h>
37 #include <linux/namei.h>
38 #include <linux/uio.h>
39 #include <linux/bio.h>
40 #include <linux/workqueue.h>
41 #include <linux/kernel.h>
42 #include <linux/slab.h>
43 
44 #include "ext4_jbd2.h"
45 #include "xattr.h"
46 #include "acl.h"
47 #include "ext4_extents.h"
48 
49 #include <trace/events/ext4.h>
50 
51 #define MPAGE_DA_EXTENT_TAIL 0x01
52 
53 static inline int ext4_begin_ordered_truncate(struct inode *inode,
54 					      loff_t new_size)
55 {
56 	return jbd2_journal_begin_ordered_truncate(
57 					EXT4_SB(inode->i_sb)->s_journal,
58 					&EXT4_I(inode)->jinode,
59 					new_size);
60 }
61 
62 static void ext4_invalidatepage(struct page *page, unsigned long offset);
63 
64 /*
65  * Test whether an inode is a fast symlink.
66  */
67 static int ext4_inode_is_fast_symlink(struct inode *inode)
68 {
69 	int ea_blocks = EXT4_I(inode)->i_file_acl ?
70 		(inode->i_sb->s_blocksize >> 9) : 0;
71 
72 	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
73 }
74 
75 /*
76  * Work out how many blocks we need to proceed with the next chunk of a
77  * truncate transaction.
78  */
79 static unsigned long blocks_for_truncate(struct inode *inode)
80 {
81 	ext4_lblk_t needed;
82 
83 	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
84 
85 	/* Give ourselves just enough room to cope with inodes in which
86 	 * i_blocks is corrupt: we've seen disk corruptions in the past
87 	 * which resulted in random data in an inode which looked enough
88 	 * like a regular file for ext4 to try to delete it.  Things
89 	 * will go a bit crazy if that happens, but at least we should
90 	 * try not to panic the whole kernel. */
91 	if (needed < 2)
92 		needed = 2;
93 
94 	/* But we need to bound the transaction so we don't overflow the
95 	 * journal. */
96 	if (needed > EXT4_MAX_TRANS_DATA)
97 		needed = EXT4_MAX_TRANS_DATA;
98 
99 	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
100 }
101 
102 /*
103  * Truncate transactions can be complex and absolutely huge.  So we need to
104  * be able to restart the transaction at a conventient checkpoint to make
105  * sure we don't overflow the journal.
106  *
107  * start_transaction gets us a new handle for a truncate transaction,
108  * and extend_transaction tries to extend the existing one a bit.  If
109  * extend fails, we need to propagate the failure up and restart the
110  * transaction in the top-level truncate loop. --sct
111  */
112 static handle_t *start_transaction(struct inode *inode)
113 {
114 	handle_t *result;
115 
116 	result = ext4_journal_start(inode, blocks_for_truncate(inode));
117 	if (!IS_ERR(result))
118 		return result;
119 
120 	ext4_std_error(inode->i_sb, PTR_ERR(result));
121 	return result;
122 }
123 
124 /*
125  * Try to extend this transaction for the purposes of truncation.
126  *
127  * Returns 0 if we managed to create more room.  If we can't create more
128  * room, and the transaction must be restarted we return 1.
129  */
130 static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
131 {
132 	if (!ext4_handle_valid(handle))
133 		return 0;
134 	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
135 		return 0;
136 	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
137 		return 0;
138 	return 1;
139 }
140 
141 /*
142  * Restart the transaction associated with *handle.  This does a commit,
143  * so before we call here everything must be consistently dirtied against
144  * this transaction.
145  */
146 int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
147 				 int nblocks)
148 {
149 	int ret;
150 
151 	/*
152 	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
153 	 * moment, get_block can be called only for blocks inside i_size since
154 	 * page cache has been already dropped and writes are blocked by
155 	 * i_mutex. So we can safely drop the i_data_sem here.
156 	 */
157 	BUG_ON(EXT4_JOURNAL(inode) == NULL);
158 	jbd_debug(2, "restarting handle %p\n", handle);
159 	up_write(&EXT4_I(inode)->i_data_sem);
160 	ret = ext4_journal_restart(handle, blocks_for_truncate(inode));
161 	down_write(&EXT4_I(inode)->i_data_sem);
162 	ext4_discard_preallocations(inode);
163 
164 	return ret;
165 }
166 
167 /*
168  * Called at the last iput() if i_nlink is zero.
169  */
170 void ext4_delete_inode(struct inode *inode)
171 {
172 	handle_t *handle;
173 	int err;
174 
175 	if (!is_bad_inode(inode))
176 		dquot_initialize(inode);
177 
178 	if (ext4_should_order_data(inode))
179 		ext4_begin_ordered_truncate(inode, 0);
180 	truncate_inode_pages(&inode->i_data, 0);
181 
182 	if (is_bad_inode(inode))
183 		goto no_delete;
184 
185 	handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
186 	if (IS_ERR(handle)) {
187 		ext4_std_error(inode->i_sb, PTR_ERR(handle));
188 		/*
189 		 * If we're going to skip the normal cleanup, we still need to
190 		 * make sure that the in-core orphan linked list is properly
191 		 * cleaned up.
192 		 */
193 		ext4_orphan_del(NULL, inode);
194 		goto no_delete;
195 	}
196 
197 	if (IS_SYNC(inode))
198 		ext4_handle_sync(handle);
199 	inode->i_size = 0;
200 	err = ext4_mark_inode_dirty(handle, inode);
201 	if (err) {
202 		ext4_warning(inode->i_sb,
203 			     "couldn't mark inode dirty (err %d)", err);
204 		goto stop_handle;
205 	}
206 	if (inode->i_blocks)
207 		ext4_truncate(inode);
208 
209 	/*
210 	 * ext4_ext_truncate() doesn't reserve any slop when it
211 	 * restarts journal transactions; therefore there may not be
212 	 * enough credits left in the handle to remove the inode from
213 	 * the orphan list and set the dtime field.
214 	 */
215 	if (!ext4_handle_has_enough_credits(handle, 3)) {
216 		err = ext4_journal_extend(handle, 3);
217 		if (err > 0)
218 			err = ext4_journal_restart(handle, 3);
219 		if (err != 0) {
220 			ext4_warning(inode->i_sb,
221 				     "couldn't extend journal (err %d)", err);
222 		stop_handle:
223 			ext4_journal_stop(handle);
224 			goto no_delete;
225 		}
226 	}
227 
228 	/*
229 	 * Kill off the orphan record which ext4_truncate created.
230 	 * AKPM: I think this can be inside the above `if'.
231 	 * Note that ext4_orphan_del() has to be able to cope with the
232 	 * deletion of a non-existent orphan - this is because we don't
233 	 * know if ext4_truncate() actually created an orphan record.
234 	 * (Well, we could do this if we need to, but heck - it works)
235 	 */
236 	ext4_orphan_del(handle, inode);
237 	EXT4_I(inode)->i_dtime	= get_seconds();
238 
239 	/*
240 	 * One subtle ordering requirement: if anything has gone wrong
241 	 * (transaction abort, IO errors, whatever), then we can still
242 	 * do these next steps (the fs will already have been marked as
243 	 * having errors), but we can't free the inode if the mark_dirty
244 	 * fails.
245 	 */
246 	if (ext4_mark_inode_dirty(handle, inode))
247 		/* If that failed, just do the required in-core inode clear. */
248 		clear_inode(inode);
249 	else
250 		ext4_free_inode(handle, inode);
251 	ext4_journal_stop(handle);
252 	return;
253 no_delete:
254 	clear_inode(inode);	/* We must guarantee clearing of inode... */
255 }
256 
257 typedef struct {
258 	__le32	*p;
259 	__le32	key;
260 	struct buffer_head *bh;
261 } Indirect;
262 
263 static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
264 {
265 	p->key = *(p->p = v);
266 	p->bh = bh;
267 }
268 
269 /**
270  *	ext4_block_to_path - parse the block number into array of offsets
271  *	@inode: inode in question (we are only interested in its superblock)
272  *	@i_block: block number to be parsed
273  *	@offsets: array to store the offsets in
274  *	@boundary: set this non-zero if the referred-to block is likely to be
275  *	       followed (on disk) by an indirect block.
276  *
277  *	To store the locations of file's data ext4 uses a data structure common
278  *	for UNIX filesystems - tree of pointers anchored in the inode, with
279  *	data blocks at leaves and indirect blocks in intermediate nodes.
280  *	This function translates the block number into path in that tree -
281  *	return value is the path length and @offsets[n] is the offset of
282  *	pointer to (n+1)th node in the nth one. If @block is out of range
283  *	(negative or too large) warning is printed and zero returned.
284  *
285  *	Note: function doesn't find node addresses, so no IO is needed. All
286  *	we need to know is the capacity of indirect blocks (taken from the
287  *	inode->i_sb).
288  */
289 
290 /*
291  * Portability note: the last comparison (check that we fit into triple
292  * indirect block) is spelled differently, because otherwise on an
293  * architecture with 32-bit longs and 8Kb pages we might get into trouble
294  * if our filesystem had 8Kb blocks. We might use long long, but that would
295  * kill us on x86. Oh, well, at least the sign propagation does not matter -
296  * i_block would have to be negative in the very beginning, so we would not
297  * get there at all.
298  */
299 
300 static int ext4_block_to_path(struct inode *inode,
301 			      ext4_lblk_t i_block,
302 			      ext4_lblk_t offsets[4], int *boundary)
303 {
304 	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
305 	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
306 	const long direct_blocks = EXT4_NDIR_BLOCKS,
307 		indirect_blocks = ptrs,
308 		double_blocks = (1 << (ptrs_bits * 2));
309 	int n = 0;
310 	int final = 0;
311 
312 	if (i_block < direct_blocks) {
313 		offsets[n++] = i_block;
314 		final = direct_blocks;
315 	} else if ((i_block -= direct_blocks) < indirect_blocks) {
316 		offsets[n++] = EXT4_IND_BLOCK;
317 		offsets[n++] = i_block;
318 		final = ptrs;
319 	} else if ((i_block -= indirect_blocks) < double_blocks) {
320 		offsets[n++] = EXT4_DIND_BLOCK;
321 		offsets[n++] = i_block >> ptrs_bits;
322 		offsets[n++] = i_block & (ptrs - 1);
323 		final = ptrs;
324 	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
325 		offsets[n++] = EXT4_TIND_BLOCK;
326 		offsets[n++] = i_block >> (ptrs_bits * 2);
327 		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
328 		offsets[n++] = i_block & (ptrs - 1);
329 		final = ptrs;
330 	} else {
331 		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
332 			     i_block + direct_blocks +
333 			     indirect_blocks + double_blocks, inode->i_ino);
334 	}
335 	if (boundary)
336 		*boundary = final - 1 - (i_block & (ptrs - 1));
337 	return n;
338 }
339 
340 static int __ext4_check_blockref(const char *function, struct inode *inode,
341 				 __le32 *p, unsigned int max)
342 {
343 	__le32 *bref = p;
344 	unsigned int blk;
345 
346 	while (bref < p+max) {
347 		blk = le32_to_cpu(*bref++);
348 		if (blk &&
349 		    unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
350 						    blk, 1))) {
351 			ext4_error_inode(function, inode,
352 					 "invalid block reference %u", blk);
353 			return -EIO;
354 		}
355 	}
356 	return 0;
357 }
358 
359 
360 #define ext4_check_indirect_blockref(inode, bh)                         \
361 	__ext4_check_blockref(__func__, inode, (__le32 *)(bh)->b_data,  \
362 			      EXT4_ADDR_PER_BLOCK((inode)->i_sb))
363 
364 #define ext4_check_inode_blockref(inode)                                \
365 	__ext4_check_blockref(__func__, inode, EXT4_I(inode)->i_data,   \
366 			      EXT4_NDIR_BLOCKS)
367 
368 /**
369  *	ext4_get_branch - read the chain of indirect blocks leading to data
370  *	@inode: inode in question
371  *	@depth: depth of the chain (1 - direct pointer, etc.)
372  *	@offsets: offsets of pointers in inode/indirect blocks
373  *	@chain: place to store the result
374  *	@err: here we store the error value
375  *
376  *	Function fills the array of triples <key, p, bh> and returns %NULL
377  *	if everything went OK or the pointer to the last filled triple
378  *	(incomplete one) otherwise. Upon the return chain[i].key contains
379  *	the number of (i+1)-th block in the chain (as it is stored in memory,
380  *	i.e. little-endian 32-bit), chain[i].p contains the address of that
381  *	number (it points into struct inode for i==0 and into the bh->b_data
382  *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
383  *	block for i>0 and NULL for i==0. In other words, it holds the block
384  *	numbers of the chain, addresses they were taken from (and where we can
385  *	verify that chain did not change) and buffer_heads hosting these
386  *	numbers.
387  *
388  *	Function stops when it stumbles upon zero pointer (absent block)
389  *		(pointer to last triple returned, *@err == 0)
390  *	or when it gets an IO error reading an indirect block
391  *		(ditto, *@err == -EIO)
392  *	or when it reads all @depth-1 indirect blocks successfully and finds
393  *	the whole chain, all way to the data (returns %NULL, *err == 0).
394  *
395  *      Need to be called with
396  *      down_read(&EXT4_I(inode)->i_data_sem)
397  */
398 static Indirect *ext4_get_branch(struct inode *inode, int depth,
399 				 ext4_lblk_t  *offsets,
400 				 Indirect chain[4], int *err)
401 {
402 	struct super_block *sb = inode->i_sb;
403 	Indirect *p = chain;
404 	struct buffer_head *bh;
405 
406 	*err = 0;
407 	/* i_data is not going away, no lock needed */
408 	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
409 	if (!p->key)
410 		goto no_block;
411 	while (--depth) {
412 		bh = sb_getblk(sb, le32_to_cpu(p->key));
413 		if (unlikely(!bh))
414 			goto failure;
415 
416 		if (!bh_uptodate_or_lock(bh)) {
417 			if (bh_submit_read(bh) < 0) {
418 				put_bh(bh);
419 				goto failure;
420 			}
421 			/* validate block references */
422 			if (ext4_check_indirect_blockref(inode, bh)) {
423 				put_bh(bh);
424 				goto failure;
425 			}
426 		}
427 
428 		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
429 		/* Reader: end */
430 		if (!p->key)
431 			goto no_block;
432 	}
433 	return NULL;
434 
435 failure:
436 	*err = -EIO;
437 no_block:
438 	return p;
439 }
440 
441 /**
442  *	ext4_find_near - find a place for allocation with sufficient locality
443  *	@inode: owner
444  *	@ind: descriptor of indirect block.
445  *
446  *	This function returns the preferred place for block allocation.
447  *	It is used when heuristic for sequential allocation fails.
448  *	Rules are:
449  *	  + if there is a block to the left of our position - allocate near it.
450  *	  + if pointer will live in indirect block - allocate near that block.
451  *	  + if pointer will live in inode - allocate in the same
452  *	    cylinder group.
453  *
454  * In the latter case we colour the starting block by the callers PID to
455  * prevent it from clashing with concurrent allocations for a different inode
456  * in the same block group.   The PID is used here so that functionally related
457  * files will be close-by on-disk.
458  *
459  *	Caller must make sure that @ind is valid and will stay that way.
460  */
461 static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
462 {
463 	struct ext4_inode_info *ei = EXT4_I(inode);
464 	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
465 	__le32 *p;
466 	ext4_fsblk_t bg_start;
467 	ext4_fsblk_t last_block;
468 	ext4_grpblk_t colour;
469 	ext4_group_t block_group;
470 	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
471 
472 	/* Try to find previous block */
473 	for (p = ind->p - 1; p >= start; p--) {
474 		if (*p)
475 			return le32_to_cpu(*p);
476 	}
477 
478 	/* No such thing, so let's try location of indirect block */
479 	if (ind->bh)
480 		return ind->bh->b_blocknr;
481 
482 	/*
483 	 * It is going to be referred to from the inode itself? OK, just put it
484 	 * into the same cylinder group then.
485 	 */
486 	block_group = ei->i_block_group;
487 	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
488 		block_group &= ~(flex_size-1);
489 		if (S_ISREG(inode->i_mode))
490 			block_group++;
491 	}
492 	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
493 	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
494 
495 	/*
496 	 * If we are doing delayed allocation, we don't need take
497 	 * colour into account.
498 	 */
499 	if (test_opt(inode->i_sb, DELALLOC))
500 		return bg_start;
501 
502 	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
503 		colour = (current->pid % 16) *
504 			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
505 	else
506 		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
507 	return bg_start + colour;
508 }
509 
510 /**
511  *	ext4_find_goal - find a preferred place for allocation.
512  *	@inode: owner
513  *	@block:  block we want
514  *	@partial: pointer to the last triple within a chain
515  *
516  *	Normally this function find the preferred place for block allocation,
517  *	returns it.
518  *	Because this is only used for non-extent files, we limit the block nr
519  *	to 32 bits.
520  */
521 static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
522 				   Indirect *partial)
523 {
524 	ext4_fsblk_t goal;
525 
526 	/*
527 	 * XXX need to get goal block from mballoc's data structures
528 	 */
529 
530 	goal = ext4_find_near(inode, partial);
531 	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
532 	return goal;
533 }
534 
535 /**
536  *	ext4_blks_to_allocate: Look up the block map and count the number
537  *	of direct blocks need to be allocated for the given branch.
538  *
539  *	@branch: chain of indirect blocks
540  *	@k: number of blocks need for indirect blocks
541  *	@blks: number of data blocks to be mapped.
542  *	@blocks_to_boundary:  the offset in the indirect block
543  *
544  *	return the total number of blocks to be allocate, including the
545  *	direct and indirect blocks.
546  */
547 static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
548 				 int blocks_to_boundary)
549 {
550 	unsigned int count = 0;
551 
552 	/*
553 	 * Simple case, [t,d]Indirect block(s) has not allocated yet
554 	 * then it's clear blocks on that path have not allocated
555 	 */
556 	if (k > 0) {
557 		/* right now we don't handle cross boundary allocation */
558 		if (blks < blocks_to_boundary + 1)
559 			count += blks;
560 		else
561 			count += blocks_to_boundary + 1;
562 		return count;
563 	}
564 
565 	count++;
566 	while (count < blks && count <= blocks_to_boundary &&
567 		le32_to_cpu(*(branch[0].p + count)) == 0) {
568 		count++;
569 	}
570 	return count;
571 }
572 
573 /**
574  *	ext4_alloc_blocks: multiple allocate blocks needed for a branch
575  *	@indirect_blks: the number of blocks need to allocate for indirect
576  *			blocks
577  *
578  *	@new_blocks: on return it will store the new block numbers for
579  *	the indirect blocks(if needed) and the first direct block,
580  *	@blks:	on return it will store the total number of allocated
581  *		direct blocks
582  */
583 static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
584 			     ext4_lblk_t iblock, ext4_fsblk_t goal,
585 			     int indirect_blks, int blks,
586 			     ext4_fsblk_t new_blocks[4], int *err)
587 {
588 	struct ext4_allocation_request ar;
589 	int target, i;
590 	unsigned long count = 0, blk_allocated = 0;
591 	int index = 0;
592 	ext4_fsblk_t current_block = 0;
593 	int ret = 0;
594 
595 	/*
596 	 * Here we try to allocate the requested multiple blocks at once,
597 	 * on a best-effort basis.
598 	 * To build a branch, we should allocate blocks for
599 	 * the indirect blocks(if not allocated yet), and at least
600 	 * the first direct block of this branch.  That's the
601 	 * minimum number of blocks need to allocate(required)
602 	 */
603 	/* first we try to allocate the indirect blocks */
604 	target = indirect_blks;
605 	while (target > 0) {
606 		count = target;
607 		/* allocating blocks for indirect blocks and direct blocks */
608 		current_block = ext4_new_meta_blocks(handle, inode,
609 							goal, &count, err);
610 		if (*err)
611 			goto failed_out;
612 
613 		if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) {
614 			EXT4_ERROR_INODE(inode,
615 					 "current_block %llu + count %lu > %d!",
616 					 current_block, count,
617 					 EXT4_MAX_BLOCK_FILE_PHYS);
618 			*err = -EIO;
619 			goto failed_out;
620 		}
621 
622 		target -= count;
623 		/* allocate blocks for indirect blocks */
624 		while (index < indirect_blks && count) {
625 			new_blocks[index++] = current_block++;
626 			count--;
627 		}
628 		if (count > 0) {
629 			/*
630 			 * save the new block number
631 			 * for the first direct block
632 			 */
633 			new_blocks[index] = current_block;
634 			printk(KERN_INFO "%s returned more blocks than "
635 						"requested\n", __func__);
636 			WARN_ON(1);
637 			break;
638 		}
639 	}
640 
641 	target = blks - count ;
642 	blk_allocated = count;
643 	if (!target)
644 		goto allocated;
645 	/* Now allocate data blocks */
646 	memset(&ar, 0, sizeof(ar));
647 	ar.inode = inode;
648 	ar.goal = goal;
649 	ar.len = target;
650 	ar.logical = iblock;
651 	if (S_ISREG(inode->i_mode))
652 		/* enable in-core preallocation only for regular files */
653 		ar.flags = EXT4_MB_HINT_DATA;
654 
655 	current_block = ext4_mb_new_blocks(handle, &ar, err);
656 	if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) {
657 		EXT4_ERROR_INODE(inode,
658 				 "current_block %llu + ar.len %d > %d!",
659 				 current_block, ar.len,
660 				 EXT4_MAX_BLOCK_FILE_PHYS);
661 		*err = -EIO;
662 		goto failed_out;
663 	}
664 
665 	if (*err && (target == blks)) {
666 		/*
667 		 * if the allocation failed and we didn't allocate
668 		 * any blocks before
669 		 */
670 		goto failed_out;
671 	}
672 	if (!*err) {
673 		if (target == blks) {
674 			/*
675 			 * save the new block number
676 			 * for the first direct block
677 			 */
678 			new_blocks[index] = current_block;
679 		}
680 		blk_allocated += ar.len;
681 	}
682 allocated:
683 	/* total number of blocks allocated for direct blocks */
684 	ret = blk_allocated;
685 	*err = 0;
686 	return ret;
687 failed_out:
688 	for (i = 0; i < index; i++)
689 		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
690 	return ret;
691 }
692 
693 /**
694  *	ext4_alloc_branch - allocate and set up a chain of blocks.
695  *	@inode: owner
696  *	@indirect_blks: number of allocated indirect blocks
697  *	@blks: number of allocated direct blocks
698  *	@offsets: offsets (in the blocks) to store the pointers to next.
699  *	@branch: place to store the chain in.
700  *
701  *	This function allocates blocks, zeroes out all but the last one,
702  *	links them into chain and (if we are synchronous) writes them to disk.
703  *	In other words, it prepares a branch that can be spliced onto the
704  *	inode. It stores the information about that chain in the branch[], in
705  *	the same format as ext4_get_branch() would do. We are calling it after
706  *	we had read the existing part of chain and partial points to the last
707  *	triple of that (one with zero ->key). Upon the exit we have the same
708  *	picture as after the successful ext4_get_block(), except that in one
709  *	place chain is disconnected - *branch->p is still zero (we did not
710  *	set the last link), but branch->key contains the number that should
711  *	be placed into *branch->p to fill that gap.
712  *
713  *	If allocation fails we free all blocks we've allocated (and forget
714  *	their buffer_heads) and return the error value the from failed
715  *	ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
716  *	as described above and return 0.
717  */
718 static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
719 			     ext4_lblk_t iblock, int indirect_blks,
720 			     int *blks, ext4_fsblk_t goal,
721 			     ext4_lblk_t *offsets, Indirect *branch)
722 {
723 	int blocksize = inode->i_sb->s_blocksize;
724 	int i, n = 0;
725 	int err = 0;
726 	struct buffer_head *bh;
727 	int num;
728 	ext4_fsblk_t new_blocks[4];
729 	ext4_fsblk_t current_block;
730 
731 	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
732 				*blks, new_blocks, &err);
733 	if (err)
734 		return err;
735 
736 	branch[0].key = cpu_to_le32(new_blocks[0]);
737 	/*
738 	 * metadata blocks and data blocks are allocated.
739 	 */
740 	for (n = 1; n <= indirect_blks;  n++) {
741 		/*
742 		 * Get buffer_head for parent block, zero it out
743 		 * and set the pointer to new one, then send
744 		 * parent to disk.
745 		 */
746 		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
747 		branch[n].bh = bh;
748 		lock_buffer(bh);
749 		BUFFER_TRACE(bh, "call get_create_access");
750 		err = ext4_journal_get_create_access(handle, bh);
751 		if (err) {
752 			/* Don't brelse(bh) here; it's done in
753 			 * ext4_journal_forget() below */
754 			unlock_buffer(bh);
755 			goto failed;
756 		}
757 
758 		memset(bh->b_data, 0, blocksize);
759 		branch[n].p = (__le32 *) bh->b_data + offsets[n];
760 		branch[n].key = cpu_to_le32(new_blocks[n]);
761 		*branch[n].p = branch[n].key;
762 		if (n == indirect_blks) {
763 			current_block = new_blocks[n];
764 			/*
765 			 * End of chain, update the last new metablock of
766 			 * the chain to point to the new allocated
767 			 * data blocks numbers
768 			 */
769 			for (i = 1; i < num; i++)
770 				*(branch[n].p + i) = cpu_to_le32(++current_block);
771 		}
772 		BUFFER_TRACE(bh, "marking uptodate");
773 		set_buffer_uptodate(bh);
774 		unlock_buffer(bh);
775 
776 		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
777 		err = ext4_handle_dirty_metadata(handle, inode, bh);
778 		if (err)
779 			goto failed;
780 	}
781 	*blks = num;
782 	return err;
783 failed:
784 	/* Allocation failed, free what we already allocated */
785 	ext4_free_blocks(handle, inode, 0, new_blocks[0], 1, 0);
786 	for (i = 1; i <= n ; i++) {
787 		/*
788 		 * branch[i].bh is newly allocated, so there is no
789 		 * need to revoke the block, which is why we don't
790 		 * need to set EXT4_FREE_BLOCKS_METADATA.
791 		 */
792 		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1,
793 				 EXT4_FREE_BLOCKS_FORGET);
794 	}
795 	for (i = n+1; i < indirect_blks; i++)
796 		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
797 
798 	ext4_free_blocks(handle, inode, 0, new_blocks[i], num, 0);
799 
800 	return err;
801 }
802 
803 /**
804  * ext4_splice_branch - splice the allocated branch onto inode.
805  * @inode: owner
806  * @block: (logical) number of block we are adding
807  * @chain: chain of indirect blocks (with a missing link - see
808  *	ext4_alloc_branch)
809  * @where: location of missing link
810  * @num:   number of indirect blocks we are adding
811  * @blks:  number of direct blocks we are adding
812  *
813  * This function fills the missing link and does all housekeeping needed in
814  * inode (->i_blocks, etc.). In case of success we end up with the full
815  * chain to new block and return 0.
816  */
817 static int ext4_splice_branch(handle_t *handle, struct inode *inode,
818 			      ext4_lblk_t block, Indirect *where, int num,
819 			      int blks)
820 {
821 	int i;
822 	int err = 0;
823 	ext4_fsblk_t current_block;
824 
825 	/*
826 	 * If we're splicing into a [td]indirect block (as opposed to the
827 	 * inode) then we need to get write access to the [td]indirect block
828 	 * before the splice.
829 	 */
830 	if (where->bh) {
831 		BUFFER_TRACE(where->bh, "get_write_access");
832 		err = ext4_journal_get_write_access(handle, where->bh);
833 		if (err)
834 			goto err_out;
835 	}
836 	/* That's it */
837 
838 	*where->p = where->key;
839 
840 	/*
841 	 * Update the host buffer_head or inode to point to more just allocated
842 	 * direct blocks blocks
843 	 */
844 	if (num == 0 && blks > 1) {
845 		current_block = le32_to_cpu(where->key) + 1;
846 		for (i = 1; i < blks; i++)
847 			*(where->p + i) = cpu_to_le32(current_block++);
848 	}
849 
850 	/* We are done with atomic stuff, now do the rest of housekeeping */
851 	/* had we spliced it onto indirect block? */
852 	if (where->bh) {
853 		/*
854 		 * If we spliced it onto an indirect block, we haven't
855 		 * altered the inode.  Note however that if it is being spliced
856 		 * onto an indirect block at the very end of the file (the
857 		 * file is growing) then we *will* alter the inode to reflect
858 		 * the new i_size.  But that is not done here - it is done in
859 		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
860 		 */
861 		jbd_debug(5, "splicing indirect only\n");
862 		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
863 		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
864 		if (err)
865 			goto err_out;
866 	} else {
867 		/*
868 		 * OK, we spliced it into the inode itself on a direct block.
869 		 */
870 		ext4_mark_inode_dirty(handle, inode);
871 		jbd_debug(5, "splicing direct\n");
872 	}
873 	return err;
874 
875 err_out:
876 	for (i = 1; i <= num; i++) {
877 		/*
878 		 * branch[i].bh is newly allocated, so there is no
879 		 * need to revoke the block, which is why we don't
880 		 * need to set EXT4_FREE_BLOCKS_METADATA.
881 		 */
882 		ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
883 				 EXT4_FREE_BLOCKS_FORGET);
884 	}
885 	ext4_free_blocks(handle, inode, 0, le32_to_cpu(where[num].key),
886 			 blks, 0);
887 
888 	return err;
889 }
890 
891 /*
892  * The ext4_ind_map_blocks() function handles non-extents inodes
893  * (i.e., using the traditional indirect/double-indirect i_blocks
894  * scheme) for ext4_map_blocks().
895  *
896  * Allocation strategy is simple: if we have to allocate something, we will
897  * have to go the whole way to leaf. So let's do it before attaching anything
898  * to tree, set linkage between the newborn blocks, write them if sync is
899  * required, recheck the path, free and repeat if check fails, otherwise
900  * set the last missing link (that will protect us from any truncate-generated
901  * removals - all blocks on the path are immune now) and possibly force the
902  * write on the parent block.
903  * That has a nice additional property: no special recovery from the failed
904  * allocations is needed - we simply release blocks and do not touch anything
905  * reachable from inode.
906  *
907  * `handle' can be NULL if create == 0.
908  *
909  * return > 0, # of blocks mapped or allocated.
910  * return = 0, if plain lookup failed.
911  * return < 0, error case.
912  *
913  * The ext4_ind_get_blocks() function should be called with
914  * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
915  * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
916  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
917  * blocks.
918  */
919 static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
920 			       struct ext4_map_blocks *map,
921 			       int flags)
922 {
923 	int err = -EIO;
924 	ext4_lblk_t offsets[4];
925 	Indirect chain[4];
926 	Indirect *partial;
927 	ext4_fsblk_t goal;
928 	int indirect_blks;
929 	int blocks_to_boundary = 0;
930 	int depth;
931 	int count = 0;
932 	ext4_fsblk_t first_block = 0;
933 
934 	J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
935 	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
936 	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
937 				   &blocks_to_boundary);
938 
939 	if (depth == 0)
940 		goto out;
941 
942 	partial = ext4_get_branch(inode, depth, offsets, chain, &err);
943 
944 	/* Simplest case - block found, no allocation needed */
945 	if (!partial) {
946 		first_block = le32_to_cpu(chain[depth - 1].key);
947 		count++;
948 		/*map more blocks*/
949 		while (count < map->m_len && count <= blocks_to_boundary) {
950 			ext4_fsblk_t blk;
951 
952 			blk = le32_to_cpu(*(chain[depth-1].p + count));
953 
954 			if (blk == first_block + count)
955 				count++;
956 			else
957 				break;
958 		}
959 		goto got_it;
960 	}
961 
962 	/* Next simple case - plain lookup or failed read of indirect block */
963 	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
964 		goto cleanup;
965 
966 	/*
967 	 * Okay, we need to do block allocation.
968 	*/
969 	goal = ext4_find_goal(inode, map->m_lblk, partial);
970 
971 	/* the number of blocks need to allocate for [d,t]indirect blocks */
972 	indirect_blks = (chain + depth) - partial - 1;
973 
974 	/*
975 	 * Next look up the indirect map to count the totoal number of
976 	 * direct blocks to allocate for this branch.
977 	 */
978 	count = ext4_blks_to_allocate(partial, indirect_blks,
979 				      map->m_len, blocks_to_boundary);
980 	/*
981 	 * Block out ext4_truncate while we alter the tree
982 	 */
983 	err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
984 				&count, goal,
985 				offsets + (partial - chain), partial);
986 
987 	/*
988 	 * The ext4_splice_branch call will free and forget any buffers
989 	 * on the new chain if there is a failure, but that risks using
990 	 * up transaction credits, especially for bitmaps where the
991 	 * credits cannot be returned.  Can we handle this somehow?  We
992 	 * may need to return -EAGAIN upwards in the worst case.  --sct
993 	 */
994 	if (!err)
995 		err = ext4_splice_branch(handle, inode, map->m_lblk,
996 					 partial, indirect_blks, count);
997 	if (err)
998 		goto cleanup;
999 
1000 	map->m_flags |= EXT4_MAP_NEW;
1001 
1002 	ext4_update_inode_fsync_trans(handle, inode, 1);
1003 got_it:
1004 	map->m_flags |= EXT4_MAP_MAPPED;
1005 	map->m_pblk = le32_to_cpu(chain[depth-1].key);
1006 	map->m_len = count;
1007 	if (count > blocks_to_boundary)
1008 		map->m_flags |= EXT4_MAP_BOUNDARY;
1009 	err = count;
1010 	/* Clean up and exit */
1011 	partial = chain + depth - 1;	/* the whole chain */
1012 cleanup:
1013 	while (partial > chain) {
1014 		BUFFER_TRACE(partial->bh, "call brelse");
1015 		brelse(partial->bh);
1016 		partial--;
1017 	}
1018 out:
1019 	return err;
1020 }
1021 
1022 #ifdef CONFIG_QUOTA
1023 qsize_t *ext4_get_reserved_space(struct inode *inode)
1024 {
1025 	return &EXT4_I(inode)->i_reserved_quota;
1026 }
1027 #endif
1028 
1029 /*
1030  * Calculate the number of metadata blocks need to reserve
1031  * to allocate a new block at @lblocks for non extent file based file
1032  */
1033 static int ext4_indirect_calc_metadata_amount(struct inode *inode,
1034 					      sector_t lblock)
1035 {
1036 	struct ext4_inode_info *ei = EXT4_I(inode);
1037 	sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
1038 	int blk_bits;
1039 
1040 	if (lblock < EXT4_NDIR_BLOCKS)
1041 		return 0;
1042 
1043 	lblock -= EXT4_NDIR_BLOCKS;
1044 
1045 	if (ei->i_da_metadata_calc_len &&
1046 	    (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
1047 		ei->i_da_metadata_calc_len++;
1048 		return 0;
1049 	}
1050 	ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
1051 	ei->i_da_metadata_calc_len = 1;
1052 	blk_bits = order_base_2(lblock);
1053 	return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
1054 }
1055 
1056 /*
1057  * Calculate the number of metadata blocks need to reserve
1058  * to allocate a block located at @lblock
1059  */
1060 static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock)
1061 {
1062 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1063 		return ext4_ext_calc_metadata_amount(inode, lblock);
1064 
1065 	return ext4_indirect_calc_metadata_amount(inode, lblock);
1066 }
1067 
1068 /*
1069  * Called with i_data_sem down, which is important since we can call
1070  * ext4_discard_preallocations() from here.
1071  */
1072 void ext4_da_update_reserve_space(struct inode *inode,
1073 					int used, int quota_claim)
1074 {
1075 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1076 	struct ext4_inode_info *ei = EXT4_I(inode);
1077 
1078 	spin_lock(&ei->i_block_reservation_lock);
1079 	trace_ext4_da_update_reserve_space(inode, used);
1080 	if (unlikely(used > ei->i_reserved_data_blocks)) {
1081 		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
1082 			 "with only %d reserved data blocks\n",
1083 			 __func__, inode->i_ino, used,
1084 			 ei->i_reserved_data_blocks);
1085 		WARN_ON(1);
1086 		used = ei->i_reserved_data_blocks;
1087 	}
1088 
1089 	/* Update per-inode reservations */
1090 	ei->i_reserved_data_blocks -= used;
1091 	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
1092 	percpu_counter_sub(&sbi->s_dirtyblocks_counter,
1093 			   used + ei->i_allocated_meta_blocks);
1094 	ei->i_allocated_meta_blocks = 0;
1095 
1096 	if (ei->i_reserved_data_blocks == 0) {
1097 		/*
1098 		 * We can release all of the reserved metadata blocks
1099 		 * only when we have written all of the delayed
1100 		 * allocation blocks.
1101 		 */
1102 		percpu_counter_sub(&sbi->s_dirtyblocks_counter,
1103 				   ei->i_reserved_meta_blocks);
1104 		ei->i_reserved_meta_blocks = 0;
1105 		ei->i_da_metadata_calc_len = 0;
1106 	}
1107 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1108 
1109 	/* Update quota subsystem for data blocks */
1110 	if (quota_claim)
1111 		dquot_claim_block(inode, used);
1112 	else {
1113 		/*
1114 		 * We did fallocate with an offset that is already delayed
1115 		 * allocated. So on delayed allocated writeback we should
1116 		 * not re-claim the quota for fallocated blocks.
1117 		 */
1118 		dquot_release_reservation_block(inode, used);
1119 	}
1120 
1121 	/*
1122 	 * If we have done all the pending block allocations and if
1123 	 * there aren't any writers on the inode, we can discard the
1124 	 * inode's preallocations.
1125 	 */
1126 	if ((ei->i_reserved_data_blocks == 0) &&
1127 	    (atomic_read(&inode->i_writecount) == 0))
1128 		ext4_discard_preallocations(inode);
1129 }
1130 
1131 static int check_block_validity(struct inode *inode, const char *func,
1132 				struct ext4_map_blocks *map)
1133 {
1134 	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
1135 				   map->m_len)) {
1136 		ext4_error_inode(func, inode,
1137 			   "lblock %lu mapped to illegal pblock %llu "
1138 			   "(length %d)", (unsigned long) map->m_lblk,
1139 				 map->m_pblk, map->m_len);
1140 		return -EIO;
1141 	}
1142 	return 0;
1143 }
1144 
1145 /*
1146  * Return the number of contiguous dirty pages in a given inode
1147  * starting at page frame idx.
1148  */
1149 static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
1150 				    unsigned int max_pages)
1151 {
1152 	struct address_space *mapping = inode->i_mapping;
1153 	pgoff_t	index;
1154 	struct pagevec pvec;
1155 	pgoff_t num = 0;
1156 	int i, nr_pages, done = 0;
1157 
1158 	if (max_pages == 0)
1159 		return 0;
1160 	pagevec_init(&pvec, 0);
1161 	while (!done) {
1162 		index = idx;
1163 		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1164 					      PAGECACHE_TAG_DIRTY,
1165 					      (pgoff_t)PAGEVEC_SIZE);
1166 		if (nr_pages == 0)
1167 			break;
1168 		for (i = 0; i < nr_pages; i++) {
1169 			struct page *page = pvec.pages[i];
1170 			struct buffer_head *bh, *head;
1171 
1172 			lock_page(page);
1173 			if (unlikely(page->mapping != mapping) ||
1174 			    !PageDirty(page) ||
1175 			    PageWriteback(page) ||
1176 			    page->index != idx) {
1177 				done = 1;
1178 				unlock_page(page);
1179 				break;
1180 			}
1181 			if (page_has_buffers(page)) {
1182 				bh = head = page_buffers(page);
1183 				do {
1184 					if (!buffer_delay(bh) &&
1185 					    !buffer_unwritten(bh))
1186 						done = 1;
1187 					bh = bh->b_this_page;
1188 				} while (!done && (bh != head));
1189 			}
1190 			unlock_page(page);
1191 			if (done)
1192 				break;
1193 			idx++;
1194 			num++;
1195 			if (num >= max_pages)
1196 				break;
1197 		}
1198 		pagevec_release(&pvec);
1199 	}
1200 	return num;
1201 }
1202 
1203 /*
1204  * The ext4_map_blocks() function tries to look up the requested blocks,
1205  * and returns if the blocks are already mapped.
1206  *
1207  * Otherwise it takes the write lock of the i_data_sem and allocate blocks
1208  * and store the allocated blocks in the result buffer head and mark it
1209  * mapped.
1210  *
1211  * If file type is extents based, it will call ext4_ext_map_blocks(),
1212  * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
1213  * based files
1214  *
1215  * On success, it returns the number of blocks being mapped or allocate.
1216  * if create==0 and the blocks are pre-allocated and uninitialized block,
1217  * the result buffer head is unmapped. If the create ==1, it will make sure
1218  * the buffer head is mapped.
1219  *
1220  * It returns 0 if plain look up failed (blocks have not been allocated), in
1221  * that casem, buffer head is unmapped
1222  *
1223  * It returns the error in case of allocation failure.
1224  */
1225 int ext4_map_blocks(handle_t *handle, struct inode *inode,
1226 		    struct ext4_map_blocks *map, int flags)
1227 {
1228 	int retval;
1229 
1230 	map->m_flags = 0;
1231 	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
1232 		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
1233 		  (unsigned long) map->m_lblk);
1234 	/*
1235 	 * Try to see if we can get the block without requesting a new
1236 	 * file system block.
1237 	 */
1238 	down_read((&EXT4_I(inode)->i_data_sem));
1239 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
1240 		retval = ext4_ext_map_blocks(handle, inode, map, 0);
1241 	} else {
1242 		retval = ext4_ind_map_blocks(handle, inode, map, 0);
1243 	}
1244 	up_read((&EXT4_I(inode)->i_data_sem));
1245 
1246 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
1247 		int ret = check_block_validity(inode, __func__, map);
1248 		if (ret != 0)
1249 			return ret;
1250 	}
1251 
1252 	/* If it is only a block(s) look up */
1253 	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
1254 		return retval;
1255 
1256 	/*
1257 	 * Returns if the blocks have already allocated
1258 	 *
1259 	 * Note that if blocks have been preallocated
1260 	 * ext4_ext_get_block() returns th create = 0
1261 	 * with buffer head unmapped.
1262 	 */
1263 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
1264 		return retval;
1265 
1266 	/*
1267 	 * When we call get_blocks without the create flag, the
1268 	 * BH_Unwritten flag could have gotten set if the blocks
1269 	 * requested were part of a uninitialized extent.  We need to
1270 	 * clear this flag now that we are committed to convert all or
1271 	 * part of the uninitialized extent to be an initialized
1272 	 * extent.  This is because we need to avoid the combination
1273 	 * of BH_Unwritten and BH_Mapped flags being simultaneously
1274 	 * set on the buffer_head.
1275 	 */
1276 	map->m_flags &= ~EXT4_MAP_UNWRITTEN;
1277 
1278 	/*
1279 	 * New blocks allocate and/or writing to uninitialized extent
1280 	 * will possibly result in updating i_data, so we take
1281 	 * the write lock of i_data_sem, and call get_blocks()
1282 	 * with create == 1 flag.
1283 	 */
1284 	down_write((&EXT4_I(inode)->i_data_sem));
1285 
1286 	/*
1287 	 * if the caller is from delayed allocation writeout path
1288 	 * we have already reserved fs blocks for allocation
1289 	 * let the underlying get_block() function know to
1290 	 * avoid double accounting
1291 	 */
1292 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
1293 		EXT4_I(inode)->i_delalloc_reserved_flag = 1;
1294 	/*
1295 	 * We need to check for EXT4 here because migrate
1296 	 * could have changed the inode type in between
1297 	 */
1298 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
1299 		retval = ext4_ext_map_blocks(handle, inode, map, flags);
1300 	} else {
1301 		retval = ext4_ind_map_blocks(handle, inode, map, flags);
1302 
1303 		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
1304 			/*
1305 			 * We allocated new blocks which will result in
1306 			 * i_data's format changing.  Force the migrate
1307 			 * to fail by clearing migrate flags
1308 			 */
1309 			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
1310 		}
1311 
1312 		/*
1313 		 * Update reserved blocks/metadata blocks after successful
1314 		 * block allocation which had been deferred till now. We don't
1315 		 * support fallocate for non extent files. So we can update
1316 		 * reserve space here.
1317 		 */
1318 		if ((retval > 0) &&
1319 			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
1320 			ext4_da_update_reserve_space(inode, retval, 1);
1321 	}
1322 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
1323 		EXT4_I(inode)->i_delalloc_reserved_flag = 0;
1324 
1325 	up_write((&EXT4_I(inode)->i_data_sem));
1326 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
1327 		int ret = check_block_validity(inode,
1328 					       "ext4_map_blocks_after_alloc",
1329 					       map);
1330 		if (ret != 0)
1331 			return ret;
1332 	}
1333 	return retval;
1334 }
1335 
1336 /* Maximum number of blocks we map for direct IO at once. */
1337 #define DIO_MAX_BLOCKS 4096
1338 
1339 static int _ext4_get_block(struct inode *inode, sector_t iblock,
1340 			   struct buffer_head *bh, int flags)
1341 {
1342 	handle_t *handle = ext4_journal_current_handle();
1343 	struct ext4_map_blocks map;
1344 	int ret = 0, started = 0;
1345 	int dio_credits;
1346 
1347 	map.m_lblk = iblock;
1348 	map.m_len = bh->b_size >> inode->i_blkbits;
1349 
1350 	if (flags && !handle) {
1351 		/* Direct IO write... */
1352 		if (map.m_len > DIO_MAX_BLOCKS)
1353 			map.m_len = DIO_MAX_BLOCKS;
1354 		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
1355 		handle = ext4_journal_start(inode, dio_credits);
1356 		if (IS_ERR(handle)) {
1357 			ret = PTR_ERR(handle);
1358 			return ret;
1359 		}
1360 		started = 1;
1361 	}
1362 
1363 	ret = ext4_map_blocks(handle, inode, &map, flags);
1364 	if (ret > 0) {
1365 		map_bh(bh, inode->i_sb, map.m_pblk);
1366 		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
1367 		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
1368 		ret = 0;
1369 	}
1370 	if (started)
1371 		ext4_journal_stop(handle);
1372 	return ret;
1373 }
1374 
1375 int ext4_get_block(struct inode *inode, sector_t iblock,
1376 		   struct buffer_head *bh, int create)
1377 {
1378 	return _ext4_get_block(inode, iblock, bh,
1379 			       create ? EXT4_GET_BLOCKS_CREATE : 0);
1380 }
1381 
1382 /*
1383  * `handle' can be NULL if create is zero
1384  */
1385 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
1386 				ext4_lblk_t block, int create, int *errp)
1387 {
1388 	struct ext4_map_blocks map;
1389 	struct buffer_head *bh;
1390 	int fatal = 0, err;
1391 
1392 	J_ASSERT(handle != NULL || create == 0);
1393 
1394 	map.m_lblk = block;
1395 	map.m_len = 1;
1396 	err = ext4_map_blocks(handle, inode, &map,
1397 			      create ? EXT4_GET_BLOCKS_CREATE : 0);
1398 
1399 	if (err < 0)
1400 		*errp = err;
1401 	if (err <= 0)
1402 		return NULL;
1403 	*errp = 0;
1404 
1405 	bh = sb_getblk(inode->i_sb, map.m_pblk);
1406 	if (!bh) {
1407 		*errp = -EIO;
1408 		return NULL;
1409 	}
1410 	if (map.m_flags & EXT4_MAP_NEW) {
1411 		J_ASSERT(create != 0);
1412 		J_ASSERT(handle != NULL);
1413 
1414 		/*
1415 		 * Now that we do not always journal data, we should
1416 		 * keep in mind whether this should always journal the
1417 		 * new buffer as metadata.  For now, regular file
1418 		 * writes use ext4_get_block instead, so it's not a
1419 		 * problem.
1420 		 */
1421 		lock_buffer(bh);
1422 		BUFFER_TRACE(bh, "call get_create_access");
1423 		fatal = ext4_journal_get_create_access(handle, bh);
1424 		if (!fatal && !buffer_uptodate(bh)) {
1425 			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1426 			set_buffer_uptodate(bh);
1427 		}
1428 		unlock_buffer(bh);
1429 		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
1430 		err = ext4_handle_dirty_metadata(handle, inode, bh);
1431 		if (!fatal)
1432 			fatal = err;
1433 	} else {
1434 		BUFFER_TRACE(bh, "not a new buffer");
1435 	}
1436 	if (fatal) {
1437 		*errp = fatal;
1438 		brelse(bh);
1439 		bh = NULL;
1440 	}
1441 	return bh;
1442 }
1443 
1444 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
1445 			       ext4_lblk_t block, int create, int *err)
1446 {
1447 	struct buffer_head *bh;
1448 
1449 	bh = ext4_getblk(handle, inode, block, create, err);
1450 	if (!bh)
1451 		return bh;
1452 	if (buffer_uptodate(bh))
1453 		return bh;
1454 	ll_rw_block(READ_META, 1, &bh);
1455 	wait_on_buffer(bh);
1456 	if (buffer_uptodate(bh))
1457 		return bh;
1458 	put_bh(bh);
1459 	*err = -EIO;
1460 	return NULL;
1461 }
1462 
1463 static int walk_page_buffers(handle_t *handle,
1464 			     struct buffer_head *head,
1465 			     unsigned from,
1466 			     unsigned to,
1467 			     int *partial,
1468 			     int (*fn)(handle_t *handle,
1469 				       struct buffer_head *bh))
1470 {
1471 	struct buffer_head *bh;
1472 	unsigned block_start, block_end;
1473 	unsigned blocksize = head->b_size;
1474 	int err, ret = 0;
1475 	struct buffer_head *next;
1476 
1477 	for (bh = head, block_start = 0;
1478 	     ret == 0 && (bh != head || !block_start);
1479 	     block_start = block_end, bh = next) {
1480 		next = bh->b_this_page;
1481 		block_end = block_start + blocksize;
1482 		if (block_end <= from || block_start >= to) {
1483 			if (partial && !buffer_uptodate(bh))
1484 				*partial = 1;
1485 			continue;
1486 		}
1487 		err = (*fn)(handle, bh);
1488 		if (!ret)
1489 			ret = err;
1490 	}
1491 	return ret;
1492 }
1493 
1494 /*
1495  * To preserve ordering, it is essential that the hole instantiation and
1496  * the data write be encapsulated in a single transaction.  We cannot
1497  * close off a transaction and start a new one between the ext4_get_block()
1498  * and the commit_write().  So doing the jbd2_journal_start at the start of
1499  * prepare_write() is the right place.
1500  *
1501  * Also, this function can nest inside ext4_writepage() ->
1502  * block_write_full_page(). In that case, we *know* that ext4_writepage()
1503  * has generated enough buffer credits to do the whole page.  So we won't
1504  * block on the journal in that case, which is good, because the caller may
1505  * be PF_MEMALLOC.
1506  *
1507  * By accident, ext4 can be reentered when a transaction is open via
1508  * quota file writes.  If we were to commit the transaction while thus
1509  * reentered, there can be a deadlock - we would be holding a quota
1510  * lock, and the commit would never complete if another thread had a
1511  * transaction open and was blocking on the quota lock - a ranking
1512  * violation.
1513  *
1514  * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
1515  * will _not_ run commit under these circumstances because handle->h_ref
1516  * is elevated.  We'll still have enough credits for the tiny quotafile
1517  * write.
1518  */
1519 static int do_journal_get_write_access(handle_t *handle,
1520 				       struct buffer_head *bh)
1521 {
1522 	if (!buffer_mapped(bh) || buffer_freed(bh))
1523 		return 0;
1524 	return ext4_journal_get_write_access(handle, bh);
1525 }
1526 
1527 /*
1528  * Truncate blocks that were not used by write. We have to truncate the
1529  * pagecache as well so that corresponding buffers get properly unmapped.
1530  */
1531 static void ext4_truncate_failed_write(struct inode *inode)
1532 {
1533 	truncate_inode_pages(inode->i_mapping, inode->i_size);
1534 	ext4_truncate(inode);
1535 }
1536 
1537 static int ext4_get_block_write(struct inode *inode, sector_t iblock,
1538 		   struct buffer_head *bh_result, int create);
1539 static int ext4_write_begin(struct file *file, struct address_space *mapping,
1540 			    loff_t pos, unsigned len, unsigned flags,
1541 			    struct page **pagep, void **fsdata)
1542 {
1543 	struct inode *inode = mapping->host;
1544 	int ret, needed_blocks;
1545 	handle_t *handle;
1546 	int retries = 0;
1547 	struct page *page;
1548 	pgoff_t index;
1549 	unsigned from, to;
1550 
1551 	trace_ext4_write_begin(inode, pos, len, flags);
1552 	/*
1553 	 * Reserve one block more for addition to orphan list in case
1554 	 * we allocate blocks but write fails for some reason
1555 	 */
1556 	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
1557 	index = pos >> PAGE_CACHE_SHIFT;
1558 	from = pos & (PAGE_CACHE_SIZE - 1);
1559 	to = from + len;
1560 
1561 retry:
1562 	handle = ext4_journal_start(inode, needed_blocks);
1563 	if (IS_ERR(handle)) {
1564 		ret = PTR_ERR(handle);
1565 		goto out;
1566 	}
1567 
1568 	/* We cannot recurse into the filesystem as the transaction is already
1569 	 * started */
1570 	flags |= AOP_FLAG_NOFS;
1571 
1572 	page = grab_cache_page_write_begin(mapping, index, flags);
1573 	if (!page) {
1574 		ext4_journal_stop(handle);
1575 		ret = -ENOMEM;
1576 		goto out;
1577 	}
1578 	*pagep = page;
1579 
1580 	if (ext4_should_dioread_nolock(inode))
1581 		ret = block_write_begin(file, mapping, pos, len, flags, pagep,
1582 				fsdata, ext4_get_block_write);
1583 	else
1584 		ret = block_write_begin(file, mapping, pos, len, flags, pagep,
1585 				fsdata, ext4_get_block);
1586 
1587 	if (!ret && ext4_should_journal_data(inode)) {
1588 		ret = walk_page_buffers(handle, page_buffers(page),
1589 				from, to, NULL, do_journal_get_write_access);
1590 	}
1591 
1592 	if (ret) {
1593 		unlock_page(page);
1594 		page_cache_release(page);
1595 		/*
1596 		 * block_write_begin may have instantiated a few blocks
1597 		 * outside i_size.  Trim these off again. Don't need
1598 		 * i_size_read because we hold i_mutex.
1599 		 *
1600 		 * Add inode to orphan list in case we crash before
1601 		 * truncate finishes
1602 		 */
1603 		if (pos + len > inode->i_size && ext4_can_truncate(inode))
1604 			ext4_orphan_add(handle, inode);
1605 
1606 		ext4_journal_stop(handle);
1607 		if (pos + len > inode->i_size) {
1608 			ext4_truncate_failed_write(inode);
1609 			/*
1610 			 * If truncate failed early the inode might
1611 			 * still be on the orphan list; we need to
1612 			 * make sure the inode is removed from the
1613 			 * orphan list in that case.
1614 			 */
1615 			if (inode->i_nlink)
1616 				ext4_orphan_del(NULL, inode);
1617 		}
1618 	}
1619 
1620 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
1621 		goto retry;
1622 out:
1623 	return ret;
1624 }
1625 
1626 /* For write_end() in data=journal mode */
1627 static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1628 {
1629 	if (!buffer_mapped(bh) || buffer_freed(bh))
1630 		return 0;
1631 	set_buffer_uptodate(bh);
1632 	return ext4_handle_dirty_metadata(handle, NULL, bh);
1633 }
1634 
1635 static int ext4_generic_write_end(struct file *file,
1636 				  struct address_space *mapping,
1637 				  loff_t pos, unsigned len, unsigned copied,
1638 				  struct page *page, void *fsdata)
1639 {
1640 	int i_size_changed = 0;
1641 	struct inode *inode = mapping->host;
1642 	handle_t *handle = ext4_journal_current_handle();
1643 
1644 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1645 
1646 	/*
1647 	 * No need to use i_size_read() here, the i_size
1648 	 * cannot change under us because we hold i_mutex.
1649 	 *
1650 	 * But it's important to update i_size while still holding page lock:
1651 	 * page writeout could otherwise come in and zero beyond i_size.
1652 	 */
1653 	if (pos + copied > inode->i_size) {
1654 		i_size_write(inode, pos + copied);
1655 		i_size_changed = 1;
1656 	}
1657 
1658 	if (pos + copied >  EXT4_I(inode)->i_disksize) {
1659 		/* We need to mark inode dirty even if
1660 		 * new_i_size is less that inode->i_size
1661 		 * bu greater than i_disksize.(hint delalloc)
1662 		 */
1663 		ext4_update_i_disksize(inode, (pos + copied));
1664 		i_size_changed = 1;
1665 	}
1666 	unlock_page(page);
1667 	page_cache_release(page);
1668 
1669 	/*
1670 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
1671 	 * makes the holding time of page lock longer. Second, it forces lock
1672 	 * ordering of page lock and transaction start for journaling
1673 	 * filesystems.
1674 	 */
1675 	if (i_size_changed)
1676 		ext4_mark_inode_dirty(handle, inode);
1677 
1678 	return copied;
1679 }
1680 
1681 /*
1682  * We need to pick up the new inode size which generic_commit_write gave us
1683  * `file' can be NULL - eg, when called from page_symlink().
1684  *
1685  * ext4 never places buffers on inode->i_mapping->private_list.  metadata
1686  * buffers are managed internally.
1687  */
1688 static int ext4_ordered_write_end(struct file *file,
1689 				  struct address_space *mapping,
1690 				  loff_t pos, unsigned len, unsigned copied,
1691 				  struct page *page, void *fsdata)
1692 {
1693 	handle_t *handle = ext4_journal_current_handle();
1694 	struct inode *inode = mapping->host;
1695 	int ret = 0, ret2;
1696 
1697 	trace_ext4_ordered_write_end(inode, pos, len, copied);
1698 	ret = ext4_jbd2_file_inode(handle, inode);
1699 
1700 	if (ret == 0) {
1701 		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
1702 							page, fsdata);
1703 		copied = ret2;
1704 		if (pos + len > inode->i_size && ext4_can_truncate(inode))
1705 			/* if we have allocated more blocks and copied
1706 			 * less. We will have blocks allocated outside
1707 			 * inode->i_size. So truncate them
1708 			 */
1709 			ext4_orphan_add(handle, inode);
1710 		if (ret2 < 0)
1711 			ret = ret2;
1712 	}
1713 	ret2 = ext4_journal_stop(handle);
1714 	if (!ret)
1715 		ret = ret2;
1716 
1717 	if (pos + len > inode->i_size) {
1718 		ext4_truncate_failed_write(inode);
1719 		/*
1720 		 * If truncate failed early the inode might still be
1721 		 * on the orphan list; we need to make sure the inode
1722 		 * is removed from the orphan list in that case.
1723 		 */
1724 		if (inode->i_nlink)
1725 			ext4_orphan_del(NULL, inode);
1726 	}
1727 
1728 
1729 	return ret ? ret : copied;
1730 }
1731 
1732 static int ext4_writeback_write_end(struct file *file,
1733 				    struct address_space *mapping,
1734 				    loff_t pos, unsigned len, unsigned copied,
1735 				    struct page *page, void *fsdata)
1736 {
1737 	handle_t *handle = ext4_journal_current_handle();
1738 	struct inode *inode = mapping->host;
1739 	int ret = 0, ret2;
1740 
1741 	trace_ext4_writeback_write_end(inode, pos, len, copied);
1742 	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
1743 							page, fsdata);
1744 	copied = ret2;
1745 	if (pos + len > inode->i_size && ext4_can_truncate(inode))
1746 		/* if we have allocated more blocks and copied
1747 		 * less. We will have blocks allocated outside
1748 		 * inode->i_size. So truncate them
1749 		 */
1750 		ext4_orphan_add(handle, inode);
1751 
1752 	if (ret2 < 0)
1753 		ret = ret2;
1754 
1755 	ret2 = ext4_journal_stop(handle);
1756 	if (!ret)
1757 		ret = ret2;
1758 
1759 	if (pos + len > inode->i_size) {
1760 		ext4_truncate_failed_write(inode);
1761 		/*
1762 		 * If truncate failed early the inode might still be
1763 		 * on the orphan list; we need to make sure the inode
1764 		 * is removed from the orphan list in that case.
1765 		 */
1766 		if (inode->i_nlink)
1767 			ext4_orphan_del(NULL, inode);
1768 	}
1769 
1770 	return ret ? ret : copied;
1771 }
1772 
1773 static int ext4_journalled_write_end(struct file *file,
1774 				     struct address_space *mapping,
1775 				     loff_t pos, unsigned len, unsigned copied,
1776 				     struct page *page, void *fsdata)
1777 {
1778 	handle_t *handle = ext4_journal_current_handle();
1779 	struct inode *inode = mapping->host;
1780 	int ret = 0, ret2;
1781 	int partial = 0;
1782 	unsigned from, to;
1783 	loff_t new_i_size;
1784 
1785 	trace_ext4_journalled_write_end(inode, pos, len, copied);
1786 	from = pos & (PAGE_CACHE_SIZE - 1);
1787 	to = from + len;
1788 
1789 	if (copied < len) {
1790 		if (!PageUptodate(page))
1791 			copied = 0;
1792 		page_zero_new_buffers(page, from+copied, to);
1793 	}
1794 
1795 	ret = walk_page_buffers(handle, page_buffers(page), from,
1796 				to, &partial, write_end_fn);
1797 	if (!partial)
1798 		SetPageUptodate(page);
1799 	new_i_size = pos + copied;
1800 	if (new_i_size > inode->i_size)
1801 		i_size_write(inode, pos+copied);
1802 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1803 	if (new_i_size > EXT4_I(inode)->i_disksize) {
1804 		ext4_update_i_disksize(inode, new_i_size);
1805 		ret2 = ext4_mark_inode_dirty(handle, inode);
1806 		if (!ret)
1807 			ret = ret2;
1808 	}
1809 
1810 	unlock_page(page);
1811 	page_cache_release(page);
1812 	if (pos + len > inode->i_size && ext4_can_truncate(inode))
1813 		/* if we have allocated more blocks and copied
1814 		 * less. We will have blocks allocated outside
1815 		 * inode->i_size. So truncate them
1816 		 */
1817 		ext4_orphan_add(handle, inode);
1818 
1819 	ret2 = ext4_journal_stop(handle);
1820 	if (!ret)
1821 		ret = ret2;
1822 	if (pos + len > inode->i_size) {
1823 		ext4_truncate_failed_write(inode);
1824 		/*
1825 		 * If truncate failed early the inode might still be
1826 		 * on the orphan list; we need to make sure the inode
1827 		 * is removed from the orphan list in that case.
1828 		 */
1829 		if (inode->i_nlink)
1830 			ext4_orphan_del(NULL, inode);
1831 	}
1832 
1833 	return ret ? ret : copied;
1834 }
1835 
1836 /*
1837  * Reserve a single block located at lblock
1838  */
1839 static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
1840 {
1841 	int retries = 0;
1842 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1843 	struct ext4_inode_info *ei = EXT4_I(inode);
1844 	unsigned long md_needed;
1845 	int ret;
1846 
1847 	/*
1848 	 * recalculate the amount of metadata blocks to reserve
1849 	 * in order to allocate nrblocks
1850 	 * worse case is one extent per block
1851 	 */
1852 repeat:
1853 	spin_lock(&ei->i_block_reservation_lock);
1854 	md_needed = ext4_calc_metadata_amount(inode, lblock);
1855 	trace_ext4_da_reserve_space(inode, md_needed);
1856 	spin_unlock(&ei->i_block_reservation_lock);
1857 
1858 	/*
1859 	 * We will charge metadata quota at writeout time; this saves
1860 	 * us from metadata over-estimation, though we may go over by
1861 	 * a small amount in the end.  Here we just reserve for data.
1862 	 */
1863 	ret = dquot_reserve_block(inode, 1);
1864 	if (ret)
1865 		return ret;
1866 	/*
1867 	 * We do still charge estimated metadata to the sb though;
1868 	 * we cannot afford to run out of free blocks.
1869 	 */
1870 	if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
1871 		dquot_release_reservation_block(inode, 1);
1872 		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1873 			yield();
1874 			goto repeat;
1875 		}
1876 		return -ENOSPC;
1877 	}
1878 	spin_lock(&ei->i_block_reservation_lock);
1879 	ei->i_reserved_data_blocks++;
1880 	ei->i_reserved_meta_blocks += md_needed;
1881 	spin_unlock(&ei->i_block_reservation_lock);
1882 
1883 	return 0;       /* success */
1884 }
1885 
1886 static void ext4_da_release_space(struct inode *inode, int to_free)
1887 {
1888 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1889 	struct ext4_inode_info *ei = EXT4_I(inode);
1890 
1891 	if (!to_free)
1892 		return;		/* Nothing to release, exit */
1893 
1894 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1895 
1896 	trace_ext4_da_release_space(inode, to_free);
1897 	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
1898 		/*
1899 		 * if there aren't enough reserved blocks, then the
1900 		 * counter is messed up somewhere.  Since this
1901 		 * function is called from invalidate page, it's
1902 		 * harmless to return without any action.
1903 		 */
1904 		ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
1905 			 "ino %lu, to_free %d with only %d reserved "
1906 			 "data blocks\n", inode->i_ino, to_free,
1907 			 ei->i_reserved_data_blocks);
1908 		WARN_ON(1);
1909 		to_free = ei->i_reserved_data_blocks;
1910 	}
1911 	ei->i_reserved_data_blocks -= to_free;
1912 
1913 	if (ei->i_reserved_data_blocks == 0) {
1914 		/*
1915 		 * We can release all of the reserved metadata blocks
1916 		 * only when we have written all of the delayed
1917 		 * allocation blocks.
1918 		 */
1919 		percpu_counter_sub(&sbi->s_dirtyblocks_counter,
1920 				   ei->i_reserved_meta_blocks);
1921 		ei->i_reserved_meta_blocks = 0;
1922 		ei->i_da_metadata_calc_len = 0;
1923 	}
1924 
1925 	/* update fs dirty data blocks counter */
1926 	percpu_counter_sub(&sbi->s_dirtyblocks_counter, to_free);
1927 
1928 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1929 
1930 	dquot_release_reservation_block(inode, to_free);
1931 }
1932 
1933 static void ext4_da_page_release_reservation(struct page *page,
1934 					     unsigned long offset)
1935 {
1936 	int to_release = 0;
1937 	struct buffer_head *head, *bh;
1938 	unsigned int curr_off = 0;
1939 
1940 	head = page_buffers(page);
1941 	bh = head;
1942 	do {
1943 		unsigned int next_off = curr_off + bh->b_size;
1944 
1945 		if ((offset <= curr_off) && (buffer_delay(bh))) {
1946 			to_release++;
1947 			clear_buffer_delay(bh);
1948 		}
1949 		curr_off = next_off;
1950 	} while ((bh = bh->b_this_page) != head);
1951 	ext4_da_release_space(page->mapping->host, to_release);
1952 }
1953 
1954 /*
1955  * Delayed allocation stuff
1956  */
1957 
1958 /*
1959  * mpage_da_submit_io - walks through extent of pages and try to write
1960  * them with writepage() call back
1961  *
1962  * @mpd->inode: inode
1963  * @mpd->first_page: first page of the extent
1964  * @mpd->next_page: page after the last page of the extent
1965  *
1966  * By the time mpage_da_submit_io() is called we expect all blocks
1967  * to be allocated. this may be wrong if allocation failed.
1968  *
1969  * As pages are already locked by write_cache_pages(), we can't use it
1970  */
1971 static int mpage_da_submit_io(struct mpage_da_data *mpd)
1972 {
1973 	long pages_skipped;
1974 	struct pagevec pvec;
1975 	unsigned long index, end;
1976 	int ret = 0, err, nr_pages, i;
1977 	struct inode *inode = mpd->inode;
1978 	struct address_space *mapping = inode->i_mapping;
1979 
1980 	BUG_ON(mpd->next_page <= mpd->first_page);
1981 	/*
1982 	 * We need to start from the first_page to the next_page - 1
1983 	 * to make sure we also write the mapped dirty buffer_heads.
1984 	 * If we look at mpd->b_blocknr we would only be looking
1985 	 * at the currently mapped buffer_heads.
1986 	 */
1987 	index = mpd->first_page;
1988 	end = mpd->next_page - 1;
1989 
1990 	pagevec_init(&pvec, 0);
1991 	while (index <= end) {
1992 		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
1993 		if (nr_pages == 0)
1994 			break;
1995 		for (i = 0; i < nr_pages; i++) {
1996 			struct page *page = pvec.pages[i];
1997 
1998 			index = page->index;
1999 			if (index > end)
2000 				break;
2001 			index++;
2002 
2003 			BUG_ON(!PageLocked(page));
2004 			BUG_ON(PageWriteback(page));
2005 
2006 			pages_skipped = mpd->wbc->pages_skipped;
2007 			err = mapping->a_ops->writepage(page, mpd->wbc);
2008 			if (!err && (pages_skipped == mpd->wbc->pages_skipped))
2009 				/*
2010 				 * have successfully written the page
2011 				 * without skipping the same
2012 				 */
2013 				mpd->pages_written++;
2014 			/*
2015 			 * In error case, we have to continue because
2016 			 * remaining pages are still locked
2017 			 * XXX: unlock and re-dirty them?
2018 			 */
2019 			if (ret == 0)
2020 				ret = err;
2021 		}
2022 		pagevec_release(&pvec);
2023 	}
2024 	return ret;
2025 }
2026 
2027 /*
2028  * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers
2029  *
2030  * the function goes through all passed space and put actual disk
2031  * block numbers into buffer heads, dropping BH_Delay and BH_Unwritten
2032  */
2033 static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd,
2034 				 struct ext4_map_blocks *map)
2035 {
2036 	struct inode *inode = mpd->inode;
2037 	struct address_space *mapping = inode->i_mapping;
2038 	int blocks = map->m_len;
2039 	sector_t pblock = map->m_pblk, cur_logical;
2040 	struct buffer_head *head, *bh;
2041 	pgoff_t index, end;
2042 	struct pagevec pvec;
2043 	int nr_pages, i;
2044 
2045 	index = map->m_lblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
2046 	end = (map->m_lblk + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
2047 	cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2048 
2049 	pagevec_init(&pvec, 0);
2050 
2051 	while (index <= end) {
2052 		/* XXX: optimize tail */
2053 		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
2054 		if (nr_pages == 0)
2055 			break;
2056 		for (i = 0; i < nr_pages; i++) {
2057 			struct page *page = pvec.pages[i];
2058 
2059 			index = page->index;
2060 			if (index > end)
2061 				break;
2062 			index++;
2063 
2064 			BUG_ON(!PageLocked(page));
2065 			BUG_ON(PageWriteback(page));
2066 			BUG_ON(!page_has_buffers(page));
2067 
2068 			bh = page_buffers(page);
2069 			head = bh;
2070 
2071 			/* skip blocks out of the range */
2072 			do {
2073 				if (cur_logical >= map->m_lblk)
2074 					break;
2075 				cur_logical++;
2076 			} while ((bh = bh->b_this_page) != head);
2077 
2078 			do {
2079 				if (cur_logical >= map->m_lblk + blocks)
2080 					break;
2081 
2082 				if (buffer_delay(bh) || buffer_unwritten(bh)) {
2083 
2084 					BUG_ON(bh->b_bdev != inode->i_sb->s_bdev);
2085 
2086 					if (buffer_delay(bh)) {
2087 						clear_buffer_delay(bh);
2088 						bh->b_blocknr = pblock;
2089 					} else {
2090 						/*
2091 						 * unwritten already should have
2092 						 * blocknr assigned. Verify that
2093 						 */
2094 						clear_buffer_unwritten(bh);
2095 						BUG_ON(bh->b_blocknr != pblock);
2096 					}
2097 
2098 				} else if (buffer_mapped(bh))
2099 					BUG_ON(bh->b_blocknr != pblock);
2100 
2101 				if (map->m_flags & EXT4_MAP_UNINIT)
2102 					set_buffer_uninit(bh);
2103 				cur_logical++;
2104 				pblock++;
2105 			} while ((bh = bh->b_this_page) != head);
2106 		}
2107 		pagevec_release(&pvec);
2108 	}
2109 }
2110 
2111 
2112 static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
2113 					sector_t logical, long blk_cnt)
2114 {
2115 	int nr_pages, i;
2116 	pgoff_t index, end;
2117 	struct pagevec pvec;
2118 	struct inode *inode = mpd->inode;
2119 	struct address_space *mapping = inode->i_mapping;
2120 
2121 	index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
2122 	end   = (logical + blk_cnt - 1) >>
2123 				(PAGE_CACHE_SHIFT - inode->i_blkbits);
2124 	while (index <= end) {
2125 		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
2126 		if (nr_pages == 0)
2127 			break;
2128 		for (i = 0; i < nr_pages; i++) {
2129 			struct page *page = pvec.pages[i];
2130 			if (page->index > end)
2131 				break;
2132 			BUG_ON(!PageLocked(page));
2133 			BUG_ON(PageWriteback(page));
2134 			block_invalidatepage(page, 0);
2135 			ClearPageUptodate(page);
2136 			unlock_page(page);
2137 		}
2138 		index = pvec.pages[nr_pages - 1]->index + 1;
2139 		pagevec_release(&pvec);
2140 	}
2141 	return;
2142 }
2143 
2144 static void ext4_print_free_blocks(struct inode *inode)
2145 {
2146 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2147 	printk(KERN_CRIT "Total free blocks count %lld\n",
2148 	       ext4_count_free_blocks(inode->i_sb));
2149 	printk(KERN_CRIT "Free/Dirty block details\n");
2150 	printk(KERN_CRIT "free_blocks=%lld\n",
2151 	       (long long) percpu_counter_sum(&sbi->s_freeblocks_counter));
2152 	printk(KERN_CRIT "dirty_blocks=%lld\n",
2153 	       (long long) percpu_counter_sum(&sbi->s_dirtyblocks_counter));
2154 	printk(KERN_CRIT "Block reservation details\n");
2155 	printk(KERN_CRIT "i_reserved_data_blocks=%u\n",
2156 	       EXT4_I(inode)->i_reserved_data_blocks);
2157 	printk(KERN_CRIT "i_reserved_meta_blocks=%u\n",
2158 	       EXT4_I(inode)->i_reserved_meta_blocks);
2159 	return;
2160 }
2161 
2162 /*
2163  * mpage_da_map_blocks - go through given space
2164  *
2165  * @mpd - bh describing space
2166  *
2167  * The function skips space we know is already mapped to disk blocks.
2168  *
2169  */
2170 static int mpage_da_map_blocks(struct mpage_da_data *mpd)
2171 {
2172 	int err, blks, get_blocks_flags;
2173 	struct ext4_map_blocks map;
2174 	sector_t next = mpd->b_blocknr;
2175 	unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
2176 	loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
2177 	handle_t *handle = NULL;
2178 
2179 	/*
2180 	 * We consider only non-mapped and non-allocated blocks
2181 	 */
2182 	if ((mpd->b_state  & (1 << BH_Mapped)) &&
2183 		!(mpd->b_state & (1 << BH_Delay)) &&
2184 		!(mpd->b_state & (1 << BH_Unwritten)))
2185 		return 0;
2186 
2187 	/*
2188 	 * If we didn't accumulate anything to write simply return
2189 	 */
2190 	if (!mpd->b_size)
2191 		return 0;
2192 
2193 	handle = ext4_journal_current_handle();
2194 	BUG_ON(!handle);
2195 
2196 	/*
2197 	 * Call ext4_get_blocks() to allocate any delayed allocation
2198 	 * blocks, or to convert an uninitialized extent to be
2199 	 * initialized (in the case where we have written into
2200 	 * one or more preallocated blocks).
2201 	 *
2202 	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
2203 	 * indicate that we are on the delayed allocation path.  This
2204 	 * affects functions in many different parts of the allocation
2205 	 * call path.  This flag exists primarily because we don't
2206 	 * want to change *many* call functions, so ext4_get_blocks()
2207 	 * will set the magic i_delalloc_reserved_flag once the
2208 	 * inode's allocation semaphore is taken.
2209 	 *
2210 	 * If the blocks in questions were delalloc blocks, set
2211 	 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
2212 	 * variables are updated after the blocks have been allocated.
2213 	 */
2214 	map.m_lblk = next;
2215 	map.m_len = max_blocks;
2216 	get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
2217 	if (ext4_should_dioread_nolock(mpd->inode))
2218 		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2219 	if (mpd->b_state & (1 << BH_Delay))
2220 		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2221 
2222 	blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
2223 	if (blks < 0) {
2224 		err = blks;
2225 		/*
2226 		 * If get block returns with error we simply
2227 		 * return. Later writepage will redirty the page and
2228 		 * writepages will find the dirty page again
2229 		 */
2230 		if (err == -EAGAIN)
2231 			return 0;
2232 
2233 		if (err == -ENOSPC &&
2234 		    ext4_count_free_blocks(mpd->inode->i_sb)) {
2235 			mpd->retval = err;
2236 			return 0;
2237 		}
2238 
2239 		/*
2240 		 * get block failure will cause us to loop in
2241 		 * writepages, because a_ops->writepage won't be able
2242 		 * to make progress. The page will be redirtied by
2243 		 * writepage and writepages will again try to write
2244 		 * the same.
2245 		 */
2246 		ext4_msg(mpd->inode->i_sb, KERN_CRIT,
2247 			 "delayed block allocation failed for inode %lu at "
2248 			 "logical offset %llu with max blocks %zd with "
2249 			 "error %d", mpd->inode->i_ino,
2250 			 (unsigned long long) next,
2251 			 mpd->b_size >> mpd->inode->i_blkbits, err);
2252 		printk(KERN_CRIT "This should not happen!!  "
2253 		       "Data will be lost\n");
2254 		if (err == -ENOSPC) {
2255 			ext4_print_free_blocks(mpd->inode);
2256 		}
2257 		/* invalidate all the pages */
2258 		ext4_da_block_invalidatepages(mpd, next,
2259 				mpd->b_size >> mpd->inode->i_blkbits);
2260 		return err;
2261 	}
2262 	BUG_ON(blks == 0);
2263 
2264 	if (map.m_flags & EXT4_MAP_NEW) {
2265 		struct block_device *bdev = mpd->inode->i_sb->s_bdev;
2266 		int i;
2267 
2268 		for (i = 0; i < map.m_len; i++)
2269 			unmap_underlying_metadata(bdev, map.m_pblk + i);
2270 	}
2271 
2272 	/*
2273 	 * If blocks are delayed marked, we need to
2274 	 * put actual blocknr and drop delayed bit
2275 	 */
2276 	if ((mpd->b_state & (1 << BH_Delay)) ||
2277 	    (mpd->b_state & (1 << BH_Unwritten)))
2278 		mpage_put_bnr_to_bhs(mpd, &map);
2279 
2280 	if (ext4_should_order_data(mpd->inode)) {
2281 		err = ext4_jbd2_file_inode(handle, mpd->inode);
2282 		if (err)
2283 			return err;
2284 	}
2285 
2286 	/*
2287 	 * Update on-disk size along with block allocation.
2288 	 */
2289 	disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
2290 	if (disksize > i_size_read(mpd->inode))
2291 		disksize = i_size_read(mpd->inode);
2292 	if (disksize > EXT4_I(mpd->inode)->i_disksize) {
2293 		ext4_update_i_disksize(mpd->inode, disksize);
2294 		return ext4_mark_inode_dirty(handle, mpd->inode);
2295 	}
2296 
2297 	return 0;
2298 }
2299 
2300 #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
2301 		(1 << BH_Delay) | (1 << BH_Unwritten))
2302 
2303 /*
2304  * mpage_add_bh_to_extent - try to add one more block to extent of blocks
2305  *
2306  * @mpd->lbh - extent of blocks
2307  * @logical - logical number of the block in the file
2308  * @bh - bh of the block (used to access block's state)
2309  *
2310  * the function is used to collect contig. blocks in same state
2311  */
2312 static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
2313 				   sector_t logical, size_t b_size,
2314 				   unsigned long b_state)
2315 {
2316 	sector_t next;
2317 	int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
2318 
2319 	/*
2320 	 * XXX Don't go larger than mballoc is willing to allocate
2321 	 * This is a stopgap solution.  We eventually need to fold
2322 	 * mpage_da_submit_io() into this function and then call
2323 	 * ext4_get_blocks() multiple times in a loop
2324 	 */
2325 	if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
2326 		goto flush_it;
2327 
2328 	/* check if thereserved journal credits might overflow */
2329 	if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
2330 		if (nrblocks >= EXT4_MAX_TRANS_DATA) {
2331 			/*
2332 			 * With non-extent format we are limited by the journal
2333 			 * credit available.  Total credit needed to insert
2334 			 * nrblocks contiguous blocks is dependent on the
2335 			 * nrblocks.  So limit nrblocks.
2336 			 */
2337 			goto flush_it;
2338 		} else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
2339 				EXT4_MAX_TRANS_DATA) {
2340 			/*
2341 			 * Adding the new buffer_head would make it cross the
2342 			 * allowed limit for which we have journal credit
2343 			 * reserved. So limit the new bh->b_size
2344 			 */
2345 			b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
2346 						mpd->inode->i_blkbits;
2347 			/* we will do mpage_da_submit_io in the next loop */
2348 		}
2349 	}
2350 	/*
2351 	 * First block in the extent
2352 	 */
2353 	if (mpd->b_size == 0) {
2354 		mpd->b_blocknr = logical;
2355 		mpd->b_size = b_size;
2356 		mpd->b_state = b_state & BH_FLAGS;
2357 		return;
2358 	}
2359 
2360 	next = mpd->b_blocknr + nrblocks;
2361 	/*
2362 	 * Can we merge the block to our big extent?
2363 	 */
2364 	if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
2365 		mpd->b_size += b_size;
2366 		return;
2367 	}
2368 
2369 flush_it:
2370 	/*
2371 	 * We couldn't merge the block to our extent, so we
2372 	 * need to flush current  extent and start new one
2373 	 */
2374 	if (mpage_da_map_blocks(mpd) == 0)
2375 		mpage_da_submit_io(mpd);
2376 	mpd->io_done = 1;
2377 	return;
2378 }
2379 
2380 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
2381 {
2382 	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
2383 }
2384 
2385 /*
2386  * __mpage_da_writepage - finds extent of pages and blocks
2387  *
2388  * @page: page to consider
2389  * @wbc: not used, we just follow rules
2390  * @data: context
2391  *
2392  * The function finds extents of pages and scan them for all blocks.
2393  */
2394 static int __mpage_da_writepage(struct page *page,
2395 				struct writeback_control *wbc, void *data)
2396 {
2397 	struct mpage_da_data *mpd = data;
2398 	struct inode *inode = mpd->inode;
2399 	struct buffer_head *bh, *head;
2400 	sector_t logical;
2401 
2402 	/*
2403 	 * Can we merge this page to current extent?
2404 	 */
2405 	if (mpd->next_page != page->index) {
2406 		/*
2407 		 * Nope, we can't. So, we map non-allocated blocks
2408 		 * and start IO on them using writepage()
2409 		 */
2410 		if (mpd->next_page != mpd->first_page) {
2411 			if (mpage_da_map_blocks(mpd) == 0)
2412 				mpage_da_submit_io(mpd);
2413 			/*
2414 			 * skip rest of the page in the page_vec
2415 			 */
2416 			mpd->io_done = 1;
2417 			redirty_page_for_writepage(wbc, page);
2418 			unlock_page(page);
2419 			return MPAGE_DA_EXTENT_TAIL;
2420 		}
2421 
2422 		/*
2423 		 * Start next extent of pages ...
2424 		 */
2425 		mpd->first_page = page->index;
2426 
2427 		/*
2428 		 * ... and blocks
2429 		 */
2430 		mpd->b_size = 0;
2431 		mpd->b_state = 0;
2432 		mpd->b_blocknr = 0;
2433 	}
2434 
2435 	mpd->next_page = page->index + 1;
2436 	logical = (sector_t) page->index <<
2437 		  (PAGE_CACHE_SHIFT - inode->i_blkbits);
2438 
2439 	if (!page_has_buffers(page)) {
2440 		mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE,
2441 				       (1 << BH_Dirty) | (1 << BH_Uptodate));
2442 		if (mpd->io_done)
2443 			return MPAGE_DA_EXTENT_TAIL;
2444 	} else {
2445 		/*
2446 		 * Page with regular buffer heads, just add all dirty ones
2447 		 */
2448 		head = page_buffers(page);
2449 		bh = head;
2450 		do {
2451 			BUG_ON(buffer_locked(bh));
2452 			/*
2453 			 * We need to try to allocate
2454 			 * unmapped blocks in the same page.
2455 			 * Otherwise we won't make progress
2456 			 * with the page in ext4_writepage
2457 			 */
2458 			if (ext4_bh_delay_or_unwritten(NULL, bh)) {
2459 				mpage_add_bh_to_extent(mpd, logical,
2460 						       bh->b_size,
2461 						       bh->b_state);
2462 				if (mpd->io_done)
2463 					return MPAGE_DA_EXTENT_TAIL;
2464 			} else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
2465 				/*
2466 				 * mapped dirty buffer. We need to update
2467 				 * the b_state because we look at
2468 				 * b_state in mpage_da_map_blocks. We don't
2469 				 * update b_size because if we find an
2470 				 * unmapped buffer_head later we need to
2471 				 * use the b_state flag of that buffer_head.
2472 				 */
2473 				if (mpd->b_size == 0)
2474 					mpd->b_state = bh->b_state & BH_FLAGS;
2475 			}
2476 			logical++;
2477 		} while ((bh = bh->b_this_page) != head);
2478 	}
2479 
2480 	return 0;
2481 }
2482 
2483 /*
2484  * This is a special get_blocks_t callback which is used by
2485  * ext4_da_write_begin().  It will either return mapped block or
2486  * reserve space for a single block.
2487  *
2488  * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
2489  * We also have b_blocknr = -1 and b_bdev initialized properly
2490  *
2491  * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
2492  * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
2493  * initialized properly.
2494  */
2495 static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
2496 				  struct buffer_head *bh, int create)
2497 {
2498 	struct ext4_map_blocks map;
2499 	int ret = 0;
2500 	sector_t invalid_block = ~((sector_t) 0xffff);
2501 
2502 	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
2503 		invalid_block = ~0;
2504 
2505 	BUG_ON(create == 0);
2506 	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
2507 
2508 	map.m_lblk = iblock;
2509 	map.m_len = 1;
2510 
2511 	/*
2512 	 * first, we need to know whether the block is allocated already
2513 	 * preallocated blocks are unmapped but should treated
2514 	 * the same as allocated blocks.
2515 	 */
2516 	ret = ext4_map_blocks(NULL, inode, &map, 0);
2517 	if (ret < 0)
2518 		return ret;
2519 	if (ret == 0) {
2520 		if (buffer_delay(bh))
2521 			return 0; /* Not sure this could or should happen */
2522 		/*
2523 		 * XXX: __block_prepare_write() unmaps passed block,
2524 		 * is it OK?
2525 		 */
2526 		ret = ext4_da_reserve_space(inode, iblock);
2527 		if (ret)
2528 			/* not enough space to reserve */
2529 			return ret;
2530 
2531 		map_bh(bh, inode->i_sb, invalid_block);
2532 		set_buffer_new(bh);
2533 		set_buffer_delay(bh);
2534 		return 0;
2535 	}
2536 
2537 	map_bh(bh, inode->i_sb, map.m_pblk);
2538 	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
2539 
2540 	if (buffer_unwritten(bh)) {
2541 		/* A delayed write to unwritten bh should be marked
2542 		 * new and mapped.  Mapped ensures that we don't do
2543 		 * get_block multiple times when we write to the same
2544 		 * offset and new ensures that we do proper zero out
2545 		 * for partial write.
2546 		 */
2547 		set_buffer_new(bh);
2548 		set_buffer_mapped(bh);
2549 	}
2550 	return 0;
2551 }
2552 
2553 /*
2554  * This function is used as a standard get_block_t calback function
2555  * when there is no desire to allocate any blocks.  It is used as a
2556  * callback function for block_prepare_write(), nobh_writepage(), and
2557  * block_write_full_page().  These functions should only try to map a
2558  * single block at a time.
2559  *
2560  * Since this function doesn't do block allocations even if the caller
2561  * requests it by passing in create=1, it is critically important that
2562  * any caller checks to make sure that any buffer heads are returned
2563  * by this function are either all already mapped or marked for
2564  * delayed allocation before calling nobh_writepage() or
2565  * block_write_full_page().  Otherwise, b_blocknr could be left
2566  * unitialized, and the page write functions will be taken by
2567  * surprise.
2568  */
2569 static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
2570 				   struct buffer_head *bh_result, int create)
2571 {
2572 	BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
2573 	return _ext4_get_block(inode, iblock, bh_result, 0);
2574 }
2575 
2576 static int bget_one(handle_t *handle, struct buffer_head *bh)
2577 {
2578 	get_bh(bh);
2579 	return 0;
2580 }
2581 
2582 static int bput_one(handle_t *handle, struct buffer_head *bh)
2583 {
2584 	put_bh(bh);
2585 	return 0;
2586 }
2587 
2588 static int __ext4_journalled_writepage(struct page *page,
2589 				       unsigned int len)
2590 {
2591 	struct address_space *mapping = page->mapping;
2592 	struct inode *inode = mapping->host;
2593 	struct buffer_head *page_bufs;
2594 	handle_t *handle = NULL;
2595 	int ret = 0;
2596 	int err;
2597 
2598 	page_bufs = page_buffers(page);
2599 	BUG_ON(!page_bufs);
2600 	walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
2601 	/* As soon as we unlock the page, it can go away, but we have
2602 	 * references to buffers so we are safe */
2603 	unlock_page(page);
2604 
2605 	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
2606 	if (IS_ERR(handle)) {
2607 		ret = PTR_ERR(handle);
2608 		goto out;
2609 	}
2610 
2611 	ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
2612 				do_journal_get_write_access);
2613 
2614 	err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
2615 				write_end_fn);
2616 	if (ret == 0)
2617 		ret = err;
2618 	err = ext4_journal_stop(handle);
2619 	if (!ret)
2620 		ret = err;
2621 
2622 	walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
2623 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
2624 out:
2625 	return ret;
2626 }
2627 
2628 static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
2629 static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
2630 
2631 /*
2632  * Note that we don't need to start a transaction unless we're journaling data
2633  * because we should have holes filled from ext4_page_mkwrite(). We even don't
2634  * need to file the inode to the transaction's list in ordered mode because if
2635  * we are writing back data added by write(), the inode is already there and if
2636  * we are writing back data modified via mmap(), noone guarantees in which
2637  * transaction the data will hit the disk. In case we are journaling data, we
2638  * cannot start transaction directly because transaction start ranks above page
2639  * lock so we have to do some magic.
2640  *
2641  * This function can get called via...
2642  *   - ext4_da_writepages after taking page lock (have journal handle)
2643  *   - journal_submit_inode_data_buffers (no journal handle)
2644  *   - shrink_page_list via pdflush (no journal handle)
2645  *   - grab_page_cache when doing write_begin (have journal handle)
2646  *
2647  * We don't do any block allocation in this function. If we have page with
2648  * multiple blocks we need to write those buffer_heads that are mapped. This
2649  * is important for mmaped based write. So if we do with blocksize 1K
2650  * truncate(f, 1024);
2651  * a = mmap(f, 0, 4096);
2652  * a[0] = 'a';
2653  * truncate(f, 4096);
2654  * we have in the page first buffer_head mapped via page_mkwrite call back
2655  * but other bufer_heads would be unmapped but dirty(dirty done via the
2656  * do_wp_page). So writepage should write the first block. If we modify
2657  * the mmap area beyond 1024 we will again get a page_fault and the
2658  * page_mkwrite callback will do the block allocation and mark the
2659  * buffer_heads mapped.
2660  *
2661  * We redirty the page if we have any buffer_heads that is either delay or
2662  * unwritten in the page.
2663  *
2664  * We can get recursively called as show below.
2665  *
2666  *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
2667  *		ext4_writepage()
2668  *
2669  * But since we don't do any block allocation we should not deadlock.
2670  * Page also have the dirty flag cleared so we don't get recurive page_lock.
2671  */
2672 static int ext4_writepage(struct page *page,
2673 			  struct writeback_control *wbc)
2674 {
2675 	int ret = 0;
2676 	loff_t size;
2677 	unsigned int len;
2678 	struct buffer_head *page_bufs = NULL;
2679 	struct inode *inode = page->mapping->host;
2680 
2681 	trace_ext4_writepage(inode, page);
2682 	size = i_size_read(inode);
2683 	if (page->index == size >> PAGE_CACHE_SHIFT)
2684 		len = size & ~PAGE_CACHE_MASK;
2685 	else
2686 		len = PAGE_CACHE_SIZE;
2687 
2688 	if (page_has_buffers(page)) {
2689 		page_bufs = page_buffers(page);
2690 		if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2691 					ext4_bh_delay_or_unwritten)) {
2692 			/*
2693 			 * We don't want to do  block allocation
2694 			 * So redirty the page and return
2695 			 * We may reach here when we do a journal commit
2696 			 * via journal_submit_inode_data_buffers.
2697 			 * If we don't have mapping block we just ignore
2698 			 * them. We can also reach here via shrink_page_list
2699 			 */
2700 			redirty_page_for_writepage(wbc, page);
2701 			unlock_page(page);
2702 			return 0;
2703 		}
2704 	} else {
2705 		/*
2706 		 * The test for page_has_buffers() is subtle:
2707 		 * We know the page is dirty but it lost buffers. That means
2708 		 * that at some moment in time after write_begin()/write_end()
2709 		 * has been called all buffers have been clean and thus they
2710 		 * must have been written at least once. So they are all
2711 		 * mapped and we can happily proceed with mapping them
2712 		 * and writing the page.
2713 		 *
2714 		 * Try to initialize the buffer_heads and check whether
2715 		 * all are mapped and non delay. We don't want to
2716 		 * do block allocation here.
2717 		 */
2718 		ret = block_prepare_write(page, 0, len,
2719 					  noalloc_get_block_write);
2720 		if (!ret) {
2721 			page_bufs = page_buffers(page);
2722 			/* check whether all are mapped and non delay */
2723 			if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2724 						ext4_bh_delay_or_unwritten)) {
2725 				redirty_page_for_writepage(wbc, page);
2726 				unlock_page(page);
2727 				return 0;
2728 			}
2729 		} else {
2730 			/*
2731 			 * We can't do block allocation here
2732 			 * so just redity the page and unlock
2733 			 * and return
2734 			 */
2735 			redirty_page_for_writepage(wbc, page);
2736 			unlock_page(page);
2737 			return 0;
2738 		}
2739 		/* now mark the buffer_heads as dirty and uptodate */
2740 		block_commit_write(page, 0, len);
2741 	}
2742 
2743 	if (PageChecked(page) && ext4_should_journal_data(inode)) {
2744 		/*
2745 		 * It's mmapped pagecache.  Add buffers and journal it.  There
2746 		 * doesn't seem much point in redirtying the page here.
2747 		 */
2748 		ClearPageChecked(page);
2749 		return __ext4_journalled_writepage(page, len);
2750 	}
2751 
2752 	if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
2753 		ret = nobh_writepage(page, noalloc_get_block_write, wbc);
2754 	else if (page_bufs && buffer_uninit(page_bufs)) {
2755 		ext4_set_bh_endio(page_bufs, inode);
2756 		ret = block_write_full_page_endio(page, noalloc_get_block_write,
2757 					    wbc, ext4_end_io_buffer_write);
2758 	} else
2759 		ret = block_write_full_page(page, noalloc_get_block_write,
2760 					    wbc);
2761 
2762 	return ret;
2763 }
2764 
2765 /*
2766  * This is called via ext4_da_writepages() to
2767  * calulate the total number of credits to reserve to fit
2768  * a single extent allocation into a single transaction,
2769  * ext4_da_writpeages() will loop calling this before
2770  * the block allocation.
2771  */
2772 
2773 static int ext4_da_writepages_trans_blocks(struct inode *inode)
2774 {
2775 	int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
2776 
2777 	/*
2778 	 * With non-extent format the journal credit needed to
2779 	 * insert nrblocks contiguous block is dependent on
2780 	 * number of contiguous block. So we will limit
2781 	 * number of contiguous block to a sane value
2782 	 */
2783 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
2784 	    (max_blocks > EXT4_MAX_TRANS_DATA))
2785 		max_blocks = EXT4_MAX_TRANS_DATA;
2786 
2787 	return ext4_chunk_trans_blocks(inode, max_blocks);
2788 }
2789 
2790 /*
2791  * write_cache_pages_da - walk the list of dirty pages of the given
2792  * address space and call the callback function (which usually writes
2793  * the pages).
2794  *
2795  * This is a forked version of write_cache_pages().  Differences:
2796  *	Range cyclic is ignored.
2797  *	no_nrwrite_index_update is always presumed true
2798  */
2799 static int write_cache_pages_da(struct address_space *mapping,
2800 				struct writeback_control *wbc,
2801 				struct mpage_da_data *mpd)
2802 {
2803 	int ret = 0;
2804 	int done = 0;
2805 	struct pagevec pvec;
2806 	int nr_pages;
2807 	pgoff_t index;
2808 	pgoff_t end;		/* Inclusive */
2809 	long nr_to_write = wbc->nr_to_write;
2810 
2811 	pagevec_init(&pvec, 0);
2812 	index = wbc->range_start >> PAGE_CACHE_SHIFT;
2813 	end = wbc->range_end >> PAGE_CACHE_SHIFT;
2814 
2815 	while (!done && (index <= end)) {
2816 		int i;
2817 
2818 		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2819 			      PAGECACHE_TAG_DIRTY,
2820 			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
2821 		if (nr_pages == 0)
2822 			break;
2823 
2824 		for (i = 0; i < nr_pages; i++) {
2825 			struct page *page = pvec.pages[i];
2826 
2827 			/*
2828 			 * At this point, the page may be truncated or
2829 			 * invalidated (changing page->mapping to NULL), or
2830 			 * even swizzled back from swapper_space to tmpfs file
2831 			 * mapping. However, page->index will not change
2832 			 * because we have a reference on the page.
2833 			 */
2834 			if (page->index > end) {
2835 				done = 1;
2836 				break;
2837 			}
2838 
2839 			lock_page(page);
2840 
2841 			/*
2842 			 * Page truncated or invalidated. We can freely skip it
2843 			 * then, even for data integrity operations: the page
2844 			 * has disappeared concurrently, so there could be no
2845 			 * real expectation of this data interity operation
2846 			 * even if there is now a new, dirty page at the same
2847 			 * pagecache address.
2848 			 */
2849 			if (unlikely(page->mapping != mapping)) {
2850 continue_unlock:
2851 				unlock_page(page);
2852 				continue;
2853 			}
2854 
2855 			if (!PageDirty(page)) {
2856 				/* someone wrote it for us */
2857 				goto continue_unlock;
2858 			}
2859 
2860 			if (PageWriteback(page)) {
2861 				if (wbc->sync_mode != WB_SYNC_NONE)
2862 					wait_on_page_writeback(page);
2863 				else
2864 					goto continue_unlock;
2865 			}
2866 
2867 			BUG_ON(PageWriteback(page));
2868 			if (!clear_page_dirty_for_io(page))
2869 				goto continue_unlock;
2870 
2871 			ret = __mpage_da_writepage(page, wbc, mpd);
2872 			if (unlikely(ret)) {
2873 				if (ret == AOP_WRITEPAGE_ACTIVATE) {
2874 					unlock_page(page);
2875 					ret = 0;
2876 				} else {
2877 					done = 1;
2878 					break;
2879 				}
2880 			}
2881 
2882 			if (nr_to_write > 0) {
2883 				nr_to_write--;
2884 				if (nr_to_write == 0 &&
2885 				    wbc->sync_mode == WB_SYNC_NONE) {
2886 					/*
2887 					 * We stop writing back only if we are
2888 					 * not doing integrity sync. In case of
2889 					 * integrity sync we have to keep going
2890 					 * because someone may be concurrently
2891 					 * dirtying pages, and we might have
2892 					 * synced a lot of newly appeared dirty
2893 					 * pages, but have not synced all of the
2894 					 * old dirty pages.
2895 					 */
2896 					done = 1;
2897 					break;
2898 				}
2899 			}
2900 		}
2901 		pagevec_release(&pvec);
2902 		cond_resched();
2903 	}
2904 	return ret;
2905 }
2906 
2907 
2908 static int ext4_da_writepages(struct address_space *mapping,
2909 			      struct writeback_control *wbc)
2910 {
2911 	pgoff_t	index;
2912 	int range_whole = 0;
2913 	handle_t *handle = NULL;
2914 	struct mpage_da_data mpd;
2915 	struct inode *inode = mapping->host;
2916 	int pages_written = 0;
2917 	long pages_skipped;
2918 	unsigned int max_pages;
2919 	int range_cyclic, cycled = 1, io_done = 0;
2920 	int needed_blocks, ret = 0;
2921 	long desired_nr_to_write, nr_to_writebump = 0;
2922 	loff_t range_start = wbc->range_start;
2923 	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2924 
2925 	trace_ext4_da_writepages(inode, wbc);
2926 
2927 	/*
2928 	 * No pages to write? This is mainly a kludge to avoid starting
2929 	 * a transaction for special inodes like journal inode on last iput()
2930 	 * because that could violate lock ordering on umount
2931 	 */
2932 	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2933 		return 0;
2934 
2935 	/*
2936 	 * If the filesystem has aborted, it is read-only, so return
2937 	 * right away instead of dumping stack traces later on that
2938 	 * will obscure the real source of the problem.  We test
2939 	 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
2940 	 * the latter could be true if the filesystem is mounted
2941 	 * read-only, and in that case, ext4_da_writepages should
2942 	 * *never* be called, so if that ever happens, we would want
2943 	 * the stack trace.
2944 	 */
2945 	if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
2946 		return -EROFS;
2947 
2948 	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2949 		range_whole = 1;
2950 
2951 	range_cyclic = wbc->range_cyclic;
2952 	if (wbc->range_cyclic) {
2953 		index = mapping->writeback_index;
2954 		if (index)
2955 			cycled = 0;
2956 		wbc->range_start = index << PAGE_CACHE_SHIFT;
2957 		wbc->range_end  = LLONG_MAX;
2958 		wbc->range_cyclic = 0;
2959 	} else
2960 		index = wbc->range_start >> PAGE_CACHE_SHIFT;
2961 
2962 	/*
2963 	 * This works around two forms of stupidity.  The first is in
2964 	 * the writeback code, which caps the maximum number of pages
2965 	 * written to be 1024 pages.  This is wrong on multiple
2966 	 * levels; different architectues have a different page size,
2967 	 * which changes the maximum amount of data which gets
2968 	 * written.  Secondly, 4 megabytes is way too small.  XFS
2969 	 * forces this value to be 16 megabytes by multiplying
2970 	 * nr_to_write parameter by four, and then relies on its
2971 	 * allocator to allocate larger extents to make them
2972 	 * contiguous.  Unfortunately this brings us to the second
2973 	 * stupidity, which is that ext4's mballoc code only allocates
2974 	 * at most 2048 blocks.  So we force contiguous writes up to
2975 	 * the number of dirty blocks in the inode, or
2976 	 * sbi->max_writeback_mb_bump whichever is smaller.
2977 	 */
2978 	max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
2979 	if (!range_cyclic && range_whole)
2980 		desired_nr_to_write = wbc->nr_to_write * 8;
2981 	else
2982 		desired_nr_to_write = ext4_num_dirty_pages(inode, index,
2983 							   max_pages);
2984 	if (desired_nr_to_write > max_pages)
2985 		desired_nr_to_write = max_pages;
2986 
2987 	if (wbc->nr_to_write < desired_nr_to_write) {
2988 		nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
2989 		wbc->nr_to_write = desired_nr_to_write;
2990 	}
2991 
2992 	mpd.wbc = wbc;
2993 	mpd.inode = mapping->host;
2994 
2995 	pages_skipped = wbc->pages_skipped;
2996 
2997 retry:
2998 	while (!ret && wbc->nr_to_write > 0) {
2999 
3000 		/*
3001 		 * we  insert one extent at a time. So we need
3002 		 * credit needed for single extent allocation.
3003 		 * journalled mode is currently not supported
3004 		 * by delalloc
3005 		 */
3006 		BUG_ON(ext4_should_journal_data(inode));
3007 		needed_blocks = ext4_da_writepages_trans_blocks(inode);
3008 
3009 		/* start a new transaction*/
3010 		handle = ext4_journal_start(inode, needed_blocks);
3011 		if (IS_ERR(handle)) {
3012 			ret = PTR_ERR(handle);
3013 			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
3014 			       "%ld pages, ino %lu; err %d", __func__,
3015 				wbc->nr_to_write, inode->i_ino, ret);
3016 			goto out_writepages;
3017 		}
3018 
3019 		/*
3020 		 * Now call __mpage_da_writepage to find the next
3021 		 * contiguous region of logical blocks that need
3022 		 * blocks to be allocated by ext4.  We don't actually
3023 		 * submit the blocks for I/O here, even though
3024 		 * write_cache_pages thinks it will, and will set the
3025 		 * pages as clean for write before calling
3026 		 * __mpage_da_writepage().
3027 		 */
3028 		mpd.b_size = 0;
3029 		mpd.b_state = 0;
3030 		mpd.b_blocknr = 0;
3031 		mpd.first_page = 0;
3032 		mpd.next_page = 0;
3033 		mpd.io_done = 0;
3034 		mpd.pages_written = 0;
3035 		mpd.retval = 0;
3036 		ret = write_cache_pages_da(mapping, wbc, &mpd);
3037 		/*
3038 		 * If we have a contiguous extent of pages and we
3039 		 * haven't done the I/O yet, map the blocks and submit
3040 		 * them for I/O.
3041 		 */
3042 		if (!mpd.io_done && mpd.next_page != mpd.first_page) {
3043 			if (mpage_da_map_blocks(&mpd) == 0)
3044 				mpage_da_submit_io(&mpd);
3045 			mpd.io_done = 1;
3046 			ret = MPAGE_DA_EXTENT_TAIL;
3047 		}
3048 		trace_ext4_da_write_pages(inode, &mpd);
3049 		wbc->nr_to_write -= mpd.pages_written;
3050 
3051 		ext4_journal_stop(handle);
3052 
3053 		if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
3054 			/* commit the transaction which would
3055 			 * free blocks released in the transaction
3056 			 * and try again
3057 			 */
3058 			jbd2_journal_force_commit_nested(sbi->s_journal);
3059 			wbc->pages_skipped = pages_skipped;
3060 			ret = 0;
3061 		} else if (ret == MPAGE_DA_EXTENT_TAIL) {
3062 			/*
3063 			 * got one extent now try with
3064 			 * rest of the pages
3065 			 */
3066 			pages_written += mpd.pages_written;
3067 			wbc->pages_skipped = pages_skipped;
3068 			ret = 0;
3069 			io_done = 1;
3070 		} else if (wbc->nr_to_write)
3071 			/*
3072 			 * There is no more writeout needed
3073 			 * or we requested for a noblocking writeout
3074 			 * and we found the device congested
3075 			 */
3076 			break;
3077 	}
3078 	if (!io_done && !cycled) {
3079 		cycled = 1;
3080 		index = 0;
3081 		wbc->range_start = index << PAGE_CACHE_SHIFT;
3082 		wbc->range_end  = mapping->writeback_index - 1;
3083 		goto retry;
3084 	}
3085 	if (pages_skipped != wbc->pages_skipped)
3086 		ext4_msg(inode->i_sb, KERN_CRIT,
3087 			 "This should not happen leaving %s "
3088 			 "with nr_to_write = %ld ret = %d",
3089 			 __func__, wbc->nr_to_write, ret);
3090 
3091 	/* Update index */
3092 	index += pages_written;
3093 	wbc->range_cyclic = range_cyclic;
3094 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
3095 		/*
3096 		 * set the writeback_index so that range_cyclic
3097 		 * mode will write it back later
3098 		 */
3099 		mapping->writeback_index = index;
3100 
3101 out_writepages:
3102 	wbc->nr_to_write -= nr_to_writebump;
3103 	wbc->range_start = range_start;
3104 	trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
3105 	return ret;
3106 }
3107 
3108 #define FALL_BACK_TO_NONDELALLOC 1
3109 static int ext4_nonda_switch(struct super_block *sb)
3110 {
3111 	s64 free_blocks, dirty_blocks;
3112 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3113 
3114 	/*
3115 	 * switch to non delalloc mode if we are running low
3116 	 * on free block. The free block accounting via percpu
3117 	 * counters can get slightly wrong with percpu_counter_batch getting
3118 	 * accumulated on each CPU without updating global counters
3119 	 * Delalloc need an accurate free block accounting. So switch
3120 	 * to non delalloc when we are near to error range.
3121 	 */
3122 	free_blocks  = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
3123 	dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter);
3124 	if (2 * free_blocks < 3 * dirty_blocks ||
3125 		free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) {
3126 		/*
3127 		 * free block count is less than 150% of dirty blocks
3128 		 * or free blocks is less than watermark
3129 		 */
3130 		return 1;
3131 	}
3132 	/*
3133 	 * Even if we don't switch but are nearing capacity,
3134 	 * start pushing delalloc when 1/2 of free blocks are dirty.
3135 	 */
3136 	if (free_blocks < 2 * dirty_blocks)
3137 		writeback_inodes_sb_if_idle(sb);
3138 
3139 	return 0;
3140 }
3141 
3142 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
3143 			       loff_t pos, unsigned len, unsigned flags,
3144 			       struct page **pagep, void **fsdata)
3145 {
3146 	int ret, retries = 0;
3147 	struct page *page;
3148 	pgoff_t index;
3149 	unsigned from, to;
3150 	struct inode *inode = mapping->host;
3151 	handle_t *handle;
3152 
3153 	index = pos >> PAGE_CACHE_SHIFT;
3154 	from = pos & (PAGE_CACHE_SIZE - 1);
3155 	to = from + len;
3156 
3157 	if (ext4_nonda_switch(inode->i_sb)) {
3158 		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
3159 		return ext4_write_begin(file, mapping, pos,
3160 					len, flags, pagep, fsdata);
3161 	}
3162 	*fsdata = (void *)0;
3163 	trace_ext4_da_write_begin(inode, pos, len, flags);
3164 retry:
3165 	/*
3166 	 * With delayed allocation, we don't log the i_disksize update
3167 	 * if there is delayed block allocation. But we still need
3168 	 * to journalling the i_disksize update if writes to the end
3169 	 * of file which has an already mapped buffer.
3170 	 */
3171 	handle = ext4_journal_start(inode, 1);
3172 	if (IS_ERR(handle)) {
3173 		ret = PTR_ERR(handle);
3174 		goto out;
3175 	}
3176 	/* We cannot recurse into the filesystem as the transaction is already
3177 	 * started */
3178 	flags |= AOP_FLAG_NOFS;
3179 
3180 	page = grab_cache_page_write_begin(mapping, index, flags);
3181 	if (!page) {
3182 		ext4_journal_stop(handle);
3183 		ret = -ENOMEM;
3184 		goto out;
3185 	}
3186 	*pagep = page;
3187 
3188 	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
3189 				ext4_da_get_block_prep);
3190 	if (ret < 0) {
3191 		unlock_page(page);
3192 		ext4_journal_stop(handle);
3193 		page_cache_release(page);
3194 		/*
3195 		 * block_write_begin may have instantiated a few blocks
3196 		 * outside i_size.  Trim these off again. Don't need
3197 		 * i_size_read because we hold i_mutex.
3198 		 */
3199 		if (pos + len > inode->i_size)
3200 			ext4_truncate_failed_write(inode);
3201 	}
3202 
3203 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
3204 		goto retry;
3205 out:
3206 	return ret;
3207 }
3208 
3209 /*
3210  * Check if we should update i_disksize
3211  * when write to the end of file but not require block allocation
3212  */
3213 static int ext4_da_should_update_i_disksize(struct page *page,
3214 					    unsigned long offset)
3215 {
3216 	struct buffer_head *bh;
3217 	struct inode *inode = page->mapping->host;
3218 	unsigned int idx;
3219 	int i;
3220 
3221 	bh = page_buffers(page);
3222 	idx = offset >> inode->i_blkbits;
3223 
3224 	for (i = 0; i < idx; i++)
3225 		bh = bh->b_this_page;
3226 
3227 	if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
3228 		return 0;
3229 	return 1;
3230 }
3231 
3232 static int ext4_da_write_end(struct file *file,
3233 			     struct address_space *mapping,
3234 			     loff_t pos, unsigned len, unsigned copied,
3235 			     struct page *page, void *fsdata)
3236 {
3237 	struct inode *inode = mapping->host;
3238 	int ret = 0, ret2;
3239 	handle_t *handle = ext4_journal_current_handle();
3240 	loff_t new_i_size;
3241 	unsigned long start, end;
3242 	int write_mode = (int)(unsigned long)fsdata;
3243 
3244 	if (write_mode == FALL_BACK_TO_NONDELALLOC) {
3245 		if (ext4_should_order_data(inode)) {
3246 			return ext4_ordered_write_end(file, mapping, pos,
3247 					len, copied, page, fsdata);
3248 		} else if (ext4_should_writeback_data(inode)) {
3249 			return ext4_writeback_write_end(file, mapping, pos,
3250 					len, copied, page, fsdata);
3251 		} else {
3252 			BUG();
3253 		}
3254 	}
3255 
3256 	trace_ext4_da_write_end(inode, pos, len, copied);
3257 	start = pos & (PAGE_CACHE_SIZE - 1);
3258 	end = start + copied - 1;
3259 
3260 	/*
3261 	 * generic_write_end() will run mark_inode_dirty() if i_size
3262 	 * changes.  So let's piggyback the i_disksize mark_inode_dirty
3263 	 * into that.
3264 	 */
3265 
3266 	new_i_size = pos + copied;
3267 	if (new_i_size > EXT4_I(inode)->i_disksize) {
3268 		if (ext4_da_should_update_i_disksize(page, end)) {
3269 			down_write(&EXT4_I(inode)->i_data_sem);
3270 			if (new_i_size > EXT4_I(inode)->i_disksize) {
3271 				/*
3272 				 * Updating i_disksize when extending file
3273 				 * without needing block allocation
3274 				 */
3275 				if (ext4_should_order_data(inode))
3276 					ret = ext4_jbd2_file_inode(handle,
3277 								   inode);
3278 
3279 				EXT4_I(inode)->i_disksize = new_i_size;
3280 			}
3281 			up_write(&EXT4_I(inode)->i_data_sem);
3282 			/* We need to mark inode dirty even if
3283 			 * new_i_size is less that inode->i_size
3284 			 * bu greater than i_disksize.(hint delalloc)
3285 			 */
3286 			ext4_mark_inode_dirty(handle, inode);
3287 		}
3288 	}
3289 	ret2 = generic_write_end(file, mapping, pos, len, copied,
3290 							page, fsdata);
3291 	copied = ret2;
3292 	if (ret2 < 0)
3293 		ret = ret2;
3294 	ret2 = ext4_journal_stop(handle);
3295 	if (!ret)
3296 		ret = ret2;
3297 
3298 	return ret ? ret : copied;
3299 }
3300 
3301 static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
3302 {
3303 	/*
3304 	 * Drop reserved blocks
3305 	 */
3306 	BUG_ON(!PageLocked(page));
3307 	if (!page_has_buffers(page))
3308 		goto out;
3309 
3310 	ext4_da_page_release_reservation(page, offset);
3311 
3312 out:
3313 	ext4_invalidatepage(page, offset);
3314 
3315 	return;
3316 }
3317 
3318 /*
3319  * Force all delayed allocation blocks to be allocated for a given inode.
3320  */
3321 int ext4_alloc_da_blocks(struct inode *inode)
3322 {
3323 	trace_ext4_alloc_da_blocks(inode);
3324 
3325 	if (!EXT4_I(inode)->i_reserved_data_blocks &&
3326 	    !EXT4_I(inode)->i_reserved_meta_blocks)
3327 		return 0;
3328 
3329 	/*
3330 	 * We do something simple for now.  The filemap_flush() will
3331 	 * also start triggering a write of the data blocks, which is
3332 	 * not strictly speaking necessary (and for users of
3333 	 * laptop_mode, not even desirable).  However, to do otherwise
3334 	 * would require replicating code paths in:
3335 	 *
3336 	 * ext4_da_writepages() ->
3337 	 *    write_cache_pages() ---> (via passed in callback function)
3338 	 *        __mpage_da_writepage() -->
3339 	 *           mpage_add_bh_to_extent()
3340 	 *           mpage_da_map_blocks()
3341 	 *
3342 	 * The problem is that write_cache_pages(), located in
3343 	 * mm/page-writeback.c, marks pages clean in preparation for
3344 	 * doing I/O, which is not desirable if we're not planning on
3345 	 * doing I/O at all.
3346 	 *
3347 	 * We could call write_cache_pages(), and then redirty all of
3348 	 * the pages by calling redirty_page_for_writeback() but that
3349 	 * would be ugly in the extreme.  So instead we would need to
3350 	 * replicate parts of the code in the above functions,
3351 	 * simplifying them becuase we wouldn't actually intend to
3352 	 * write out the pages, but rather only collect contiguous
3353 	 * logical block extents, call the multi-block allocator, and
3354 	 * then update the buffer heads with the block allocations.
3355 	 *
3356 	 * For now, though, we'll cheat by calling filemap_flush(),
3357 	 * which will map the blocks, and start the I/O, but not
3358 	 * actually wait for the I/O to complete.
3359 	 */
3360 	return filemap_flush(inode->i_mapping);
3361 }
3362 
3363 /*
3364  * bmap() is special.  It gets used by applications such as lilo and by
3365  * the swapper to find the on-disk block of a specific piece of data.
3366  *
3367  * Naturally, this is dangerous if the block concerned is still in the
3368  * journal.  If somebody makes a swapfile on an ext4 data-journaling
3369  * filesystem and enables swap, then they may get a nasty shock when the
3370  * data getting swapped to that swapfile suddenly gets overwritten by
3371  * the original zero's written out previously to the journal and
3372  * awaiting writeback in the kernel's buffer cache.
3373  *
3374  * So, if we see any bmap calls here on a modified, data-journaled file,
3375  * take extra steps to flush any blocks which might be in the cache.
3376  */
3377 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
3378 {
3379 	struct inode *inode = mapping->host;
3380 	journal_t *journal;
3381 	int err;
3382 
3383 	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
3384 			test_opt(inode->i_sb, DELALLOC)) {
3385 		/*
3386 		 * With delalloc we want to sync the file
3387 		 * so that we can make sure we allocate
3388 		 * blocks for file
3389 		 */
3390 		filemap_write_and_wait(mapping);
3391 	}
3392 
3393 	if (EXT4_JOURNAL(inode) &&
3394 	    ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
3395 		/*
3396 		 * This is a REALLY heavyweight approach, but the use of
3397 		 * bmap on dirty files is expected to be extremely rare:
3398 		 * only if we run lilo or swapon on a freshly made file
3399 		 * do we expect this to happen.
3400 		 *
3401 		 * (bmap requires CAP_SYS_RAWIO so this does not
3402 		 * represent an unprivileged user DOS attack --- we'd be
3403 		 * in trouble if mortal users could trigger this path at
3404 		 * will.)
3405 		 *
3406 		 * NB. EXT4_STATE_JDATA is not set on files other than
3407 		 * regular files.  If somebody wants to bmap a directory
3408 		 * or symlink and gets confused because the buffer
3409 		 * hasn't yet been flushed to disk, they deserve
3410 		 * everything they get.
3411 		 */
3412 
3413 		ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
3414 		journal = EXT4_JOURNAL(inode);
3415 		jbd2_journal_lock_updates(journal);
3416 		err = jbd2_journal_flush(journal);
3417 		jbd2_journal_unlock_updates(journal);
3418 
3419 		if (err)
3420 			return 0;
3421 	}
3422 
3423 	return generic_block_bmap(mapping, block, ext4_get_block);
3424 }
3425 
3426 static int ext4_readpage(struct file *file, struct page *page)
3427 {
3428 	return mpage_readpage(page, ext4_get_block);
3429 }
3430 
3431 static int
3432 ext4_readpages(struct file *file, struct address_space *mapping,
3433 		struct list_head *pages, unsigned nr_pages)
3434 {
3435 	return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
3436 }
3437 
3438 static void ext4_free_io_end(ext4_io_end_t *io)
3439 {
3440 	BUG_ON(!io);
3441 	if (io->page)
3442 		put_page(io->page);
3443 	iput(io->inode);
3444 	kfree(io);
3445 }
3446 
3447 static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset)
3448 {
3449 	struct buffer_head *head, *bh;
3450 	unsigned int curr_off = 0;
3451 
3452 	if (!page_has_buffers(page))
3453 		return;
3454 	head = bh = page_buffers(page);
3455 	do {
3456 		if (offset <= curr_off && test_clear_buffer_uninit(bh)
3457 					&& bh->b_private) {
3458 			ext4_free_io_end(bh->b_private);
3459 			bh->b_private = NULL;
3460 			bh->b_end_io = NULL;
3461 		}
3462 		curr_off = curr_off + bh->b_size;
3463 		bh = bh->b_this_page;
3464 	} while (bh != head);
3465 }
3466 
3467 static void ext4_invalidatepage(struct page *page, unsigned long offset)
3468 {
3469 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3470 
3471 	/*
3472 	 * free any io_end structure allocated for buffers to be discarded
3473 	 */
3474 	if (ext4_should_dioread_nolock(page->mapping->host))
3475 		ext4_invalidatepage_free_endio(page, offset);
3476 	/*
3477 	 * If it's a full truncate we just forget about the pending dirtying
3478 	 */
3479 	if (offset == 0)
3480 		ClearPageChecked(page);
3481 
3482 	if (journal)
3483 		jbd2_journal_invalidatepage(journal, page, offset);
3484 	else
3485 		block_invalidatepage(page, offset);
3486 }
3487 
3488 static int ext4_releasepage(struct page *page, gfp_t wait)
3489 {
3490 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3491 
3492 	WARN_ON(PageChecked(page));
3493 	if (!page_has_buffers(page))
3494 		return 0;
3495 	if (journal)
3496 		return jbd2_journal_try_to_free_buffers(journal, page, wait);
3497 	else
3498 		return try_to_free_buffers(page);
3499 }
3500 
3501 /*
3502  * O_DIRECT for ext3 (or indirect map) based files
3503  *
3504  * If the O_DIRECT write will extend the file then add this inode to the
3505  * orphan list.  So recovery will truncate it back to the original size
3506  * if the machine crashes during the write.
3507  *
3508  * If the O_DIRECT write is intantiating holes inside i_size and the machine
3509  * crashes then stale disk data _may_ be exposed inside the file. But current
3510  * VFS code falls back into buffered path in that case so we are safe.
3511  */
3512 static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
3513 			      const struct iovec *iov, loff_t offset,
3514 			      unsigned long nr_segs)
3515 {
3516 	struct file *file = iocb->ki_filp;
3517 	struct inode *inode = file->f_mapping->host;
3518 	struct ext4_inode_info *ei = EXT4_I(inode);
3519 	handle_t *handle;
3520 	ssize_t ret;
3521 	int orphan = 0;
3522 	size_t count = iov_length(iov, nr_segs);
3523 	int retries = 0;
3524 
3525 	if (rw == WRITE) {
3526 		loff_t final_size = offset + count;
3527 
3528 		if (final_size > inode->i_size) {
3529 			/* Credits for sb + inode write */
3530 			handle = ext4_journal_start(inode, 2);
3531 			if (IS_ERR(handle)) {
3532 				ret = PTR_ERR(handle);
3533 				goto out;
3534 			}
3535 			ret = ext4_orphan_add(handle, inode);
3536 			if (ret) {
3537 				ext4_journal_stop(handle);
3538 				goto out;
3539 			}
3540 			orphan = 1;
3541 			ei->i_disksize = inode->i_size;
3542 			ext4_journal_stop(handle);
3543 		}
3544 	}
3545 
3546 retry:
3547 	if (rw == READ && ext4_should_dioread_nolock(inode))
3548 		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
3549 				 inode->i_sb->s_bdev, iov,
3550 				 offset, nr_segs,
3551 				 ext4_get_block, NULL);
3552 	else
3553 		ret = blockdev_direct_IO(rw, iocb, inode,
3554 				 inode->i_sb->s_bdev, iov,
3555 				 offset, nr_segs,
3556 				 ext4_get_block, NULL);
3557 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
3558 		goto retry;
3559 
3560 	if (orphan) {
3561 		int err;
3562 
3563 		/* Credits for sb + inode write */
3564 		handle = ext4_journal_start(inode, 2);
3565 		if (IS_ERR(handle)) {
3566 			/* This is really bad luck. We've written the data
3567 			 * but cannot extend i_size. Bail out and pretend
3568 			 * the write failed... */
3569 			ret = PTR_ERR(handle);
3570 			if (inode->i_nlink)
3571 				ext4_orphan_del(NULL, inode);
3572 
3573 			goto out;
3574 		}
3575 		if (inode->i_nlink)
3576 			ext4_orphan_del(handle, inode);
3577 		if (ret > 0) {
3578 			loff_t end = offset + ret;
3579 			if (end > inode->i_size) {
3580 				ei->i_disksize = end;
3581 				i_size_write(inode, end);
3582 				/*
3583 				 * We're going to return a positive `ret'
3584 				 * here due to non-zero-length I/O, so there's
3585 				 * no way of reporting error returns from
3586 				 * ext4_mark_inode_dirty() to userspace.  So
3587 				 * ignore it.
3588 				 */
3589 				ext4_mark_inode_dirty(handle, inode);
3590 			}
3591 		}
3592 		err = ext4_journal_stop(handle);
3593 		if (ret == 0)
3594 			ret = err;
3595 	}
3596 out:
3597 	return ret;
3598 }
3599 
3600 /*
3601  * ext4_get_block used when preparing for a DIO write or buffer write.
3602  * We allocate an uinitialized extent if blocks haven't been allocated.
3603  * The extent will be converted to initialized after the IO is complete.
3604  */
3605 static int ext4_get_block_write(struct inode *inode, sector_t iblock,
3606 		   struct buffer_head *bh_result, int create)
3607 {
3608 	ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
3609 		   inode->i_ino, create);
3610 	return _ext4_get_block(inode, iblock, bh_result,
3611 			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
3612 }
3613 
3614 static void dump_completed_IO(struct inode * inode)
3615 {
3616 #ifdef	EXT4_DEBUG
3617 	struct list_head *cur, *before, *after;
3618 	ext4_io_end_t *io, *io0, *io1;
3619 	unsigned long flags;
3620 
3621 	if (list_empty(&EXT4_I(inode)->i_completed_io_list)){
3622 		ext4_debug("inode %lu completed_io list is empty\n", inode->i_ino);
3623 		return;
3624 	}
3625 
3626 	ext4_debug("Dump inode %lu completed_io list \n", inode->i_ino);
3627 	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
3628 	list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list){
3629 		cur = &io->list;
3630 		before = cur->prev;
3631 		io0 = container_of(before, ext4_io_end_t, list);
3632 		after = cur->next;
3633 		io1 = container_of(after, ext4_io_end_t, list);
3634 
3635 		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
3636 			    io, inode->i_ino, io0, io1);
3637 	}
3638 	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
3639 #endif
3640 }
3641 
3642 /*
3643  * check a range of space and convert unwritten extents to written.
3644  */
3645 static int ext4_end_io_nolock(ext4_io_end_t *io)
3646 {
3647 	struct inode *inode = io->inode;
3648 	loff_t offset = io->offset;
3649 	ssize_t size = io->size;
3650 	int ret = 0;
3651 
3652 	ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
3653 		   "list->prev 0x%p\n",
3654 	           io, inode->i_ino, io->list.next, io->list.prev);
3655 
3656 	if (list_empty(&io->list))
3657 		return ret;
3658 
3659 	if (io->flag != EXT4_IO_UNWRITTEN)
3660 		return ret;
3661 
3662 	ret = ext4_convert_unwritten_extents(inode, offset, size);
3663 	if (ret < 0) {
3664 		printk(KERN_EMERG "%s: failed to convert unwritten"
3665 			"extents to written extents, error is %d"
3666 			" io is still on inode %lu aio dio list\n",
3667                        __func__, ret, inode->i_ino);
3668 		return ret;
3669 	}
3670 
3671 	/* clear the DIO AIO unwritten flag */
3672 	io->flag = 0;
3673 	return ret;
3674 }
3675 
3676 /*
3677  * work on completed aio dio IO, to convert unwritten extents to extents
3678  */
3679 static void ext4_end_io_work(struct work_struct *work)
3680 {
3681 	ext4_io_end_t		*io = container_of(work, ext4_io_end_t, work);
3682 	struct inode		*inode = io->inode;
3683 	struct ext4_inode_info	*ei = EXT4_I(inode);
3684 	unsigned long		flags;
3685 	int			ret;
3686 
3687 	mutex_lock(&inode->i_mutex);
3688 	ret = ext4_end_io_nolock(io);
3689 	if (ret < 0) {
3690 		mutex_unlock(&inode->i_mutex);
3691 		return;
3692 	}
3693 
3694 	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
3695 	if (!list_empty(&io->list))
3696 		list_del_init(&io->list);
3697 	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
3698 	mutex_unlock(&inode->i_mutex);
3699 	ext4_free_io_end(io);
3700 }
3701 
3702 /*
3703  * This function is called from ext4_sync_file().
3704  *
3705  * When IO is completed, the work to convert unwritten extents to
3706  * written is queued on workqueue but may not get immediately
3707  * scheduled. When fsync is called, we need to ensure the
3708  * conversion is complete before fsync returns.
3709  * The inode keeps track of a list of pending/completed IO that
3710  * might needs to do the conversion. This function walks through
3711  * the list and convert the related unwritten extents for completed IO
3712  * to written.
3713  * The function return the number of pending IOs on success.
3714  */
3715 int flush_completed_IO(struct inode *inode)
3716 {
3717 	ext4_io_end_t *io;
3718 	struct ext4_inode_info *ei = EXT4_I(inode);
3719 	unsigned long flags;
3720 	int ret = 0;
3721 	int ret2 = 0;
3722 
3723 	if (list_empty(&ei->i_completed_io_list))
3724 		return ret;
3725 
3726 	dump_completed_IO(inode);
3727 	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
3728 	while (!list_empty(&ei->i_completed_io_list)){
3729 		io = list_entry(ei->i_completed_io_list.next,
3730 				ext4_io_end_t, list);
3731 		/*
3732 		 * Calling ext4_end_io_nolock() to convert completed
3733 		 * IO to written.
3734 		 *
3735 		 * When ext4_sync_file() is called, run_queue() may already
3736 		 * about to flush the work corresponding to this io structure.
3737 		 * It will be upset if it founds the io structure related
3738 		 * to the work-to-be schedule is freed.
3739 		 *
3740 		 * Thus we need to keep the io structure still valid here after
3741 		 * convertion finished. The io structure has a flag to
3742 		 * avoid double converting from both fsync and background work
3743 		 * queue work.
3744 		 */
3745 		spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
3746 		ret = ext4_end_io_nolock(io);
3747 		spin_lock_irqsave(&ei->i_completed_io_lock, flags);
3748 		if (ret < 0)
3749 			ret2 = ret;
3750 		else
3751 			list_del_init(&io->list);
3752 	}
3753 	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
3754 	return (ret2 < 0) ? ret2 : 0;
3755 }
3756 
3757 static ext4_io_end_t *ext4_init_io_end (struct inode *inode, gfp_t flags)
3758 {
3759 	ext4_io_end_t *io = NULL;
3760 
3761 	io = kmalloc(sizeof(*io), flags);
3762 
3763 	if (io) {
3764 		igrab(inode);
3765 		io->inode = inode;
3766 		io->flag = 0;
3767 		io->offset = 0;
3768 		io->size = 0;
3769 		io->page = NULL;
3770 		INIT_WORK(&io->work, ext4_end_io_work);
3771 		INIT_LIST_HEAD(&io->list);
3772 	}
3773 
3774 	return io;
3775 }
3776 
3777 static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
3778 			    ssize_t size, void *private)
3779 {
3780         ext4_io_end_t *io_end = iocb->private;
3781 	struct workqueue_struct *wq;
3782 	unsigned long flags;
3783 	struct ext4_inode_info *ei;
3784 
3785 	/* if not async direct IO or dio with 0 bytes write, just return */
3786 	if (!io_end || !size)
3787 		return;
3788 
3789 	ext_debug("ext4_end_io_dio(): io_end 0x%p"
3790 		  "for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
3791  		  iocb->private, io_end->inode->i_ino, iocb, offset,
3792 		  size);
3793 
3794 	/* if not aio dio with unwritten extents, just free io and return */
3795 	if (io_end->flag != EXT4_IO_UNWRITTEN){
3796 		ext4_free_io_end(io_end);
3797 		iocb->private = NULL;
3798 		return;
3799 	}
3800 
3801 	io_end->offset = offset;
3802 	io_end->size = size;
3803 	io_end->flag = EXT4_IO_UNWRITTEN;
3804 	wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
3805 
3806 	/* queue the work to convert unwritten extents to written */
3807 	queue_work(wq, &io_end->work);
3808 
3809 	/* Add the io_end to per-inode completed aio dio list*/
3810 	ei = EXT4_I(io_end->inode);
3811 	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
3812 	list_add_tail(&io_end->list, &ei->i_completed_io_list);
3813 	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
3814 	iocb->private = NULL;
3815 }
3816 
3817 static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
3818 {
3819 	ext4_io_end_t *io_end = bh->b_private;
3820 	struct workqueue_struct *wq;
3821 	struct inode *inode;
3822 	unsigned long flags;
3823 
3824 	if (!test_clear_buffer_uninit(bh) || !io_end)
3825 		goto out;
3826 
3827 	if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) {
3828 		printk("sb umounted, discard end_io request for inode %lu\n",
3829 			io_end->inode->i_ino);
3830 		ext4_free_io_end(io_end);
3831 		goto out;
3832 	}
3833 
3834 	io_end->flag = EXT4_IO_UNWRITTEN;
3835 	inode = io_end->inode;
3836 
3837 	/* Add the io_end to per-inode completed io list*/
3838 	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
3839 	list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
3840 	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
3841 
3842 	wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
3843 	/* queue the work to convert unwritten extents to written */
3844 	queue_work(wq, &io_end->work);
3845 out:
3846 	bh->b_private = NULL;
3847 	bh->b_end_io = NULL;
3848 	clear_buffer_uninit(bh);
3849 	end_buffer_async_write(bh, uptodate);
3850 }
3851 
3852 static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
3853 {
3854 	ext4_io_end_t *io_end;
3855 	struct page *page = bh->b_page;
3856 	loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT;
3857 	size_t size = bh->b_size;
3858 
3859 retry:
3860 	io_end = ext4_init_io_end(inode, GFP_ATOMIC);
3861 	if (!io_end) {
3862 		if (printk_ratelimit())
3863 			printk(KERN_WARNING "%s: allocation fail\n", __func__);
3864 		schedule();
3865 		goto retry;
3866 	}
3867 	io_end->offset = offset;
3868 	io_end->size = size;
3869 	/*
3870 	 * We need to hold a reference to the page to make sure it
3871 	 * doesn't get evicted before ext4_end_io_work() has a chance
3872 	 * to convert the extent from written to unwritten.
3873 	 */
3874 	io_end->page = page;
3875 	get_page(io_end->page);
3876 
3877 	bh->b_private = io_end;
3878 	bh->b_end_io = ext4_end_io_buffer_write;
3879 	return 0;
3880 }
3881 
3882 /*
3883  * For ext4 extent files, ext4 will do direct-io write to holes,
3884  * preallocated extents, and those write extend the file, no need to
3885  * fall back to buffered IO.
3886  *
3887  * For holes, we fallocate those blocks, mark them as unintialized
3888  * If those blocks were preallocated, we mark sure they are splited, but
3889  * still keep the range to write as unintialized.
3890  *
3891  * The unwrritten extents will be converted to written when DIO is completed.
3892  * For async direct IO, since the IO may still pending when return, we
3893  * set up an end_io call back function, which will do the convertion
3894  * when async direct IO completed.
3895  *
3896  * If the O_DIRECT write will extend the file then add this inode to the
3897  * orphan list.  So recovery will truncate it back to the original size
3898  * if the machine crashes during the write.
3899  *
3900  */
3901 static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
3902 			      const struct iovec *iov, loff_t offset,
3903 			      unsigned long nr_segs)
3904 {
3905 	struct file *file = iocb->ki_filp;
3906 	struct inode *inode = file->f_mapping->host;
3907 	ssize_t ret;
3908 	size_t count = iov_length(iov, nr_segs);
3909 
3910 	loff_t final_size = offset + count;
3911 	if (rw == WRITE && final_size <= inode->i_size) {
3912 		/*
3913  		 * We could direct write to holes and fallocate.
3914 		 *
3915  		 * Allocated blocks to fill the hole are marked as uninitialized
3916  		 * to prevent paralel buffered read to expose the stale data
3917  		 * before DIO complete the data IO.
3918 		 *
3919  		 * As to previously fallocated extents, ext4 get_block
3920  		 * will just simply mark the buffer mapped but still
3921  		 * keep the extents uninitialized.
3922  		 *
3923 		 * for non AIO case, we will convert those unwritten extents
3924 		 * to written after return back from blockdev_direct_IO.
3925 		 *
3926 		 * for async DIO, the conversion needs to be defered when
3927 		 * the IO is completed. The ext4 end_io callback function
3928 		 * will be called to take care of the conversion work.
3929 		 * Here for async case, we allocate an io_end structure to
3930 		 * hook to the iocb.
3931  		 */
3932 		iocb->private = NULL;
3933 		EXT4_I(inode)->cur_aio_dio = NULL;
3934 		if (!is_sync_kiocb(iocb)) {
3935 			iocb->private = ext4_init_io_end(inode, GFP_NOFS);
3936 			if (!iocb->private)
3937 				return -ENOMEM;
3938 			/*
3939 			 * we save the io structure for current async
3940 			 * direct IO, so that later ext4_get_blocks()
3941 			 * could flag the io structure whether there
3942 			 * is a unwritten extents needs to be converted
3943 			 * when IO is completed.
3944 			 */
3945 			EXT4_I(inode)->cur_aio_dio = iocb->private;
3946 		}
3947 
3948 		ret = blockdev_direct_IO(rw, iocb, inode,
3949 					 inode->i_sb->s_bdev, iov,
3950 					 offset, nr_segs,
3951 					 ext4_get_block_write,
3952 					 ext4_end_io_dio);
3953 		if (iocb->private)
3954 			EXT4_I(inode)->cur_aio_dio = NULL;
3955 		/*
3956 		 * The io_end structure takes a reference to the inode,
3957 		 * that structure needs to be destroyed and the
3958 		 * reference to the inode need to be dropped, when IO is
3959 		 * complete, even with 0 byte write, or failed.
3960 		 *
3961 		 * In the successful AIO DIO case, the io_end structure will be
3962 		 * desctroyed and the reference to the inode will be dropped
3963 		 * after the end_io call back function is called.
3964 		 *
3965 		 * In the case there is 0 byte write, or error case, since
3966 		 * VFS direct IO won't invoke the end_io call back function,
3967 		 * we need to free the end_io structure here.
3968 		 */
3969 		if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
3970 			ext4_free_io_end(iocb->private);
3971 			iocb->private = NULL;
3972 		} else if (ret > 0 && ext4_test_inode_state(inode,
3973 						EXT4_STATE_DIO_UNWRITTEN)) {
3974 			int err;
3975 			/*
3976 			 * for non AIO case, since the IO is already
3977 			 * completed, we could do the convertion right here
3978 			 */
3979 			err = ext4_convert_unwritten_extents(inode,
3980 							     offset, ret);
3981 			if (err < 0)
3982 				ret = err;
3983 			ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3984 		}
3985 		return ret;
3986 	}
3987 
3988 	/* for write the the end of file case, we fall back to old way */
3989 	return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3990 }
3991 
3992 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
3993 			      const struct iovec *iov, loff_t offset,
3994 			      unsigned long nr_segs)
3995 {
3996 	struct file *file = iocb->ki_filp;
3997 	struct inode *inode = file->f_mapping->host;
3998 
3999 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4000 		return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
4001 
4002 	return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
4003 }
4004 
4005 /*
4006  * Pages can be marked dirty completely asynchronously from ext4's journalling
4007  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
4008  * much here because ->set_page_dirty is called under VFS locks.  The page is
4009  * not necessarily locked.
4010  *
4011  * We cannot just dirty the page and leave attached buffers clean, because the
4012  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
4013  * or jbddirty because all the journalling code will explode.
4014  *
4015  * So what we do is to mark the page "pending dirty" and next time writepage
4016  * is called, propagate that into the buffers appropriately.
4017  */
4018 static int ext4_journalled_set_page_dirty(struct page *page)
4019 {
4020 	SetPageChecked(page);
4021 	return __set_page_dirty_nobuffers(page);
4022 }
4023 
4024 static const struct address_space_operations ext4_ordered_aops = {
4025 	.readpage		= ext4_readpage,
4026 	.readpages		= ext4_readpages,
4027 	.writepage		= ext4_writepage,
4028 	.sync_page		= block_sync_page,
4029 	.write_begin		= ext4_write_begin,
4030 	.write_end		= ext4_ordered_write_end,
4031 	.bmap			= ext4_bmap,
4032 	.invalidatepage		= ext4_invalidatepage,
4033 	.releasepage		= ext4_releasepage,
4034 	.direct_IO		= ext4_direct_IO,
4035 	.migratepage		= buffer_migrate_page,
4036 	.is_partially_uptodate  = block_is_partially_uptodate,
4037 	.error_remove_page	= generic_error_remove_page,
4038 };
4039 
4040 static const struct address_space_operations ext4_writeback_aops = {
4041 	.readpage		= ext4_readpage,
4042 	.readpages		= ext4_readpages,
4043 	.writepage		= ext4_writepage,
4044 	.sync_page		= block_sync_page,
4045 	.write_begin		= ext4_write_begin,
4046 	.write_end		= ext4_writeback_write_end,
4047 	.bmap			= ext4_bmap,
4048 	.invalidatepage		= ext4_invalidatepage,
4049 	.releasepage		= ext4_releasepage,
4050 	.direct_IO		= ext4_direct_IO,
4051 	.migratepage		= buffer_migrate_page,
4052 	.is_partially_uptodate  = block_is_partially_uptodate,
4053 	.error_remove_page	= generic_error_remove_page,
4054 };
4055 
4056 static const struct address_space_operations ext4_journalled_aops = {
4057 	.readpage		= ext4_readpage,
4058 	.readpages		= ext4_readpages,
4059 	.writepage		= ext4_writepage,
4060 	.sync_page		= block_sync_page,
4061 	.write_begin		= ext4_write_begin,
4062 	.write_end		= ext4_journalled_write_end,
4063 	.set_page_dirty		= ext4_journalled_set_page_dirty,
4064 	.bmap			= ext4_bmap,
4065 	.invalidatepage		= ext4_invalidatepage,
4066 	.releasepage		= ext4_releasepage,
4067 	.is_partially_uptodate  = block_is_partially_uptodate,
4068 	.error_remove_page	= generic_error_remove_page,
4069 };
4070 
4071 static const struct address_space_operations ext4_da_aops = {
4072 	.readpage		= ext4_readpage,
4073 	.readpages		= ext4_readpages,
4074 	.writepage		= ext4_writepage,
4075 	.writepages		= ext4_da_writepages,
4076 	.sync_page		= block_sync_page,
4077 	.write_begin		= ext4_da_write_begin,
4078 	.write_end		= ext4_da_write_end,
4079 	.bmap			= ext4_bmap,
4080 	.invalidatepage		= ext4_da_invalidatepage,
4081 	.releasepage		= ext4_releasepage,
4082 	.direct_IO		= ext4_direct_IO,
4083 	.migratepage		= buffer_migrate_page,
4084 	.is_partially_uptodate  = block_is_partially_uptodate,
4085 	.error_remove_page	= generic_error_remove_page,
4086 };
4087 
4088 void ext4_set_aops(struct inode *inode)
4089 {
4090 	if (ext4_should_order_data(inode) &&
4091 		test_opt(inode->i_sb, DELALLOC))
4092 		inode->i_mapping->a_ops = &ext4_da_aops;
4093 	else if (ext4_should_order_data(inode))
4094 		inode->i_mapping->a_ops = &ext4_ordered_aops;
4095 	else if (ext4_should_writeback_data(inode) &&
4096 		 test_opt(inode->i_sb, DELALLOC))
4097 		inode->i_mapping->a_ops = &ext4_da_aops;
4098 	else if (ext4_should_writeback_data(inode))
4099 		inode->i_mapping->a_ops = &ext4_writeback_aops;
4100 	else
4101 		inode->i_mapping->a_ops = &ext4_journalled_aops;
4102 }
4103 
4104 /*
4105  * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
4106  * up to the end of the block which corresponds to `from'.
4107  * This required during truncate. We need to physically zero the tail end
4108  * of that block so it doesn't yield old data if the file is later grown.
4109  */
4110 int ext4_block_truncate_page(handle_t *handle,
4111 		struct address_space *mapping, loff_t from)
4112 {
4113 	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
4114 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
4115 	unsigned blocksize, length, pos;
4116 	ext4_lblk_t iblock;
4117 	struct inode *inode = mapping->host;
4118 	struct buffer_head *bh;
4119 	struct page *page;
4120 	int err = 0;
4121 
4122 	page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
4123 				   mapping_gfp_mask(mapping) & ~__GFP_FS);
4124 	if (!page)
4125 		return -EINVAL;
4126 
4127 	blocksize = inode->i_sb->s_blocksize;
4128 	length = blocksize - (offset & (blocksize - 1));
4129 	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
4130 
4131 	/*
4132 	 * For "nobh" option,  we can only work if we don't need to
4133 	 * read-in the page - otherwise we create buffers to do the IO.
4134 	 */
4135 	if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
4136 	     ext4_should_writeback_data(inode) && PageUptodate(page)) {
4137 		zero_user(page, offset, length);
4138 		set_page_dirty(page);
4139 		goto unlock;
4140 	}
4141 
4142 	if (!page_has_buffers(page))
4143 		create_empty_buffers(page, blocksize, 0);
4144 
4145 	/* Find the buffer that contains "offset" */
4146 	bh = page_buffers(page);
4147 	pos = blocksize;
4148 	while (offset >= pos) {
4149 		bh = bh->b_this_page;
4150 		iblock++;
4151 		pos += blocksize;
4152 	}
4153 
4154 	err = 0;
4155 	if (buffer_freed(bh)) {
4156 		BUFFER_TRACE(bh, "freed: skip");
4157 		goto unlock;
4158 	}
4159 
4160 	if (!buffer_mapped(bh)) {
4161 		BUFFER_TRACE(bh, "unmapped");
4162 		ext4_get_block(inode, iblock, bh, 0);
4163 		/* unmapped? It's a hole - nothing to do */
4164 		if (!buffer_mapped(bh)) {
4165 			BUFFER_TRACE(bh, "still unmapped");
4166 			goto unlock;
4167 		}
4168 	}
4169 
4170 	/* Ok, it's mapped. Make sure it's up-to-date */
4171 	if (PageUptodate(page))
4172 		set_buffer_uptodate(bh);
4173 
4174 	if (!buffer_uptodate(bh)) {
4175 		err = -EIO;
4176 		ll_rw_block(READ, 1, &bh);
4177 		wait_on_buffer(bh);
4178 		/* Uhhuh. Read error. Complain and punt. */
4179 		if (!buffer_uptodate(bh))
4180 			goto unlock;
4181 	}
4182 
4183 	if (ext4_should_journal_data(inode)) {
4184 		BUFFER_TRACE(bh, "get write access");
4185 		err = ext4_journal_get_write_access(handle, bh);
4186 		if (err)
4187 			goto unlock;
4188 	}
4189 
4190 	zero_user(page, offset, length);
4191 
4192 	BUFFER_TRACE(bh, "zeroed end of block");
4193 
4194 	err = 0;
4195 	if (ext4_should_journal_data(inode)) {
4196 		err = ext4_handle_dirty_metadata(handle, inode, bh);
4197 	} else {
4198 		if (ext4_should_order_data(inode))
4199 			err = ext4_jbd2_file_inode(handle, inode);
4200 		mark_buffer_dirty(bh);
4201 	}
4202 
4203 unlock:
4204 	unlock_page(page);
4205 	page_cache_release(page);
4206 	return err;
4207 }
4208 
4209 /*
4210  * Probably it should be a library function... search for first non-zero word
4211  * or memcmp with zero_page, whatever is better for particular architecture.
4212  * Linus?
4213  */
4214 static inline int all_zeroes(__le32 *p, __le32 *q)
4215 {
4216 	while (p < q)
4217 		if (*p++)
4218 			return 0;
4219 	return 1;
4220 }
4221 
4222 /**
4223  *	ext4_find_shared - find the indirect blocks for partial truncation.
4224  *	@inode:	  inode in question
4225  *	@depth:	  depth of the affected branch
4226  *	@offsets: offsets of pointers in that branch (see ext4_block_to_path)
4227  *	@chain:	  place to store the pointers to partial indirect blocks
4228  *	@top:	  place to the (detached) top of branch
4229  *
4230  *	This is a helper function used by ext4_truncate().
4231  *
4232  *	When we do truncate() we may have to clean the ends of several
4233  *	indirect blocks but leave the blocks themselves alive. Block is
4234  *	partially truncated if some data below the new i_size is refered
4235  *	from it (and it is on the path to the first completely truncated
4236  *	data block, indeed).  We have to free the top of that path along
4237  *	with everything to the right of the path. Since no allocation
4238  *	past the truncation point is possible until ext4_truncate()
4239  *	finishes, we may safely do the latter, but top of branch may
4240  *	require special attention - pageout below the truncation point
4241  *	might try to populate it.
4242  *
4243  *	We atomically detach the top of branch from the tree, store the
4244  *	block number of its root in *@top, pointers to buffer_heads of
4245  *	partially truncated blocks - in @chain[].bh and pointers to
4246  *	their last elements that should not be removed - in
4247  *	@chain[].p. Return value is the pointer to last filled element
4248  *	of @chain.
4249  *
4250  *	The work left to caller to do the actual freeing of subtrees:
4251  *		a) free the subtree starting from *@top
4252  *		b) free the subtrees whose roots are stored in
4253  *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
4254  *		c) free the subtrees growing from the inode past the @chain[0].
4255  *			(no partially truncated stuff there).  */
4256 
4257 static Indirect *ext4_find_shared(struct inode *inode, int depth,
4258 				  ext4_lblk_t offsets[4], Indirect chain[4],
4259 				  __le32 *top)
4260 {
4261 	Indirect *partial, *p;
4262 	int k, err;
4263 
4264 	*top = 0;
4265 	/* Make k index the deepest non-null offset + 1 */
4266 	for (k = depth; k > 1 && !offsets[k-1]; k--)
4267 		;
4268 	partial = ext4_get_branch(inode, k, offsets, chain, &err);
4269 	/* Writer: pointers */
4270 	if (!partial)
4271 		partial = chain + k-1;
4272 	/*
4273 	 * If the branch acquired continuation since we've looked at it -
4274 	 * fine, it should all survive and (new) top doesn't belong to us.
4275 	 */
4276 	if (!partial->key && *partial->p)
4277 		/* Writer: end */
4278 		goto no_top;
4279 	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
4280 		;
4281 	/*
4282 	 * OK, we've found the last block that must survive. The rest of our
4283 	 * branch should be detached before unlocking. However, if that rest
4284 	 * of branch is all ours and does not grow immediately from the inode
4285 	 * it's easier to cheat and just decrement partial->p.
4286 	 */
4287 	if (p == chain + k - 1 && p > chain) {
4288 		p->p--;
4289 	} else {
4290 		*top = *p->p;
4291 		/* Nope, don't do this in ext4.  Must leave the tree intact */
4292 #if 0
4293 		*p->p = 0;
4294 #endif
4295 	}
4296 	/* Writer: end */
4297 
4298 	while (partial > p) {
4299 		brelse(partial->bh);
4300 		partial--;
4301 	}
4302 no_top:
4303 	return partial;
4304 }
4305 
4306 /*
4307  * Zero a number of block pointers in either an inode or an indirect block.
4308  * If we restart the transaction we must again get write access to the
4309  * indirect block for further modification.
4310  *
4311  * We release `count' blocks on disk, but (last - first) may be greater
4312  * than `count' because there can be holes in there.
4313  */
4314 static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
4315 			     struct buffer_head *bh,
4316 			     ext4_fsblk_t block_to_free,
4317 			     unsigned long count, __le32 *first,
4318 			     __le32 *last)
4319 {
4320 	__le32 *p;
4321 	int	flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
4322 
4323 	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
4324 		flags |= EXT4_FREE_BLOCKS_METADATA;
4325 
4326 	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
4327 				   count)) {
4328 		EXT4_ERROR_INODE(inode, "attempt to clear invalid "
4329 				 "blocks %llu len %lu",
4330 				 (unsigned long long) block_to_free, count);
4331 		return 1;
4332 	}
4333 
4334 	if (try_to_extend_transaction(handle, inode)) {
4335 		if (bh) {
4336 			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
4337 			ext4_handle_dirty_metadata(handle, inode, bh);
4338 		}
4339 		ext4_mark_inode_dirty(handle, inode);
4340 		ext4_truncate_restart_trans(handle, inode,
4341 					    blocks_for_truncate(inode));
4342 		if (bh) {
4343 			BUFFER_TRACE(bh, "retaking write access");
4344 			ext4_journal_get_write_access(handle, bh);
4345 		}
4346 	}
4347 
4348 	for (p = first; p < last; p++)
4349 		*p = 0;
4350 
4351 	ext4_free_blocks(handle, inode, 0, block_to_free, count, flags);
4352 	return 0;
4353 }
4354 
4355 /**
4356  * ext4_free_data - free a list of data blocks
4357  * @handle:	handle for this transaction
4358  * @inode:	inode we are dealing with
4359  * @this_bh:	indirect buffer_head which contains *@first and *@last
4360  * @first:	array of block numbers
4361  * @last:	points immediately past the end of array
4362  *
4363  * We are freeing all blocks refered from that array (numbers are stored as
4364  * little-endian 32-bit) and updating @inode->i_blocks appropriately.
4365  *
4366  * We accumulate contiguous runs of blocks to free.  Conveniently, if these
4367  * blocks are contiguous then releasing them at one time will only affect one
4368  * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
4369  * actually use a lot of journal space.
4370  *
4371  * @this_bh will be %NULL if @first and @last point into the inode's direct
4372  * block pointers.
4373  */
4374 static void ext4_free_data(handle_t *handle, struct inode *inode,
4375 			   struct buffer_head *this_bh,
4376 			   __le32 *first, __le32 *last)
4377 {
4378 	ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
4379 	unsigned long count = 0;	    /* Number of blocks in the run */
4380 	__le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
4381 					       corresponding to
4382 					       block_to_free */
4383 	ext4_fsblk_t nr;		    /* Current block # */
4384 	__le32 *p;			    /* Pointer into inode/ind
4385 					       for current block */
4386 	int err;
4387 
4388 	if (this_bh) {				/* For indirect block */
4389 		BUFFER_TRACE(this_bh, "get_write_access");
4390 		err = ext4_journal_get_write_access(handle, this_bh);
4391 		/* Important: if we can't update the indirect pointers
4392 		 * to the blocks, we can't free them. */
4393 		if (err)
4394 			return;
4395 	}
4396 
4397 	for (p = first; p < last; p++) {
4398 		nr = le32_to_cpu(*p);
4399 		if (nr) {
4400 			/* accumulate blocks to free if they're contiguous */
4401 			if (count == 0) {
4402 				block_to_free = nr;
4403 				block_to_free_p = p;
4404 				count = 1;
4405 			} else if (nr == block_to_free + count) {
4406 				count++;
4407 			} else {
4408 				if (ext4_clear_blocks(handle, inode, this_bh,
4409 						      block_to_free, count,
4410 						      block_to_free_p, p))
4411 					break;
4412 				block_to_free = nr;
4413 				block_to_free_p = p;
4414 				count = 1;
4415 			}
4416 		}
4417 	}
4418 
4419 	if (count > 0)
4420 		ext4_clear_blocks(handle, inode, this_bh, block_to_free,
4421 				  count, block_to_free_p, p);
4422 
4423 	if (this_bh) {
4424 		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
4425 
4426 		/*
4427 		 * The buffer head should have an attached journal head at this
4428 		 * point. However, if the data is corrupted and an indirect
4429 		 * block pointed to itself, it would have been detached when
4430 		 * the block was cleared. Check for this instead of OOPSing.
4431 		 */
4432 		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
4433 			ext4_handle_dirty_metadata(handle, inode, this_bh);
4434 		else
4435 			EXT4_ERROR_INODE(inode,
4436 					 "circular indirect block detected at "
4437 					 "block %llu",
4438 				(unsigned long long) this_bh->b_blocknr);
4439 	}
4440 }
4441 
4442 /**
4443  *	ext4_free_branches - free an array of branches
4444  *	@handle: JBD handle for this transaction
4445  *	@inode:	inode we are dealing with
4446  *	@parent_bh: the buffer_head which contains *@first and *@last
4447  *	@first:	array of block numbers
4448  *	@last:	pointer immediately past the end of array
4449  *	@depth:	depth of the branches to free
4450  *
4451  *	We are freeing all blocks refered from these branches (numbers are
4452  *	stored as little-endian 32-bit) and updating @inode->i_blocks
4453  *	appropriately.
4454  */
4455 static void ext4_free_branches(handle_t *handle, struct inode *inode,
4456 			       struct buffer_head *parent_bh,
4457 			       __le32 *first, __le32 *last, int depth)
4458 {
4459 	ext4_fsblk_t nr;
4460 	__le32 *p;
4461 
4462 	if (ext4_handle_is_aborted(handle))
4463 		return;
4464 
4465 	if (depth--) {
4466 		struct buffer_head *bh;
4467 		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
4468 		p = last;
4469 		while (--p >= first) {
4470 			nr = le32_to_cpu(*p);
4471 			if (!nr)
4472 				continue;		/* A hole */
4473 
4474 			if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
4475 						   nr, 1)) {
4476 				EXT4_ERROR_INODE(inode,
4477 						 "invalid indirect mapped "
4478 						 "block %lu (level %d)",
4479 						 (unsigned long) nr, depth);
4480 				break;
4481 			}
4482 
4483 			/* Go read the buffer for the next level down */
4484 			bh = sb_bread(inode->i_sb, nr);
4485 
4486 			/*
4487 			 * A read failure? Report error and clear slot
4488 			 * (should be rare).
4489 			 */
4490 			if (!bh) {
4491 				EXT4_ERROR_INODE(inode,
4492 						 "Read failure block=%llu",
4493 						 (unsigned long long) nr);
4494 				continue;
4495 			}
4496 
4497 			/* This zaps the entire block.  Bottom up. */
4498 			BUFFER_TRACE(bh, "free child branches");
4499 			ext4_free_branches(handle, inode, bh,
4500 					(__le32 *) bh->b_data,
4501 					(__le32 *) bh->b_data + addr_per_block,
4502 					depth);
4503 
4504 			/*
4505 			 * We've probably journalled the indirect block several
4506 			 * times during the truncate.  But it's no longer
4507 			 * needed and we now drop it from the transaction via
4508 			 * jbd2_journal_revoke().
4509 			 *
4510 			 * That's easy if it's exclusively part of this
4511 			 * transaction.  But if it's part of the committing
4512 			 * transaction then jbd2_journal_forget() will simply
4513 			 * brelse() it.  That means that if the underlying
4514 			 * block is reallocated in ext4_get_block(),
4515 			 * unmap_underlying_metadata() will find this block
4516 			 * and will try to get rid of it.  damn, damn.
4517 			 *
4518 			 * If this block has already been committed to the
4519 			 * journal, a revoke record will be written.  And
4520 			 * revoke records must be emitted *before* clearing
4521 			 * this block's bit in the bitmaps.
4522 			 */
4523 			ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
4524 
4525 			/*
4526 			 * Everything below this this pointer has been
4527 			 * released.  Now let this top-of-subtree go.
4528 			 *
4529 			 * We want the freeing of this indirect block to be
4530 			 * atomic in the journal with the updating of the
4531 			 * bitmap block which owns it.  So make some room in
4532 			 * the journal.
4533 			 *
4534 			 * We zero the parent pointer *after* freeing its
4535 			 * pointee in the bitmaps, so if extend_transaction()
4536 			 * for some reason fails to put the bitmap changes and
4537 			 * the release into the same transaction, recovery
4538 			 * will merely complain about releasing a free block,
4539 			 * rather than leaking blocks.
4540 			 */
4541 			if (ext4_handle_is_aborted(handle))
4542 				return;
4543 			if (try_to_extend_transaction(handle, inode)) {
4544 				ext4_mark_inode_dirty(handle, inode);
4545 				ext4_truncate_restart_trans(handle, inode,
4546 					    blocks_for_truncate(inode));
4547 			}
4548 
4549 			ext4_free_blocks(handle, inode, 0, nr, 1,
4550 					 EXT4_FREE_BLOCKS_METADATA);
4551 
4552 			if (parent_bh) {
4553 				/*
4554 				 * The block which we have just freed is
4555 				 * pointed to by an indirect block: journal it
4556 				 */
4557 				BUFFER_TRACE(parent_bh, "get_write_access");
4558 				if (!ext4_journal_get_write_access(handle,
4559 								   parent_bh)){
4560 					*p = 0;
4561 					BUFFER_TRACE(parent_bh,
4562 					"call ext4_handle_dirty_metadata");
4563 					ext4_handle_dirty_metadata(handle,
4564 								   inode,
4565 								   parent_bh);
4566 				}
4567 			}
4568 		}
4569 	} else {
4570 		/* We have reached the bottom of the tree. */
4571 		BUFFER_TRACE(parent_bh, "free data blocks");
4572 		ext4_free_data(handle, inode, parent_bh, first, last);
4573 	}
4574 }
4575 
4576 int ext4_can_truncate(struct inode *inode)
4577 {
4578 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
4579 		return 0;
4580 	if (S_ISREG(inode->i_mode))
4581 		return 1;
4582 	if (S_ISDIR(inode->i_mode))
4583 		return 1;
4584 	if (S_ISLNK(inode->i_mode))
4585 		return !ext4_inode_is_fast_symlink(inode);
4586 	return 0;
4587 }
4588 
4589 /*
4590  * ext4_truncate()
4591  *
4592  * We block out ext4_get_block() block instantiations across the entire
4593  * transaction, and VFS/VM ensures that ext4_truncate() cannot run
4594  * simultaneously on behalf of the same inode.
4595  *
4596  * As we work through the truncate and commmit bits of it to the journal there
4597  * is one core, guiding principle: the file's tree must always be consistent on
4598  * disk.  We must be able to restart the truncate after a crash.
4599  *
4600  * The file's tree may be transiently inconsistent in memory (although it
4601  * probably isn't), but whenever we close off and commit a journal transaction,
4602  * the contents of (the filesystem + the journal) must be consistent and
4603  * restartable.  It's pretty simple, really: bottom up, right to left (although
4604  * left-to-right works OK too).
4605  *
4606  * Note that at recovery time, journal replay occurs *before* the restart of
4607  * truncate against the orphan inode list.
4608  *
4609  * The committed inode has the new, desired i_size (which is the same as
4610  * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
4611  * that this inode's truncate did not complete and it will again call
4612  * ext4_truncate() to have another go.  So there will be instantiated blocks
4613  * to the right of the truncation point in a crashed ext4 filesystem.  But
4614  * that's fine - as long as they are linked from the inode, the post-crash
4615  * ext4_truncate() run will find them and release them.
4616  */
4617 void ext4_truncate(struct inode *inode)
4618 {
4619 	handle_t *handle;
4620 	struct ext4_inode_info *ei = EXT4_I(inode);
4621 	__le32 *i_data = ei->i_data;
4622 	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
4623 	struct address_space *mapping = inode->i_mapping;
4624 	ext4_lblk_t offsets[4];
4625 	Indirect chain[4];
4626 	Indirect *partial;
4627 	__le32 nr = 0;
4628 	int n;
4629 	ext4_lblk_t last_block;
4630 	unsigned blocksize = inode->i_sb->s_blocksize;
4631 
4632 	if (!ext4_can_truncate(inode))
4633 		return;
4634 
4635 	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4636 
4637 	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
4638 		ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
4639 
4640 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
4641 		ext4_ext_truncate(inode);
4642 		return;
4643 	}
4644 
4645 	handle = start_transaction(inode);
4646 	if (IS_ERR(handle))
4647 		return;		/* AKPM: return what? */
4648 
4649 	last_block = (inode->i_size + blocksize-1)
4650 					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
4651 
4652 	if (inode->i_size & (blocksize - 1))
4653 		if (ext4_block_truncate_page(handle, mapping, inode->i_size))
4654 			goto out_stop;
4655 
4656 	n = ext4_block_to_path(inode, last_block, offsets, NULL);
4657 	if (n == 0)
4658 		goto out_stop;	/* error */
4659 
4660 	/*
4661 	 * OK.  This truncate is going to happen.  We add the inode to the
4662 	 * orphan list, so that if this truncate spans multiple transactions,
4663 	 * and we crash, we will resume the truncate when the filesystem
4664 	 * recovers.  It also marks the inode dirty, to catch the new size.
4665 	 *
4666 	 * Implication: the file must always be in a sane, consistent
4667 	 * truncatable state while each transaction commits.
4668 	 */
4669 	if (ext4_orphan_add(handle, inode))
4670 		goto out_stop;
4671 
4672 	/*
4673 	 * From here we block out all ext4_get_block() callers who want to
4674 	 * modify the block allocation tree.
4675 	 */
4676 	down_write(&ei->i_data_sem);
4677 
4678 	ext4_discard_preallocations(inode);
4679 
4680 	/*
4681 	 * The orphan list entry will now protect us from any crash which
4682 	 * occurs before the truncate completes, so it is now safe to propagate
4683 	 * the new, shorter inode size (held for now in i_size) into the
4684 	 * on-disk inode. We do this via i_disksize, which is the value which
4685 	 * ext4 *really* writes onto the disk inode.
4686 	 */
4687 	ei->i_disksize = inode->i_size;
4688 
4689 	if (n == 1) {		/* direct blocks */
4690 		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
4691 			       i_data + EXT4_NDIR_BLOCKS);
4692 		goto do_indirects;
4693 	}
4694 
4695 	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
4696 	/* Kill the top of shared branch (not detached) */
4697 	if (nr) {
4698 		if (partial == chain) {
4699 			/* Shared branch grows from the inode */
4700 			ext4_free_branches(handle, inode, NULL,
4701 					   &nr, &nr+1, (chain+n-1) - partial);
4702 			*partial->p = 0;
4703 			/*
4704 			 * We mark the inode dirty prior to restart,
4705 			 * and prior to stop.  No need for it here.
4706 			 */
4707 		} else {
4708 			/* Shared branch grows from an indirect block */
4709 			BUFFER_TRACE(partial->bh, "get_write_access");
4710 			ext4_free_branches(handle, inode, partial->bh,
4711 					partial->p,
4712 					partial->p+1, (chain+n-1) - partial);
4713 		}
4714 	}
4715 	/* Clear the ends of indirect blocks on the shared branch */
4716 	while (partial > chain) {
4717 		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
4718 				   (__le32*)partial->bh->b_data+addr_per_block,
4719 				   (chain+n-1) - partial);
4720 		BUFFER_TRACE(partial->bh, "call brelse");
4721 		brelse(partial->bh);
4722 		partial--;
4723 	}
4724 do_indirects:
4725 	/* Kill the remaining (whole) subtrees */
4726 	switch (offsets[0]) {
4727 	default:
4728 		nr = i_data[EXT4_IND_BLOCK];
4729 		if (nr) {
4730 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
4731 			i_data[EXT4_IND_BLOCK] = 0;
4732 		}
4733 	case EXT4_IND_BLOCK:
4734 		nr = i_data[EXT4_DIND_BLOCK];
4735 		if (nr) {
4736 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
4737 			i_data[EXT4_DIND_BLOCK] = 0;
4738 		}
4739 	case EXT4_DIND_BLOCK:
4740 		nr = i_data[EXT4_TIND_BLOCK];
4741 		if (nr) {
4742 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
4743 			i_data[EXT4_TIND_BLOCK] = 0;
4744 		}
4745 	case EXT4_TIND_BLOCK:
4746 		;
4747 	}
4748 
4749 	up_write(&ei->i_data_sem);
4750 	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4751 	ext4_mark_inode_dirty(handle, inode);
4752 
4753 	/*
4754 	 * In a multi-transaction truncate, we only make the final transaction
4755 	 * synchronous
4756 	 */
4757 	if (IS_SYNC(inode))
4758 		ext4_handle_sync(handle);
4759 out_stop:
4760 	/*
4761 	 * If this was a simple ftruncate(), and the file will remain alive
4762 	 * then we need to clear up the orphan record which we created above.
4763 	 * However, if this was a real unlink then we were called by
4764 	 * ext4_delete_inode(), and we allow that function to clean up the
4765 	 * orphan info for us.
4766 	 */
4767 	if (inode->i_nlink)
4768 		ext4_orphan_del(handle, inode);
4769 
4770 	ext4_journal_stop(handle);
4771 }
4772 
4773 /*
4774  * ext4_get_inode_loc returns with an extra refcount against the inode's
4775  * underlying buffer_head on success. If 'in_mem' is true, we have all
4776  * data in memory that is needed to recreate the on-disk version of this
4777  * inode.
4778  */
4779 static int __ext4_get_inode_loc(struct inode *inode,
4780 				struct ext4_iloc *iloc, int in_mem)
4781 {
4782 	struct ext4_group_desc	*gdp;
4783 	struct buffer_head	*bh;
4784 	struct super_block	*sb = inode->i_sb;
4785 	ext4_fsblk_t		block;
4786 	int			inodes_per_block, inode_offset;
4787 
4788 	iloc->bh = NULL;
4789 	if (!ext4_valid_inum(sb, inode->i_ino))
4790 		return -EIO;
4791 
4792 	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
4793 	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
4794 	if (!gdp)
4795 		return -EIO;
4796 
4797 	/*
4798 	 * Figure out the offset within the block group inode table
4799 	 */
4800 	inodes_per_block = (EXT4_BLOCK_SIZE(sb) / EXT4_INODE_SIZE(sb));
4801 	inode_offset = ((inode->i_ino - 1) %
4802 			EXT4_INODES_PER_GROUP(sb));
4803 	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
4804 	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
4805 
4806 	bh = sb_getblk(sb, block);
4807 	if (!bh) {
4808 		EXT4_ERROR_INODE(inode, "unable to read inode block - "
4809 				 "block %llu", block);
4810 		return -EIO;
4811 	}
4812 	if (!buffer_uptodate(bh)) {
4813 		lock_buffer(bh);
4814 
4815 		/*
4816 		 * If the buffer has the write error flag, we have failed
4817 		 * to write out another inode in the same block.  In this
4818 		 * case, we don't have to read the block because we may
4819 		 * read the old inode data successfully.
4820 		 */
4821 		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
4822 			set_buffer_uptodate(bh);
4823 
4824 		if (buffer_uptodate(bh)) {
4825 			/* someone brought it uptodate while we waited */
4826 			unlock_buffer(bh);
4827 			goto has_buffer;
4828 		}
4829 
4830 		/*
4831 		 * If we have all information of the inode in memory and this
4832 		 * is the only valid inode in the block, we need not read the
4833 		 * block.
4834 		 */
4835 		if (in_mem) {
4836 			struct buffer_head *bitmap_bh;
4837 			int i, start;
4838 
4839 			start = inode_offset & ~(inodes_per_block - 1);
4840 
4841 			/* Is the inode bitmap in cache? */
4842 			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
4843 			if (!bitmap_bh)
4844 				goto make_io;
4845 
4846 			/*
4847 			 * If the inode bitmap isn't in cache then the
4848 			 * optimisation may end up performing two reads instead
4849 			 * of one, so skip it.
4850 			 */
4851 			if (!buffer_uptodate(bitmap_bh)) {
4852 				brelse(bitmap_bh);
4853 				goto make_io;
4854 			}
4855 			for (i = start; i < start + inodes_per_block; i++) {
4856 				if (i == inode_offset)
4857 					continue;
4858 				if (ext4_test_bit(i, bitmap_bh->b_data))
4859 					break;
4860 			}
4861 			brelse(bitmap_bh);
4862 			if (i == start + inodes_per_block) {
4863 				/* all other inodes are free, so skip I/O */
4864 				memset(bh->b_data, 0, bh->b_size);
4865 				set_buffer_uptodate(bh);
4866 				unlock_buffer(bh);
4867 				goto has_buffer;
4868 			}
4869 		}
4870 
4871 make_io:
4872 		/*
4873 		 * If we need to do any I/O, try to pre-readahead extra
4874 		 * blocks from the inode table.
4875 		 */
4876 		if (EXT4_SB(sb)->s_inode_readahead_blks) {
4877 			ext4_fsblk_t b, end, table;
4878 			unsigned num;
4879 
4880 			table = ext4_inode_table(sb, gdp);
4881 			/* s_inode_readahead_blks is always a power of 2 */
4882 			b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
4883 			if (table > b)
4884 				b = table;
4885 			end = b + EXT4_SB(sb)->s_inode_readahead_blks;
4886 			num = EXT4_INODES_PER_GROUP(sb);
4887 			if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
4888 				       EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
4889 				num -= ext4_itable_unused_count(sb, gdp);
4890 			table += num / inodes_per_block;
4891 			if (end > table)
4892 				end = table;
4893 			while (b <= end)
4894 				sb_breadahead(sb, b++);
4895 		}
4896 
4897 		/*
4898 		 * There are other valid inodes in the buffer, this inode
4899 		 * has in-inode xattrs, or we don't have this inode in memory.
4900 		 * Read the block from disk.
4901 		 */
4902 		get_bh(bh);
4903 		bh->b_end_io = end_buffer_read_sync;
4904 		submit_bh(READ_META, bh);
4905 		wait_on_buffer(bh);
4906 		if (!buffer_uptodate(bh)) {
4907 			EXT4_ERROR_INODE(inode, "unable to read inode "
4908 					 "block %llu", block);
4909 			brelse(bh);
4910 			return -EIO;
4911 		}
4912 	}
4913 has_buffer:
4914 	iloc->bh = bh;
4915 	return 0;
4916 }
4917 
4918 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
4919 {
4920 	/* We have all inode data except xattrs in memory here. */
4921 	return __ext4_get_inode_loc(inode, iloc,
4922 		!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
4923 }
4924 
4925 void ext4_set_inode_flags(struct inode *inode)
4926 {
4927 	unsigned int flags = EXT4_I(inode)->i_flags;
4928 
4929 	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
4930 	if (flags & EXT4_SYNC_FL)
4931 		inode->i_flags |= S_SYNC;
4932 	if (flags & EXT4_APPEND_FL)
4933 		inode->i_flags |= S_APPEND;
4934 	if (flags & EXT4_IMMUTABLE_FL)
4935 		inode->i_flags |= S_IMMUTABLE;
4936 	if (flags & EXT4_NOATIME_FL)
4937 		inode->i_flags |= S_NOATIME;
4938 	if (flags & EXT4_DIRSYNC_FL)
4939 		inode->i_flags |= S_DIRSYNC;
4940 }
4941 
4942 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
4943 void ext4_get_inode_flags(struct ext4_inode_info *ei)
4944 {
4945 	unsigned int vfs_fl;
4946 	unsigned long old_fl, new_fl;
4947 
4948 	do {
4949 		vfs_fl = ei->vfs_inode.i_flags;
4950 		old_fl = ei->i_flags;
4951 		new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
4952 				EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
4953 				EXT4_DIRSYNC_FL);
4954 		if (vfs_fl & S_SYNC)
4955 			new_fl |= EXT4_SYNC_FL;
4956 		if (vfs_fl & S_APPEND)
4957 			new_fl |= EXT4_APPEND_FL;
4958 		if (vfs_fl & S_IMMUTABLE)
4959 			new_fl |= EXT4_IMMUTABLE_FL;
4960 		if (vfs_fl & S_NOATIME)
4961 			new_fl |= EXT4_NOATIME_FL;
4962 		if (vfs_fl & S_DIRSYNC)
4963 			new_fl |= EXT4_DIRSYNC_FL;
4964 	} while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
4965 }
4966 
4967 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
4968 				  struct ext4_inode_info *ei)
4969 {
4970 	blkcnt_t i_blocks ;
4971 	struct inode *inode = &(ei->vfs_inode);
4972 	struct super_block *sb = inode->i_sb;
4973 
4974 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
4975 				EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
4976 		/* we are using combined 48 bit field */
4977 		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
4978 					le32_to_cpu(raw_inode->i_blocks_lo);
4979 		if (ei->i_flags & EXT4_HUGE_FILE_FL) {
4980 			/* i_blocks represent file system block size */
4981 			return i_blocks  << (inode->i_blkbits - 9);
4982 		} else {
4983 			return i_blocks;
4984 		}
4985 	} else {
4986 		return le32_to_cpu(raw_inode->i_blocks_lo);
4987 	}
4988 }
4989 
4990 struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4991 {
4992 	struct ext4_iloc iloc;
4993 	struct ext4_inode *raw_inode;
4994 	struct ext4_inode_info *ei;
4995 	struct inode *inode;
4996 	journal_t *journal = EXT4_SB(sb)->s_journal;
4997 	long ret;
4998 	int block;
4999 
5000 	inode = iget_locked(sb, ino);
5001 	if (!inode)
5002 		return ERR_PTR(-ENOMEM);
5003 	if (!(inode->i_state & I_NEW))
5004 		return inode;
5005 
5006 	ei = EXT4_I(inode);
5007 	iloc.bh = 0;
5008 
5009 	ret = __ext4_get_inode_loc(inode, &iloc, 0);
5010 	if (ret < 0)
5011 		goto bad_inode;
5012 	raw_inode = ext4_raw_inode(&iloc);
5013 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
5014 	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
5015 	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
5016 	if (!(test_opt(inode->i_sb, NO_UID32))) {
5017 		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
5018 		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
5019 	}
5020 	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
5021 
5022 	ei->i_state_flags = 0;
5023 	ei->i_dir_start_lookup = 0;
5024 	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
5025 	/* We now have enough fields to check if the inode was active or not.
5026 	 * This is needed because nfsd might try to access dead inodes
5027 	 * the test is that same one that e2fsck uses
5028 	 * NeilBrown 1999oct15
5029 	 */
5030 	if (inode->i_nlink == 0) {
5031 		if (inode->i_mode == 0 ||
5032 		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
5033 			/* this inode is deleted */
5034 			ret = -ESTALE;
5035 			goto bad_inode;
5036 		}
5037 		/* The only unlinked inodes we let through here have
5038 		 * valid i_mode and are being read by the orphan
5039 		 * recovery code: that's fine, we're about to complete
5040 		 * the process of deleting those. */
5041 	}
5042 	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
5043 	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
5044 	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
5045 	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
5046 		ei->i_file_acl |=
5047 			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
5048 	inode->i_size = ext4_isize(raw_inode);
5049 	ei->i_disksize = inode->i_size;
5050 #ifdef CONFIG_QUOTA
5051 	ei->i_reserved_quota = 0;
5052 #endif
5053 	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
5054 	ei->i_block_group = iloc.block_group;
5055 	ei->i_last_alloc_group = ~0;
5056 	/*
5057 	 * NOTE! The in-memory inode i_data array is in little-endian order
5058 	 * even on big-endian machines: we do NOT byteswap the block numbers!
5059 	 */
5060 	for (block = 0; block < EXT4_N_BLOCKS; block++)
5061 		ei->i_data[block] = raw_inode->i_block[block];
5062 	INIT_LIST_HEAD(&ei->i_orphan);
5063 
5064 	/*
5065 	 * Set transaction id's of transactions that have to be committed
5066 	 * to finish f[data]sync. We set them to currently running transaction
5067 	 * as we cannot be sure that the inode or some of its metadata isn't
5068 	 * part of the transaction - the inode could have been reclaimed and
5069 	 * now it is reread from disk.
5070 	 */
5071 	if (journal) {
5072 		transaction_t *transaction;
5073 		tid_t tid;
5074 
5075 		spin_lock(&journal->j_state_lock);
5076 		if (journal->j_running_transaction)
5077 			transaction = journal->j_running_transaction;
5078 		else
5079 			transaction = journal->j_committing_transaction;
5080 		if (transaction)
5081 			tid = transaction->t_tid;
5082 		else
5083 			tid = journal->j_commit_sequence;
5084 		spin_unlock(&journal->j_state_lock);
5085 		ei->i_sync_tid = tid;
5086 		ei->i_datasync_tid = tid;
5087 	}
5088 
5089 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
5090 		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
5091 		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
5092 		    EXT4_INODE_SIZE(inode->i_sb)) {
5093 			ret = -EIO;
5094 			goto bad_inode;
5095 		}
5096 		if (ei->i_extra_isize == 0) {
5097 			/* The extra space is currently unused. Use it. */
5098 			ei->i_extra_isize = sizeof(struct ext4_inode) -
5099 					    EXT4_GOOD_OLD_INODE_SIZE;
5100 		} else {
5101 			__le32 *magic = (void *)raw_inode +
5102 					EXT4_GOOD_OLD_INODE_SIZE +
5103 					ei->i_extra_isize;
5104 			if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
5105 				ext4_set_inode_state(inode, EXT4_STATE_XATTR);
5106 		}
5107 	} else
5108 		ei->i_extra_isize = 0;
5109 
5110 	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
5111 	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
5112 	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
5113 	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
5114 
5115 	inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
5116 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
5117 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
5118 			inode->i_version |=
5119 			(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
5120 	}
5121 
5122 	ret = 0;
5123 	if (ei->i_file_acl &&
5124 	    !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
5125 		EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
5126 				 ei->i_file_acl);
5127 		ret = -EIO;
5128 		goto bad_inode;
5129 	} else if (ei->i_flags & EXT4_EXTENTS_FL) {
5130 		if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
5131 		    (S_ISLNK(inode->i_mode) &&
5132 		     !ext4_inode_is_fast_symlink(inode)))
5133 			/* Validate extent which is part of inode */
5134 			ret = ext4_ext_check_inode(inode);
5135 	} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
5136 		   (S_ISLNK(inode->i_mode) &&
5137 		    !ext4_inode_is_fast_symlink(inode))) {
5138 		/* Validate block references which are part of inode */
5139 		ret = ext4_check_inode_blockref(inode);
5140 	}
5141 	if (ret)
5142 		goto bad_inode;
5143 
5144 	if (S_ISREG(inode->i_mode)) {
5145 		inode->i_op = &ext4_file_inode_operations;
5146 		inode->i_fop = &ext4_file_operations;
5147 		ext4_set_aops(inode);
5148 	} else if (S_ISDIR(inode->i_mode)) {
5149 		inode->i_op = &ext4_dir_inode_operations;
5150 		inode->i_fop = &ext4_dir_operations;
5151 	} else if (S_ISLNK(inode->i_mode)) {
5152 		if (ext4_inode_is_fast_symlink(inode)) {
5153 			inode->i_op = &ext4_fast_symlink_inode_operations;
5154 			nd_terminate_link(ei->i_data, inode->i_size,
5155 				sizeof(ei->i_data) - 1);
5156 		} else {
5157 			inode->i_op = &ext4_symlink_inode_operations;
5158 			ext4_set_aops(inode);
5159 		}
5160 	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
5161 	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
5162 		inode->i_op = &ext4_special_inode_operations;
5163 		if (raw_inode->i_block[0])
5164 			init_special_inode(inode, inode->i_mode,
5165 			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
5166 		else
5167 			init_special_inode(inode, inode->i_mode,
5168 			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
5169 	} else {
5170 		ret = -EIO;
5171 		EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
5172 		goto bad_inode;
5173 	}
5174 	brelse(iloc.bh);
5175 	ext4_set_inode_flags(inode);
5176 	unlock_new_inode(inode);
5177 	return inode;
5178 
5179 bad_inode:
5180 	brelse(iloc.bh);
5181 	iget_failed(inode);
5182 	return ERR_PTR(ret);
5183 }
5184 
5185 static int ext4_inode_blocks_set(handle_t *handle,
5186 				struct ext4_inode *raw_inode,
5187 				struct ext4_inode_info *ei)
5188 {
5189 	struct inode *inode = &(ei->vfs_inode);
5190 	u64 i_blocks = inode->i_blocks;
5191 	struct super_block *sb = inode->i_sb;
5192 
5193 	if (i_blocks <= ~0U) {
5194 		/*
5195 		 * i_blocks can be represnted in a 32 bit variable
5196 		 * as multiple of 512 bytes
5197 		 */
5198 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
5199 		raw_inode->i_blocks_high = 0;
5200 		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
5201 		return 0;
5202 	}
5203 	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
5204 		return -EFBIG;
5205 
5206 	if (i_blocks <= 0xffffffffffffULL) {
5207 		/*
5208 		 * i_blocks can be represented in a 48 bit variable
5209 		 * as multiple of 512 bytes
5210 		 */
5211 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
5212 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
5213 		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
5214 	} else {
5215 		ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
5216 		/* i_block is stored in file system block size */
5217 		i_blocks = i_blocks >> (inode->i_blkbits - 9);
5218 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
5219 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
5220 	}
5221 	return 0;
5222 }
5223 
5224 /*
5225  * Post the struct inode info into an on-disk inode location in the
5226  * buffer-cache.  This gobbles the caller's reference to the
5227  * buffer_head in the inode location struct.
5228  *
5229  * The caller must have write access to iloc->bh.
5230  */
5231 static int ext4_do_update_inode(handle_t *handle,
5232 				struct inode *inode,
5233 				struct ext4_iloc *iloc)
5234 {
5235 	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
5236 	struct ext4_inode_info *ei = EXT4_I(inode);
5237 	struct buffer_head *bh = iloc->bh;
5238 	int err = 0, rc, block;
5239 
5240 	/* For fields not not tracking in the in-memory inode,
5241 	 * initialise them to zero for new inodes. */
5242 	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
5243 		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
5244 
5245 	ext4_get_inode_flags(ei);
5246 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
5247 	if (!(test_opt(inode->i_sb, NO_UID32))) {
5248 		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
5249 		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
5250 /*
5251  * Fix up interoperability with old kernels. Otherwise, old inodes get
5252  * re-used with the upper 16 bits of the uid/gid intact
5253  */
5254 		if (!ei->i_dtime) {
5255 			raw_inode->i_uid_high =
5256 				cpu_to_le16(high_16_bits(inode->i_uid));
5257 			raw_inode->i_gid_high =
5258 				cpu_to_le16(high_16_bits(inode->i_gid));
5259 		} else {
5260 			raw_inode->i_uid_high = 0;
5261 			raw_inode->i_gid_high = 0;
5262 		}
5263 	} else {
5264 		raw_inode->i_uid_low =
5265 			cpu_to_le16(fs_high2lowuid(inode->i_uid));
5266 		raw_inode->i_gid_low =
5267 			cpu_to_le16(fs_high2lowgid(inode->i_gid));
5268 		raw_inode->i_uid_high = 0;
5269 		raw_inode->i_gid_high = 0;
5270 	}
5271 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
5272 
5273 	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
5274 	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
5275 	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
5276 	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
5277 
5278 	if (ext4_inode_blocks_set(handle, raw_inode, ei))
5279 		goto out_brelse;
5280 	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
5281 	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
5282 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
5283 	    cpu_to_le32(EXT4_OS_HURD))
5284 		raw_inode->i_file_acl_high =
5285 			cpu_to_le16(ei->i_file_acl >> 32);
5286 	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
5287 	ext4_isize_set(raw_inode, ei->i_disksize);
5288 	if (ei->i_disksize > 0x7fffffffULL) {
5289 		struct super_block *sb = inode->i_sb;
5290 		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
5291 				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
5292 				EXT4_SB(sb)->s_es->s_rev_level ==
5293 				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
5294 			/* If this is the first large file
5295 			 * created, add a flag to the superblock.
5296 			 */
5297 			err = ext4_journal_get_write_access(handle,
5298 					EXT4_SB(sb)->s_sbh);
5299 			if (err)
5300 				goto out_brelse;
5301 			ext4_update_dynamic_rev(sb);
5302 			EXT4_SET_RO_COMPAT_FEATURE(sb,
5303 					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
5304 			sb->s_dirt = 1;
5305 			ext4_handle_sync(handle);
5306 			err = ext4_handle_dirty_metadata(handle, NULL,
5307 					EXT4_SB(sb)->s_sbh);
5308 		}
5309 	}
5310 	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
5311 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
5312 		if (old_valid_dev(inode->i_rdev)) {
5313 			raw_inode->i_block[0] =
5314 				cpu_to_le32(old_encode_dev(inode->i_rdev));
5315 			raw_inode->i_block[1] = 0;
5316 		} else {
5317 			raw_inode->i_block[0] = 0;
5318 			raw_inode->i_block[1] =
5319 				cpu_to_le32(new_encode_dev(inode->i_rdev));
5320 			raw_inode->i_block[2] = 0;
5321 		}
5322 	} else
5323 		for (block = 0; block < EXT4_N_BLOCKS; block++)
5324 			raw_inode->i_block[block] = ei->i_data[block];
5325 
5326 	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
5327 	if (ei->i_extra_isize) {
5328 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
5329 			raw_inode->i_version_hi =
5330 			cpu_to_le32(inode->i_version >> 32);
5331 		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
5332 	}
5333 
5334 	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
5335 	rc = ext4_handle_dirty_metadata(handle, NULL, bh);
5336 	if (!err)
5337 		err = rc;
5338 	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
5339 
5340 	ext4_update_inode_fsync_trans(handle, inode, 0);
5341 out_brelse:
5342 	brelse(bh);
5343 	ext4_std_error(inode->i_sb, err);
5344 	return err;
5345 }
5346 
5347 /*
5348  * ext4_write_inode()
5349  *
5350  * We are called from a few places:
5351  *
5352  * - Within generic_file_write() for O_SYNC files.
5353  *   Here, there will be no transaction running. We wait for any running
5354  *   trasnaction to commit.
5355  *
5356  * - Within sys_sync(), kupdate and such.
5357  *   We wait on commit, if tol to.
5358  *
5359  * - Within prune_icache() (PF_MEMALLOC == true)
5360  *   Here we simply return.  We can't afford to block kswapd on the
5361  *   journal commit.
5362  *
5363  * In all cases it is actually safe for us to return without doing anything,
5364  * because the inode has been copied into a raw inode buffer in
5365  * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
5366  * knfsd.
5367  *
5368  * Note that we are absolutely dependent upon all inode dirtiers doing the
5369  * right thing: they *must* call mark_inode_dirty() after dirtying info in
5370  * which we are interested.
5371  *
5372  * It would be a bug for them to not do this.  The code:
5373  *
5374  *	mark_inode_dirty(inode)
5375  *	stuff();
5376  *	inode->i_size = expr;
5377  *
5378  * is in error because a kswapd-driven write_inode() could occur while
5379  * `stuff()' is running, and the new i_size will be lost.  Plus the inode
5380  * will no longer be on the superblock's dirty inode list.
5381  */
5382 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
5383 {
5384 	int err;
5385 
5386 	if (current->flags & PF_MEMALLOC)
5387 		return 0;
5388 
5389 	if (EXT4_SB(inode->i_sb)->s_journal) {
5390 		if (ext4_journal_current_handle()) {
5391 			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
5392 			dump_stack();
5393 			return -EIO;
5394 		}
5395 
5396 		if (wbc->sync_mode != WB_SYNC_ALL)
5397 			return 0;
5398 
5399 		err = ext4_force_commit(inode->i_sb);
5400 	} else {
5401 		struct ext4_iloc iloc;
5402 
5403 		err = __ext4_get_inode_loc(inode, &iloc, 0);
5404 		if (err)
5405 			return err;
5406 		if (wbc->sync_mode == WB_SYNC_ALL)
5407 			sync_dirty_buffer(iloc.bh);
5408 		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
5409 			EXT4_ERROR_INODE(inode,
5410 				"IO error syncing inode (block=%llu)",
5411 				(unsigned long long) iloc.bh->b_blocknr);
5412 			err = -EIO;
5413 		}
5414 		brelse(iloc.bh);
5415 	}
5416 	return err;
5417 }
5418 
5419 /*
5420  * ext4_setattr()
5421  *
5422  * Called from notify_change.
5423  *
5424  * We want to trap VFS attempts to truncate the file as soon as
5425  * possible.  In particular, we want to make sure that when the VFS
5426  * shrinks i_size, we put the inode on the orphan list and modify
5427  * i_disksize immediately, so that during the subsequent flushing of
5428  * dirty pages and freeing of disk blocks, we can guarantee that any
5429  * commit will leave the blocks being flushed in an unused state on
5430  * disk.  (On recovery, the inode will get truncated and the blocks will
5431  * be freed, so we have a strong guarantee that no future commit will
5432  * leave these blocks visible to the user.)
5433  *
5434  * Another thing we have to assure is that if we are in ordered mode
5435  * and inode is still attached to the committing transaction, we must
5436  * we start writeout of all the dirty pages which are being truncated.
5437  * This way we are sure that all the data written in the previous
5438  * transaction are already on disk (truncate waits for pages under
5439  * writeback).
5440  *
5441  * Called with inode->i_mutex down.
5442  */
5443 int ext4_setattr(struct dentry *dentry, struct iattr *attr)
5444 {
5445 	struct inode *inode = dentry->d_inode;
5446 	int error, rc = 0;
5447 	const unsigned int ia_valid = attr->ia_valid;
5448 
5449 	error = inode_change_ok(inode, attr);
5450 	if (error)
5451 		return error;
5452 
5453 	if (is_quota_modification(inode, attr))
5454 		dquot_initialize(inode);
5455 	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
5456 		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
5457 		handle_t *handle;
5458 
5459 		/* (user+group)*(old+new) structure, inode write (sb,
5460 		 * inode block, ? - but truncate inode update has it) */
5461 		handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
5462 					EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
5463 		if (IS_ERR(handle)) {
5464 			error = PTR_ERR(handle);
5465 			goto err_out;
5466 		}
5467 		error = dquot_transfer(inode, attr);
5468 		if (error) {
5469 			ext4_journal_stop(handle);
5470 			return error;
5471 		}
5472 		/* Update corresponding info in inode so that everything is in
5473 		 * one transaction */
5474 		if (attr->ia_valid & ATTR_UID)
5475 			inode->i_uid = attr->ia_uid;
5476 		if (attr->ia_valid & ATTR_GID)
5477 			inode->i_gid = attr->ia_gid;
5478 		error = ext4_mark_inode_dirty(handle, inode);
5479 		ext4_journal_stop(handle);
5480 	}
5481 
5482 	if (attr->ia_valid & ATTR_SIZE) {
5483 		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
5484 			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5485 
5486 			if (attr->ia_size > sbi->s_bitmap_maxbytes) {
5487 				error = -EFBIG;
5488 				goto err_out;
5489 			}
5490 		}
5491 	}
5492 
5493 	if (S_ISREG(inode->i_mode) &&
5494 	    attr->ia_valid & ATTR_SIZE &&
5495 	    (attr->ia_size < inode->i_size ||
5496 	     (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)))) {
5497 		handle_t *handle;
5498 
5499 		handle = ext4_journal_start(inode, 3);
5500 		if (IS_ERR(handle)) {
5501 			error = PTR_ERR(handle);
5502 			goto err_out;
5503 		}
5504 
5505 		error = ext4_orphan_add(handle, inode);
5506 		EXT4_I(inode)->i_disksize = attr->ia_size;
5507 		rc = ext4_mark_inode_dirty(handle, inode);
5508 		if (!error)
5509 			error = rc;
5510 		ext4_journal_stop(handle);
5511 
5512 		if (ext4_should_order_data(inode)) {
5513 			error = ext4_begin_ordered_truncate(inode,
5514 							    attr->ia_size);
5515 			if (error) {
5516 				/* Do as much error cleanup as possible */
5517 				handle = ext4_journal_start(inode, 3);
5518 				if (IS_ERR(handle)) {
5519 					ext4_orphan_del(NULL, inode);
5520 					goto err_out;
5521 				}
5522 				ext4_orphan_del(handle, inode);
5523 				ext4_journal_stop(handle);
5524 				goto err_out;
5525 			}
5526 		}
5527 		/* ext4_truncate will clear the flag */
5528 		if ((ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)))
5529 			ext4_truncate(inode);
5530 	}
5531 
5532 	rc = inode_setattr(inode, attr);
5533 
5534 	/* If inode_setattr's call to ext4_truncate failed to get a
5535 	 * transaction handle at all, we need to clean up the in-core
5536 	 * orphan list manually. */
5537 	if (inode->i_nlink)
5538 		ext4_orphan_del(NULL, inode);
5539 
5540 	if (!rc && (ia_valid & ATTR_MODE))
5541 		rc = ext4_acl_chmod(inode);
5542 
5543 err_out:
5544 	ext4_std_error(inode->i_sb, error);
5545 	if (!error)
5546 		error = rc;
5547 	return error;
5548 }
5549 
5550 int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
5551 		 struct kstat *stat)
5552 {
5553 	struct inode *inode;
5554 	unsigned long delalloc_blocks;
5555 
5556 	inode = dentry->d_inode;
5557 	generic_fillattr(inode, stat);
5558 
5559 	/*
5560 	 * We can't update i_blocks if the block allocation is delayed
5561 	 * otherwise in the case of system crash before the real block
5562 	 * allocation is done, we will have i_blocks inconsistent with
5563 	 * on-disk file blocks.
5564 	 * We always keep i_blocks updated together with real
5565 	 * allocation. But to not confuse with user, stat
5566 	 * will return the blocks that include the delayed allocation
5567 	 * blocks for this file.
5568 	 */
5569 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
5570 	delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
5571 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
5572 
5573 	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
5574 	return 0;
5575 }
5576 
5577 static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
5578 				      int chunk)
5579 {
5580 	int indirects;
5581 
5582 	/* if nrblocks are contiguous */
5583 	if (chunk) {
5584 		/*
5585 		 * With N contiguous data blocks, it need at most
5586 		 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks
5587 		 * 2 dindirect blocks
5588 		 * 1 tindirect block
5589 		 */
5590 		indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb);
5591 		return indirects + 3;
5592 	}
5593 	/*
5594 	 * if nrblocks are not contiguous, worse case, each block touch
5595 	 * a indirect block, and each indirect block touch a double indirect
5596 	 * block, plus a triple indirect block
5597 	 */
5598 	indirects = nrblocks * 2 + 1;
5599 	return indirects;
5600 }
5601 
5602 static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
5603 {
5604 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5605 		return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
5606 	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
5607 }
5608 
5609 /*
5610  * Account for index blocks, block groups bitmaps and block group
5611  * descriptor blocks if modify datablocks and index blocks
5612  * worse case, the indexs blocks spread over different block groups
5613  *
5614  * If datablocks are discontiguous, they are possible to spread over
5615  * different block groups too. If they are contiuguous, with flexbg,
5616  * they could still across block group boundary.
5617  *
5618  * Also account for superblock, inode, quota and xattr blocks
5619  */
5620 int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
5621 {
5622 	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
5623 	int gdpblocks;
5624 	int idxblocks;
5625 	int ret = 0;
5626 
5627 	/*
5628 	 * How many index blocks need to touch to modify nrblocks?
5629 	 * The "Chunk" flag indicating whether the nrblocks is
5630 	 * physically contiguous on disk
5631 	 *
5632 	 * For Direct IO and fallocate, they calls get_block to allocate
5633 	 * one single extent at a time, so they could set the "Chunk" flag
5634 	 */
5635 	idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
5636 
5637 	ret = idxblocks;
5638 
5639 	/*
5640 	 * Now let's see how many group bitmaps and group descriptors need
5641 	 * to account
5642 	 */
5643 	groups = idxblocks;
5644 	if (chunk)
5645 		groups += 1;
5646 	else
5647 		groups += nrblocks;
5648 
5649 	gdpblocks = groups;
5650 	if (groups > ngroups)
5651 		groups = ngroups;
5652 	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
5653 		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
5654 
5655 	/* bitmaps and block group descriptor blocks */
5656 	ret += groups + gdpblocks;
5657 
5658 	/* Blocks for super block, inode, quota and xattr blocks */
5659 	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
5660 
5661 	return ret;
5662 }
5663 
5664 /*
5665  * Calulate the total number of credits to reserve to fit
5666  * the modification of a single pages into a single transaction,
5667  * which may include multiple chunks of block allocations.
5668  *
5669  * This could be called via ext4_write_begin()
5670  *
5671  * We need to consider the worse case, when
5672  * one new block per extent.
5673  */
5674 int ext4_writepage_trans_blocks(struct inode *inode)
5675 {
5676 	int bpp = ext4_journal_blocks_per_page(inode);
5677 	int ret;
5678 
5679 	ret = ext4_meta_trans_blocks(inode, bpp, 0);
5680 
5681 	/* Account for data blocks for journalled mode */
5682 	if (ext4_should_journal_data(inode))
5683 		ret += bpp;
5684 	return ret;
5685 }
5686 
5687 /*
5688  * Calculate the journal credits for a chunk of data modification.
5689  *
5690  * This is called from DIO, fallocate or whoever calling
5691  * ext4_get_blocks() to map/allocate a chunk of contiguous disk blocks.
5692  *
5693  * journal buffers for data blocks are not included here, as DIO
5694  * and fallocate do no need to journal data buffers.
5695  */
5696 int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
5697 {
5698 	return ext4_meta_trans_blocks(inode, nrblocks, 1);
5699 }
5700 
5701 /*
5702  * The caller must have previously called ext4_reserve_inode_write().
5703  * Give this, we know that the caller already has write access to iloc->bh.
5704  */
5705 int ext4_mark_iloc_dirty(handle_t *handle,
5706 			 struct inode *inode, struct ext4_iloc *iloc)
5707 {
5708 	int err = 0;
5709 
5710 	if (test_opt(inode->i_sb, I_VERSION))
5711 		inode_inc_iversion(inode);
5712 
5713 	/* the do_update_inode consumes one bh->b_count */
5714 	get_bh(iloc->bh);
5715 
5716 	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
5717 	err = ext4_do_update_inode(handle, inode, iloc);
5718 	put_bh(iloc->bh);
5719 	return err;
5720 }
5721 
5722 /*
5723  * On success, We end up with an outstanding reference count against
5724  * iloc->bh.  This _must_ be cleaned up later.
5725  */
5726 
5727 int
5728 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
5729 			 struct ext4_iloc *iloc)
5730 {
5731 	int err;
5732 
5733 	err = ext4_get_inode_loc(inode, iloc);
5734 	if (!err) {
5735 		BUFFER_TRACE(iloc->bh, "get_write_access");
5736 		err = ext4_journal_get_write_access(handle, iloc->bh);
5737 		if (err) {
5738 			brelse(iloc->bh);
5739 			iloc->bh = NULL;
5740 		}
5741 	}
5742 	ext4_std_error(inode->i_sb, err);
5743 	return err;
5744 }
5745 
5746 /*
5747  * Expand an inode by new_extra_isize bytes.
5748  * Returns 0 on success or negative error number on failure.
5749  */
5750 static int ext4_expand_extra_isize(struct inode *inode,
5751 				   unsigned int new_extra_isize,
5752 				   struct ext4_iloc iloc,
5753 				   handle_t *handle)
5754 {
5755 	struct ext4_inode *raw_inode;
5756 	struct ext4_xattr_ibody_header *header;
5757 	struct ext4_xattr_entry *entry;
5758 
5759 	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
5760 		return 0;
5761 
5762 	raw_inode = ext4_raw_inode(&iloc);
5763 
5764 	header = IHDR(inode, raw_inode);
5765 	entry = IFIRST(header);
5766 
5767 	/* No extended attributes present */
5768 	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
5769 	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
5770 		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
5771 			new_extra_isize);
5772 		EXT4_I(inode)->i_extra_isize = new_extra_isize;
5773 		return 0;
5774 	}
5775 
5776 	/* try to expand with EAs present */
5777 	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
5778 					  raw_inode, handle);
5779 }
5780 
5781 /*
5782  * What we do here is to mark the in-core inode as clean with respect to inode
5783  * dirtiness (it may still be data-dirty).
5784  * This means that the in-core inode may be reaped by prune_icache
5785  * without having to perform any I/O.  This is a very good thing,
5786  * because *any* task may call prune_icache - even ones which
5787  * have a transaction open against a different journal.
5788  *
5789  * Is this cheating?  Not really.  Sure, we haven't written the
5790  * inode out, but prune_icache isn't a user-visible syncing function.
5791  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
5792  * we start and wait on commits.
5793  *
5794  * Is this efficient/effective?  Well, we're being nice to the system
5795  * by cleaning up our inodes proactively so they can be reaped
5796  * without I/O.  But we are potentially leaving up to five seconds'
5797  * worth of inodes floating about which prune_icache wants us to
5798  * write out.  One way to fix that would be to get prune_icache()
5799  * to do a write_super() to free up some memory.  It has the desired
5800  * effect.
5801  */
5802 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
5803 {
5804 	struct ext4_iloc iloc;
5805 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5806 	static unsigned int mnt_count;
5807 	int err, ret;
5808 
5809 	might_sleep();
5810 	err = ext4_reserve_inode_write(handle, inode, &iloc);
5811 	if (ext4_handle_valid(handle) &&
5812 	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
5813 	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
5814 		/*
5815 		 * We need extra buffer credits since we may write into EA block
5816 		 * with this same handle. If journal_extend fails, then it will
5817 		 * only result in a minor loss of functionality for that inode.
5818 		 * If this is felt to be critical, then e2fsck should be run to
5819 		 * force a large enough s_min_extra_isize.
5820 		 */
5821 		if ((jbd2_journal_extend(handle,
5822 			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
5823 			ret = ext4_expand_extra_isize(inode,
5824 						      sbi->s_want_extra_isize,
5825 						      iloc, handle);
5826 			if (ret) {
5827 				ext4_set_inode_state(inode,
5828 						     EXT4_STATE_NO_EXPAND);
5829 				if (mnt_count !=
5830 					le16_to_cpu(sbi->s_es->s_mnt_count)) {
5831 					ext4_warning(inode->i_sb,
5832 					"Unable to expand inode %lu. Delete"
5833 					" some EAs or run e2fsck.",
5834 					inode->i_ino);
5835 					mnt_count =
5836 					  le16_to_cpu(sbi->s_es->s_mnt_count);
5837 				}
5838 			}
5839 		}
5840 	}
5841 	if (!err)
5842 		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
5843 	return err;
5844 }
5845 
5846 /*
5847  * ext4_dirty_inode() is called from __mark_inode_dirty()
5848  *
5849  * We're really interested in the case where a file is being extended.
5850  * i_size has been changed by generic_commit_write() and we thus need
5851  * to include the updated inode in the current transaction.
5852  *
5853  * Also, dquot_alloc_block() will always dirty the inode when blocks
5854  * are allocated to the file.
5855  *
5856  * If the inode is marked synchronous, we don't honour that here - doing
5857  * so would cause a commit on atime updates, which we don't bother doing.
5858  * We handle synchronous inodes at the highest possible level.
5859  */
5860 void ext4_dirty_inode(struct inode *inode)
5861 {
5862 	handle_t *handle;
5863 
5864 	handle = ext4_journal_start(inode, 2);
5865 	if (IS_ERR(handle))
5866 		goto out;
5867 
5868 	ext4_mark_inode_dirty(handle, inode);
5869 
5870 	ext4_journal_stop(handle);
5871 out:
5872 	return;
5873 }
5874 
5875 #if 0
5876 /*
5877  * Bind an inode's backing buffer_head into this transaction, to prevent
5878  * it from being flushed to disk early.  Unlike
5879  * ext4_reserve_inode_write, this leaves behind no bh reference and
5880  * returns no iloc structure, so the caller needs to repeat the iloc
5881  * lookup to mark the inode dirty later.
5882  */
5883 static int ext4_pin_inode(handle_t *handle, struct inode *inode)
5884 {
5885 	struct ext4_iloc iloc;
5886 
5887 	int err = 0;
5888 	if (handle) {
5889 		err = ext4_get_inode_loc(inode, &iloc);
5890 		if (!err) {
5891 			BUFFER_TRACE(iloc.bh, "get_write_access");
5892 			err = jbd2_journal_get_write_access(handle, iloc.bh);
5893 			if (!err)
5894 				err = ext4_handle_dirty_metadata(handle,
5895 								 NULL,
5896 								 iloc.bh);
5897 			brelse(iloc.bh);
5898 		}
5899 	}
5900 	ext4_std_error(inode->i_sb, err);
5901 	return err;
5902 }
5903 #endif
5904 
5905 int ext4_change_inode_journal_flag(struct inode *inode, int val)
5906 {
5907 	journal_t *journal;
5908 	handle_t *handle;
5909 	int err;
5910 
5911 	/*
5912 	 * We have to be very careful here: changing a data block's
5913 	 * journaling status dynamically is dangerous.  If we write a
5914 	 * data block to the journal, change the status and then delete
5915 	 * that block, we risk forgetting to revoke the old log record
5916 	 * from the journal and so a subsequent replay can corrupt data.
5917 	 * So, first we make sure that the journal is empty and that
5918 	 * nobody is changing anything.
5919 	 */
5920 
5921 	journal = EXT4_JOURNAL(inode);
5922 	if (!journal)
5923 		return 0;
5924 	if (is_journal_aborted(journal))
5925 		return -EROFS;
5926 
5927 	jbd2_journal_lock_updates(journal);
5928 	jbd2_journal_flush(journal);
5929 
5930 	/*
5931 	 * OK, there are no updates running now, and all cached data is
5932 	 * synced to disk.  We are now in a completely consistent state
5933 	 * which doesn't have anything in the journal, and we know that
5934 	 * no filesystem updates are running, so it is safe to modify
5935 	 * the inode's in-core data-journaling state flag now.
5936 	 */
5937 
5938 	if (val)
5939 		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
5940 	else
5941 		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
5942 	ext4_set_aops(inode);
5943 
5944 	jbd2_journal_unlock_updates(journal);
5945 
5946 	/* Finally we can mark the inode as dirty. */
5947 
5948 	handle = ext4_journal_start(inode, 1);
5949 	if (IS_ERR(handle))
5950 		return PTR_ERR(handle);
5951 
5952 	err = ext4_mark_inode_dirty(handle, inode);
5953 	ext4_handle_sync(handle);
5954 	ext4_journal_stop(handle);
5955 	ext4_std_error(inode->i_sb, err);
5956 
5957 	return err;
5958 }
5959 
5960 static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
5961 {
5962 	return !buffer_mapped(bh);
5963 }
5964 
5965 int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5966 {
5967 	struct page *page = vmf->page;
5968 	loff_t size;
5969 	unsigned long len;
5970 	int ret = -EINVAL;
5971 	void *fsdata;
5972 	struct file *file = vma->vm_file;
5973 	struct inode *inode = file->f_path.dentry->d_inode;
5974 	struct address_space *mapping = inode->i_mapping;
5975 
5976 	/*
5977 	 * Get i_alloc_sem to stop truncates messing with the inode. We cannot
5978 	 * get i_mutex because we are already holding mmap_sem.
5979 	 */
5980 	down_read(&inode->i_alloc_sem);
5981 	size = i_size_read(inode);
5982 	if (page->mapping != mapping || size <= page_offset(page)
5983 	    || !PageUptodate(page)) {
5984 		/* page got truncated from under us? */
5985 		goto out_unlock;
5986 	}
5987 	ret = 0;
5988 	if (PageMappedToDisk(page))
5989 		goto out_unlock;
5990 
5991 	if (page->index == size >> PAGE_CACHE_SHIFT)
5992 		len = size & ~PAGE_CACHE_MASK;
5993 	else
5994 		len = PAGE_CACHE_SIZE;
5995 
5996 	lock_page(page);
5997 	/*
5998 	 * return if we have all the buffers mapped. This avoid
5999 	 * the need to call write_begin/write_end which does a
6000 	 * journal_start/journal_stop which can block and take
6001 	 * long time
6002 	 */
6003 	if (page_has_buffers(page)) {
6004 		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
6005 					ext4_bh_unmapped)) {
6006 			unlock_page(page);
6007 			goto out_unlock;
6008 		}
6009 	}
6010 	unlock_page(page);
6011 	/*
6012 	 * OK, we need to fill the hole... Do write_begin write_end
6013 	 * to do block allocation/reservation.We are not holding
6014 	 * inode.i__mutex here. That allow * parallel write_begin,
6015 	 * write_end call. lock_page prevent this from happening
6016 	 * on the same page though
6017 	 */
6018 	ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
6019 			len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
6020 	if (ret < 0)
6021 		goto out_unlock;
6022 	ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
6023 			len, len, page, fsdata);
6024 	if (ret < 0)
6025 		goto out_unlock;
6026 	ret = 0;
6027 out_unlock:
6028 	if (ret)
6029 		ret = VM_FAULT_SIGBUS;
6030 	up_read(&inode->i_alloc_sem);
6031 	return ret;
6032 }
6033