xref: /openbmc/linux/fs/ext4/inode.c (revision 22246614)
1 /*
2  *  linux/fs/ext4/inode.c
3  *
4  * Copyright (C) 1992, 1993, 1994, 1995
5  * Remy Card (card@masi.ibp.fr)
6  * Laboratoire MASI - Institut Blaise Pascal
7  * Universite Pierre et Marie Curie (Paris VI)
8  *
9  *  from
10  *
11  *  linux/fs/minix/inode.c
12  *
13  *  Copyright (C) 1991, 1992  Linus Torvalds
14  *
15  *  Goal-directed block allocation by Stephen Tweedie
16  *	(sct@redhat.com), 1993, 1998
17  *  Big-endian to little-endian byte-swapping/bitmaps by
18  *        David S. Miller (davem@caip.rutgers.edu), 1995
19  *  64-bit file support on 64-bit platforms by Jakub Jelinek
20  *	(jj@sunsite.ms.mff.cuni.cz)
21  *
22  *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
23  */
24 
25 #include <linux/module.h>
26 #include <linux/fs.h>
27 #include <linux/time.h>
28 #include <linux/jbd2.h>
29 #include <linux/highuid.h>
30 #include <linux/pagemap.h>
31 #include <linux/quotaops.h>
32 #include <linux/string.h>
33 #include <linux/buffer_head.h>
34 #include <linux/writeback.h>
35 #include <linux/mpage.h>
36 #include <linux/uio.h>
37 #include <linux/bio.h>
38 #include "ext4_jbd2.h"
39 #include "xattr.h"
40 #include "acl.h"
41 
42 /*
43  * Test whether an inode is a fast symlink.
44  */
45 static int ext4_inode_is_fast_symlink(struct inode *inode)
46 {
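	/*
	 * i_blocks is counted in 512-byte units; an external xattr (ACL)
	 * block accounts for s_blocksize >> 9 of them, so subtract that
	 * before checking whether the symlink body lives inside the inode.
	 */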
47 	int ea_blocks = EXT4_I(inode)->i_file_acl ?
48 		(inode->i_sb->s_blocksize >> 9) : 0;
49 
50 	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
51 }
52 
53 /*
54  * The ext4 forget function must perform a revoke if we are freeing data
55  * which has been journaled.  Metadata (e.g. indirect blocks) must be
56  * revoked in all cases.
57  *
58  * "bh" may be NULL: a metadata block may have been freed from memory
59  * but there may still be a record of it in the journal, and that record
60  * still needs to be revoked.
61  */
62 int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
63 			struct buffer_head *bh, ext4_fsblk_t blocknr)
64 {
65 	int err;
66 
67 	might_sleep();
68 
69 	BUFFER_TRACE(bh, "enter");
70 
71 	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
72 		  "data mode %lx\n",
73 		  bh, is_metadata, inode->i_mode,
74 		  test_opt(inode->i_sb, DATA_FLAGS));
75 
76 	/* Never use the revoke function if we are doing full data
77 	 * journaling: there is no need to, and a V1 superblock won't
78 	 * support it.  Otherwise, only skip the revoke on un-journaled
79 	 * data blocks. */
80 
81 	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
82 	    (!is_metadata && !ext4_should_journal_data(inode))) {
83 		if (bh) {
84 			BUFFER_TRACE(bh, "call jbd2_journal_forget");
85 			return ext4_journal_forget(handle, bh);
86 		}
87 		return 0;
88 	}
89 
90 	/*
91 	 * data!=journal && (is_metadata || should_journal_data(inode))
92 	 */
93 	BUFFER_TRACE(bh, "call ext4_journal_revoke");
94 	err = ext4_journal_revoke(handle, blocknr, bh);
95 	if (err)
96 		ext4_abort(inode->i_sb, __func__,
97 			   "error %d when attempting revoke", err);
98 	BUFFER_TRACE(bh, "exit");
99 	return err;
100 }
101 
102 /*
103  * Work out how many blocks we need to proceed with the next chunk of a
104  * truncate transaction.
105  */
106 static unsigned long blocks_for_truncate(struct inode *inode)
107 {
108 	ext4_lblk_t needed;
109 
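	/* i_blocks is in 512-byte sectors; convert it to filesystem blocks. */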
110 	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
111 
112 	/* Give ourselves just enough room to cope with inodes in which
113 	 * i_blocks is corrupt: we've seen disk corruptions in the past
114 	 * which resulted in random data in an inode which looked enough
115 	 * like a regular file for ext4 to try to delete it.  Things
116 	 * will go a bit crazy if that happens, but at least we should
117 	 * try not to panic the whole kernel. */
118 	if (needed < 2)
119 		needed = 2;
120 
121 	/* But we need to bound the transaction so we don't overflow the
122 	 * journal. */
123 	if (needed > EXT4_MAX_TRANS_DATA)
124 		needed = EXT4_MAX_TRANS_DATA;
125 
126 	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
127 }
128 
129 /*
130  * Truncate transactions can be complex and absolutely huge.  So we need to
131  * be able to restart the transaction at a convenient checkpoint to make
132  * sure we don't overflow the journal.
133  *
134  * start_transaction gets us a new handle for a truncate transaction,
135  * and extend_transaction tries to extend the existing one a bit.  If
136  * extend fails, we need to propagate the failure up and restart the
137  * transaction in the top-level truncate loop. --sct
138  */
139 static handle_t *start_transaction(struct inode *inode)
140 {
141 	handle_t *result;
142 
143 	result = ext4_journal_start(inode, blocks_for_truncate(inode));
144 	if (!IS_ERR(result))
145 		return result;
146 
147 	ext4_std_error(inode->i_sb, PTR_ERR(result));
148 	return result;
149 }
150 
151 /*
152  * Try to extend this transaction for the purposes of truncation.
153  *
154  * Returns 0 if we managed to create more room.  If we can't create more
155  * room, we return 1 and the transaction must be restarted.
156  */
157 static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
158 {
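	/* Plenty of credits left on this handle: no need to extend it yet. */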
159 	if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS)
160 		return 0;
161 	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
162 		return 0;
163 	return 1;
164 }
165 
166 /*
167  * Restart the transaction associated with *handle.  This does a commit,
168  * so before we call here everything must be consistently dirtied against
169  * this transaction.
170  */
171 static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
172 {
173 	jbd_debug(2, "restarting handle %p\n", handle);
174 	return ext4_journal_restart(handle, blocks_for_truncate(inode));
175 }
176 
177 /*
178  * Called at the last iput() if i_nlink is zero.
179  */
180 void ext4_delete_inode (struct inode * inode)
181 {
182 	handle_t *handle;
183 
184 	truncate_inode_pages(&inode->i_data, 0);
185 
186 	if (is_bad_inode(inode))
187 		goto no_delete;
188 
189 	handle = start_transaction(inode);
190 	if (IS_ERR(handle)) {
191 		/*
192 		 * If we're going to skip the normal cleanup, we still need to
193 		 * make sure that the in-core orphan linked list is properly
194 		 * cleaned up.
195 		 */
196 		ext4_orphan_del(NULL, inode);
197 		goto no_delete;
198 	}
199 
200 	if (IS_SYNC(inode))
201 		handle->h_sync = 1;
202 	inode->i_size = 0;
203 	if (inode->i_blocks)
204 		ext4_truncate(inode);
205 	/*
206 	 * Kill off the orphan record which ext4_truncate created.
207 	 * AKPM: I think this can be inside the above `if'.
208 	 * Note that ext4_orphan_del() has to be able to cope with the
209 	 * deletion of a non-existent orphan - this is because we don't
210 	 * know if ext4_truncate() actually created an orphan record.
211 	 * (Well, we could do this if we need to, but heck - it works)
212 	 */
213 	ext4_orphan_del(handle, inode);
214 	EXT4_I(inode)->i_dtime	= get_seconds();
215 
216 	/*
217 	 * One subtle ordering requirement: if anything has gone wrong
218 	 * (transaction abort, IO errors, whatever), then we can still
219 	 * do these next steps (the fs will already have been marked as
220 	 * having errors), but we can't free the inode if the mark_dirty
221 	 * fails.
222 	 */
223 	if (ext4_mark_inode_dirty(handle, inode))
224 		/* If that failed, just do the required in-core inode clear. */
225 		clear_inode(inode);
226 	else
227 		ext4_free_inode(handle, inode);
228 	ext4_journal_stop(handle);
229 	return;
230 no_delete:
231 	clear_inode(inode);	/* We must guarantee clearing of inode... */
232 }
233 
234 typedef struct {
235 	__le32	*p;
236 	__le32	key;
237 	struct buffer_head *bh;
238 } Indirect;
239 
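/*
 * Record one step of an indirect-chain walk: where the block pointer
 * lives (@v), its current value, and the buffer head (if any) holding it.
 */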
240 static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
241 {
242 	p->key = *(p->p = v);
243 	p->bh = bh;
244 }
245 
246 /**
247  *	ext4_block_to_path - parse the block number into array of offsets
248  *	@inode: inode in question (we are only interested in its superblock)
249  *	@i_block: block number to be parsed
250  *	@offsets: array to store the offsets in
251  *	@boundary: set this non-zero if the referred-to block is likely to be
252  *	       followed (on disk) by an indirect block.
253  *
254  *	To store the locations of file's data ext4 uses a data structure common
255  *	for UNIX filesystems - tree of pointers anchored in the inode, with
256  *	data blocks at leaves and indirect blocks in intermediate nodes.
257  *	This function translates the block number into path in that tree -
258  *	return value is the path length and @offsets[n] is the offset of
259  *	the pointer to the (n+1)-th node in the n-th one. If @block is out of range
260  *	(negative or too large), a warning is printed and zero is returned.
261  *
262  *	Note: function doesn't find node addresses, so no IO is needed. All
263  *	we need to know is the capacity of indirect blocks (taken from the
264  *	inode->i_sb).
265  */
266 
267 /*
268  * Portability note: the last comparison (check that we fit into triple
269  * indirect block) is spelled differently, because otherwise on an
270  * architecture with 32-bit longs and 8Kb pages we might get into trouble
271  * if our filesystem had 8Kb blocks. We might use long long, but that would
272  * kill us on x86. Oh, well, at least the sign propagation does not matter -
273  * i_block would have to be negative in the very beginning, so we would not
274  * get there at all.
275  */
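/*
 * Illustration (assuming a 4KB block size, i.e. 1024 pointers per block):
 * logical blocks 0..11 are addressed directly from the inode; block 12
 * goes through EXT4_IND_BLOCK; blocks from 12 + 1024 go through
 * EXT4_DIND_BLOCK; blocks from 12 + 1024 + 1024*1024 go through
 * EXT4_TIND_BLOCK.
 */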
276 
277 static int ext4_block_to_path(struct inode *inode,
278 			ext4_lblk_t i_block,
279 			ext4_lblk_t offsets[4], int *boundary)
280 {
281 	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
282 	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
283 	const long direct_blocks = EXT4_NDIR_BLOCKS,
284 		indirect_blocks = ptrs,
285 		double_blocks = (1 << (ptrs_bits * 2));
286 	int n = 0;
287 	int final = 0;
288 
289 	if (i_block < 0) {
290 		ext4_warning (inode->i_sb, "ext4_block_to_path", "block < 0");
291 	} else if (i_block < direct_blocks) {
292 		offsets[n++] = i_block;
293 		final = direct_blocks;
294 	} else if ((i_block -= direct_blocks) < indirect_blocks) {
295 		offsets[n++] = EXT4_IND_BLOCK;
296 		offsets[n++] = i_block;
297 		final = ptrs;
298 	} else if ((i_block -= indirect_blocks) < double_blocks) {
299 		offsets[n++] = EXT4_DIND_BLOCK;
300 		offsets[n++] = i_block >> ptrs_bits;
301 		offsets[n++] = i_block & (ptrs - 1);
302 		final = ptrs;
303 	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
304 		offsets[n++] = EXT4_TIND_BLOCK;
305 		offsets[n++] = i_block >> (ptrs_bits * 2);
306 		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
307 		offsets[n++] = i_block & (ptrs - 1);
308 		final = ptrs;
309 	} else {
310 		ext4_warning(inode->i_sb, "ext4_block_to_path",
311 				"block %lu > max",
312 				i_block + direct_blocks +
313 				indirect_blocks + double_blocks);
314 	}
315 	if (boundary)
316 		*boundary = final - 1 - (i_block & (ptrs - 1));
317 	return n;
318 }
319 
320 /**
321  *	ext4_get_branch - read the chain of indirect blocks leading to data
322  *	@inode: inode in question
323  *	@depth: depth of the chain (1 - direct pointer, etc.)
324  *	@offsets: offsets of pointers in inode/indirect blocks
325  *	@chain: place to store the result
326  *	@err: here we store the error value
327  *
328  *	Function fills the array of triples <key, p, bh> and returns %NULL
329  *	if everything went OK or the pointer to the last filled triple
330  *	(incomplete one) otherwise. Upon the return chain[i].key contains
331  *	the number of (i+1)-th block in the chain (as it is stored in memory,
332  *	i.e. little-endian 32-bit), chain[i].p contains the address of that
333  *	number (it points into struct inode for i==0 and into the bh->b_data
334  *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
335  *	block for i>0 and NULL for i==0. In other words, it holds the block
336  *	numbers of the chain, addresses they were taken from (and where we can
337  *	verify that chain did not change) and buffer_heads hosting these
338  *	numbers.
339  *
340  *	Function stops when it stumbles upon zero pointer (absent block)
341  *		(pointer to last triple returned, *@err == 0)
342  *	or when it gets an IO error reading an indirect block
343  *		(ditto, *@err == -EIO)
344  *	or when it reads all @depth-1 indirect blocks successfully and finds
345  *	the whole chain, all the way to the data (returns %NULL, *err == 0).
346  *
347  *      Needs to be called with
348  *      down_read(&EXT4_I(inode)->i_data_sem)
349  */
350 static Indirect *ext4_get_branch(struct inode *inode, int depth,
351 				 ext4_lblk_t  *offsets,
352 				 Indirect chain[4], int *err)
353 {
354 	struct super_block *sb = inode->i_sb;
355 	Indirect *p = chain;
356 	struct buffer_head *bh;
357 
358 	*err = 0;
359 	/* i_data is not going away, no lock needed */
360 	add_chain (chain, NULL, EXT4_I(inode)->i_data + *offsets);
361 	if (!p->key)
362 		goto no_block;
363 	while (--depth) {
364 		bh = sb_bread(sb, le32_to_cpu(p->key));
365 		if (!bh)
366 			goto failure;
367 		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
368 		/* Reader: end */
369 		if (!p->key)
370 			goto no_block;
371 	}
372 	return NULL;
373 
374 failure:
375 	*err = -EIO;
376 no_block:
377 	return p;
378 }
379 
380 /**
381  *	ext4_find_near - find a place for allocation with sufficient locality
382  *	@inode: owner
383  *	@ind: descriptor of indirect block.
384  *
385  *	This function returns the preferred place for block allocation.
386  *	It is used when heuristic for sequential allocation fails.
387  *	Rules are:
388  *	  + if there is a block to the left of our position - allocate near it.
389  *	  + if pointer will live in indirect block - allocate near that block.
390  *	  + if pointer will live in inode - allocate in the same
391  *	    cylinder group.
392  *
393  * In the latter case we colour the starting block by the caller's PID to
394  * prevent it from clashing with concurrent allocations for a different inode
395  * in the same block group.   The PID is used here so that functionally related
396  * files will be close-by on-disk.
397  *
398  *	Caller must make sure that @ind is valid and will stay that way.
399  */
400 static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
401 {
402 	struct ext4_inode_info *ei = EXT4_I(inode);
403 	__le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
404 	__le32 *p;
405 	ext4_fsblk_t bg_start;
406 	ext4_fsblk_t last_block;
407 	ext4_grpblk_t colour;
408 
409 	/* Try to find previous block */
410 	for (p = ind->p - 1; p >= start; p--) {
411 		if (*p)
412 			return le32_to_cpu(*p);
413 	}
414 
415 	/* No such thing, so let's try location of indirect block */
416 	if (ind->bh)
417 		return ind->bh->b_blocknr;
418 
419 	/*
420 	 * It is going to be referred to from the inode itself? OK, just put it
421 	 * into the same cylinder group then.
422 	 */
423 	bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group);
424 	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
425 
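	/* Spread concurrent allocators across 16 slices of the group, keyed by PID. */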
426 	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
427 		colour = (current->pid % 16) *
428 			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
429 	else
430 		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
431 	return bg_start + colour;
432 }
433 
434 /**
435  *	ext4_find_goal - find a preferred place for allocation.
436  *	@inode: owner
437  *	@block:  block we want
438  *	@partial: pointer to the last triple within a chain
439  *
440  *	Normally this function finds the preferred place for block allocation
441  *	and returns it.
442  */
443 static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
444 		Indirect *partial)
445 {
446 	struct ext4_block_alloc_info *block_i;
447 
448 	block_i =  EXT4_I(inode)->i_block_alloc_info;
449 
450 	/*
451 	 * try the heuristic for sequential allocation,
452 	 * failing that at least try to get decent locality.
453 	 */
454 	if (block_i && (block == block_i->last_alloc_logical_block + 1)
455 		&& (block_i->last_alloc_physical_block != 0)) {
456 		return block_i->last_alloc_physical_block + 1;
457 	}
458 
459 	return ext4_find_near(inode, partial);
460 }
461 
462 /**
463  *	ext4_blks_to_allocate: Look up the block map and count the number
464  *	of direct blocks that need to be allocated for the given branch.
465  *
466  *	@branch: chain of indirect blocks
467  *	@k: number of blocks needed for indirect blocks
468  *	@blks: number of data blocks to be mapped.
469  *	@blocks_to_boundary:  the offset in the indirect block
470  *
471  *	return the total number of blocks to be allocated, including the
472  *	direct and indirect blocks.
473  */
474 static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
475 		int blocks_to_boundary)
476 {
477 	unsigned long count = 0;
478 
479 	/*
480 	 * Simple case: the [t,d]indirect block(s) have not been allocated yet,
481 	 * so clearly the blocks on that path have not been allocated either.
482 	 */
483 	if (k > 0) {
484 		/* right now we don't handle cross boundary allocation */
485 		if (blks < blocks_to_boundary + 1)
486 			count += blks;
487 		else
488 			count += blocks_to_boundary + 1;
489 		return count;
490 	}
491 
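	/*
	 * The indirect path already exists (k == 0): count how many
	 * consecutive direct slots are still unallocated, bounded by
	 * @blks and the indirect-block boundary.
	 */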
492 	count++;
493 	while (count < blks && count <= blocks_to_boundary &&
494 		le32_to_cpu(*(branch[0].p + count)) == 0) {
495 		count++;
496 	}
497 	return count;
498 }
499 
500 /**
501  *	ext4_alloc_blocks: allocate multiple blocks needed for a branch
502  *	@indirect_blks: the number of blocks we need to allocate for indirect
503  *			blocks
504  *
505  *	@new_blocks: on return it will store the new block numbers for
506  *	the indirect blocks(if needed) and the first direct block,
507  *	@blks:	on return it will store the total number of allocated
508  *		direct blocks
509  */
510 static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
511 			ext4_fsblk_t goal, int indirect_blks, int blks,
512 			ext4_fsblk_t new_blocks[4], int *err)
513 {
514 	int target, i;
515 	unsigned long count = 0;
516 	int index = 0;
517 	ext4_fsblk_t current_block = 0;
518 	int ret = 0;
519 
520 	/*
521 	 * Here we try to allocate the requested multiple blocks at once,
522 	 * on a best-effort basis.
523 	 * To build a branch, we need to allocate blocks for
524 	 * the indirect blocks (if not allocated yet), and at least
525 	 * the first direct block of this branch.  That's the
526 	 * minimum number of blocks we need to allocate (required).
527 	 */
528 	target = blks + indirect_blks;
529 
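	/*
	 * ext4_new_blocks() may return fewer blocks than requested; keep
	 * retrying until the indirect blocks and at least one direct block
	 * have been allocated.
	 */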
530 	while (1) {
531 		count = target;
532 		/* allocating blocks for indirect blocks and direct blocks */
533 		current_block = ext4_new_blocks(handle, inode, goal, &count, err);
534 		if (*err)
535 			goto failed_out;
536 
537 		target -= count;
538 		/* allocate blocks for indirect blocks */
539 		while (index < indirect_blks && count) {
540 			new_blocks[index++] = current_block++;
541 			count--;
542 		}
543 
544 		if (count > 0)
545 			break;
546 	}
547 
548 	/* save the new block number for the first direct block */
549 	new_blocks[index] = current_block;
550 
551 	/* total number of blocks allocated for direct blocks */
552 	ret = count;
553 	*err = 0;
554 	return ret;
555 failed_out:
556 	for (i = 0; i < index; i++)
557 		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
558 	return ret;
559 }
560 
561 /**
562  *	ext4_alloc_branch - allocate and set up a chain of blocks.
563  *	@inode: owner
564  *	@indirect_blks: number of allocated indirect blocks
565  *	@blks: number of allocated direct blocks
566  *	@offsets: offsets (in the blocks) to store the pointers to next.
567  *	@branch: place to store the chain in.
568  *
569  *	This function allocates blocks, zeroes out all but the last one,
570  *	links them into a chain and (if we are synchronous) writes them to disk.
571  *	In other words, it prepares a branch that can be spliced onto the
572  *	inode. It stores the information about that chain in the branch[], in
573  *	the same format as ext4_get_branch() would do. We are calling it after
574  *	we had read the existing part of chain and partial points to the last
575  *	triple of that (one with zero ->key). Upon the exit we have the same
576  *	picture as after the successful ext4_get_block(), except that in one
577  *	place chain is disconnected - *branch->p is still zero (we did not
578  *	set the last link), but branch->key contains the number that should
579  *	be placed into *branch->p to fill that gap.
580  *
581  *	If allocation fails we free all blocks we've allocated (and forget
582  *	their buffer_heads) and return the error value from the failed
583  *	ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
584  *	as described above and return 0.
585  */
586 static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
587 			int indirect_blks, int *blks, ext4_fsblk_t goal,
588 			ext4_lblk_t *offsets, Indirect *branch)
589 {
590 	int blocksize = inode->i_sb->s_blocksize;
591 	int i, n = 0;
592 	int err = 0;
593 	struct buffer_head *bh;
594 	int num;
595 	ext4_fsblk_t new_blocks[4];
596 	ext4_fsblk_t current_block;
597 
598 	num = ext4_alloc_blocks(handle, inode, goal, indirect_blks,
599 				*blks, new_blocks, &err);
600 	if (err)
601 		return err;
602 
603 	branch[0].key = cpu_to_le32(new_blocks[0]);
604 	/*
605 	 * metadata blocks and data blocks are allocated.
606 	 */
607 	for (n = 1; n <= indirect_blks;  n++) {
608 		/*
609 		 * Get buffer_head for parent block, zero it out
610 		 * and set the pointer to new one, then send
611 		 * parent to disk.
612 		 */
613 		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
614 		branch[n].bh = bh;
615 		lock_buffer(bh);
616 		BUFFER_TRACE(bh, "call get_create_access");
617 		err = ext4_journal_get_create_access(handle, bh);
618 		if (err) {
619 			unlock_buffer(bh);
620 			brelse(bh);
621 			goto failed;
622 		}
623 
624 		memset(bh->b_data, 0, blocksize);
625 		branch[n].p = (__le32 *) bh->b_data + offsets[n];
626 		branch[n].key = cpu_to_le32(new_blocks[n]);
627 		*branch[n].p = branch[n].key;
628 		if (n == indirect_blks) {
629 			current_block = new_blocks[n];
630 			/*
631 			 * End of chain, update the last new metablock of
632 			 * the chain to point to the newly allocated
633 			 * data block numbers
634 			 */
635 			for (i=1; i < num; i++)
636 				*(branch[n].p + i) = cpu_to_le32(++current_block);
637 		}
638 		BUFFER_TRACE(bh, "marking uptodate");
639 		set_buffer_uptodate(bh);
640 		unlock_buffer(bh);
641 
642 		BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
643 		err = ext4_journal_dirty_metadata(handle, bh);
644 		if (err)
645 			goto failed;
646 	}
647 	*blks = num;
648 	return err;
649 failed:
650 	/* Allocation failed, free what we already allocated */
651 	for (i = 1; i <= n ; i++) {
652 		BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
653 		ext4_journal_forget(handle, branch[i].bh);
654 	}
655 	for (i = 0; i < indirect_blks; i++)
656 		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
657 
658 	ext4_free_blocks(handle, inode, new_blocks[i], num, 0);
659 
660 	return err;
661 }
662 
663 /**
664  * ext4_splice_branch - splice the allocated branch onto inode.
665  * @inode: owner
666  * @block: (logical) number of block we are adding
667  * @chain: chain of indirect blocks (with a missing link - see
668  *	ext4_alloc_branch)
669  * @where: location of missing link
670  * @num:   number of indirect blocks we are adding
671  * @blks:  number of direct blocks we are adding
672  *
673  * This function fills the missing link and does all housekeeping needed in
674  * inode (->i_blocks, etc.). In case of success we end up with the full
675  * chain to new block and return 0.
676  */
677 static int ext4_splice_branch(handle_t *handle, struct inode *inode,
678 			ext4_lblk_t block, Indirect *where, int num, int blks)
679 {
680 	int i;
681 	int err = 0;
682 	struct ext4_block_alloc_info *block_i;
683 	ext4_fsblk_t current_block;
684 
685 	block_i = EXT4_I(inode)->i_block_alloc_info;
686 	/*
687 	 * If we're splicing into a [td]indirect block (as opposed to the
688 	 * inode) then we need to get write access to the [td]indirect block
689 	 * before the splice.
690 	 */
691 	if (where->bh) {
692 		BUFFER_TRACE(where->bh, "get_write_access");
693 		err = ext4_journal_get_write_access(handle, where->bh);
694 		if (err)
695 			goto err_out;
696 	}
697 	/* That's it */
698 
699 	*where->p = where->key;
700 
701 	/*
702 	 * Update the host buffer_head or inode to point to the newly allocated
703 	 * direct blocks
704 	 */
705 	if (num == 0 && blks > 1) {
706 		current_block = le32_to_cpu(where->key) + 1;
707 		for (i = 1; i < blks; i++)
708 			*(where->p + i) = cpu_to_le32(current_block++);
709 	}
710 
711 	/*
712 	 * update the most recently allocated logical & physical block
713 	 * in i_block_alloc_info, to help find the proper goal block for the next
714 	 * allocation
715 	 */
716 	if (block_i) {
717 		block_i->last_alloc_logical_block = block + blks - 1;
718 		block_i->last_alloc_physical_block =
719 				le32_to_cpu(where[num].key) + blks - 1;
720 	}
721 
722 	/* We are done with atomic stuff, now do the rest of housekeeping */
723 
724 	inode->i_ctime = ext4_current_time(inode);
725 	ext4_mark_inode_dirty(handle, inode);
726 
727 	/* had we spliced it onto indirect block? */
728 	if (where->bh) {
729 		/*
730 		 * If we spliced it onto an indirect block, we haven't
731 		 * altered the inode.  Note however that if it is being spliced
732 		 * onto an indirect block at the very end of the file (the
733 		 * file is growing) then we *will* alter the inode to reflect
734 		 * the new i_size.  But that is not done here - it is done in
735 		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
736 		 */
737 		jbd_debug(5, "splicing indirect only\n");
738 		BUFFER_TRACE(where->bh, "call ext4_journal_dirty_metadata");
739 		err = ext4_journal_dirty_metadata(handle, where->bh);
740 		if (err)
741 			goto err_out;
742 	} else {
743 		/*
744 		 * OK, we spliced it into the inode itself on a direct block.
745 		 * Inode was dirtied above.
746 		 */
747 		jbd_debug(5, "splicing direct\n");
748 	}
749 	return err;
750 
751 err_out:
752 	for (i = 1; i <= num; i++) {
753 		BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
754 		ext4_journal_forget(handle, where[i].bh);
755 		ext4_free_blocks(handle, inode,
756 					le32_to_cpu(where[i-1].key), 1, 0);
757 	}
758 	ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 0);
759 
760 	return err;
761 }
762 
763 /*
764  * Allocation strategy is simple: if we have to allocate something, we will
765  * have to go the whole way to leaf. So let's do it before attaching anything
766  * to tree, set linkage between the newborn blocks, write them if sync is
767  * required, recheck the path, free and repeat if check fails, otherwise
768  * set the last missing link (that will protect us from any truncate-generated
769  * removals - all blocks on the path are immune now) and possibly force the
770  * write on the parent block.
771  * That has a nice additional property: no special recovery from the failed
772  * allocations is needed - we simply release blocks and do not touch anything
773  * reachable from inode.
774  *
775  * `handle' can be NULL if create == 0.
776  *
777  * return > 0, # of blocks mapped or allocated.
778  * return = 0, if plain lookup failed.
779  * return < 0, error case.
780  *
781  *
782  * Needs to be called with
783  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system blocks
784  * (i.e., create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
785  */
786 int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
787 		ext4_lblk_t iblock, unsigned long maxblocks,
788 		struct buffer_head *bh_result,
789 		int create, int extend_disksize)
790 {
791 	int err = -EIO;
792 	ext4_lblk_t offsets[4];
793 	Indirect chain[4];
794 	Indirect *partial;
795 	ext4_fsblk_t goal;
796 	int indirect_blks;
797 	int blocks_to_boundary = 0;
798 	int depth;
799 	struct ext4_inode_info *ei = EXT4_I(inode);
800 	int count = 0;
801 	ext4_fsblk_t first_block = 0;
802 
803 
804 	J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
805 	J_ASSERT(handle != NULL || create == 0);
806 	depth = ext4_block_to_path(inode, iblock, offsets,
807 					&blocks_to_boundary);
808 
809 	if (depth == 0)
810 		goto out;
811 
812 	partial = ext4_get_branch(inode, depth, offsets, chain, &err);
813 
814 	/* Simplest case - block found, no allocation needed */
815 	if (!partial) {
816 		first_block = le32_to_cpu(chain[depth - 1].key);
817 		clear_buffer_new(bh_result);
818 		count++;
819 		/* map more contiguous blocks, up to maxblocks and the boundary */
820 		while (count < maxblocks && count <= blocks_to_boundary) {
821 			ext4_fsblk_t blk;
822 
823 			blk = le32_to_cpu(*(chain[depth-1].p + count));
824 
825 			if (blk == first_block + count)
826 				count++;
827 			else
828 				break;
829 		}
830 		goto got_it;
831 	}
832 
833 	/* Next simple case - plain lookup or failed read of indirect block */
834 	if (!create || err == -EIO)
835 		goto cleanup;
836 
837 	/*
838 	 * Okay, we need to do block allocation.  Lazily initialize the block
839 	 * allocation info here if necessary
840 	 */
841 	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
842 		ext4_init_block_alloc_info(inode);
843 
844 	goal = ext4_find_goal(inode, iblock, partial);
845 
846 	/* the number of blocks need to allocate for [d,t]indirect blocks */
847 	indirect_blks = (chain + depth) - partial - 1;
848 
849 	/*
850 	 * Next look up the indirect map to count the total number of
851 	 * direct blocks to allocate for this branch.
852 	 */
853 	count = ext4_blks_to_allocate(partial, indirect_blks,
854 					maxblocks, blocks_to_boundary);
855 	/*
856 	 * Block out ext4_truncate while we alter the tree
857 	 */
858 	err = ext4_alloc_branch(handle, inode, indirect_blks, &count, goal,
859 				offsets + (partial - chain), partial);
860 
861 	/*
862 	 * The ext4_splice_branch call will free and forget any buffers
863 	 * on the new chain if there is a failure, but that risks using
864 	 * up transaction credits, especially for bitmaps where the
865 	 * credits cannot be returned.  Can we handle this somehow?  We
866 	 * may need to return -EAGAIN upwards in the worst case.  --sct
867 	 */
868 	if (!err)
869 		err = ext4_splice_branch(handle, inode, iblock,
870 					partial, indirect_blks, count);
871 	/*
872 	 * i_disksize growing is protected by i_data_sem.  Don't forget to
873 	 * protect it if you're about to implement concurrent
874 	 * ext4_get_block() -bzzz
875 	 */
876 	if (!err && extend_disksize && inode->i_size > ei->i_disksize)
877 		ei->i_disksize = inode->i_size;
878 	if (err)
879 		goto cleanup;
880 
881 	set_buffer_new(bh_result);
882 got_it:
883 	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
884 	if (count > blocks_to_boundary)
885 		set_buffer_boundary(bh_result);
886 	err = count;
887 	/* Clean up and exit */
888 	partial = chain + depth - 1;	/* the whole chain */
889 cleanup:
890 	while (partial > chain) {
891 		BUFFER_TRACE(partial->bh, "call brelse");
892 		brelse(partial->bh);
893 		partial--;
894 	}
895 	BUFFER_TRACE(bh_result, "returned");
896 out:
897 	return err;
898 }
899 
900 /* Maximum number of blocks we map for direct IO at once. */
901 #define DIO_MAX_BLOCKS 4096
902 /*
903  * Number of credits we need for writing DIO_MAX_BLOCKS:
904  * We need sb + group descriptor + bitmap + inode -> 4
905  * For B blocks with A block pointers per block we need:
906  * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
907  * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
908  */
909 #define DIO_CREDITS 25
910 
911 
912 /*
913  *
914  *
915  * The ext4_get_blocks_wrap() function: a get_block() wrapper.
916  * It will do a lookup first, and return if the blocks are already mapped.
917  * Otherwise it takes the write lock of i_data_sem, allocates blocks,
918  * stores the allocated blocks in the result buffer head and marks it
919  * mapped.
920  *
921  * If the file is extent-based, it will call ext4_ext_get_blocks();
922  * otherwise, it calls ext4_get_blocks_handle() to handle indirect-mapping
923  * based files.
924  *
925  * On success, it returns the number of blocks mapped or allocated.
926  * If create == 0 and the blocks are pre-allocated and uninitialized,
927  * the result buffer head is unmapped. If create == 1, it will make sure
928  * the buffer head is mapped.
929  *
930  * It returns 0 if a plain lookup failed (blocks have not been allocated); in
931  * that case, the buffer head is unmapped.
932  *
933  * It returns the error in case of allocation failure.
934  */
935 int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
936 			unsigned long max_blocks, struct buffer_head *bh,
937 			int create, int extend_disksize)
938 {
939 	int retval;
940 
941 	clear_buffer_mapped(bh);
942 
943 	/*
944 	 * Try to see if we can get the block without requesting
945 	 * a new file system block.
946 	 */
947 	down_read((&EXT4_I(inode)->i_data_sem));
948 	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
949 		retval =  ext4_ext_get_blocks(handle, inode, block, max_blocks,
950 				bh, 0, 0);
951 	} else {
952 		retval = ext4_get_blocks_handle(handle,
953 				inode, block, max_blocks, bh, 0, 0);
954 	}
955 	up_read((&EXT4_I(inode)->i_data_sem));
956 
957 	/* If it is only a block(s) lookup */
958 	if (!create)
959 		return retval;
960 
961 	/*
962 	 * Return if the blocks have already been allocated.
963 	 *
964 	 * Note that if blocks have been preallocated,
965 	 * ext4_ext_get_blocks() returns with create = 0
966 	 * and the buffer head unmapped.
967 	 */
968 	if (retval > 0 && buffer_mapped(bh))
969 		return retval;
970 
971 	/*
972 	 * Allocating new blocks and/or writing to an uninitialized extent
973 	 * will possibly result in updating i_data, so we take
974 	 * the write lock of i_data_sem, and call get_blocks()
975 	 * with create == 1 flag.
976 	 */
977 	down_write((&EXT4_I(inode)->i_data_sem));
978 	/*
979 	 * We need to check for EXT4 here because migrate
980 	 * could have changed the inode type in between
981 	 */
982 	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
983 		retval =  ext4_ext_get_blocks(handle, inode, block, max_blocks,
984 				bh, create, extend_disksize);
985 	} else {
986 		retval = ext4_get_blocks_handle(handle, inode, block,
987 				max_blocks, bh, create, extend_disksize);
988 
989 		if (retval > 0 && buffer_new(bh)) {
990 			/*
991 			 * We allocated new blocks which will result in
992 			 * i_data's format changing.  Force the migrate
993 			 * to fail by clearing migrate flags
994 			 */
995 			EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags &
996 							~EXT4_EXT_MIGRATE;
997 		}
998 	}
999 	up_write((&EXT4_I(inode)->i_data_sem));
1000 	return retval;
1001 }
1002 
1003 static int ext4_get_block(struct inode *inode, sector_t iblock,
1004 			struct buffer_head *bh_result, int create)
1005 {
1006 	handle_t *handle = ext4_journal_current_handle();
1007 	int ret = 0, started = 0;
1008 	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
1009 
1010 	if (create && !handle) {
1011 		/* Direct IO write... */
1012 		if (max_blocks > DIO_MAX_BLOCKS)
1013 			max_blocks = DIO_MAX_BLOCKS;
1014 		handle = ext4_journal_start(inode, DIO_CREDITS +
1015 			      2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb));
1016 		if (IS_ERR(handle)) {
1017 			ret = PTR_ERR(handle);
1018 			goto out;
1019 		}
1020 		started = 1;
1021 	}
1022 
1023 	ret = ext4_get_blocks_wrap(handle, inode, iblock,
1024 					max_blocks, bh_result, create, 0);
1025 	if (ret > 0) {
1026 		bh_result->b_size = (ret << inode->i_blkbits);
1027 		ret = 0;
1028 	}
1029 	if (started)
1030 		ext4_journal_stop(handle);
1031 out:
1032 	return ret;
1033 }
1034 
1035 /*
1036  * `handle' can be NULL if create is zero
1037  */
1038 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
1039 				ext4_lblk_t block, int create, int *errp)
1040 {
1041 	struct buffer_head dummy;
1042 	int fatal = 0, err;
1043 
1044 	J_ASSERT(handle != NULL || create == 0);
1045 
1046 	dummy.b_state = 0;
1047 	dummy.b_blocknr = -1000;
1048 	buffer_trace_init(&dummy.b_history);
1049 	err = ext4_get_blocks_wrap(handle, inode, block, 1,
1050 					&dummy, create, 1);
1051 	/*
1052 	 * ext4_get_blocks_handle() returns number of blocks
1053 	 * mapped. 0 in case of a HOLE.
1054 	 */
1055 	if (err > 0) {
1056 		if (err > 1)
1057 			WARN_ON(1);
1058 		err = 0;
1059 	}
1060 	*errp = err;
1061 	if (!err && buffer_mapped(&dummy)) {
1062 		struct buffer_head *bh;
1063 		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
1064 		if (!bh) {
1065 			*errp = -EIO;
1066 			goto err;
1067 		}
1068 		if (buffer_new(&dummy)) {
1069 			J_ASSERT(create != 0);
1070 			J_ASSERT(handle != NULL);
1071 
1072 			/*
1073 			 * Now that we do not always journal data, we should
1074 			 * keep in mind whether this should always journal the
1075 			 * new buffer as metadata.  For now, regular file
1076 			 * writes use ext4_get_block instead, so it's not a
1077 			 * problem.
1078 			 */
1079 			lock_buffer(bh);
1080 			BUFFER_TRACE(bh, "call get_create_access");
1081 			fatal = ext4_journal_get_create_access(handle, bh);
1082 			if (!fatal && !buffer_uptodate(bh)) {
1083 				memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1084 				set_buffer_uptodate(bh);
1085 			}
1086 			unlock_buffer(bh);
1087 			BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
1088 			err = ext4_journal_dirty_metadata(handle, bh);
1089 			if (!fatal)
1090 				fatal = err;
1091 		} else {
1092 			BUFFER_TRACE(bh, "not a new buffer");
1093 		}
1094 		if (fatal) {
1095 			*errp = fatal;
1096 			brelse(bh);
1097 			bh = NULL;
1098 		}
1099 		return bh;
1100 	}
1101 err:
1102 	return NULL;
1103 }
1104 
1105 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
1106 			       ext4_lblk_t block, int create, int *err)
1107 {
1108 	struct buffer_head * bh;
1109 
1110 	bh = ext4_getblk(handle, inode, block, create, err);
1111 	if (!bh)
1112 		return bh;
1113 	if (buffer_uptodate(bh))
1114 		return bh;
1115 	ll_rw_block(READ_META, 1, &bh);
1116 	wait_on_buffer(bh);
1117 	if (buffer_uptodate(bh))
1118 		return bh;
1119 	put_bh(bh);
1120 	*err = -EIO;
1121 	return NULL;
1122 }
1123 
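/*
 * Walk the buffers of a page covering the byte range [@from, @to) and
 * apply @fn to each buffer that overlaps it.  Buffers outside the range
 * that are not uptodate set *@partial (if @partial is non-NULL).  The
 * walk stops at the first error from @fn and returns it.
 */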
1124 static int walk_page_buffers(	handle_t *handle,
1125 				struct buffer_head *head,
1126 				unsigned from,
1127 				unsigned to,
1128 				int *partial,
1129 				int (*fn)(	handle_t *handle,
1130 						struct buffer_head *bh))
1131 {
1132 	struct buffer_head *bh;
1133 	unsigned block_start, block_end;
1134 	unsigned blocksize = head->b_size;
1135 	int err, ret = 0;
1136 	struct buffer_head *next;
1137 
1138 	for (	bh = head, block_start = 0;
1139 		ret == 0 && (bh != head || !block_start);
1140 		block_start = block_end, bh = next)
1141 	{
1142 		next = bh->b_this_page;
1143 		block_end = block_start + blocksize;
1144 		if (block_end <= from || block_start >= to) {
1145 			if (partial && !buffer_uptodate(bh))
1146 				*partial = 1;
1147 			continue;
1148 		}
1149 		err = (*fn)(handle, bh);
1150 		if (!ret)
1151 			ret = err;
1152 	}
1153 	return ret;
1154 }
1155 
1156 /*
1157  * To preserve ordering, it is essential that the hole instantiation and
1158  * the data write be encapsulated in a single transaction.  We cannot
1159  * close off a transaction and start a new one between the ext4_get_block()
1160  * and the commit_write().  So doing the jbd2_journal_start at the start of
1161  * prepare_write() is the right place.
1162  *
1163  * Also, this function can nest inside ext4_writepage() ->
1164  * block_write_full_page(). In that case, we *know* that ext4_writepage()
1165  * has generated enough buffer credits to do the whole page.  So we won't
1166  * block on the journal in that case, which is good, because the caller may
1167  * be PF_MEMALLOC.
1168  *
1169  * By accident, ext4 can be reentered when a transaction is open via
1170  * quota file writes.  If we were to commit the transaction while thus
1171  * reentered, there can be a deadlock - we would be holding a quota
1172  * lock, and the commit would never complete if another thread had a
1173  * transaction open and was blocking on the quota lock - a ranking
1174  * violation.
1175  *
1176  * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
1177  * will _not_ run commit under these circumstances because handle->h_ref
1178  * is elevated.  We'll still have enough credits for the tiny quotafile
1179  * write.
1180  */
1181 static int do_journal_get_write_access(handle_t *handle,
1182 					struct buffer_head *bh)
1183 {
1184 	if (!buffer_mapped(bh) || buffer_freed(bh))
1185 		return 0;
1186 	return ext4_journal_get_write_access(handle, bh);
1187 }
1188 
1189 static int ext4_write_begin(struct file *file, struct address_space *mapping,
1190 				loff_t pos, unsigned len, unsigned flags,
1191 				struct page **pagep, void **fsdata)
1192 {
1193  	struct inode *inode = mapping->host;
1194 	int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
1195 	handle_t *handle;
1196 	int retries = 0;
1197  	struct page *page;
1198  	pgoff_t index;
1199  	unsigned from, to;
1200 
1201  	index = pos >> PAGE_CACHE_SHIFT;
1202  	from = pos & (PAGE_CACHE_SIZE - 1);
1203  	to = from + len;
1204 
1205 retry:
1206  	page = __grab_cache_page(mapping, index);
1207  	if (!page)
1208  		return -ENOMEM;
1209  	*pagep = page;
1210 
1211   	handle = ext4_journal_start(inode, needed_blocks);
1212   	if (IS_ERR(handle)) {
1213  		unlock_page(page);
1214  		page_cache_release(page);
1215   		ret = PTR_ERR(handle);
1216   		goto out;
1217 	}
1218 
1219 	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
1220 							ext4_get_block);
1221 
1222 	if (!ret && ext4_should_journal_data(inode)) {
1223 		ret = walk_page_buffers(handle, page_buffers(page),
1224 				from, to, NULL, do_journal_get_write_access);
1225 	}
1226 
1227 	if (ret) {
1228 		ext4_journal_stop(handle);
1229  		unlock_page(page);
1230  		page_cache_release(page);
1231 	}
1232 
1233 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
1234 		goto retry;
1235 out:
1236 	return ret;
1237 }
1238 
1239 int ext4_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
1240 {
1241 	int err = jbd2_journal_dirty_data(handle, bh);
1242 	if (err)
1243 		ext4_journal_abort_handle(__func__, __func__,
1244 						bh, handle, err);
1245 	return err;
1246 }
1247 
1248 /* For write_end() in data=journal mode */
1249 static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1250 {
1251 	if (!buffer_mapped(bh) || buffer_freed(bh))
1252 		return 0;
1253 	set_buffer_uptodate(bh);
1254 	return ext4_journal_dirty_metadata(handle, bh);
1255 }
1256 
1257 /*
1258  * Generic write_end handler for ordered and writeback ext4 journal modes.
1259  * We can't use generic_write_end, because that unlocks the page and we need to
1260  * unlock the page after ext4_journal_stop, but ext4_journal_stop must run
1261  * after block_write_end.
1262  */
1263 static int ext4_generic_write_end(struct file *file,
1264 				struct address_space *mapping,
1265 				loff_t pos, unsigned len, unsigned copied,
1266 				struct page *page, void *fsdata)
1267 {
1268 	struct inode *inode = file->f_mapping->host;
1269 
1270 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1271 
1272 	if (pos+copied > inode->i_size) {
1273 		i_size_write(inode, pos+copied);
1274 		mark_inode_dirty(inode);
1275 	}
1276 
1277 	return copied;
1278 }
1279 
1280 /*
1281  * We need to pick up the new inode size which generic_commit_write gave us.
1282  * `file' can be NULL - e.g., when called from page_symlink().
1283  *
1284  * ext4 never places buffers on inode->i_mapping->private_list.  metadata
1285  * buffers are managed internally.
1286  */
1287 static int ext4_ordered_write_end(struct file *file,
1288 				struct address_space *mapping,
1289 				loff_t pos, unsigned len, unsigned copied,
1290 				struct page *page, void *fsdata)
1291 {
1292 	handle_t *handle = ext4_journal_current_handle();
1293 	struct inode *inode = file->f_mapping->host;
1294 	unsigned from, to;
1295 	int ret = 0, ret2;
1296 
1297 	from = pos & (PAGE_CACHE_SIZE - 1);
1298 	to = from + len;
1299 
1300 	ret = walk_page_buffers(handle, page_buffers(page),
1301 		from, to, NULL, ext4_journal_dirty_data);
1302 
1303 	if (ret == 0) {
1304 		/*
1305 		 * generic_write_end() will run mark_inode_dirty() if i_size
1306 		 * changes.  So let's piggyback the i_disksize mark_inode_dirty
1307 		 * into that.
1308 		 */
1309 		loff_t new_i_size;
1310 
1311 		new_i_size = pos + copied;
1312 		if (new_i_size > EXT4_I(inode)->i_disksize)
1313 			EXT4_I(inode)->i_disksize = new_i_size;
1314 		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
1315 							page, fsdata);
1316 		copied = ret2;
1317 		if (ret2 < 0)
1318 			ret = ret2;
1319 	}
1320 	ret2 = ext4_journal_stop(handle);
1321 	if (!ret)
1322 		ret = ret2;
1323 	unlock_page(page);
1324 	page_cache_release(page);
1325 
1326 	return ret ? ret : copied;
1327 }
1328 
1329 static int ext4_writeback_write_end(struct file *file,
1330 				struct address_space *mapping,
1331 				loff_t pos, unsigned len, unsigned copied,
1332 				struct page *page, void *fsdata)
1333 {
1334 	handle_t *handle = ext4_journal_current_handle();
1335 	struct inode *inode = file->f_mapping->host;
1336 	int ret = 0, ret2;
1337 	loff_t new_i_size;
1338 
1339 	new_i_size = pos + copied;
1340 	if (new_i_size > EXT4_I(inode)->i_disksize)
1341 		EXT4_I(inode)->i_disksize = new_i_size;
1342 
1343 	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
1344 							page, fsdata);
1345 	copied = ret2;
1346 	if (ret2 < 0)
1347 		ret = ret2;
1348 
1349 	ret2 = ext4_journal_stop(handle);
1350 	if (!ret)
1351 		ret = ret2;
1352 	unlock_page(page);
1353 	page_cache_release(page);
1354 
1355 	return ret ? ret : copied;
1356 }
1357 
1358 static int ext4_journalled_write_end(struct file *file,
1359 				struct address_space *mapping,
1360 				loff_t pos, unsigned len, unsigned copied,
1361 				struct page *page, void *fsdata)
1362 {
1363 	handle_t *handle = ext4_journal_current_handle();
1364 	struct inode *inode = mapping->host;
1365 	int ret = 0, ret2;
1366 	int partial = 0;
1367 	unsigned from, to;
1368 
1369 	from = pos & (PAGE_CACHE_SIZE - 1);
1370 	to = from + len;
1371 
1372 	if (copied < len) {
1373 		if (!PageUptodate(page))
1374 			copied = 0;
1375 		page_zero_new_buffers(page, from+copied, to);
1376 	}
1377 
1378 	ret = walk_page_buffers(handle, page_buffers(page), from,
1379 				to, &partial, write_end_fn);
1380 	if (!partial)
1381 		SetPageUptodate(page);
1382 	if (pos+copied > inode->i_size)
1383 		i_size_write(inode, pos+copied);
1384 	EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
1385 	if (inode->i_size > EXT4_I(inode)->i_disksize) {
1386 		EXT4_I(inode)->i_disksize = inode->i_size;
1387 		ret2 = ext4_mark_inode_dirty(handle, inode);
1388 		if (!ret)
1389 			ret = ret2;
1390 	}
1391 
1392 	ret2 = ext4_journal_stop(handle);
1393 	if (!ret)
1394 		ret = ret2;
1395 	unlock_page(page);
1396 	page_cache_release(page);
1397 
1398 	return ret ? ret : copied;
1399 }
1400 
1401 /*
1402  * bmap() is special.  It gets used by applications such as lilo and by
1403  * the swapper to find the on-disk block of a specific piece of data.
1404  *
1405  * Naturally, this is dangerous if the block concerned is still in the
1406  * journal.  If somebody makes a swapfile on an ext4 data-journaling
1407  * filesystem and enables swap, then they may get a nasty shock when the
1408  * data getting swapped to that swapfile suddenly gets overwritten by
1409  * the original zeros written out previously to the journal and
1410  * awaiting writeback in the kernel's buffer cache.
1411  *
1412  * So, if we see any bmap calls here on a modified, data-journaled file,
1413  * take extra steps to flush any blocks which might be in the cache.
1414  */
1415 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
1416 {
1417 	struct inode *inode = mapping->host;
1418 	journal_t *journal;
1419 	int err;
1420 
1421 	if (EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
1422 		/*
1423 		 * This is a REALLY heavyweight approach, but the use of
1424 		 * bmap on dirty files is expected to be extremely rare:
1425 		 * only if we run lilo or swapon on a freshly made file
1426 		 * do we expect this to happen.
1427 		 *
1428 		 * (bmap requires CAP_SYS_RAWIO so this does not
1429 		 * represent an unprivileged user DOS attack --- we'd be
1430 		 * in trouble if mortal users could trigger this path at
1431 		 * will.)
1432 		 *
1433 		 * NB. EXT4_STATE_JDATA is not set on files other than
1434 		 * regular files.  If somebody wants to bmap a directory
1435 		 * or symlink and gets confused because the buffer
1436 		 * hasn't yet been flushed to disk, they deserve
1437 		 * everything they get.
1438 		 */
1439 
1440 		EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
1441 		journal = EXT4_JOURNAL(inode);
1442 		jbd2_journal_lock_updates(journal);
1443 		err = jbd2_journal_flush(journal);
1444 		jbd2_journal_unlock_updates(journal);
1445 
1446 		if (err)
1447 			return 0;
1448 	}
1449 
1450 	return generic_block_bmap(mapping, block, ext4_get_block);
1451 }
1452 
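/*
 * bget_one()/bput_one() are walk_page_buffers() callbacks used by
 * ext4_ordered_writepage() to pin the page's buffers across
 * block_write_full_page() and release them afterwards.
 */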
1453 static int bget_one(handle_t *handle, struct buffer_head *bh)
1454 {
1455 	get_bh(bh);
1456 	return 0;
1457 }
1458 
1459 static int bput_one(handle_t *handle, struct buffer_head *bh)
1460 {
1461 	put_bh(bh);
1462 	return 0;
1463 }
1464 
1465 static int jbd2_journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
1466 {
1467 	if (buffer_mapped(bh))
1468 		return ext4_journal_dirty_data(handle, bh);
1469 	return 0;
1470 }
1471 
1472 /*
1473  * Note that we always start a transaction even if we're not journalling
1474  * data.  This is to preserve ordering: any hole instantiation within
1475  * __block_write_full_page -> ext4_get_block() should be journalled
1476  * along with the data so we don't crash and then get metadata which
1477  * refers to old data.
1478  *
1479  * In all journalling modes block_write_full_page() will start the I/O.
1480  *
1481  * Problem:
1482  *
1483  *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1484  *		ext4_writepage()
1485  *
1486  * Similar for:
1487  *
1488  *	ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
1489  *
1490  * Same applies to ext4_get_block().  We will deadlock on various things like
1491  * lock_journal and i_data_sem
1492  *
1493  * Setting PF_MEMALLOC here doesn't work - too many internal memory
1494  * allocations fail.
1495  *
1496  * 16May01: If we're reentered then journal_current_handle() will be
1497  *	    non-zero. We simply *return*.
1498  *
1499  * 1 July 2001: @@@ FIXME:
1500  *   In journalled data mode, a data buffer may be metadata against the
1501  *   current transaction.  But the same file is part of a shared mapping
1502  *   and someone does a writepage() on it.
1503  *
1504  *   We will move the buffer onto the async_data list, but *after* it has
1505  *   been dirtied. So there's a small window where we have dirty data on
1506  *   BJ_Metadata.
1507  *
1508  *   Note that this only applies to the last partial page in the file.  The
1509  *   bit which block_write_full_page() uses prepare/commit for.  (That's
1510  *   broken code anyway: it's wrong for msync()).
1511  *
1512  *   It's a rare case: affects the final partial page, for journalled data
1513  *   where the file is subject to both write() and writepage() in the same
1514  *   transaction.  To fix it we'll need a custom block_write_full_page().
1515  *   We'll probably need that anyway for journalling writepage() output.
1516  *
1517  * We don't honour synchronous mounts for writepage().  That would be
1518  * disastrous.  Any write() or metadata operation will sync the fs for
1519  * us.
1520  *
1521  * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
1522  * we don't need to open a transaction here.
1523  */
1524 static int ext4_ordered_writepage(struct page *page,
1525 				struct writeback_control *wbc)
1526 {
1527 	struct inode *inode = page->mapping->host;
1528 	struct buffer_head *page_bufs;
1529 	handle_t *handle = NULL;
1530 	int ret = 0;
1531 	int err;
1532 
1533 	J_ASSERT(PageLocked(page));
1534 
1535 	/*
1536 	 * We give up here if we're reentered, because it might be for a
1537 	 * different filesystem.
1538 	 */
1539 	if (ext4_journal_current_handle())
1540 		goto out_fail;
1541 
1542 	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1543 
1544 	if (IS_ERR(handle)) {
1545 		ret = PTR_ERR(handle);
1546 		goto out_fail;
1547 	}
1548 
1549 	if (!page_has_buffers(page)) {
1550 		create_empty_buffers(page, inode->i_sb->s_blocksize,
1551 				(1 << BH_Dirty)|(1 << BH_Uptodate));
1552 	}
1553 	page_bufs = page_buffers(page);
1554 	walk_page_buffers(handle, page_bufs, 0,
1555 			PAGE_CACHE_SIZE, NULL, bget_one);
1556 
1557 	ret = block_write_full_page(page, ext4_get_block, wbc);
1558 
1559 	/*
1560 	 * The page can become unlocked at any point now, and
1561 	 * truncate can then come in and change things.  So we
1562 	 * can't touch *page from now on.  But *page_bufs is
1563 	 * safe due to elevated refcount.
1564 	 */
1565 
1566 	/*
1567 	 * And attach them to the current transaction.  But only if
1568 	 * block_write_full_page() succeeded.  Otherwise they are unmapped,
1569 	 * and generally junk.
1570 	 */
1571 	if (ret == 0) {
1572 		err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
1573 					NULL, jbd2_journal_dirty_data_fn);
1574 		if (!ret)
1575 			ret = err;
1576 	}
1577 	walk_page_buffers(handle, page_bufs, 0,
1578 			PAGE_CACHE_SIZE, NULL, bput_one);
1579 	err = ext4_journal_stop(handle);
1580 	if (!ret)
1581 		ret = err;
1582 	return ret;
1583 
1584 out_fail:
1585 	redirty_page_for_writepage(wbc, page);
1586 	unlock_page(page);
1587 	return ret;
1588 }
1589 
1590 static int ext4_writeback_writepage(struct page *page,
1591 				struct writeback_control *wbc)
1592 {
1593 	struct inode *inode = page->mapping->host;
1594 	handle_t *handle = NULL;
1595 	int ret = 0;
1596 	int err;
1597 
1598 	if (ext4_journal_current_handle())
1599 		goto out_fail;
1600 
1601 	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1602 	if (IS_ERR(handle)) {
1603 		ret = PTR_ERR(handle);
1604 		goto out_fail;
1605 	}
1606 
1607 	if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
1608 		ret = nobh_writepage(page, ext4_get_block, wbc);
1609 	else
1610 		ret = block_write_full_page(page, ext4_get_block, wbc);
1611 
1612 	err = ext4_journal_stop(handle);
1613 	if (!ret)
1614 		ret = err;
1615 	return ret;
1616 
1617 out_fail:
1618 	redirty_page_for_writepage(wbc, page);
1619 	unlock_page(page);
1620 	return ret;
1621 }
1622 
1623 static int ext4_journalled_writepage(struct page *page,
1624 				struct writeback_control *wbc)
1625 {
1626 	struct inode *inode = page->mapping->host;
1627 	handle_t *handle = NULL;
1628 	int ret = 0;
1629 	int err;
1630 
1631 	if (ext4_journal_current_handle())
1632 		goto no_write;
1633 
1634 	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1635 	if (IS_ERR(handle)) {
1636 		ret = PTR_ERR(handle);
1637 		goto no_write;
1638 	}
1639 
1640 	if (!page_has_buffers(page) || PageChecked(page)) {
1641 		/*
1642 		 * It's mmapped pagecache.  Add buffers and journal it.  There
1643 		 * doesn't seem much point in redirtying the page here.
1644 		 */
1645 		ClearPageChecked(page);
1646 		ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
1647 					ext4_get_block);
1648 		if (ret != 0) {
1649 			ext4_journal_stop(handle);
1650 			goto out_unlock;
1651 		}
1652 		ret = walk_page_buffers(handle, page_buffers(page), 0,
1653 			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
1654 
1655 		err = walk_page_buffers(handle, page_buffers(page), 0,
1656 				PAGE_CACHE_SIZE, NULL, write_end_fn);
1657 		if (ret == 0)
1658 			ret = err;
1659 		EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
1660 		unlock_page(page);
1661 	} else {
1662 		/*
1663 		 * It may be a page full of checkpoint-mode buffers.  We don't
1664 		 * really know unless we go poke around in the buffer_heads.
1665 		 * But block_write_full_page will do the right thing.
1666 		 */
1667 		ret = block_write_full_page(page, ext4_get_block, wbc);
1668 	}
1669 	err = ext4_journal_stop(handle);
1670 	if (!ret)
1671 		ret = err;
1672 out:
1673 	return ret;
1674 
1675 no_write:
1676 	redirty_page_for_writepage(wbc, page);
1677 out_unlock:
1678 	unlock_page(page);
1679 	goto out;
1680 }
1681 
1682 static int ext4_readpage(struct file *file, struct page *page)
1683 {
1684 	return mpage_readpage(page, ext4_get_block);
1685 }
1686 
1687 static int
1688 ext4_readpages(struct file *file, struct address_space *mapping,
1689 		struct list_head *pages, unsigned nr_pages)
1690 {
1691 	return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
1692 }
1693 
1694 static void ext4_invalidatepage(struct page *page, unsigned long offset)
1695 {
1696 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
1697 
1698 	/*
1699 	 * If it's a full truncate we just forget about the pending dirtying
1700 	 */
1701 	if (offset == 0)
1702 		ClearPageChecked(page);
1703 
1704 	jbd2_journal_invalidatepage(journal, page, offset);
1705 }
1706 
1707 static int ext4_releasepage(struct page *page, gfp_t wait)
1708 {
1709 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
1710 
1711 	WARN_ON(PageChecked(page));
1712 	if (!page_has_buffers(page))
1713 		return 0;
1714 	return jbd2_journal_try_to_free_buffers(journal, page, wait);
1715 }
1716 
1717 /*
1718  * If the O_DIRECT write will extend the file then add this inode to the
1719  * orphan list.  So recovery will truncate it back to the original size
1720  * if the machine crashes during the write.
1721  *
1722  * If the O_DIRECT write is instantiating holes inside i_size and the machine
1723  * crashes then stale disk data _may_ be exposed inside the file. But the
1724  * current VFS code falls back to the buffered path in that case so we are safe.
1725  */
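/*
 * Illustrative sequence (an editorial sketch, not part of the original
 * source): an O_DIRECT write at offset == i_size first puts the inode on
 * the orphan list and snapshots the current size in i_disksize, then does
 * the block-device direct I/O, and only on success extends i_size and
 * i_disksize and drops the orphan entry.  A crash in between leaves the
 * orphan record in place, so journal recovery truncates the file back to
 * its original length instead of exposing a partially written tail.
 */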
1726 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
1727 			const struct iovec *iov, loff_t offset,
1728 			unsigned long nr_segs)
1729 {
1730 	struct file *file = iocb->ki_filp;
1731 	struct inode *inode = file->f_mapping->host;
1732 	struct ext4_inode_info *ei = EXT4_I(inode);
1733 	handle_t *handle;
1734 	ssize_t ret;
1735 	int orphan = 0;
1736 	size_t count = iov_length(iov, nr_segs);
1737 
1738 	if (rw == WRITE) {
1739 		loff_t final_size = offset + count;
1740 
1741 		if (final_size > inode->i_size) {
1742 			/* Credits for sb + inode write */
1743 			handle = ext4_journal_start(inode, 2);
1744 			if (IS_ERR(handle)) {
1745 				ret = PTR_ERR(handle);
1746 				goto out;
1747 			}
1748 			ret = ext4_orphan_add(handle, inode);
1749 			if (ret) {
1750 				ext4_journal_stop(handle);
1751 				goto out;
1752 			}
1753 			orphan = 1;
1754 			ei->i_disksize = inode->i_size;
1755 			ext4_journal_stop(handle);
1756 		}
1757 	}
1758 
1759 	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
1760 				 offset, nr_segs,
1761 				 ext4_get_block, NULL);
1762 
1763 	if (orphan) {
1764 		int err;
1765 
1766 		/* Credits for sb + inode write */
1767 		handle = ext4_journal_start(inode, 2);
1768 		if (IS_ERR(handle)) {
1769 			/* This is really bad luck. We've written the data
1770 			 * but cannot extend i_size. Bail out and pretend
1771 			 * the write failed... */
1772 			ret = PTR_ERR(handle);
1773 			goto out;
1774 		}
1775 		if (inode->i_nlink)
1776 			ext4_orphan_del(handle, inode);
1777 		if (ret > 0) {
1778 			loff_t end = offset + ret;
1779 			if (end > inode->i_size) {
1780 				ei->i_disksize = end;
1781 				i_size_write(inode, end);
1782 				/*
1783 				 * We're going to return a positive `ret'
1784 				 * here due to non-zero-length I/O, so there's
1785 				 * no way of reporting error returns from
1786 				 * ext4_mark_inode_dirty() to userspace.  So
1787 				 * ignore it.
1788 				 */
1789 				ext4_mark_inode_dirty(handle, inode);
1790 			}
1791 		}
1792 		err = ext4_journal_stop(handle);
1793 		if (ret == 0)
1794 			ret = err;
1795 	}
1796 out:
1797 	return ret;
1798 }
1799 
1800 /*
1801  * Pages can be marked dirty completely asynchronously from ext4's journalling
1802  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
1803  * much here because ->set_page_dirty is called under VFS locks.  The page is
1804  * not necessarily locked.
1805  *
1806  * We cannot just dirty the page and leave attached buffers clean, because the
1807  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
1808  * or jbddirty because all the journalling code will explode.
1809  *
1810  * So what we do is to mark the page "pending dirty" and next time writepage
1811  * is called, propagate that into the buffers appropriately.
1812  */
1813 static int ext4_journalled_set_page_dirty(struct page *page)
1814 {
1815 	SetPageChecked(page);
1816 	return __set_page_dirty_nobuffers(page);
1817 }
1818 
1819 static const struct address_space_operations ext4_ordered_aops = {
1820 	.readpage	= ext4_readpage,
1821 	.readpages	= ext4_readpages,
1822 	.writepage	= ext4_ordered_writepage,
1823 	.sync_page	= block_sync_page,
1824 	.write_begin	= ext4_write_begin,
1825 	.write_end	= ext4_ordered_write_end,
1826 	.bmap		= ext4_bmap,
1827 	.invalidatepage	= ext4_invalidatepage,
1828 	.releasepage	= ext4_releasepage,
1829 	.direct_IO	= ext4_direct_IO,
1830 	.migratepage	= buffer_migrate_page,
1831 };
1832 
1833 static const struct address_space_operations ext4_writeback_aops = {
1834 	.readpage	= ext4_readpage,
1835 	.readpages	= ext4_readpages,
1836 	.writepage	= ext4_writeback_writepage,
1837 	.sync_page	= block_sync_page,
1838 	.write_begin	= ext4_write_begin,
1839 	.write_end	= ext4_writeback_write_end,
1840 	.bmap		= ext4_bmap,
1841 	.invalidatepage	= ext4_invalidatepage,
1842 	.releasepage	= ext4_releasepage,
1843 	.direct_IO	= ext4_direct_IO,
1844 	.migratepage	= buffer_migrate_page,
1845 };
1846 
1847 static const struct address_space_operations ext4_journalled_aops = {
1848 	.readpage	= ext4_readpage,
1849 	.readpages	= ext4_readpages,
1850 	.writepage	= ext4_journalled_writepage,
1851 	.sync_page	= block_sync_page,
1852 	.write_begin	= ext4_write_begin,
1853 	.write_end	= ext4_journalled_write_end,
1854 	.set_page_dirty	= ext4_journalled_set_page_dirty,
1855 	.bmap		= ext4_bmap,
1856 	.invalidatepage	= ext4_invalidatepage,
1857 	.releasepage	= ext4_releasepage,
1858 };
1859 
1860 void ext4_set_aops(struct inode *inode)
1861 {
1862 	if (ext4_should_order_data(inode))
1863 		inode->i_mapping->a_ops = &ext4_ordered_aops;
1864 	else if (ext4_should_writeback_data(inode))
1865 		inode->i_mapping->a_ops = &ext4_writeback_aops;
1866 	else
1867 		inode->i_mapping->a_ops = &ext4_journalled_aops;
1868 }
1869 
1870 /*
1871  * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
1872  * up to the end of the block which corresponds to `from'.
1873  * This is required during truncate. We need to physically zero the tail end
1874  * of that block so it doesn't yield old data if the file is later grown.
1875  */
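/*
 * Worked example (editorial, not from the original source): with 4K
 * blocks, truncating to a new size of 5000 bytes gives offset = 5000 &
 * 4095 = 904 and length = 4096 - 904 = 3192, so bytes 904..4095 of the
 * block containing the new EOF are zeroed below.
 */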
1876 int ext4_block_truncate_page(handle_t *handle, struct page *page,
1877 		struct address_space *mapping, loff_t from)
1878 {
1879 	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
1880 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
1881 	unsigned blocksize, length, pos;
1882 	ext4_lblk_t iblock;
1883 	struct inode *inode = mapping->host;
1884 	struct buffer_head *bh;
1885 	int err = 0;
1886 
1887 	blocksize = inode->i_sb->s_blocksize;
1888 	length = blocksize - (offset & (blocksize - 1));
1889 	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
1890 
1891 	/*
1892 	 * For the "nobh" option, we can only work if we don't need to
1893 	 * read-in the page - otherwise we create buffers to do the IO.
1894 	 */
1895 	if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
1896 	     ext4_should_writeback_data(inode) && PageUptodate(page)) {
1897 		zero_user(page, offset, length);
1898 		set_page_dirty(page);
1899 		goto unlock;
1900 	}
1901 
1902 	if (!page_has_buffers(page))
1903 		create_empty_buffers(page, blocksize, 0);
1904 
1905 	/* Find the buffer that contains "offset" */
1906 	bh = page_buffers(page);
1907 	pos = blocksize;
1908 	while (offset >= pos) {
1909 		bh = bh->b_this_page;
1910 		iblock++;
1911 		pos += blocksize;
1912 	}
1913 
1914 	err = 0;
1915 	if (buffer_freed(bh)) {
1916 		BUFFER_TRACE(bh, "freed: skip");
1917 		goto unlock;
1918 	}
1919 
1920 	if (!buffer_mapped(bh)) {
1921 		BUFFER_TRACE(bh, "unmapped");
1922 		ext4_get_block(inode, iblock, bh, 0);
1923 		/* unmapped? It's a hole - nothing to do */
1924 		if (!buffer_mapped(bh)) {
1925 			BUFFER_TRACE(bh, "still unmapped");
1926 			goto unlock;
1927 		}
1928 	}
1929 
1930 	/* Ok, it's mapped. Make sure it's up-to-date */
1931 	if (PageUptodate(page))
1932 		set_buffer_uptodate(bh);
1933 
1934 	if (!buffer_uptodate(bh)) {
1935 		err = -EIO;
1936 		ll_rw_block(READ, 1, &bh);
1937 		wait_on_buffer(bh);
1938 		/* Uhhuh. Read error. Complain and punt. */
1939 		if (!buffer_uptodate(bh))
1940 			goto unlock;
1941 	}
1942 
1943 	if (ext4_should_journal_data(inode)) {
1944 		BUFFER_TRACE(bh, "get write access");
1945 		err = ext4_journal_get_write_access(handle, bh);
1946 		if (err)
1947 			goto unlock;
1948 	}
1949 
1950 	zero_user(page, offset, length);
1951 
1952 	BUFFER_TRACE(bh, "zeroed end of block");
1953 
1954 	err = 0;
1955 	if (ext4_should_journal_data(inode)) {
1956 		err = ext4_journal_dirty_metadata(handle, bh);
1957 	} else {
1958 		if (ext4_should_order_data(inode))
1959 			err = ext4_journal_dirty_data(handle, bh);
1960 		mark_buffer_dirty(bh);
1961 	}
1962 
1963 unlock:
1964 	unlock_page(page);
1965 	page_cache_release(page);
1966 	return err;
1967 }
1968 
1969 /*
1970  * Probably it should be a library function... search for first non-zero word
1971  * or memcmp with zero_page, whatever is better for particular architecture.
1972  * Linus?
1973  */
1974 static inline int all_zeroes(__le32 *p, __le32 *q)
1975 {
1976 	while (p < q)
1977 		if (*p++)
1978 			return 0;
1979 	return 1;
1980 }
1981 
1982 /**
1983  *	ext4_find_shared - find the indirect blocks for partial truncation.
1984  *	@inode:	  inode in question
1985  *	@depth:	  depth of the affected branch
1986  *	@offsets: offsets of pointers in that branch (see ext4_block_to_path)
1987  *	@chain:	  place to store the pointers to partial indirect blocks
1988  *	@top:	  place to the (detached) top of branch
1989  *
1990  *	This is a helper function used by ext4_truncate().
1991  *
1992  *	When we do truncate() we may have to clean the ends of several
1993  *	indirect blocks but leave the blocks themselves alive. A block is
1994  *	partially truncated if some data below the new i_size is referenced
1995  *	from it (and it is on the path to the first completely truncated
1996  *	data block, indeed).  We have to free the top of that path along
1997  *	with everything to the right of the path. Since no allocation
1998  *	past the truncation point is possible until ext4_truncate()
1999  *	finishes, we may safely do the latter, but top of branch may
2000  *	require special attention - pageout below the truncation point
2001  *	might try to populate it.
2002  *
2003  *	We atomically detach the top of branch from the tree, store the
2004  *	block number of its root in *@top, pointers to buffer_heads of
2005  *	partially truncated blocks - in @chain[].bh and pointers to
2006  *	their last elements that should not be removed - in
2007  *	@chain[].p. Return value is the pointer to last filled element
2008  *	of @chain.
2009  *
2010  *	The work left to the caller is to do the actual freeing of subtrees:
2011  *		a) free the subtree starting from *@top
2012  *		b) free the subtrees whose roots are stored in
2013  *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
2014  *		c) free the subtrees growing from the inode past the @chain[0].
2015  *			(no partially truncated stuff there).  */
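/*
 * Editorial example (not part of the original source): if the first block
 * past the truncation point lands at slot 5 of the singly-indirect block,
 * this function takes the "cheat" path below and returns with partial->p
 * decremented to slot 4 and *top == 0; the caller then frees the subtrees
 * at slot 5 onwards of that indirect block while the block itself stays in
 * the tree.
 */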
2016 
2017 static Indirect *ext4_find_shared(struct inode *inode, int depth,
2018 			ext4_lblk_t offsets[4], Indirect chain[4], __le32 *top)
2019 {
2020 	Indirect *partial, *p;
2021 	int k, err;
2022 
2023 	*top = 0;
2024 	/* Make k index the deepest non-null offset + 1 */
2025 	for (k = depth; k > 1 && !offsets[k-1]; k--)
2026 		;
2027 	partial = ext4_get_branch(inode, k, offsets, chain, &err);
2028 	/* Writer: pointers */
2029 	if (!partial)
2030 		partial = chain + k-1;
2031 	/*
2032 	 * If the branch acquired continuation since we've looked at it -
2033 	 * fine, it should all survive and (new) top doesn't belong to us.
2034 	 */
2035 	if (!partial->key && *partial->p)
2036 		/* Writer: end */
2037 		goto no_top;
2038 	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
2039 		;
2040 	/*
2041 	 * OK, we've found the last block that must survive. The rest of our
2042 	 * branch should be detached before unlocking. However, if the rest
2043 	 * of the branch is all ours and does not grow immediately from the inode,
2044 	 * it's easier to cheat and just decrement partial->p.
2045 	 */
2046 	if (p == chain + k - 1 && p > chain) {
2047 		p->p--;
2048 	} else {
2049 		*top = *p->p;
2050 		/* Nope, don't do this in ext4.  Must leave the tree intact */
2051 #if 0
2052 		*p->p = 0;
2053 #endif
2054 	}
2055 	/* Writer: end */
2056 
2057 	while(partial > p) {
2058 		brelse(partial->bh);
2059 		partial--;
2060 	}
2061 no_top:
2062 	return partial;
2063 }
2064 
2065 /*
2066  * Zero a number of block pointers in either an inode or an indirect block.
2067  * If we restart the transaction we must again get write access to the
2068  * indirect block for further modification.
2069  *
2070  * We release `count' blocks on disk, but (last - first) may be greater
2071  * than `count' because there can be holes in there.
2072  */
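/*
 * Editorial example (not part of the original source): if the caller's
 * run detection hands us the pointer range {100, 101, 0, 102}, the hole
 * does not break the run, so we release blocks 100..102 with count == 3
 * even though (last - first) spans 4 pointers.
 */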
2073 static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
2074 		struct buffer_head *bh, ext4_fsblk_t block_to_free,
2075 		unsigned long count, __le32 *first, __le32 *last)
2076 {
2077 	__le32 *p;
2078 	if (try_to_extend_transaction(handle, inode)) {
2079 		if (bh) {
2080 			BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
2081 			ext4_journal_dirty_metadata(handle, bh);
2082 		}
2083 		ext4_mark_inode_dirty(handle, inode);
2084 		ext4_journal_test_restart(handle, inode);
2085 		if (bh) {
2086 			BUFFER_TRACE(bh, "retaking write access");
2087 			ext4_journal_get_write_access(handle, bh);
2088 		}
2089 	}
2090 
2091 	/*
2092 	 * Any buffers which are on the journal will be in memory. We find
2093 	 * them on the hash table so jbd2_journal_revoke() will run jbd2_journal_forget()
2094 	 * on them.  We've already detached each block from the file, so
2095 	 * bforget() in jbd2_journal_forget() should be safe.
2096 	 *
2097 	 * AKPM: turn on bforget in jbd2_journal_forget()!!!
2098 	 */
2099 	for (p = first; p < last; p++) {
2100 		u32 nr = le32_to_cpu(*p);
2101 		if (nr) {
2102 			struct buffer_head *tbh;
2103 
2104 			*p = 0;
2105 			tbh = sb_find_get_block(inode->i_sb, nr);
2106 			ext4_forget(handle, 0, inode, tbh, nr);
2107 		}
2108 	}
2109 
2110 	ext4_free_blocks(handle, inode, block_to_free, count, 0);
2111 }
2112 
2113 /**
2114  * ext4_free_data - free a list of data blocks
2115  * @handle:	handle for this transaction
2116  * @inode:	inode we are dealing with
2117  * @this_bh:	indirect buffer_head which contains *@first and *@last
2118  * @first:	array of block numbers
2119  * @last:	points immediately past the end of array
2120  *
2121  * We are freeing all blocks referenced from that array (numbers are stored as
2122  * little-endian 32-bit) and updating @inode->i_blocks appropriately.
2123  *
2124  * We accumulate contiguous runs of blocks to free.  Conveniently, if these
2125  * blocks are contiguous then releasing them at one time will only affect one
2126  * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
2127  * actually use a lot of journal space.
2128  *
2129  * @this_bh will be %NULL if @first and @last point into the inode's direct
2130  * block pointers.
2131  */
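/*
 * Editorial example (not from the original source): for the pointer
 * array {100, 101, 102, 200} the loop below issues two calls to
 * ext4_clear_blocks(): one for the run starting at block 100 with
 * count == 3, and one for the lone block 200 once the loop finishes.
 */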
2132 static void ext4_free_data(handle_t *handle, struct inode *inode,
2133 			   struct buffer_head *this_bh,
2134 			   __le32 *first, __le32 *last)
2135 {
2136 	ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
2137 	unsigned long count = 0;	    /* Number of blocks in the run */
2138 	__le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
2139 					       corresponding to
2140 					       block_to_free */
2141 	ext4_fsblk_t nr;		    /* Current block # */
2142 	__le32 *p;			    /* Pointer into inode/ind
2143 					       for current block */
2144 	int err;
2145 
2146 	if (this_bh) {				/* For indirect block */
2147 		BUFFER_TRACE(this_bh, "get_write_access");
2148 		err = ext4_journal_get_write_access(handle, this_bh);
2149 		/* Important: if we can't update the indirect pointers
2150 		 * to the blocks, we can't free them. */
2151 		if (err)
2152 			return;
2153 	}
2154 
2155 	for (p = first; p < last; p++) {
2156 		nr = le32_to_cpu(*p);
2157 		if (nr) {
2158 			/* accumulate blocks to free if they're contiguous */
2159 			if (count == 0) {
2160 				block_to_free = nr;
2161 				block_to_free_p = p;
2162 				count = 1;
2163 			} else if (nr == block_to_free + count) {
2164 				count++;
2165 			} else {
2166 				ext4_clear_blocks(handle, inode, this_bh,
2167 						  block_to_free,
2168 						  count, block_to_free_p, p);
2169 				block_to_free = nr;
2170 				block_to_free_p = p;
2171 				count = 1;
2172 			}
2173 		}
2174 	}
2175 
2176 	if (count > 0)
2177 		ext4_clear_blocks(handle, inode, this_bh, block_to_free,
2178 				  count, block_to_free_p, p);
2179 
2180 	if (this_bh) {
2181 		BUFFER_TRACE(this_bh, "call ext4_journal_dirty_metadata");
2182 		ext4_journal_dirty_metadata(handle, this_bh);
2183 	}
2184 }
2185 
2186 /**
2187  *	ext4_free_branches - free an array of branches
2188  *	@handle: JBD handle for this transaction
2189  *	@inode:	inode we are dealing with
2190  *	@parent_bh: the buffer_head which contains *@first and *@last
2191  *	@first:	array of block numbers
2192  *	@last:	pointer immediately past the end of array
2193  *	@depth:	depth of the branches to free
2194  *
2195  *	We are freeing all blocks referenced from these branches (numbers are
2196  *	stored as little-endian 32-bit) and updating @inode->i_blocks
2197  *	appropriately.
2198  */
2199 static void ext4_free_branches(handle_t *handle, struct inode *inode,
2200 			       struct buffer_head *parent_bh,
2201 			       __le32 *first, __le32 *last, int depth)
2202 {
2203 	ext4_fsblk_t nr;
2204 	__le32 *p;
2205 
2206 	if (is_handle_aborted(handle))
2207 		return;
2208 
2209 	if (depth--) {
2210 		struct buffer_head *bh;
2211 		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
2212 		p = last;
2213 		while (--p >= first) {
2214 			nr = le32_to_cpu(*p);
2215 			if (!nr)
2216 				continue;		/* A hole */
2217 
2218 			/* Go read the buffer for the next level down */
2219 			bh = sb_bread(inode->i_sb, nr);
2220 
2221 			/*
2222 			 * A read failure? Report error and clear slot
2223 			 * (should be rare).
2224 			 */
2225 			if (!bh) {
2226 				ext4_error(inode->i_sb, "ext4_free_branches",
2227 					   "Read failure, inode=%lu, block=%llu",
2228 					   inode->i_ino, nr);
2229 				continue;
2230 			}
2231 
2232 			/* This zaps the entire block.  Bottom up. */
2233 			BUFFER_TRACE(bh, "free child branches");
2234 			ext4_free_branches(handle, inode, bh,
2235 					   (__le32*)bh->b_data,
2236 					   (__le32*)bh->b_data + addr_per_block,
2237 					   depth);
2238 
2239 			/*
2240 			 * We've probably journalled the indirect block several
2241 			 * times during the truncate.  But it's no longer
2242 			 * needed and we now drop it from the transaction via
2243 			 * jbd2_journal_revoke().
2244 			 *
2245 			 * That's easy if it's exclusively part of this
2246 			 * transaction.  But if it's part of the committing
2247 			 * transaction then jbd2_journal_forget() will simply
2248 			 * brelse() it.  That means that if the underlying
2249 			 * block is reallocated in ext4_get_block(),
2250 			 * unmap_underlying_metadata() will find this block
2251 			 * and will try to get rid of it.  damn, damn.
2252 			 *
2253 			 * If this block has already been committed to the
2254 			 * journal, a revoke record will be written.  And
2255 			 * revoke records must be emitted *before* clearing
2256 			 * this block's bit in the bitmaps.
2257 			 */
2258 			ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
2259 
2260 			/*
2261 			 * Everything below this pointer has been
2262 			 * released.  Now let this top-of-subtree go.
2263 			 *
2264 			 * We want the freeing of this indirect block to be
2265 			 * atomic in the journal with the updating of the
2266 			 * bitmap block which owns it.  So make some room in
2267 			 * the journal.
2268 			 *
2269 			 * We zero the parent pointer *after* freeing its
2270 			 * pointee in the bitmaps, so if extend_transaction()
2271 			 * for some reason fails to put the bitmap changes and
2272 			 * the release into the same transaction, recovery
2273 			 * will merely complain about releasing a free block,
2274 			 * rather than leaking blocks.
2275 			 */
2276 			if (is_handle_aborted(handle))
2277 				return;
2278 			if (try_to_extend_transaction(handle, inode)) {
2279 				ext4_mark_inode_dirty(handle, inode);
2280 				ext4_journal_test_restart(handle, inode);
2281 			}
2282 
2283 			ext4_free_blocks(handle, inode, nr, 1, 1);
2284 
2285 			if (parent_bh) {
2286 				/*
2287 				 * The block which we have just freed is
2288 				 * pointed to by an indirect block: journal it
2289 				 */
2290 				BUFFER_TRACE(parent_bh, "get_write_access");
2291 				if (!ext4_journal_get_write_access(handle,
2292 								   parent_bh)){
2293 					*p = 0;
2294 					BUFFER_TRACE(parent_bh,
2295 					"call ext4_journal_dirty_metadata");
2296 					ext4_journal_dirty_metadata(handle,
2297 								    parent_bh);
2298 				}
2299 			}
2300 		}
2301 	} else {
2302 		/* We have reached the bottom of the tree. */
2303 		BUFFER_TRACE(parent_bh, "free data blocks");
2304 		ext4_free_data(handle, inode, parent_bh, first, last);
2305 	}
2306 }
2307 
2308 /*
2309  * ext4_truncate()
2310  *
2311  * We block out ext4_get_block() block instantiations across the entire
2312  * transaction, and VFS/VM ensures that ext4_truncate() cannot run
2313  * simultaneously on behalf of the same inode.
2314  *
2315  * As we work through the truncate and commit bits of it to the journal there
2316  * is one core, guiding principle: the file's tree must always be consistent on
2317  * disk.  We must be able to restart the truncate after a crash.
2318  *
2319  * The file's tree may be transiently inconsistent in memory (although it
2320  * probably isn't), but whenever we close off and commit a journal transaction,
2321  * the contents of (the filesystem + the journal) must be consistent and
2322  * restartable.  It's pretty simple, really: bottom up, right to left (although
2323  * left-to-right works OK too).
2324  *
2325  * Note that at recovery time, journal replay occurs *before* the restart of
2326  * truncate against the orphan inode list.
2327  *
2328  * The committed inode has the new, desired i_size (which is the same as
2329  * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
2330  * that this inode's truncate did not complete and it will again call
2331  * ext4_truncate() to have another go.  So there will be instantiated blocks
2332  * to the right of the truncation point in a crashed ext4 filesystem.  But
2333  * that's fine - as long as they are linked from the inode, the post-crash
2334  * ext4_truncate() run will find them and release them.
2335  */
2336 void ext4_truncate(struct inode *inode)
2337 {
2338 	handle_t *handle;
2339 	struct ext4_inode_info *ei = EXT4_I(inode);
2340 	__le32 *i_data = ei->i_data;
2341 	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
2342 	struct address_space *mapping = inode->i_mapping;
2343 	ext4_lblk_t offsets[4];
2344 	Indirect chain[4];
2345 	Indirect *partial;
2346 	__le32 nr = 0;
2347 	int n;
2348 	ext4_lblk_t last_block;
2349 	unsigned blocksize = inode->i_sb->s_blocksize;
2350 	struct page *page;
2351 
2352 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
2353 	    S_ISLNK(inode->i_mode)))
2354 		return;
2355 	if (ext4_inode_is_fast_symlink(inode))
2356 		return;
2357 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2358 		return;
2359 
2360 	/*
2361 	 * We have to lock the EOF page here, because lock_page() nests
2362 	 * outside jbd2_journal_start().
2363 	 */
2364 	if ((inode->i_size & (blocksize - 1)) == 0) {
2365 		/* Block boundary? Nothing to do */
2366 		page = NULL;
2367 	} else {
2368 		page = grab_cache_page(mapping,
2369 				inode->i_size >> PAGE_CACHE_SHIFT);
2370 		if (!page)
2371 			return;
2372 	}
2373 
2374 	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
2375 		ext4_ext_truncate(inode, page);
2376 		return;
2377 	}
2378 
2379 	handle = start_transaction(inode);
2380 	if (IS_ERR(handle)) {
2381 		if (page) {
2382 			clear_highpage(page);
2383 			flush_dcache_page(page);
2384 			unlock_page(page);
2385 			page_cache_release(page);
2386 		}
2387 		return;		/* AKPM: return what? */
2388 	}
2389 
2390 	last_block = (inode->i_size + blocksize-1)
2391 					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
2392 
2393 	if (page)
2394 		ext4_block_truncate_page(handle, page, mapping, inode->i_size);
2395 
2396 	n = ext4_block_to_path(inode, last_block, offsets, NULL);
2397 	if (n == 0)
2398 		goto out_stop;	/* error */
2399 
2400 	/*
2401 	 * OK.  This truncate is going to happen.  We add the inode to the
2402 	 * orphan list, so that if this truncate spans multiple transactions,
2403 	 * and we crash, we will resume the truncate when the filesystem
2404 	 * recovers.  It also marks the inode dirty, to catch the new size.
2405 	 *
2406 	 * Implication: the file must always be in a sane, consistent
2407 	 * truncatable state while each transaction commits.
2408 	 */
2409 	if (ext4_orphan_add(handle, inode))
2410 		goto out_stop;
2411 
2412 	/*
2413 	 * The orphan list entry will now protect us from any crash which
2414 	 * occurs before the truncate completes, so it is now safe to propagate
2415 	 * the new, shorter inode size (held for now in i_size) into the
2416 	 * on-disk inode. We do this via i_disksize, which is the value which
2417 	 * ext4 *really* writes onto the disk inode.
2418 	 */
2419 	ei->i_disksize = inode->i_size;
2420 
2421 	/*
2422 	 * From here we block out all ext4_get_block() callers who want to
2423 	 * modify the block allocation tree.
2424 	 */
2425 	down_write(&ei->i_data_sem);
2426 
2427 	if (n == 1) {		/* direct blocks */
2428 		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
2429 			       i_data + EXT4_NDIR_BLOCKS);
2430 		goto do_indirects;
2431 	}
2432 
2433 	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
2434 	/* Kill the top of shared branch (not detached) */
2435 	if (nr) {
2436 		if (partial == chain) {
2437 			/* Shared branch grows from the inode */
2438 			ext4_free_branches(handle, inode, NULL,
2439 					   &nr, &nr+1, (chain+n-1) - partial);
2440 			*partial->p = 0;
2441 			/*
2442 			 * We mark the inode dirty prior to restart,
2443 			 * and prior to stop.  No need for it here.
2444 			 */
2445 		} else {
2446 			/* Shared branch grows from an indirect block */
2447 			BUFFER_TRACE(partial->bh, "get_write_access");
2448 			ext4_free_branches(handle, inode, partial->bh,
2449 					partial->p,
2450 					partial->p+1, (chain+n-1) - partial);
2451 		}
2452 	}
2453 	/* Clear the ends of indirect blocks on the shared branch */
2454 	while (partial > chain) {
2455 		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
2456 				   (__le32*)partial->bh->b_data+addr_per_block,
2457 				   (chain+n-1) - partial);
2458 		BUFFER_TRACE(partial->bh, "call brelse");
2459 		brelse (partial->bh);
2460 		partial--;
2461 	}
2462 do_indirects:
2463 	/* Kill the remaining (whole) subtrees */
2464 	switch (offsets[0]) {
2465 	default:
2466 		nr = i_data[EXT4_IND_BLOCK];
2467 		if (nr) {
2468 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
2469 			i_data[EXT4_IND_BLOCK] = 0;
2470 		}
2471 	case EXT4_IND_BLOCK:
2472 		nr = i_data[EXT4_DIND_BLOCK];
2473 		if (nr) {
2474 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
2475 			i_data[EXT4_DIND_BLOCK] = 0;
2476 		}
2477 	case EXT4_DIND_BLOCK:
2478 		nr = i_data[EXT4_TIND_BLOCK];
2479 		if (nr) {
2480 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
2481 			i_data[EXT4_TIND_BLOCK] = 0;
2482 		}
2483 	case EXT4_TIND_BLOCK:
2484 		;
2485 	}
2486 
2487 	ext4_discard_reservation(inode);
2488 
2489 	up_write(&ei->i_data_sem);
2490 	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
2491 	ext4_mark_inode_dirty(handle, inode);
2492 
2493 	/*
2494 	 * In a multi-transaction truncate, we only make the final transaction
2495 	 * synchronous
2496 	 */
2497 	if (IS_SYNC(inode))
2498 		handle->h_sync = 1;
2499 out_stop:
2500 	/*
2501 	 * If this was a simple ftruncate(), and the file will remain alive
2502 	 * then we need to clear up the orphan record which we created above.
2503 	 * However, if this was a real unlink then we were called by
2504 	 * ext4_delete_inode(), and we allow that function to clean up the
2505 	 * orphan info for us.
2506 	 */
2507 	if (inode->i_nlink)
2508 		ext4_orphan_del(handle, inode);
2509 
2510 	ext4_journal_stop(handle);
2511 }
2512 
2513 static ext4_fsblk_t ext4_get_inode_block(struct super_block *sb,
2514 		unsigned long ino, struct ext4_iloc *iloc)
2515 {
2516 	ext4_group_t block_group;
2517 	unsigned long offset;
2518 	ext4_fsblk_t block;
2519 	struct ext4_group_desc *gdp;
2520 
2521 	if (!ext4_valid_inum(sb, ino)) {
2522 		/*
2523 		 * This error is already checked for in namei.c unless we are
2524 		 * looking at an NFS filehandle, in which case no error
2525 		 * report is needed
2526 		 */
2527 		return 0;
2528 	}
2529 
2530 	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
2531 	gdp = ext4_get_group_desc(sb, block_group, NULL);
2532 	if (!gdp)
2533 		return 0;
2534 
2535 	/*
2536 	 * Figure out the offset within the block group inode table
2537 	 */
2538 	offset = ((ino - 1) % EXT4_INODES_PER_GROUP(sb)) *
2539 		EXT4_INODE_SIZE(sb);
2540 	block = ext4_inode_table(sb, gdp) +
2541 		(offset >> EXT4_BLOCK_SIZE_BITS(sb));
2542 
2543 	iloc->block_group = block_group;
2544 	iloc->offset = offset & (EXT4_BLOCK_SIZE(sb) - 1);
2545 	return block;
2546 }
2547 
2548 /*
2549  * ext4_get_inode_loc returns with an extra refcount against the inode's
2550  * underlying buffer_head on success. If 'in_mem' is true, we have all
2551  * data in memory that is needed to recreate the on-disk version of this
2552  * inode.
2553  */
2554 static int __ext4_get_inode_loc(struct inode *inode,
2555 				struct ext4_iloc *iloc, int in_mem)
2556 {
2557 	ext4_fsblk_t block;
2558 	struct buffer_head *bh;
2559 
2560 	block = ext4_get_inode_block(inode->i_sb, inode->i_ino, iloc);
2561 	if (!block)
2562 		return -EIO;
2563 
2564 	bh = sb_getblk(inode->i_sb, block);
2565 	if (!bh) {
2566 		ext4_error (inode->i_sb, "ext4_get_inode_loc",
2567 				"unable to read inode block - "
2568 				"inode=%lu, block=%llu",
2569 				 inode->i_ino, block);
2570 		return -EIO;
2571 	}
2572 	if (!buffer_uptodate(bh)) {
2573 		lock_buffer(bh);
2574 		if (buffer_uptodate(bh)) {
2575 			/* someone brought it uptodate while we waited */
2576 			unlock_buffer(bh);
2577 			goto has_buffer;
2578 		}
2579 
2580 		/*
2581 		 * If we have all information of the inode in memory and this
2582 		 * is the only valid inode in the block, we need not read the
2583 		 * block.
2584 		 */
2585 		if (in_mem) {
2586 			struct buffer_head *bitmap_bh;
2587 			struct ext4_group_desc *desc;
2588 			int inodes_per_buffer;
2589 			int inode_offset, i;
2590 			ext4_group_t block_group;
2591 			int start;
2592 
2593 			block_group = (inode->i_ino - 1) /
2594 					EXT4_INODES_PER_GROUP(inode->i_sb);
2595 			inodes_per_buffer = bh->b_size /
2596 				EXT4_INODE_SIZE(inode->i_sb);
2597 			inode_offset = ((inode->i_ino - 1) %
2598 					EXT4_INODES_PER_GROUP(inode->i_sb));
2599 			start = inode_offset & ~(inodes_per_buffer - 1);
2600 
2601 			/* Is the inode bitmap in cache? */
2602 			desc = ext4_get_group_desc(inode->i_sb,
2603 						block_group, NULL);
2604 			if (!desc)
2605 				goto make_io;
2606 
2607 			bitmap_bh = sb_getblk(inode->i_sb,
2608 				ext4_inode_bitmap(inode->i_sb, desc));
2609 			if (!bitmap_bh)
2610 				goto make_io;
2611 
2612 			/*
2613 			 * If the inode bitmap isn't in cache then the
2614 			 * optimisation may end up performing two reads instead
2615 			 * of one, so skip it.
2616 			 */
2617 			if (!buffer_uptodate(bitmap_bh)) {
2618 				brelse(bitmap_bh);
2619 				goto make_io;
2620 			}
2621 			for (i = start; i < start + inodes_per_buffer; i++) {
2622 				if (i == inode_offset)
2623 					continue;
2624 				if (ext4_test_bit(i, bitmap_bh->b_data))
2625 					break;
2626 			}
2627 			brelse(bitmap_bh);
2628 			if (i == start + inodes_per_buffer) {
2629 				/* all other inodes are free, so skip I/O */
2630 				memset(bh->b_data, 0, bh->b_size);
2631 				set_buffer_uptodate(bh);
2632 				unlock_buffer(bh);
2633 				goto has_buffer;
2634 			}
2635 		}
2636 
2637 make_io:
2638 		/*
2639 		 * There are other valid inodes in the buffer, this inode
2640 		 * has in-inode xattrs, or we don't have this inode in memory.
2641 		 * Read the block from disk.
2642 		 */
2643 		get_bh(bh);
2644 		bh->b_end_io = end_buffer_read_sync;
2645 		submit_bh(READ_META, bh);
2646 		wait_on_buffer(bh);
2647 		if (!buffer_uptodate(bh)) {
2648 			ext4_error(inode->i_sb, "ext4_get_inode_loc",
2649 					"unable to read inode block - "
2650 					"inode=%lu, block=%llu",
2651 					inode->i_ino, block);
2652 			brelse(bh);
2653 			return -EIO;
2654 		}
2655 	}
2656 has_buffer:
2657 	iloc->bh = bh;
2658 	return 0;
2659 }
2660 
2661 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
2662 {
2663 	/* We have all inode data except xattrs in memory here. */
2664 	return __ext4_get_inode_loc(inode, iloc,
2665 		!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
2666 }
2667 
2668 void ext4_set_inode_flags(struct inode *inode)
2669 {
2670 	unsigned int flags = EXT4_I(inode)->i_flags;
2671 
2672 	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2673 	if (flags & EXT4_SYNC_FL)
2674 		inode->i_flags |= S_SYNC;
2675 	if (flags & EXT4_APPEND_FL)
2676 		inode->i_flags |= S_APPEND;
2677 	if (flags & EXT4_IMMUTABLE_FL)
2678 		inode->i_flags |= S_IMMUTABLE;
2679 	if (flags & EXT4_NOATIME_FL)
2680 		inode->i_flags |= S_NOATIME;
2681 	if (flags & EXT4_DIRSYNC_FL)
2682 		inode->i_flags |= S_DIRSYNC;
2683 }
2684 
2685 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
2686 void ext4_get_inode_flags(struct ext4_inode_info *ei)
2687 {
2688 	unsigned int flags = ei->vfs_inode.i_flags;
2689 
2690 	ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
2691 			EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
2692 	if (flags & S_SYNC)
2693 		ei->i_flags |= EXT4_SYNC_FL;
2694 	if (flags & S_APPEND)
2695 		ei->i_flags |= EXT4_APPEND_FL;
2696 	if (flags & S_IMMUTABLE)
2697 		ei->i_flags |= EXT4_IMMUTABLE_FL;
2698 	if (flags & S_NOATIME)
2699 		ei->i_flags |= EXT4_NOATIME_FL;
2700 	if (flags & S_DIRSYNC)
2701 		ei->i_flags |= EXT4_DIRSYNC_FL;
2702 }
2703 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
2704 					struct ext4_inode_info *ei)
2705 {
2706 	blkcnt_t i_blocks ;
2707 	struct inode *inode = &(ei->vfs_inode);
2708 	struct super_block *sb = inode->i_sb;
2709 
2710 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
2711 				EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
2712 		/* we are using combined 48 bit field */
2713 		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
2714 					le32_to_cpu(raw_inode->i_blocks_lo);
2715 		if (ei->i_flags & EXT4_HUGE_FILE_FL) {
2716 			/* i_blocks is expressed in units of the file system block size */
2717 			return i_blocks  << (inode->i_blkbits - 9);
2718 		} else {
2719 			return i_blocks;
2720 		}
2721 	} else {
2722 		return le32_to_cpu(raw_inode->i_blocks_lo);
2723 	}
2724 }
2725 
2726 struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
2727 {
2728 	struct ext4_iloc iloc;
2729 	struct ext4_inode *raw_inode;
2730 	struct ext4_inode_info *ei;
2731 	struct buffer_head *bh;
2732 	struct inode *inode;
2733 	long ret;
2734 	int block;
2735 
2736 	inode = iget_locked(sb, ino);
2737 	if (!inode)
2738 		return ERR_PTR(-ENOMEM);
2739 	if (!(inode->i_state & I_NEW))
2740 		return inode;
2741 
2742 	ei = EXT4_I(inode);
2743 #ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
2744 	ei->i_acl = EXT4_ACL_NOT_CACHED;
2745 	ei->i_default_acl = EXT4_ACL_NOT_CACHED;
2746 #endif
2747 	ei->i_block_alloc_info = NULL;
2748 
2749 	ret = __ext4_get_inode_loc(inode, &iloc, 0);
2750 	if (ret < 0)
2751 		goto bad_inode;
2752 	bh = iloc.bh;
2753 	raw_inode = ext4_raw_inode(&iloc);
2754 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
2755 	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
2756 	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
2757 	if(!(test_opt (inode->i_sb, NO_UID32))) {
2758 		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
2759 		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
2760 	}
2761 	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
2762 
2763 	ei->i_state = 0;
2764 	ei->i_dir_start_lookup = 0;
2765 	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
2766 	/* We now have enough fields to check if the inode was active or not.
2767 	 * This is needed because nfsd might try to access dead inodes;
2768 	 * the test is the same one that e2fsck uses.
2769 	 * NeilBrown 1999oct15
2770 	 */
2771 	if (inode->i_nlink == 0) {
2772 		if (inode->i_mode == 0 ||
2773 		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
2774 			/* this inode is deleted */
2775 			brelse (bh);
2776 			ret = -ESTALE;
2777 			goto bad_inode;
2778 		}
2779 		/* The only unlinked inodes we let through here have
2780 		 * valid i_mode and are being read by the orphan
2781 		 * recovery code: that's fine, we're about to complete
2782 		 * the process of deleting those. */
2783 	}
2784 	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
2785 	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
2786 	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
2787 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
2788 	    cpu_to_le32(EXT4_OS_HURD)) {
2789 		ei->i_file_acl |=
2790 			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
2791 	}
2792 	inode->i_size = ext4_isize(raw_inode);
2793 	ei->i_disksize = inode->i_size;
2794 	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
2795 	ei->i_block_group = iloc.block_group;
2796 	/*
2797 	 * NOTE! The in-memory inode i_data array is in little-endian order
2798 	 * even on big-endian machines: we do NOT byteswap the block numbers!
2799 	 */
2800 	for (block = 0; block < EXT4_N_BLOCKS; block++)
2801 		ei->i_data[block] = raw_inode->i_block[block];
2802 	INIT_LIST_HEAD(&ei->i_orphan);
2803 
2804 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
2805 		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
2806 		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
2807 		    EXT4_INODE_SIZE(inode->i_sb)) {
2808 			brelse (bh);
2809 			ret = -EIO;
2810 			goto bad_inode;
2811 		}
2812 		if (ei->i_extra_isize == 0) {
2813 			/* The extra space is currently unused. Use it. */
2814 			ei->i_extra_isize = sizeof(struct ext4_inode) -
2815 					    EXT4_GOOD_OLD_INODE_SIZE;
2816 		} else {
2817 			__le32 *magic = (void *)raw_inode +
2818 					EXT4_GOOD_OLD_INODE_SIZE +
2819 					ei->i_extra_isize;
2820 			if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
2821 				 ei->i_state |= EXT4_STATE_XATTR;
2822 		}
2823 	} else
2824 		ei->i_extra_isize = 0;
2825 
2826 	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
2827 	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
2828 	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
2829 	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
2830 
2831 	inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
2832 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
2833 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
2834 			inode->i_version |=
2835 			(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
2836 	}
2837 
2838 	if (S_ISREG(inode->i_mode)) {
2839 		inode->i_op = &ext4_file_inode_operations;
2840 		inode->i_fop = &ext4_file_operations;
2841 		ext4_set_aops(inode);
2842 	} else if (S_ISDIR(inode->i_mode)) {
2843 		inode->i_op = &ext4_dir_inode_operations;
2844 		inode->i_fop = &ext4_dir_operations;
2845 	} else if (S_ISLNK(inode->i_mode)) {
2846 		if (ext4_inode_is_fast_symlink(inode))
2847 			inode->i_op = &ext4_fast_symlink_inode_operations;
2848 		else {
2849 			inode->i_op = &ext4_symlink_inode_operations;
2850 			ext4_set_aops(inode);
2851 		}
2852 	} else {
2853 		inode->i_op = &ext4_special_inode_operations;
2854 		if (raw_inode->i_block[0])
2855 			init_special_inode(inode, inode->i_mode,
2856 			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
2857 		else
2858 			init_special_inode(inode, inode->i_mode,
2859 			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
2860 	}
2861 	brelse (iloc.bh);
2862 	ext4_set_inode_flags(inode);
2863 	unlock_new_inode(inode);
2864 	return inode;
2865 
2866 bad_inode:
2867 	iget_failed(inode);
2868 	return ERR_PTR(ret);
2869 }
2870 
2871 static int ext4_inode_blocks_set(handle_t *handle,
2872 				struct ext4_inode *raw_inode,
2873 				struct ext4_inode_info *ei)
2874 {
2875 	struct inode *inode = &(ei->vfs_inode);
2876 	u64 i_blocks = inode->i_blocks;
2877 	struct super_block *sb = inode->i_sb;
2878 	int err = 0;
2879 
2880 	if (i_blocks <= ~0U) {
2881 		/*
2882 		 * i_blocks can be represented in a 32 bit variable
2883 		 * as a multiple of 512 bytes
2884 		 */
2885 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
2886 		raw_inode->i_blocks_high = 0;
2887 		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
2888 	} else if (i_blocks <= 0xffffffffffffULL) {
2889 		/*
2890 		 * i_blocks can be represented in a 48 bit variable
2891 		 * as a multiple of 512 bytes
2892 		 */
2893 		err = ext4_update_rocompat_feature(handle, sb,
2894 					    EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
2895 		if (err)
2896 			goto  err_out;
2897 		/* i_blocks is stored in the split 48 bit fields */
2898 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
2899 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
2900 		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
2901 	} else {
2902 		/*
2903 		 * i_blocks should be represented in a 48 bit variable
2904 		 * as a multiple of the file system block size
2905 		 */
2906 		err = ext4_update_rocompat_feature(handle, sb,
2907 					    EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
2908 		if (err)
2909 			goto  err_out;
2910 		ei->i_flags |= EXT4_HUGE_FILE_FL;
2911 		/* i_blocks is stored in units of the file system block size */
2912 		i_blocks = i_blocks >> (inode->i_blkbits - 9);
2913 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
2914 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
2915 	}
2916 err_out:
2917 	return err;
2918 }
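
/*
 * Editorial note (not part of the original source): an i_blocks value of
 * 0x100000000 512-byte units (2 TiB of charged blocks) no longer fits in
 * 32 bits, so the code above stores i_blocks_lo = 0 and i_blocks_high = 1
 * under the HUGE_FILE ro-compat feature; only when the count exceeds 48
 * bits is it converted to filesystem-block units and EXT4_HUGE_FILE_FL
 * set, which ext4_inode_blocks() undoes when the inode is read back in.
 */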
2919 
2920 /*
2921  * Post the struct inode info into an on-disk inode location in the
2922  * buffer-cache.  This gobbles the caller's reference to the
2923  * buffer_head in the inode location struct.
2924  *
2925  * The caller must have write access to iloc->bh.
2926  */
2927 static int ext4_do_update_inode(handle_t *handle,
2928 				struct inode *inode,
2929 				struct ext4_iloc *iloc)
2930 {
2931 	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
2932 	struct ext4_inode_info *ei = EXT4_I(inode);
2933 	struct buffer_head *bh = iloc->bh;
2934 	int err = 0, rc, block;
2935 
2936 	/* For fields not tracked in the in-memory inode,
2937 	 * initialise them to zero for new inodes. */
2938 	if (ei->i_state & EXT4_STATE_NEW)
2939 		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
2940 
2941 	ext4_get_inode_flags(ei);
2942 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
2943 	if(!(test_opt(inode->i_sb, NO_UID32))) {
2944 		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
2945 		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
2946 /*
2947  * Fix up interoperability with old kernels. Otherwise, old inodes get
2948  * re-used with the upper 16 bits of the uid/gid intact
2949  */
2950 		if(!ei->i_dtime) {
2951 			raw_inode->i_uid_high =
2952 				cpu_to_le16(high_16_bits(inode->i_uid));
2953 			raw_inode->i_gid_high =
2954 				cpu_to_le16(high_16_bits(inode->i_gid));
2955 		} else {
2956 			raw_inode->i_uid_high = 0;
2957 			raw_inode->i_gid_high = 0;
2958 		}
2959 	} else {
2960 		raw_inode->i_uid_low =
2961 			cpu_to_le16(fs_high2lowuid(inode->i_uid));
2962 		raw_inode->i_gid_low =
2963 			cpu_to_le16(fs_high2lowgid(inode->i_gid));
2964 		raw_inode->i_uid_high = 0;
2965 		raw_inode->i_gid_high = 0;
2966 	}
2967 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
2968 
2969 	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
2970 	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
2971 	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
2972 	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
2973 
2974 	if (ext4_inode_blocks_set(handle, raw_inode, ei))
2975 		goto out_brelse;
2976 	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
2977 	/* clear the migrate flag in the raw_inode */
2978 	raw_inode->i_flags = cpu_to_le32(ei->i_flags & ~EXT4_EXT_MIGRATE);
2979 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
2980 	    cpu_to_le32(EXT4_OS_HURD))
2981 		raw_inode->i_file_acl_high =
2982 			cpu_to_le16(ei->i_file_acl >> 32);
2983 	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
2984 	ext4_isize_set(raw_inode, ei->i_disksize);
2985 	if (ei->i_disksize > 0x7fffffffULL) {
2986 		struct super_block *sb = inode->i_sb;
2987 		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
2988 				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
2989 				EXT4_SB(sb)->s_es->s_rev_level ==
2990 				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
2991 			/* If this is the first large file
2992 			 * created, add a flag to the superblock.
2993 			 */
2994 			err = ext4_journal_get_write_access(handle,
2995 					EXT4_SB(sb)->s_sbh);
2996 			if (err)
2997 				goto out_brelse;
2998 			ext4_update_dynamic_rev(sb);
2999 			EXT4_SET_RO_COMPAT_FEATURE(sb,
3000 					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
3001 			sb->s_dirt = 1;
3002 			handle->h_sync = 1;
3003 			err = ext4_journal_dirty_metadata(handle,
3004 					EXT4_SB(sb)->s_sbh);
3005 		}
3006 	}
3007 	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
3008 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
3009 		if (old_valid_dev(inode->i_rdev)) {
3010 			raw_inode->i_block[0] =
3011 				cpu_to_le32(old_encode_dev(inode->i_rdev));
3012 			raw_inode->i_block[1] = 0;
3013 		} else {
3014 			raw_inode->i_block[0] = 0;
3015 			raw_inode->i_block[1] =
3016 				cpu_to_le32(new_encode_dev(inode->i_rdev));
3017 			raw_inode->i_block[2] = 0;
3018 		}
3019 	} else for (block = 0; block < EXT4_N_BLOCKS; block++)
3020 		raw_inode->i_block[block] = ei->i_data[block];
3021 
3022 	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
3023 	if (ei->i_extra_isize) {
3024 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
3025 			raw_inode->i_version_hi =
3026 			cpu_to_le32(inode->i_version >> 32);
3027 		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
3028 	}
3029 
3030 
3031 	BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
3032 	rc = ext4_journal_dirty_metadata(handle, bh);
3033 	if (!err)
3034 		err = rc;
3035 	ei->i_state &= ~EXT4_STATE_NEW;
3036 
3037 out_brelse:
3038 	brelse (bh);
3039 	ext4_std_error(inode->i_sb, err);
3040 	return err;
3041 }
3042 
3043 /*
3044  * ext4_write_inode()
3045  *
3046  * We are called from a few places:
3047  *
3048  * - Within generic_file_write() for O_SYNC files.
3049  *   Here, there will be no transaction running. We wait for any running
3050  *   transaction to commit.
3051  *
3052  * - Within sys_sync(), kupdate and such.
3053  *   We wait on commit, if told to.
3054  *
3055  * - Within prune_icache() (PF_MEMALLOC == true)
3056  *   Here we simply return.  We can't afford to block kswapd on the
3057  *   journal commit.
3058  *
3059  * In all cases it is actually safe for us to return without doing anything,
3060  * because the inode has been copied into a raw inode buffer in
3061  * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
3062  * knfsd.
3063  *
3064  * Note that we are absolutely dependent upon all inode dirtiers doing the
3065  * right thing: they *must* call mark_inode_dirty() after dirtying info in
3066  * which we are interested.
3067  *
3068  * It would be a bug for them to not do this.  The code:
3069  *
3070  *	mark_inode_dirty(inode)
3071  *	stuff();
3072  *	inode->i_size = expr;
3073  *
3074  * is in error because a kswapd-driven write_inode() could occur while
3075  * `stuff()' is running, and the new i_size will be lost.  Plus the inode
3076  * will no longer be on the superblock's dirty inode list.
3077  */
3078 int ext4_write_inode(struct inode *inode, int wait)
3079 {
3080 	if (current->flags & PF_MEMALLOC)
3081 		return 0;
3082 
3083 	if (ext4_journal_current_handle()) {
3084 		jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
3085 		dump_stack();
3086 		return -EIO;
3087 	}
3088 
3089 	if (!wait)
3090 		return 0;
3091 
3092 	return ext4_force_commit(inode->i_sb);
3093 }
3094 
3095 /*
3096  * ext4_setattr()
3097  *
3098  * Called from notify_change.
3099  *
3100  * We want to trap VFS attempts to truncate the file as soon as
3101  * possible.  In particular, we want to make sure that when the VFS
3102  * shrinks i_size, we put the inode on the orphan list and modify
3103  * i_disksize immediately, so that during the subsequent flushing of
3104  * dirty pages and freeing of disk blocks, we can guarantee that any
3105  * commit will leave the blocks being flushed in an unused state on
3106  * disk.  (On recovery, the inode will get truncated and the blocks will
3107  * be freed, so we have a strong guarantee that no future commit will
3108  * leave these blocks visible to the user.)
3109  *
3110  * Called with inode->sem down.
3111  */
3112 int ext4_setattr(struct dentry *dentry, struct iattr *attr)
3113 {
3114 	struct inode *inode = dentry->d_inode;
3115 	int error, rc = 0;
3116 	const unsigned int ia_valid = attr->ia_valid;
3117 
3118 	error = inode_change_ok(inode, attr);
3119 	if (error)
3120 		return error;
3121 
3122 	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
3123 		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
3124 		handle_t *handle;
3125 
3126 		/* (user+group)*(old+new) structure, inode write (sb,
3127 		 * inode block, ? - but truncate inode update has it) */
3128 		handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+
3129 					EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
3130 		if (IS_ERR(handle)) {
3131 			error = PTR_ERR(handle);
3132 			goto err_out;
3133 		}
3134 		error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
3135 		if (error) {
3136 			ext4_journal_stop(handle);
3137 			return error;
3138 		}
3139 		/* Update corresponding info in inode so that everything is in
3140 		 * one transaction */
3141 		if (attr->ia_valid & ATTR_UID)
3142 			inode->i_uid = attr->ia_uid;
3143 		if (attr->ia_valid & ATTR_GID)
3144 			inode->i_gid = attr->ia_gid;
3145 		error = ext4_mark_inode_dirty(handle, inode);
3146 		ext4_journal_stop(handle);
3147 	}
3148 
3149 	if (attr->ia_valid & ATTR_SIZE) {
3150 		if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
3151 			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3152 
3153 			if (attr->ia_size > sbi->s_bitmap_maxbytes) {
3154 				error = -EFBIG;
3155 				goto err_out;
3156 			}
3157 		}
3158 	}
3159 
3160 	if (S_ISREG(inode->i_mode) &&
3161 	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
3162 		handle_t *handle;
3163 
3164 		handle = ext4_journal_start(inode, 3);
3165 		if (IS_ERR(handle)) {
3166 			error = PTR_ERR(handle);
3167 			goto err_out;
3168 		}
3169 
3170 		error = ext4_orphan_add(handle, inode);
3171 		EXT4_I(inode)->i_disksize = attr->ia_size;
3172 		rc = ext4_mark_inode_dirty(handle, inode);
3173 		if (!error)
3174 			error = rc;
3175 		ext4_journal_stop(handle);
3176 	}
3177 
3178 	rc = inode_setattr(inode, attr);
3179 
3180 	/* If inode_setattr's call to ext4_truncate failed to get a
3181 	 * transaction handle at all, we need to clean up the in-core
3182 	 * orphan list manually. */
3183 	if (inode->i_nlink)
3184 		ext4_orphan_del(NULL, inode);
3185 
3186 	if (!rc && (ia_valid & ATTR_MODE))
3187 		rc = ext4_acl_chmod(inode);
3188 
3189 err_out:
3190 	ext4_std_error(inode->i_sb, error);
3191 	if (!error)
3192 		error = rc;
3193 	return error;
3194 }
3195 
3196 
3197 /*
3198  * How many blocks doth make a writepage()?
3199  *
3200  * With N blocks per page, it may be:
3201  * N data blocks
3202  * 2 indirect blocks
3203  * 2 dindirect blocks
3204  * 1 tindirect block
3205  * N+5 bitmap blocks (from the above)
3206  * N+5 group descriptor summary blocks
3207  * 1 inode block
3208  * 1 superblock.
3209  * 2 * EXT4_SINGLEDATA_TRANS_BLOCKS for the quota files
3210  *
3211  * 3 * (N + 5) + 2 + 2 * EXT4_SINGLEDATA_TRANS_BLOCKS
3212  *
3213  * With ordered or writeback data it's the same, less the N data blocks.
3214  *
3215  * If the inode's direct blocks can hold an integral number of pages then a
3216  * page cannot straddle two indirect blocks, and we can only touch one indirect
3217  * and dindirect block, and the "5" above becomes "3".
3218  *
3219  * This still overestimates under most circumstances.  If we were to pass the
3220  * start and end offsets in here as well we could do block_to_path() on each
3221  * block and work out the exact number of indirects which are touched.  Pah.
3222  */
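/*
 * Worked example (editorial, not from the original source): with 4K pages
 * and 1K blocks, bpp == 4 and EXT4_NDIR_BLOCKS (12) divides evenly, so
 * indirects == 3 and a data=journal writepage reserves 3 * (4 + 3) + 2 =
 * 23 credits, plus 2 * EXT4_QUOTA_TRANS_BLOCKS when quota is enabled.
 */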
3223 
3224 int ext4_writepage_trans_blocks(struct inode *inode)
3225 {
3226 	int bpp = ext4_journal_blocks_per_page(inode);
3227 	int indirects = (EXT4_NDIR_BLOCKS % bpp) ? 5 : 3;
3228 	int ret;
3229 
3230 	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
3231 		return ext4_ext_writepage_trans_blocks(inode, bpp);
3232 
3233 	if (ext4_should_journal_data(inode))
3234 		ret = 3 * (bpp + indirects) + 2;
3235 	else
3236 		ret = 2 * (bpp + indirects) + 2;
3237 
3238 #ifdef CONFIG_QUOTA
3239 	/* We know that the structure was already allocated during DQUOT_INIT so
3240 	 * we will be updating only the data blocks + inodes */
3241 	ret += 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
3242 #endif
3243 
3244 	return ret;
3245 }
3246 
3247 /*
3248  * The caller must have previously called ext4_reserve_inode_write().
3249  * Given this, we know that the caller already has write access to iloc->bh.
3250  */
3251 int ext4_mark_iloc_dirty(handle_t *handle,
3252 		struct inode *inode, struct ext4_iloc *iloc)
3253 {
3254 	int err = 0;
3255 
3256 	if (test_opt(inode->i_sb, I_VERSION))
3257 		inode_inc_iversion(inode);
3258 
3259 	/* the do_update_inode consumes one bh->b_count */
3260 	get_bh(iloc->bh);
3261 
3262 	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
3263 	err = ext4_do_update_inode(handle, inode, iloc);
3264 	put_bh(iloc->bh);
3265 	return err;
3266 }
3267 
3268 /*
3269  * On success, we end up with an outstanding reference count against
3270  * iloc->bh.  This _must_ be cleaned up later.
3271  */
3272 
3273 int
3274 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
3275 			 struct ext4_iloc *iloc)
3276 {
3277 	int err = 0;
3278 	if (handle) {
3279 		err = ext4_get_inode_loc(inode, iloc);
3280 		if (!err) {
3281 			BUFFER_TRACE(iloc->bh, "get_write_access");
3282 			err = ext4_journal_get_write_access(handle, iloc->bh);
3283 			if (err) {
3284 				brelse(iloc->bh);
3285 				iloc->bh = NULL;
3286 			}
3287 		}
3288 	}
3289 	ext4_std_error(inode->i_sb, err);
3290 	return err;
3291 }
3292 
3293 /*
3294  * Expand an inode by new_extra_isize bytes.
3295  * Returns 0 on success or negative error number on failure.
3296  */
3297 static int ext4_expand_extra_isize(struct inode *inode,
3298 				   unsigned int new_extra_isize,
3299 				   struct ext4_iloc iloc,
3300 				   handle_t *handle)
3301 {
3302 	struct ext4_inode *raw_inode;
3303 	struct ext4_xattr_ibody_header *header;
3304 	struct ext4_xattr_entry *entry;
3305 
3306 	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
3307 		return 0;
3308 
3309 	raw_inode = ext4_raw_inode(&iloc);
3310 
3311 	header = IHDR(inode, raw_inode);
3312 	entry = IFIRST(header);
3313 
3314 	/* No extended attributes present */
3315 	if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
3316 		header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
3317 		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
3318 			new_extra_isize);
3319 		EXT4_I(inode)->i_extra_isize = new_extra_isize;
3320 		return 0;
3321 	}
3322 
3323 	/* try to expand with EAs present */
3324 	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
3325 					  raw_inode, handle);
3326 }
3327 
3328 /*
3329  * What we do here is to mark the in-core inode as clean with respect to inode
3330  * dirtiness (it may still be data-dirty).
3331  * This means that the in-core inode may be reaped by prune_icache
3332  * without having to perform any I/O.  This is a very good thing,
3333  * because *any* task may call prune_icache - even ones which
3334  * have a transaction open against a different journal.
3335  *
3336  * Is this cheating?  Not really.  Sure, we haven't written the
3337  * inode out, but prune_icache isn't a user-visible syncing function.
3338  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
3339  * we start and wait on commits.
3340  *
3341  * Is this efficient/effective?  Well, we're being nice to the system
3342  * by cleaning up our inodes proactively so they can be reaped
3343  * without I/O.  But we are potentially leaving up to five seconds'
3344  * worth of inodes floating about which prune_icache wants us to
3345  * write out.  One way to fix that would be to get prune_icache()
3346  * to do a write_super() to free up some memory.  It has the desired
3347  * effect.
3348  */
3349 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
3350 {
3351 	struct ext4_iloc iloc;
3352 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3353 	static unsigned int mnt_count;
3354 	int err, ret;
3355 
3356 	might_sleep();
3357 	err = ext4_reserve_inode_write(handle, inode, &iloc);
3358 	if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
3359 	    !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
3360 		/*
3361 		 * We need extra buffer credits since we may write into EA block
3362 		 * with this same handle. If journal_extend fails, then it will
3363 		 * only result in a minor loss of functionality for that inode.
3364 		 * If this is felt to be critical, then e2fsck should be run to
3365 		 * force a large enough s_min_extra_isize.
3366 		 */
3367 		if ((jbd2_journal_extend(handle,
3368 			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
3369 			ret = ext4_expand_extra_isize(inode,
3370 						      sbi->s_want_extra_isize,
3371 						      iloc, handle);
3372 			if (ret) {
3373 				EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
3374 				if (mnt_count !=
3375 					le16_to_cpu(sbi->s_es->s_mnt_count)) {
3376 					ext4_warning(inode->i_sb, __func__,
3377 					"Unable to expand inode %lu. Delete"
3378 					" some EAs or run e2fsck.",
3379 					inode->i_ino);
3380 					mnt_count =
3381 					  le16_to_cpu(sbi->s_es->s_mnt_count);
3382 				}
3383 			}
3384 		}
3385 	}
3386 	if (!err)
3387 		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
3388 	return err;
3389 }
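
/*
 * Illustrative sketch only (not part of this file): the common calling
 * pattern is to update in-core fields under an open handle and then call
 * ext4_mark_inode_dirty() so the change rides along with the current
 * transaction.  The helper name is made up for illustration.
 */
#if 0
static int example_update_mtime(struct inode *inode)
{
	handle_t *handle;
	int err;

	handle = ext4_journal_start(inode, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	inode->i_mtime = ext4_current_time(inode);
	err = ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
	return err;
}
#endif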
3390 
3391 /*
3392  * ext4_dirty_inode() is called from __mark_inode_dirty()
3393  *
3394  * We're really interested in the case where a file is being extended.
3395  * i_size has been changed by generic_commit_write() and we thus need
3396  * to include the updated inode in the current transaction.
3397  *
3398  * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
3399  * are allocated to the file.
3400  *
3401  * If the inode is marked synchronous, we don't honour that here - doing
3402  * so would cause a commit on atime updates, which we don't bother doing.
3403  * We handle synchronous inodes at the highest possible level.
3404  */
3405 void ext4_dirty_inode(struct inode *inode)
3406 {
3407 	handle_t *current_handle = ext4_journal_current_handle();
3408 	handle_t *handle;
3409 
3410 	handle = ext4_journal_start(inode, 2);
3411 	if (IS_ERR(handle))
3412 		goto out;
3413 	if (current_handle &&
3414 		current_handle->h_transaction != handle->h_transaction) {
3415 		/* This task has a transaction open against a different fs */
3416 		printk(KERN_EMERG "%s: transactions do not match!\n",
3417 		       __func__);
3418 	} else {
3419 		jbd_debug(5, "marking dirty.  outer handle=%p\n",
3420 				current_handle);
3421 		ext4_mark_inode_dirty(handle, inode);
3422 	}
3423 	ext4_journal_stop(handle);
3424 out:
3425 	return;
3426 }
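
/*
 * Illustrative note (assumption: this matches the ext4_sops table in
 * super.c): ext4_dirty_inode() is not called directly; the VFS reaches
 * it through super_operations when __mark_inode_dirty() runs, roughly:
 */
#if 0
static const struct super_operations example_sops = {
	/* ... */
	.dirty_inode	= ext4_dirty_inode,
	/* ... */
};
#endif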
3427 
3428 #if 0
3429 /*
3430  * Bind an inode's backing buffer_head into this transaction, to prevent
3431  * it from being flushed to disk early.  Unlike
3432  * ext4_reserve_inode_write, this leaves behind no bh reference and
3433  * returns no iloc structure, so the caller needs to repeat the iloc
3434  * lookup to mark the inode dirty later.
3435  */
3436 static int ext4_pin_inode(handle_t *handle, struct inode *inode)
3437 {
3438 	struct ext4_iloc iloc;
3439 
3440 	int err = 0;
3441 	if (handle) {
3442 		err = ext4_get_inode_loc(inode, &iloc);
3443 		if (!err) {
3444 			BUFFER_TRACE(iloc.bh, "get_write_access");
3445 			err = jbd2_journal_get_write_access(handle, iloc.bh);
3446 			if (!err)
3447 				err = ext4_journal_dirty_metadata(handle,
3448 								  iloc.bh);
3449 			brelse(iloc.bh);
3450 		}
3451 	}
3452 	ext4_std_error(inode->i_sb, err);
3453 	return err;
3454 }
3455 #endif
3456 
3457 int ext4_change_inode_journal_flag(struct inode *inode, int val)
3458 {
3459 	journal_t *journal;
3460 	handle_t *handle;
3461 	int err;
3462 
3463 	/*
3464 	 * We have to be very careful here: changing a data block's
3465 	 * journaling status dynamically is dangerous.  If we write a
3466 	 * data block to the journal, change the status and then delete
3467 	 * that block, we risk forgetting to revoke the old log record
3468 	 * from the journal and so a subsequent replay can corrupt data.
3469 	 * So, first we make sure that the journal is empty and that
3470 	 * nobody is changing anything.
3471 	 */
3472 
3473 	journal = EXT4_JOURNAL(inode);
3474 	if (is_journal_aborted(journal))
3475 		return -EROFS;
3476 
3477 	jbd2_journal_lock_updates(journal);
3478 	jbd2_journal_flush(journal);
3479 
3480 	/*
3481 	 * OK, there are no updates running now, and all cached data is
3482 	 * synced to disk.  We are now in a completely consistent state
3483 	 * which doesn't have anything in the journal, and we know that
3484 	 * no filesystem updates are running, so it is safe to modify
3485 	 * the inode's in-core data-journaling state flag now.
3486 	 */
3487 
3488 	if (val)
3489 		EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
3490 	else
3491 		EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
3492 	ext4_set_aops(inode);
3493 
3494 	jbd2_journal_unlock_updates(journal);
3495 
3496 	/* Finally we can mark the inode as dirty. */
3497 
3498 	handle = ext4_journal_start(inode, 1);
3499 	if (IS_ERR(handle))
3500 		return PTR_ERR(handle);
3501 
3502 	err = ext4_mark_inode_dirty(handle, inode);
3503 	handle->h_sync = 1;
3504 	ext4_journal_stop(handle);
3505 	ext4_std_error(inode->i_sb, err);
3506 
3507 	return err;
3508 }
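
/*
 * Illustrative sketch only (not part of this file): this function is
 * normally reached when the per-inode data-journaling flag is toggled,
 * e.g. from the EXT4_IOC_SETFLAGS ioctl path.  A minimal caller would
 * look roughly like this (the helper name is made up for illustration).
 */
#if 0
static int example_set_journal_data(struct inode *inode, int enable)
{
	/* Flushes the journal, flips EXT4_JOURNAL_DATA_FL and switches
	 * the address_space operations via ext4_set_aops(). */
	return ext4_change_inode_journal_flag(inode, enable);
}
#endif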
3509