1 /*
2  *  linux/fs/ext4/inode.c
3  *
4  * Copyright (C) 1992, 1993, 1994, 1995
5  * Remy Card (card@masi.ibp.fr)
6  * Laboratoire MASI - Institut Blaise Pascal
7  * Universite Pierre et Marie Curie (Paris VI)
8  *
9  *  from
10  *
11  *  linux/fs/minix/inode.c
12  *
13  *  Copyright (C) 1991, 1992  Linus Torvalds
14  *
15  *  Goal-directed block allocation by Stephen Tweedie
16  *	(sct@redhat.com), 1993, 1998
17  *  Big-endian to little-endian byte-swapping/bitmaps by
18  *        David S. Miller (davem@caip.rutgers.edu), 1995
19  *  64-bit file support on 64-bit platforms by Jakub Jelinek
20  *	(jj@sunsite.ms.mff.cuni.cz)
21  *
22  *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
23  */
24 
25 #include <linux/module.h>
26 #include <linux/fs.h>
27 #include <linux/time.h>
28 #include <linux/ext4_jbd2.h>
29 #include <linux/jbd2.h>
30 #include <linux/highuid.h>
31 #include <linux/pagemap.h>
32 #include <linux/quotaops.h>
33 #include <linux/string.h>
34 #include <linux/buffer_head.h>
35 #include <linux/writeback.h>
36 #include <linux/mpage.h>
37 #include <linux/uio.h>
38 #include <linux/bio.h>
39 #include "xattr.h"
40 #include "acl.h"
41 
42 /*
43  * Test whether an inode is a fast symlink.
44  */
45 static int ext4_inode_is_fast_symlink(struct inode *inode)
46 {
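	/* i_blocks is counted in 512-byte units; if the inode has an
	 * external xattr block (i_file_acl), discount it, so that a fast
	 * symlink is one whose target lives entirely in i_data. */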
47 	int ea_blocks = EXT4_I(inode)->i_file_acl ?
48 		(inode->i_sb->s_blocksize >> 9) : 0;
49 
50 	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
51 }
52 
53 /*
54  * The ext4 forget function must perform a revoke if we are freeing data
55  * which has been journaled.  Metadata (e.g. indirect blocks) must be
56  * revoked in all cases.
57  *
58  * "bh" may be NULL: a metadata block may have been freed from memory
59  * but there may still be a record of it in the journal, and that record
60  * still needs to be revoked.
61  */
62 int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
63 			struct buffer_head *bh, ext4_fsblk_t blocknr)
64 {
65 	int err;
66 
67 	might_sleep();
68 
69 	BUFFER_TRACE(bh, "enter");
70 
71 	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
72 		  "data mode %lx\n",
73 		  bh, is_metadata, inode->i_mode,
74 		  test_opt(inode->i_sb, DATA_FLAGS));
75 
76 	/* Never use the revoke function if we are doing full data
77 	 * journaling: there is no need to, and a V1 superblock won't
78 	 * support it.  Otherwise, only skip the revoke on un-journaled
79 	 * data blocks. */
80 
81 	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
82 	    (!is_metadata && !ext4_should_journal_data(inode))) {
83 		if (bh) {
84 			BUFFER_TRACE(bh, "call jbd2_journal_forget");
85 			return ext4_journal_forget(handle, bh);
86 		}
87 		return 0;
88 	}
89 
90 	/*
91 	 * data!=journal && (is_metadata || should_journal_data(inode))
92 	 */
93 	BUFFER_TRACE(bh, "call ext4_journal_revoke");
94 	err = ext4_journal_revoke(handle, blocknr, bh);
95 	if (err)
96 		ext4_abort(inode->i_sb, __FUNCTION__,
97 			   "error %d when attempting revoke", err);
98 	BUFFER_TRACE(bh, "exit");
99 	return err;
100 }
101 
102 /*
103  * Work out how many blocks we need to proceed with the next chunk of a
104  * truncate transaction.
105  */
106 static unsigned long blocks_for_truncate(struct inode *inode)
107 {
108 	ext4_lblk_t needed;
109 
110 	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
111 
112 	/* Give ourselves just enough room to cope with inodes in which
113 	 * i_blocks is corrupt: we've seen disk corruptions in the past
114 	 * which resulted in random data in an inode which looked enough
115 	 * like a regular file for ext4 to try to delete it.  Things
116 	 * will go a bit crazy if that happens, but at least we should
117 	 * try not to panic the whole kernel. */
118 	if (needed < 2)
119 		needed = 2;
120 
121 	/* But we need to bound the transaction so we don't overflow the
122 	 * journal. */
123 	if (needed > EXT4_MAX_TRANS_DATA)
124 		needed = EXT4_MAX_TRANS_DATA;
125 
126 	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
127 }
128 
129 /*
130  * Truncate transactions can be complex and absolutely huge.  So we need to
131  * be able to restart the transaction at a convenient checkpoint to make
132  * sure we don't overflow the journal.
133  *
134  * start_transaction gets us a new handle for a truncate transaction,
135  * and extend_transaction tries to extend the existing one a bit.  If
136  * extend fails, we need to propagate the failure up and restart the
137  * transaction in the top-level truncate loop. --sct
138  */
139 static handle_t *start_transaction(struct inode *inode)
140 {
141 	handle_t *result;
142 
143 	result = ext4_journal_start(inode, blocks_for_truncate(inode));
144 	if (!IS_ERR(result))
145 		return result;
146 
147 	ext4_std_error(inode->i_sb, PTR_ERR(result));
148 	return result;
149 }
150 
151 /*
152  * Try to extend this transaction for the purposes of truncation.
153  *
154  * Returns 0 if we managed to create more room.  If we can't create more
155  * room, the transaction must be restarted, and we return 1.
156  */
157 static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
158 {
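	/* if the handle still has more credits than the reserved floor,
	 * there is no need to extend */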
159 	if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS)
160 		return 0;
161 	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
162 		return 0;
163 	return 1;
164 }
165 
166 /*
167  * Restart the transaction associated with *handle.  This does a commit,
168  * so before we call here everything must be consistently dirtied against
169  * this transaction.
170  */
171 static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
172 {
173 	jbd_debug(2, "restarting handle %p\n", handle);
174 	return ext4_journal_restart(handle, blocks_for_truncate(inode));
175 }
176 
177 /*
178  * Called at the last iput() if i_nlink is zero.
179  */
180 void ext4_delete_inode(struct inode *inode)
181 {
182 	handle_t *handle;
183 
184 	truncate_inode_pages(&inode->i_data, 0);
185 
186 	if (is_bad_inode(inode))
187 		goto no_delete;
188 
189 	handle = start_transaction(inode);
190 	if (IS_ERR(handle)) {
191 		/*
192 		 * If we're going to skip the normal cleanup, we still need to
193 		 * make sure that the in-core orphan linked list is properly
194 		 * cleaned up.
195 		 */
196 		ext4_orphan_del(NULL, inode);
197 		goto no_delete;
198 	}
199 
200 	if (IS_SYNC(inode))
201 		handle->h_sync = 1;
202 	inode->i_size = 0;
203 	if (inode->i_blocks)
204 		ext4_truncate(inode);
205 	/*
206 	 * Kill off the orphan record which ext4_truncate created.
207 	 * AKPM: I think this can be inside the above `if'.
208 	 * Note that ext4_orphan_del() has to be able to cope with the
209 	 * deletion of a non-existent orphan - this is because we don't
210 	 * know if ext4_truncate() actually created an orphan record.
211 	 * (Well, we could do this if we need to, but heck - it works)
212 	 */
213 	ext4_orphan_del(handle, inode);
214 	EXT4_I(inode)->i_dtime	= get_seconds();
215 
216 	/*
217 	 * One subtle ordering requirement: if anything has gone wrong
218 	 * (transaction abort, IO errors, whatever), then we can still
219 	 * do these next steps (the fs will already have been marked as
220 	 * having errors), but we can't free the inode if the mark_dirty
221 	 * fails.
222 	 */
223 	if (ext4_mark_inode_dirty(handle, inode))
224 		/* If that failed, just do the required in-core inode clear. */
225 		clear_inode(inode);
226 	else
227 		ext4_free_inode(handle, inode);
228 	ext4_journal_stop(handle);
229 	return;
230 no_delete:
231 	clear_inode(inode);	/* We must guarantee clearing of inode... */
232 }
233 
234 typedef struct {
235 	__le32	*p;
236 	__le32	key;
237 	struct buffer_head *bh;
238 } Indirect;
239 
240 static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
241 {
242 	p->key = *(p->p = v);
243 	p->bh = bh;
244 }
245 
246 /**
247  *	ext4_block_to_path - parse the block number into array of offsets
248  *	@inode: inode in question (we are only interested in its superblock)
249  *	@i_block: block number to be parsed
250  *	@offsets: array to store the offsets in
251  *	@boundary: set this non-zero if the referred-to block is likely to be
252  *	       followed (on disk) by an indirect block.
253  *
254  *	To store the locations of a file's data ext4 uses a data structure
255  *	common to UNIX filesystems - a tree of pointers anchored in the inode,
256  *	with data blocks at the leaves and indirect blocks in intermediate
257  *	nodes. This function translates the block number into a path in that
258  *	tree - the return value is the path length and @offsets[n] is the offset
259  *	of the pointer to the (n+1)th node in the nth one. If @i_block is out of
260  *	range (negative or too large) a warning is printed and zero is returned.
261  *
262  *	Note: function doesn't find node addresses, so no IO is needed. All
263  *	we need to know is the capacity of indirect blocks (taken from the
264  *	inode->i_sb).
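 *
 *	For example, with 4KB blocks (1024 pointers per indirect block),
 *	logical block 12 maps to the two-element path {EXT4_IND_BLOCK, 0},
 *	while logical block 1036 maps to {EXT4_DIND_BLOCK, 0, 0}.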
265  */
266 
267 /*
268  * Portability note: the last comparison (check that we fit into triple
269  * indirect block) is spelled differently, because otherwise on an
270  * architecture with 32-bit longs and 8Kb pages we might get into trouble
271  * if our filesystem had 8Kb blocks. We might use long long, but that would
272  * kill us on x86. Oh, well, at least the sign propagation does not matter -
273  * i_block would have to be negative in the very beginning, so we would not
274  * get there at all.
275  */
276 
277 static int ext4_block_to_path(struct inode *inode,
278 			ext4_lblk_t i_block,
279 			ext4_lblk_t offsets[4], int *boundary)
280 {
281 	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
282 	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
283 	const long direct_blocks = EXT4_NDIR_BLOCKS,
284 		indirect_blocks = ptrs,
285 		double_blocks = (1 << (ptrs_bits * 2));
286 	int n = 0;
287 	int final = 0;
288 
289 	if (i_block < 0) {
290 		ext4_warning(inode->i_sb, "ext4_block_to_path", "block < 0");
291 	} else if (i_block < direct_blocks) {
292 		offsets[n++] = i_block;
293 		final = direct_blocks;
294 	} else if ((i_block -= direct_blocks) < indirect_blocks) {
295 		offsets[n++] = EXT4_IND_BLOCK;
296 		offsets[n++] = i_block;
297 		final = ptrs;
298 	} else if ((i_block -= indirect_blocks) < double_blocks) {
299 		offsets[n++] = EXT4_DIND_BLOCK;
300 		offsets[n++] = i_block >> ptrs_bits;
301 		offsets[n++] = i_block & (ptrs - 1);
302 		final = ptrs;
303 	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
304 		offsets[n++] = EXT4_TIND_BLOCK;
305 		offsets[n++] = i_block >> (ptrs_bits * 2);
306 		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
307 		offsets[n++] = i_block & (ptrs - 1);
308 		final = ptrs;
309 	} else {
310 		ext4_warning(inode->i_sb, "ext4_block_to_path",
311 				"block %lu > max",
312 				i_block + direct_blocks +
313 				indirect_blocks + double_blocks);
314 	}
315 	if (boundary)
316 		*boundary = final - 1 - (i_block & (ptrs - 1));
317 	return n;
318 }
319 
320 /**
321  *	ext4_get_branch - read the chain of indirect blocks leading to data
322  *	@inode: inode in question
323  *	@depth: depth of the chain (1 - direct pointer, etc.)
324  *	@offsets: offsets of pointers in inode/indirect blocks
325  *	@chain: place to store the result
326  *	@err: here we store the error value
327  *
328  *	Function fills the array of triples <key, p, bh> and returns %NULL
329  *	if everything went OK or the pointer to the last filled triple
330  *	(incomplete one) otherwise. Upon the return chain[i].key contains
331  *	the number of (i+1)-th block in the chain (as it is stored in memory,
332  *	i.e. little-endian 32-bit), chain[i].p contains the address of that
333  *	number (it points into struct inode for i==0 and into the bh->b_data
334  *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
335  *	block for i>0 and NULL for i==0. In other words, it holds the block
336  *	numbers of the chain, addresses they were taken from (and where we can
337  *	verify that chain did not change) and buffer_heads hosting these
338  *	numbers.
339  *
340  *	Function stops when it stumbles upon zero pointer (absent block)
341  *		(pointer to last triple returned, *@err == 0)
342  *	or when it gets an IO error reading an indirect block
343  *		(ditto, *@err == -EIO)
344  *	or when it reads all @depth-1 indirect blocks successfully and finds
345  *	the whole chain, all the way to the data (returns %NULL, *err == 0).
346  *
347  *      Needs to be called with
348  *      down_read(&EXT4_I(inode)->i_data_sem)
349  */
350 static Indirect *ext4_get_branch(struct inode *inode, int depth,
351 				 ext4_lblk_t  *offsets,
352 				 Indirect chain[4], int *err)
353 {
354 	struct super_block *sb = inode->i_sb;
355 	Indirect *p = chain;
356 	struct buffer_head *bh;
357 
358 	*err = 0;
359 	/* i_data is not going away, no lock needed */
360 	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
361 	if (!p->key)
362 		goto no_block;
363 	while (--depth) {
364 		bh = sb_bread(sb, le32_to_cpu(p->key));
365 		if (!bh)
366 			goto failure;
367 		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
369 		if (!p->key)
370 			goto no_block;
371 	}
372 	return NULL;
373 
374 failure:
375 	*err = -EIO;
376 no_block:
377 	return p;
378 }
379 
380 /**
381  *	ext4_find_near - find a place for allocation with sufficient locality
382  *	@inode: owner
383  *	@ind: descriptor of indirect block.
384  *
385  *	This function returns the preferred place for block allocation.
386  *	It is used when heuristic for sequential allocation fails.
387  *	Rules are:
388  *	  + if there is a block to the left of our position - allocate near it.
389  *	  + if pointer will live in indirect block - allocate near that block.
390  *	  + if pointer will live in inode - allocate in the same
391  *	    cylinder group.
392  *
393  * In the latter case we colour the starting block by the caller's PID to
394  * prevent it from clashing with concurrent allocations for a different inode
395  * in the same block group.   The PID is used here so that functionally related
396  * files will be close-by on-disk.
397  *
398  *	Caller must make sure that @ind is valid and will stay that way.
399  */
400 static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
401 {
402 	struct ext4_inode_info *ei = EXT4_I(inode);
403 	__le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
404 	__le32 *p;
405 	ext4_fsblk_t bg_start;
406 	ext4_fsblk_t last_block;
407 	ext4_grpblk_t colour;
408 
409 	/* Try to find previous block */
410 	for (p = ind->p - 1; p >= start; p--) {
411 		if (*p)
412 			return le32_to_cpu(*p);
413 	}
414 
415 	/* No such thing, so let's try the location of the indirect block */
416 	if (ind->bh)
417 		return ind->bh->b_blocknr;
418 
419 	/*
420 	 * Is it going to be referred to from the inode itself? OK, just put it
421 	 * into the same cylinder group then.
422 	 */
423 	bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group);
424 	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
425 
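	/*
	 * Divide the group (or what is left of the filesystem, for the
	 * last group) into 16 slices and pick one based on the PID.
	 */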
426 	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
427 		colour = (current->pid % 16) *
428 			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
429 	else
430 		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
431 	return bg_start + colour;
432 }
433 
434 /**
435  *	ext4_find_goal - find a preferred place for allocation.
436  *	@inode: owner
437  *	@block:  block we want
438  *	@partial: pointer to the last triple within a chain
439  *
440  *	Normally this function finds the preferred place for block allocation
441  *	and returns it.
442  */
443 static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
444 		Indirect *partial)
445 {
446 	struct ext4_block_alloc_info *block_i;
447 
448 	block_i =  EXT4_I(inode)->i_block_alloc_info;
449 
450 	/*
451 	 * try the heuristic for sequential allocation,
452 	 * failing that at least try to get decent locality.
453 	 */
454 	if (block_i && (block == block_i->last_alloc_logical_block + 1)
455 		&& (block_i->last_alloc_physical_block != 0)) {
456 		return block_i->last_alloc_physical_block + 1;
457 	}
458 
459 	return ext4_find_near(inode, partial);
460 }
461 
462 /**
463  *	ext4_blks_to_allocate: Look up the block map and count the number
464  *	of direct blocks that need to be allocated for the given branch.
465  *
466  *	@branch: chain of indirect blocks
467  *	@k: number of blocks needed for indirect blocks
468  *	@blks: number of data blocks to be mapped.
469  *	@blocks_to_boundary:  the offset in the indirect block
470  *
471  *	return the total number of blocks to be allocated, including the
472  *	direct and indirect blocks.
473  */
474 static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
475 		int blocks_to_boundary)
476 {
477 	unsigned long count = 0;
478 
479 	/*
480 	 * Simple case: the [t,d]indirect block(s) have not been allocated yet,
481 	 * so clearly the blocks on that path have not been allocated either
482 	 */
483 	if (k > 0) {
484 		/* right now we don't handle cross boundary allocation */
485 		if (blks < blocks_to_boundary + 1)
486 			count += blks;
487 		else
488 			count += blocks_to_boundary + 1;
489 		return count;
490 	}
491 
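	/* the first direct block is always needed; then keep counting
	 * across contiguous unallocated blocks, up to the boundary */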
492 	count++;
493 	while (count < blks && count <= blocks_to_boundary &&
494 		le32_to_cpu(*(branch[0].p + count)) == 0) {
495 		count++;
496 	}
497 	return count;
498 }
499 
500 /**
501  *	ext4_alloc_blocks: allocate the multiple blocks needed for a branch
502  *	@indirect_blks: the number of blocks that need to be allocated for
503  *			indirect blocks
504  *
505  *	@new_blocks: on return it will store the new block numbers for
506  *	the indirect blocks(if needed) and the first direct block,
507  *	@blks:	on return it will store the total number of allocated
508  *		direct blocks
509  */
510 static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
511 			ext4_fsblk_t goal, int indirect_blks, int blks,
512 			ext4_fsblk_t new_blocks[4], int *err)
513 {
514 	int target, i;
515 	unsigned long count = 0;
516 	int index = 0;
517 	ext4_fsblk_t current_block = 0;
518 	int ret = 0;
519 
520 	/*
521 	 * Here we try to allocate the requested multiple blocks at once,
522 	 * on a best-effort basis.
523 	 * To build a branch, we should allocate blocks for
524 	 * the indirect blocks (if not allocated yet), and at least
525 	 * the first direct block of this branch.  That's the
526 	 * minimum number of blocks we need to allocate (required)
527 	 */
528 	target = blks + indirect_blks;
529 
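	/* retry until the allocation covers all the indirect blocks and
	 * at least one direct block */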
530 	while (1) {
531 		count = target;
532 		/* allocating blocks for indirect blocks and direct blocks */
533 		current_block = ext4_new_blocks(handle, inode, goal, &count, err);
534 		if (*err)
535 			goto failed_out;
536 
537 		target -= count;
538 		/* allocate blocks for indirect blocks */
539 		while (index < indirect_blks && count) {
540 			new_blocks[index++] = current_block++;
541 			count--;
542 		}
543 
544 		if (count > 0)
545 			break;
546 	}
547 
548 	/* save the new block number for the first direct block */
549 	new_blocks[index] = current_block;
550 
551 	/* total number of blocks allocated for direct blocks */
552 	ret = count;
553 	*err = 0;
554 	return ret;
555 failed_out:
556 	for (i = 0; i < index; i++)
557 		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
558 	return ret;
559 }
560 
561 /**
562  *	ext4_alloc_branch - allocate and set up a chain of blocks.
563  *	@inode: owner
564  *	@indirect_blks: number of allocated indirect blocks
565  *	@blks: number of allocated direct blocks
566  *	@offsets: offsets (in the blocks) to store the pointers to next.
567  *	@branch: place to store the chain in.
568  *
569  *	This function allocates blocks, zeroes out all but the last one,
570  *	links them into chain and (if we are synchronous) writes them to disk.
571  *	In other words, it prepares a branch that can be spliced onto the
572  *	inode. It stores the information about that chain in the branch[], in
573  *	the same format as ext4_get_branch() would do. We are calling it after
574  *	we had read the existing part of chain and partial points to the last
575  *	triple of that (one with zero ->key). Upon the exit we have the same
576  *	picture as after the successful ext4_get_block(), except that in one
577  *	place chain is disconnected - *branch->p is still zero (we did not
578  *	set the last link), but branch->key contains the number that should
579  *	be placed into *branch->p to fill that gap.
580  *
581  *	If allocation fails we free all blocks we've allocated (and forget
582  *	their buffer_heads) and return the error value from the failed
583  *	ext4_alloc_blocks() (normally -ENOSPC). Otherwise we set the chain
584  *	as described above and return 0.
585  */
586 static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
587 			int indirect_blks, int *blks, ext4_fsblk_t goal,
588 			ext4_lblk_t *offsets, Indirect *branch)
589 {
590 	int blocksize = inode->i_sb->s_blocksize;
591 	int i, n = 0;
592 	int err = 0;
593 	struct buffer_head *bh;
594 	int num;
595 	ext4_fsblk_t new_blocks[4];
596 	ext4_fsblk_t current_block;
597 
598 	num = ext4_alloc_blocks(handle, inode, goal, indirect_blks,
599 				*blks, new_blocks, &err);
600 	if (err)
601 		return err;
602 
603 	branch[0].key = cpu_to_le32(new_blocks[0]);
604 	/*
605 	 * metadata blocks and data blocks are allocated.
606 	 */
607 	for (n = 1; n <= indirect_blks;  n++) {
608 		/*
609 		 * Get buffer_head for parent block, zero it out
610 		 * and set the pointer to new one, then send
611 		 * parent to disk.
612 		 */
613 		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
614 		branch[n].bh = bh;
615 		lock_buffer(bh);
616 		BUFFER_TRACE(bh, "call get_create_access");
617 		err = ext4_journal_get_create_access(handle, bh);
618 		if (err) {
619 			unlock_buffer(bh);
620 			brelse(bh);
621 			goto failed;
622 		}
623 
624 		memset(bh->b_data, 0, blocksize);
625 		branch[n].p = (__le32 *) bh->b_data + offsets[n];
626 		branch[n].key = cpu_to_le32(new_blocks[n]);
627 		*branch[n].p = branch[n].key;
628 		if (n == indirect_blks) {
629 			current_block = new_blocks[n];
630 			/*
631 			 * End of chain: update the last new metablock of
632 			 * the chain to point to the newly allocated
633 			 * data block numbers
634 			 */
635 			for (i = 1; i < num; i++)
636 				*(branch[n].p + i) = cpu_to_le32(++current_block);
637 		}
638 		BUFFER_TRACE(bh, "marking uptodate");
639 		set_buffer_uptodate(bh);
640 		unlock_buffer(bh);
641 
642 		BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
643 		err = ext4_journal_dirty_metadata(handle, bh);
644 		if (err)
645 			goto failed;
646 	}
647 	*blks = num;
648 	return err;
649 failed:
650 	/* Allocation failed, free what we already allocated */
651 	for (i = 1; i <= n; i++) {
652 		BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
653 		ext4_journal_forget(handle, branch[i].bh);
654 	}
655 	for (i = 0; i < indirect_blks; i++)
656 		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
657 
658 	ext4_free_blocks(handle, inode, new_blocks[i], num, 0);
659 
660 	return err;
661 }
662 
663 /**
664  * ext4_splice_branch - splice the allocated branch onto inode.
665  * @inode: owner
666  * @block: (logical) number of block we are adding
667  * @chain: chain of indirect blocks (with a missing link - see
668  *	ext4_alloc_branch)
669  * @where: location of missing link
670  * @num:   number of indirect blocks we are adding
671  * @blks:  number of direct blocks we are adding
672  *
673  * This function fills the missing link and does all housekeeping needed in
674  * inode (->i_blocks, etc.). In case of success we end up with the full
675  * chain to new block and return 0.
676  */
677 static int ext4_splice_branch(handle_t *handle, struct inode *inode,
678 			ext4_lblk_t block, Indirect *where, int num, int blks)
679 {
680 	int i;
681 	int err = 0;
682 	struct ext4_block_alloc_info *block_i;
683 	ext4_fsblk_t current_block;
684 
685 	block_i = EXT4_I(inode)->i_block_alloc_info;
686 	/*
687 	 * If we're splicing into a [td]indirect block (as opposed to the
688 	 * inode) then we need to get write access to the [td]indirect block
689 	 * before the splice.
690 	 */
691 	if (where->bh) {
692 		BUFFER_TRACE(where->bh, "get_write_access");
693 		err = ext4_journal_get_write_access(handle, where->bh);
694 		if (err)
695 			goto err_out;
696 	}
697 	/* That's it */
698 
699 	*where->p = where->key;
700 
701 	/*
702 	 * Update the host buffer_head or inode to point to the rest of the
703 	 * just-allocated direct blocks
704 	 */
705 	if (num == 0 && blks > 1) {
706 		current_block = le32_to_cpu(where->key) + 1;
707 		for (i = 1; i < blks; i++)
708 			*(where->p + i) = cpu_to_le32(current_block++);
709 	}
710 
711 	/*
712 	 * update the most recently allocated logical & physical block
713 	 * in i_block_alloc_info, to help find the proper goal block for the
714 	 * next allocation
715 	 */
716 	if (block_i) {
717 		block_i->last_alloc_logical_block = block + blks - 1;
718 		block_i->last_alloc_physical_block =
719 				le32_to_cpu(where[num].key) + blks - 1;
720 	}
721 
722 	/* We are done with atomic stuff, now do the rest of housekeeping */
723 
724 	inode->i_ctime = ext4_current_time(inode);
725 	ext4_mark_inode_dirty(handle, inode);
726 
727 	/* had we spliced it onto indirect block? */
728 	if (where->bh) {
729 		/*
730 		 * If we spliced it onto an indirect block, we haven't
731 		 * altered the inode.  Note however that if it is being spliced
732 		 * onto an indirect block at the very end of the file (the
733 		 * file is growing) then we *will* alter the inode to reflect
734 		 * the new i_size.  But that is not done here - it is done in
735 		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
736 		 */
737 		jbd_debug(5, "splicing indirect only\n");
738 		BUFFER_TRACE(where->bh, "call ext4_journal_dirty_metadata");
739 		err = ext4_journal_dirty_metadata(handle, where->bh);
740 		if (err)
741 			goto err_out;
742 	} else {
743 		/*
744 		 * OK, we spliced it into the inode itself on a direct block.
745 		 * Inode was dirtied above.
746 		 */
747 		jbd_debug(5, "splicing direct\n");
748 	}
749 	return err;
750 
751 err_out:
752 	for (i = 1; i <= num; i++) {
753 		BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
754 		ext4_journal_forget(handle, where[i].bh);
755 		ext4_free_blocks(handle, inode,
756 					le32_to_cpu(where[i-1].key), 1, 0);
757 	}
758 	ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 0);
759 
760 	return err;
761 }
762 
763 /*
764  * Allocation strategy is simple: if we have to allocate something, we will
765  * have to go the whole way to leaf. So let's do it before attaching anything
766  * to tree, set linkage between the newborn blocks, write them if sync is
767  * required, recheck the path, free and repeat if check fails, otherwise
768  * set the last missing link (that will protect us from any truncate-generated
769  * removals - all blocks on the path are immune now) and possibly force the
770  * write on the parent block.
771  * That has a nice additional property: no special recovery from the failed
772  * allocations is needed - we simply release blocks and do not touch anything
773  * reachable from inode.
774  *
775  * `handle' can be NULL if create == 0.
776  *
777  * return > 0, # of blocks mapped or allocated.
778  * return = 0, if plain lookup failed.
779  * return < 0, error case.
780  *
781  *
782  * Needs to be called with
783  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating a file system block
784  * (i.e., create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
785  */
786 int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
787 		ext4_lblk_t iblock, unsigned long maxblocks,
788 		struct buffer_head *bh_result,
789 		int create, int extend_disksize)
790 {
791 	int err = -EIO;
792 	ext4_lblk_t offsets[4];
793 	Indirect chain[4];
794 	Indirect *partial;
795 	ext4_fsblk_t goal;
796 	int indirect_blks;
797 	int blocks_to_boundary = 0;
798 	int depth;
799 	struct ext4_inode_info *ei = EXT4_I(inode);
800 	int count = 0;
801 	ext4_fsblk_t first_block = 0;
802 
803 
804 	J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
805 	J_ASSERT(handle != NULL || create == 0);
806 	depth = ext4_block_to_path(inode, iblock, offsets,
807 					&blocks_to_boundary);
808 
809 	if (depth == 0)
810 		goto out;
811 
812 	partial = ext4_get_branch(inode, depth, offsets, chain, &err);
813 
814 	/* Simplest case - block found, no allocation needed */
815 	if (!partial) {
816 		first_block = le32_to_cpu(chain[depth - 1].key);
817 		clear_buffer_new(bh_result);
818 		count++;
819 		/* map as many contiguous, already-allocated blocks as we can */
820 		while (count < maxblocks && count <= blocks_to_boundary) {
821 			ext4_fsblk_t blk;
822 
823 			blk = le32_to_cpu(*(chain[depth-1].p + count));
824 
825 			if (blk == first_block + count)
826 				count++;
827 			else
828 				break;
829 		}
830 		goto got_it;
831 	}
832 
833 	/* Next simple case - plain lookup or failed read of indirect block */
834 	if (!create || err == -EIO)
835 		goto cleanup;
836 
837 	/*
838 	 * Okay, we need to do block allocation.  Lazily initialize the block
839 	 * allocation info here if necessary
840 	 */
841 	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
842 		ext4_init_block_alloc_info(inode);
843 
844 	goal = ext4_find_goal(inode, iblock, partial);
845 
846 	/* the number of blocks need to allocate for [d,t]indirect blocks */
847 	indirect_blks = (chain + depth) - partial - 1;
848 
849 	/*
850 	 * Next look up the indirect map to count the total number of
851 	 * direct blocks to allocate for this branch.
852 	 */
853 	count = ext4_blks_to_allocate(partial, indirect_blks,
854 					maxblocks, blocks_to_boundary);
855 	/*
856 	 * Block out ext4_truncate while we alter the tree
857 	 */
858 	err = ext4_alloc_branch(handle, inode, indirect_blks, &count, goal,
859 				offsets + (partial - chain), partial);
860 
861 	/*
862 	 * The ext4_splice_branch call will free and forget any buffers
863 	 * on the new chain if there is a failure, but that risks using
864 	 * up transaction credits, especially for bitmaps where the
865 	 * credits cannot be returned.  Can we handle this somehow?  We
866 	 * may need to return -EAGAIN upwards in the worst case.  --sct
867 	 */
868 	if (!err)
869 		err = ext4_splice_branch(handle, inode, iblock,
870 					partial, indirect_blks, count);
871 	/*
872 	 * i_disksize growing is protected by i_data_sem.  Don't forget to
873 	 * protect it if you're about to implement concurrent
874 	 * ext4_get_block() -bzzz
875 	 */
876 	if (!err && extend_disksize && inode->i_size > ei->i_disksize)
877 		ei->i_disksize = inode->i_size;
878 	if (err)
879 		goto cleanup;
880 
881 	set_buffer_new(bh_result);
882 got_it:
883 	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
884 	if (count > blocks_to_boundary)
885 		set_buffer_boundary(bh_result);
886 	err = count;
887 	/* Clean up and exit */
888 	partial = chain + depth - 1;	/* the whole chain */
889 cleanup:
890 	while (partial > chain) {
891 		BUFFER_TRACE(partial->bh, "call brelse");
892 		brelse(partial->bh);
893 		partial--;
894 	}
895 	BUFFER_TRACE(bh_result, "returned");
896 out:
897 	return err;
898 }
899 
900 /* Maximum number of blocks we map for direct IO at once. */
901 #define DIO_MAX_BLOCKS 4096
902 /*
903  * Number of credits we need for writing DIO_MAX_BLOCKS:
904  * We need sb + group descriptor + bitmap + inode -> 4
905  * For B blocks with A block pointers per block we need:
906  * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
907  * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
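 * That is (with integer division):
 * 4 + 1 + (4096/256/256 + 2) + (4096/256 + 2) = 4 + 1 + 2 + 18 = 25.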
908  */
909 #define DIO_CREDITS 25
910 
911 
912 /*
913  * ext4_get_blocks_wrap() is the ext4 get_block() wrapper function.
914  *
915  * It does a lookup first and returns if the blocks are already mapped.
916  * Otherwise it takes the write lock of i_data_sem, allocates blocks,
917  * stores the allocated blocks in the result buffer head and marks it
918  * mapped.
919  *
920  * If the file is extent-based, it calls ext4_ext_get_blocks();
921  * otherwise, it calls ext4_get_blocks_handle() to handle
922  * indirect-mapped files.
923  *
924  * On success, it returns the number of blocks mapped or allocated.
925  * If create == 0 and the blocks are pre-allocated and uninitialized,
926  * the result buffer head is unmapped. If create == 1, it makes sure
927  * the buffer head is mapped.
928  *
929  * It returns 0 if a plain lookup failed (blocks have not been
930  * allocated); in that case, the buffer head is unmapped.
931  *
932  * It returns the error in case of allocation failure.
933  */
935 int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
936 			unsigned long max_blocks, struct buffer_head *bh,
937 			int create, int extend_disksize)
938 {
939 	int retval;
940 
941 	clear_buffer_mapped(bh);
942 
943 	/*
944 	 * Try to see if we can get the block without requesting
945 	 * a new file system block.
946 	 */
947 	down_read(&EXT4_I(inode)->i_data_sem);
948 	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
949 		retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
950 				bh, 0, 0);
951 	} else {
952 		retval = ext4_get_blocks_handle(handle,
953 				inode, block, max_blocks, bh, 0, 0);
954 	}
955 	up_read(&EXT4_I(inode)->i_data_sem);
956 
957 	/* If it is only a block(s) lookup */
958 	if (!create)
959 		return retval;
960 
961 	/*
962 	 * Return if the blocks have already been allocated.
963 	 *
964 	 * Note that if blocks have been preallocated,
965 	 * ext4_ext_get_blocks() returns with create = 0
966 	 * and the buffer head unmapped.
967 	 */
968 	if (retval > 0 && buffer_mapped(bh))
969 		return retval;
970 
971 	/*
972 	 * Allocating new blocks and/or writing to an uninitialized extent
973 	 * will possibly result in updating i_data, so we take
974 	 * the write lock of i_data_sem and call get_blocks()
975 	 * with the create == 1 flag.
976 	 */
977 	down_write(&EXT4_I(inode)->i_data_sem);
978 	/*
979 	 * We need to re-check the extents flag here because migration
980 	 * could have changed the inode type in between.
981 	 */
982 	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
983 		retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
984 				bh, create, extend_disksize);
985 	} else {
986 		retval = ext4_get_blocks_handle(handle, inode, block,
987 				max_blocks, bh, create, extend_disksize);
988 	}
989 	up_write(&EXT4_I(inode)->i_data_sem);
990 	return retval;
991 }
992 
993 static int ext4_get_block(struct inode *inode, sector_t iblock,
994 			struct buffer_head *bh_result, int create)
995 {
996 	handle_t *handle = ext4_journal_current_handle();
997 	int ret = 0, started = 0;
998 	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
999 
1000 	if (create && !handle) {
1001 		/* Direct IO write... */
1002 		if (max_blocks > DIO_MAX_BLOCKS)
1003 			max_blocks = DIO_MAX_BLOCKS;
1004 		handle = ext4_journal_start(inode, DIO_CREDITS +
1005 			      2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb));
1006 		if (IS_ERR(handle)) {
1007 			ret = PTR_ERR(handle);
1008 			goto out;
1009 		}
1010 		started = 1;
1011 	}
1012 
1013 	ret = ext4_get_blocks_wrap(handle, inode, iblock,
1014 					max_blocks, bh_result, create, 0);
1015 	if (ret > 0) {
1016 		bh_result->b_size = (ret << inode->i_blkbits);
1017 		ret = 0;
1018 	}
1019 	if (started)
1020 		ext4_journal_stop(handle);
1021 out:
1022 	return ret;
1023 }
1024 
1025 /*
1026  * `handle' can be NULL if create is zero
1027  */
1028 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
1029 				ext4_lblk_t block, int create, int *errp)
1030 {
1031 	struct buffer_head dummy;
1032 	int fatal = 0, err;
1033 
1034 	J_ASSERT(handle != NULL || create == 0);
1035 
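	/* map the block through an on-stack dummy buffer_head first */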
1036 	dummy.b_state = 0;
1037 	dummy.b_blocknr = -1000;
1038 	buffer_trace_init(&dummy.b_history);
1039 	err = ext4_get_blocks_wrap(handle, inode, block, 1,
1040 					&dummy, create, 1);
1041 	/*
1042 	 * ext4_get_blocks_wrap() returns the number of blocks
1043 	 * mapped, or 0 in the case of a hole.
1044 	 */
1045 	if (err > 0) {
1046 		if (err > 1)
1047 			WARN_ON(1);
1048 		err = 0;
1049 	}
1050 	*errp = err;
1051 	if (!err && buffer_mapped(&dummy)) {
1052 		struct buffer_head *bh;
1053 		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
1054 		if (!bh) {
1055 			*errp = -EIO;
1056 			goto err;
1057 		}
1058 		if (buffer_new(&dummy)) {
1059 			J_ASSERT(create != 0);
1060 			J_ASSERT(handle != NULL);
1061 
1062 			/*
1063 			 * Now that we do not always journal data, we should
1064 			 * keep in mind whether this should always journal the
1065 			 * new buffer as metadata.  For now, regular file
1066 			 * writes use ext4_get_block instead, so it's not a
1067 			 * problem.
1068 			 */
1069 			lock_buffer(bh);
1070 			BUFFER_TRACE(bh, "call get_create_access");
1071 			fatal = ext4_journal_get_create_access(handle, bh);
1072 			if (!fatal && !buffer_uptodate(bh)) {
1073 				memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1074 				set_buffer_uptodate(bh);
1075 			}
1076 			unlock_buffer(bh);
1077 			BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
1078 			err = ext4_journal_dirty_metadata(handle, bh);
1079 			if (!fatal)
1080 				fatal = err;
1081 		} else {
1082 			BUFFER_TRACE(bh, "not a new buffer");
1083 		}
1084 		if (fatal) {
1085 			*errp = fatal;
1086 			brelse(bh);
1087 			bh = NULL;
1088 		}
1089 		return bh;
1090 	}
1091 err:
1092 	return NULL;
1093 }
1094 
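/*
 * Like ext4_getblk(), but additionally reads the block from disk if it
 * is not already uptodate; *err is set to -EIO if the read fails.
 */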
1095 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
1096 			       ext4_lblk_t block, int create, int *err)
1097 {
1098 	struct buffer_head *bh;
1099 
1100 	bh = ext4_getblk(handle, inode, block, create, err);
1101 	if (!bh)
1102 		return bh;
1103 	if (buffer_uptodate(bh))
1104 		return bh;
1105 	ll_rw_block(READ_META, 1, &bh);
1106 	wait_on_buffer(bh);
1107 	if (buffer_uptodate(bh))
1108 		return bh;
1109 	put_bh(bh);
1110 	*err = -EIO;
1111 	return NULL;
1112 }
1113 
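/*
 * Apply @fn to each buffer of @head that overlaps the byte range
 * [@from, @to).  Buffers outside the range are skipped, but *@partial
 * is set if any of them is not uptodate.  The walk stops at the first
 * non-zero return from @fn, which is then propagated to the caller.
 */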
1114 static int walk_page_buffers(	handle_t *handle,
1115 				struct buffer_head *head,
1116 				unsigned from,
1117 				unsigned to,
1118 				int *partial,
1119 				int (*fn)(	handle_t *handle,
1120 						struct buffer_head *bh))
1121 {
1122 	struct buffer_head *bh;
1123 	unsigned block_start, block_end;
1124 	unsigned blocksize = head->b_size;
1125 	int err, ret = 0;
1126 	struct buffer_head *next;
1127 
1128 	for (	bh = head, block_start = 0;
1129 		ret == 0 && (bh != head || !block_start);
1130 		block_start = block_end, bh = next)
1131 	{
1132 		next = bh->b_this_page;
1133 		block_end = block_start + blocksize;
1134 		if (block_end <= from || block_start >= to) {
1135 			if (partial && !buffer_uptodate(bh))
1136 				*partial = 1;
1137 			continue;
1138 		}
1139 		err = (*fn)(handle, bh);
1140 		if (!ret)
1141 			ret = err;
1142 	}
1143 	return ret;
1144 }
1145 
1146 /*
1147  * To preserve ordering, it is essential that the hole instantiation and
1148  * the data write be encapsulated in a single transaction.  We cannot
1149  * close off a transaction and start a new one between the ext4_get_block()
1150  * and the commit_write().  So doing the jbd2_journal_start at the start of
1151  * prepare_write() is the right place.
1152  *
1153  * Also, this function can nest inside ext4_writepage() ->
1154  * block_write_full_page(). In that case, we *know* that ext4_writepage()
1155  * has generated enough buffer credits to do the whole page.  So we won't
1156  * block on the journal in that case, which is good, because the caller may
1157  * be PF_MEMALLOC.
1158  *
1159  * By accident, ext4 can be reentered when a transaction is open via
1160  * quota file writes.  If we were to commit the transaction while thus
1161  * reentered, there can be a deadlock - we would be holding a quota
1162  * lock, and the commit would never complete if another thread had a
1163  * transaction open and was blocking on the quota lock - a ranking
1164  * violation.
1165  *
1166  * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
1167  * will _not_ run commit under these circumstances because handle->h_ref
1168  * is elevated.  We'll still have enough credits for the tiny quotafile
1169  * write.
1170  */
1171 static int do_journal_get_write_access(handle_t *handle,
1172 					struct buffer_head *bh)
1173 {
1174 	if (!buffer_mapped(bh) || buffer_freed(bh))
1175 		return 0;
1176 	return ext4_journal_get_write_access(handle, bh);
1177 }
1178 
1179 static int ext4_write_begin(struct file *file, struct address_space *mapping,
1180 				loff_t pos, unsigned len, unsigned flags,
1181 				struct page **pagep, void **fsdata)
1182 {
1183  	struct inode *inode = mapping->host;
1184 	int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
1185 	handle_t *handle;
1186 	int retries = 0;
1187  	struct page *page;
1188  	pgoff_t index;
1189  	unsigned from, to;
1190 
1191  	index = pos >> PAGE_CACHE_SHIFT;
1192  	from = pos & (PAGE_CACHE_SIZE - 1);
1193  	to = from + len;
1194 
1195 retry:
1196  	page = __grab_cache_page(mapping, index);
1197  	if (!page)
1198  		return -ENOMEM;
1199  	*pagep = page;
1200 
1201   	handle = ext4_journal_start(inode, needed_blocks);
1202   	if (IS_ERR(handle)) {
1203  		unlock_page(page);
1204  		page_cache_release(page);
1205   		ret = PTR_ERR(handle);
1206   		goto out;
1207 	}
1208 
1209 	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
1210 							ext4_get_block);
1211 
1212 	if (!ret && ext4_should_journal_data(inode)) {
1213 		ret = walk_page_buffers(handle, page_buffers(page),
1214 				from, to, NULL, do_journal_get_write_access);
1215 	}
1216 
1217 	if (ret) {
1218 		ext4_journal_stop(handle);
1219  		unlock_page(page);
1220  		page_cache_release(page);
1221 	}
1222 
1223 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
1224 		goto retry;
1225 out:
1226 	return ret;
1227 }
1228 
1229 int ext4_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
1230 {
1231 	int err = jbd2_journal_dirty_data(handle, bh);
1232 	if (err)
1233 		ext4_journal_abort_handle(__FUNCTION__, __FUNCTION__,
1234 						bh, handle, err);
1235 	return err;
1236 }
1237 
1238 /* For write_end() in data=journal mode */
1239 static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1240 {
1241 	if (!buffer_mapped(bh) || buffer_freed(bh))
1242 		return 0;
1243 	set_buffer_uptodate(bh);
1244 	return ext4_journal_dirty_metadata(handle, bh);
1245 }
1246 
1247 /*
1248  * Generic write_end handler for ordered and writeback ext4 journal modes.
1249  * We can't use generic_write_end, because that unlocks the page and we need to
1250  * unlock the page after ext4_journal_stop, but ext4_journal_stop must run
1251  * after block_write_end.
1252  */
1253 static int ext4_generic_write_end(struct file *file,
1254 				struct address_space *mapping,
1255 				loff_t pos, unsigned len, unsigned copied,
1256 				struct page *page, void *fsdata)
1257 {
1258 	struct inode *inode = file->f_mapping->host;
1259 
1260 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1261 
1262 	if (pos+copied > inode->i_size) {
1263 		i_size_write(inode, pos+copied);
1264 		mark_inode_dirty(inode);
1265 	}
1266 
1267 	return copied;
1268 }
1269 
1270 /*
1271  * We need to pick up the new inode size which generic_commit_write gave us.
1272  * `file' can be NULL - e.g., when called from page_symlink().
1273  *
1274  * ext4 never places buffers on inode->i_mapping->private_list.  Metadata
1275  * buffers are managed internally.
1276  */
1277 static int ext4_ordered_write_end(struct file *file,
1278 				struct address_space *mapping,
1279 				loff_t pos, unsigned len, unsigned copied,
1280 				struct page *page, void *fsdata)
1281 {
1282 	handle_t *handle = ext4_journal_current_handle();
1283 	struct inode *inode = file->f_mapping->host;
1284 	unsigned from, to;
1285 	int ret = 0, ret2;
1286 
1287 	from = pos & (PAGE_CACHE_SIZE - 1);
1288 	to = from + len;
1289 
1290 	ret = walk_page_buffers(handle, page_buffers(page),
1291 		from, to, NULL, ext4_journal_dirty_data);
1292 
1293 	if (ret == 0) {
1294 		/*
1295 		 * generic_write_end() will run mark_inode_dirty() if i_size
1296 		 * changes.  So let's piggyback the i_disksize mark_inode_dirty
1297 		 * into that.
1298 		 */
1299 		loff_t new_i_size;
1300 
1301 		new_i_size = pos + copied;
1302 		if (new_i_size > EXT4_I(inode)->i_disksize)
1303 			EXT4_I(inode)->i_disksize = new_i_size;
1304 		copied = ext4_generic_write_end(file, mapping, pos, len, copied,
1305 							page, fsdata);
1306 		if (copied < 0)
1307 			ret = copied;
1308 	}
1309 	ret2 = ext4_journal_stop(handle);
1310 	if (!ret)
1311 		ret = ret2;
1312 	unlock_page(page);
1313 	page_cache_release(page);
1314 
1315 	return ret ? ret : copied;
1316 }
1317 
1318 static int ext4_writeback_write_end(struct file *file,
1319 				struct address_space *mapping,
1320 				loff_t pos, unsigned len, unsigned copied,
1321 				struct page *page, void *fsdata)
1322 {
1323 	handle_t *handle = ext4_journal_current_handle();
1324 	struct inode *inode = file->f_mapping->host;
1325 	int ret = 0, ret2;
1326 	loff_t new_i_size;
1327 
1328 	new_i_size = pos + copied;
1329 	if (new_i_size > EXT4_I(inode)->i_disksize)
1330 		EXT4_I(inode)->i_disksize = new_i_size;
1331 
1332 	copied = ext4_generic_write_end(file, mapping, pos, len, copied,
1333 							page, fsdata);
1334 	if (copied < 0)
1335 		ret = copied;
1336 
1337 	ret2 = ext4_journal_stop(handle);
1338 	if (!ret)
1339 		ret = ret2;
1340 	unlock_page(page);
1341 	page_cache_release(page);
1342 
1343 	return ret ? ret : copied;
1344 }
1345 
1346 static int ext4_journalled_write_end(struct file *file,
1347 				struct address_space *mapping,
1348 				loff_t pos, unsigned len, unsigned copied,
1349 				struct page *page, void *fsdata)
1350 {
1351 	handle_t *handle = ext4_journal_current_handle();
1352 	struct inode *inode = mapping->host;
1353 	int ret = 0, ret2;
1354 	int partial = 0;
1355 	unsigned from, to;
1356 
1357 	from = pos & (PAGE_CACHE_SIZE - 1);
1358 	to = from + len;
1359 
1360 	if (copied < len) {
1361 		if (!PageUptodate(page))
1362 			copied = 0;
1363 		page_zero_new_buffers(page, from+copied, to);
1364 	}
1365 
1366 	ret = walk_page_buffers(handle, page_buffers(page), from,
1367 				to, &partial, write_end_fn);
1368 	if (!partial)
1369 		SetPageUptodate(page);
1370 	if (pos+copied > inode->i_size)
1371 		i_size_write(inode, pos+copied);
1372 	EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
1373 	if (inode->i_size > EXT4_I(inode)->i_disksize) {
1374 		EXT4_I(inode)->i_disksize = inode->i_size;
1375 		ret2 = ext4_mark_inode_dirty(handle, inode);
1376 		if (!ret)
1377 			ret = ret2;
1378 	}
1379 
1380 	ret2 = ext4_journal_stop(handle);
1381 	if (!ret)
1382 		ret = ret2;
1383 	unlock_page(page);
1384 	page_cache_release(page);
1385 
1386 	return ret ? ret : copied;
1387 }
1388 
1389 /*
1390  * bmap() is special.  It gets used by applications such as lilo and by
1391  * the swapper to find the on-disk block of a specific piece of data.
1392  *
1393  * Naturally, this is dangerous if the block concerned is still in the
1394  * journal.  If somebody makes a swapfile on an ext4 data-journaling
1395  * filesystem and enables swap, then they may get a nasty shock when the
1396  * data getting swapped to that swapfile suddenly gets overwritten by
1397  * the original zeros written out previously to the journal and
1398  * awaiting writeback in the kernel's buffer cache.
1399  *
1400  * So, if we see any bmap calls here on a modified, data-journaled file,
1401  * take extra steps to flush any blocks which might be in the cache.
1402  */
1403 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
1404 {
1405 	struct inode *inode = mapping->host;
1406 	journal_t *journal;
1407 	int err;
1408 
1409 	if (EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
1410 		/*
1411 		 * This is a REALLY heavyweight approach, but the use of
1412 		 * bmap on dirty files is expected to be extremely rare:
1413 		 * only if we run lilo or swapon on a freshly made file
1414 		 * do we expect this to happen.
1415 		 *
1416 		 * (bmap requires CAP_SYS_RAWIO so this does not
1417 		 * represent an unprivileged user DoS attack --- we'd be
1418 		 * in trouble if mortal users could trigger this path at
1419 		 * will.)
1420 		 *
1421 		 * NB. EXT4_STATE_JDATA is not set on files other than
1422 		 * regular files.  If somebody wants to bmap a directory
1423 		 * or symlink and gets confused because the buffer
1424 		 * hasn't yet been flushed to disk, they deserve
1425 		 * everything they get.
1426 		 */
1427 
1428 		EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
1429 		journal = EXT4_JOURNAL(inode);
1430 		jbd2_journal_lock_updates(journal);
1431 		err = jbd2_journal_flush(journal);
1432 		jbd2_journal_unlock_updates(journal);
1433 
1434 		if (err)
1435 			return 0;
1436 	}
1437 
1438 	return generic_block_bmap(mapping, block, ext4_get_block);
1439 }
1440 
1441 static int bget_one(handle_t *handle, struct buffer_head *bh)
1442 {
1443 	get_bh(bh);
1444 	return 0;
1445 }
1446 
1447 static int bput_one(handle_t *handle, struct buffer_head *bh)
1448 {
1449 	put_bh(bh);
1450 	return 0;
1451 }
1452 
1453 static int jbd2_journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
1454 {
1455 	if (buffer_mapped(bh))
1456 		return ext4_journal_dirty_data(handle, bh);
1457 	return 0;
1458 }
1459 
1460 /*
1461  * Note that we always start a transaction even if we're not journalling
1462  * data.  This is to preserve ordering: any hole instantiation within
1463  * __block_write_full_page -> ext4_get_block() should be journalled
1464  * along with the data so we don't crash and then get metadata which
1465  * refers to old data.
1466  *
1467  * In all journalling modes block_write_full_page() will start the I/O.
1468  *
1469  * Problem:
1470  *
1471  *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1472  *		ext4_writepage()
1473  *
1474  * Similar for:
1475  *
1476  *	ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
1477  *
1478  * Same applies to ext4_get_block().  We will deadlock on various things like
1479  * lock_journal and i_data_sem
1480  *
1481  * Setting PF_MEMALLOC here doesn't work - too many internal memory
1482  * allocations fail.
1483  *
1484  * 16May01: If we're reentered then journal_current_handle() will be
1485  *	    non-zero. We simply *return*.
1486  *
1487  * 1 July 2001: @@@ FIXME:
1488  *   In journalled data mode, a data buffer may be metadata against the
1489  *   current transaction.  But the same file is part of a shared mapping
1490  *   and someone does a writepage() on it.
1491  *
1492  *   We will move the buffer onto the async_data list, but *after* it has
1493  *   been dirtied. So there's a small window where we have dirty data on
1494  *   BJ_Metadata.
1495  *
1496  *   Note that this only applies to the last partial page in the file.  The
1497  *   bit which block_write_full_page() uses prepare/commit for.  (That's
1498  *   broken code anyway: it's wrong for msync()).
1499  *
1500  *   It's a rare case: affects the final partial page, for journalled data
1501  *   where the file is subject to both write() and writepage() in the same
1502  *   transaction.  To fix it we'll need a custom block_write_full_page().
1503  *   We'll probably need that anyway for journalling writepage() output.
1504  *
1505  * We don't honour synchronous mounts for writepage().  That would be
1506  * disastrous.  Any write() or metadata operation will sync the fs for
1507  * us.
1508  *
1509  * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
1510  * we don't need to open a transaction here.
1511  */
1512 static int ext4_ordered_writepage(struct page *page,
1513 				struct writeback_control *wbc)
1514 {
1515 	struct inode *inode = page->mapping->host;
1516 	struct buffer_head *page_bufs;
1517 	handle_t *handle = NULL;
1518 	int ret = 0;
1519 	int err;
1520 
1521 	J_ASSERT(PageLocked(page));
1522 
1523 	/*
1524 	 * We give up here if we're reentered, because it might be for a
1525 	 * different filesystem.
1526 	 */
1527 	if (ext4_journal_current_handle())
1528 		goto out_fail;
1529 
1530 	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1531 
1532 	if (IS_ERR(handle)) {
1533 		ret = PTR_ERR(handle);
1534 		goto out_fail;
1535 	}
1536 
1537 	if (!page_has_buffers(page)) {
1538 		create_empty_buffers(page, inode->i_sb->s_blocksize,
1539 				(1 << BH_Dirty)|(1 << BH_Uptodate));
1540 	}
1541 	page_bufs = page_buffers(page);
1542 	walk_page_buffers(handle, page_bufs, 0,
1543 			PAGE_CACHE_SIZE, NULL, bget_one);
1544 
1545 	ret = block_write_full_page(page, ext4_get_block, wbc);
1546 
1547 	/*
1548 	 * The page can become unlocked at any point now, and
1549 	 * truncate can then come in and change things.  So we
1550 	 * can't touch *page from now on.  But *page_bufs is
1551 	 * safe due to elevated refcount.
1552 	 */
1553 
1554 	/*
1555 	 * And attach them to the current transaction.  But only if
1556 	 * block_write_full_page() succeeded.  Otherwise they are unmapped,
1557 	 * and generally junk.
1558 	 */
1559 	if (ret == 0) {
1560 		err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
1561 					NULL, jbd2_journal_dirty_data_fn);
1562 		if (!ret)
1563 			ret = err;
1564 	}
1565 	walk_page_buffers(handle, page_bufs, 0,
1566 			PAGE_CACHE_SIZE, NULL, bput_one);
1567 	err = ext4_journal_stop(handle);
1568 	if (!ret)
1569 		ret = err;
1570 	return ret;
1571 
1572 out_fail:
1573 	redirty_page_for_writepage(wbc, page);
1574 	unlock_page(page);
1575 	return ret;
1576 }
1577 
1578 static int ext4_writeback_writepage(struct page *page,
1579 				struct writeback_control *wbc)
1580 {
1581 	struct inode *inode = page->mapping->host;
1582 	handle_t *handle = NULL;
1583 	int ret = 0;
1584 	int err;
1585 
1586 	if (ext4_journal_current_handle())
1587 		goto out_fail;
1588 
1589 	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1590 	if (IS_ERR(handle)) {
1591 		ret = PTR_ERR(handle);
1592 		goto out_fail;
1593 	}
1594 
1595 	if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
1596 		ret = nobh_writepage(page, ext4_get_block, wbc);
1597 	else
1598 		ret = block_write_full_page(page, ext4_get_block, wbc);
1599 
1600 	err = ext4_journal_stop(handle);
1601 	if (!ret)
1602 		ret = err;
1603 	return ret;
1604 
1605 out_fail:
1606 	redirty_page_for_writepage(wbc, page);
1607 	unlock_page(page);
1608 	return ret;
1609 }
1610 
1611 static int ext4_journalled_writepage(struct page *page,
1612 				struct writeback_control *wbc)
1613 {
1614 	struct inode *inode = page->mapping->host;
1615 	handle_t *handle = NULL;
1616 	int ret = 0;
1617 	int err;
1618 
1619 	if (ext4_journal_current_handle())
1620 		goto no_write;
1621 
1622 	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1623 	if (IS_ERR(handle)) {
1624 		ret = PTR_ERR(handle);
1625 		goto no_write;
1626 	}
1627 
1628 	if (!page_has_buffers(page) || PageChecked(page)) {
1629 		/*
1630 		 * It's mmapped pagecache.  Add buffers and journal it.  There
1631 		 * doesn't seem much point in redirtying the page here.
1632 		 */
1633 		ClearPageChecked(page);
1634 		ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
1635 					ext4_get_block);
1636 		if (ret != 0) {
1637 			ext4_journal_stop(handle);
1638 			goto out_unlock;
1639 		}
1640 		ret = walk_page_buffers(handle, page_buffers(page), 0,
1641 			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
1642 
1643 		err = walk_page_buffers(handle, page_buffers(page), 0,
1644 				PAGE_CACHE_SIZE, NULL, write_end_fn);
1645 		if (ret == 0)
1646 			ret = err;
1647 		EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
1648 		unlock_page(page);
1649 	} else {
1650 		/*
1651 		 * It may be a page full of checkpoint-mode buffers.  We don't
1652 		 * really know unless we go poke around in the buffer_heads.
1653 		 * But block_write_full_page will do the right thing.
1654 		 */
1655 		ret = block_write_full_page(page, ext4_get_block, wbc);
1656 	}
1657 	err = ext4_journal_stop(handle);
1658 	if (!ret)
1659 		ret = err;
1660 out:
1661 	return ret;
1662 
1663 no_write:
1664 	redirty_page_for_writepage(wbc, page);
1665 out_unlock:
1666 	unlock_page(page);
1667 	goto out;
1668 }
1669 
1670 static int ext4_readpage(struct file *file, struct page *page)
1671 {
1672 	return mpage_readpage(page, ext4_get_block);
1673 }
1674 
1675 static int
1676 ext4_readpages(struct file *file, struct address_space *mapping,
1677 		struct list_head *pages, unsigned nr_pages)
1678 {
1679 	return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
1680 }
1681 
1682 static void ext4_invalidatepage(struct page *page, unsigned long offset)
1683 {
1684 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
1685 
1686 	/*
1687 	 * If it's a full truncate we just forget about the pending dirtying
1688 	 */
1689 	if (offset == 0)
1690 		ClearPageChecked(page);
1691 
1692 	jbd2_journal_invalidatepage(journal, page, offset);
1693 }
1694 
1695 static int ext4_releasepage(struct page *page, gfp_t wait)
1696 {
1697 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
1698 
1699 	WARN_ON(PageChecked(page));
1700 	if (!page_has_buffers(page))
1701 		return 0;
1702 	return jbd2_journal_try_to_free_buffers(journal, page, wait);
1703 }
1704 
1705 /*
1706  * If the O_DIRECT write will extend the file then add this inode to the
1707  * orphan list.  So recovery will truncate it back to the original size
1708  * if the machine crashes during the write.
1709  *
1710  * If the O_DIRECT write is instantiating holes inside i_size and the machine
1711  * crashes then stale disk data _may_ be exposed inside the file.  But the
1712  * current VFS code falls back to the buffered path in that case, so we are safe.
1713  */
1714 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
1715 			const struct iovec *iov, loff_t offset,
1716 			unsigned long nr_segs)
1717 {
1718 	struct file *file = iocb->ki_filp;
1719 	struct inode *inode = file->f_mapping->host;
1720 	struct ext4_inode_info *ei = EXT4_I(inode);
1721 	handle_t *handle;
1722 	ssize_t ret;
1723 	int orphan = 0;
1724 	size_t count = iov_length(iov, nr_segs);
1725 
1726 	if (rw == WRITE) {
1727 		loff_t final_size = offset + count;
1728 
1729 		if (final_size > inode->i_size) {
1730 			/* Credits for sb + inode write */
1731 			handle = ext4_journal_start(inode, 2);
1732 			if (IS_ERR(handle)) {
1733 				ret = PTR_ERR(handle);
1734 				goto out;
1735 			}
1736 			ret = ext4_orphan_add(handle, inode);
1737 			if (ret) {
1738 				ext4_journal_stop(handle);
1739 				goto out;
1740 			}
1741 			orphan = 1;
1742 			ei->i_disksize = inode->i_size;
1743 			ext4_journal_stop(handle);
1744 		}
1745 	}
1746 
1747 	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
1748 				 offset, nr_segs,
1749 				 ext4_get_block, NULL);
1750 
1751 	if (orphan) {
1752 		int err;
1753 
1754 		/* Credits for sb + inode write */
1755 		handle = ext4_journal_start(inode, 2);
1756 		if (IS_ERR(handle)) {
1757 			/* This is really bad luck. We've written the data
1758 			 * but cannot extend i_size. Bail out and pretend
1759 			 * the write failed... */
1760 			ret = PTR_ERR(handle);
1761 			goto out;
1762 		}
1763 		if (inode->i_nlink)
1764 			ext4_orphan_del(handle, inode);
1765 		if (ret > 0) {
1766 			loff_t end = offset + ret;
1767 			if (end > inode->i_size) {
1768 				ei->i_disksize = end;
1769 				i_size_write(inode, end);
1770 				/*
1771 				 * We're going to return a positive `ret'
1772 				 * here due to non-zero-length I/O, so there's
1773 				 * no way of reporting error returns from
1774 				 * ext4_mark_inode_dirty() to userspace.  So
1775 				 * ignore it.
1776 				 */
1777 				ext4_mark_inode_dirty(handle, inode);
1778 			}
1779 		}
1780 		err = ext4_journal_stop(handle);
1781 		if (ret == 0)
1782 			ret = err;
1783 	}
1784 out:
1785 	return ret;
1786 }
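
/*
 * Illustrative timeline of the orphan-list dance above (a summary of the
 * existing logic, not new behaviour):
 *
 *   1) extending write: start a handle, ext4_orphan_add(), keep the old
 *      size in i_disksize, stop the handle
 *   2) blockdev_direct_IO() writes the new data
 *   3) start a handle, ext4_orphan_del(), push i_disksize/i_size out to
 *      the end of what was actually written
 *
 * A crash anywhere between 1) and 3) leaves the inode on the on-disk
 * orphan list, so recovery truncates it back to the old size and no
 * uninitialised blocks ever become visible.
 */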
1787 
1788 /*
1789  * Pages can be marked dirty completely asynchronously from ext4's journalling
1790  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
1791  * much here because ->set_page_dirty is called under VFS locks.  The page is
1792  * not necessarily locked.
1793  *
1794  * We cannot just dirty the page and leave attached buffers clean, because the
1795  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
1796  * or jbddirty because all the journalling code will explode.
1797  *
1798  * So what we do is to mark the page "pending dirty" and next time writepage
1799  * is called, propagate that into the buffers appropriately.
1800  */
1801 static int ext4_journalled_set_page_dirty(struct page *page)
1802 {
1803 	SetPageChecked(page);
1804 	return __set_page_dirty_nobuffers(page);
1805 }
1806 
1807 static const struct address_space_operations ext4_ordered_aops = {
1808 	.readpage	= ext4_readpage,
1809 	.readpages	= ext4_readpages,
1810 	.writepage	= ext4_ordered_writepage,
1811 	.sync_page	= block_sync_page,
1812 	.write_begin	= ext4_write_begin,
1813 	.write_end	= ext4_ordered_write_end,
1814 	.bmap		= ext4_bmap,
1815 	.invalidatepage	= ext4_invalidatepage,
1816 	.releasepage	= ext4_releasepage,
1817 	.direct_IO	= ext4_direct_IO,
1818 	.migratepage	= buffer_migrate_page,
1819 };
1820 
1821 static const struct address_space_operations ext4_writeback_aops = {
1822 	.readpage	= ext4_readpage,
1823 	.readpages	= ext4_readpages,
1824 	.writepage	= ext4_writeback_writepage,
1825 	.sync_page	= block_sync_page,
1826 	.write_begin	= ext4_write_begin,
1827 	.write_end	= ext4_writeback_write_end,
1828 	.bmap		= ext4_bmap,
1829 	.invalidatepage	= ext4_invalidatepage,
1830 	.releasepage	= ext4_releasepage,
1831 	.direct_IO	= ext4_direct_IO,
1832 	.migratepage	= buffer_migrate_page,
1833 };
1834 
1835 static const struct address_space_operations ext4_journalled_aops = {
1836 	.readpage	= ext4_readpage,
1837 	.readpages	= ext4_readpages,
1838 	.writepage	= ext4_journalled_writepage,
1839 	.sync_page	= block_sync_page,
1840 	.write_begin	= ext4_write_begin,
1841 	.write_end	= ext4_journalled_write_end,
1842 	.set_page_dirty	= ext4_journalled_set_page_dirty,
1843 	.bmap		= ext4_bmap,
1844 	.invalidatepage	= ext4_invalidatepage,
1845 	.releasepage	= ext4_releasepage,
1846 };
1847 
1848 void ext4_set_aops(struct inode *inode)
1849 {
1850 	if (ext4_should_order_data(inode))
1851 		inode->i_mapping->a_ops = &ext4_ordered_aops;
1852 	else if (ext4_should_writeback_data(inode))
1853 		inode->i_mapping->a_ops = &ext4_writeback_aops;
1854 	else
1855 		inode->i_mapping->a_ops = &ext4_journalled_aops;
1856 }
1857 
1858 /*
1859  * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
1860  * up to the end of the block which corresponds to `from'.
1861  * This is required during truncate.  We need to physically zero the tail end
1862  * of that block so it doesn't yield old data if the file is later grown.
1863  */
1864 int ext4_block_truncate_page(handle_t *handle, struct page *page,
1865 		struct address_space *mapping, loff_t from)
1866 {
1867 	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
1868 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
1869 	unsigned blocksize, length, pos;
1870 	ext4_lblk_t iblock;
1871 	struct inode *inode = mapping->host;
1872 	struct buffer_head *bh;
1873 	int err = 0;
1874 
1875 	blocksize = inode->i_sb->s_blocksize;
1876 	length = blocksize - (offset & (blocksize - 1));
1877 	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
1878 
1879 	/*
1880 	 * For "nobh" option,  we can only work if we don't need to
1881 	 * read-in the page - otherwise we create buffers to do the IO.
1882 	 */
1883 	if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
1884 	     ext4_should_writeback_data(inode) && PageUptodate(page)) {
1885 		zero_user(page, offset, length);
1886 		set_page_dirty(page);
1887 		goto unlock;
1888 	}
1889 
1890 	if (!page_has_buffers(page))
1891 		create_empty_buffers(page, blocksize, 0);
1892 
1893 	/* Find the buffer that contains "offset" */
1894 	bh = page_buffers(page);
1895 	pos = blocksize;
1896 	while (offset >= pos) {
1897 		bh = bh->b_this_page;
1898 		iblock++;
1899 		pos += blocksize;
1900 	}
1901 
1902 	err = 0;
1903 	if (buffer_freed(bh)) {
1904 		BUFFER_TRACE(bh, "freed: skip");
1905 		goto unlock;
1906 	}
1907 
1908 	if (!buffer_mapped(bh)) {
1909 		BUFFER_TRACE(bh, "unmapped");
1910 		ext4_get_block(inode, iblock, bh, 0);
1911 		/* unmapped? It's a hole - nothing to do */
1912 		if (!buffer_mapped(bh)) {
1913 			BUFFER_TRACE(bh, "still unmapped");
1914 			goto unlock;
1915 		}
1916 	}
1917 
1918 	/* Ok, it's mapped. Make sure it's up-to-date */
1919 	if (PageUptodate(page))
1920 		set_buffer_uptodate(bh);
1921 
1922 	if (!buffer_uptodate(bh)) {
1923 		err = -EIO;
1924 		ll_rw_block(READ, 1, &bh);
1925 		wait_on_buffer(bh);
1926 		/* Uhhuh. Read error. Complain and punt. */
1927 		if (!buffer_uptodate(bh))
1928 			goto unlock;
1929 	}
1930 
1931 	if (ext4_should_journal_data(inode)) {
1932 		BUFFER_TRACE(bh, "get write access");
1933 		err = ext4_journal_get_write_access(handle, bh);
1934 		if (err)
1935 			goto unlock;
1936 	}
1937 
1938 	zero_user(page, offset, length);
1939 
1940 	BUFFER_TRACE(bh, "zeroed end of block");
1941 
1942 	err = 0;
1943 	if (ext4_should_journal_data(inode)) {
1944 		err = ext4_journal_dirty_metadata(handle, bh);
1945 	} else {
1946 		if (ext4_should_order_data(inode))
1947 			err = ext4_journal_dirty_data(handle, bh);
1948 		mark_buffer_dirty(bh);
1949 	}
1950 
1951 unlock:
1952 	unlock_page(page);
1953 	page_cache_release(page);
1954 	return err;
1955 }
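
/*
 * Worked example for the arithmetic above, assuming 4K pages and a 4K
 * block size: truncating to i_size 10000 gives from = 10000, so
 * offset = 10000 & 4095 = 1808 and length = 4096 - 1808 = 2288, i.e.
 * bytes 1808..4095 of page 2 (the tail of the block holding the new
 * EOF) are zeroed.
 */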
1956 
1957 /*
1958  * Probably it should be a library function... search for first non-zero word
1959  * or memcmp with zero_page, whatever is better for particular architecture.
1960  * Linus?
1961  */
1962 static inline int all_zeroes(__le32 *p, __le32 *q)
1963 {
1964 	while (p < q)
1965 		if (*p++)
1966 			return 0;
1967 	return 1;
1968 }
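
/*
 * The memcmp flavour the comment above muses about - a sketch only,
 * kept under #if 0; it assumes the architecture exports
 * empty_zero_page and that (q - p) never exceeds one page.
 */
#if 0
static inline int all_zeroes_memcmp(__le32 *p, __le32 *q)
{
	/* Compare the span against a known-zero page. */
	return !memcmp(p, empty_zero_page, (char *)q - (char *)p);
}
#endif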
1969 
1970 /**
1971  *	ext4_find_shared - find the indirect blocks for partial truncation.
1972  *	@inode:	  inode in question
1973  *	@depth:	  depth of the affected branch
1974  *	@offsets: offsets of pointers in that branch (see ext4_block_to_path)
1975  *	@chain:	  place to store the pointers to partial indirect blocks
1976  *	@top:	  place to the (detached) top of branch
1977  *
1978  *	This is a helper function used by ext4_truncate().
1979  *
1980  *	When we do truncate() we may have to clean the ends of several
1981  *	indirect blocks but leave the blocks themselves alive. Block is
1982  *	partially truncated if some data below the new i_size is referenced
1983  *	from it (and it is on the path to the first completely truncated
1984  *	data block, indeed).  We have to free the top of that path along
1985  *	with everything to the right of the path. Since no allocation
1986  *	past the truncation point is possible until ext4_truncate()
1987  *	finishes, we may safely do the latter, but top of branch may
1988  *	require special attention - pageout below the truncation point
1989  *	might try to populate it.
1990  *
1991  *	We atomically detach the top of branch from the tree, store the
1992  *	block number of its root in *@top, pointers to buffer_heads of
1993  *	partially truncated blocks - in @chain[].bh and pointers to
1994  *	their last elements that should not be removed - in
1995  *	@chain[].p. Return value is the pointer to last filled element
1996  *	of @chain.
1997  *
1998  *	The work left to caller to do the actual freeing of subtrees:
1999  *		a) free the subtree starting from *@top
2000  *		b) free the subtrees whose roots are stored in
2001  *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
2002  *		c) free the subtrees growing from the inode past the @chain[0].
2003  *			(no partially truncated stuff there).  */
2004 
2005 static Indirect *ext4_find_shared(struct inode *inode, int depth,
2006 			ext4_lblk_t offsets[4], Indirect chain[4], __le32 *top)
2007 {
2008 	Indirect *partial, *p;
2009 	int k, err;
2010 
2011 	*top = 0;
2012 	/* Make k index the deepest non-null offset + 1 */
2013 	for (k = depth; k > 1 && !offsets[k-1]; k--)
2014 		;
2015 	partial = ext4_get_branch(inode, k, offsets, chain, &err);
2016 	/* Writer: pointers */
2017 	if (!partial)
2018 		partial = chain + k-1;
2019 	/*
2020 	 * If the branch acquired continuation since we've looked at it -
2021 	 * fine, it should all survive and (new) top doesn't belong to us.
2022 	 */
2023 	if (!partial->key && *partial->p)
2024 		/* Writer: end */
2025 		goto no_top;
2026 	for (p = partial; p > chain && all_zeroes((__le32 *)p->bh->b_data, p->p); p--)
2027 		;
2028 	/*
2029 	 * OK, we've found the last block that must survive. The rest of our
2030 	 * branch should be detached before unlocking. However, if that rest
2031 	 * of branch is all ours and does not grow immediately from the inode
2032 	 * it's easier to cheat and just decrement partial->p.
2033 	 */
2034 	if (p == chain + k - 1 && p > chain) {
2035 		p->p--;
2036 	} else {
2037 		*top = *p->p;
2038 		/* Nope, don't do this in ext4.  Must leave the tree intact */
2039 #if 0
2040 		*p->p = 0;
2041 #endif
2042 	}
2043 	/* Writer: end */
2044 
2045 	while (partial > p) {
2046 		brelse(partial->bh);
2047 		partial--;
2048 	}
2049 no_top:
2050 	return partial;
2051 }
2052 
2053 /*
2054  * Zero a number of block pointers in either an inode or an indirect block.
2055  * If we restart the transaction we must again get write access to the
2056  * indirect block for further modification.
2057  *
2058  * We release `count' blocks on disk, but (last - first) may be greater
2059  * than `count' because there can be holes in there.
2060  */
2061 static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
2062 		struct buffer_head *bh, ext4_fsblk_t block_to_free,
2063 		unsigned long count, __le32 *first, __le32 *last)
2064 {
2065 	__le32 *p;
2066 	if (try_to_extend_transaction(handle, inode)) {
2067 		if (bh) {
2068 			BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
2069 			ext4_journal_dirty_metadata(handle, bh);
2070 		}
2071 		ext4_mark_inode_dirty(handle, inode);
2072 		ext4_journal_test_restart(handle, inode);
2073 		if (bh) {
2074 			BUFFER_TRACE(bh, "retaking write access");
2075 			ext4_journal_get_write_access(handle, bh);
2076 		}
2077 	}
2078 
2079 	/*
2080 	 * Any buffers which are on the journal will be in memory.  We find
2081 	 * them on the hash table so jbd2_journal_revoke() will run
2082 	 * jbd2_journal_forget() on them.  We've already detached each block
2083 	 * from the file, so bforget() in jbd2_journal_forget() should be safe.
2084 	 *
2085 	 * AKPM: turn on bforget in jbd2_journal_forget()!!!
2086 	 */
2087 	for (p = first; p < last; p++) {
2088 		u32 nr = le32_to_cpu(*p);
2089 		if (nr) {
2090 			struct buffer_head *tbh;
2091 
2092 			*p = 0;
2093 			tbh = sb_find_get_block(inode->i_sb, nr);
2094 			ext4_forget(handle, 0, inode, tbh, nr);
2095 		}
2096 	}
2097 
2098 	ext4_free_blocks(handle, inode, block_to_free, count, 0);
2099 }
2100 
2101 /**
2102  * ext4_free_data - free a list of data blocks
2103  * @handle:	handle for this transaction
2104  * @inode:	inode we are dealing with
2105  * @this_bh:	indirect buffer_head which contains *@first and *@last
2106  * @first:	array of block numbers
2107  * @last:	points immediately past the end of array
2108  *
2109  * We are freeing all blocks referenced from that array (numbers are stored as
2110  * little-endian 32-bit) and updating @inode->i_blocks appropriately.
2111  *
2112  * We accumulate contiguous runs of blocks to free.  Conveniently, if these
2113  * blocks are contiguous then releasing them at one time will only affect one
2114  * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
2115  * actually use a lot of journal space.
2116  *
2117  * @this_bh will be %NULL if @first and @last point into the inode's direct
2118  * block pointers.
2119  */
2120 static void ext4_free_data(handle_t *handle, struct inode *inode,
2121 			   struct buffer_head *this_bh,
2122 			   __le32 *first, __le32 *last)
2123 {
2124 	ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
2125 	unsigned long count = 0;	    /* Number of blocks in the run */
2126 	__le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
2127 					       corresponding to
2128 					       block_to_free */
2129 	ext4_fsblk_t nr;		    /* Current block # */
2130 	__le32 *p;			    /* Pointer into inode/ind
2131 					       for current block */
2132 	int err;
2133 
2134 	if (this_bh) {				/* For indirect block */
2135 		BUFFER_TRACE(this_bh, "get_write_access");
2136 		err = ext4_journal_get_write_access(handle, this_bh);
2137 		/* Important: if we can't update the indirect pointers
2138 		 * to the blocks, we can't free them. */
2139 		if (err)
2140 			return;
2141 	}
2142 
2143 	for (p = first; p < last; p++) {
2144 		nr = le32_to_cpu(*p);
2145 		if (nr) {
2146 			/* accumulate blocks to free if they're contiguous */
2147 			if (count == 0) {
2148 				block_to_free = nr;
2149 				block_to_free_p = p;
2150 				count = 1;
2151 			} else if (nr == block_to_free + count) {
2152 				count++;
2153 			} else {
2154 				ext4_clear_blocks(handle, inode, this_bh,
2155 						  block_to_free,
2156 						  count, block_to_free_p, p);
2157 				block_to_free = nr;
2158 				block_to_free_p = p;
2159 				count = 1;
2160 			}
2161 		}
2162 	}
2163 
2164 	if (count > 0)
2165 		ext4_clear_blocks(handle, inode, this_bh, block_to_free,
2166 				  count, block_to_free_p, p);
2167 
2168 	if (this_bh) {
2169 		BUFFER_TRACE(this_bh, "call ext4_journal_dirty_metadata");
2170 		ext4_journal_dirty_metadata(handle, this_bh);
2171 	}
2172 }
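
/*
 * A worked trace of the run accumulation above: with pointer contents
 * {100, 101, 0, 102, 200}, blocks 100 and 101 start a run, the hole is
 * skipped without breaking it, and 102 == 100 + 2 extends the run to
 * count 3.  Block 200 then flushes it via ext4_clear_blocks(..., 100, 3,
 * first, first + 4): the pointer span is four entries although count is
 * 3, which is exactly the hole case ext4_clear_blocks() documents.  The
 * final run {200, count 1} is flushed after the loop.
 */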
2173 
2174 /**
2175  *	ext4_free_branches - free an array of branches
2176  *	@handle: JBD handle for this transaction
2177  *	@inode:	inode we are dealing with
2178  *	@parent_bh: the buffer_head which contains *@first and *@last
2179  *	@first:	array of block numbers
2180  *	@last:	pointer immediately past the end of array
2181  *	@depth:	depth of the branches to free
2182  *
2183  *	We are freeing all blocks referenced from these branches (numbers are
2184  *	stored as little-endian 32-bit) and updating @inode->i_blocks
2185  *	appropriately.
2186  */
2187 static void ext4_free_branches(handle_t *handle, struct inode *inode,
2188 			       struct buffer_head *parent_bh,
2189 			       __le32 *first, __le32 *last, int depth)
2190 {
2191 	ext4_fsblk_t nr;
2192 	__le32 *p;
2193 
2194 	if (is_handle_aborted(handle))
2195 		return;
2196 
2197 	if (depth--) {
2198 		struct buffer_head *bh;
2199 		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
2200 		p = last;
2201 		while (--p >= first) {
2202 			nr = le32_to_cpu(*p);
2203 			if (!nr)
2204 				continue;		/* A hole */
2205 
2206 			/* Go read the buffer for the next level down */
2207 			bh = sb_bread(inode->i_sb, nr);
2208 
2209 			/*
2210 			 * A read failure? Report error and clear slot
2211 			 * (should be rare).
2212 			 */
2213 			if (!bh) {
2214 				ext4_error(inode->i_sb, "ext4_free_branches",
2215 					   "Read failure, inode=%lu, block=%llu",
2216 					   inode->i_ino, nr);
2217 				continue;
2218 			}
2219 
2220 			/* This zaps the entire block.  Bottom up. */
2221 			BUFFER_TRACE(bh, "free child branches");
2222 			ext4_free_branches(handle, inode, bh,
2223 					   (__le32*)bh->b_data,
2224 					   (__le32*)bh->b_data + addr_per_block,
2225 					   depth);
2226 
2227 			/*
2228 			 * We've probably journalled the indirect block several
2229 			 * times during the truncate.  But it's no longer
2230 			 * needed and we now drop it from the transaction via
2231 			 * jbd2_journal_revoke().
2232 			 *
2233 			 * That's easy if it's exclusively part of this
2234 			 * transaction.  But if it's part of the committing
2235 			 * transaction then jbd2_journal_forget() will simply
2236 			 * brelse() it.  That means that if the underlying
2237 			 * block is reallocated in ext4_get_block(),
2238 			 * unmap_underlying_metadata() will find this block
2239 			 * and will try to get rid of it.  damn, damn.
2240 			 *
2241 			 * If this block has already been committed to the
2242 			 * journal, a revoke record will be written.  And
2243 			 * revoke records must be emitted *before* clearing
2244 			 * this block's bit in the bitmaps.
2245 			 */
2246 			ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
2247 
2248 			/*
2249 			 * Everything below this pointer has been
2250 			 * released.  Now let this top-of-subtree go.
2251 			 *
2252 			 * We want the freeing of this indirect block to be
2253 			 * atomic in the journal with the updating of the
2254 			 * bitmap block which owns it.  So make some room in
2255 			 * the journal.
2256 			 *
2257 			 * We zero the parent pointer *after* freeing its
2258 			 * pointee in the bitmaps, so if extend_transaction()
2259 			 * for some reason fails to put the bitmap changes and
2260 			 * the release into the same transaction, recovery
2261 			 * will merely complain about releasing a free block,
2262 			 * rather than leaking blocks.
2263 			 */
2264 			if (is_handle_aborted(handle))
2265 				return;
2266 			if (try_to_extend_transaction(handle, inode)) {
2267 				ext4_mark_inode_dirty(handle, inode);
2268 				ext4_journal_test_restart(handle, inode);
2269 			}
2270 
2271 			ext4_free_blocks(handle, inode, nr, 1, 1);
2272 
2273 			if (parent_bh) {
2274 				/*
2275 				 * The block which we have just freed is
2276 				 * pointed to by an indirect block: journal it
2277 				 */
2278 				BUFFER_TRACE(parent_bh, "get_write_access");
2279 				if (!ext4_journal_get_write_access(handle,
2280 								   parent_bh)){
2281 					*p = 0;
2282 					BUFFER_TRACE(parent_bh,
2283 					"call ext4_journal_dirty_metadata");
2284 					ext4_journal_dirty_metadata(handle,
2285 								    parent_bh);
2286 				}
2287 			}
2288 		}
2289 	} else {
2290 		/* We have reached the bottom of the tree. */
2291 		BUFFER_TRACE(parent_bh, "free data blocks");
2292 		ext4_free_data(handle, inode, parent_bh, first, last);
2293 	}
2294 }
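
/*
 * Recursion shape of the above: each level strips one layer of
 * indirection, so a triple-indirect branch is freed as
 * free_branches(depth 3) -> free_branches(depth 2) -> free_branches(depth 1)
 * -> ext4_free_data() on the final block of data pointers - bottom up
 * and right to left, as the ext4_truncate() comment below requires.
 */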
2295 
2296 /*
2297  * ext4_truncate()
2298  *
2299  * We block out ext4_get_block() block instantiations across the entire
2300  * transaction, and VFS/VM ensures that ext4_truncate() cannot run
2301  * simultaneously on behalf of the same inode.
2302  *
2303  * As we work through the truncate and commit bits of it to the journal there
2304  * is one core, guiding principle: the file's tree must always be consistent on
2305  * disk.  We must be able to restart the truncate after a crash.
2306  *
2307  * The file's tree may be transiently inconsistent in memory (although it
2308  * probably isn't), but whenever we close off and commit a journal transaction,
2309  * the contents of (the filesystem + the journal) must be consistent and
2310  * restartable.  It's pretty simple, really: bottom up, right to left (although
2311  * left-to-right works OK too).
2312  *
2313  * Note that at recovery time, journal replay occurs *before* the restart of
2314  * truncate against the orphan inode list.
2315  *
2316  * The committed inode has the new, desired i_size (which is the same as
2317  * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
2318  * that this inode's truncate did not complete and it will again call
2319  * ext4_truncate() to have another go.  So there will be instantiated blocks
2320  * to the right of the truncation point in a crashed ext4 filesystem.  But
2321  * that's fine - as long as they are linked from the inode, the post-crash
2322  * ext4_truncate() run will find them and release them.
2323  */
2324 void ext4_truncate(struct inode *inode)
2325 {
2326 	handle_t *handle;
2327 	struct ext4_inode_info *ei = EXT4_I(inode);
2328 	__le32 *i_data = ei->i_data;
2329 	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
2330 	struct address_space *mapping = inode->i_mapping;
2331 	ext4_lblk_t offsets[4];
2332 	Indirect chain[4];
2333 	Indirect *partial;
2334 	__le32 nr = 0;
2335 	int n;
2336 	ext4_lblk_t last_block;
2337 	unsigned blocksize = inode->i_sb->s_blocksize;
2338 	struct page *page;
2339 
2340 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
2341 	    S_ISLNK(inode->i_mode)))
2342 		return;
2343 	if (ext4_inode_is_fast_symlink(inode))
2344 		return;
2345 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2346 		return;
2347 
2348 	/*
2349 	 * We have to lock the EOF page here, because lock_page() nests
2350 	 * outside jbd2_journal_start().
2351 	 */
2352 	if ((inode->i_size & (blocksize - 1)) == 0) {
2353 		/* Block boundary? Nothing to do */
2354 		page = NULL;
2355 	} else {
2356 		page = grab_cache_page(mapping,
2357 				inode->i_size >> PAGE_CACHE_SHIFT);
2358 		if (!page)
2359 			return;
2360 	}
2361 
2362 	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
2363 		ext4_ext_truncate(inode, page);
2364 		return;
2365 	}
2366 
2367 	handle = start_transaction(inode);
2368 	if (IS_ERR(handle)) {
2369 		if (page) {
2370 			clear_highpage(page);
2371 			flush_dcache_page(page);
2372 			unlock_page(page);
2373 			page_cache_release(page);
2374 		}
2375 		return;		/* AKPM: return what? */
2376 	}
2377 
2378 	last_block = (inode->i_size + blocksize-1)
2379 					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
2380 
2381 	if (page)
2382 		ext4_block_truncate_page(handle, page, mapping, inode->i_size);
2383 
2384 	n = ext4_block_to_path(inode, last_block, offsets, NULL);
2385 	if (n == 0)
2386 		goto out_stop;	/* error */
2387 
2388 	/*
2389 	 * OK.  This truncate is going to happen.  We add the inode to the
2390 	 * orphan list, so that if this truncate spans multiple transactions,
2391 	 * and we crash, we will resume the truncate when the filesystem
2392 	 * recovers.  It also marks the inode dirty, to catch the new size.
2393 	 *
2394 	 * Implication: the file must always be in a sane, consistent
2395 	 * truncatable state while each transaction commits.
2396 	 */
2397 	if (ext4_orphan_add(handle, inode))
2398 		goto out_stop;
2399 
2400 	/*
2401 	 * The orphan list entry will now protect us from any crash which
2402 	 * occurs before the truncate completes, so it is now safe to propagate
2403 	 * the new, shorter inode size (held for now in i_size) into the
2404 	 * on-disk inode. We do this via i_disksize, which is the value which
2405 	 * ext4 *really* writes onto the disk inode.
2406 	 */
2407 	ei->i_disksize = inode->i_size;
2408 
2409 	/*
2410 	 * From here we block out all ext4_get_block() callers who want to
2411 	 * modify the block allocation tree.
2412 	 */
2413 	down_write(&ei->i_data_sem);
2414 
2415 	if (n == 1) {		/* direct blocks */
2416 		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
2417 			       i_data + EXT4_NDIR_BLOCKS);
2418 		goto do_indirects;
2419 	}
2420 
2421 	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
2422 	/* Kill the top of shared branch (not detached) */
2423 	if (nr) {
2424 		if (partial == chain) {
2425 			/* Shared branch grows from the inode */
2426 			ext4_free_branches(handle, inode, NULL,
2427 					   &nr, &nr+1, (chain+n-1) - partial);
2428 			*partial->p = 0;
2429 			/*
2430 			 * We mark the inode dirty prior to restart,
2431 			 * and prior to stop.  No need for it here.
2432 			 */
2433 		} else {
2434 			/* Shared branch grows from an indirect block */
2435 			BUFFER_TRACE(partial->bh, "get_write_access");
2436 			ext4_free_branches(handle, inode, partial->bh,
2437 					partial->p,
2438 					partial->p+1, (chain+n-1) - partial);
2439 		}
2440 	}
2441 	/* Clear the ends of indirect blocks on the shared branch */
2442 	while (partial > chain) {
2443 		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
2444 				   (__le32*)partial->bh->b_data+addr_per_block,
2445 				   (chain+n-1) - partial);
2446 		BUFFER_TRACE(partial->bh, "call brelse");
2447 		brelse(partial->bh);
2448 		partial--;
2449 	}
2450 do_indirects:
2451 	/* Kill the remaining (whole) subtrees */
2452 	switch (offsets[0]) {
2453 	default:
2454 		nr = i_data[EXT4_IND_BLOCK];
2455 		if (nr) {
2456 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
2457 			i_data[EXT4_IND_BLOCK] = 0;
2458 		}
2459 	case EXT4_IND_BLOCK:
2460 		nr = i_data[EXT4_DIND_BLOCK];
2461 		if (nr) {
2462 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
2463 			i_data[EXT4_DIND_BLOCK] = 0;
2464 		}
2465 	case EXT4_DIND_BLOCK:
2466 		nr = i_data[EXT4_TIND_BLOCK];
2467 		if (nr) {
2468 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
2469 			i_data[EXT4_TIND_BLOCK] = 0;
2470 		}
2471 	case EXT4_TIND_BLOCK:
2472 		;
2473 	}
2474 
2475 	ext4_discard_reservation(inode);
2476 
2477 	up_write(&ei->i_data_sem);
2478 	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
2479 	ext4_mark_inode_dirty(handle, inode);
2480 
2481 	/*
2482 	 * In a multi-transaction truncate, we only make the final transaction
2483 	 * synchronous
2484 	 */
2485 	if (IS_SYNC(inode))
2486 		handle->h_sync = 1;
2487 out_stop:
2488 	/*
2489 	 * If this was a simple ftruncate(), and the file will remain alive
2490 	 * then we need to clear up the orphan record which we created above.
2491 	 * However, if this was a real unlink then we were called by
2492 	 * ext4_delete_inode(), and we allow that function to clean up the
2493 	 * orphan info for us.
2494 	 */
2495 	if (inode->i_nlink)
2496 		ext4_orphan_del(handle, inode);
2497 
2498 	ext4_journal_stop(handle);
2499 }
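
/*
 * A note on the do_indirects fall-through above: every subtree rooted in
 * a top-level slot past the truncation point's own slot lies entirely
 * beyond the new EOF, so it is freed whole.  For example, with 4K blocks
 * a truncate to last_block 20 yields offsets {EXT4_IND_BLOCK, 8}: blocks
 * 0..11 and indirect entries 0..7 survive, entries 8..1023 are freed by
 * the partial-branch code, and the switch then enters at the
 * EXT4_IND_BLOCK case and falls through to free the whole double- and
 * triple-indirect trees.
 */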
2500 
2501 static ext4_fsblk_t ext4_get_inode_block(struct super_block *sb,
2502 		unsigned long ino, struct ext4_iloc *iloc)
2503 {
2504 	unsigned long desc, group_desc;
2505 	ext4_group_t block_group;
2506 	unsigned long offset;
2507 	ext4_fsblk_t block;
2508 	struct buffer_head *bh;
2509 	struct ext4_group_desc *gdp;
2510 
2511 	if (!ext4_valid_inum(sb, ino)) {
2512 		/*
2513 		 * This error is already checked for in namei.c unless we are
2514 		 * looking at an NFS filehandle, in which case no error
2515 		 * report is needed
2516 		 */
2517 		return 0;
2518 	}
2519 
2520 	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
2521 	if (block_group >= EXT4_SB(sb)->s_groups_count) {
2522 		ext4_error(sb,"ext4_get_inode_block","group >= groups count");
2523 		return 0;
2524 	}
2525 	smp_rmb();
2526 	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
2527 	desc = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2528 	bh = EXT4_SB(sb)->s_group_desc[group_desc];
2529 	if (!bh) {
2530 		ext4_error(sb, "ext4_get_inode_block",
2531 			    "Descriptor not loaded");
2532 		return 0;
2533 	}
2534 
2535 	gdp = (struct ext4_group_desc *)((__u8 *)bh->b_data +
2536 		desc * EXT4_DESC_SIZE(sb));
2537 	/*
2538 	 * Figure out the offset within the block group inode table
2539 	 */
2540 	offset = ((ino - 1) % EXT4_INODES_PER_GROUP(sb)) *
2541 		EXT4_INODE_SIZE(sb);
2542 	block = ext4_inode_table(sb, gdp) +
2543 		(offset >> EXT4_BLOCK_SIZE_BITS(sb));
2544 
2545 	iloc->block_group = block_group;
2546 	iloc->offset = offset & (EXT4_BLOCK_SIZE(sb) - 1);
2547 	return block;
2548 }
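
/*
 * Worked example of the lookup above, assuming 4K blocks, 256-byte
 * inodes and 8192 inodes per group: ino 10000 gives block_group =
 * 9999 / 8192 = 1 and a byte offset of (9999 % 8192) * 256 = 462592
 * into that group's inode table, i.e. table block 462592 >> 12 = 112
 * with iloc->offset = 462592 & 4095 = 3840.
 */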
2549 
2550 /*
2551  * ext4_get_inode_loc returns with an extra refcount against the inode's
2552  * underlying buffer_head on success. If 'in_mem' is true, we have all
2553  * data in memory that is needed to recreate the on-disk version of this
2554  * inode.
2555  */
2556 static int __ext4_get_inode_loc(struct inode *inode,
2557 				struct ext4_iloc *iloc, int in_mem)
2558 {
2559 	ext4_fsblk_t block;
2560 	struct buffer_head *bh;
2561 
2562 	block = ext4_get_inode_block(inode->i_sb, inode->i_ino, iloc);
2563 	if (!block)
2564 		return -EIO;
2565 
2566 	bh = sb_getblk(inode->i_sb, block);
2567 	if (!bh) {
2568 		ext4_error(inode->i_sb, "ext4_get_inode_loc",
2569 				"unable to read inode block - "
2570 				"inode=%lu, block=%llu",
2571 				 inode->i_ino, block);
2572 		return -EIO;
2573 	}
2574 	if (!buffer_uptodate(bh)) {
2575 		lock_buffer(bh);
2576 		if (buffer_uptodate(bh)) {
2577 			/* someone brought it uptodate while we waited */
2578 			unlock_buffer(bh);
2579 			goto has_buffer;
2580 		}
2581 
2582 		/*
2583 		 * If we have all information of the inode in memory and this
2584 		 * is the only valid inode in the block, we need not read the
2585 		 * block.
2586 		 */
2587 		if (in_mem) {
2588 			struct buffer_head *bitmap_bh;
2589 			struct ext4_group_desc *desc;
2590 			int inodes_per_buffer;
2591 			int inode_offset, i;
2592 			ext4_group_t block_group;
2593 			int start;
2594 
2595 			block_group = (inode->i_ino - 1) /
2596 					EXT4_INODES_PER_GROUP(inode->i_sb);
2597 			inodes_per_buffer = bh->b_size /
2598 				EXT4_INODE_SIZE(inode->i_sb);
2599 			inode_offset = ((inode->i_ino - 1) %
2600 					EXT4_INODES_PER_GROUP(inode->i_sb));
2601 			start = inode_offset & ~(inodes_per_buffer - 1);
2602 
2603 			/* Is the inode bitmap in cache? */
2604 			desc = ext4_get_group_desc(inode->i_sb,
2605 						block_group, NULL);
2606 			if (!desc)
2607 				goto make_io;
2608 
2609 			bitmap_bh = sb_getblk(inode->i_sb,
2610 				ext4_inode_bitmap(inode->i_sb, desc));
2611 			if (!bitmap_bh)
2612 				goto make_io;
2613 
2614 			/*
2615 			 * If the inode bitmap isn't in cache then the
2616 			 * optimisation may end up performing two reads instead
2617 			 * of one, so skip it.
2618 			 */
2619 			if (!buffer_uptodate(bitmap_bh)) {
2620 				brelse(bitmap_bh);
2621 				goto make_io;
2622 			}
2623 			for (i = start; i < start + inodes_per_buffer; i++) {
2624 				if (i == inode_offset)
2625 					continue;
2626 				if (ext4_test_bit(i, bitmap_bh->b_data))
2627 					break;
2628 			}
2629 			brelse(bitmap_bh);
2630 			if (i == start + inodes_per_buffer) {
2631 				/* all other inodes are free, so skip I/O */
2632 				memset(bh->b_data, 0, bh->b_size);
2633 				set_buffer_uptodate(bh);
2634 				unlock_buffer(bh);
2635 				goto has_buffer;
2636 			}
2637 		}
2638 
2639 make_io:
2640 		/*
2641 		 * There are other valid inodes in the buffer, this inode
2642 		 * has in-inode xattrs, or we don't have this inode in memory.
2643 		 * Read the block from disk.
2644 		 */
2645 		get_bh(bh);
2646 		bh->b_end_io = end_buffer_read_sync;
2647 		submit_bh(READ_META, bh);
2648 		wait_on_buffer(bh);
2649 		if (!buffer_uptodate(bh)) {
2650 			ext4_error(inode->i_sb, "ext4_get_inode_loc",
2651 					"unable to read inode block - "
2652 					"inode=%lu, block=%llu",
2653 					inode->i_ino, block);
2654 			brelse(bh);
2655 			return -EIO;
2656 		}
2657 	}
2658 has_buffer:
2659 	iloc->bh = bh;
2660 	return 0;
2661 }
2662 
2663 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
2664 {
2665 	/* We have all inode data except xattrs in memory here. */
2666 	return __ext4_get_inode_loc(inode, iloc,
2667 		!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
2668 }
2669 
2670 void ext4_set_inode_flags(struct inode *inode)
2671 {
2672 	unsigned int flags = EXT4_I(inode)->i_flags;
2673 
2674 	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2675 	if (flags & EXT4_SYNC_FL)
2676 		inode->i_flags |= S_SYNC;
2677 	if (flags & EXT4_APPEND_FL)
2678 		inode->i_flags |= S_APPEND;
2679 	if (flags & EXT4_IMMUTABLE_FL)
2680 		inode->i_flags |= S_IMMUTABLE;
2681 	if (flags & EXT4_NOATIME_FL)
2682 		inode->i_flags |= S_NOATIME;
2683 	if (flags & EXT4_DIRSYNC_FL)
2684 		inode->i_flags |= S_DIRSYNC;
2685 }
2686 
2687 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
2688 void ext4_get_inode_flags(struct ext4_inode_info *ei)
2689 {
2690 	unsigned int flags = ei->vfs_inode.i_flags;
2691 
2692 	ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
2693 			EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
2694 	if (flags & S_SYNC)
2695 		ei->i_flags |= EXT4_SYNC_FL;
2696 	if (flags & S_APPEND)
2697 		ei->i_flags |= EXT4_APPEND_FL;
2698 	if (flags & S_IMMUTABLE)
2699 		ei->i_flags |= EXT4_IMMUTABLE_FL;
2700 	if (flags & S_NOATIME)
2701 		ei->i_flags |= EXT4_NOATIME_FL;
2702 	if (flags & S_DIRSYNC)
2703 		ei->i_flags |= EXT4_DIRSYNC_FL;
2704 }
2705 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
2706 					struct ext4_inode_info *ei)
2707 {
2708 	blkcnt_t i_blocks;
2709 	struct inode *inode = &(ei->vfs_inode);
2710 	struct super_block *sb = inode->i_sb;
2711 
2712 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
2713 				EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
2714 		/* we are using combined 48 bit field */
2715 		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
2716 					le32_to_cpu(raw_inode->i_blocks_lo);
2717 		if (ei->i_flags & EXT4_HUGE_FILE_FL) {
2718 			/* i_blocks is counted in file system block units */
2719 			return i_blocks  << (inode->i_blkbits - 9);
2720 		} else {
2721 			return i_blocks;
2722 		}
2723 	} else {
2724 		return le32_to_cpu(raw_inode->i_blocks_lo);
2725 	}
2726 }
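
/*
 * Example of the decoding above: raw i_blocks_high = 1, i_blocks_lo = 0
 * yields a 48-bit value of 1 << 32.  Without EXT4_HUGE_FILE_FL that is
 * already in 512-byte units (2TB worth of sectors); with the flag set
 * and 4K blocks it counts file system blocks, so it is shifted left by
 * i_blkbits - 9 = 3 to give 1 << 35 sectors (a 16TB allocation).
 */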
2727 
2728 struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
2729 {
2730 	struct ext4_iloc iloc;
2731 	struct ext4_inode *raw_inode;
2732 	struct ext4_inode_info *ei;
2733 	struct buffer_head *bh;
2734 	struct inode *inode;
2735 	long ret;
2736 	int block;
2737 
2738 	inode = iget_locked(sb, ino);
2739 	if (!inode)
2740 		return ERR_PTR(-ENOMEM);
2741 	if (!(inode->i_state & I_NEW))
2742 		return inode;
2743 
2744 	ei = EXT4_I(inode);
2745 #ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
2746 	ei->i_acl = EXT4_ACL_NOT_CACHED;
2747 	ei->i_default_acl = EXT4_ACL_NOT_CACHED;
2748 #endif
2749 	ei->i_block_alloc_info = NULL;
2750 
2751 	ret = __ext4_get_inode_loc(inode, &iloc, 0);
2752 	if (ret < 0)
2753 		goto bad_inode;
2754 	bh = iloc.bh;
2755 	raw_inode = ext4_raw_inode(&iloc);
2756 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
2757 	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
2758 	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
2759 	if (!(test_opt(inode->i_sb, NO_UID32))) {
2760 		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
2761 		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
2762 	}
2763 	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
2764 
2765 	ei->i_state = 0;
2766 	ei->i_dir_start_lookup = 0;
2767 	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
2768 	/* We now have enough fields to check if the inode was active or not.
2769 	 * This is needed because nfsd might try to access dead inodes
2770 	 * the test is the same one that e2fsck uses
2771 	 * NeilBrown 1999oct15
2772 	 */
2773 	if (inode->i_nlink == 0) {
2774 		if (inode->i_mode == 0 ||
2775 		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
2776 			/* this inode is deleted */
2777 			brelse(bh);
2778 			ret = -ESTALE;
2779 			goto bad_inode;
2780 		}
2781 		/* The only unlinked inodes we let through here have
2782 		 * valid i_mode and are being read by the orphan
2783 		 * recovery code: that's fine, we're about to complete
2784 		 * the process of deleting those. */
2785 	}
2786 	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
2787 	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
2788 	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
2789 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
2790 	    cpu_to_le32(EXT4_OS_HURD)) {
2791 		ei->i_file_acl |=
2792 			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
2793 	}
2794 	inode->i_size = ext4_isize(raw_inode);
2795 	ei->i_disksize = inode->i_size;
2796 	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
2797 	ei->i_block_group = iloc.block_group;
2798 	/*
2799 	 * NOTE! The in-memory inode i_data array is in little-endian order
2800 	 * even on big-endian machines: we do NOT byteswap the block numbers!
2801 	 */
2802 	for (block = 0; block < EXT4_N_BLOCKS; block++)
2803 		ei->i_data[block] = raw_inode->i_block[block];
2804 	INIT_LIST_HEAD(&ei->i_orphan);
2805 
2806 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
2807 		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
2808 		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
2809 		    EXT4_INODE_SIZE(inode->i_sb)) {
2810 			brelse(bh);
2811 			ret = -EIO;
2812 			goto bad_inode;
2813 		}
2814 		if (ei->i_extra_isize == 0) {
2815 			/* The extra space is currently unused. Use it. */
2816 			ei->i_extra_isize = sizeof(struct ext4_inode) -
2817 					    EXT4_GOOD_OLD_INODE_SIZE;
2818 		} else {
2819 			__le32 *magic = (void *)raw_inode +
2820 					EXT4_GOOD_OLD_INODE_SIZE +
2821 					ei->i_extra_isize;
2822 			if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
2823 				 ei->i_state |= EXT4_STATE_XATTR;
2824 		}
2825 	} else
2826 		ei->i_extra_isize = 0;
2827 
2828 	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
2829 	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
2830 	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
2831 	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
2832 
2833 	inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
2834 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
2835 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
2836 			inode->i_version |=
2837 			(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
2838 	}
2839 
2840 	if (S_ISREG(inode->i_mode)) {
2841 		inode->i_op = &ext4_file_inode_operations;
2842 		inode->i_fop = &ext4_file_operations;
2843 		ext4_set_aops(inode);
2844 	} else if (S_ISDIR(inode->i_mode)) {
2845 		inode->i_op = &ext4_dir_inode_operations;
2846 		inode->i_fop = &ext4_dir_operations;
2847 	} else if (S_ISLNK(inode->i_mode)) {
2848 		if (ext4_inode_is_fast_symlink(inode))
2849 			inode->i_op = &ext4_fast_symlink_inode_operations;
2850 		else {
2851 			inode->i_op = &ext4_symlink_inode_operations;
2852 			ext4_set_aops(inode);
2853 		}
2854 	} else {
2855 		inode->i_op = &ext4_special_inode_operations;
2856 		if (raw_inode->i_block[0])
2857 			init_special_inode(inode, inode->i_mode,
2858 			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
2859 		else
2860 			init_special_inode(inode, inode->i_mode,
2861 			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
2862 	}
2863 	brelse(iloc.bh);
2864 	ext4_set_inode_flags(inode);
2865 	unlock_new_inode(inode);
2866 	return inode;
2867 
2868 bad_inode:
2869 	iget_failed(inode);
2870 	return ERR_PTR(ret);
2871 }
2872 
2873 static int ext4_inode_blocks_set(handle_t *handle,
2874 				struct ext4_inode *raw_inode,
2875 				struct ext4_inode_info *ei)
2876 {
2877 	struct inode *inode = &(ei->vfs_inode);
2878 	u64 i_blocks = inode->i_blocks;
2879 	struct super_block *sb = inode->i_sb;
2880 	int err = 0;
2881 
2882 	if (i_blocks <= ~0U) {
2883 		/*
2884 		 * i_blocks can be represented in a 32 bit variable
2885 		 * as a multiple of 512 bytes
2886 		 */
2887 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
2888 		raw_inode->i_blocks_high = 0;
2889 		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
2890 	} else if (i_blocks <= 0xffffffffffffULL) {
2891 		/*
2892 		 * i_blocks can be represented in a 48 bit variable
2893 		 * as a multiple of 512 bytes
2894 		 */
2895 		err = ext4_update_rocompat_feature(handle, sb,
2896 					    EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
2897 		if (err)
2898 			goto  err_out;
2899 		/* i_blocks is stored in the split 48 bit fields */
2900 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
2901 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
2902 		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
2903 	} else {
2904 		/*
2905 		 * i_blocks should be represented in a 48 bit variable
2906 		 * as a multiple of the file system block size
2907 		 */
2908 		err = ext4_update_rocompat_feature(handle, sb,
2909 					    EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
2910 		if (err)
2911 			goto  err_out;
2912 		ei->i_flags |= EXT4_HUGE_FILE_FL;
2913 		/* i_blocks is stored in units of the file system block size */
2914 		i_blocks = i_blocks >> (inode->i_blkbits - 9);
2915 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
2916 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
2917 	}
2918 err_out:
2919 	return err;
2920 }
2921 
2922 /*
2923  * Post the struct inode info into an on-disk inode location in the
2924  * buffer-cache.  This gobbles the caller's reference to the
2925  * buffer_head in the inode location struct.
2926  *
2927  * The caller must have write access to iloc->bh.
2928  */
2929 static int ext4_do_update_inode(handle_t *handle,
2930 				struct inode *inode,
2931 				struct ext4_iloc *iloc)
2932 {
2933 	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
2934 	struct ext4_inode_info *ei = EXT4_I(inode);
2935 	struct buffer_head *bh = iloc->bh;
2936 	int err = 0, rc, block;
2937 
2938 	/* For fields not tracked in the in-memory inode,
2939 	 * initialise them to zero for new inodes. */
2940 	if (ei->i_state & EXT4_STATE_NEW)
2941 		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
2942 
2943 	ext4_get_inode_flags(ei);
2944 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
2945 	if (!(test_opt(inode->i_sb, NO_UID32))) {
2946 		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
2947 		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
2948 /*
2949  * Fix up interoperability with old kernels. Otherwise, old inodes get
2950  * re-used with the upper 16 bits of the uid/gid intact
2951  */
2952 		if (!ei->i_dtime) {
2953 			raw_inode->i_uid_high =
2954 				cpu_to_le16(high_16_bits(inode->i_uid));
2955 			raw_inode->i_gid_high =
2956 				cpu_to_le16(high_16_bits(inode->i_gid));
2957 		} else {
2958 			raw_inode->i_uid_high = 0;
2959 			raw_inode->i_gid_high = 0;
2960 		}
2961 	} else {
2962 		raw_inode->i_uid_low =
2963 			cpu_to_le16(fs_high2lowuid(inode->i_uid));
2964 		raw_inode->i_gid_low =
2965 			cpu_to_le16(fs_high2lowgid(inode->i_gid));
2966 		raw_inode->i_uid_high = 0;
2967 		raw_inode->i_gid_high = 0;
2968 	}
2969 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
2970 
2971 	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
2972 	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
2973 	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
2974 	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
2975 
2976 	if (ext4_inode_blocks_set(handle, raw_inode, ei))
2977 		goto out_brelse;
2978 	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
2979 	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
2980 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
2981 	    cpu_to_le32(EXT4_OS_HURD))
2982 		raw_inode->i_file_acl_high =
2983 			cpu_to_le16(ei->i_file_acl >> 32);
2984 	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
2985 	ext4_isize_set(raw_inode, ei->i_disksize);
2986 	if (ei->i_disksize > 0x7fffffffULL) {
2987 		struct super_block *sb = inode->i_sb;
2988 		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
2989 				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
2990 				EXT4_SB(sb)->s_es->s_rev_level ==
2991 				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
2992 			/* If this is the first large file
2993 			 * created, add a flag to the superblock.
2994 			 */
2995 			err = ext4_journal_get_write_access(handle,
2996 					EXT4_SB(sb)->s_sbh);
2997 			if (err)
2998 				goto out_brelse;
2999 			ext4_update_dynamic_rev(sb);
3000 			EXT4_SET_RO_COMPAT_FEATURE(sb,
3001 					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
3002 			sb->s_dirt = 1;
3003 			handle->h_sync = 1;
3004 			err = ext4_journal_dirty_metadata(handle,
3005 					EXT4_SB(sb)->s_sbh);
3006 		}
3007 	}
3008 	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
3009 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
3010 		if (old_valid_dev(inode->i_rdev)) {
3011 			raw_inode->i_block[0] =
3012 				cpu_to_le32(old_encode_dev(inode->i_rdev));
3013 			raw_inode->i_block[1] = 0;
3014 		} else {
3015 			raw_inode->i_block[0] = 0;
3016 			raw_inode->i_block[1] =
3017 				cpu_to_le32(new_encode_dev(inode->i_rdev));
3018 			raw_inode->i_block[2] = 0;
3019 		}
3020 	} else for (block = 0; block < EXT4_N_BLOCKS; block++)
3021 		raw_inode->i_block[block] = ei->i_data[block];
3022 
3023 	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
3024 	if (ei->i_extra_isize) {
3025 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
3026 			raw_inode->i_version_hi =
3027 			cpu_to_le32(inode->i_version >> 32);
3028 		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
3029 	}
3030 
3031 
3032 	BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
3033 	rc = ext4_journal_dirty_metadata(handle, bh);
3034 	if (!err)
3035 		err = rc;
3036 	ei->i_state &= ~EXT4_STATE_NEW;
3037 
3038 out_brelse:
3039 	brelse(bh);
3040 	ext4_std_error(inode->i_sb, err);
3041 	return err;
3042 }
3043 
3044 /*
3045  * ext4_write_inode()
3046  *
3047  * We are called from a few places:
3048  *
3049  * - Within generic_file_write() for O_SYNC files.
3050  *   Here, there will be no transaction running. We wait for any running
3051  *   transaction to commit.
3052  *
3053  * - Within sys_sync(), kupdate and such.
3054  *   We wait on commit, if told to.
3055  *
3056  * - Within prune_icache() (PF_MEMALLOC == true)
3057  *   Here we simply return.  We can't afford to block kswapd on the
3058  *   journal commit.
3059  *
3060  * In all cases it is actually safe for us to return without doing anything,
3061  * because the inode has been copied into a raw inode buffer in
3062  * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
3063  * knfsd.
3064  *
3065  * Note that we are absolutely dependent upon all inode dirtiers doing the
3066  * right thing: they *must* call mark_inode_dirty() after dirtying info in
3067  * which we are interested.
3068  *
3069  * It would be a bug for them to not do this.  The code:
3070  *
3071  *	mark_inode_dirty(inode)
3072  *	stuff();
3073  *	inode->i_size = expr;
3074  *
3075  * is in error because a kswapd-driven write_inode() could occur while
3076  * `stuff()' is running, and the new i_size will be lost.  Plus the inode
3077  * will no longer be on the superblock's dirty inode list.
3078  */
3079 int ext4_write_inode(struct inode *inode, int wait)
3080 {
3081 	if (current->flags & PF_MEMALLOC)
3082 		return 0;
3083 
3084 	if (ext4_journal_current_handle()) {
3085 		jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
3086 		dump_stack();
3087 		return -EIO;
3088 	}
3089 
3090 	if (!wait)
3091 		return 0;
3092 
3093 	return ext4_force_commit(inode->i_sb);
3094 }
3095 
3096 /*
3097  * ext4_setattr()
3098  *
3099  * Called from notify_change.
3100  *
3101  * We want to trap VFS attempts to truncate the file as soon as
3102  * possible.  In particular, we want to make sure that when the VFS
3103  * shrinks i_size, we put the inode on the orphan list and modify
3104  * i_disksize immediately, so that during the subsequent flushing of
3105  * dirty pages and freeing of disk blocks, we can guarantee that any
3106  * commit will leave the blocks being flushed in an unused state on
3107  * disk.  (On recovery, the inode will get truncated and the blocks will
3108  * be freed, so we have a strong guarantee that no future commit will
3109  * leave these blocks visible to the user.)
3110  *
3111  * Called with inode->sem down.
3112  */
3113 int ext4_setattr(struct dentry *dentry, struct iattr *attr)
3114 {
3115 	struct inode *inode = dentry->d_inode;
3116 	int error, rc = 0;
3117 	const unsigned int ia_valid = attr->ia_valid;
3118 
3119 	error = inode_change_ok(inode, attr);
3120 	if (error)
3121 		return error;
3122 
3123 	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
3124 		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
3125 		handle_t *handle;
3126 
3127 		/* (user+group)*(old+new) structure, inode write (sb,
3128 		 * inode block, ? - but truncate inode update has it) */
3129 		handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+
3130 					EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
3131 		if (IS_ERR(handle)) {
3132 			error = PTR_ERR(handle);
3133 			goto err_out;
3134 		}
3135 		error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
3136 		if (error) {
3137 			ext4_journal_stop(handle);
3138 			return error;
3139 		}
3140 		/* Update corresponding info in inode so that everything is in
3141 		 * one transaction */
3142 		if (attr->ia_valid & ATTR_UID)
3143 			inode->i_uid = attr->ia_uid;
3144 		if (attr->ia_valid & ATTR_GID)
3145 			inode->i_gid = attr->ia_gid;
3146 		error = ext4_mark_inode_dirty(handle, inode);
3147 		ext4_journal_stop(handle);
3148 	}
3149 
3150 	if (attr->ia_valid & ATTR_SIZE) {
3151 		if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
3152 			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3153 
3154 			if (attr->ia_size > sbi->s_bitmap_maxbytes) {
3155 				error = -EFBIG;
3156 				goto err_out;
3157 			}
3158 		}
3159 	}
3160 
3161 	if (S_ISREG(inode->i_mode) &&
3162 	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
3163 		handle_t *handle;
3164 
3165 		handle = ext4_journal_start(inode, 3);
3166 		if (IS_ERR(handle)) {
3167 			error = PTR_ERR(handle);
3168 			goto err_out;
3169 		}
3170 
3171 		error = ext4_orphan_add(handle, inode);
3172 		EXT4_I(inode)->i_disksize = attr->ia_size;
3173 		rc = ext4_mark_inode_dirty(handle, inode);
3174 		if (!error)
3175 			error = rc;
3176 		ext4_journal_stop(handle);
3177 	}
3178 
3179 	rc = inode_setattr(inode, attr);
3180 
3181 	/* If inode_setattr's call to ext4_truncate failed to get a
3182 	 * transaction handle at all, we need to clean up the in-core
3183 	 * orphan list manually. */
3184 	if (inode->i_nlink)
3185 		ext4_orphan_del(NULL, inode);
3186 
3187 	if (!rc && (ia_valid & ATTR_MODE))
3188 		rc = ext4_acl_chmod(inode);
3189 
3190 err_out:
3191 	ext4_std_error(inode->i_sb, error);
3192 	if (!error)
3193 		error = rc;
3194 	return error;
3195 }
3196 
3197 
3198 /*
3199  * How many blocks doth make a writepage()?
3200  *
3201  * With N blocks per page, it may be:
3202  * N data blocks
3203  * 2 indirect blocks
3204  * 2 dindirect blocks
3205  * 1 tindirect block
3206  * N+5 bitmap blocks (from the above)
3207  * N+5 group descriptor summary blocks
3208  * 1 inode block
3209  * 1 superblock.
3210  * 2 * EXT4_SINGLEDATA_TRANS_BLOCKS for the quota files
3211  *
3212  * 3 * (N + 5) + 2 + 2 * EXT4_SINGLEDATA_TRANS_BLOCKS
3213  *
3214  * With ordered or writeback data it's the same, less the N data blocks.
3215  *
3216  * If the inode's direct blocks can hold an integral number of pages then a
3217  * page cannot straddle two indirect blocks, and we can only touch one indirect
3218  * and dindirect block, and the "5" above becomes "3".
3219  *
3220  * This still overestimates under most circumstances.  If we were to pass the
3221  * start and end offsets in here as well we could do block_to_path() on each
3222  * block and work out the exact number of indirects which are touched.  Pah.
3223  */
3224 
3225 int ext4_writepage_trans_blocks(struct inode *inode)
3226 {
3227 	int bpp = ext4_journal_blocks_per_page(inode);
3228 	int indirects = (EXT4_NDIR_BLOCKS % bpp) ? 5 : 3;
3229 	int ret;
3230 
3231 	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
3232 		return ext4_ext_writepage_trans_blocks(inode, bpp);
3233 
3234 	if (ext4_should_journal_data(inode))
3235 		ret = 3 * (bpp + indirects) + 2;
3236 	else
3237 		ret = 2 * (bpp + indirects) + 2;
3238 
3239 #ifdef CONFIG_QUOTA
3240 	/* We know that structure was already allocated during DQUOT_INIT so
3241 	 * we will be updating only the data blocks + inodes */
3242 	ret += 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
3243 #endif
3244 
3245 	return ret;
3246 }
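
/*
 * Worked example of the credit arithmetic above, assuming 4K pages and
 * 1K blocks: bpp = 4 and EXT4_NDIR_BLOCKS (12) % 4 == 0, so indirects
 * = 3.  With data journalling that is 3 * (4 + 3) + 2 = 23 credits, or
 * 2 * (4 + 3) + 2 = 16 otherwise, plus 2 * EXT4_QUOTA_TRANS_BLOCKS()
 * when CONFIG_QUOTA is enabled.
 */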
3247 
3248 /*
3249  * The caller must have previously called ext4_reserve_inode_write().
3250  * Given this, we know that the caller already has write access to iloc->bh.
3251  */
3252 int ext4_mark_iloc_dirty(handle_t *handle,
3253 		struct inode *inode, struct ext4_iloc *iloc)
3254 {
3255 	int err = 0;
3256 
3257 	if (test_opt(inode->i_sb, I_VERSION))
3258 		inode_inc_iversion(inode);
3259 
3260 	/* the do_update_inode consumes one bh->b_count */
3261 	get_bh(iloc->bh);
3262 
3263 	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
3264 	err = ext4_do_update_inode(handle, inode, iloc);
3265 	put_bh(iloc->bh);
3266 	return err;
3267 }
3268 
3269 /*
3270  * On success, we end up with an outstanding reference count against
3271  * iloc->bh.  This _must_ be cleaned up later.
3272  */
3273 
3274 int
3275 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
3276 			 struct ext4_iloc *iloc)
3277 {
3278 	int err = 0;
3279 	if (handle) {
3280 		err = ext4_get_inode_loc(inode, iloc);
3281 		if (!err) {
3282 			BUFFER_TRACE(iloc->bh, "get_write_access");
3283 			err = ext4_journal_get_write_access(handle, iloc->bh);
3284 			if (err) {
3285 				brelse(iloc->bh);
3286 				iloc->bh = NULL;
3287 			}
3288 		}
3289 	}
3290 	ext4_std_error(inode->i_sb, err);
3291 	return err;
3292 }
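
/*
 * A minimal sketch of how the reserve/dirty pair above is meant to be
 * used (kept under #if 0; "example_update_inode" is a hypothetical
 * helper, not part of ext4).  ext4_mark_inode_dirty() below follows
 * exactly this pattern.
 */
#if 0
static int example_update_inode(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	int err;

	/* Get iloc.bh and journal write access to it. */
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		return err;

	/* ... modify in-core inode / EXT4_I(inode) fields here ... */

	/* Copy to the raw inode and journal it; this consumes iloc.bh. */
	return ext4_mark_iloc_dirty(handle, inode, &iloc);
}
#endif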
3293 
3294 /*
3295  * Expand an inode by new_extra_isize bytes.
3296  * Returns 0 on success or negative error number on failure.
3297  */
3298 static int ext4_expand_extra_isize(struct inode *inode,
3299 				   unsigned int new_extra_isize,
3300 				   struct ext4_iloc iloc,
3301 				   handle_t *handle)
3302 {
3303 	struct ext4_inode *raw_inode;
3304 	struct ext4_xattr_ibody_header *header;
3305 	struct ext4_xattr_entry *entry;
3306 
3307 	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
3308 		return 0;
3309 
3310 	raw_inode = ext4_raw_inode(&iloc);
3311 
3312 	header = IHDR(inode, raw_inode);
3313 	entry = IFIRST(header);
3314 
3315 	/* No extended attributes present */
3316 	if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
3317 		header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
3318 		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
3319 			new_extra_isize);
3320 		EXT4_I(inode)->i_extra_isize = new_extra_isize;
3321 		return 0;
3322 	}
3323 
3324 	/* try to expand with EAs present */
3325 	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
3326 					  raw_inode, handle);
3327 }
3328 
3329 /*
3330  * What we do here is to mark the in-core inode as clean with respect to inode
3331  * dirtiness (it may still be data-dirty).
3332  * This means that the in-core inode may be reaped by prune_icache
3333  * without having to perform any I/O.  This is a very good thing,
3334  * because *any* task may call prune_icache - even ones which
3335  * have a transaction open against a different journal.
3336  *
3337  * Is this cheating?  Not really.  Sure, we haven't written the
3338  * inode out, but prune_icache isn't a user-visible syncing function.
3339  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
3340  * we start and wait on commits.
3341  *
3342  * Is this efficient/effective?  Well, we're being nice to the system
3343  * by cleaning up our inodes proactively so they can be reaped
3344  * without I/O.  But we are potentially leaving up to five seconds'
3345  * worth of inodes floating about which prune_icache wants us to
3346  * write out.  One way to fix that would be to get prune_icache()
3347  * to do a write_super() to free up some memory, which would have
3348  * the desired effect.
3349  */
3350 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
3351 {
3352 	struct ext4_iloc iloc;
3353 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3354 	static unsigned int mnt_count;
3355 	int err, ret;
3356 
3357 	might_sleep();
3358 	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		return err;	/* iloc.bh is NULL on failure; don't use it below */
3359 	if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
3360 	    !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
3361 		/*
3362 		 * We need extra buffer credits since we may write into the EA
3363 		 * block with this same handle.  If journal_extend fails, it will
3364 		 * only result in a minor loss of functionality for that inode.
3365 		 * If this is felt to be critical, then e2fsck should be run to
3366 		 * force a large enough s_min_extra_isize.
3367 		 */
3368 		if ((jbd2_journal_extend(handle,
3369 			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
3370 			ret = ext4_expand_extra_isize(inode,
3371 						      sbi->s_want_extra_isize,
3372 						      iloc, handle);
3373 			if (ret) {
3374 				EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
3375 				if (mnt_count !=
3376 					le16_to_cpu(sbi->s_es->s_mnt_count)) {
3377 					ext4_warning(inode->i_sb, __func__,
3378 					"Unable to expand inode %lu. Delete"
3379 					" some EAs or run e2fsck.",
3380 					inode->i_ino);
3381 					mnt_count =
3382 					  le16_to_cpu(sbi->s_es->s_mnt_count);
3383 				}
3384 			}
3385 		}
3386 	}
3387 	if (!err)
3388 		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
3389 	return err;
3390 }
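
/*
 * Illustrative sketch, not part of the original source: the usual
 * pattern for dirtying an inode under its own small transaction.  The
 * credit count of 2 and the helper name are assumptions made for the
 * example only.
 */
#if 0
static int example_touch_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	handle = ext4_journal_start(inode, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	inode->i_mtime = ext4_current_time(inode);
	err = ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
	return err;
}
#endif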
3391 
3392 /*
3393  * ext4_dirty_inode() is called from __mark_inode_dirty()
3394  *
3395  * We're really interested in the case where a file is being extended.
3396  * i_size has been changed by generic_commit_write() and we thus need
3397  * to include the updated inode in the current transaction.
3398  *
3399  * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
3400  * are allocated to the file.
3401  *
3402  * If the inode is marked synchronous, we don't honour that here - doing
3403  * so would cause a commit on atime updates, which we don't bother doing.
3404  * We handle synchronous inodes at the highest possible level.
3405  */
3406 void ext4_dirty_inode(struct inode *inode)
3407 {
3408 	handle_t *current_handle = ext4_journal_current_handle();
3409 	handle_t *handle;
3410 
3411 	handle = ext4_journal_start(inode, 2);
3412 	if (IS_ERR(handle))
3413 		goto out;
3414 	if (current_handle &&
3415 		current_handle->h_transaction != handle->h_transaction) {
3416 		/* This task has a transaction open against a different fs */
3417 		printk(KERN_EMERG "%s: transactions do not match!\n",
3418 		       __func__);
3419 	} else {
3420 		jbd_debug(5, "marking dirty.  outer handle=%p\n",
3421 				current_handle);
3422 		ext4_mark_inode_dirty(handle, inode);
3423 	}
3424 	ext4_journal_stop(handle);
3425 out:
3426 	return;
3427 }
3428 
3429 #if 0
3430 /*
3431  * Bind an inode's backing buffer_head into this transaction, to prevent
3432  * it from being flushed to disk early.  Unlike
3433  * ext4_reserve_inode_write, this leaves behind no bh reference and
3434  * returns no iloc structure, so the caller needs to repeat the iloc
3435  * lookup to mark the inode dirty later.
3436  */
3437 static int ext4_pin_inode(handle_t *handle, struct inode *inode)
3438 {
3439 	struct ext4_iloc iloc;
3440 
3441 	int err = 0;
3442 	if (handle) {
3443 		err = ext4_get_inode_loc(inode, &iloc);
3444 		if (!err) {
3445 			BUFFER_TRACE(iloc.bh, "get_write_access");
3446 			err = jbd2_journal_get_write_access(handle, iloc.bh);
3447 			if (!err)
3448 				err = ext4_journal_dirty_metadata(handle,
3449 								  iloc.bh);
3450 			brelse(iloc.bh);
3451 		}
3452 	}
3453 	ext4_std_error(inode->i_sb, err);
3454 	return err;
3455 }
3456 #endif
3457 
3458 int ext4_change_inode_journal_flag(struct inode *inode, int val)
3459 {
3460 	journal_t *journal;
3461 	handle_t *handle;
3462 	int err;
3463 
3464 	/*
3465 	 * We have to be very careful here: changing a data block's
3466 	 * journaling status dynamically is dangerous.  If we write a
3467 	 * data block to the journal, change the status and then delete
3468 	 * that block, we risk forgetting to revoke the old log record
3469 	 * from the journal and so a subsequent replay can corrupt data.
3470 	 * So, first we make sure that the journal is empty and that
3471 	 * nobody is changing anything.
3472 	 */
3473 
3474 	journal = EXT4_JOURNAL(inode);
3475 	if (is_journal_aborted(journal))
3476 		return -EROFS;
3477 
3478 	jbd2_journal_lock_updates(journal);
3479 	jbd2_journal_flush(journal);
3480 
3481 	/*
3482 	 * OK, there are no updates running now, and all cached data is
3483 	 * synced to disk.  We are now in a completely consistent state
3484 	 * which doesn't have anything in the journal, and we know that
3485 	 * no filesystem updates are running, so it is safe to modify
3486 	 * the inode's in-core data-journaling state flag now.
3487 	 */
3488 
3489 	if (val)
3490 		EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
3491 	else
3492 		EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
3493 	ext4_set_aops(inode);
3494 
3495 	jbd2_journal_unlock_updates(journal);
3496 
3497 	/* Finally we can mark the inode as dirty. */
3498 
3499 	handle = ext4_journal_start(inode, 1);
3500 	if (IS_ERR(handle))
3501 		return PTR_ERR(handle);
3502 
3503 	err = ext4_mark_inode_dirty(handle, inode);
3504 	handle->h_sync = 1;
3505 	ext4_journal_stop(handle);
3506 	ext4_std_error(inode->i_sb, err);
3507 
3508 	return err;
3509 }
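
/*
 * Illustrative sketch, not part of the original source: how a caller
 * such as the EXT4_IOC_SETFLAGS ioctl path might flip per-inode data
 * journaling, reduced to the flag toggle (locking, permission and
 * flag-validity checks are elided; the helper name and the symlink
 * guard are made up for the example).
 */
#if 0
static int example_toggle_journal_data(struct inode *inode, int enable)
{
	if (S_ISLNK(inode->i_mode))
		return -EOPNOTSUPP;	/* illustrative guard only */
	return ext4_change_inode_journal_flag(inode, enable);
}
#endif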
3510