/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
					due to ENOSPC */
#define EXT4_EXT_MARK_UNINIT1	0x2  /* mark first half uninitialized */
#define EXT4_EXT_MARK_UNINIT2	0x4  /* mark second half uninitialized */

#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */

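/*
 * Extent tree blocks carry a crc32c checksum in a struct
 * ext4_extent_tail when the metadata_csum feature is enabled; the
 * checksum below covers the block from the header up to, but not
 * including, the tail itself.
 */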
static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}

static int ext4_split_extent(handle_t *handle,
				struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_map_blocks *map,
				int split_flag,
				int flags);

static int ext4_split_extent_at(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path *path,
			     ext4_lblk_t split,
			     int split_flag,
			     int flags);

static int ext4_find_delayed_extent(struct inode *inode,
				    struct ext4_ext_cache *newex);

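/*
 * Make sure @handle has at least @needed credits: try to extend the
 * running transaction in place first, and restart it if that is not
 * possible.  Returns -EAGAIN after a restart so the caller knows that
 * any extent path it holds must be re-read.
 */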
static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	if (err == 0)
		err = -EAGAIN;

	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
#define ext4_ext_dirty(handle, inode, path) \
		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
static int __ext4_ext_dirty(const char *where, unsigned int line,
			    handle_t *handle, struct inode *inode,
			    struct ext4_ext_path *path)
{
	int err;
	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to the file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
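		/*
		 * For example, if the nearest extent maps logical
		 * block 100 to physical block 5000 and we are asked
		 * about logical block 120, the goal below works out
		 * to 5000 + (120 - 100) = 5020.
		 */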
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

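/*
 * Capacity helpers: how many entries fit in an extent tree node.  With
 * a 4KB block size, for instance, a full tree block holds
 * (4096 - 12) / 12 = 340 extents or indexes (header, extent and index
 * entries are 12 bytes each), while the in-inode root (60 bytes of
 * i_data) holds at most (60 - 12) / 12 = 4.
 */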
static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}

/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int idxs;

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
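	/*
	 * Worked example, assuming a 4KB block size: idxs =
	 * (4096 - 12) / 12 = 340, so a contiguous delayed-allocation
	 * run costs one extra index block every 340 blocks, a
	 * second-level one every 340^2 blocks, and so on.
	 */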
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		int num = 0;

		if ((ei->i_da_metadata_calc_len % idxs) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			num++;
			ei->i_da_metadata_calc_len = 0;
		} else
			ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
		return num;
	}

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
}

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);

	if (len == 0)
		return 0;
	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	unsigned short entries;
	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;
			ext++;
			entries--;
		}
	} else {
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	/* Verify checksum on non-root extent tree nodes */
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode(inode, function, line, 0,
			"bad header/extent: %s - magic %x, "
			"entries %u, max %u(%u), depth %u(%u)",
			error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}

#define ext4_ext_check(inode, eh, depth)	\
	__ext4_ext_check(__func__, __LINE__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}

static int __ext4_ext_check_block(const char *function, unsigned int line,
				  struct inode *inode,
				  struct ext4_extent_header *eh,
				  int depth,
				  struct buffer_head *bh)
{
	int ret;

	if (buffer_verified(bh))
		return 0;
	ret = ext4_ext_check(inode, eh, depth);
	if (ret)
		return ret;
	set_buffer_verified(bh);
	return ret;
}

#define ext4_ext_check_block(inode, eh, depth, bh)	\
	__ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh)

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
		  ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
			    ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug("\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;
		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", level,
					le32_to_cpu(idx->ei_block),
					ext4_idx_pblock(idx),
					newblock);
			idx++;
		}

		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
				le32_to_cpu(ex->ee_block),
				ext4_ext_pblock(ex),
				ext4_ext_is_uninitialized(ex),
				ext4_ext_get_actual_len(ex),
				newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;


	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
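	/*
	 * The loop below keeps the invariant that the wanted entry
	 * lies in [l - 1, r]; on exit l - 1 is the rightmost index
	 * with ei_block <= block.  For example, with indexes starting
	 * at blocks {0, 100, 200}, a search for block 150 selects the
	 * entry at 100.
	 */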
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
		  if (k != 0 &&
		      le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext4_ext_pblock(path->p_ext),
			ext4_ext_is_uninitialized(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}

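/*
 * ext4_ext_find_extent:
 * walks the tree from the root down to the leaf that should contain
 * @block, recording one ext4_ext_path element per level.  The
 * returned array has depth + 1 entries; callers release it with
 * ext4_ext_drop_refs() (plus kfree() if it was allocated here rather
 * than passed in).
 */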
struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
					struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	/* walk through the tree */
	while (i) {
		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (unlikely(!bh))
			goto err;
		if (!bh_uptodate_or_lock(bh)) {
			trace_ext4_ext_load_extent(inode, block,
						path[ppos].p_block);
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto err;
			}
		}
		eh = ext_block_hdr(bh);
		ppos++;
		if (unlikely(ppos > depth)) {
			put_bh(bh);
			EXT4_ERROR_INODE(inode,
					 "ppos %d > depth %d", ppos, depth);
			goto err;
		}
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (ext4_ext_check_block(inode, eh, i, bh))
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EIO;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EIO;
	}

	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		ext_debug("insert new index %d after: %llu\n", logical, ptr);
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		ext_debug("insert new index %d before: %llu\n", logical, ptr);
		ix = curp->p_idx;
	}

	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
	BUG_ON(len < 0);
	if (len > 0) {
		ext_debug("insert new index %d: "
				"move %d indices from 0x%p to 0x%p\n",
				logical, len, ix, ix + 1);
		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
	}

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
		return -EIO;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EIO;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
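/*
 * For a tree of depth D split at level @at, this allocates D - @at new
 * blocks: one fresh leaf plus D - @at - 1 intermediate index blocks.
 * The index entry for the top of the new subtree goes into the
 * existing, non-full block at level @at via ext4_ext_insert_index().
 */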
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EIO;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If an error occurs, we stop processing and mark the
	 * filesystem read-only.  The index won't be inserted and the
	 * tree will stay in a consistent state.  The next mount will
	 * repair the buffers too.
	 */

	/*
	 * Get an array to track all allocated blocks.
	 * We need this to handle errors and free the blocks
	 * on failure.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EIO;
		goto cleanup;
	}
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EIO;
		goto cleanup;
	}
	/* start copy from next extent */
	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
	if (m) {
		struct ext4_extent *ex;
		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EIO;
		goto cleanup;
	}
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);

		/* move remainder of path[i] to the new index block */
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EIO;
			goto cleanup;
		}
		/* start copy indexes */
		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		ext4_ext_show_move(inode, path, newblock, i);
		if (m) {
			memmove(++fidx, path[i].p_idx,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		ext4_extent_block_csum_set(inode, neh);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
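/*
 * E.g. growing a depth-0 tree: the extents that lived in the inode's
 * i_data are copied into a freshly allocated block, and the root is
 * rewritten as a single index entry pointing at that block, giving a
 * depth-1 tree.  Depth always grows by exactly one here.
 */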
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 unsigned int flags,
				 struct ext4_extent *newext)
{
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_meta_block(handle, inode, NULL,
		newext, &err, flags);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, EXT4_I(inode)->i_data,
		sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* Update top-level index: num,max,pointer */
	neh = ext_inode_hdr(inode);
	neh->eh_entries = cpu_to_le16(1);
	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
	if (neh->eh_depth == 0) {
		/* Root extent block becomes index block */
		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
		EXT_FIRST_INDEX(neh)->ei_block =
			EXT_FIRST_EXTENT(neh)->ee_block;
	}
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

	le16_add_cpu(&neh->eh_depth, 1);
	ext4_mark_inode_dirty(handle, inode);
out:
	brelse(bh);

	return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    unsigned int flags,
				    struct ext4_ext_path *path,
				    struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up the tree and look for a free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, flags, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, flags, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}

/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_left(struct inode *inode,
				struct ext4_ext_path *path,
				ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
		le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
				  depth);
				return -EIO;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
	return 0;
}

/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 ext4_lblk_t *logical, ext4_fsblk_t *phys,
				 struct ext4_extent **ret_ex)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
					 depth);
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
						 *logical);
				return -EIO;
			}
		}
		goto found_extent;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		goto found_extent;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	block = ext4_idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = sb_bread(inode->i_sb, block);
		if (bh == NULL)
			return -EIO;
		eh = ext_block_hdr(bh);
		/* subtract from p_depth to get proper eh_depth */
		if (ext4_ext_check_block(inode, eh,
					 path->p_depth - depth, bh)) {
			put_bh(bh);
			return -EIO;
		}
		ix = EXT_FIRST_INDEX(eh);
		block = ext4_idx_pblock(ix);
		put_bh(bh);
	}

	bh = sb_bread(inode->i_sb, block);
	if (bh == NULL)
		return -EIO;
	eh = ext_block_hdr(bh);
	if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) {
		put_bh(bh);
		return -EIO;
	}
	ex = EXT_FIRST_EXTENT(eh);
found_extent:
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext4_ext_pblock(ex);
	*ret_ex = ex;
	if (bh)
		put_bh(bh);
	return 0;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCKS;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext &&
				path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
			  return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
			  return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCKS;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);
		return -EIO;
	}

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}

int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
				struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len, max_len;

	/*
	 * Make sure that either both extents are uninitialized, or
	 * both are _not_.
	 */
	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
		return 0;

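	/*
	 * The top bit of ee_len flags an uninitialized extent, so an
	 * initialized extent can be up to 32768 blocks long while an
	 * uninitialized one is capped at 32767 (EXT_INIT_MAX_LEN and
	 * EXT_UNINIT_MAX_LEN below).
	 */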
	if (ext4_ext_is_uninitialized(ex1))
		max_len = EXT_UNINIT_MAX_LEN;
	else
		max_len = EXT_INIT_MAX_LEN;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > max_len)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
		return 0;
#endif

	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
		return 1;
	return 0;
}

/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
static int ext4_ext_try_to_merge_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0;
	int uninitialized = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
	}

	return merge_done;
}

/*
 * This function does a very simple check to see if we can collapse
 * an extent tree with a single extent tree leaf block into the inode.
 */
static void ext4_ext_try_to_merge_up(handle_t *handle,
				     struct inode *inode,
				     struct ext4_ext_path *path)
{
	size_t s;
	unsigned max_root = ext4_ext_space_root(inode, 0);
	ext4_fsblk_t blk;

	if ((path[0].p_depth != 1) ||
	    (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
	    (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
		return;

	/*
	 * We need to modify the block allocation bitmap and the block
	 * group descriptor to release the extent tree block.  If we
	 * can't get the journal credits, give up.
	 */
	if (ext4_journal_extend(handle, 2))
		return;

	/*
	 * Copy the extent data up to the inode
	 */
	blk = ext4_idx_pblock(path[0].p_idx);
	s = le16_to_cpu(path[1].p_hdr->eh_entries) *
		sizeof(struct ext4_extent_idx);
	s += sizeof(struct ext4_extent_header);

	memcpy(path[0].p_hdr, path[1].p_hdr, s);
	path[0].p_depth = 0;
	path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
		(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
	path[0].p_hdr->eh_max = cpu_to_le16(max_root);

	brelse(path[1].p_bh);
	ext4_free_blocks(handle, inode, NULL, blk, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
}

/*
 * This function tries to merge the @ex extent to its neighbours in the
 * tree: first towards the left, then towards the right, and finally it
 * tries to collapse a single-leaf tree into the inode.
 */
static void ext4_ext_try_to_merge(handle_t *handle,
				  struct inode *inode,
				  struct ext4_ext_path *path,
				  struct ext4_extent *ex) {
	struct ext4_extent_header *eh;
	unsigned int depth;
	int merge_done = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	if (ex > EXT_FIRST_EXTENT(eh))
		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);

	if (!merge_done)
		(void) ext4_ext_try_to_merge_right(inode, path, ex);

	ext4_ext_try_to_merge_up(handle, inode, path);
}

/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
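/*
 * For instance (with a cluster ratio of 1), if newext covers logical
 * blocks [10, 20) and an existing extent already starts at block 15,
 * newext->ee_len is trimmed to 15 - 10 = 5 so the insertion stops
 * right where the existing mapping begins.
 */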
static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
					   struct inode *inode,
					   struct ext4_extent *newext,
					   struct ext4_ext_path *path)
{
	ext4_lblk_t b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = le32_to_cpu(path[depth].p_ext->ee_block);
	b2 &= ~(sbi->s_cluster_ratio - 1);

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCKS)
			goto out;
		b2 &= ~(sbi->s_cluster_ratio - 1);
	}

	/* check for wrap through zero on extent logical start block*/
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCKS - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}

/*
 * ext4_ext_insert_extent:
 * tries to merge requested extent into the existing extent or
 * inserts requested extent as new one into the tree,
 * creating new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int flag)
{
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	ext4_lblk_t next;
	unsigned uninitialized = 0;
	int flags = 0;

	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
		return -EIO;
	}
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EIO;
	}

	/* try to insert block into found extent and return */
	if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
		&& ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append [%d]%d block to %u:[%d]%d (from %llu)\n",
			  ext4_ext_is_uninitialized(newext),
			  ext4_ext_get_actual_len(newext),
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex),
			  ext4_ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			return err;

		/*
		 * ext4_can_extents_be_merged should have checked that either
		 * both extents are uninitialized, or both aren't. Thus we
		 * need to check only one of them here.
		 */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);
		eh = path[depth].p_hdr;
		nearex = ex;
		goto merge;
	}

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = EXT_MAX_BLOCKS;
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
		next = ext4_ext_next_leaf_block(path);
	if (next != EXT_MAX_BLOCKS) {
		ext_debug("next leaf block - %u\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto has_space;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're going to add a new leaf in the tree.
	 */
	if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT)
		flags = EXT4_MB_USE_ROOT_BLOCKS;
	err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

1887 has_space:
1888 	nearex = path[depth].p_ext;
1889 
1890 	err = ext4_ext_get_access(handle, inode, path + depth);
1891 	if (err)
1892 		goto cleanup;
1893 
1894 	if (!nearex) {
1895 		/* there is no extent in this leaf, create first one */
1896 		ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
1897 				le32_to_cpu(newext->ee_block),
1898 				ext4_ext_pblock(newext),
1899 				ext4_ext_is_uninitialized(newext),
1900 				ext4_ext_get_actual_len(newext));
1901 		nearex = EXT_FIRST_EXTENT(eh);
1902 	} else {
1903 		if (le32_to_cpu(newext->ee_block)
1904 			   > le32_to_cpu(nearex->ee_block)) {
1905 			/* Insert after */
1906 			ext_debug("insert %u:%llu:[%d]%d after: "
1907 					"nearest %p\n",
1908 					le32_to_cpu(newext->ee_block),
1909 					ext4_ext_pblock(newext),
1910 					ext4_ext_is_uninitialized(newext),
1911 					ext4_ext_get_actual_len(newext),
1912 					nearex);
1913 			nearex++;
1914 		} else {
1915 			/* Insert before */
1916 			BUG_ON(newext->ee_block == nearex->ee_block);
1917 			ext_debug("insert %u:%llu:[%d]%d before: "
1918 					"nearest %p\n",
1919 					le32_to_cpu(newext->ee_block),
1920 					ext4_ext_pblock(newext),
1921 					ext4_ext_is_uninitialized(newext),
1922 					ext4_ext_get_actual_len(newext),
1923 					nearex);
1924 		}
1925 		len = EXT_LAST_EXTENT(eh) - nearex + 1;
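		/* number of extents from nearex to the end of the leaf,
		 * inclusive; they must all shift right one slot */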
1926 		if (len > 0) {
1927 			ext_debug("insert %u:%llu:[%d]%d: "
1928 					"move %d extents from 0x%p to 0x%p\n",
1929 					le32_to_cpu(newext->ee_block),
1930 					ext4_ext_pblock(newext),
1931 					ext4_ext_is_uninitialized(newext),
1932 					ext4_ext_get_actual_len(newext),
1933 					len, nearex, nearex + 1);
1934 			memmove(nearex + 1, nearex,
1935 				len * sizeof(struct ext4_extent));
1936 		}
1937 	}
1938 
1939 	le16_add_cpu(&eh->eh_entries, 1);
1940 	path[depth].p_ext = nearex;
1941 	nearex->ee_block = newext->ee_block;
1942 	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
1943 	nearex->ee_len = newext->ee_len;
1944 
1945 merge:
1946 	/* try to merge extents */
1947 	if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
1948 		ext4_ext_try_to_merge(handle, inode, path, nearex);
1949 
1950 
1951 	/* time to correct all indexes above */
1952 	err = ext4_ext_correct_indexes(handle, inode, path);
1953 	if (err)
1954 		goto cleanup;
1955 
1956 	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
1957 
1958 cleanup:
1959 	if (npath) {
1960 		ext4_ext_drop_refs(npath);
1961 		kfree(npath);
1962 	}
1963 	ext4_ext_invalidate_cache(inode);
1964 	return err;
1965 }
1966 
1967 static int ext4_fill_fiemap_extents(struct inode *inode,
1968 				    ext4_lblk_t block, ext4_lblk_t num,
1969 				    struct fiemap_extent_info *fieinfo)
1970 {
1971 	struct ext4_ext_path *path = NULL;
1972 	struct ext4_ext_cache newex;
1973 	struct ext4_extent *ex;
1974 	ext4_lblk_t next, next_del, start = 0, end = 0;
1975 	ext4_lblk_t last = block + num;
1976 	int exists, depth = 0, err = 0;
1977 	unsigned int flags = 0;
1978 	unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
1979 
1980 	while (block < last && block != EXT_MAX_BLOCKS) {
1981 		num = last - block;
1982 		/* find extent for this block */
1983 		down_read(&EXT4_I(inode)->i_data_sem);
1984 
1985 		if (path && ext_depth(inode) != depth) {
1986 			/* depth was changed. we have to realloc path */
1987 			kfree(path);
1988 			path = NULL;
1989 		}
1990 
1991 		path = ext4_ext_find_extent(inode, block, path);
1992 		if (IS_ERR(path)) {
1993 			up_read(&EXT4_I(inode)->i_data_sem);
1994 			err = PTR_ERR(path);
1995 			path = NULL;
1996 			break;
1997 		}
1998 
1999 		depth = ext_depth(inode);
2000 		if (unlikely(path[depth].p_hdr == NULL)) {
2001 			up_read(&EXT4_I(inode)->i_data_sem);
2002 			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2003 			err = -EIO;
2004 			break;
2005 		}
2006 		ex = path[depth].p_ext;
2007 		next = ext4_ext_next_allocated_block(path);
2008 		ext4_ext_drop_refs(path);
2009 
2010 		flags = 0;
2011 		exists = 0;
2012 		if (!ex) {
2013 			/* there is no extent yet, so try to allocate
2014 			 * all requested space */
2015 			start = block;
2016 			end = block + num;
2017 		} else if (le32_to_cpu(ex->ee_block) > block) {
2018 			/* need to allocate space before found extent */
2019 			start = block;
2020 			end = le32_to_cpu(ex->ee_block);
2021 			if (block + num < end)
2022 				end = block + num;
2023 		} else if (block >= le32_to_cpu(ex->ee_block)
2024 					+ ext4_ext_get_actual_len(ex)) {
2025 			/* need to allocate space after found extent */
2026 			start = block;
2027 			end = block + num;
2028 			if (end >= next)
2029 				end = next;
2030 		} else if (block >= le32_to_cpu(ex->ee_block)) {
2031 			/*
2032 			 * some part of requested space is covered
2033 			 * by found extent
2034 			 */
2035 			start = block;
2036 			end = le32_to_cpu(ex->ee_block)
2037 				+ ext4_ext_get_actual_len(ex);
2038 			if (block + num < end)
2039 				end = block + num;
2040 			exists = 1;
2041 		} else {
2042 			BUG();
2043 		}
2044 		BUG_ON(end <= start);
2045 
2046 		if (!exists) {
2047 			newex.ec_block = start;
2048 			newex.ec_len = end - start;
2049 			newex.ec_start = 0;
2050 		} else {
2051 			newex.ec_block = le32_to_cpu(ex->ee_block);
2052 			newex.ec_len = ext4_ext_get_actual_len(ex);
2053 			newex.ec_start = ext4_ext_pblock(ex);
2054 			if (ext4_ext_is_uninitialized(ex))
2055 				flags |= FIEMAP_EXTENT_UNWRITTEN;
2056 		}
2057 
2058 		/*
2059 		 * Find delayed extent and update newex accordingly. We call
2060 		 * it even in !exists case to find out whether newex is the
2061 		 * last existing extent or not.
2062 		 */
2063 		next_del = ext4_find_delayed_extent(inode, &newex);
2064 		if (!exists && next_del) {
2065 			exists = 1;
2066 			flags |= FIEMAP_EXTENT_DELALLOC;
2067 		}
2068 		up_read(&EXT4_I(inode)->i_data_sem);
2069 
2070 		if (unlikely(newex.ec_len == 0)) {
2071 			EXT4_ERROR_INODE(inode, "newex.ec_len == 0");
2072 			err = -EIO;
2073 			break;
2074 		}
2075 
2076 		/* This is possible iff next == next_del == EXT_MAX_BLOCKS */
2077 		if (next == next_del) {
2078 			flags |= FIEMAP_EXTENT_LAST;
2079 			if (unlikely(next_del != EXT_MAX_BLOCKS ||
2080 				     next != EXT_MAX_BLOCKS)) {
2081 				EXT4_ERROR_INODE(inode,
2082 						 "next extent == %u, next "
2083 						 "delalloc extent = %u",
2084 						 next, next_del);
2085 				err = -EIO;
2086 				break;
2087 			}
2088 		}
2089 
2090 		if (exists) {
2091 			err = fiemap_fill_next_extent(fieinfo,
2092 				(__u64)newex.ec_block << blksize_bits,
2093 				(__u64)newex.ec_start << blksize_bits,
2094 				(__u64)newex.ec_len << blksize_bits,
2095 				flags);
2096 			if (err < 0)
2097 				break;
2098 			if (err == 1) {
2099 				err = 0;
2100 				break;
2101 			}
2102 		}
2103 
2104 		block = newex.ec_block + newex.ec_len;
2105 	}
2106 
2107 	if (path) {
2108 		ext4_ext_drop_refs(path);
2109 		kfree(path);
2110 	}
2111 
2112 	return err;
2113 }
2114 
2115 static void
2116 ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
2117 			__u32 len, ext4_fsblk_t start)
2118 {
2119 	struct ext4_ext_cache *cex;
2120 	BUG_ON(len == 0);
2121 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
2122 	trace_ext4_ext_put_in_cache(inode, block, len, start);
2123 	cex = &EXT4_I(inode)->i_cached_extent;
2124 	cex->ec_block = block;
2125 	cex->ec_len = len;
2126 	cex->ec_start = start;
2127 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
2128 }
2129 
2130 /*
2131  * ext4_ext_put_gap_in_cache:
2132  * calculate boundaries of the gap that the requested block fits into
2133  * and cache this gap
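 *
 * For example (illustrative): if the nearest extent in the leaf starts
 * at logical block 100 and block 50 was requested, the hole [50, 100)
 * is cached, so later lookups in that range can avoid a tree walk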
2134  */
2135 static void
2136 ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
2137 				ext4_lblk_t block)
2138 {
2139 	int depth = ext_depth(inode);
2140 	unsigned long len;
2141 	ext4_lblk_t lblock;
2142 	struct ext4_extent *ex;
2143 
2144 	ex = path[depth].p_ext;
2145 	if (ex == NULL) {
2146 		/* there is no extent yet, so gap is [0;-] */
2147 		lblock = 0;
2148 		len = EXT_MAX_BLOCKS;
2149 		ext_debug("cache gap(whole file):");
2150 	} else if (block < le32_to_cpu(ex->ee_block)) {
2151 		lblock = block;
2152 		len = le32_to_cpu(ex->ee_block) - block;
2153 		ext_debug("cache gap(before): %u [%u:%u]",
2154 				block,
2155 				le32_to_cpu(ex->ee_block),
2156 				 ext4_ext_get_actual_len(ex));
2157 	} else if (block >= le32_to_cpu(ex->ee_block)
2158 			+ ext4_ext_get_actual_len(ex)) {
2159 		ext4_lblk_t next;
2160 		lblock = le32_to_cpu(ex->ee_block)
2161 			+ ext4_ext_get_actual_len(ex);
2162 
2163 		next = ext4_ext_next_allocated_block(path);
2164 		ext_debug("cache gap(after): [%u:%u] %u",
2165 				le32_to_cpu(ex->ee_block),
2166 				ext4_ext_get_actual_len(ex),
2167 				block);
2168 		BUG_ON(next == lblock);
2169 		len = next - lblock;
2170 	} else {
2171 		lblock = len = 0;
2172 		BUG();
2173 	}
2174 
2175 	ext_debug(" -> %u:%lu\n", lblock, len);
2176 	ext4_ext_put_in_cache(inode, lblock, len, 0);
2177 }
2178 
2179 /*
2180  * ext4_ext_in_cache()
2181  * Checks to see if the given block is in the cache.
2182  * If it is, the cached extent is stored in the given
2183  * cache extent pointer.
2184  *
2185  * @inode: The file's inode
2186  * @block: The block to look for in the cache
2187  * @ex:    Pointer where the cached extent will be stored
2188  *         if it contains block
2189  *
2190  * Return 0 if cache is invalid; 1 if the cache is valid
2191  */
2192 static int
2193 ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
2194 		  struct ext4_extent *ex)
2195 {
2196 	struct ext4_ext_cache *cex;
2197 	int ret = 0;
2198 
2199 	/*
2200 	 * We borrow i_block_reservation_lock to protect i_cached_extent
2201 	 */
2202 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
2203 	cex = &EXT4_I(inode)->i_cached_extent;
2204 
2205 	/* has cache valid data? */
2206 	if (cex->ec_len == 0)
2207 		goto errout;
2208 
2209 	if (in_range(block, cex->ec_block, cex->ec_len)) {
2210 		ex->ee_block = cpu_to_le32(cex->ec_block);
2211 		ext4_ext_store_pblock(ex, cex->ec_start);
2212 		ex->ee_len = cpu_to_le16(cex->ec_len);
2213 		ext_debug("%u cached by %u:%u:%llu\n",
2214 				block,
2215 				cex->ec_block, cex->ec_len, cex->ec_start);
2216 		ret = 1;
2217 	}
2218 errout:
2219 	trace_ext4_ext_in_cache(inode, block, ret);
2220 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
2221 	return ret;
2222 }
2223 
2224 /*
2225  * ext4_ext_rm_idx:
2226  * removes index from the index block.
2227  */
2228 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2229 			struct ext4_ext_path *path, int depth)
2230 {
2231 	int err;
2232 	ext4_fsblk_t leaf;
2233 
2234 	/* free index block */
2235 	depth--;
2236 	path = path + depth;
2237 	leaf = ext4_idx_pblock(path->p_idx);
2238 	if (unlikely(path->p_hdr->eh_entries == 0)) {
2239 		EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2240 		return -EIO;
2241 	}
2242 	err = ext4_ext_get_access(handle, inode, path);
2243 	if (err)
2244 		return err;
2245 
2246 	if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
2247 		int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
2248 		len *= sizeof(struct ext4_extent_idx);
2249 		memmove(path->p_idx, path->p_idx + 1, len);
2250 	}
2251 
2252 	le16_add_cpu(&path->p_hdr->eh_entries, -1);
2253 	err = ext4_ext_dirty(handle, inode, path);
2254 	if (err)
2255 		return err;
2256 	ext_debug("index is empty, remove it, free block %llu\n", leaf);
2257 	trace_ext4_ext_rm_idx(inode, leaf);
2258 
2259 	ext4_free_blocks(handle, inode, NULL, leaf, 1,
2260 			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2261 
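	/*
	 * If the removed index was the first entry of its node, the
	 * logical-block key stored in each ancestor index must be
	 * refreshed to match the new first entry one level below.
	 */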
2262 	while (--depth >= 0) {
2263 		if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
2264 			break;
2265 		path--;
2266 		err = ext4_ext_get_access(handle, inode, path);
2267 		if (err)
2268 			break;
2269 		path->p_idx->ei_block = (path+1)->p_idx->ei_block;
2270 		err = ext4_ext_dirty(handle, inode, path);
2271 		if (err)
2272 			break;
2273 	}
2274 	return err;
2275 }
2276 
2277 /*
2278  * ext4_ext_calc_credits_for_single_extent:
2279  * This routine returns the maximum number of credits needed to insert
2280  * an extent into the extent tree.
2281  * When the actual path is passed, the caller should calculate the
2282  * credits under i_data_sem.
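 *
 * For example (illustrative): if the target leaf still has free slots,
 * only 2 credits (bitmap + group descriptor) plus the usual metadata
 * credits are needed; otherwise the full chunk estimate from
 * ext4_chunk_trans_blocks() applies.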
2283  */
2284 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2285 						struct ext4_ext_path *path)
2286 {
2287 	if (path) {
2288 		int depth = ext_depth(inode);
2289 		int ret = 0;
2290 
2291 		/* probably there is space in leaf? */
2292 		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2293 				< le16_to_cpu(path[depth].p_hdr->eh_max)) {
2294 
2295 			/*
2296 			 *  There is some space in the leaf, so there is
2297 			 *  no need to account for a leaf block credit.
2298 			 *
2299 			 *  Bitmaps, block group descriptor blocks
2300 			 *  and other metadata blocks still need to be
2301 			 *  accounted for.
2302 			 */
2303 			/* 1 bitmap, 1 block group descriptor */
2304 			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
2305 			return ret;
2306 		}
2307 	}
2308 
2309 	return ext4_chunk_trans_blocks(inode, nrblocks);
2310 }
2311 
2312 /*
2313  * How many index/leaf blocks need to be changed/allocated to modify nrblocks?
2314  *
2315  * If nrblocks fit in a single extent (chunk flag is 1), then in the worst
2316  * case each index/leaf block at every tree level needs to be changed;
2317  * if the tree splits due to the insertion of a new extent, the old
2318  * index/leaf blocks need to be updated too.
2319  *
2320  * If the nrblocks are discontiguous, they could cause
2321  * the whole tree to split more than once, but this is really rare.
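 *
 * E.g. (illustrative): at tree depth 2, a single contiguous chunk costs
 * 2 * 2 = 4 index/leaf credits, while discontiguous blocks cost
 * 2 * 3 = 6.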
2322  */
2323 int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
2324 {
2325 	int index;
2326 	int depth;
2327 
2328 	/* If we are converting the inline data, only one is needed here. */
2329 	if (ext4_has_inline_data(inode))
2330 		return 1;
2331 
2332 	depth = ext_depth(inode);
2333 
2334 	if (chunk)
2335 		index = depth * 2;
2336 	else
2337 		index = depth * 3;
2338 
2339 	return index;
2340 }
2341 
2342 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2343 			      struct ext4_extent *ex,
2344 			      ext4_fsblk_t *partial_cluster,
2345 			      ext4_lblk_t from, ext4_lblk_t to)
2346 {
2347 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2348 	unsigned short ee_len =  ext4_ext_get_actual_len(ex);
2349 	ext4_fsblk_t pblk;
2350 	int flags = 0;
2351 
2352 	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2353 		flags |= EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
2354 	else if (ext4_should_journal_data(inode))
2355 		flags |= EXT4_FREE_BLOCKS_FORGET;
2356 
2357 	/*
2358 	 * For bigalloc file systems, we never free a partial cluster
2359 	 * at the beginning of the extent.  Instead, we make a note
2360 	 * that we tried freeing the cluster, and check to see if we
2361 	 * need to free it on a subsequent call to ext4_remove_blocks,
2362 	 * or at the end of the ext4_truncate() operation.
2363 	 */
2364 	flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
2365 
2366 	trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
2367 	/*
2368 	 * If we have a partial cluster, and it's different from the
2369 	 * cluster of the last block, we need to explicitly free the
2370 	 * partial cluster here.
2371 	 */
2372 	pblk = ext4_ext_pblock(ex) + ee_len - 1;
2373 	if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
2374 		ext4_free_blocks(handle, inode, NULL,
2375 				 EXT4_C2B(sbi, *partial_cluster),
2376 				 sbi->s_cluster_ratio, flags);
2377 		*partial_cluster = 0;
2378 	}
2379 
2380 #ifdef EXTENTS_STATS
2381 	{
2382 		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2383 		spin_lock(&sbi->s_ext_stats_lock);
2384 		sbi->s_ext_blocks += ee_len;
2385 		sbi->s_ext_extents++;
2386 		if (ee_len < sbi->s_ext_min)
2387 			sbi->s_ext_min = ee_len;
2388 		if (ee_len > sbi->s_ext_max)
2389 			sbi->s_ext_max = ee_len;
2390 		if (ext_depth(inode) > sbi->s_depth_max)
2391 			sbi->s_depth_max = ext_depth(inode);
2392 		spin_unlock(&sbi->s_ext_stats_lock);
2393 	}
2394 #endif
2395 	if (from >= le32_to_cpu(ex->ee_block)
2396 	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
2397 		/* tail removal */
2398 		ext4_lblk_t num;
2399 
2400 		num = le32_to_cpu(ex->ee_block) + ee_len - from;
2401 		pblk = ext4_ext_pblock(ex) + ee_len - num;
2402 		ext_debug("free last %u blocks starting %llu\n", num, pblk);
2403 		ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
2404 		/*
2405 		 * If the block range to be freed didn't start at the
2406 		 * beginning of a cluster, and we removed the entire
2407 		 * extent, save the partial cluster here, since we
2408 		 * might need to delete if we determine that the
2409 		 * truncate operation has removed all of the blocks in
2410 		 * the cluster.
2411 		 */
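		/*
		 * s_cluster_ratio is a power of two, so this mask is the
		 * block's offset within its cluster; a nonzero offset
		 * means the freed range did not end on a cluster boundary.
		 */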
2412 		if (pblk & (sbi->s_cluster_ratio - 1) &&
2413 		    (ee_len == num))
2414 			*partial_cluster = EXT4_B2C(sbi, pblk);
2415 		else
2416 			*partial_cluster = 0;
2417 	} else if (from == le32_to_cpu(ex->ee_block)
2418 		   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
2419 		/* head removal */
2420 		ext4_lblk_t num;
2421 		ext4_fsblk_t start;
2422 
2423 		num = to - from;
2424 		start = ext4_ext_pblock(ex);
2425 
2426 		ext_debug("free first %u blocks starting %llu\n", num, start);
2427 		ext4_free_blocks(handle, inode, NULL, start, num, flags);
2428 
2429 	} else {
2430 		printk(KERN_INFO "strange request: removal(2) "
2431 				"%u-%u from %u:%u\n",
2432 				from, to, le32_to_cpu(ex->ee_block), ee_len);
2433 	}
2434 	return 0;
2435 }
2436 
2437 
2438 /*
2439  * ext4_ext_rm_leaf() removes the extents associated with the
2440  * blocks appearing between "start" and "end", and splits the extents
2441  * if "start" and "end" appear in the same extent.
2442  *
2443  * @handle: The journal handle
2444  * @inode:  The file's inode
2445  * @path:   The path to the leaf
2446  * @start:  The first block to remove
2447  * @end:    The last block to remove
2448  */
2449 static int
2450 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2451 		 struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster,
2452 		 ext4_lblk_t start, ext4_lblk_t end)
2453 {
2454 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2455 	int err = 0, correct_index = 0;
2456 	int depth = ext_depth(inode), credits;
2457 	struct ext4_extent_header *eh;
2458 	ext4_lblk_t a, b;
2459 	unsigned num;
2460 	ext4_lblk_t ex_ee_block;
2461 	unsigned short ex_ee_len;
2462 	unsigned uninitialized = 0;
2463 	struct ext4_extent *ex;
2464 
2465 	/* the header must be checked already in ext4_ext_remove_space() */
2466 	ext_debug("truncate since %u in leaf to %u\n", start, end);
2467 	if (!path[depth].p_hdr)
2468 		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2469 	eh = path[depth].p_hdr;
2470 	if (unlikely(path[depth].p_hdr == NULL)) {
2471 		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2472 		return -EIO;
2473 	}
2474 	/* find where to start removing */
2475 	ex = EXT_LAST_EXTENT(eh);
2476 
2477 	ex_ee_block = le32_to_cpu(ex->ee_block);
2478 	ex_ee_len = ext4_ext_get_actual_len(ex);
2479 
2480 	trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
2481 
2482 	while (ex >= EXT_FIRST_EXTENT(eh) &&
2483 			ex_ee_block + ex_ee_len > start) {
2484 
2485 		if (ext4_ext_is_uninitialized(ex))
2486 			uninitialized = 1;
2487 		else
2488 			uninitialized = 0;
2489 
2490 		ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
2491 			 uninitialized, ex_ee_len);
2492 		path[depth].p_ext = ex;
2493 
2494 		a = ex_ee_block > start ? ex_ee_block : start;
2495 		b = ex_ee_block+ex_ee_len - 1 < end ?
2496 			ex_ee_block+ex_ee_len - 1 : end;
2497 
2498 		ext_debug("  border %u:%u\n", a, b);
2499 
2500 		/* If this extent is beyond the end of the hole, skip it */
2501 		if (end < ex_ee_block) {
2502 			ex--;
2503 			ex_ee_block = le32_to_cpu(ex->ee_block);
2504 			ex_ee_len = ext4_ext_get_actual_len(ex);
2505 			continue;
2506 		} else if (b != ex_ee_block + ex_ee_len - 1) {
2507 			EXT4_ERROR_INODE(inode,
2508 					 "can not handle truncate %u:%u "
2509 					 "on extent %u:%u",
2510 					 start, end, ex_ee_block,
2511 					 ex_ee_block + ex_ee_len - 1);
2512 			err = -EIO;
2513 			goto out;
2514 		} else if (a != ex_ee_block) {
2515 			/* remove tail of the extent */
2516 			num = a - ex_ee_block;
2517 		} else {
2518 			/* remove whole extent: excellent! */
2519 			num = 0;
2520 		}
2521 		/*
2522 		 * 3 for leaf, sb, and inode plus 2 (bmap and group
2523 		 * descriptor) for each block group; assume two block
2524 		 * groups plus ex_ee_len/blocks_per_block_group for
2525 		 * the worst case
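		 *
		 * E.g. (illustrative): with 4KiB blocks (32768 blocks per
		 * group), a 1000-block extent yields 7 + 2*(1000/32768) = 7
		 * credits before the quota credits are added below.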
2526 		 */
2527 		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2528 		if (ex == EXT_FIRST_EXTENT(eh)) {
2529 			correct_index = 1;
2530 			credits += (ext_depth(inode)) + 1;
2531 		}
2532 		credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2533 
2534 		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
2535 		if (err)
2536 			goto out;
2537 
2538 		err = ext4_ext_get_access(handle, inode, path + depth);
2539 		if (err)
2540 			goto out;
2541 
2542 		err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
2543 					 a, b);
2544 		if (err)
2545 			goto out;
2546 
2547 		if (num == 0)
2548 			/* this extent is removed; mark slot entirely unused */
2549 			ext4_ext_store_pblock(ex, 0);
2550 
2551 		ex->ee_len = cpu_to_le16(num);
2552 		/*
2553 		 * Do not mark uninitialized if all the blocks in the
2554 		 * extent have been removed.
2555 		 */
2556 		if (uninitialized && num)
2557 			ext4_ext_mark_uninitialized(ex);
2558 		/*
2559 		 * If the extent was completely released,
2560 		 * we need to remove it from the leaf
2561 		 */
2562 		if (num == 0) {
2563 			if (end != EXT_MAX_BLOCKS - 1) {
2564 				/*
2565 				 * For hole punching, we need to scoot all the
2566 				 * extents up when an extent is removed so that
2567 				 * we don't have blank extents in the middle
2568 				 */
2569 				memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2570 					sizeof(struct ext4_extent));
2571 
2572 				/* Now get rid of the one at the end */
2573 				memset(EXT_LAST_EXTENT(eh), 0,
2574 					sizeof(struct ext4_extent));
2575 			}
2576 			le16_add_cpu(&eh->eh_entries, -1);
2577 		} else
2578 			*partial_cluster = 0;
2579 
2580 		err = ext4_ext_dirty(handle, inode, path + depth);
2581 		if (err)
2582 			goto out;
2583 
2584 		ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
2585 				ext4_ext_pblock(ex));
2586 		ex--;
2587 		ex_ee_block = le32_to_cpu(ex->ee_block);
2588 		ex_ee_len = ext4_ext_get_actual_len(ex);
2589 	}
2590 
2591 	if (correct_index && eh->eh_entries)
2592 		err = ext4_ext_correct_indexes(handle, inode, path);
2593 
2594 	/*
2595 	 * If there is still an entry in the leaf node, check to see if
2596 	 * it references the partial cluster.  This is the only place
2597 	 * where it could; if it doesn't, we can free the cluster.
2598 	 */
2599 	if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) &&
2600 	    (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
2601 	     *partial_cluster)) {
2602 		int flags = EXT4_FREE_BLOCKS_FORGET;
2603 
2604 		if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2605 			flags |= EXT4_FREE_BLOCKS_METADATA;
2606 
2607 		ext4_free_blocks(handle, inode, NULL,
2608 				 EXT4_C2B(sbi, *partial_cluster),
2609 				 sbi->s_cluster_ratio, flags);
2610 		*partial_cluster = 0;
2611 	}
2612 
2613 	/* if this leaf is free, then we should
2614 	 * remove it from index block above */
2615 	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2616 		err = ext4_ext_rm_idx(handle, inode, path, depth);
2617 
2618 out:
2619 	return err;
2620 }
2621 
2622 /*
2623  * ext4_ext_more_to_rm:
2624  * returns 1 if current index has to be freed (even partial)
2625  */
2626 static int
2627 ext4_ext_more_to_rm(struct ext4_ext_path *path)
2628 {
2629 	BUG_ON(path->p_idx == NULL);
2630 
2631 	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2632 		return 0;
2633 
2634 	/*
2635 	 * if truncate on deeper level happened, it wasn't partial,
2636 	 * so we have to consider current index for truncation
2637 	 */
2638 	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2639 		return 0;
2640 	return 1;
2641 }
2642 
2643 static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
2644 				 ext4_lblk_t end)
2645 {
2646 	struct super_block *sb = inode->i_sb;
2647 	int depth = ext_depth(inode);
2648 	struct ext4_ext_path *path = NULL;
2649 	ext4_fsblk_t partial_cluster = 0;
2650 	handle_t *handle;
2651 	int i = 0, err = 0;
2652 
2653 	ext_debug("truncate since %u to %u\n", start, end);
2654 
2655 	/* probably first extent we're gonna free will be last in block */
2656 	handle = ext4_journal_start(inode, depth + 1);
2657 	if (IS_ERR(handle))
2658 		return PTR_ERR(handle);
2659 
2660 again:
2661 	ext4_ext_invalidate_cache(inode);
2662 
2663 	trace_ext4_ext_remove_space(inode, start, depth);
2664 
2665 	/*
2666 	 * Check if we are removing extents inside the extent tree. If that
2667 	 * is the case, we are going to punch a hole inside the extent tree
2668 	 * so we have to check whether we need to split the extent covering
2669 	 * the last block to remove so we can easily remove the part of it
2670 	 * in ext4_ext_rm_leaf().
2671 	 */
2672 	if (end < EXT_MAX_BLOCKS - 1) {
2673 		struct ext4_extent *ex;
2674 		ext4_lblk_t ee_block;
2675 
2676 		/* find extent for this block */
2677 		path = ext4_ext_find_extent(inode, end, NULL);
2678 		if (IS_ERR(path)) {
2679 			ext4_journal_stop(handle);
2680 			return PTR_ERR(path);
2681 		}
2682 		depth = ext_depth(inode);
2683 		/* A leaf may be missing only if the inode has no blocks at all */
2684 		ex = path[depth].p_ext;
2685 		if (!ex) {
2686 			if (depth) {
2687 				EXT4_ERROR_INODE(inode,
2688 						 "path[%d].p_hdr == NULL",
2689 						 depth);
2690 				err = -EIO;
2691 			}
2692 			goto out;
2693 		}
2694 
2695 		ee_block = le32_to_cpu(ex->ee_block);
2696 
2697 		/*
2698 		 * See if the last block is inside the extent, if so split
2699 		 * the extent at 'end' block so we can easily remove the
2700 		 * tail of the first part of the split extent in
2701 		 * ext4_ext_rm_leaf().
2702 		 */
2703 		if (end >= ee_block &&
2704 		    end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
2705 			int split_flag = 0;
2706 
2707 			if (ext4_ext_is_uninitialized(ex))
2708 				split_flag = EXT4_EXT_MARK_UNINIT1 |
2709 					     EXT4_EXT_MARK_UNINIT2;
2710 
2711 			/*
2712 			 * Split the extent in two so that 'end' is the last
2713 			 * block in the first new extent
2714 			 */
2715 			err = ext4_split_extent_at(handle, inode, path,
2716 						end + 1, split_flag,
2717 						EXT4_GET_BLOCKS_PRE_IO |
2718 						EXT4_GET_BLOCKS_PUNCH_OUT_EXT);
2719 
2720 			if (err < 0)
2721 				goto out;
2722 		}
2723 	}
2724 	/*
2725 	 * We start scanning from right side, freeing all the blocks
2726 	 * after i_size and walking into the tree depth-wise.
2727 	 */
2728 	depth = ext_depth(inode);
2729 	if (path) {
2730 		int k = i = depth;
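		/*
		 * Set p_block one past eh_entries so that
		 * ext4_ext_more_to_rm() keeps considering every index
		 * along the existing path for removal.
		 */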
2731 		while (--k > 0)
2732 			path[k].p_block =
2733 				le16_to_cpu(path[k].p_hdr->eh_entries)+1;
2734 	} else {
2735 		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
2736 			       GFP_NOFS);
2737 		if (path == NULL) {
2738 			ext4_journal_stop(handle);
2739 			return -ENOMEM;
2740 		}
2741 		path[0].p_depth = depth;
2742 		path[0].p_hdr = ext_inode_hdr(inode);
2743 		i = 0;
2744 
2745 		if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
2746 			err = -EIO;
2747 			goto out;
2748 		}
2749 	}
2750 	err = 0;
2751 
2752 	while (i >= 0 && err == 0) {
2753 		if (i == depth) {
2754 			/* this is leaf block */
2755 			err = ext4_ext_rm_leaf(handle, inode, path,
2756 					       &partial_cluster, start,
2757 					       end);
2758 			/* root level has p_bh == NULL, brelse() eats this */
2759 			brelse(path[i].p_bh);
2760 			path[i].p_bh = NULL;
2761 			i--;
2762 			continue;
2763 		}
2764 
2765 		/* this is index block */
2766 		if (!path[i].p_hdr) {
2767 			ext_debug("initialize header\n");
2768 			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2769 		}
2770 
2771 		if (!path[i].p_idx) {
2772 			/* this level hasn't been touched yet */
2773 			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2774 			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2775 			ext_debug("init index ptr: hdr 0x%p, num %d\n",
2776 				  path[i].p_hdr,
2777 				  le16_to_cpu(path[i].p_hdr->eh_entries));
2778 		} else {
2779 			/* we were already here, see at next index */
2780 			path[i].p_idx--;
2781 		}
2782 
2783 		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2784 				i, EXT_FIRST_INDEX(path[i].p_hdr),
2785 				path[i].p_idx);
2786 		if (ext4_ext_more_to_rm(path + i)) {
2787 			struct buffer_head *bh;
2788 			/* go to the next level */
2789 			ext_debug("move to level %d (block %llu)\n",
2790 				  i + 1, ext4_idx_pblock(path[i].p_idx));
2791 			memset(path + i + 1, 0, sizeof(*path));
2792 			bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
2793 			if (!bh) {
2794 				/* should we reset i_size? */
2795 				err = -EIO;
2796 				break;
2797 			}
2798 			if (WARN_ON(i + 1 > depth)) {
2799 				err = -EIO;
2800 				break;
2801 			}
2802 			if (ext4_ext_check_block(inode, ext_block_hdr(bh),
2803 							depth - i - 1, bh)) {
2804 				err = -EIO;
2805 				break;
2806 			}
2807 			path[i + 1].p_bh = bh;
2808 
2809 			/* save actual number of indexes since this
2810 			 * number is changed at the next iteration */
2811 			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2812 			i++;
2813 		} else {
2814 			/* we finished processing this index, go up */
2815 			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2816 				/* index is empty, remove it;
2817 				 * handle must already be prepared by the
2818 				 * earlier leaf truncation */
2819 				err = ext4_ext_rm_idx(handle, inode, path, i);
2820 			}
2821 			/* root level has p_bh == NULL, brelse() eats this */
2822 			brelse(path[i].p_bh);
2823 			path[i].p_bh = NULL;
2824 			i--;
2825 			ext_debug("return to level %d\n", i);
2826 		}
2827 	}
2828 
2829 	trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster,
2830 			path->p_hdr->eh_entries);
2831 
2832 	/* If we still have something in the partial cluster and we have removed
2833 	 * even the first extent, then we should free the blocks in the partial
2834 	 * cluster as well. */
2835 	if (partial_cluster && path->p_hdr->eh_entries == 0) {
2836 		int flags = EXT4_FREE_BLOCKS_FORGET;
2837 
2838 		if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2839 			flags |= EXT4_FREE_BLOCKS_METADATA;
2840 
2841 		ext4_free_blocks(handle, inode, NULL,
2842 				 EXT4_C2B(EXT4_SB(sb), partial_cluster),
2843 				 EXT4_SB(sb)->s_cluster_ratio, flags);
2844 		partial_cluster = 0;
2845 	}
2846 
2847 	/* TODO: flexible tree reduction should be here */
2848 	if (path->p_hdr->eh_entries == 0) {
2849 		/*
2850 		 * truncate to zero freed all the tree,
2851 		 * so we need to correct eh_depth
2852 		 */
2853 		err = ext4_ext_get_access(handle, inode, path);
2854 		if (err == 0) {
2855 			ext_inode_hdr(inode)->eh_depth = 0;
2856 			ext_inode_hdr(inode)->eh_max =
2857 				cpu_to_le16(ext4_ext_space_root(inode, 0));
2858 			err = ext4_ext_dirty(handle, inode, path);
2859 		}
2860 	}
2861 out:
2862 	ext4_ext_drop_refs(path);
2863 	kfree(path);
2864 	if (err == -EAGAIN) {
2865 		path = NULL;
2866 		goto again;
2867 	}
2868 	ext4_journal_stop(handle);
2869 
2870 	return err;
2871 }
2872 
2873 /*
2874  * called at mount time
2875  */
2876 void ext4_ext_init(struct super_block *sb)
2877 {
2878 	/*
2879 	 * possible initialization would be here
2880 	 */
2881 
2882 	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
2883 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
2884 		printk(KERN_INFO "EXT4-fs: file extents enabled"
2885 #ifdef AGGRESSIVE_TEST
2886 		       ", aggressive tests"
2887 #endif
2888 #ifdef CHECK_BINSEARCH
2889 		       ", check binsearch"
2890 #endif
2891 #ifdef EXTENTS_STATS
2892 		       ", stats"
2893 #endif
2894 		       "\n");
2895 #endif
2896 #ifdef EXTENTS_STATS
2897 		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
2898 		EXT4_SB(sb)->s_ext_min = 1 << 30;
2899 		EXT4_SB(sb)->s_ext_max = 0;
2900 #endif
2901 	}
2902 }
2903 
2904 /*
2905  * called at umount time
2906  */
2907 void ext4_ext_release(struct super_block *sb)
2908 {
2909 	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
2910 		return;
2911 
2912 #ifdef EXTENTS_STATS
2913 	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
2914 		struct ext4_sb_info *sbi = EXT4_SB(sb);
2915 		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
2916 			sbi->s_ext_blocks, sbi->s_ext_extents,
2917 			sbi->s_ext_blocks / sbi->s_ext_extents);
2918 		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
2919 			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
2920 	}
2921 #endif
2922 }
2923 
2924 /* FIXME!! we need to try to merge to left or right after zero-out  */
2925 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2926 {
2927 	ext4_fsblk_t ee_pblock;
2928 	unsigned int ee_len;
2929 	int ret;
2930 
2931 	ee_len    = ext4_ext_get_actual_len(ex);
2932 	ee_pblock = ext4_ext_pblock(ex);
2933 
2934 	ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
2935 	if (ret > 0)
2936 		ret = 0;
2937 
2938 	return ret;
2939 }
2940 
2941 /*
2942  * ext4_split_extent_at() splits an extent at given block.
2943  *
2944  * @handle: the journal handle
2945  * @inode: the file inode
2946  * @path: the path to the extent
2947  * @split: the logical block where the extent is split.
2948  * @split_flag: indicates whether the extent can be zeroed out if the split
2949  *		 fails, and the states (init or uninit) of the new extents.
2950  * @flags: flags used to insert the new extent into the extent tree.
2951  *
2952  *
2953  * Splits extent [a, b] into two extents [a, @split) and [@split, b], the
2954  * states of which are determined by split_flag.
2955  *
2956  * There are two cases:
2957  *  a> the extent is split into two extents.
2958  *  b> no split is needed, and the extent is just marked.
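 *
 * For example (illustrative): splitting the extent [100, 200) at
 * @split = 150 shrinks the original to [100, 150) and inserts a new
 * extent [150, 200) whose physical start is offset by 50 blocks.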
2959  *
2960  * return 0 on success.
2961  */
2962 static int ext4_split_extent_at(handle_t *handle,
2963 			     struct inode *inode,
2964 			     struct ext4_ext_path *path,
2965 			     ext4_lblk_t split,
2966 			     int split_flag,
2967 			     int flags)
2968 {
2969 	ext4_fsblk_t newblock;
2970 	ext4_lblk_t ee_block;
2971 	struct ext4_extent *ex, newex, orig_ex;
2972 	struct ext4_extent *ex2 = NULL;
2973 	unsigned int ee_len, depth;
2974 	int err = 0;
2975 
2976 	BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
2977 	       (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
2978 
2979 	ext_debug("ext4_split_extent_at: inode %lu, logical"
2980 		"block %llu\n", inode->i_ino, (unsigned long long)split);
2981 
2982 	ext4_ext_show_leaf(inode, path);
2983 
2984 	depth = ext_depth(inode);
2985 	ex = path[depth].p_ext;
2986 	ee_block = le32_to_cpu(ex->ee_block);
2987 	ee_len = ext4_ext_get_actual_len(ex);
2988 	newblock = split - ee_block + ext4_ext_pblock(ex);
2989 
2990 	BUG_ON(split < ee_block || split >= (ee_block + ee_len));
2991 
2992 	err = ext4_ext_get_access(handle, inode, path + depth);
2993 	if (err)
2994 		goto out;
2995 
2996 	if (split == ee_block) {
2997 		/*
2998 		 * case b: block @split is the block that the extent begins with
2999 		 * then we just change the state of the extent, and splitting
3000 		 * is not needed.
3001 		 */
3002 		if (split_flag & EXT4_EXT_MARK_UNINIT2)
3003 			ext4_ext_mark_uninitialized(ex);
3004 		else
3005 			ext4_ext_mark_initialized(ex);
3006 
3007 		if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
3008 			ext4_ext_try_to_merge(handle, inode, path, ex);
3009 
3010 		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3011 		goto out;
3012 	}
3013 
3014 	/* case a */
3015 	memcpy(&orig_ex, ex, sizeof(orig_ex));
3016 	ex->ee_len = cpu_to_le16(split - ee_block);
3017 	if (split_flag & EXT4_EXT_MARK_UNINIT1)
3018 		ext4_ext_mark_uninitialized(ex);
3019 
3020 	/*
3021 	 * the path may lead to a new leaf, not to the original leaf
3022 	 * any more, after ext4_ext_insert_extent() returns.
3023 	 */
3024 	err = ext4_ext_dirty(handle, inode, path + depth);
3025 	if (err)
3026 		goto fix_extent_len;
3027 
3028 	ex2 = &newex;
3029 	ex2->ee_block = cpu_to_le32(split);
3030 	ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
3031 	ext4_ext_store_pblock(ex2, newblock);
3032 	if (split_flag & EXT4_EXT_MARK_UNINIT2)
3033 		ext4_ext_mark_uninitialized(ex2);
3034 
3035 	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
3036 	if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3037 		if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
3038 			if (split_flag & EXT4_EXT_DATA_VALID1)
3039 				err = ext4_ext_zeroout(inode, ex2);
3040 			else
3041 				err = ext4_ext_zeroout(inode, ex);
3042 		} else
3043 			err = ext4_ext_zeroout(inode, &orig_ex);
3044 
3045 		if (err)
3046 			goto fix_extent_len;
3047 		/* update the extent length and mark as initialized */
3048 		ex->ee_len = cpu_to_le16(ee_len);
3049 		ext4_ext_try_to_merge(handle, inode, path, ex);
3050 		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3051 		goto out;
3052 	} else if (err)
3053 		goto fix_extent_len;
3054 
3055 out:
3056 	ext4_ext_show_leaf(inode, path);
3057 	return err;
3058 
3059 fix_extent_len:
3060 	ex->ee_len = orig_ex.ee_len;
3061 	ext4_ext_dirty(handle, inode, path + depth);
3062 	return err;
3063 }
3064 
3065 /*
3066  * ext4_split_extent() splits an extent and marks the part covered
3067  * by @map as @split_flag indicates.
3068  *
3069  * It may result in splitting the extent into multiple extents (up to three).
3070  * There are three possibilities:
3071  *   a> There is no split required
3072  *   b> Splits in two extents: Split is happening at either end of the extent
3073  *   c> Splits in three extents: Someone is splitting in the middle of the extent
3074  *
3075  */
3076 static int ext4_split_extent(handle_t *handle,
3077 			      struct inode *inode,
3078 			      struct ext4_ext_path *path,
3079 			      struct ext4_map_blocks *map,
3080 			      int split_flag,
3081 			      int flags)
3082 {
3083 	ext4_lblk_t ee_block;
3084 	struct ext4_extent *ex;
3085 	unsigned int ee_len, depth;
3086 	int err = 0;
3087 	int uninitialized;
3088 	int split_flag1, flags1;
3089 
3090 	depth = ext_depth(inode);
3091 	ex = path[depth].p_ext;
3092 	ee_block = le32_to_cpu(ex->ee_block);
3093 	ee_len = ext4_ext_get_actual_len(ex);
3094 	uninitialized = ext4_ext_is_uninitialized(ex);
3095 
3096 	if (map->m_lblk + map->m_len < ee_block + ee_len) {
3097 		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
3098 		flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
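		/*
		 * PRE_IO keeps ext4_ext_insert_extent() from trying to
		 * merge the transient right-hand extent before the second
		 * split at map->m_lblk below has been carried out.
		 */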
3099 		if (uninitialized)
3100 			split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
3101 				       EXT4_EXT_MARK_UNINIT2;
3102 		if (split_flag & EXT4_EXT_DATA_VALID2)
3103 			split_flag1 |= EXT4_EXT_DATA_VALID1;
3104 		err = ext4_split_extent_at(handle, inode, path,
3105 				map->m_lblk + map->m_len, split_flag1, flags1);
3106 		if (err)
3107 			goto out;
3108 	}
3109 
3110 	ext4_ext_drop_refs(path);
3111 	path = ext4_ext_find_extent(inode, map->m_lblk, path);
3112 	if (IS_ERR(path))
3113 		return PTR_ERR(path);
3114 
3115 	if (map->m_lblk >= ee_block) {
3116 		split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT |
3117 					    EXT4_EXT_DATA_VALID2);
3118 		if (uninitialized)
3119 			split_flag1 |= EXT4_EXT_MARK_UNINIT1;
3120 		if (split_flag & EXT4_EXT_MARK_UNINIT2)
3121 			split_flag1 |= EXT4_EXT_MARK_UNINIT2;
3122 		err = ext4_split_extent_at(handle, inode, path,
3123 				map->m_lblk, split_flag1, flags);
3124 		if (err)
3125 			goto out;
3126 	}
3127 
3128 	ext4_ext_show_leaf(inode, path);
3129 out:
3130 	return err ? err : map->m_len;
3131 }
3132 
3133 /*
3134  * This function is called by ext4_ext_map_blocks() if someone tries to write
3135  * to an uninitialized extent. It may result in splitting the uninitialized
3136  * extent into multiple extents (up to three - one initialized and two
3137  * uninitialized).
3138  * There are three possibilities:
3139  *   a> There is no split required: Entire extent should be initialized
3140  *   b> Splits in two extents: Write is happening at either end of the extent
3141  *   c> Splits in three extents: Someone is writing in the middle of the extent
3142  *
3143  * Pre-conditions:
3144  *  - The extent pointed to by 'path' is uninitialized.
3145  *  - The extent pointed to by 'path' contains a superset
3146  *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
3147  *
3148  * Post-conditions on success:
3149  *  - the returned value is the number of blocks beyond map->m_lblk
3150  *    that are allocated and initialized.
3151  *    It is guaranteed to be >= map->m_len.
3152  */
3153 static int ext4_ext_convert_to_initialized(handle_t *handle,
3154 					   struct inode *inode,
3155 					   struct ext4_map_blocks *map,
3156 					   struct ext4_ext_path *path)
3157 {
3158 	struct ext4_sb_info *sbi;
3159 	struct ext4_extent_header *eh;
3160 	struct ext4_map_blocks split_map;
3161 	struct ext4_extent zero_ex;
3162 	struct ext4_extent *ex;
3163 	ext4_lblk_t ee_block, eof_block;
3164 	unsigned int ee_len, depth;
3165 	int allocated, max_zeroout = 0;
3166 	int err = 0;
3167 	int split_flag = 0;
3168 
3169 	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
3170 		"block %llu, max_blocks %u\n", inode->i_ino,
3171 		(unsigned long long)map->m_lblk, map->m_len);
3172 
3173 	sbi = EXT4_SB(inode->i_sb);
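	/* eof_block is the first block past EOF, rounded up to a block
	 * boundary; never let it fall inside the range being written */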
3174 	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3175 		inode->i_sb->s_blocksize_bits;
3176 	if (eof_block < map->m_lblk + map->m_len)
3177 		eof_block = map->m_lblk + map->m_len;
3178 
3179 	depth = ext_depth(inode);
3180 	eh = path[depth].p_hdr;
3181 	ex = path[depth].p_ext;
3182 	ee_block = le32_to_cpu(ex->ee_block);
3183 	ee_len = ext4_ext_get_actual_len(ex);
3184 	allocated = ee_len - (map->m_lblk - ee_block);
3185 
3186 	trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
3187 
3188 	/* Pre-conditions */
3189 	BUG_ON(!ext4_ext_is_uninitialized(ex));
3190 	BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
3191 
3192 	/*
3193 	 * Attempt to transfer newly initialized blocks from the currently
3194 	 * uninitialized extent to its left neighbor. This is much cheaper
3195 	 * than an insertion followed by a merge as those involve costly
3196 	 * memmove() calls. This is the common case in steady state for
3197 	 * workloads doing fallocate(FALLOC_FL_KEEP_SIZE) followed by append
3198 	 * writes.
3199 	 *
3200 	 * Limitations of the current logic:
3201 	 *  - L1: we only deal with writes at the start of the extent.
3202 	 *    The approach could be extended to writes at the end
3203 	 *    of the extent but this scenario was deemed less common.
3204 	 *  - L2: we do not deal with writes covering the whole extent.
3205 	 *    This would require removing the extent if the transfer
3206 	 *    is possible.
3207 	 *  - L3: we only attempt to merge with an extent stored in the
3208 	 *    same extent tree node.
3209 	 */
3210 	if ((map->m_lblk == ee_block) &&	/*L1*/
3211 		(map->m_len < ee_len) &&	/*L2*/
3212 		(ex > EXT_FIRST_EXTENT(eh))) {	/*L3*/
3213 		struct ext4_extent *prev_ex;
3214 		ext4_lblk_t prev_lblk;
3215 		ext4_fsblk_t prev_pblk, ee_pblk;
3216 		unsigned int prev_len, write_len;
3217 
3218 		prev_ex = ex - 1;
3219 		prev_lblk = le32_to_cpu(prev_ex->ee_block);
3220 		prev_len = ext4_ext_get_actual_len(prev_ex);
3221 		prev_pblk = ext4_ext_pblock(prev_ex);
3222 		ee_pblk = ext4_ext_pblock(ex);
3223 		write_len = map->m_len;
3224 
3225 		/*
3226 		 * A transfer of blocks from 'ex' to 'prev_ex' is allowed
3227 		 * upon those conditions:
3228 		 * - C1: prev_ex is initialized,
3229 		 * - C2: prev_ex is logically abutting ex,
3230 		 * - C3: prev_ex is physically abutting ex,
3231 		 * - C4: prev_ex can receive the additional blocks without
3232 		 *   overflowing the (initialized) length limit.
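		 *
		 * Illustrative example: prev_ex = [100, 110) at pblk 500
		 * (initialized) and ex = [110, 120) at pblk 510
		 * (uninitialized); a 4-block write at block 110 leaves
		 * prev_ex = [100, 114) and ex = [114, 120) at pblk 514.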
3233 		 */
3234 		if ((!ext4_ext_is_uninitialized(prev_ex)) &&		/*C1*/
3235 			((prev_lblk + prev_len) == ee_block) &&		/*C2*/
3236 			((prev_pblk + prev_len) == ee_pblk) &&		/*C3*/
3237 			(prev_len < (EXT_INIT_MAX_LEN - write_len))) {	/*C4*/
3238 			err = ext4_ext_get_access(handle, inode, path + depth);
3239 			if (err)
3240 				goto out;
3241 
3242 			trace_ext4_ext_convert_to_initialized_fastpath(inode,
3243 				map, ex, prev_ex);
3244 
3245 			/* Shift the start of ex by 'write_len' blocks */
3246 			ex->ee_block = cpu_to_le32(ee_block + write_len);
3247 			ext4_ext_store_pblock(ex, ee_pblk + write_len);
3248 			ex->ee_len = cpu_to_le16(ee_len - write_len);
3249 			ext4_ext_mark_uninitialized(ex); /* Restore the flag */
3250 
3251 			/* Extend prev_ex by 'write_len' blocks */
3252 			prev_ex->ee_len = cpu_to_le16(prev_len + write_len);
3253 
3254 			/* Mark the block containing both extents as dirty */
3255 			ext4_ext_dirty(handle, inode, path + depth);
3256 
3257 			/* Update path to point to the right extent */
3258 			path[depth].p_ext = prev_ex;
3259 
3260 			/* Result: number of initialized blocks past m_lblk */
3261 			allocated = write_len;
3262 			goto out;
3263 		}
3264 	}
3265 
3266 	WARN_ON(map->m_lblk < ee_block);
3267 	/*
3268 	 * It is safe to convert the extent to initialized via explicit
3269 	 * zeroout only if the extent is fully inside i_size or new_size.
3270 	 */
3271 	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3272 
3273 	if (EXT4_EXT_MAY_ZEROOUT & split_flag)
3274 		max_zeroout = sbi->s_extent_max_zeroout_kb >>
3275 			inode->i_sb->s_blocksize_bits;
3276 
3277 	/* If extent is less than s_max_zeroout_kb, zeroout directly */
3278 	if (max_zeroout && (ee_len <= max_zeroout)) {
3279 		err = ext4_ext_zeroout(inode, ex);
3280 		if (err)
3281 			goto out;
3282 
3283 		err = ext4_ext_get_access(handle, inode, path + depth);
3284 		if (err)
3285 			goto out;
3286 		ext4_ext_mark_initialized(ex);
3287 		ext4_ext_try_to_merge(handle, inode, path, ex);
3288 		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3289 		goto out;
3290 	}
3291 
3292 	/*
3293 	 * four cases:
3294 	 * 1. split the extent into three extents.
3295 	 * 2. split the extent into two extents, zeroout the first half.
3296 	 * 3. split the extent into two extents, zeroout the second half.
3297 	 * 4. split the extent into two extents without zeroout.
3298 	 */
3299 	split_map.m_lblk = map->m_lblk;
3300 	split_map.m_len = map->m_len;
3301 
3302 	if (max_zeroout && (allocated > map->m_len)) {
3303 		if (allocated <= max_zeroout) {
3304 			/* case 3 */
3305 			zero_ex.ee_block =
3306 					 cpu_to_le32(map->m_lblk);
3307 			zero_ex.ee_len = cpu_to_le16(allocated);
3308 			ext4_ext_store_pblock(&zero_ex,
3309 				ext4_ext_pblock(ex) + map->m_lblk - ee_block);
3310 			err = ext4_ext_zeroout(inode, &zero_ex);
3311 			if (err)
3312 				goto out;
3313 			split_map.m_lblk = map->m_lblk;
3314 			split_map.m_len = allocated;
3315 		} else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
3316 			/* case 2 */
3317 			if (map->m_lblk != ee_block) {
3318 				zero_ex.ee_block = ex->ee_block;
3319 				zero_ex.ee_len = cpu_to_le16(map->m_lblk -
3320 							ee_block);
3321 				ext4_ext_store_pblock(&zero_ex,
3322 						      ext4_ext_pblock(ex));
3323 				err = ext4_ext_zeroout(inode, &zero_ex);
3324 				if (err)
3325 					goto out;
3326 			}
3327 
3328 			split_map.m_lblk = ee_block;
3329 			split_map.m_len = map->m_lblk - ee_block + map->m_len;
3330 			allocated = map->m_len;
3331 		}
3332 	}
3333 
3334 	allocated = ext4_split_extent(handle, inode, path,
3335 				      &split_map, split_flag, 0);
3336 	if (allocated < 0)
3337 		err = allocated;
3338 
3339 out:
3340 	return err ? err : allocated;
3341 }
3342 
3343 /*
3344  * This function is called by ext4_ext_map_blocks() from
3345  * ext4_get_blocks_dio_write() when DIO to write
3346  * to an uninitialized extent.
3347  *
3348  * Writing to an uninitialized extent may result in splitting the uninitialized
3349  * extent into multiple initialized/uninitialized extents (up to three)
3350  * There are three possibilities:
3351  *   a> There is no split required: Entire extent should be uninitialized
3352  *   c> Splits in three extents: Someone is writing in the middle of the extent
3353  *   c> Splits in three extents: Somone is writing in middle of the extent
3354  *
3355  * One or more index blocks may be needed if the extent tree grows after
3356  * the uninitialized extent is split. To prevent ENOSPC from occurring at
3357  * IO completion, we need to split the uninitialized extent before the DIO
3358  * is submitted. The uninitialized extent will be split into at most three
3359  * uninitialized extents. After the IO completes, the part that was filled
3360  * will be converted to initialized by the end_io callback function
3361  * via ext4_convert_unwritten_extents().
3362  *
3363  * Returns the size of the uninitialized extent to be written on success.
3364  */
3365 static int ext4_split_unwritten_extents(handle_t *handle,
3366 					struct inode *inode,
3367 					struct ext4_map_blocks *map,
3368 					struct ext4_ext_path *path,
3369 					int flags)
3370 {
3371 	ext4_lblk_t eof_block;
3372 	ext4_lblk_t ee_block;
3373 	struct ext4_extent *ex;
3374 	unsigned int ee_len;
3375 	int split_flag = 0, depth;
3376 
3377 	ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
3378 		"block %llu, max_blocks %u\n", inode->i_ino,
3379 		(unsigned long long)map->m_lblk, map->m_len);
3380 
3381 	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3382 		inode->i_sb->s_blocksize_bits;
3383 	if (eof_block < map->m_lblk + map->m_len)
3384 		eof_block = map->m_lblk + map->m_len;
3385 	/*
3386 	 * It is safe to convert the extent to initialized via explicit
3387 	 * zeroout only if the extent is fully inside i_size or new_size.
3388 	 */
3389 	depth = ext_depth(inode);
3390 	ex = path[depth].p_ext;
3391 	ee_block = le32_to_cpu(ex->ee_block);
3392 	ee_len = ext4_ext_get_actual_len(ex);
3393 
3394 	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3395 	split_flag |= EXT4_EXT_MARK_UNINIT2;
3396 	if (flags & EXT4_GET_BLOCKS_CONVERT)
3397 		split_flag |= EXT4_EXT_DATA_VALID2;
3398 	flags |= EXT4_GET_BLOCKS_PRE_IO;
3399 	return ext4_split_extent(handle, inode, path, map, split_flag, flags);
3400 }
3401 
3402 static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3403 						struct inode *inode,
3404 						struct ext4_map_blocks *map,
3405 						struct ext4_ext_path *path)
3406 {
3407 	struct ext4_extent *ex;
3408 	ext4_lblk_t ee_block;
3409 	unsigned int ee_len;
3410 	int depth;
3411 	int err = 0;
3412 
3413 	depth = ext_depth(inode);
3414 	ex = path[depth].p_ext;
3415 	ee_block = le32_to_cpu(ex->ee_block);
3416 	ee_len = ext4_ext_get_actual_len(ex);
3417 
3418 	ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
3419 		"block %llu, max_blocks %u\n", inode->i_ino,
3420 		  (unsigned long long)ee_block, ee_len);
3421 
3422 	/* If extent is larger than requested then split is required */
3423 	if (ee_block != map->m_lblk || ee_len > map->m_len) {
3424 		err = ext4_split_unwritten_extents(handle, inode, map, path,
3425 						   EXT4_GET_BLOCKS_CONVERT);
3426 		if (err < 0)
3427 			goto out;
3428 		ext4_ext_drop_refs(path);
3429 		path = ext4_ext_find_extent(inode, map->m_lblk, path);
3430 		if (IS_ERR(path)) {
3431 			err = PTR_ERR(path);
3432 			goto out;
3433 		}
3434 		depth = ext_depth(inode);
3435 		ex = path[depth].p_ext;
3436 	}
3437 
3438 	err = ext4_ext_get_access(handle, inode, path + depth);
3439 	if (err)
3440 		goto out;
3441 	/* first mark the extent as initialized */
3442 	ext4_ext_mark_initialized(ex);
3443 
3444 	/* note: ext4_ext_correct_indexes() isn't needed here because
3445 	 * borders are not changed
3446 	 */
3447 	ext4_ext_try_to_merge(handle, inode, path, ex);
3448 
3449 	/* Mark modified extent as dirty */
3450 	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3451 out:
3452 	ext4_ext_show_leaf(inode, path);
3453 	return err;
3454 }
3455 
3456 static void unmap_underlying_metadata_blocks(struct block_device *bdev,
3457 			sector_t block, int count)
3458 {
3459 	int i;
3460 	for (i = 0; i < count; i++)
3461 		unmap_underlying_metadata(bdev, block + i);
3462 }
3463 
3464 /*
3465  * Handle EOFBLOCKS_FL flag, clearing it if necessary
3466  */
3467 static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
3468 			      ext4_lblk_t lblk,
3469 			      struct ext4_ext_path *path,
3470 			      unsigned int len)
3471 {
3472 	int i, depth;
3473 	struct ext4_extent_header *eh;
3474 	struct ext4_extent *last_ex;
3475 
3476 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
3477 		return 0;
3478 
3479 	depth = ext_depth(inode);
3480 	eh = path[depth].p_hdr;
3481 
3482 	/*
3483 	 * We're going to remove EOFBLOCKS_FL entirely in the future so we
3484 	 * do not care about this case anymore. Simply remove the flag
3485 	 * if there are no extents.
3486 	 */
3487 	if (unlikely(!eh->eh_entries))
3488 		goto out;
3489 	last_ex = EXT_LAST_EXTENT(eh);
3490 	/*
3491 	 * We should clear the EOFBLOCKS_FL flag if we are writing the
3492 	 * last block in the last extent in the file.  We test this by
3493 	 * first checking to see if the caller to
3494 	 * ext4_ext_get_blocks() was interested in the last block (or
3495 	 * a block beyond the last block) in the current extent.  If
3496 	 * this turns out to be false, we can bail out from this
3497 	 * function immediately.
3498 	 */
3499 	if (lblk + len < le32_to_cpu(last_ex->ee_block) +
3500 	    ext4_ext_get_actual_len(last_ex))
3501 		return 0;
3502 	/*
3503 	 * If the caller does appear to be planning to write at or
3504 	 * beyond the end of the current extent, we then test to see
3505 	 * if the current extent is the last extent in the file, by
3506 	 * checking to make sure it was reached via the rightmost node
3507 	 * at each level of the tree.
3508 	 */
3509 	for (i = depth-1; i >= 0; i--)
3510 		if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
3511 			return 0;
3512 out:
3513 	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3514 	return ext4_mark_inode_dirty(handle, inode);
3515 }
3516 
3517 /**
3518  * ext4_find_delalloc_range: find delayed allocated block in the given range.
3519  *
3520  * Return 1 if there is a delalloc block in the range, otherwise 0.
3521  */
3522 static int ext4_find_delalloc_range(struct inode *inode,
3523 				    ext4_lblk_t lblk_start,
3524 				    ext4_lblk_t lblk_end)
3525 {
3526 	struct extent_status es;
3527 
3528 	es.start = lblk_start;
3529 	ext4_es_find_extent(inode, &es);
3530 	if (es.len == 0)
3531 		return 0; /* there is no delay extent in this tree */
3532 	else if (es.start <= lblk_start && lblk_start < es.start + es.len)
3533 		return 1;
3534 	else if (lblk_start <= es.start && es.start <= lblk_end)
3535 		return 1;
3536 	else
3537 		return 0;
3538 }
3539 
3540 int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
3541 {
3542 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3543 	ext4_lblk_t lblk_start, lblk_end;
3544 	lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
3545 	lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
3546 
3547 	return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
3548 }
3549 
3550 /**
3551  * Determines how many complete clusters (out of those specified by the 'map')
3552  * are under delalloc and for which quota was reserved.
3553  * This function is called when we are writing out the blocks that were
3554  * originally written with their allocation delayed, but then the space was
3555  * allocated using fallocate() before the delayed allocation could be resolved.
3556  * The cases to look for are:
3557  * ('=' indicates delayed allocated blocks
3558  *  '-' indicates non-delayed allocated blocks)
3559  * (a) partial clusters towards beginning and/or end outside of allocated range
3560  *     are not delalloc'ed.
3561  *	Ex:
3562  *	|----c---=|====c====|====c====|===-c----|
3563  *	         |++++++ allocated ++++++|
3564  *	==> 4 complete clusters in above example
3565  *
3566  * (b) partial cluster (outside of allocated range) towards either end is
3567  *     marked for delayed allocation. In this case, we will exclude that
3568  *     cluster.
3569  *	Ex:
3570  *	|----====c========|========c========|
3571  *	     |++++++ allocated ++++++|
3572  *	==> 1 complete cluster in above example
3573  *
3574  *	Ex:
3575  *	|================c================|
3576  *            |++++++ allocated ++++++|
3577  *	==> 0 complete clusters in above example
3578  *
3579  * The ext4_da_update_reserve_space will be called only if we
3580  * determine here that there were some "entire" clusters that span
3581  * this 'allocated' range.
3582  * In the non-bigalloc case, this function will just end up returning num_blks
3583  * without ever calling ext4_find_delalloc_range.
3584  */
3585 static unsigned int
3586 get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
3587 			   unsigned int num_blks)
3588 {
3589 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3590 	ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
3591 	ext4_lblk_t lblk_from, lblk_to, c_offset;
3592 	unsigned int allocated_clusters = 0;
3593 
3594 	alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
3595 	alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
3596 
3597 	/* max possible clusters for this allocation */
3598 	allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
3599 
3600 	trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
3601 
3602 	/* Check towards left side */
3603 	c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
3604 	if (c_offset) {
3605 		lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
3606 		lblk_to = lblk_from + c_offset - 1;
3607 
3608 		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
3609 			allocated_clusters--;
3610 	}
3611 
3612 	/* Now check towards right. */
3613 	c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
3614 	if (allocated_clusters && c_offset) {
3615 		lblk_from = lblk_start + num_blks;
3616 		lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
3617 
3618 		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
3619 			allocated_clusters--;
3620 	}
3621 
3622 	return allocated_clusters;
3623 }
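/*
 * The accounting above is easiest to check in isolation.  Below is a
 * minimal userspace sketch (not kernel code) of the same partial-cluster
 * logic, assuming a power-of-two cluster ratio; the is_delalloc() stub is
 * a hypothetical stand-in for ext4_find_delalloc_range():
 */
#if 0	/* illustrative example only, not compiled into the kernel */
#include <stdio.h>

/* hypothetical stand-in: pretend every queried range is delalloc'ed */
static int is_delalloc(unsigned int from, unsigned int to)
{
	(void)from;
	(void)to;
	return 1;
}

static unsigned int reserved_clusters(unsigned int lblk_start,
				      unsigned int num_blks,
				      unsigned int ratio)
{
	unsigned int first = lblk_start / ratio;	/* EXT4_B2C() */
	unsigned int last = (lblk_start + num_blks - 1) / ratio;
	unsigned int n = last - first + 1;		/* max possible clusters */
	unsigned int c_off = lblk_start & (ratio - 1);

	/* left partial cluster: drop it if the blocks before lblk_start
	 * within that cluster are delayed allocated (case (b) above) */
	if (c_off && is_delalloc(lblk_start - c_off, lblk_start - 1))
		n--;
	/* right partial cluster: same check for the blocks after the range */
	c_off = (lblk_start + num_blks) & (ratio - 1);
	if (n && c_off && is_delalloc(lblk_start + num_blks,
				      lblk_start + num_blks + (ratio - c_off) - 1))
		n--;
	return n;
}

int main(void)
{
	/* ratio 4, blocks 5..17 span clusters 1..4; both partial end
	 * clusters are delalloc'ed, so 2 complete clusters remain */
	printf("%u\n", reserved_clusters(5, 13, 4));
	return 0;
}
#endif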
3624 
3625 static int
3626 ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3627 			struct ext4_map_blocks *map,
3628 			struct ext4_ext_path *path, int flags,
3629 			unsigned int allocated, ext4_fsblk_t newblock)
3630 {
3631 	int ret = 0;
3632 	int err = 0;
3633 	ext4_io_end_t *io = ext4_inode_aio(inode);
3634 
3635 	ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
3636 		  "block %llu, max_blocks %u, flags %x, allocated %u\n",
3637 		  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
3638 		  flags, allocated);
3639 	ext4_ext_show_leaf(inode, path);
3640 
3641 	trace_ext4_ext_handle_uninitialized_extents(inode, map, flags,
3642 						    allocated, newblock);
3643 
3644 	/* get_block() before submitting the IO: split the extent */
3645 	if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3646 		ret = ext4_split_unwritten_extents(handle, inode, map,
3647 						   path, flags);
3648 		if (ret <= 0)
3649 			goto out;
3650 		/*
3651 		 * Flag the inode (non-aio case) or the end_io struct (aio
3652 		 * case) so that this IO gets converted to written when the
3653 		 * IO is completed
3654 		 */
3655 		if (io)
3656 			ext4_set_io_unwritten_flag(inode, io);
3657 		else
3658 			ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3659 		if (ext4_should_dioread_nolock(inode))
3660 			map->m_flags |= EXT4_MAP_UNINIT;
3661 		goto out;
3662 	}
3663 	/* IO end_io complete, convert the filled extent to written */
3664 	if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
3665 		ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
3666 							path);
3667 		if (ret >= 0) {
3668 			ext4_update_inode_fsync_trans(handle, inode, 1);
3669 			err = check_eofblocks_fl(handle, inode, map->m_lblk,
3670 						 path, map->m_len);
3671 		} else
3672 			err = ret;
3673 		goto out2;
3674 	}
3675 	/* buffered IO case */
3676 	/*
3677 	 * repeated fallocate creation request:
3678 	 * we already have an unwritten extent
3679 	 */
3680 	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
3681 		goto map_out;
3682 
3683 	/* buffered READ or buffered write_begin() lookup */
3684 	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3685 		/*
3686 		 * We have blocks reserved already.  We
3687 		 * return allocated blocks so that delalloc
3688 		 * won't do block reservation for us.  But
3689 		 * the buffer head will be unmapped so that
3690 		 * a read from the block returns 0s.
3691 		 */
3692 		map->m_flags |= EXT4_MAP_UNWRITTEN;
3693 		goto out1;
3694 	}
3695 
3696 	/* buffered write, writepage time, convert */
3697 	ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
3698 	if (ret >= 0)
3699 		ext4_update_inode_fsync_trans(handle, inode, 1);
3700 out:
3701 	if (ret <= 0) {
3702 		err = ret;
3703 		goto out2;
3704 	} else
3705 		allocated = ret;
3706 	map->m_flags |= EXT4_MAP_NEW;
3707 	/*
3708 	 * if we allocated more blocks than requested,
3709 	 * we need to make sure we unmap the extra blocks
3710 	 * allocated. The blocks actually needed will get
3711 	 * unmapped later when we find the buffer_head marked
3712 	 * new.
3713 	 */
3714 	if (allocated > map->m_len) {
3715 		unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
3716 					newblock + map->m_len,
3717 					allocated - map->m_len);
3718 		allocated = map->m_len;
3719 	}
3720 
3721 	/*
3722 	 * If we have done fallocate with the offset that is already
3723 	 * delayed allocated, we would have block reservation
3724 	 * and quota reservation done in the delayed write path.
3725 	 * But fallocate would have already updated quota and block
3726 	 * count for this offset. So cancel these reservation
3727 	 */
3728 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
3729 		unsigned int reserved_clusters;
3730 		reserved_clusters = get_reserved_cluster_alloc(inode,
3731 				map->m_lblk, map->m_len);
3732 		if (reserved_clusters)
3733 			ext4_da_update_reserve_space(inode,
3734 						     reserved_clusters,
3735 						     0);
3736 	}
3737 
3738 map_out:
3739 	map->m_flags |= EXT4_MAP_MAPPED;
3740 	if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
3741 		err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
3742 					 map->m_len);
3743 		if (err < 0)
3744 			goto out2;
3745 	}
3746 out1:
3747 	if (allocated > map->m_len)
3748 		allocated = map->m_len;
3749 	ext4_ext_show_leaf(inode, path);
3750 	map->m_pblk = newblock;
3751 	map->m_len = allocated;
3752 out2:
3753 	if (path) {
3754 		ext4_ext_drop_refs(path);
3755 		kfree(path);
3756 	}
3757 	return err ? err : allocated;
3758 }
3759 
3760 /*
3761  * get_implied_cluster_alloc - check to see if the requested
3762  * allocation (in the map structure) overlaps with a cluster already
3763  * allocated in an extent.
3764  *	@sb	The filesystem superblock structure
3765  *	@map	The requested lblk->pblk mapping
3766  *	@ex	The extent structure which might contain an implied
3767  *			cluster allocation
3768  *
3769  * This function is called by ext4_ext_map_blocks() after we failed to
3770  * find blocks that were already in the inode's extent tree.  Hence,
3771  * we know that the beginning of the requested region cannot overlap
3772  * the extent from the inode's extent tree.  There are three cases we
3773  * want to catch.  The first is this case:
3774  *
3775  *		 |--- cluster # N--|
3776  *    |--- extent ---|	|---- requested region ---|
3777  *			|==========|
3778  *
3779  * The second case that we need to test for is this one:
3780  *
3781  *   |--------- cluster # N ----------------|
3782  *	   |--- requested region --|   |------- extent ----|
3783  *	   |=======================|
3784  *
3785  * The third case is when the requested region lies between two extents
3786  * within the same cluster:
3787  *          |------------- cluster # N-------------|
3788  * |----- ex -----|                  |---- ex_right ----|
3789  *                  |------ requested region ------|
3790  *                  |================|
3791  *
3792  * In each of the above cases, we need to set map->m_pblk and
3793  * map->m_len so that they correspond to the extent labelled
3794  * "|====|" from cluster #N, since it is already in use for data in
3795  * cluster EXT4_B2C(sbi, map->m_lblk).	We will then return 1 to
3796  * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
3797  * as a new "allocated" block region.  Otherwise, we will return 0 and
3798  * ext4_ext_map_blocks() will then allocate one or more new clusters
3799  * by calling ext4_mb_new_blocks().
3800  */
3801 static int get_implied_cluster_alloc(struct super_block *sb,
3802 				     struct ext4_map_blocks *map,
3803 				     struct ext4_extent *ex,
3804 				     struct ext4_ext_path *path)
3805 {
3806 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3807 	ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
3808 	ext4_lblk_t ex_cluster_start, ex_cluster_end;
3809 	ext4_lblk_t rr_cluster_start;
3810 	ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3811 	ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
3812 	unsigned short ee_len = ext4_ext_get_actual_len(ex);
3813 
3814 	/* The extent passed in that we are trying to match */
3815 	ex_cluster_start = EXT4_B2C(sbi, ee_block);
3816 	ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
3817 
3818 	/* The requested region passed into ext4_map_blocks() */
3819 	rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
3820 
3821 	if ((rr_cluster_start == ex_cluster_end) ||
3822 	    (rr_cluster_start == ex_cluster_start)) {
3823 		if (rr_cluster_start == ex_cluster_end)
3824 			ee_start += ee_len - 1;
3825 		map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
3826 			c_offset;
3827 		map->m_len = min(map->m_len,
3828 				 (unsigned) sbi->s_cluster_ratio - c_offset);
3829 		/*
3830 		 * Check for and handle this case:
3831 		 *
3832 		 *   |--------- cluster # N-------------|
3833 		 *		       |------- extent ----|
3834 		 *	   |--- requested region ---|
3835 		 *	   |===========|
3836 		 */
3837 
3838 		if (map->m_lblk < ee_block)
3839 			map->m_len = min(map->m_len, ee_block - map->m_lblk);
3840 
3841 		/*
3842 		 * Check for the case where there is already another allocated
3843 		 * block to the right of 'ex' but before the end of the cluster.
3844 		 *
3845 		 *          |------------- cluster # N-------------|
3846 		 * |----- ex -----|                  |---- ex_right ----|
3847 		 *                  |------ requested region ------|
3848 		 *                  |================|
3849 		 */
3850 		if (map->m_lblk > ee_block) {
3851 			ext4_lblk_t next = ext4_ext_next_allocated_block(path);
3852 			map->m_len = min(map->m_len, next - map->m_lblk);
3853 		}
3854 
3855 		trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
3856 		return 1;
3857 	}
3858 
3859 	trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
3860 	return 0;
3861 }
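/*
 * A worked example of the first case above, assuming s_cluster_ratio = 4:
 * if 'ex' covers logical blocks [8, 9] and its last block sits at
 * physical block P, a request starting at logical block 11 gives
 * c_offset = 11 & 3 = 3 and rr_cluster_start == ex_cluster_end (both are
 * cluster 2), so m_pblk = (P & ~3) + 3, i.e. the physical block at the
 * same offset within the already-allocated cluster, and m_len is clamped
 * to the single block left before the cluster boundary.
 */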
3862 
3863 
3864 /*
3865  * Block allocation/map/preallocation routine for extents based files
3866  *
3867  *
3868  * Need to be called with
3869  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
3870  * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
3871  *
3872  * return > 0, number of of blocks already mapped/allocated
3873  *          if create == 0 and these are pre-allocated blocks
3874  *          	buffer head is unmapped
3875  *          otherwise blocks are mapped
3876  *
3877  * return = 0, if a plain lookup failed (blocks have not been allocated)
3878  *          buffer head is unmapped
3879  *
3880  * return < 0, error case.
3881  */
3882 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
3883 			struct ext4_map_blocks *map, int flags)
3884 {
3885 	struct ext4_ext_path *path = NULL;
3886 	struct ext4_extent newex, *ex, *ex2;
3887 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3888 	ext4_fsblk_t newblock = 0;
3889 	int free_on_err = 0, err = 0, depth;
3890 	unsigned int allocated = 0, offset = 0;
3891 	unsigned int allocated_clusters = 0;
3892 	struct ext4_allocation_request ar;
3893 	ext4_io_end_t *io = ext4_inode_aio(inode);
3894 	ext4_lblk_t cluster_offset;
3895 	int set_unwritten = 0;
3896 
3897 	ext_debug("blocks %u/%u requested for inode %lu\n",
3898 		  map->m_lblk, map->m_len, inode->i_ino);
3899 	trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
3900 
3901 	/* check in cache */
3902 	if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
3903 		if (!newex.ee_start_lo && !newex.ee_start_hi) {
3904 			if ((sbi->s_cluster_ratio > 1) &&
3905 			    ext4_find_delalloc_cluster(inode, map->m_lblk))
3906 				map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3907 
3908 			if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3909 				/*
3910 				 * block isn't allocated yet and
3911 				 * user doesn't want to allocate it
3912 				 */
3913 				goto out2;
3914 			}
3915 			/* we should allocate requested block */
3916 		} else {
3917 			/* block is already allocated */
3918 			if (sbi->s_cluster_ratio > 1)
3919 				map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3920 			newblock = map->m_lblk
3921 				   - le32_to_cpu(newex.ee_block)
3922 				   + ext4_ext_pblock(&newex);
3923 			/* number of remaining blocks in the extent */
3924 			allocated = ext4_ext_get_actual_len(&newex) -
3925 				(map->m_lblk - le32_to_cpu(newex.ee_block));
3926 			goto out;
3927 		}
3928 	}
3929 
3930 	/* find extent for this block */
3931 	path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
3932 	if (IS_ERR(path)) {
3933 		err = PTR_ERR(path);
3934 		path = NULL;
3935 		goto out2;
3936 	}
3937 
3938 	depth = ext_depth(inode);
3939 
3940 	/*
3941 	 * a consistent leaf must not be empty;
3942 	 * this situation is possible, though, _during_ tree modification;
3943 	 * this is why the assert can't be put in ext4_ext_find_extent()
3944 	 */
3945 	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
3946 		EXT4_ERROR_INODE(inode, "bad extent address "
3947 				 "lblock: %lu, depth: %d pblock %lld",
3948 				 (unsigned long) map->m_lblk, depth,
3949 				 path[depth].p_block);
3950 		err = -EIO;
3951 		goto out2;
3952 	}
3953 
3954 	ex = path[depth].p_ext;
3955 	if (ex) {
3956 		ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3957 		ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
3958 		unsigned short ee_len;
3959 
3960 		/*
3961 		 * Uninitialized extents are treated as holes, except that
3962 		 * we split out initialized portions during a write.
3963 		 */
3964 		ee_len = ext4_ext_get_actual_len(ex);
3965 
3966 		trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
3967 
3968 		/* if found extent covers block, simply return it */
3969 		if (in_range(map->m_lblk, ee_block, ee_len)) {
3970 			newblock = map->m_lblk - ee_block + ee_start;
3971 			/* number of remaining blocks in the extent */
3972 			allocated = ee_len - (map->m_lblk - ee_block);
3973 			ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
3974 				  ee_block, ee_len, newblock);
3975 
3976 			/*
3977 			 * Do not put an uninitialized extent
3978 			 * in the cache
3979 			 */
3980 			if (!ext4_ext_is_uninitialized(ex)) {
3981 				ext4_ext_put_in_cache(inode, ee_block,
3982 					ee_len, ee_start);
3983 				goto out;
3984 			}
3985 			allocated = ext4_ext_handle_uninitialized_extents(
3986 				handle, inode, map, path, flags,
3987 				allocated, newblock);
3988 			goto out3;
3989 		}
3990 	}
3991 
3992 	if ((sbi->s_cluster_ratio > 1) &&
3993 	    ext4_find_delalloc_cluster(inode, map->m_lblk))
3994 		map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3995 
3996 	/*
3997 	 * requested block isn't allocated yet;
3998 	 * we must not try to create blocks if the create flag is zero
3999 	 */
4000 	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
4001 		/*
4002 		 * put the just-found gap into the cache to speed up
4003 		 * subsequent requests
4004 		 */
4005 		ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
4006 		goto out2;
4007 	}
4008 
4009 	/*
4010 	 * Okay, we need to do block allocation.
4011 	 */
4012 	map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
4013 	newex.ee_block = cpu_to_le32(map->m_lblk);
4014 	cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
4015 
4016 	/*
4017 	 * If we are doing bigalloc, check to see if the extent returned
4018 	 * by ext4_ext_find_extent() implies a cluster we can use.
4019 	 */
4020 	if (cluster_offset && ex &&
4021 	    get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
4022 		ar.len = allocated = map->m_len;
4023 		newblock = map->m_pblk;
4024 		map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4025 		goto got_allocated_blocks;
4026 	}
4027 
4028 	/* find neighbour allocated blocks */
4029 	ar.lleft = map->m_lblk;
4030 	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
4031 	if (err)
4032 		goto out2;
4033 	ar.lright = map->m_lblk;
4034 	ex2 = NULL;
4035 	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4036 	if (err)
4037 		goto out2;
4038 
4039 	/* Check if the extent after searching to the right implies a
4040 	 * cluster we can use. */
4041 	if ((sbi->s_cluster_ratio > 1) && ex2 &&
4042 	    get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
4043 		ar.len = allocated = map->m_len;
4044 		newblock = map->m_pblk;
4045 		map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4046 		goto got_allocated_blocks;
4047 	}
4048 
4049 	/*
4050 	 * See if request is beyond maximum number of blocks we can have in
4051 	 * a single extent. For an initialized extent this limit is
4052 	 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
4053 	 * EXT_UNINIT_MAX_LEN.
4054 	 */
4055 	if (map->m_len > EXT_INIT_MAX_LEN &&
4056 	    !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4057 		map->m_len = EXT_INIT_MAX_LEN;
4058 	else if (map->m_len > EXT_UNINIT_MAX_LEN &&
4059 		 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4060 		map->m_len = EXT_UNINIT_MAX_LEN;
4061 
4062 	/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
4063 	newex.ee_len = cpu_to_le16(map->m_len);
4064 	err = ext4_ext_check_overlap(sbi, inode, &newex, path);
4065 	if (err)
4066 		allocated = ext4_ext_get_actual_len(&newex);
4067 	else
4068 		allocated = map->m_len;
4069 
4070 	/* allocate new block */
4071 	ar.inode = inode;
4072 	ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4073 	ar.logical = map->m_lblk;
4074 	/*
4075 	 * We calculate the offset from the beginning of the cluster
4076 	 * for the logical block number, since when we allocate a
4077 	 * physical cluster, the physical block should start at the
4078 	 * same offset from the beginning of the cluster.  This is
4079 	 * needed so that future calls to get_implied_cluster_alloc()
4080 	 * work correctly.
4081 	 */
4082 	offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
4083 	ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
4084 	ar.goal -= offset;
4085 	ar.logical -= offset;
4086 	if (S_ISREG(inode->i_mode))
4087 		ar.flags = EXT4_MB_HINT_DATA;
4088 	else
4089 		/* disable in-core preallocation for non-regular files */
4090 		ar.flags = 0;
4091 	if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4092 		ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4093 	newblock = ext4_mb_new_blocks(handle, &ar, &err);
4094 	if (!newblock)
4095 		goto out2;
4096 	ext_debug("allocate new block: goal %llu, found %llu/%u\n",
4097 		  ar.goal, newblock, allocated);
4098 	free_on_err = 1;
4099 	allocated_clusters = ar.len;
4100 	ar.len = EXT4_C2B(sbi, ar.len) - offset;
4101 	if (ar.len > allocated)
4102 		ar.len = allocated;
4103 
4104 got_allocated_blocks:
4105 	/* try to insert new extent into found leaf and return */
4106 	ext4_ext_store_pblock(&newex, newblock + offset);
4107 	newex.ee_len = cpu_to_le16(ar.len);
4108 	/* Mark uninitialized */
4109 	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
4110 		ext4_ext_mark_uninitialized(&newex);
4111 		/*
4112 		 * An io_end structure was created for every IO write to an
4113 		 * uninitialized extent. To avoid unnecessary conversion,
4114 		 * here we flag the IO that really needs the conversion.
4115 		 * For the non-async direct IO case, flag the inode state
4116 		 * that we need to perform conversion when the IO is done.
4117 		 */
4118 		if ((flags & EXT4_GET_BLOCKS_PRE_IO))
4119 			set_unwritten = 1;
4120 		if (ext4_should_dioread_nolock(inode))
4121 			map->m_flags |= EXT4_MAP_UNINIT;
4122 	}
4123 
4124 	err = 0;
4125 	if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
4126 		err = check_eofblocks_fl(handle, inode, map->m_lblk,
4127 					 path, ar.len);
4128 	if (!err)
4129 		err = ext4_ext_insert_extent(handle, inode, path,
4130 					     &newex, flags);
4131 
4132 	if (!err && set_unwritten) {
4133 		if (io)
4134 			ext4_set_io_unwritten_flag(inode, io);
4135 		else
4136 			ext4_set_inode_state(inode,
4137 					     EXT4_STATE_DIO_UNWRITTEN);
4138 	}
4139 
4140 	if (err && free_on_err) {
4141 		int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
4142 			EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
4143 		/* free data blocks we just allocated */
4144 		/* not a good idea to call discard here directly,
4145 		 * but otherwise we'd need to call it on every free() */
4146 		ext4_discard_preallocations(inode);
4147 		ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
4148 				 ext4_ext_get_actual_len(&newex), fb_flags);
4149 		goto out2;
4150 	}
4151 
4152 	/* the previous routine could have used the block we allocated */
4153 	newblock = ext4_ext_pblock(&newex);
4154 	allocated = ext4_ext_get_actual_len(&newex);
4155 	if (allocated > map->m_len)
4156 		allocated = map->m_len;
4157 	map->m_flags |= EXT4_MAP_NEW;
4158 
4159 	/*
4160 	 * Update reserved blocks/metadata blocks after successful
4161 	 * block allocation which had been deferred till now.
4162 	 */
4163 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
4164 		unsigned int reserved_clusters;
4165 		/*
4166 		 * Check how many clusters we had reserved for this allocated range
4167 		 */
4168 		reserved_clusters = get_reserved_cluster_alloc(inode,
4169 						map->m_lblk, allocated);
4170 		if (map->m_flags & EXT4_MAP_FROM_CLUSTER) {
4171 			if (reserved_clusters) {
4172 				/*
4173 				 * We have clusters reserved for this range.
4174 				 * But since we are not doing actual allocation
4175 				 * and are simply using blocks from previously
4176 				 * allocated cluster, we should release the
4177 				 * reservation and not claim quota.
4178 				 */
4179 				ext4_da_update_reserve_space(inode,
4180 						reserved_clusters, 0);
4181 			}
4182 		} else {
4183 			BUG_ON(allocated_clusters < reserved_clusters);
4184 			/* We will claim quota for all newly allocated blocks. */
4185 			ext4_da_update_reserve_space(inode, allocated_clusters,
4186 							1);
4187 			if (reserved_clusters < allocated_clusters) {
4188 				struct ext4_inode_info *ei = EXT4_I(inode);
4189 				int reservation = allocated_clusters -
4190 						  reserved_clusters;
4191 				/*
4192 				 * It seems we claimed some clusters outside of
4193 				 * the range of this allocation. We should give
4194 				 * it back to the reservation pool. This can
4195 				 * happen in the following case:
4196 				 *
4197 				 * * Suppose s_cluster_ratio is 4 (i.e., each
4198 				 *   cluster has 4 blocks). Thus, the clusters
4199 				 *   are [0-3],[4-7],[8-11]...
4200 				 * * First comes delayed allocation write for
4201 				 *   logical blocks 10 & 11. Since there were no
4202 				 *   previous delayed allocated blocks in the
4203 				 *   range [8-11], we would reserve 1 cluster
4204 				 *   for this write.
4205 				 * * Next comes write for logical blocks 3 to 8.
4206 				 *   In this case, we will reserve 2 clusters
4207 				 *   (for [0-3] and [4-7]; and not for [8-11], as
4208 				 *   that range has delayed allocated blocks).
4209 				 *   Thus total reserved clusters now becomes 3.
4210 				 * * Now, during the delayed allocation writeout
4211 				 *   time, we will first write blocks [3-8] and
4212 				 *   allocate 3 clusters for writing these
4213 				 *   blocks. Also, we would claim all these
4214 				 *   three clusters above.
4215 				 * * Now when we come here to writeout the
4216 				 *   blocks [10-11], we would expect to claim
4217 				 *   the reservation of 1 cluster we had made
4218 				 *   (and we would claim it since there are no
4219 				 *   more delayed allocated blocks in the range
4220 				 *   [8-11]. But our reserved cluster count had
4221 				 *   already gone to 0.
4222 				 *
4223 				 *   Thus, at the last step above, when we determine
4224 				 *   that there are still some unwritten delayed
4225 				 *   allocated blocks outside of our current
4226 				 *   block range, we should increment the
4227 				 *   reserved clusters count so that when the
4228 				 *   remaining blocks finally get written, we
4229 				 *   can claim them.
4230 				 */
4231 				dquot_reserve_block(inode,
4232 						EXT4_C2B(sbi, reservation));
4233 				spin_lock(&ei->i_block_reservation_lock);
4234 				ei->i_reserved_data_blocks += reservation;
4235 				spin_unlock(&ei->i_block_reservation_lock);
4236 			}
4237 		}
4238 	}
4239 
4240 	/*
4241 	 * Cache the extent and update transaction to commit on fdatasync only
4242 	 * when it is _not_ an uninitialized extent.
4243 	 */
4244 	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
4245 		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
4246 		ext4_update_inode_fsync_trans(handle, inode, 1);
4247 	} else
4248 		ext4_update_inode_fsync_trans(handle, inode, 0);
4249 out:
4250 	if (allocated > map->m_len)
4251 		allocated = map->m_len;
4252 	ext4_ext_show_leaf(inode, path);
4253 	map->m_flags |= EXT4_MAP_MAPPED;
4254 	map->m_pblk = newblock;
4255 	map->m_len = allocated;
4256 out2:
4257 	if (path) {
4258 		ext4_ext_drop_refs(path);
4259 		kfree(path);
4260 	}
4261 
4262 out3:
4263 	trace_ext4_ext_map_blocks_exit(inode, map, err ? err : allocated);
4264 
4265 	return err ? err : allocated;
4266 }
4267 
4268 void ext4_ext_truncate(struct inode *inode)
4269 {
4270 	struct address_space *mapping = inode->i_mapping;
4271 	struct super_block *sb = inode->i_sb;
4272 	ext4_lblk_t last_block;
4273 	handle_t *handle;
4274 	loff_t page_len;
4275 	int err = 0;
4276 
4277 	/*
4278 	 * finish any pending end_io work so we won't run the risk of
4279 	 * converting any truncated blocks to initialized later
4280 	 */
4281 	ext4_flush_unwritten_io(inode);
4282 
4283 	/*
4284 	 * probably the first extent we're going to free will be the last in the block
4285 	 */
4286 	err = ext4_writepage_trans_blocks(inode);
4287 	handle = ext4_journal_start(inode, err);
4288 	if (IS_ERR(handle))
4289 		return;
4290 
4291 	if (inode->i_size % PAGE_CACHE_SIZE != 0) {
4292 		page_len = PAGE_CACHE_SIZE -
4293 			(inode->i_size & (PAGE_CACHE_SIZE - 1));
4294 
4295 		err = ext4_discard_partial_page_buffers(handle,
4296 			mapping, inode->i_size, page_len, 0);
4297 
4298 		if (err)
4299 			goto out_stop;
4300 	}
4301 
4302 	if (ext4_orphan_add(handle, inode))
4303 		goto out_stop;
4304 
4305 	down_write(&EXT4_I(inode)->i_data_sem);
4306 	ext4_ext_invalidate_cache(inode);
4307 
4308 	ext4_discard_preallocations(inode);
4309 
4310 	/*
4311 	 * TODO: optimization is possible here.
4312 	 * Probably we need not scan at all,
4313 	 * because page truncation is enough.
4314 	 */
4315 
4316 	/* we have to know where to truncate from in crash case */
4317 	EXT4_I(inode)->i_disksize = inode->i_size;
4318 	ext4_mark_inode_dirty(handle, inode);
4319 
4320 	last_block = (inode->i_size + sb->s_blocksize - 1)
4321 			>> EXT4_BLOCK_SIZE_BITS(sb);
4322 	err = ext4_es_remove_extent(inode, last_block,
4323 				    EXT_MAX_BLOCKS - last_block);
4324 	err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
4325 
4326 	/* In a multi-transaction truncate, we only make the final
4327 	 * transaction synchronous.
4328 	 */
4329 	if (IS_SYNC(inode))
4330 		ext4_handle_sync(handle);
4331 
4332 	up_write(&EXT4_I(inode)->i_data_sem);
4333 
4334 out_stop:
4335 	/*
4336 	 * If this was a simple ftruncate() and the file will remain alive,
4337 	 * then we need to clear up the orphan record which we created above.
4338 	 * However, if this was a real unlink then we were called by
4339 	 * ext4_delete_inode(), and we allow that function to clean up the
4340 	 * orphan info for us.
4341 	 */
4342 	if (inode->i_nlink)
4343 		ext4_orphan_del(handle, inode);
4344 
4345 	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4346 	ext4_mark_inode_dirty(handle, inode);
4347 	ext4_journal_stop(handle);
4348 }
4349 
4350 static void ext4_falloc_update_inode(struct inode *inode,
4351 				int mode, loff_t new_size, int update_ctime)
4352 {
4353 	struct timespec now;
4354 
4355 	if (update_ctime) {
4356 		now = current_fs_time(inode->i_sb);
4357 		if (!timespec_equal(&inode->i_ctime, &now))
4358 			inode->i_ctime = now;
4359 	}
4360 	/*
4361 	 * Update only when preallocation was requested beyond
4362 	 * the file size.
4363 	 */
4364 	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
4365 		if (new_size > i_size_read(inode))
4366 			i_size_write(inode, new_size);
4367 		if (new_size > EXT4_I(inode)->i_disksize)
4368 			ext4_update_i_disksize(inode, new_size);
4369 	} else {
4370 		/*
4371 		 * Mark that we allocate beyond EOF so the subsequent truncate
4372 		 * can proceed even if the new size is the same as i_size.
4373 		 */
4374 		if (new_size > i_size_read(inode))
4375 			ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4376 	}
4377 
4378 }
4379 
4380 /*
4381  * Preallocate space for a file. This implements ext4's fallocate file
4382  * operation, which gets called from the sys_fallocate system call.
4383  * For block-mapped files, posix_fallocate should fall back to the method
4384  * of writing zeroes to the required new blocks (the same behavior which is
4385  * expected for file systems which do not support the fallocate() system call).
4386  */
4387 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4388 {
4389 	struct inode *inode = file->f_path.dentry->d_inode;
4390 	handle_t *handle;
4391 	loff_t new_size;
4392 	unsigned int max_blocks;
4393 	int ret = 0;
4394 	int ret2 = 0;
4395 	int retries = 0;
4396 	int flags;
4397 	struct ext4_map_blocks map;
4398 	unsigned int credits, blkbits = inode->i_blkbits;
4399 
4400 	/*
4401 	 * currently supporting (pre)allocate mode for extent-based
4402 	 * files _only_
4403 	 */
4404 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4405 		return -EOPNOTSUPP;
4406 
4407 	/* Return error if mode is not supported */
4408 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
4409 		return -EOPNOTSUPP;
4410 
4411 	if (mode & FALLOC_FL_PUNCH_HOLE)
4412 		return ext4_punch_hole(file, offset, len);
4413 
4414 	ret = ext4_convert_inline_data(inode);
4415 	if (ret)
4416 		return ret;
4417 
4418 	trace_ext4_fallocate_enter(inode, offset, len, mode);
4419 	map.m_lblk = offset >> blkbits;
4420 	/*
4421 	 * We can't just convert len to max_blocks, because the byte range
4422 	 * may straddle block boundaries; see the worked example below
4423 	 */
4424 	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
4425 		- map.m_lblk;
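	/*
	 * Worked example: with blkbits = 12 (4096-byte blocks),
	 * offset = 3072 and len = 2048, the range covers bytes
	 * 3072..5119, i.e. logical blocks 0 and 1: map.m_lblk = 0 and
	 * EXT4_BLOCK_ALIGN(5120, 12) >> 12 = 2, so max_blocks = 2,
	 * while a naive len >> blkbits would yield 0.
	 */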
4426 	/*
4427 	 * credits to insert 1 extent into extent tree
4428 	 */
4429 	credits = ext4_chunk_trans_blocks(inode, max_blocks);
4430 	mutex_lock(&inode->i_mutex);
4431 	ret = inode_newsize_ok(inode, (len + offset));
4432 	if (ret) {
4433 		mutex_unlock(&inode->i_mutex);
4434 		trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
4435 		return ret;
4436 	}
4437 	flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT;
4438 	if (mode & FALLOC_FL_KEEP_SIZE)
4439 		flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4440 	/*
4441 	 * Don't normalize the request if it can fit in one extent so
4442 	 * that it doesn't get unnecessarily split into multiple
4443 	 * extents.
4444 	 */
4445 	if (len <= EXT_UNINIT_MAX_LEN << blkbits)
4446 		flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
4447 
4448 	/* Prevent races with pending unwritten extent conversion */
4449 	ext4_flush_unwritten_io(inode);
4450 retry:
4451 	while (ret >= 0 && ret < max_blocks) {
4452 		map.m_lblk = map.m_lblk + ret;
4453 		map.m_len = max_blocks = max_blocks - ret;
4454 		handle = ext4_journal_start(inode, credits);
4455 		if (IS_ERR(handle)) {
4456 			ret = PTR_ERR(handle);
4457 			break;
4458 		}
4459 		ret = ext4_map_blocks(handle, inode, &map, flags);
4460 		if (ret <= 0) {
4461 #ifdef EXT4FS_DEBUG
4462 			WARN_ON(ret <= 0);
4463 			printk(KERN_ERR "%s: ext4_ext_map_blocks "
4464 				    "returned error inode#%lu, block=%u, "
4465 				    "max_blocks=%u", __func__,
4466 				    inode->i_ino, map.m_lblk, max_blocks);
4467 #endif
4468 			ext4_mark_inode_dirty(handle, inode);
4469 			ret2 = ext4_journal_stop(handle);
4470 			break;
4471 		}
4472 		if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
4473 						blkbits) >> blkbits))
4474 			new_size = offset + len;
4475 		else
4476 			new_size = ((loff_t) map.m_lblk + ret) << blkbits;
4477 
4478 		ext4_falloc_update_inode(inode, mode, new_size,
4479 					 (map.m_flags & EXT4_MAP_NEW));
4480 		ext4_mark_inode_dirty(handle, inode);
4481 		if ((file->f_flags & O_SYNC) && ret >= max_blocks)
4482 			ext4_handle_sync(handle);
4483 		ret2 = ext4_journal_stop(handle);
4484 		if (ret2)
4485 			break;
4486 	}
4487 	if (ret == -ENOSPC &&
4488 			ext4_should_retry_alloc(inode->i_sb, &retries)) {
4489 		ret = 0;
4490 		goto retry;
4491 	}
4492 	mutex_unlock(&inode->i_mutex);
4493 	trace_ext4_fallocate_exit(inode, offset, max_blocks,
4494 				ret > 0 ? ret2 : ret);
4495 	return ret > 0 ? ret2 : ret;
4496 }
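/*
 * A minimal userspace sketch of driving this path through the
 * fallocate(2) system call (the file name and sizes are illustrative):
 */
#if 0	/* illustrative example only, not compiled into the kernel */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	/* preallocate 1 MiB at offset 0; i_size is extended */
	if (fallocate(fd, 0, 0, 1 << 20))
		perror("fallocate");
	/* preallocate another 1 MiB past EOF without changing i_size */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 1 << 20, 1 << 20))
		perror("fallocate keep size");
	close(fd);
	return 0;
}
#endif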
4497 
4498 /*
4499  * This function converts a range of blocks to written extents.
4500  * The caller of this function will pass the start offset and the size.
4501  * All unwritten extents within this range will be converted to
4502  * written extents.
4503  *
4504  * This function is called from the direct IO end-io callback
4505  * function, to convert the fallocated extents after the IO is completed.
4506  * Returns 0 on success.
4507  */
4508 int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
4509 				    ssize_t len)
4510 {
4511 	handle_t *handle;
4512 	unsigned int max_blocks;
4513 	int ret = 0;
4514 	int ret2 = 0;
4515 	struct ext4_map_blocks map;
4516 	unsigned int credits, blkbits = inode->i_blkbits;
4517 
4518 	map.m_lblk = offset >> blkbits;
4519 	/*
4520 	 * We can't just convert len to max_blocks, because the byte range
4521 	 * may straddle block boundaries (see the worked example in ext4_fallocate())
4522 	 */
4523 	max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
4524 		      map.m_lblk);
4525 	/*
4526 	 * credits to insert 1 extent into extent tree
4527 	 */
4528 	credits = ext4_chunk_trans_blocks(inode, max_blocks);
4529 	while (ret >= 0 && ret < max_blocks) {
4530 		map.m_lblk += ret;
4531 		map.m_len = (max_blocks -= ret);
4532 		handle = ext4_journal_start(inode, credits);
4533 		if (IS_ERR(handle)) {
4534 			ret = PTR_ERR(handle);
4535 			break;
4536 		}
4537 		ret = ext4_map_blocks(handle, inode, &map,
4538 				      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
4539 		if (ret <= 0) {
4540 			WARN_ON(ret <= 0);
4541 			ext4_msg(inode->i_sb, KERN_ERR,
4542 				 "%s:%d: inode #%lu: block %u: len %u: "
4543 				 "ext4_ext_map_blocks returned %d",
4544 				 __func__, __LINE__, inode->i_ino, map.m_lblk,
4545 				 map.m_len, ret);
4546 		}
4547 		ext4_mark_inode_dirty(handle, inode);
4548 		ret2 = ext4_journal_stop(handle);
4549 		if (ret <= 0 || ret2)
4550 			break;
4551 	}
4552 	return ret > 0 ? ret2 : ret;
4553 }
4554 
4555 /*
4556  * If newex is not an existing extent (newex->ec_start equals zero), find
4557  * the delayed extent at the start of newex, update newex accordingly and
4558  * return the start of the next delayed extent.
4559  *
4560  * If newex is an existing extent (newex->ec_start is not equal to zero),
4561  * return the start of the next delayed extent, or EXT_MAX_BLOCKS if no
4562  * delayed extent is found. Leave newex unmodified.
4563  */
4564 static int ext4_find_delayed_extent(struct inode *inode,
4565 				    struct ext4_ext_cache *newex)
4566 {
4567 	struct extent_status es;
4568 	ext4_lblk_t next_del;
4569 
4570 	es.start = newex->ec_block;
4571 	next_del = ext4_es_find_extent(inode, &es);
4572 
4573 	if (newex->ec_start == 0) {
4574 		/*
4575 		 * No extent in the extent tree contains block @newex->ec_block,
4576 		 * so the block may lie in 1) a hole or 2) a delayed extent.
4577 		 */
4578 		if (es.len == 0)
4579 			/* A hole found. */
4580 			return 0;
4581 
4582 		if (es.start > newex->ec_block) {
4583 			/* A hole found. */
4584 			newex->ec_len = min(es.start - newex->ec_block,
4585 					    newex->ec_len);
4586 			return 0;
4587 		}
4588 
4589 		newex->ec_len = es.start + es.len - newex->ec_block;
4590 	}
4591 
4592 	return next_del;
4593 }
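/*
 * For example: if newex describes a non-existing range starting at
 * block 100 with ec_len 10 and the first extent found in the status
 * tree starts at block 104, the code above trims ec_len to 4 and
 * reports blocks 100..103 as a hole; if instead a delayed extent
 * [98, len 10] covers block 100, ec_len becomes 98 + 10 - 100 = 8,
 * i.e. the delayed portion 100..107.
 */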
4594 /* fiemap flags we can handle are specified here */
4595 #define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
4596 
4597 static int ext4_xattr_fiemap(struct inode *inode,
4598 				struct fiemap_extent_info *fieinfo)
4599 {
4600 	__u64 physical = 0;
4601 	__u64 length;
4602 	__u32 flags = FIEMAP_EXTENT_LAST;
4603 	int blockbits = inode->i_sb->s_blocksize_bits;
4604 	int error = 0;
4605 
4606 	/* in-inode? */
4607 	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4608 		struct ext4_iloc iloc;
4609 		int offset;	/* offset of xattr in inode */
4610 
4611 		error = ext4_get_inode_loc(inode, &iloc);
4612 		if (error)
4613 			return error;
4614 		physical = iloc.bh->b_blocknr << blockbits;
4615 		offset = EXT4_GOOD_OLD_INODE_SIZE +
4616 				EXT4_I(inode)->i_extra_isize;
4617 		physical += offset;
4618 		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
4619 		flags |= FIEMAP_EXTENT_DATA_INLINE;
4620 		brelse(iloc.bh);
4621 	} else { /* external block */
4622 		physical = EXT4_I(inode)->i_file_acl << blockbits;
4623 		length = inode->i_sb->s_blocksize;
4624 	}
4625 
4626 	if (physical)
4627 		error = fiemap_fill_next_extent(fieinfo, 0, physical,
4628 						length, flags);
4629 	return (error < 0 ? error : 0);
4630 }
4631 
4632 /*
4633  * ext4_ext_punch_hole
4634  *
4635  * Punches a hole of "length" bytes in a file starting
4636  * at byte "offset"
4637  *
4638  * @inode:  The inode of the file to punch a hole in
4639  * @offset: The starting byte offset of the hole
4640  * @length: The length of the hole
4641  *
4642  * Returns 0 on success or a negative error code on failure
4643  */
4644 int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
4645 {
4646 	struct inode *inode = file->f_path.dentry->d_inode;
4647 	struct super_block *sb = inode->i_sb;
4648 	ext4_lblk_t first_block, stop_block;
4649 	struct address_space *mapping = inode->i_mapping;
4650 	handle_t *handle;
4651 	loff_t first_page, last_page, page_len;
4652 	loff_t first_page_offset, last_page_offset;
4653 	int credits, err = 0;
4654 
4655 	/*
4656 	 * Write out all dirty pages to avoid race conditions,
4657 	 * then release them.
4658 	 */
4659 	if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
4660 		err = filemap_write_and_wait_range(mapping,
4661 			offset, offset + length - 1);
4662 
4663 		if (err)
4664 			return err;
4665 	}
4666 
4667 	mutex_lock(&inode->i_mutex);
4668 	/* It's not possible to punch a hole in an append-only or immutable file */
4669 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
4670 		err = -EPERM;
4671 		goto out_mutex;
4672 	}
4673 	if (IS_SWAPFILE(inode)) {
4674 		err = -ETXTBSY;
4675 		goto out_mutex;
4676 	}
4677 
4678 	/* No need to punch hole beyond i_size */
4679 	if (offset >= inode->i_size)
4680 		goto out_mutex;
4681 
4682 	/*
4683 	 * If the hole extends beyond i_size, set the hole
4684 	 * to end after the page that contains i_size
4685 	 */
4686 	if (offset + length > inode->i_size) {
4687 		length = inode->i_size +
4688 		   PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
4689 		   offset;
4690 	}
4691 
4692 	first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
4693 	last_page = (offset + length) >> PAGE_CACHE_SHIFT;
4694 
4695 	first_page_offset = first_page << PAGE_CACHE_SHIFT;
4696 	last_page_offset = last_page << PAGE_CACHE_SHIFT;
4697 
4698 	/* Now release the pages */
4699 	if (last_page_offset > first_page_offset) {
4700 		truncate_pagecache_range(inode, first_page_offset,
4701 					 last_page_offset - 1);
4702 	}
4703 
4704 	/* Wait for all existing dio workers; newcomers will block on i_mutex */
4705 	ext4_inode_block_unlocked_dio(inode);
4706 	err = ext4_flush_unwritten_io(inode);
4707 	if (err)
4708 		goto out_dio;
4709 	inode_dio_wait(inode);
4710 
4711 	credits = ext4_writepage_trans_blocks(inode);
4712 	handle = ext4_journal_start(inode, credits);
4713 	if (IS_ERR(handle)) {
4714 		err = PTR_ERR(handle);
4715 		goto out_dio;
4716 	}
4717 
4718 
4719 	/*
4720 	 * Now we need to zero out the non-page-aligned data in the
4721 	 * pages at the start and tail of the hole, and unmap the buffer
4722 	 * heads for the block aligned regions of the page that were
4723 	 * completely zeroed.
4724 	 */
4725 	if (first_page > last_page) {
4726 		/*
4727 		 * If the file space being truncated is contained within a page
4728 		 * just zero out and unmap the middle of that page
4729 		 */
4730 		err = ext4_discard_partial_page_buffers(handle,
4731 			mapping, offset, length, 0);
4732 
4733 		if (err)
4734 			goto out;
4735 	} else {
4736 		/*
4737 		 * zero out and unmap the partial page that contains
4738 		 * the start of the hole
4739 		 */
4740 		page_len  = first_page_offset - offset;
4741 		if (page_len > 0) {
4742 			err = ext4_discard_partial_page_buffers(handle, mapping,
4743 						   offset, page_len, 0);
4744 			if (err)
4745 				goto out;
4746 		}
4747 
4748 		/*
4749 		 * zero out and unmap the partial page that contains
4750 		 * the end of the hole
4751 		 */
4752 		page_len = offset + length - last_page_offset;
4753 		if (page_len > 0) {
4754 			err = ext4_discard_partial_page_buffers(handle, mapping,
4755 					last_page_offset, page_len, 0);
4756 			if (err)
4757 				goto out;
4758 		}
4759 	}
4760 
4761 	/*
4762 	 * If i_size is contained in the last page, we need to
4763 	 * unmap and zero the partial page after i_size
4764 	 */
4765 	if (inode->i_size >> PAGE_CACHE_SHIFT == last_page &&
4766 	   inode->i_size % PAGE_CACHE_SIZE != 0) {
4767 
4768 		page_len = PAGE_CACHE_SIZE -
4769 			(inode->i_size & (PAGE_CACHE_SIZE - 1));
4770 
4771 		if (page_len > 0) {
4772 			err = ext4_discard_partial_page_buffers(handle,
4773 			  mapping, inode->i_size, page_len, 0);
4774 
4775 			if (err)
4776 				goto out;
4777 		}
4778 	}
4779 
4780 	first_block = (offset + sb->s_blocksize - 1) >>
4781 		EXT4_BLOCK_SIZE_BITS(sb);
4782 	stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
4783 
4784 	/* If there are no blocks to remove, return now */
4785 	if (first_block >= stop_block)
4786 		goto out;
4787 
4788 	down_write(&EXT4_I(inode)->i_data_sem);
4789 	ext4_ext_invalidate_cache(inode);
4790 	ext4_discard_preallocations(inode);
4791 
4792 	err = ext4_es_remove_extent(inode, first_block,
4793 				    stop_block - first_block);
4794 	err = ext4_ext_remove_space(inode, first_block, stop_block - 1);
4795 
4796 	ext4_ext_invalidate_cache(inode);
4797 	ext4_discard_preallocations(inode);
4798 
4799 	if (IS_SYNC(inode))
4800 		ext4_handle_sync(handle);
4801 
4802 	up_write(&EXT4_I(inode)->i_data_sem);
4803 
4804 out:
4805 	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4806 	ext4_mark_inode_dirty(handle, inode);
4807 	ext4_journal_stop(handle);
4808 out_dio:
4809 	ext4_inode_resume_unlocked_dio(inode);
4810 out_mutex:
4811 	mutex_unlock(&inode->i_mutex);
4812 	return err;
4813 }
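/*
 * A minimal userspace sketch of reaching this function: punching a hole
 * uses FALLOC_FL_PUNCH_HOLE, which must be combined with
 * FALLOC_FL_KEEP_SIZE (the file size never changes; the offsets below
 * are illustrative):
 */
#if 0	/* illustrative example only, not compiled into the kernel */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_RDWR);

	if (fd < 0)
		return 1;
	/* deallocate bytes 4096..8191; subsequent reads return zeroes */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4096, 4096))
		perror("fallocate punch hole");
	close(fd);
	return 0;
}
#endif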
4814 
4815 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4816 		__u64 start, __u64 len)
4817 {
4818 	ext4_lblk_t start_blk;
4819 	int error = 0;
4820 
4821 	if (ext4_has_inline_data(inode)) {
4822 		int has_inline = 1;
4823 
4824 		error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline);
4825 
4826 		if (has_inline)
4827 			return error;
4828 	}
4829 
4830 	/* fallback to generic here if not in extents fmt */
4831 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4832 		return generic_block_fiemap(inode, fieinfo, start, len,
4833 			ext4_get_block);
4834 
4835 	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
4836 		return -EBADR;
4837 
4838 	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
4839 		error = ext4_xattr_fiemap(inode, fieinfo);
4840 	} else {
4841 		ext4_lblk_t len_blks;
4842 		__u64 last_blk;
4843 
4844 		start_blk = start >> inode->i_sb->s_blocksize_bits;
4845 		last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
4846 		if (last_blk >= EXT_MAX_BLOCKS)
4847 			last_blk = EXT_MAX_BLOCKS-1;
4848 		len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
4849 
4850 		/*
4851 		 * Walk the extent tree gathering extent information
4852 		 * and pushing extents back to the user.
4853 		 */
4854 		error = ext4_fill_fiemap_extents(inode, start_blk,
4855 						 len_blks, fieinfo);
4856 	}
4857 
4858 	return error;
4859 }
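/*
 * A minimal userspace sketch of exercising this entry point via the
 * FIEMAP ioctl; it asks for the first few extents of a file and prints
 * their mappings (the file name and extent count are illustrative):
 */
#if 0	/* illustrative example only, not compiled into the kernel */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(void)
{
	char buf[sizeof(struct fiemap) + 8 * sizeof(struct fiemap_extent)];
	struct fiemap *fm = (struct fiemap *)buf;
	unsigned int i;
	int fd = open("testfile", O_RDONLY);

	if (fd < 0)
		return 1;
	memset(buf, 0, sizeof(buf));
	fm->fm_start = 0;
	fm->fm_length = FIEMAP_MAX_OFFSET;	/* map the whole file */
	fm->fm_flags = FIEMAP_FLAG_SYNC;	/* flush delalloc data first */
	fm->fm_extent_count = 8;		/* room for 8 extents in buf */
	if (ioctl(fd, FS_IOC_FIEMAP, fm)) {
		perror("FS_IOC_FIEMAP");
		return 1;
	}
	for (i = 0; i < fm->fm_mapped_extents; i++)
		printf("logical %llu physical %llu len %llu flags 0x%x\n",
		       (unsigned long long)fm->fm_extents[i].fe_logical,
		       (unsigned long long)fm->fm_extents[i].fe_physical,
		       (unsigned long long)fm->fm_extents[i].fe_length,
		       fm->fm_extents[i].fe_flags);
	close(fd);
	return 0;
}
#endif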
4860