xref: /openbmc/linux/fs/ext4/extents.c (revision e481ff3f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
4  * Written by Alex Tomas <alex@clusterfs.com>
5  *
6  * Architecture independence:
7  *   Copyright (c) 2005, Bull S.A.
8  *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
9  */
10 
11 /*
12  * Extents support for EXT4
13  *
14  * TODO:
15  *   - ext4*_error() should be used in some situations
16  *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
17  *   - smart tree reduction
18  */
19 
20 #include <linux/fs.h>
21 #include <linux/time.h>
22 #include <linux/jbd2.h>
23 #include <linux/highuid.h>
24 #include <linux/pagemap.h>
25 #include <linux/quotaops.h>
26 #include <linux/string.h>
27 #include <linux/slab.h>
28 #include <linux/uaccess.h>
29 #include <linux/fiemap.h>
30 #include <linux/backing-dev.h>
31 #include <linux/iomap.h>
32 #include "ext4_jbd2.h"
33 #include "ext4_extents.h"
34 #include "xattr.h"
35 
36 #include <trace/events/ext4.h>
37 
38 /*
39  * used by extent splitting.
40  */
41 #define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
42 					due to ENOSPC */
43 #define EXT4_EXT_MARK_UNWRIT1	0x2  /* mark first half unwritten */
44 #define EXT4_EXT_MARK_UNWRIT2	0x4  /* mark second half unwritten */
45 
46 #define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
47 #define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */
48 
49 static __le32 ext4_extent_block_csum(struct inode *inode,
50 				     struct ext4_extent_header *eh)
51 {
52 	struct ext4_inode_info *ei = EXT4_I(inode);
53 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
54 	__u32 csum;
55 
56 	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
57 			   EXT4_EXTENT_TAIL_OFFSET(eh));
58 	return cpu_to_le32(csum);
59 }
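
/*
 * Illustrative note (a sketch, not upstream documentation): the checksum
 * covers the node from the start of the header up to, but not including,
 * the ext4_extent_tail that stores the checksum itself.  Assuming the
 * usual 12-byte header and 12-byte entries, a 4096-byte block has
 * eh_max = 340, so:
 *
 *	offset 0    : struct ext4_extent_header (12 bytes)
 *	offset 12   : 340 extents or indexes    (4080 bytes)
 *	offset 4092 : struct ext4_extent_tail   (et_checksum, 4 bytes)
 *
 * and EXT4_EXTENT_TAIL_OFFSET(eh) evaluates to 4092, so the checksum
 * covers bytes [0, 4092).
 */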
60 
61 static int ext4_extent_block_csum_verify(struct inode *inode,
62 					 struct ext4_extent_header *eh)
63 {
64 	struct ext4_extent_tail *et;
65 
66 	if (!ext4_has_metadata_csum(inode->i_sb))
67 		return 1;
68 
69 	et = find_ext4_extent_tail(eh);
70 	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
71 		return 0;
72 	return 1;
73 }
74 
75 static void ext4_extent_block_csum_set(struct inode *inode,
76 				       struct ext4_extent_header *eh)
77 {
78 	struct ext4_extent_tail *et;
79 
80 	if (!ext4_has_metadata_csum(inode->i_sb))
81 		return;
82 
83 	et = find_ext4_extent_tail(eh);
84 	et->et_checksum = ext4_extent_block_csum(inode, eh);
85 }
86 
87 static int ext4_split_extent_at(handle_t *handle,
88 			     struct inode *inode,
89 			     struct ext4_ext_path **ppath,
90 			     ext4_lblk_t split,
91 			     int split_flag,
92 			     int flags);
93 
94 static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
95 {
96 	/*
97 	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
98 	 * moment, get_block can be called only for blocks inside i_size since
99 	 * page cache has already been dropped and writes are blocked by
100 	 * i_mutex. So we can safely drop the i_data_sem here.
101 	 */
102 	BUG_ON(EXT4_JOURNAL(inode) == NULL);
103 	ext4_discard_preallocations(inode, 0);
104 	up_write(&EXT4_I(inode)->i_data_sem);
105 	*dropped = 1;
106 	return 0;
107 }
108 
109 /*
110  * Make sure 'handle' has at least 'check_cred' credits. If not, restart
111  * transaction with 'restart_cred' credits. The function drops i_data_sem
112  * when restarting the transaction and re-acquires it after the restart.
113  *
114  * The function returns 0 on success, 1 if transaction had to be restarted,
115  * and < 0 in case of fatal error.
116  */
117 int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
118 				int check_cred, int restart_cred,
119 				int revoke_cred)
120 {
121 	int ret;
122 	int dropped = 0;
123 
124 	ret = ext4_journal_ensure_credits_fn(handle, check_cred, restart_cred,
125 		revoke_cred, ext4_ext_trunc_restart_fn(inode, &dropped));
126 	if (dropped)
127 		down_write(&EXT4_I(inode)->i_data_sem);
128 	return ret;
129 }
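
/*
 * Hypothetical caller sketch (not upstream code): a return value > 0
 * means the transaction was restarted; i_data_sem was dropped and
 * re-acquired, so the extent tree may have changed and any cached
 * extent path should be revalidated before further use:
 *
 *	err = ext4_datasem_ensure_credits(handle, inode, credits,
 *					  credits, revoke);
 *	if (err < 0)
 *		goto out;
 *	if (err > 0)
 *		path = ext4_find_extent(inode, lblk, &path, 0);
 */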
130 
131 /*
132  * could return:
133  *  - EROFS
134  *  - ENOMEM
135  */
136 static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
137 				struct ext4_ext_path *path)
138 {
139 	if (path->p_bh) {
140 		/* path points to block */
141 		BUFFER_TRACE(path->p_bh, "get_write_access");
142 		return ext4_journal_get_write_access(handle, path->p_bh);
143 	}
144 	/* path points to leaf/index in inode body */
145 	/* we use in-core data, no need to protect them */
146 	return 0;
147 }
148 
149 /*
150  * could return:
151  *  - EROFS
152  *  - ENOMEM
153  *  - EIO
154  */
155 static int __ext4_ext_dirty(const char *where, unsigned int line,
156 			    handle_t *handle, struct inode *inode,
157 			    struct ext4_ext_path *path)
158 {
159 	int err;
160 
161 	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
162 	if (path->p_bh) {
163 		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
164 		/* path points to block */
165 		err = __ext4_handle_dirty_metadata(where, line, handle,
166 						   inode, path->p_bh);
167 	} else {
168 		/* path points to leaf/index in inode body */
169 		err = ext4_mark_inode_dirty(handle, inode);
170 	}
171 	return err;
172 }
173 
174 #define ext4_ext_dirty(handle, inode, path) \
175 		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
176 
177 static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
178 			      struct ext4_ext_path *path,
179 			      ext4_lblk_t block)
180 {
181 	if (path) {
182 		int depth = path->p_depth;
183 		struct ext4_extent *ex;
184 
185 		/*
186 		 * Try to predict block placement assuming that we are
187 		 * filling in a file which will eventually be
188 		 * non-sparse --- i.e., in the case of libbfd writing
189 		 * an ELF object's sections out-of-order but in a way
190 		 * that eventually results in a contiguous object or
191 		 * executable file, or some database extending a table
192 		 * space file.  However, this is actually somewhat
193 		 * non-ideal if we are writing a sparse file such as
194 		 * qemu or KVM writing a raw image file that is going
195 		 * to stay fairly sparse, since it will end up
196 		 * fragmenting the file system's free space.  Maybe we
197 		 * should have some heuristics or some way to allow
198 		 * userspace to pass a hint to the file system,
199 		 * especially if the latter case turns out to be
200 		 * common.
201 		 */
202 		ex = path[depth].p_ext;
203 		if (ex) {
204 			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
205 			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
206 
207 			if (block > ext_block)
208 				return ext_pblk + (block - ext_block);
209 			else
210 				return ext_pblk - (ext_block - block);
211 		}
212 
213 		/* it looks like the index is empty;
214 		 * try to find the starting block from the index itself */
215 		if (path[depth].p_bh)
216 			return path[depth].p_bh->b_blocknr;
217 	}
218 
219 	/* OK. use inode's group */
220 	return ext4_inode_to_goal_block(inode);
221 }
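
/*
 * Worked example (illustrative): if the path ends at an extent mapping
 * logical block 100 to physical block 5000 with length 10, the goal for
 * logical block 115 is 5000 + (115 - 100) = 5015, and the goal for
 * logical block 90 is 5000 - (100 - 90) = 4990; in both directions the
 * allocator is steered towards keeping the file physically contiguous.
 */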
222 
223 /*
224  * Allocation for a metadata block
225  */
226 static ext4_fsblk_t
227 ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
228 			struct ext4_ext_path *path,
229 			struct ext4_extent *ex, int *err, unsigned int flags)
230 {
231 	ext4_fsblk_t goal, newblock;
232 
233 	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
234 	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
235 					NULL, err);
236 	return newblock;
237 }
238 
239 static inline int ext4_ext_space_block(struct inode *inode, int check)
240 {
241 	int size;
242 
243 	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
244 			/ sizeof(struct ext4_extent);
245 #ifdef AGGRESSIVE_TEST
246 	if (!check && size > 6)
247 		size = 6;
248 #endif
249 	return size;
250 }
251 
252 static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
253 {
254 	int size;
255 
256 	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
257 			/ sizeof(struct ext4_extent_idx);
258 #ifdef AGGRESSIVE_TEST
259 	if (!check && size > 5)
260 		size = 5;
261 #endif
262 	return size;
263 }
264 
265 static inline int ext4_ext_space_root(struct inode *inode, int check)
266 {
267 	int size;
268 
269 	size = sizeof(EXT4_I(inode)->i_data);
270 	size -= sizeof(struct ext4_extent_header);
271 	size /= sizeof(struct ext4_extent);
272 #ifdef AGGRESSIVE_TEST
273 	if (!check && size > 3)
274 		size = 3;
275 #endif
276 	return size;
277 }
278 
279 static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
280 {
281 	int size;
282 
283 	size = sizeof(EXT4_I(inode)->i_data);
284 	size -= sizeof(struct ext4_extent_header);
285 	size /= sizeof(struct ext4_extent_idx);
286 #ifdef AGGRESSIVE_TEST
287 	if (!check && size > 4)
288 		size = 4;
289 #endif
290 	return size;
291 }
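
/*
 * Illustrative capacity math, assuming the on-disk sizes (12-byte
 * ext4_extent_header, 12-byte ext4_extent and ext4_extent_idx, 60-byte
 * i_data area in the inode):
 *
 *	4096-byte block: (4096 - 12) / 12 = 340 extents or indexes
 *	1024-byte block: (1024 - 12) / 12 =  84 extents or indexes
 *	inode root:      (60 - 12) / 12   =   4 extents or indexes
 *
 * The AGGRESSIVE_TEST clamps above shrink these limits so that the tree
 * split and grow paths get exercised even by small test files.
 */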
292 
293 static inline int
294 ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
295 			   struct ext4_ext_path **ppath, ext4_lblk_t lblk,
296 			   int nofail)
297 {
298 	struct ext4_ext_path *path = *ppath;
299 	int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
300 	int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO;
301 
302 	if (nofail)
303 		flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;
304 
305 	return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
306 			EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
307 			flags);
308 }
309 
310 static int
311 ext4_ext_max_entries(struct inode *inode, int depth)
312 {
313 	int max;
314 
315 	if (depth == ext_depth(inode)) {
316 		if (depth == 0)
317 			max = ext4_ext_space_root(inode, 1);
318 		else
319 			max = ext4_ext_space_root_idx(inode, 1);
320 	} else {
321 		if (depth == 0)
322 			max = ext4_ext_space_block(inode, 1);
323 		else
324 			max = ext4_ext_space_block_idx(inode, 1);
325 	}
326 
327 	return max;
328 }
329 
330 static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
331 {
332 	ext4_fsblk_t block = ext4_ext_pblock(ext);
333 	int len = ext4_ext_get_actual_len(ext);
334 	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
335 
336 	/*
337 	 * We allow neither:
338 	 *  - zero length
339 	 *  - overflow/wrap-around
340 	 */
341 	if (lblock + len <= lblock)
342 		return 0;
343 	return ext4_inode_block_valid(inode, block, len);
344 }
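
/*
 * Example of the wrap check (illustrative): with 32-bit logical block
 * numbers, lblock = 0xfffffff0 and len = 0x20 gives lblock + len = 0x10,
 * which is <= lblock, so the extent is rejected.  len = 0 is rejected by
 * the same comparison, since lblock + 0 == lblock.
 */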
345 
346 static int ext4_valid_extent_idx(struct inode *inode,
347 				struct ext4_extent_idx *ext_idx)
348 {
349 	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
350 
351 	return ext4_inode_block_valid(inode, block, 1);
352 }
353 
354 static int ext4_valid_extent_entries(struct inode *inode,
355 				     struct ext4_extent_header *eh,
356 				     ext4_fsblk_t *pblk, int depth)
357 {
358 	unsigned short entries;
359 	if (eh->eh_entries == 0)
360 		return 1;
361 
362 	entries = le16_to_cpu(eh->eh_entries);
363 
364 	if (depth == 0) {
365 		/* leaf entries */
366 		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
367 		ext4_lblk_t lblock = 0;
368 		ext4_lblk_t prev = 0;
369 		int len = 0;
370 		while (entries) {
371 			if (!ext4_valid_extent(inode, ext))
372 				return 0;
373 
374 			/* Check for overlapping extents */
375 			lblock = le32_to_cpu(ext->ee_block);
376 			len = ext4_ext_get_actual_len(ext);
377 			if ((lblock <= prev) && prev) {
378 				*pblk = ext4_ext_pblock(ext);
379 				return 0;
380 			}
381 			ext++;
382 			entries--;
383 			prev = lblock + len - 1;
384 		}
385 	} else {
386 		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
387 		while (entries) {
388 			if (!ext4_valid_extent_idx(inode, ext_idx))
389 				return 0;
390 			ext_idx++;
391 			entries--;
392 		}
393 	}
394 	return 1;
395 }
396 
397 static int __ext4_ext_check(const char *function, unsigned int line,
398 			    struct inode *inode, struct ext4_extent_header *eh,
399 			    int depth, ext4_fsblk_t pblk)
400 {
401 	const char *error_msg;
402 	int max = 0, err = -EFSCORRUPTED;
403 
404 	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
405 		error_msg = "invalid magic";
406 		goto corrupted;
407 	}
408 	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
409 		error_msg = "unexpected eh_depth";
410 		goto corrupted;
411 	}
412 	if (unlikely(eh->eh_max == 0)) {
413 		error_msg = "invalid eh_max";
414 		goto corrupted;
415 	}
416 	max = ext4_ext_max_entries(inode, depth);
417 	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
418 		error_msg = "too large eh_max";
419 		goto corrupted;
420 	}
421 	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
422 		error_msg = "invalid eh_entries";
423 		goto corrupted;
424 	}
425 	if (!ext4_valid_extent_entries(inode, eh, &pblk, depth)) {
426 		error_msg = "invalid extent entries";
427 		goto corrupted;
428 	}
429 	if (unlikely(depth > 32)) {
430 		error_msg = "too large eh_depth";
431 		goto corrupted;
432 	}
433 	/* Verify checksum on non-root extent tree nodes */
434 	if (ext_depth(inode) != depth &&
435 	    !ext4_extent_block_csum_verify(inode, eh)) {
436 		error_msg = "extent tree corrupted";
437 		err = -EFSBADCRC;
438 		goto corrupted;
439 	}
440 	return 0;
441 
442 corrupted:
443 	ext4_error_inode_err(inode, function, line, 0, -err,
444 			     "pblk %llu bad header/extent: %s - magic %x, "
445 			     "entries %u, max %u(%u), depth %u(%u)",
446 			     (unsigned long long) pblk, error_msg,
447 			     le16_to_cpu(eh->eh_magic),
448 			     le16_to_cpu(eh->eh_entries),
449 			     le16_to_cpu(eh->eh_max),
450 			     max, le16_to_cpu(eh->eh_depth), depth);
451 	return err;
452 }
453 
454 #define ext4_ext_check(inode, eh, depth, pblk)			\
455 	__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))
456 
457 int ext4_ext_check_inode(struct inode *inode)
458 {
459 	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
460 }
461 
462 static void ext4_cache_extents(struct inode *inode,
463 			       struct ext4_extent_header *eh)
464 {
465 	struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
466 	ext4_lblk_t prev = 0;
467 	int i;
468 
469 	for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
470 		unsigned int status = EXTENT_STATUS_WRITTEN;
471 		ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
472 		int len = ext4_ext_get_actual_len(ex);
473 
474 		if (prev && (prev != lblk))
475 			ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
476 					     EXTENT_STATUS_HOLE);
477 
478 		if (ext4_ext_is_unwritten(ex))
479 			status = EXTENT_STATUS_UNWRITTEN;
480 		ext4_es_cache_extent(inode, lblk, len,
481 				     ext4_ext_pblock(ex), status);
482 		prev = lblk + len;
483 	}
484 }
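
/*
 * Illustrative example: a leaf with extents [0..9] and [20..29] caches
 * both written ranges and, because prev (10) != lblk (20) when the
 * second extent is visited, also caches the gap [10..19] as a hole.  A
 * hole in front of the first extent is not cached here, since prev
 * starts out as 0.
 */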
485 
486 static struct buffer_head *
487 __read_extent_tree_block(const char *function, unsigned int line,
488 			 struct inode *inode, ext4_fsblk_t pblk, int depth,
489 			 int flags)
490 {
491 	struct buffer_head		*bh;
492 	int				err;
493 	gfp_t				gfp_flags = __GFP_MOVABLE | GFP_NOFS;
494 
495 	if (flags & EXT4_EX_NOFAIL)
496 		gfp_flags |= __GFP_NOFAIL;
497 
498 	bh = sb_getblk_gfp(inode->i_sb, pblk, gfp_flags);
499 	if (unlikely(!bh))
500 		return ERR_PTR(-ENOMEM);
501 
502 	if (!bh_uptodate_or_lock(bh)) {
503 		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
504 		err = ext4_read_bh(bh, 0, NULL);
505 		if (err < 0)
506 			goto errout;
507 	}
508 	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
509 		return bh;
510 	err = __ext4_ext_check(function, line, inode,
511 			       ext_block_hdr(bh), depth, pblk);
512 	if (err)
513 		goto errout;
514 	set_buffer_verified(bh);
515 	/*
516 	 * If this is a leaf block, cache all of its entries
517 	 */
518 	if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
519 		struct ext4_extent_header *eh = ext_block_hdr(bh);
520 		ext4_cache_extents(inode, eh);
521 	}
522 	return bh;
523 errout:
524 	put_bh(bh);
525 	return ERR_PTR(err);
526 
527 }
528 
529 #define read_extent_tree_block(inode, pblk, depth, flags)		\
530 	__read_extent_tree_block(__func__, __LINE__, (inode), (pblk),   \
531 				 (depth), (flags))
532 
533 /*
534  * This function is called to cache a file's extent information in the
535  * extent status tree
536  */
537 int ext4_ext_precache(struct inode *inode)
538 {
539 	struct ext4_inode_info *ei = EXT4_I(inode);
540 	struct ext4_ext_path *path = NULL;
541 	struct buffer_head *bh;
542 	int i = 0, depth, ret = 0;
543 
544 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
545 		return 0;	/* not an extent-mapped inode */
546 
547 	down_read(&ei->i_data_sem);
548 	depth = ext_depth(inode);
549 
550 	/* Don't cache anything if there are no external extent blocks */
551 	if (!depth) {
552 		up_read(&ei->i_data_sem);
553 		return ret;
554 	}
555 
556 	path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
557 		       GFP_NOFS);
558 	if (path == NULL) {
559 		up_read(&ei->i_data_sem);
560 		return -ENOMEM;
561 	}
562 
563 	path[0].p_hdr = ext_inode_hdr(inode);
564 	ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
565 	if (ret)
566 		goto out;
567 	path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
568 	while (i >= 0) {
569 		/*
570 		 * If this is a leaf block or we've reached the end of
571 		 * the index block, go up
572 		 */
573 		if ((i == depth) ||
574 		    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
575 			brelse(path[i].p_bh);
576 			path[i].p_bh = NULL;
577 			i--;
578 			continue;
579 		}
580 		bh = read_extent_tree_block(inode,
581 					    ext4_idx_pblock(path[i].p_idx++),
582 					    depth - i - 1,
583 					    EXT4_EX_FORCE_CACHE);
584 		if (IS_ERR(bh)) {
585 			ret = PTR_ERR(bh);
586 			break;
587 		}
588 		i++;
589 		path[i].p_bh = bh;
590 		path[i].p_hdr = ext_block_hdr(bh);
591 		path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
592 	}
593 	ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
594 out:
595 	up_read(&ei->i_data_sem);
596 	ext4_ext_drop_refs(path);
597 	kfree(path);
598 	return ret;
599 }
600 
601 #ifdef EXT_DEBUG
602 static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
603 {
604 	int k, l = path->p_depth;
605 
606 	ext_debug(inode, "path:");
607 	for (k = 0; k <= l; k++, path++) {
608 		if (path->p_idx) {
609 			ext_debug(inode, "  %d->%llu",
610 				  le32_to_cpu(path->p_idx->ei_block),
611 				  ext4_idx_pblock(path->p_idx));
612 		} else if (path->p_ext) {
613 			ext_debug(inode, "  %d:[%d]%d:%llu ",
614 				  le32_to_cpu(path->p_ext->ee_block),
615 				  ext4_ext_is_unwritten(path->p_ext),
616 				  ext4_ext_get_actual_len(path->p_ext),
617 				  ext4_ext_pblock(path->p_ext));
618 		} else
619 			ext_debug(inode, "  []");
620 	}
621 	ext_debug(inode, "\n");
622 }
623 
624 static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
625 {
626 	int depth = ext_depth(inode);
627 	struct ext4_extent_header *eh;
628 	struct ext4_extent *ex;
629 	int i;
630 
631 	if (!path)
632 		return;
633 
634 	eh = path[depth].p_hdr;
635 	ex = EXT_FIRST_EXTENT(eh);
636 
637 	ext_debug(inode, "Displaying leaf extents\n");
638 
639 	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
640 		ext_debug(inode, "%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
641 			  ext4_ext_is_unwritten(ex),
642 			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
643 	}
644 	ext_debug(inode, "\n");
645 }
646 
647 static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
648 			ext4_fsblk_t newblock, int level)
649 {
650 	int depth = ext_depth(inode);
651 	struct ext4_extent *ex;
652 
653 	if (depth != level) {
654 		struct ext4_extent_idx *idx;
655 		idx = path[level].p_idx;
656 		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
657 			ext_debug(inode, "%d: move %d:%llu in new index %llu\n",
658 				  level, le32_to_cpu(idx->ei_block),
659 				  ext4_idx_pblock(idx), newblock);
660 			idx++;
661 		}
662 
663 		return;
664 	}
665 
666 	ex = path[depth].p_ext;
667 	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
668 		ext_debug(inode, "move %d:%llu:[%d]%d in new leaf %llu\n",
669 				le32_to_cpu(ex->ee_block),
670 				ext4_ext_pblock(ex),
671 				ext4_ext_is_unwritten(ex),
672 				ext4_ext_get_actual_len(ex),
673 				newblock);
674 		ex++;
675 	}
676 }
677 
678 #else
679 #define ext4_ext_show_path(inode, path)
680 #define ext4_ext_show_leaf(inode, path)
681 #define ext4_ext_show_move(inode, path, newblock, level)
682 #endif
683 
684 void ext4_ext_drop_refs(struct ext4_ext_path *path)
685 {
686 	int depth, i;
687 
688 	if (!path)
689 		return;
690 	depth = path->p_depth;
691 	for (i = 0; i <= depth; i++, path++) {
692 		brelse(path->p_bh);
693 		path->p_bh = NULL;
694 	}
695 }
696 
697 /*
698  * ext4_ext_binsearch_idx:
699  * binary search for the closest index of the given block
700  * the header must be checked before calling this
701  */
702 static void
703 ext4_ext_binsearch_idx(struct inode *inode,
704 			struct ext4_ext_path *path, ext4_lblk_t block)
705 {
706 	struct ext4_extent_header *eh = path->p_hdr;
707 	struct ext4_extent_idx *r, *l, *m;
708 
709 
710 	ext_debug(inode, "binsearch for %u(idx):  ", block);
711 
712 	l = EXT_FIRST_INDEX(eh) + 1;
713 	r = EXT_LAST_INDEX(eh);
714 	while (l <= r) {
715 		m = l + (r - l) / 2;
716 		if (block < le32_to_cpu(m->ei_block))
717 			r = m - 1;
718 		else
719 			l = m + 1;
720 		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
721 			  le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block),
722 			  r, le32_to_cpu(r->ei_block));
723 	}
724 
725 	path->p_idx = l - 1;
726 	ext_debug(inode, "  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
727 		  ext4_idx_pblock(path->p_idx));
728 
729 #ifdef CHECK_BINSEARCH
730 	{
731 		struct ext4_extent_idx *chix, *ix;
732 		int k;
733 
734 		chix = ix = EXT_FIRST_INDEX(eh);
735 		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
736 			if (k != 0 && le32_to_cpu(ix->ei_block) <=
737 			    le32_to_cpu(ix[-1].ei_block)) {
738 				printk(KERN_DEBUG "k=%d, ix=0x%p, "
739 				       "first=0x%p\n", k,
740 				       ix, EXT_FIRST_INDEX(eh));
741 				printk(KERN_DEBUG "%u <= %u\n",
742 				       le32_to_cpu(ix->ei_block),
743 				       le32_to_cpu(ix[-1].ei_block));
744 			}
745 			BUG_ON(k && le32_to_cpu(ix->ei_block)
746 					   <= le32_to_cpu(ix[-1].ei_block));
747 			if (block < le32_to_cpu(ix->ei_block))
748 				break;
749 			chix = ix;
750 		}
751 		BUG_ON(chix != path->p_idx);
752 	}
753 #endif
754 
755 }
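
/*
 * Worked trace (illustrative): for index entries with ei_block values
 * {0, 100, 200} and a search for block 150:
 *
 *	l = &idx[1] (100), r = &idx[2] (200)
 *	m = &idx[1]: 150 >= 100 -> l = &idx[2]
 *	m = &idx[2]: 150 <  200 -> r = &idx[1]
 *
 * The loop exits with l = &idx[2], so p_idx = l - 1 = &idx[1]: the last
 * index whose ei_block is <= 150.
 */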
756 
757 /*
758  * ext4_ext_binsearch:
759  * binary search for closest extent of the given block
760  * the header must be checked before calling this
761  */
762 static void
763 ext4_ext_binsearch(struct inode *inode,
764 		struct ext4_ext_path *path, ext4_lblk_t block)
765 {
766 	struct ext4_extent_header *eh = path->p_hdr;
767 	struct ext4_extent *r, *l, *m;
768 
769 	if (eh->eh_entries == 0) {
770 		/*
771 		 * this leaf is empty:
772 		 * we get such a leaf in split/add case
773 		 */
774 		return;
775 	}
776 
777 	ext_debug(inode, "binsearch for %u:  ", block);
778 
779 	l = EXT_FIRST_EXTENT(eh) + 1;
780 	r = EXT_LAST_EXTENT(eh);
781 
782 	while (l <= r) {
783 		m = l + (r - l) / 2;
784 		if (block < le32_to_cpu(m->ee_block))
785 			r = m - 1;
786 		else
787 			l = m + 1;
788 		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
789 			  le32_to_cpu(l->ee_block), m, le32_to_cpu(m->ee_block),
790 			  r, le32_to_cpu(r->ee_block));
791 	}
792 
793 	path->p_ext = l - 1;
794 	ext_debug(inode, "  -> %d:%llu:[%d]%d ",
795 			le32_to_cpu(path->p_ext->ee_block),
796 			ext4_ext_pblock(path->p_ext),
797 			ext4_ext_is_unwritten(path->p_ext),
798 			ext4_ext_get_actual_len(path->p_ext));
799 
800 #ifdef CHECK_BINSEARCH
801 	{
802 		struct ext4_extent *chex, *ex;
803 		int k;
804 
805 		chex = ex = EXT_FIRST_EXTENT(eh);
806 		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
807 			BUG_ON(k && le32_to_cpu(ex->ee_block)
808 					  <= le32_to_cpu(ex[-1].ee_block));
809 			if (block < le32_to_cpu(ex->ee_block))
810 				break;
811 			chex = ex;
812 		}
813 		BUG_ON(chex != path->p_ext);
814 	}
815 #endif
816 
817 }
818 
819 void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
820 {
821 	struct ext4_extent_header *eh;
822 
823 	eh = ext_inode_hdr(inode);
824 	eh->eh_depth = 0;
825 	eh->eh_entries = 0;
826 	eh->eh_magic = EXT4_EXT_MAGIC;
827 	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
828 	eh->eh_generation = 0;
829 	ext4_mark_inode_dirty(handle, inode);
830 }
831 
832 struct ext4_ext_path *
833 ext4_find_extent(struct inode *inode, ext4_lblk_t block,
834 		 struct ext4_ext_path **orig_path, int flags)
835 {
836 	struct ext4_extent_header *eh;
837 	struct buffer_head *bh;
838 	struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
839 	short int depth, i, ppos = 0;
840 	int ret;
841 	gfp_t gfp_flags = GFP_NOFS;
842 
843 	if (flags & EXT4_EX_NOFAIL)
844 		gfp_flags |= __GFP_NOFAIL;
845 
846 	eh = ext_inode_hdr(inode);
847 	depth = ext_depth(inode);
848 	if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
849 		EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
850 				 depth);
851 		ret = -EFSCORRUPTED;
852 		goto err;
853 	}
854 
855 	if (path) {
856 		ext4_ext_drop_refs(path);
857 		if (depth > path[0].p_maxdepth) {
858 			kfree(path);
859 			*orig_path = path = NULL;
860 		}
861 	}
862 	if (!path) {
863 		/* account possible depth increase */
864 		path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
865 				gfp_flags);
866 		if (unlikely(!path))
867 			return ERR_PTR(-ENOMEM);
868 		path[0].p_maxdepth = depth + 1;
869 	}
870 	path[0].p_hdr = eh;
871 	path[0].p_bh = NULL;
872 
873 	i = depth;
874 	if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
875 		ext4_cache_extents(inode, eh);
876 	/* walk through the tree */
877 	while (i) {
878 		ext_debug(inode, "depth %d: num %d, max %d\n",
879 			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
880 
881 		ext4_ext_binsearch_idx(inode, path + ppos, block);
882 		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
883 		path[ppos].p_depth = i;
884 		path[ppos].p_ext = NULL;
885 
886 		bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
887 					    flags);
888 		if (IS_ERR(bh)) {
889 			ret = PTR_ERR(bh);
890 			goto err;
891 		}
892 
893 		eh = ext_block_hdr(bh);
894 		ppos++;
895 		path[ppos].p_bh = bh;
896 		path[ppos].p_hdr = eh;
897 	}
898 
899 	path[ppos].p_depth = i;
900 	path[ppos].p_ext = NULL;
901 	path[ppos].p_idx = NULL;
902 
903 	/* find extent */
904 	ext4_ext_binsearch(inode, path + ppos, block);
905 	/* if not an empty leaf */
906 	if (path[ppos].p_ext)
907 		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
908 
909 	ext4_ext_show_path(inode, path);
910 
911 	return path;
912 
913 err:
914 	ext4_ext_drop_refs(path);
915 	kfree(path);
916 	if (orig_path)
917 		*orig_path = NULL;
918 	return ERR_PTR(ret);
919 }
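
/*
 * Hypothetical usage sketch (not upstream code): a typical caller looks
 * up the path, inspects the leaf extent, and releases the path when done:
 *
 *	struct ext4_ext_path *path;
 *
 *	path = ext4_find_extent(inode, lblk, NULL, 0);
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	ex = path[ext_depth(inode)].p_ext;  (may be NULL for an empty leaf)
 *	...
 *	ext4_ext_drop_refs(path);
 *	kfree(path);
 */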
920 
921 /*
922  * ext4_ext_insert_index:
923  * insert new index [@logical;@ptr] into the block at @curp;
924  * check where to insert: before @curp or after @curp
925  */
926 static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
927 				 struct ext4_ext_path *curp,
928 				 int logical, ext4_fsblk_t ptr)
929 {
930 	struct ext4_extent_idx *ix;
931 	int len, err;
932 
933 	err = ext4_ext_get_access(handle, inode, curp);
934 	if (err)
935 		return err;
936 
937 	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
938 		EXT4_ERROR_INODE(inode,
939 				 "logical %d == ei_block %d!",
940 				 logical, le32_to_cpu(curp->p_idx->ei_block));
941 		return -EFSCORRUPTED;
942 	}
943 
944 	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
945 			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
946 		EXT4_ERROR_INODE(inode,
947 				 "eh_entries %d >= eh_max %d!",
948 				 le16_to_cpu(curp->p_hdr->eh_entries),
949 				 le16_to_cpu(curp->p_hdr->eh_max));
950 		return -EFSCORRUPTED;
951 	}
952 
953 	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
954 		/* insert after */
955 		ext_debug(inode, "insert new index %d after: %llu\n",
956 			  logical, ptr);
957 		ix = curp->p_idx + 1;
958 	} else {
959 		/* insert before */
960 		ext_debug(inode, "insert new index %d before: %llu\n",
961 			  logical, ptr);
962 		ix = curp->p_idx;
963 	}
964 
965 	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
966 	BUG_ON(len < 0);
967 	if (len > 0) {
968 		ext_debug(inode, "insert new index %d: "
969 				"move %d indices from 0x%p to 0x%p\n",
970 				logical, len, ix, ix + 1);
971 		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
972 	}
973 
974 	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
975 		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
976 		return -EFSCORRUPTED;
977 	}
978 
979 	ix->ei_block = cpu_to_le32(logical);
980 	ext4_idx_store_pblock(ix, ptr);
981 	le16_add_cpu(&curp->p_hdr->eh_entries, 1);
982 
983 	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
984 		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
985 		return -EFSCORRUPTED;
986 	}
987 
988 	err = ext4_ext_dirty(handle, inode, curp);
989 	ext4_std_error(inode->i_sb, err);
990 
991 	return err;
992 }
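
/*
 * Illustrative example: inserting logical block 20 into an index block
 * holding {10, 30, 50}, with curp->p_idx at entry 10.  Since 20 > 10 we
 * insert after it: ix points at entry 30, len = 2, and the memmove
 * shifts {30, 50} one slot to the right, yielding {10, 20, 30, 50} once
 * the new entry is stored.
 */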
993 
994 /*
995  * ext4_ext_split:
996  * inserts new subtree into the path, using free index entry
997  * at depth @at:
998  * - allocates all needed blocks (new leaf and all intermediate index blocks)
999  * - makes decision where to split
1000  * - moves remaining extents and index entries (right to the split point)
1001  *   into the newly allocated blocks
1002  * - initializes subtree
1003  */
1004 static int ext4_ext_split(handle_t *handle, struct inode *inode,
1005 			  unsigned int flags,
1006 			  struct ext4_ext_path *path,
1007 			  struct ext4_extent *newext, int at)
1008 {
1009 	struct buffer_head *bh = NULL;
1010 	int depth = ext_depth(inode);
1011 	struct ext4_extent_header *neh;
1012 	struct ext4_extent_idx *fidx;
1013 	int i = at, k, m, a;
1014 	ext4_fsblk_t newblock, oldblock;
1015 	__le32 border;
1016 	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
1017 	gfp_t gfp_flags = GFP_NOFS;
1018 	int err = 0;
1019 	size_t ext_size = 0;
1020 
1021 	if (flags & EXT4_EX_NOFAIL)
1022 		gfp_flags |= __GFP_NOFAIL;
1023 
1024 	/* make decision: where to split? */
1025 	/* FIXME: now decision is simplest: at current extent */
1026 
1027 	/* if current leaf will be split, then we should use
1028 	 * border from split point */
1029 	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
1030 		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
1031 		return -EFSCORRUPTED;
1032 	}
1033 	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
1034 		border = path[depth].p_ext[1].ee_block;
1035 		ext_debug(inode, "leaf will be split."
1036 				" next leaf starts at %d\n",
1037 				  le32_to_cpu(border));
1038 	} else {
1039 		border = newext->ee_block;
1040 		ext_debug(inode, "leaf will be added."
1041 				" next leaf starts at %d\n",
1042 				le32_to_cpu(border));
1043 	}
1044 
1045 	/*
1046 	 * If an error occurs, we stop processing and
1047 	 * mark the filesystem read-only. The index won't
1048 	 * be inserted and the tree will stay consistent.
1049 	 * The next mount will repair the buffers too.
1050 	 */
1051 
1052 	/*
1053 	 * Get an array to track all allocated blocks.
1054 	 * We need this to handle errors and to free the
1055 	 * blocks on failure.
1056 	 */
1057 	ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), gfp_flags);
1058 	if (!ablocks)
1059 		return -ENOMEM;
1060 
1061 	/* allocate all needed blocks */
1062 	ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at);
1063 	for (a = 0; a < depth - at; a++) {
1064 		newblock = ext4_ext_new_meta_block(handle, inode, path,
1065 						   newext, &err, flags);
1066 		if (newblock == 0)
1067 			goto cleanup;
1068 		ablocks[a] = newblock;
1069 	}
1070 
1071 	/* initialize new leaf */
1072 	newblock = ablocks[--a];
1073 	if (unlikely(newblock == 0)) {
1074 		EXT4_ERROR_INODE(inode, "newblock == 0!");
1075 		err = -EFSCORRUPTED;
1076 		goto cleanup;
1077 	}
1078 	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
1079 	if (unlikely(!bh)) {
1080 		err = -ENOMEM;
1081 		goto cleanup;
1082 	}
1083 	lock_buffer(bh);
1084 
1085 	err = ext4_journal_get_create_access(handle, bh);
1086 	if (err)
1087 		goto cleanup;
1088 
1089 	neh = ext_block_hdr(bh);
1090 	neh->eh_entries = 0;
1091 	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
1092 	neh->eh_magic = EXT4_EXT_MAGIC;
1093 	neh->eh_depth = 0;
1094 	neh->eh_generation = 0;
1095 
1096 	/* move remainder of path[depth] to the new leaf */
1097 	if (unlikely(path[depth].p_hdr->eh_entries !=
1098 		     path[depth].p_hdr->eh_max)) {
1099 		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
1100 				 path[depth].p_hdr->eh_entries,
1101 				 path[depth].p_hdr->eh_max);
1102 		err = -EFSCORRUPTED;
1103 		goto cleanup;
1104 	}
1105 	/* start copy from next extent */
1106 	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
1107 	ext4_ext_show_move(inode, path, newblock, depth);
1108 	if (m) {
1109 		struct ext4_extent *ex;
1110 		ex = EXT_FIRST_EXTENT(neh);
1111 		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
1112 		le16_add_cpu(&neh->eh_entries, m);
1113 	}
1114 
1115 	/* zero out unused area in the extent block */
1116 	ext_size = sizeof(struct ext4_extent_header) +
1117 		sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
1118 	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
1119 	ext4_extent_block_csum_set(inode, neh);
1120 	set_buffer_uptodate(bh);
1121 	unlock_buffer(bh);
1122 
1123 	err = ext4_handle_dirty_metadata(handle, inode, bh);
1124 	if (err)
1125 		goto cleanup;
1126 	brelse(bh);
1127 	bh = NULL;
1128 
1129 	/* correct old leaf */
1130 	if (m) {
1131 		err = ext4_ext_get_access(handle, inode, path + depth);
1132 		if (err)
1133 			goto cleanup;
1134 		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
1135 		err = ext4_ext_dirty(handle, inode, path + depth);
1136 		if (err)
1137 			goto cleanup;
1138 
1139 	}
1140 
1141 	/* create intermediate indexes */
1142 	k = depth - at - 1;
1143 	if (unlikely(k < 0)) {
1144 		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
1145 		err = -EFSCORRUPTED;
1146 		goto cleanup;
1147 	}
1148 	if (k)
1149 		ext_debug(inode, "create %d intermediate indices\n", k);
1150 	/* insert new index into current index block */
1151 	/* current depth stored in i var */
1152 	i = depth - 1;
1153 	while (k--) {
1154 		oldblock = newblock;
1155 		newblock = ablocks[--a];
1156 		bh = sb_getblk(inode->i_sb, newblock);
1157 		if (unlikely(!bh)) {
1158 			err = -ENOMEM;
1159 			goto cleanup;
1160 		}
1161 		lock_buffer(bh);
1162 
1163 		err = ext4_journal_get_create_access(handle, bh);
1164 		if (err)
1165 			goto cleanup;
1166 
1167 		neh = ext_block_hdr(bh);
1168 		neh->eh_entries = cpu_to_le16(1);
1169 		neh->eh_magic = EXT4_EXT_MAGIC;
1170 		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1171 		neh->eh_depth = cpu_to_le16(depth - i);
1172 		neh->eh_generation = 0;
1173 		fidx = EXT_FIRST_INDEX(neh);
1174 		fidx->ei_block = border;
1175 		ext4_idx_store_pblock(fidx, oldblock);
1176 
1177 		ext_debug(inode, "int.index at %d (block %llu): %u -> %llu\n",
1178 				i, newblock, le32_to_cpu(border), oldblock);
1179 
1180 		/* move remainder of path[i] to the new index block */
1181 		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
1182 					EXT_LAST_INDEX(path[i].p_hdr))) {
1183 			EXT4_ERROR_INODE(inode,
1184 					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
1185 					 le32_to_cpu(path[i].p_ext->ee_block));
1186 			err = -EFSCORRUPTED;
1187 			goto cleanup;
1188 		}
1189 		/* start copy indexes */
1190 		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
1191 		ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx,
1192 				EXT_MAX_INDEX(path[i].p_hdr));
1193 		ext4_ext_show_move(inode, path, newblock, i);
1194 		if (m) {
1195 			memmove(++fidx, path[i].p_idx,
1196 				sizeof(struct ext4_extent_idx) * m);
1197 			le16_add_cpu(&neh->eh_entries, m);
1198 		}
1199 		/* zero out unused area in the extent block */
1200 		ext_size = sizeof(struct ext4_extent_header) +
1201 		   (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
1202 		memset(bh->b_data + ext_size, 0,
1203 			inode->i_sb->s_blocksize - ext_size);
1204 		ext4_extent_block_csum_set(inode, neh);
1205 		set_buffer_uptodate(bh);
1206 		unlock_buffer(bh);
1207 
1208 		err = ext4_handle_dirty_metadata(handle, inode, bh);
1209 		if (err)
1210 			goto cleanup;
1211 		brelse(bh);
1212 		bh = NULL;
1213 
1214 		/* correct old index */
1215 		if (m) {
1216 			err = ext4_ext_get_access(handle, inode, path + i);
1217 			if (err)
1218 				goto cleanup;
1219 			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
1220 			err = ext4_ext_dirty(handle, inode, path + i);
1221 			if (err)
1222 				goto cleanup;
1223 		}
1224 
1225 		i--;
1226 	}
1227 
1228 	/* insert new index */
1229 	err = ext4_ext_insert_index(handle, inode, path + at,
1230 				    le32_to_cpu(border), newblock);
1231 
1232 cleanup:
1233 	if (bh) {
1234 		if (buffer_locked(bh))
1235 			unlock_buffer(bh);
1236 		brelse(bh);
1237 	}
1238 
1239 	if (err) {
1240 		/* free all allocated blocks in error case */
1241 		for (i = 0; i < depth; i++) {
1242 			if (!ablocks[i])
1243 				continue;
1244 			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
1245 					 EXT4_FREE_BLOCKS_METADATA);
1246 		}
1247 	}
1248 	kfree(ablocks);
1249 
1250 	return err;
1251 }
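
/*
 * Illustrative walk-through: with depth = 2 and a free index slot found
 * at at = 1, depth - at = 1 block is allocated: a new leaf that receives
 * the extents to the right of the split point.  k = depth - at - 1 = 0,
 * so no intermediate index blocks are built, and the final step inserts
 * a single new index entry (border -> new leaf) into the existing index
 * block at path[1].
 */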
1252 
1253 /*
1254  * ext4_ext_grow_indepth:
1255  * implements tree growing procedure:
1256  * - allocates new block
1257  * - moves top-level data (index block or leaf) into the new block
1258  * - initializes new top-level, creating index that points to the
1259  *   just created block
1260  */
1261 static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
1262 				 unsigned int flags)
1263 {
1264 	struct ext4_extent_header *neh;
1265 	struct buffer_head *bh;
1266 	ext4_fsblk_t newblock, goal = 0;
1267 	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
1268 	int err = 0;
1269 	size_t ext_size = 0;
1270 
1271 	/* Try to prepend new index to old one */
1272 	if (ext_depth(inode))
1273 		goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
1274 	if (goal > le32_to_cpu(es->s_first_data_block)) {
1275 		flags |= EXT4_MB_HINT_TRY_GOAL;
1276 		goal--;
1277 	} else
1278 		goal = ext4_inode_to_goal_block(inode);
1279 	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
1280 					NULL, &err);
1281 	if (newblock == 0)
1282 		return err;
1283 
1284 	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
1285 	if (unlikely(!bh))
1286 		return -ENOMEM;
1287 	lock_buffer(bh);
1288 
1289 	err = ext4_journal_get_create_access(handle, bh);
1290 	if (err) {
1291 		unlock_buffer(bh);
1292 		goto out;
1293 	}
1294 
1295 	ext_size = sizeof(EXT4_I(inode)->i_data);
1296 	/* move top-level index/leaf into new block */
1297 	memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
1298 	/* zero out unused area in the extent block */
1299 	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
1300 
1301 	/* set size of new block */
1302 	neh = ext_block_hdr(bh);
1303 	/* old root could have indexes or leaves,
1304 	 * so calculate eh_max the right way */
1305 	if (ext_depth(inode))
1306 		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1307 	else
1308 		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
1309 	neh->eh_magic = EXT4_EXT_MAGIC;
1310 	ext4_extent_block_csum_set(inode, neh);
1311 	set_buffer_uptodate(bh);
1312 	set_buffer_verified(bh);
1313 	unlock_buffer(bh);
1314 
1315 	err = ext4_handle_dirty_metadata(handle, inode, bh);
1316 	if (err)
1317 		goto out;
1318 
1319 	/* Update top-level index: num,max,pointer */
1320 	neh = ext_inode_hdr(inode);
1321 	neh->eh_entries = cpu_to_le16(1);
1322 	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
1323 	if (neh->eh_depth == 0) {
1324 		/* Root extent block becomes index block */
1325 		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
1326 		EXT_FIRST_INDEX(neh)->ei_block =
1327 			EXT_FIRST_EXTENT(neh)->ee_block;
1328 	}
1329 	ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %llu\n",
1330 		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
1331 		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
1332 		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
1333 
1334 	le16_add_cpu(&neh->eh_depth, 1);
1335 	err = ext4_mark_inode_dirty(handle, inode);
1336 out:
1337 	brelse(bh);
1338 
1339 	return err;
1340 }
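
/*
 * Illustrative note (assuming the default 60-byte i_data): growing from
 * depth 0 to 1 copies the root's extents (at most 4) into a freshly
 * allocated block and leaves the root holding a single index entry that
 * points at it.  The new block can hold a full block's worth of entries,
 * which is where the extra capacity comes from.
 */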
1341 
1342 /*
1343  * ext4_ext_create_new_leaf:
1344  * finds an empty index and adds a new leaf.
1345  * if no free index is found, it requests growing the tree in depth.
1346  */
1347 static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
1348 				    unsigned int mb_flags,
1349 				    unsigned int gb_flags,
1350 				    struct ext4_ext_path **ppath,
1351 				    struct ext4_extent *newext)
1352 {
1353 	struct ext4_ext_path *path = *ppath;
1354 	struct ext4_ext_path *curp;
1355 	int depth, i, err = 0;
1356 
1357 repeat:
1358 	i = depth = ext_depth(inode);
1359 
1360 	/* walk up the tree and look for a free index entry */
1361 	curp = path + depth;
1362 	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
1363 		i--;
1364 		curp--;
1365 	}
1366 
1367 	/* we use already allocated block for index block,
1368 	 * so subsequent data blocks should be contiguous */
1369 	if (EXT_HAS_FREE_INDEX(curp)) {
1370 		/* if we found an index with a free entry, then use that
1371 		 * entry: create all the needed subtree and add a new leaf */
1372 		err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
1373 		if (err)
1374 			goto out;
1375 
1376 		/* refill path */
1377 		path = ext4_find_extent(inode,
1378 				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1379 				    ppath, gb_flags);
1380 		if (IS_ERR(path))
1381 			err = PTR_ERR(path);
1382 	} else {
1383 		/* tree is full, time to grow in depth */
1384 		err = ext4_ext_grow_indepth(handle, inode, mb_flags);
1385 		if (err)
1386 			goto out;
1387 
1388 		/* refill path */
1389 		path = ext4_find_extent(inode,
1390 				   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1391 				    ppath, gb_flags);
1392 		if (IS_ERR(path)) {
1393 			err = PTR_ERR(path);
1394 			goto out;
1395 		}
1396 
1397 		/*
1398 		 * only the first grow (depth 0 -> 1) produces free space;
1399 		 * in all other cases we have to split the grown tree
1400 		 */
1401 		depth = ext_depth(inode);
1402 		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
1403 			/* now we need to split */
1404 			goto repeat;
1405 		}
1406 	}
1407 
1408 out:
1409 	return err;
1410 }
1411 
1412 /*
1413  * search for the closest allocated block to the left of *logical
1414  * and return it at @logical, with its physical address at @phys.
1415  * if *logical is the smallest allocated block, the function
1416  * returns 0 at @phys.
1417  * the return value contains 0 (success) or an error code.
1418  */
1419 static int ext4_ext_search_left(struct inode *inode,
1420 				struct ext4_ext_path *path,
1421 				ext4_lblk_t *logical, ext4_fsblk_t *phys)
1422 {
1423 	struct ext4_extent_idx *ix;
1424 	struct ext4_extent *ex;
1425 	int depth, ee_len;
1426 
1427 	if (unlikely(path == NULL)) {
1428 		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1429 		return -EFSCORRUPTED;
1430 	}
1431 	depth = path->p_depth;
1432 	*phys = 0;
1433 
1434 	if (depth == 0 && path->p_ext == NULL)
1435 		return 0;
1436 
1437 	/* usually the extent in the path covers blocks smaller
1438 	 * than *logical, but it can be that the extent is the
1439 	 * first one in the file */
1440 
1441 	ex = path[depth].p_ext;
1442 	ee_len = ext4_ext_get_actual_len(ex);
1443 	if (*logical < le32_to_cpu(ex->ee_block)) {
1444 		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1445 			EXT4_ERROR_INODE(inode,
1446 					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
1447 					 *logical, le32_to_cpu(ex->ee_block));
1448 			return -EFSCORRUPTED;
1449 		}
1450 		while (--depth >= 0) {
1451 			ix = path[depth].p_idx;
1452 			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1453 				EXT4_ERROR_INODE(inode,
1454 				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
1455 				  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
1456 				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
1457 		le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
1458 				  depth);
1459 				return -EFSCORRUPTED;
1460 			}
1461 		}
1462 		return 0;
1463 	}
1464 
1465 	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1466 		EXT4_ERROR_INODE(inode,
1467 				 "logical %d < ee_block %d + ee_len %d!",
1468 				 *logical, le32_to_cpu(ex->ee_block), ee_len);
1469 		return -EFSCORRUPTED;
1470 	}
1471 
1472 	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
1473 	*phys = ext4_ext_pblock(ex) + ee_len - 1;
1474 	return 0;
1475 }
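
/*
 * Worked example (illustrative): if the leaf extent maps logical blocks
 * [100..109] to physical blocks [5000..5009] and *logical is 115, the
 * closest allocated block to the left is the extent's last block, so the
 * function returns *logical = 109 and *phys = 5009.
 */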
1476 
1477 /*
1478  * Search for the closest allocated block to the right of *logical
1479  * and return it at @logical, with its physical address at @phys.
1480  * If no such block exists, return 0 and set @phys to 0. We return
1481  * 1 when an allocated block is found, in which case ret_ex is valid.
1482  * Otherwise return a (< 0) error code.
1483  */
1484 static int ext4_ext_search_right(struct inode *inode,
1485 				 struct ext4_ext_path *path,
1486 				 ext4_lblk_t *logical, ext4_fsblk_t *phys,
1487 				 struct ext4_extent *ret_ex)
1488 {
1489 	struct buffer_head *bh = NULL;
1490 	struct ext4_extent_header *eh;
1491 	struct ext4_extent_idx *ix;
1492 	struct ext4_extent *ex;
1493 	ext4_fsblk_t block;
1494 	int depth;	/* Note, NOT eh_depth; depth from top of tree */
1495 	int ee_len;
1496 
1497 	if (unlikely(path == NULL)) {
1498 		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1499 		return -EFSCORRUPTED;
1500 	}
1501 	depth = path->p_depth;
1502 	*phys = 0;
1503 
1504 	if (depth == 0 && path->p_ext == NULL)
1505 		return 0;
1506 
1507 	/* usually the extent in the path covers blocks smaller
1508 	 * than *logical, but it can be that the extent is the
1509 	 * first one in the file */
1510 
1511 	ex = path[depth].p_ext;
1512 	ee_len = ext4_ext_get_actual_len(ex);
1513 	if (*logical < le32_to_cpu(ex->ee_block)) {
1514 		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1515 			EXT4_ERROR_INODE(inode,
1516 					 "first_extent(path[%d].p_hdr) != ex",
1517 					 depth);
1518 			return -EFSCORRUPTED;
1519 		}
1520 		while (--depth >= 0) {
1521 			ix = path[depth].p_idx;
1522 			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1523 				EXT4_ERROR_INODE(inode,
1524 						 "ix != EXT_FIRST_INDEX *logical %d!",
1525 						 *logical);
1526 				return -EFSCORRUPTED;
1527 			}
1528 		}
1529 		goto found_extent;
1530 	}
1531 
1532 	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1533 		EXT4_ERROR_INODE(inode,
1534 				 "logical %d < ee_block %d + ee_len %d!",
1535 				 *logical, le32_to_cpu(ex->ee_block), ee_len);
1536 		return -EFSCORRUPTED;
1537 	}
1538 
1539 	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
1540 		/* next allocated block in this leaf */
1541 		ex++;
1542 		goto found_extent;
1543 	}
1544 
1545 	/* go up and search for index to the right */
1546 	while (--depth >= 0) {
1547 		ix = path[depth].p_idx;
1548 		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
1549 			goto got_index;
1550 	}
1551 
1552 	/* we've gone up to the root and found no index to the right */
1553 	return 0;
1554 
1555 got_index:
1556 	/* we've found index to the right, let's
1557 	 * follow it and find the closest allocated
1558 	 * block to the right */
1559 	ix++;
1560 	block = ext4_idx_pblock(ix);
1561 	while (++depth < path->p_depth) {
1562 		/* subtract from p_depth to get proper eh_depth */
1563 		bh = read_extent_tree_block(inode, block,
1564 					    path->p_depth - depth, 0);
1565 		if (IS_ERR(bh))
1566 			return PTR_ERR(bh);
1567 		eh = ext_block_hdr(bh);
1568 		ix = EXT_FIRST_INDEX(eh);
1569 		block = ext4_idx_pblock(ix);
1570 		put_bh(bh);
1571 	}
1572 
1573 	bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0);
1574 	if (IS_ERR(bh))
1575 		return PTR_ERR(bh);
1576 	eh = ext_block_hdr(bh);
1577 	ex = EXT_FIRST_EXTENT(eh);
1578 found_extent:
1579 	*logical = le32_to_cpu(ex->ee_block);
1580 	*phys = ext4_ext_pblock(ex);
1581 	if (ret_ex)
1582 		*ret_ex = *ex;
1583 	if (bh)
1584 		put_bh(bh);
1585 	return 1;
1586 }
1587 
1588 /*
1589  * ext4_ext_next_allocated_block:
1590  * returns the allocated block in the subsequent extent, or EXT_MAX_BLOCKS.
1591  * NOTE: it treats the block number from an index entry as an
1592  * allocated block. Thus, index entries have to be consistent
1593  * with leaves.
1594  */
1595 ext4_lblk_t
1596 ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1597 {
1598 	int depth;
1599 
1600 	BUG_ON(path == NULL);
1601 	depth = path->p_depth;
1602 
1603 	if (depth == 0 && path->p_ext == NULL)
1604 		return EXT_MAX_BLOCKS;
1605 
1606 	while (depth >= 0) {
1607 		struct ext4_ext_path *p = &path[depth];
1608 
1609 		if (depth == path->p_depth) {
1610 			/* leaf */
1611 			if (p->p_ext && p->p_ext != EXT_LAST_EXTENT(p->p_hdr))
1612 				return le32_to_cpu(p->p_ext[1].ee_block);
1613 		} else {
1614 			/* index */
1615 			if (p->p_idx != EXT_LAST_INDEX(p->p_hdr))
1616 				return le32_to_cpu(p->p_idx[1].ei_block);
1617 		}
1618 		depth--;
1619 	}
1620 
1621 	return EXT_MAX_BLOCKS;
1622 }
1623 
1624 /*
1625  * ext4_ext_next_leaf_block:
1626  * returns the first allocated block of the next leaf, or EXT_MAX_BLOCKS
1627  */
1628 static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
1629 {
1630 	int depth;
1631 
1632 	BUG_ON(path == NULL);
1633 	depth = path->p_depth;
1634 
1635 	/* a zero-depth tree has no leaf blocks at all */
1636 	if (depth == 0)
1637 		return EXT_MAX_BLOCKS;
1638 
1639 	/* go to index block */
1640 	depth--;
1641 
1642 	while (depth >= 0) {
1643 		if (path[depth].p_idx !=
1644 				EXT_LAST_INDEX(path[depth].p_hdr))
1645 			return (ext4_lblk_t)
1646 				le32_to_cpu(path[depth].p_idx[1].ei_block);
1647 		depth--;
1648 	}
1649 
1650 	return EXT_MAX_BLOCKS;
1651 }
1652 
1653 /*
1654  * ext4_ext_correct_indexes:
1655  * if the leaf gets modified and the modified extent is first in the leaf,
1656  * then we have to correct all indexes above.
1657  * TODO: do we need to correct the tree in all cases?
1658  */
1659 static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1660 				struct ext4_ext_path *path)
1661 {
1662 	struct ext4_extent_header *eh;
1663 	int depth = ext_depth(inode);
1664 	struct ext4_extent *ex;
1665 	__le32 border;
1666 	int k, err = 0;
1667 
1668 	eh = path[depth].p_hdr;
1669 	ex = path[depth].p_ext;
1670 
1671 	if (unlikely(ex == NULL || eh == NULL)) {
1672 		EXT4_ERROR_INODE(inode,
1673 				 "ex %p == NULL or eh %p == NULL", ex, eh);
1674 		return -EFSCORRUPTED;
1675 	}
1676 
1677 	if (depth == 0) {
1678 		/* there is no tree at all */
1679 		return 0;
1680 	}
1681 
1682 	if (ex != EXT_FIRST_EXTENT(eh)) {
1683 		/* we only correct the tree if the first extent in the leaf was modified */
1684 		return 0;
1685 	}
1686 
1687 	/*
1688 	 * TODO: we need correction if border is smaller than current one
1689 	 */
1690 	k = depth - 1;
1691 	border = path[depth].p_ext->ee_block;
1692 	err = ext4_ext_get_access(handle, inode, path + k);
1693 	if (err)
1694 		return err;
1695 	path[k].p_idx->ei_block = border;
1696 	err = ext4_ext_dirty(handle, inode, path + k);
1697 	if (err)
1698 		return err;
1699 
1700 	while (k--) {
1701 		/* change all left-side indexes */
1702 		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1703 			break;
1704 		err = ext4_ext_get_access(handle, inode, path + k);
1705 		if (err)
1706 			break;
1707 		path[k].p_idx->ei_block = border;
1708 		err = ext4_ext_dirty(handle, inode, path + k);
1709 		if (err)
1710 			break;
1711 	}
1712 
1713 	return err;
1714 }
1715 
1716 static int ext4_can_extents_be_merged(struct inode *inode,
1717 				      struct ext4_extent *ex1,
1718 				      struct ext4_extent *ex2)
1719 {
1720 	unsigned short ext1_ee_len, ext2_ee_len;
1721 
1722 	if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
1723 		return 0;
1724 
1725 	ext1_ee_len = ext4_ext_get_actual_len(ex1);
1726 	ext2_ee_len = ext4_ext_get_actual_len(ex2);
1727 
1728 	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
1729 			le32_to_cpu(ex2->ee_block))
1730 		return 0;
1731 
1732 	if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
1733 		return 0;
1734 
1735 	if (ext4_ext_is_unwritten(ex1) &&
1736 	    ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)
1737 		return 0;
1738 #ifdef AGGRESSIVE_TEST
1739 	if (ext1_ee_len >= 4)
1740 		return 0;
1741 #endif
1742 
1743 	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
1744 		return 1;
1745 	return 0;
1746 }
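
/*
 * Illustrative example: ex1 mapping logical 100, len 8 to physical 500
 * and ex2 mapping logical 108, len 4 to physical 508 are mergeable: the
 * logical and physical contiguity tests both pass, assuming an equal
 * written/unwritten state and a merged length within the per-extent
 * limit.  If ex2 instead started at physical 600, the final pblock
 * comparison would reject the merge.
 */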
1747 
1748 /*
1749  * This function tries to merge the "ex" extent with the next extent in the tree.
1750  * It always tries to merge towards the right. If you want to merge towards
1751  * the left, pass "ex - 1" as the argument instead of "ex".
1752  * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
1753  * 1 if they got merged.
1754  */
1755 static int ext4_ext_try_to_merge_right(struct inode *inode,
1756 				 struct ext4_ext_path *path,
1757 				 struct ext4_extent *ex)
1758 {
1759 	struct ext4_extent_header *eh;
1760 	unsigned int depth, len;
1761 	int merge_done = 0, unwritten;
1762 
1763 	depth = ext_depth(inode);
1764 	BUG_ON(path[depth].p_hdr == NULL);
1765 	eh = path[depth].p_hdr;
1766 
1767 	while (ex < EXT_LAST_EXTENT(eh)) {
1768 		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
1769 			break;
1770 		/* merge with next extent! */
1771 		unwritten = ext4_ext_is_unwritten(ex);
1772 		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1773 				+ ext4_ext_get_actual_len(ex + 1));
1774 		if (unwritten)
1775 			ext4_ext_mark_unwritten(ex);
1776 
1777 		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
1778 			len = (EXT_LAST_EXTENT(eh) - ex - 1)
1779 				* sizeof(struct ext4_extent);
1780 			memmove(ex + 1, ex + 2, len);
1781 		}
1782 		le16_add_cpu(&eh->eh_entries, -1);
1783 		merge_done = 1;
1784 		WARN_ON(eh->eh_entries == 0);
1785 		if (!eh->eh_entries)
1786 			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
1787 	}
1788 
1789 	return merge_done;
1790 }
1791 
1792 /*
1793  * This function does a very simple check to see if we can collapse
1794  * an extent tree with a single extent tree leaf block into the inode.
1795  */
1796 static void ext4_ext_try_to_merge_up(handle_t *handle,
1797 				     struct inode *inode,
1798 				     struct ext4_ext_path *path)
1799 {
1800 	size_t s;
1801 	unsigned max_root = ext4_ext_space_root(inode, 0);
1802 	ext4_fsblk_t blk;
1803 
1804 	if ((path[0].p_depth != 1) ||
1805 	    (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
1806 	    (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
1807 		return;
1808 
1809 	/*
1810 	 * We need to modify the block allocation bitmap and the block
1811 	 * group descriptor to release the extent tree block.  If we
1812 	 * can't get the journal credits, give up.
1813 	 */
1814 	if (ext4_journal_extend(handle, 2,
1815 			ext4_free_metadata_revoke_credits(inode->i_sb, 1)))
1816 		return;
1817 
1818 	/*
1819 	 * Copy the extent data up to the inode
1820 	 */
1821 	blk = ext4_idx_pblock(path[0].p_idx);
1822 	s = le16_to_cpu(path[1].p_hdr->eh_entries) *
1823 		sizeof(struct ext4_extent_idx);
1824 	s += sizeof(struct ext4_extent_header);
1825 
1826 	path[1].p_maxdepth = path[0].p_maxdepth;
1827 	memcpy(path[0].p_hdr, path[1].p_hdr, s);
1828 	path[0].p_depth = 0;
1829 	path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
1830 		(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
1831 	path[0].p_hdr->eh_max = cpu_to_le16(max_root);
1832 
1833 	brelse(path[1].p_bh);
1834 	ext4_free_blocks(handle, inode, NULL, blk, 1,
1835 			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
1836 }
1837 
1838 /*
1839  * This function tries to merge the @ex extent to neighbours in the tree, then
1840  * tries to collapse the extent tree into the inode.
1841  */
1842 static void ext4_ext_try_to_merge(handle_t *handle,
1843 				  struct inode *inode,
1844 				  struct ext4_ext_path *path,
1845 				  struct ext4_extent *ex)
1846 {
1847 	struct ext4_extent_header *eh;
1848 	unsigned int depth;
1849 	int merge_done = 0;
1850 
1851 	depth = ext_depth(inode);
1852 	BUG_ON(path[depth].p_hdr == NULL);
1853 	eh = path[depth].p_hdr;
1854 
1855 	if (ex > EXT_FIRST_EXTENT(eh))
1856 		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1857 
1858 	if (!merge_done)
1859 		(void) ext4_ext_try_to_merge_right(inode, path, ex);
1860 
1861 	ext4_ext_try_to_merge_up(handle, inode, path);
1862 }
1863 
1864 /*
1865  * check if a portion of the "newext" extent overlaps with an
1866  * existing extent.
1867  *
1868  * If there is an overlap discovered, it updates the length of the newext
1869  * such that there will be no overlap, and then returns 1.
1870  * If there is no overlap found, it returns 0.
1871  */
1872 static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
1873 					   struct inode *inode,
1874 					   struct ext4_extent *newext,
1875 					   struct ext4_ext_path *path)
1876 {
1877 	ext4_lblk_t b1, b2;
1878 	unsigned int depth, len1;
1879 	unsigned int ret = 0;
1880 
1881 	b1 = le32_to_cpu(newext->ee_block);
1882 	len1 = ext4_ext_get_actual_len(newext);
1883 	depth = ext_depth(inode);
1884 	if (!path[depth].p_ext)
1885 		goto out;
1886 	b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
1887 
1888 	/*
1889 	 * get the next allocated block if the extent in the path
1890 	 * is before the requested block(s)
1891 	 */
1892 	if (b2 < b1) {
1893 		b2 = ext4_ext_next_allocated_block(path);
1894 		if (b2 == EXT_MAX_BLOCKS)
1895 			goto out;
1896 		b2 = EXT4_LBLK_CMASK(sbi, b2);
1897 	}
1898 
1899 	/* check for wrap through zero on extent logical start block */
1900 	if (b1 + len1 < b1) {
1901 		len1 = EXT_MAX_BLOCKS - b1;
1902 		newext->ee_len = cpu_to_le16(len1);
1903 		ret = 1;
1904 	}
1905 
1906 	/* check for overlap */
1907 	if (b1 + len1 > b2) {
1908 		newext->ee_len = cpu_to_le16(b2 - b1);
1909 		ret = 1;
1910 	}
1911 out:
1912 	return ret;
1913 }
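
/*
 * Worked example (illustrative numbers only): if newext covers logical
 * blocks [100, 149] and the next allocated block is 120, the check above
 * trims newext->ee_len to 120 - 100 = 20 blocks so the insertion below
 * cannot overlap an existing extent.
 */
static unsigned int __maybe_unused example_trim_overlap(void)
{
	ext4_lblk_t b1 = 100, b2 = 120;		/* hypothetical positions */
	unsigned int len1 = 50;

	if (b1 + len1 > b2)			/* 150 > 120: overlap */
		len1 = b2 - b1;			/* trimmed to 20 blocks */
	return len1;
}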
1914 
1915 /*
1916  * ext4_ext_insert_extent:
1917  * tries to merge the requested extent into an existing extent or
1918  * inserts the requested extent as a new one into the tree,
1919  * creating a new leaf if there is no space.
1920  */
1921 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1922 				struct ext4_ext_path **ppath,
1923 				struct ext4_extent *newext, int gb_flags)
1924 {
1925 	struct ext4_ext_path *path = *ppath;
1926 	struct ext4_extent_header *eh;
1927 	struct ext4_extent *ex, *fex;
1928 	struct ext4_extent *nearex; /* nearest extent */
1929 	struct ext4_ext_path *npath = NULL;
1930 	int depth, len, err;
1931 	ext4_lblk_t next;
1932 	int mb_flags = 0, unwritten;
1933 
1934 	if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
1935 		mb_flags |= EXT4_MB_DELALLOC_RESERVED;
1936 	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1937 		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
1938 		return -EFSCORRUPTED;
1939 	}
1940 	depth = ext_depth(inode);
1941 	ex = path[depth].p_ext;
1942 	eh = path[depth].p_hdr;
1943 	if (unlikely(path[depth].p_hdr == NULL)) {
1944 		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1945 		return -EFSCORRUPTED;
1946 	}
1947 
1948 	/* try to insert block into found extent and return */
1949 	if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {
1950 
1951 		/*
1952 		 * Try to see whether we should rather test the extent to the
1953 		 * right of ex, or to the left of ex. This is because
1954 		 * ext4_find_extent() can return either extent on the
1955 		 * left, or on the right from the searched position. This
1956 		 * will make merging more effective.
1957 		 */
1958 		if (ex < EXT_LAST_EXTENT(eh) &&
1959 		    (le32_to_cpu(ex->ee_block) +
1960 		    ext4_ext_get_actual_len(ex) <
1961 		    le32_to_cpu(newext->ee_block))) {
1962 			ex += 1;
1963 			goto prepend;
1964 		} else if ((ex > EXT_FIRST_EXTENT(eh)) &&
1965 			   (le32_to_cpu(newext->ee_block) +
1966 			   ext4_ext_get_actual_len(newext) <
1967 			   le32_to_cpu(ex->ee_block)))
1968 			ex -= 1;
1969 
1970 		/* Try to append newex to the ex */
1971 		if (ext4_can_extents_be_merged(inode, ex, newext)) {
1972 			ext_debug(inode, "append [%d]%d block to %u:[%d]%d"
1973 				  "(from %llu)\n",
1974 				  ext4_ext_is_unwritten(newext),
1975 				  ext4_ext_get_actual_len(newext),
1976 				  le32_to_cpu(ex->ee_block),
1977 				  ext4_ext_is_unwritten(ex),
1978 				  ext4_ext_get_actual_len(ex),
1979 				  ext4_ext_pblock(ex));
1980 			err = ext4_ext_get_access(handle, inode,
1981 						  path + depth);
1982 			if (err)
1983 				return err;
1984 			unwritten = ext4_ext_is_unwritten(ex);
1985 			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1986 					+ ext4_ext_get_actual_len(newext));
1987 			if (unwritten)
1988 				ext4_ext_mark_unwritten(ex);
1989 			eh = path[depth].p_hdr;
1990 			nearex = ex;
1991 			goto merge;
1992 		}
1993 
1994 prepend:
1995 		/* Try to prepend newex to the ex */
1996 		if (ext4_can_extents_be_merged(inode, newext, ex)) {
1997 			ext_debug(inode, "prepend %u[%d]%d block to %u:[%d]%d"
1998 				  "(from %llu)\n",
1999 				  le32_to_cpu(newext->ee_block),
2000 				  ext4_ext_is_unwritten(newext),
2001 				  ext4_ext_get_actual_len(newext),
2002 				  le32_to_cpu(ex->ee_block),
2003 				  ext4_ext_is_unwritten(ex),
2004 				  ext4_ext_get_actual_len(ex),
2005 				  ext4_ext_pblock(ex));
2006 			err = ext4_ext_get_access(handle, inode,
2007 						  path + depth);
2008 			if (err)
2009 				return err;
2010 
2011 			unwritten = ext4_ext_is_unwritten(ex);
2012 			ex->ee_block = newext->ee_block;
2013 			ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
2014 			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
2015 					+ ext4_ext_get_actual_len(newext));
2016 			if (unwritten)
2017 				ext4_ext_mark_unwritten(ex);
2018 			eh = path[depth].p_hdr;
2019 			nearex = ex;
2020 			goto merge;
2021 		}
2022 	}
2023 
2024 	depth = ext_depth(inode);
2025 	eh = path[depth].p_hdr;
2026 	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
2027 		goto has_space;
2028 
2029 	/* probably next leaf has space for us? */
2030 	fex = EXT_LAST_EXTENT(eh);
2031 	next = EXT_MAX_BLOCKS;
2032 	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
2033 		next = ext4_ext_next_leaf_block(path);
2034 	if (next != EXT_MAX_BLOCKS) {
2035 		ext_debug(inode, "next leaf block - %u\n", next);
2036 		BUG_ON(npath != NULL);
2037 		npath = ext4_find_extent(inode, next, NULL, gb_flags);
2038 		if (IS_ERR(npath))
2039 			return PTR_ERR(npath);
2040 		BUG_ON(npath->p_depth != path->p_depth);
2041 		eh = npath[depth].p_hdr;
2042 		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
2043 			ext_debug(inode, "next leaf isn't full(%d)\n",
2044 				  le16_to_cpu(eh->eh_entries));
2045 			path = npath;
2046 			goto has_space;
2047 		}
2048 		ext_debug(inode, "next leaf has no free space(%d,%d)\n",
2049 			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
2050 	}
2051 
2052 	/*
2053 	 * There is no free space in the found leaf.
2054 	 * We're going to add a new leaf to the tree.
2055 	 */
2056 	if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
2057 		mb_flags |= EXT4_MB_USE_RESERVED;
2058 	err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
2059 				       ppath, newext);
2060 	if (err)
2061 		goto cleanup;
2062 	depth = ext_depth(inode);
2063 	eh = path[depth].p_hdr;
2064 
2065 has_space:
2066 	nearex = path[depth].p_ext;
2067 
2068 	err = ext4_ext_get_access(handle, inode, path + depth);
2069 	if (err)
2070 		goto cleanup;
2071 
2072 	if (!nearex) {
2073 		/* there is no extent in this leaf, create first one */
2074 		ext_debug(inode, "first extent in the leaf: %u:%llu:[%d]%d\n",
2075 				le32_to_cpu(newext->ee_block),
2076 				ext4_ext_pblock(newext),
2077 				ext4_ext_is_unwritten(newext),
2078 				ext4_ext_get_actual_len(newext));
2079 		nearex = EXT_FIRST_EXTENT(eh);
2080 	} else {
2081 		if (le32_to_cpu(newext->ee_block)
2082 			   > le32_to_cpu(nearex->ee_block)) {
2083 			/* Insert after */
2084 			ext_debug(inode, "insert %u:%llu:[%d]%d after: "
2085 					"nearest %p\n",
2086 					le32_to_cpu(newext->ee_block),
2087 					ext4_ext_pblock(newext),
2088 					ext4_ext_is_unwritten(newext),
2089 					ext4_ext_get_actual_len(newext),
2090 					nearex);
2091 			nearex++;
2092 		} else {
2093 			/* Insert before */
2094 			BUG_ON(newext->ee_block == nearex->ee_block);
2095 			ext_debug(inode, "insert %u:%llu:[%d]%d before: "
2096 					"nearest %p\n",
2097 					le32_to_cpu(newext->ee_block),
2098 					ext4_ext_pblock(newext),
2099 					ext4_ext_is_unwritten(newext),
2100 					ext4_ext_get_actual_len(newext),
2101 					nearex);
2102 		}
2103 		len = EXT_LAST_EXTENT(eh) - nearex + 1;
2104 		if (len > 0) {
2105 			ext_debug(inode, "insert %u:%llu:[%d]%d: "
2106 					"move %d extents from 0x%p to 0x%p\n",
2107 					le32_to_cpu(newext->ee_block),
2108 					ext4_ext_pblock(newext),
2109 					ext4_ext_is_unwritten(newext),
2110 					ext4_ext_get_actual_len(newext),
2111 					len, nearex, nearex + 1);
2112 			memmove(nearex + 1, nearex,
2113 				len * sizeof(struct ext4_extent));
2114 		}
2115 	}
2116 
2117 	le16_add_cpu(&eh->eh_entries, 1);
2118 	path[depth].p_ext = nearex;
2119 	nearex->ee_block = newext->ee_block;
2120 	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
2121 	nearex->ee_len = newext->ee_len;
2122 
2123 merge:
2124 	/* try to merge extents */
2125 	if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
2126 		ext4_ext_try_to_merge(handle, inode, path, nearex);
2127 
2129 	/* time to correct all indexes above */
2130 	err = ext4_ext_correct_indexes(handle, inode, path);
2131 	if (err)
2132 		goto cleanup;
2133 
2134 	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
2135 
2136 cleanup:
2137 	ext4_ext_drop_refs(npath);
2138 	kfree(npath);
2139 	return err;
2140 }
2141 
2142 static int ext4_fill_es_cache_info(struct inode *inode,
2143 				   ext4_lblk_t block, ext4_lblk_t num,
2144 				   struct fiemap_extent_info *fieinfo)
2145 {
2146 	ext4_lblk_t next, end = block + num - 1;
2147 	struct extent_status es;
2148 	unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
2149 	unsigned int flags;
2150 	int err;
2151 
2152 	while (block <= end) {
2153 		next = 0;
2154 		flags = 0;
2155 		if (!ext4_es_lookup_extent(inode, block, &next, &es))
2156 			break;
2157 		if (ext4_es_is_unwritten(&es))
2158 			flags |= FIEMAP_EXTENT_UNWRITTEN;
2159 		if (ext4_es_is_delayed(&es))
2160 			flags |= (FIEMAP_EXTENT_DELALLOC |
2161 				  FIEMAP_EXTENT_UNKNOWN);
2162 		if (ext4_es_is_hole(&es))
2163 			flags |= EXT4_FIEMAP_EXTENT_HOLE;
2164 		if (next == 0)
2165 			flags |= FIEMAP_EXTENT_LAST;
2166 		if (flags & (FIEMAP_EXTENT_DELALLOC|
2167 			     EXT4_FIEMAP_EXTENT_HOLE))
2168 			es.es_pblk = 0;
2169 		else
2170 			es.es_pblk = ext4_es_pblock(&es);
2171 		err = fiemap_fill_next_extent(fieinfo,
2172 				(__u64)es.es_lblk << blksize_bits,
2173 				(__u64)es.es_pblk << blksize_bits,
2174 				(__u64)es.es_len << blksize_bits,
2175 				flags);
2176 		if (next == 0)
2177 			break;
2178 		block = next;
2179 		if (err < 0)
2180 			return err;
2181 		if (err == 1)
2182 			return 0;
2183 	}
2184 	return 0;
2185 }
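
/*
 * Illustrative: fiemap reports byte ranges while the extent status tree
 * works in filesystem blocks, hence the shifts above.  With 4KiB blocks
 * (blksize_bits == 12), logical block 256 becomes byte offset 1MiB.
 * example_lblk_to_bytes() is a hypothetical helper.
 */
static __u64 __maybe_unused example_lblk_to_bytes(ext4_lblk_t lblk,
						  unsigned char blksize_bits)
{
	return (__u64)lblk << blksize_bits;	/* 256 << 12 == 1048576 */
}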
2186 
2187 
2188 /*
2189  * ext4_ext_determine_hole - determine hole around given block
2190  * @inode:	inode we lookup in
2191  * @path:	path in extent tree to @lblk
2192  * @lblk:	pointer to logical block around which we want to determine hole
2193  *
2194  * Determine hole length (and start if easily possible) around given logical
2195  * block. We don't try too hard to find the beginning of the hole but @path
2196  * actually points to extent before @lblk, we provide it.
2197  *
2198  * The function returns the length of a hole starting at @lblk. We update @lblk
2199  * to the beginning of the hole if we managed to find it.
2200  */
2201 static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
2202 					   struct ext4_ext_path *path,
2203 					   ext4_lblk_t *lblk)
2204 {
2205 	int depth = ext_depth(inode);
2206 	struct ext4_extent *ex;
2207 	ext4_lblk_t len;
2208 
2209 	ex = path[depth].p_ext;
2210 	if (ex == NULL) {
2211 		/* there is no extent yet, so gap is [0;-] */
2212 		*lblk = 0;
2213 		len = EXT_MAX_BLOCKS;
2214 	} else if (*lblk < le32_to_cpu(ex->ee_block)) {
2215 		len = le32_to_cpu(ex->ee_block) - *lblk;
2216 	} else if (*lblk >= le32_to_cpu(ex->ee_block)
2217 			+ ext4_ext_get_actual_len(ex)) {
2218 		ext4_lblk_t next;
2219 
2220 		*lblk = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
2221 		next = ext4_ext_next_allocated_block(path);
2222 		BUG_ON(next == *lblk);
2223 		len = next - *lblk;
2224 	} else {
2225 		BUG();
2226 	}
2227 	return len;
2228 }
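
/*
 * Worked example (a sketch with made-up numbers): if the leaf holds an
 * extent covering [100, 149] and the looked-up block is 160, the branch
 * above moves *lblk to 150 and the hole runs up to the next allocated
 * block.
 */
static ext4_lblk_t __maybe_unused example_hole_after(ext4_lblk_t next_allocated,
						     ext4_lblk_t *lblk)
{
	ext4_lblk_t ee_block = 100;
	unsigned short ee_len = 50;

	*lblk = ee_block + ee_len;		/* hole starts at 150 */
	return next_allocated - *lblk;		/* length up to next extent */
}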
2229 
2230 /*
2231  * ext4_ext_put_gap_in_cache:
2232  * calculate boundaries of the gap that the requested block fits into
2233  * and cache this gap
2234  */
2235 static void
2236 ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start,
2237 			  ext4_lblk_t hole_len)
2238 {
2239 	struct extent_status es;
2240 
2241 	ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
2242 				  hole_start + hole_len - 1, &es);
2243 	if (es.es_len) {
2244 		/* Does a delayed extent contain the start of the hole? */
2245 		if (es.es_lblk <= hole_start)
2246 			return;
2247 		hole_len = min(es.es_lblk - hole_start, hole_len);
2248 	}
2249 	ext_debug(inode, " -> %u:%u\n", hole_start, hole_len);
2250 	ext4_es_insert_extent(inode, hole_start, hole_len, ~0,
2251 			      EXTENT_STATUS_HOLE);
2252 }
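
/*
 * Worked example (illustrative): caching a hole spanning [100, 199] when
 * a delayed extent starts at block 150 must stop short of the delayed
 * range, so only [100, 149] ends up cached as a hole.
 */
static ext4_lblk_t __maybe_unused example_trim_cached_hole(void)
{
	ext4_lblk_t hole_start = 100, hole_len = 100;
	ext4_lblk_t es_lblk = 150;	/* first delayed block in the range */

	return min(es_lblk - hole_start, hole_len);	/* 50 blocks */
}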
2253 
2254 /*
2255  * ext4_ext_rm_idx:
2256  * removes index from the index block.
2257  */
2258 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2259 			struct ext4_ext_path *path, int depth)
2260 {
2261 	int err;
2262 	ext4_fsblk_t leaf;
2263 
2264 	/* free index block */
2265 	depth--;
2266 	path = path + depth;
2267 	leaf = ext4_idx_pblock(path->p_idx);
2268 	if (unlikely(path->p_hdr->eh_entries == 0)) {
2269 		EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2270 		return -EFSCORRUPTED;
2271 	}
2272 	err = ext4_ext_get_access(handle, inode, path);
2273 	if (err)
2274 		return err;
2275 
2276 	if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
2277 		int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
2278 		len *= sizeof(struct ext4_extent_idx);
2279 		memmove(path->p_idx, path->p_idx + 1, len);
2280 	}
2281 
2282 	le16_add_cpu(&path->p_hdr->eh_entries, -1);
2283 	err = ext4_ext_dirty(handle, inode, path);
2284 	if (err)
2285 		return err;
2286 	ext_debug(inode, "index is empty, remove it, free block %llu\n", leaf);
2287 	trace_ext4_ext_rm_idx(inode, leaf);
2288 
2289 	ext4_free_blocks(handle, inode, NULL, leaf, 1,
2290 			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2291 
2292 	while (--depth >= 0) {
2293 		if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
2294 			break;
2295 		path--;
2296 		err = ext4_ext_get_access(handle, inode, path);
2297 		if (err)
2298 			break;
2299 		path->p_idx->ei_block = (path+1)->p_idx->ei_block;
2300 		err = ext4_ext_dirty(handle, inode, path);
2301 		if (err)
2302 			break;
2303 	}
2304 	return err;
2305 }
2306 
2307 /*
2308  * ext4_ext_calc_credits_for_single_extent:
2309  * This routine returns max. credits that needed to insert an extent
2310  * to the extent tree.
2311  * When pass the actual path, the caller should calculate credits
2312  * under i_data_sem.
2313  */
2314 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2315 						struct ext4_ext_path *path)
2316 {
2317 	if (path) {
2318 		int depth = ext_depth(inode);
2319 		int ret = 0;
2320 
2321 		/* probably there is space in leaf? */
2322 		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2323 				< le16_to_cpu(path[depth].p_hdr->eh_max)) {
2324 
2325 			/*
2326 			 *  There is some space in the leaf, so there is
2327 			 *  no need to account for the leaf block credit.
2328 			 *
2329 			 *  Bitmaps, block group descriptor blocks and
2330 			 *  other metadata blocks still need to be
2331 			 *  accounted for.
2332 			 */
2333 			/* 1 bitmap, 1 block group descriptor */
2334 			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
2335 			return ret;
2336 		}
2337 	}
2338 
2339 	return ext4_chunk_trans_blocks(inode, nrblocks);
2340 }
2341 
2342 /*
2343  * How many index/leaf blocks need to be changed/allocated to add @extents extents?
2344  *
2345  * If we add a single extent then, in the worst case, each tree level's
2346  * index/leaf needs to be changed in case of a tree split.
2347  *
2348  * If more extents are inserted, they could cause the whole tree to split
2349  * more than once, but this is really rare.
2350  */
2351 int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
2352 {
2353 	int index;
2354 	int depth;
2355 
2356 	/* If we are converting the inline data, only one is needed here. */
2357 	if (ext4_has_inline_data(inode))
2358 		return 1;
2359 
2360 	depth = ext_depth(inode);
2361 
2362 	if (extents <= 1)
2363 		index = depth * 2;
2364 	else
2365 		index = depth * 3;
2366 
2367 	return index;
2368 }
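
/*
 * Worked example (illustrative numbers): for a tree of depth 2, inserting
 * a single extent budgets one changed index/leaf block per level times
 * two, i.e. 4 blocks; several extents budget 3 per level for the rare
 * double split, i.e. 6 blocks.
 */
static void __maybe_unused example_index_credits(int *single, int *multi)
{
	int depth = 2;			/* assumed tree depth */

	*single = depth * 2;		/* one extent: 4 blocks */
	*multi = depth * 3;		/* several extents: 6 blocks */
}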
2369 
2370 static inline int get_default_free_blocks_flags(struct inode *inode)
2371 {
2372 	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
2373 	    ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
2374 		return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
2375 	else if (ext4_should_journal_data(inode))
2376 		return EXT4_FREE_BLOCKS_FORGET;
2377 	return 0;
2378 }
2379 
2380 /*
2381  * ext4_rereserve_cluster - increment the reserved cluster count when
2382  *                          freeing a cluster with a pending reservation
2383  *
2384  * @inode - file containing the cluster
2385  * @lblk - logical block in cluster to be reserved
2386  *
2387  * Increments the reserved cluster count and adjusts quota in a bigalloc
2388  * file system when freeing a partial cluster containing at least one
2389  * delayed and unwritten block.  A partial cluster meeting that
2390  * requirement will have a pending reservation.  If so, the
2391  * RERESERVE_CLUSTER flag is used when calling ext4_free_blocks() to
2392  * defer reserved and allocated space accounting to a subsequent call
2393  * to this function.
2394  */
2395 static void ext4_rereserve_cluster(struct inode *inode, ext4_lblk_t lblk)
2396 {
2397 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2398 	struct ext4_inode_info *ei = EXT4_I(inode);
2399 
2400 	dquot_reclaim_block(inode, EXT4_C2B(sbi, 1));
2401 
2402 	spin_lock(&ei->i_block_reservation_lock);
2403 	ei->i_reserved_data_blocks++;
2404 	percpu_counter_add(&sbi->s_dirtyclusters_counter, 1);
2405 	spin_unlock(&ei->i_block_reservation_lock);
2406 
2407 	percpu_counter_add(&sbi->s_freeclusters_counter, 1);
2408 	ext4_remove_pending(inode, lblk);
2409 }
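
/*
 * Illustrative: the bigalloc helpers used below translate between blocks
 * and clusters.  With s_cluster_ratio == 16 (an assumed mkfs setting),
 * EXT4_B2C() maps block 83 to cluster 5, and rounding a physical block
 * down to its cluster boundary, as sketched here, yields block 80.
 */
static ext4_fsblk_t __maybe_unused example_cluster_start(ext4_fsblk_t pblk)
{
	unsigned int ratio = 16;	/* assumed blocks per cluster */

	return pblk & ~((ext4_fsblk_t)ratio - 1);	/* 83 -> 80 */
}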
2410 
2411 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2412 			      struct ext4_extent *ex,
2413 			      struct partial_cluster *partial,
2414 			      ext4_lblk_t from, ext4_lblk_t to)
2415 {
2416 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2417 	unsigned short ee_len = ext4_ext_get_actual_len(ex);
2418 	ext4_fsblk_t last_pblk, pblk;
2419 	ext4_lblk_t num;
2420 	int flags;
2421 
2422 	/* only extent tail removal is allowed */
2423 	if (from < le32_to_cpu(ex->ee_block) ||
2424 	    to != le32_to_cpu(ex->ee_block) + ee_len - 1) {
2425 		ext4_error(sbi->s_sb,
2426 			   "strange request: removal(2) %u-%u from %u:%u",
2427 			   from, to, le32_to_cpu(ex->ee_block), ee_len);
2428 		return 0;
2429 	}
2430 
2431 #ifdef EXTENTS_STATS
2432 	spin_lock(&sbi->s_ext_stats_lock);
2433 	sbi->s_ext_blocks += ee_len;
2434 	sbi->s_ext_extents++;
2435 	if (ee_len < sbi->s_ext_min)
2436 		sbi->s_ext_min = ee_len;
2437 	if (ee_len > sbi->s_ext_max)
2438 		sbi->s_ext_max = ee_len;
2439 	if (ext_depth(inode) > sbi->s_depth_max)
2440 		sbi->s_depth_max = ext_depth(inode);
2441 	spin_unlock(&sbi->s_ext_stats_lock);
2442 #endif
2443 
2444 	trace_ext4_remove_blocks(inode, ex, from, to, partial);
2445 
2446 	/*
2447 	 * if we have a partial cluster, and it's different from the
2448 	 * cluster of the last block in the extent, we free it
2449 	 */
2450 	last_pblk = ext4_ext_pblock(ex) + ee_len - 1;
2451 
2452 	if (partial->state != initial &&
2453 	    partial->pclu != EXT4_B2C(sbi, last_pblk)) {
2454 		if (partial->state == tofree) {
2455 			flags = get_default_free_blocks_flags(inode);
2456 			if (ext4_is_pending(inode, partial->lblk))
2457 				flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
2458 			ext4_free_blocks(handle, inode, NULL,
2459 					 EXT4_C2B(sbi, partial->pclu),
2460 					 sbi->s_cluster_ratio, flags);
2461 			if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
2462 				ext4_rereserve_cluster(inode, partial->lblk);
2463 		}
2464 		partial->state = initial;
2465 	}
2466 
2467 	num = le32_to_cpu(ex->ee_block) + ee_len - from;
2468 	pblk = ext4_ext_pblock(ex) + ee_len - num;
2469 
2470 	/*
2471 	 * We free the partial cluster at the end of the extent (if any),
2472 	 * unless the cluster is used by another extent (partial_cluster
2473 	 * state is nofree).  If a partial cluster exists here, it must be
2474 	 * shared with the last block in the extent.
2475 	 */
2476 	flags = get_default_free_blocks_flags(inode);
2477 
2478 	/* partial, left end cluster aligned, right end unaligned */
2479 	if ((EXT4_LBLK_COFF(sbi, to) != sbi->s_cluster_ratio - 1) &&
2480 	    (EXT4_LBLK_CMASK(sbi, to) >= from) &&
2481 	    (partial->state != nofree)) {
2482 		if (ext4_is_pending(inode, to))
2483 			flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
2484 		ext4_free_blocks(handle, inode, NULL,
2485 				 EXT4_PBLK_CMASK(sbi, last_pblk),
2486 				 sbi->s_cluster_ratio, flags);
2487 		if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
2488 			ext4_rereserve_cluster(inode, to);
2489 		partial->state = initial;
2490 		flags = get_default_free_blocks_flags(inode);
2491 	}
2492 
2493 	flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
2494 
2495 	/*
2496 	 * For bigalloc file systems, we never free a partial cluster
2497 	 * at the beginning of the extent.  Instead, we check to see if we
2498 	 * need to free it on a subsequent call to ext4_remove_blocks,
2499 	 * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space.
2500 	 */
2501 	flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
2502 	ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
2503 
2504 	/* reset the partial cluster if we've freed past it */
2505 	if (partial->state != initial && partial->pclu != EXT4_B2C(sbi, pblk))
2506 		partial->state = initial;
2507 
2508 	/*
2509 	 * If we've freed the entire extent but the beginning is not left
2510 	 * cluster aligned and is not marked as ineligible for freeing we
2511 	 * record the partial cluster at the beginning of the extent.  It
2512 	 * wasn't freed by the preceding ext4_free_blocks() call, and we
2513 	 * need to look farther to the left to determine if it's to be freed
2514 	 * (not shared with another extent). Else, reset the partial
2515 	 * cluster - we're either  done freeing or the beginning of the
2516 	 * extent is left cluster aligned.
2517 	 */
2518 	if (EXT4_LBLK_COFF(sbi, from) && num == ee_len) {
2519 		if (partial->state == initial) {
2520 			partial->pclu = EXT4_B2C(sbi, pblk);
2521 			partial->lblk = from;
2522 			partial->state = tofree;
2523 		}
2524 	} else {
2525 		partial->state = initial;
2526 	}
2527 
2528 	return 0;
2529 }
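
/*
 * Worked example (illustrative): removing the tail of an extent mapping
 * logical [100, 149] to physical 1000 with from == 120 gives
 * num = 100 + 50 - 120 = 30 blocks to free, starting at physical block
 * pblk = 1000 + 50 - 30 = 1020, matching the arithmetic above.
 */
static void __maybe_unused example_tail_removal(ext4_lblk_t *num,
						ext4_fsblk_t *pblk)
{
	ext4_lblk_t ee_block = 100, from = 120;
	unsigned short ee_len = 50;
	ext4_fsblk_t ee_start = 1000;

	*num = ee_block + ee_len - from;	/* 30 blocks to free */
	*pblk = ee_start + ee_len - *num;	/* first freed block: 1020 */
}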
2530 
2531 /*
2532  * ext4_ext_rm_leaf() removes the extents associated with the
2533  * blocks appearing between "start" and "end".  Both "start"
2534  * and "end" must appear in the same extent or EIO is returned.
2535  *
2536  * @handle: The journal handle
2537  * @inode:  The file's inode
2538  * @path:   The path to the leaf
2539  * @partial: The cluster which we'll have to free if all extents
2540  *           have been released from it.  However, if its state is
2541  *           nofree, it's a cluster just to the right of the punched
2542  *           region and it must not be freed.
2543  * @start:  The first block to remove
2544  * @end:   The last block to remove
2545  */
2546 static int
2547 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2548 		 struct ext4_ext_path *path,
2549 		 struct partial_cluster *partial,
2550 		 ext4_lblk_t start, ext4_lblk_t end)
2551 {
2552 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2553 	int err = 0, correct_index = 0;
2554 	int depth = ext_depth(inode), credits, revoke_credits;
2555 	struct ext4_extent_header *eh;
2556 	ext4_lblk_t a, b;
2557 	unsigned num;
2558 	ext4_lblk_t ex_ee_block;
2559 	unsigned short ex_ee_len;
2560 	unsigned unwritten = 0;
2561 	struct ext4_extent *ex;
2562 	ext4_fsblk_t pblk;
2563 
2564 	/* the header must be checked already in ext4_ext_remove_space() */
2565 	ext_debug(inode, "truncate since %u in leaf to %u\n", start, end);
2566 	if (!path[depth].p_hdr)
2567 		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2568 	eh = path[depth].p_hdr;
2569 	if (unlikely(path[depth].p_hdr == NULL)) {
2570 		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2571 		return -EFSCORRUPTED;
2572 	}
2573 	/* find where to start removing */
2574 	ex = path[depth].p_ext;
2575 	if (!ex)
2576 		ex = EXT_LAST_EXTENT(eh);
2577 
2578 	ex_ee_block = le32_to_cpu(ex->ee_block);
2579 	ex_ee_len = ext4_ext_get_actual_len(ex);
2580 
2581 	trace_ext4_ext_rm_leaf(inode, start, ex, partial);
2582 
2583 	while (ex >= EXT_FIRST_EXTENT(eh) &&
2584 			ex_ee_block + ex_ee_len > start) {
2585 
2586 		if (ext4_ext_is_unwritten(ex))
2587 			unwritten = 1;
2588 		else
2589 			unwritten = 0;
2590 
2591 		ext_debug(inode, "remove ext %u:[%d]%d\n", ex_ee_block,
2592 			  unwritten, ex_ee_len);
2593 		path[depth].p_ext = ex;
2594 
2595 		a = ex_ee_block > start ? ex_ee_block : start;
2596 		b = ex_ee_block+ex_ee_len - 1 < end ?
2597 			ex_ee_block+ex_ee_len - 1 : end;
2598 
2599 		ext_debug(inode, "  border %u:%u\n", a, b);
2600 
2601 		/* If this extent is beyond the end of the hole, skip it */
2602 		if (end < ex_ee_block) {
2603 			/*
2604 			 * We're going to skip this extent and move to another,
2605 			 * so note that its first cluster is in use to avoid
2606 			 * freeing it when removing blocks.  Eventually, the
2607 			 * right edge of the truncated/punched region will
2608 			 * be just to the left.
2609 			 */
2610 			if (sbi->s_cluster_ratio > 1) {
2611 				pblk = ext4_ext_pblock(ex);
2612 				partial->pclu = EXT4_B2C(sbi, pblk);
2613 				partial->state = nofree;
2614 			}
2615 			ex--;
2616 			ex_ee_block = le32_to_cpu(ex->ee_block);
2617 			ex_ee_len = ext4_ext_get_actual_len(ex);
2618 			continue;
2619 		} else if (b != ex_ee_block + ex_ee_len - 1) {
2620 			EXT4_ERROR_INODE(inode,
2621 					 "can not handle truncate %u:%u "
2622 					 "on extent %u:%u",
2623 					 start, end, ex_ee_block,
2624 					 ex_ee_block + ex_ee_len - 1);
2625 			err = -EFSCORRUPTED;
2626 			goto out;
2627 		} else if (a != ex_ee_block) {
2628 			/* remove tail of the extent */
2629 			num = a - ex_ee_block;
2630 		} else {
2631 			/* remove whole extent: excellent! */
2632 			num = 0;
2633 		}
2634 		/*
2635 		 * 3 for leaf, sb, and inode plus 2 (bmap and group
2636 		 * descriptor) for each block group; assume two block
2637 		 * groups plus ex_ee_len/blocks_per_block_group for
2638 		 * the worst case
2639 		 */
2640 		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2641 		if (ex == EXT_FIRST_EXTENT(eh)) {
2642 			correct_index = 1;
2643 			credits += (ext_depth(inode)) + 1;
2644 		}
2645 		credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2646 		/*
2647 		 * We may end up freeing some index blocks and data from the
2648 		 * punched range. Note that partial clusters are accounted for
2649 		 * by ext4_free_data_revoke_credits().
2650 		 */
2651 		revoke_credits =
2652 			ext4_free_metadata_revoke_credits(inode->i_sb,
2653 							  ext_depth(inode)) +
2654 			ext4_free_data_revoke_credits(inode, b - a + 1);
2655 
2656 		err = ext4_datasem_ensure_credits(handle, inode, credits,
2657 						  credits, revoke_credits);
2658 		if (err) {
2659 			if (err > 0)
2660 				err = -EAGAIN;
2661 			goto out;
2662 		}
2663 
2664 		err = ext4_ext_get_access(handle, inode, path + depth);
2665 		if (err)
2666 			goto out;
2667 
2668 		err = ext4_remove_blocks(handle, inode, ex, partial, a, b);
2669 		if (err)
2670 			goto out;
2671 
2672 		if (num == 0)
2673 			/* this extent is removed; mark slot entirely unused */
2674 			ext4_ext_store_pblock(ex, 0);
2675 
2676 		ex->ee_len = cpu_to_le16(num);
2677 		/*
2678 		 * Do not mark unwritten if all the blocks in the
2679 		 * extent have been removed.
2680 		 */
2681 		if (unwritten && num)
2682 			ext4_ext_mark_unwritten(ex);
2683 		/*
2684 		 * If the extent was completely released,
2685 		 * we need to remove it from the leaf
2686 		 */
2687 		if (num == 0) {
2688 			if (end != EXT_MAX_BLOCKS - 1) {
2689 				/*
2690 				 * For hole punching, we need to scoot all the
2691 				 * extents up when an extent is removed so that
2692 				 * we don't have blank extents in the middle
2693 				 */
2694 				memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2695 					sizeof(struct ext4_extent));
2696 
2697 				/* Now get rid of the one at the end */
2698 				memset(EXT_LAST_EXTENT(eh), 0,
2699 					sizeof(struct ext4_extent));
2700 			}
2701 			le16_add_cpu(&eh->eh_entries, -1);
2702 		}
2703 
2704 		err = ext4_ext_dirty(handle, inode, path + depth);
2705 		if (err)
2706 			goto out;
2707 
2708 		ext_debug(inode, "new extent: %u:%u:%llu\n", ex_ee_block, num,
2709 				ext4_ext_pblock(ex));
2710 		ex--;
2711 		ex_ee_block = le32_to_cpu(ex->ee_block);
2712 		ex_ee_len = ext4_ext_get_actual_len(ex);
2713 	}
2714 
2715 	if (correct_index && eh->eh_entries)
2716 		err = ext4_ext_correct_indexes(handle, inode, path);
2717 
2718 	/*
2719 	 * If there's a partial cluster and at least one extent remains in
2720 	 * the leaf, free the partial cluster if it isn't shared with the
2721 	 * current extent.  If it is shared with the current extent
2722 	 * we reset the partial cluster because we've reached the start of the
2723 	 * truncated/punched region and we're done removing blocks.
2724 	 */
2725 	if (partial->state == tofree && ex >= EXT_FIRST_EXTENT(eh)) {
2726 		pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
2727 		if (partial->pclu != EXT4_B2C(sbi, pblk)) {
2728 			int flags = get_default_free_blocks_flags(inode);
2729 
2730 			if (ext4_is_pending(inode, partial->lblk))
2731 				flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
2732 			ext4_free_blocks(handle, inode, NULL,
2733 					 EXT4_C2B(sbi, partial->pclu),
2734 					 sbi->s_cluster_ratio, flags);
2735 			if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
2736 				ext4_rereserve_cluster(inode, partial->lblk);
2737 		}
2738 		partial->state = initial;
2739 	}
2740 
2741 	/* if this leaf is free, then we should
2742 	 * remove it from index block above */
2743 	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2744 		err = ext4_ext_rm_idx(handle, inode, path, depth);
2745 
2746 out:
2747 	return err;
2748 }
2749 
2750 /*
2751  * ext4_ext_more_to_rm:
2752  * returns 1 if current index has to be freed (even partial)
2753  */
2754 static int
2755 ext4_ext_more_to_rm(struct ext4_ext_path *path)
2756 {
2757 	BUG_ON(path->p_idx == NULL);
2758 
2759 	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2760 		return 0;
2761 
2762 	/*
2763 	 * if truncation at a deeper level happened, it wasn't partial,
2764 	 * so we have to consider the current index for truncation
2765 	 */
2766 	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2767 		return 0;
2768 	return 1;
2769 }
2770 
2771 int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
2772 			  ext4_lblk_t end)
2773 {
2774 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2775 	int depth = ext_depth(inode);
2776 	struct ext4_ext_path *path = NULL;
2777 	struct partial_cluster partial;
2778 	handle_t *handle;
2779 	int i = 0, err = 0;
2780 
2781 	partial.pclu = 0;
2782 	partial.lblk = 0;
2783 	partial.state = initial;
2784 
2785 	ext_debug(inode, "truncate since %u to %u\n", start, end);
2786 
2787 	/* probably the first extent we free will be the last one in its block */
2788 	handle = ext4_journal_start_with_revoke(inode, EXT4_HT_TRUNCATE,
2789 			depth + 1,
2790 			ext4_free_metadata_revoke_credits(inode->i_sb, depth));
2791 	if (IS_ERR(handle))
2792 		return PTR_ERR(handle);
2793 
2794 again:
2795 	trace_ext4_ext_remove_space(inode, start, end, depth);
2796 
2797 	/*
2798 	 * Check if we are removing extents inside the extent tree. If that
2799 	 * is the case, we are going to punch a hole inside the extent tree
2800 	 * so we have to check whether we need to split the extent covering
2801 	 * the last block to remove, so that the relevant part of it can be
2802 	 * removed easily in ext4_ext_rm_leaf().
2803 	 */
2804 	if (end < EXT_MAX_BLOCKS - 1) {
2805 		struct ext4_extent *ex;
2806 		ext4_lblk_t ee_block, ex_end, lblk;
2807 		ext4_fsblk_t pblk;
2808 
2809 		/* find extent for or closest extent to this block */
2810 		path = ext4_find_extent(inode, end, NULL,
2811 					EXT4_EX_NOCACHE | EXT4_EX_NOFAIL);
2812 		if (IS_ERR(path)) {
2813 			ext4_journal_stop(handle);
2814 			return PTR_ERR(path);
2815 		}
2816 		depth = ext_depth(inode);
2817 		/* A leaf may be missing only if the inode has no blocks at all */
2818 		ex = path[depth].p_ext;
2819 		if (!ex) {
2820 			if (depth) {
2821 				EXT4_ERROR_INODE(inode,
2822 						 "path[%d].p_hdr == NULL",
2823 						 depth);
2824 				err = -EFSCORRUPTED;
2825 			}
2826 			goto out;
2827 		}
2828 
2829 		ee_block = le32_to_cpu(ex->ee_block);
2830 		ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1;
2831 
2832 		/*
2833 		 * See if the last block is inside the extent, if so split
2834 		 * the extent at 'end' block so we can easily remove the
2835 		 * tail of the first part of the split extent in
2836 		 * ext4_ext_rm_leaf().
2837 		 */
2838 		if (end >= ee_block && end < ex_end) {
2839 
2840 			/*
2841 			 * If we're going to split the extent, note that
2842 			 * the cluster containing the block after 'end' is
2843 			 * in use to avoid freeing it when removing blocks.
2844 			 */
2845 			if (sbi->s_cluster_ratio > 1) {
2846 				pblk = ext4_ext_pblock(ex) + end - ee_block + 1;
2847 				partial.pclu = EXT4_B2C(sbi, pblk);
2848 				partial.state = nofree;
2849 			}
2850 
2851 			/*
2852 			 * Split the extent in two so that 'end' is the last
2853 			 * block in the first new extent. Also we should not
2854 			 * fail removing space due to ENOSPC so try to use
2855 			 * reserved block if that happens.
2856 			 */
2857 			err = ext4_force_split_extent_at(handle, inode, &path,
2858 							 end + 1, 1);
2859 			if (err < 0)
2860 				goto out;
2861 
2862 		} else if (sbi->s_cluster_ratio > 1 && end >= ex_end &&
2863 			   partial.state == initial) {
2864 			/*
2865 			 * If we're punching, there's an extent to the right.
2866 			 * If the partial cluster hasn't been set, set it to
2867 			 * that extent's first cluster and its state to nofree
2868 			 * so it won't be freed should it contain blocks to be
2869 			 * removed. If it's already set (tofree/nofree), we're
2870 			 * retrying and keep the original partial cluster info
2871 			 * so a cluster marked tofree as a result of earlier
2872 			 * extent removal is not lost.
2873 			 */
2874 			lblk = ex_end + 1;
2875 			err = ext4_ext_search_right(inode, path, &lblk, &pblk,
2876 						    NULL);
2877 			if (err < 0)
2878 				goto out;
2879 			if (pblk) {
2880 				partial.pclu = EXT4_B2C(sbi, pblk);
2881 				partial.state = nofree;
2882 			}
2883 		}
2884 	}
2885 	/*
2886 	 * We start scanning from the right side, freeing all the blocks
2887 	 * after i_size and walking into the tree depth-wise.
2888 	 */
2889 	depth = ext_depth(inode);
2890 	if (path) {
2891 		int k = i = depth;
2892 		while (--k > 0)
2893 			path[k].p_block =
2894 				le16_to_cpu(path[k].p_hdr->eh_entries)+1;
2895 	} else {
2896 		path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
2897 			       GFP_NOFS | __GFP_NOFAIL);
2898 		if (path == NULL) {
2899 			ext4_journal_stop(handle);
2900 			return -ENOMEM;
2901 		}
2902 		path[0].p_maxdepth = path[0].p_depth = depth;
2903 		path[0].p_hdr = ext_inode_hdr(inode);
2904 		i = 0;
2905 
2906 		if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
2907 			err = -EFSCORRUPTED;
2908 			goto out;
2909 		}
2910 	}
2911 	err = 0;
2912 
2913 	while (i >= 0 && err == 0) {
2914 		if (i == depth) {
2915 			/* this is leaf block */
2916 			err = ext4_ext_rm_leaf(handle, inode, path,
2917 					       &partial, start, end);
2918 			/* root level has p_bh == NULL, brelse() eats this */
2919 			brelse(path[i].p_bh);
2920 			path[i].p_bh = NULL;
2921 			i--;
2922 			continue;
2923 		}
2924 
2925 		/* this is index block */
2926 		if (!path[i].p_hdr) {
2927 			ext_debug(inode, "initialize header\n");
2928 			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2929 		}
2930 
2931 		if (!path[i].p_idx) {
2932 			/* this level hasn't been touched yet */
2933 			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2934 			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2935 			ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n",
2936 				  path[i].p_hdr,
2937 				  le16_to_cpu(path[i].p_hdr->eh_entries));
2938 		} else {
2939 			/* we were already here, see at next index */
2940 			path[i].p_idx--;
2941 		}
2942 
2943 		ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n",
2944 				i, EXT_FIRST_INDEX(path[i].p_hdr),
2945 				path[i].p_idx);
2946 		if (ext4_ext_more_to_rm(path + i)) {
2947 			struct buffer_head *bh;
2948 			/* go to the next level */
2949 			ext_debug(inode, "move to level %d (block %llu)\n",
2950 				  i + 1, ext4_idx_pblock(path[i].p_idx));
2951 			memset(path + i + 1, 0, sizeof(*path));
2952 			bh = read_extent_tree_block(inode,
2953 				ext4_idx_pblock(path[i].p_idx), depth - i - 1,
2954 				EXT4_EX_NOCACHE);
2955 			if (IS_ERR(bh)) {
2956 				/* should we reset i_size? */
2957 				err = PTR_ERR(bh);
2958 				break;
2959 			}
2960 			/* Yield here to deal with large extent trees.
2961 			 * Should be a no-op if we did IO above. */
2962 			cond_resched();
2963 			if (WARN_ON(i + 1 > depth)) {
2964 				err = -EFSCORRUPTED;
2965 				break;
2966 			}
2967 			path[i + 1].p_bh = bh;
2968 
2969 			/* save actual number of indexes since this
2970 			 * number is changed at the next iteration */
2971 			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2972 			i++;
2973 		} else {
2974 			/* we finished processing this index, go up */
2975 			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2976 				/* index is empty, remove it;
2977 				 * the handle must already have been
2978 				 * prepared when truncating the leaf */
2979 				err = ext4_ext_rm_idx(handle, inode, path, i);
2980 			}
2981 			/* root level has p_bh == NULL, brelse() eats this */
2982 			brelse(path[i].p_bh);
2983 			path[i].p_bh = NULL;
2984 			i--;
2985 			ext_debug(inode, "return to level %d\n", i);
2986 		}
2987 	}
2988 
2989 	trace_ext4_ext_remove_space_done(inode, start, end, depth, &partial,
2990 					 path->p_hdr->eh_entries);
2991 
2992 	/*
2993 	 * if there's a partial cluster and we have removed the first extent
2994 	 * in the file, then we also free that partial cluster
2995 	 */
2996 	if (partial.state == tofree && err == 0) {
2997 		int flags = get_default_free_blocks_flags(inode);
2998 
2999 		if (ext4_is_pending(inode, partial.lblk))
3000 			flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
3001 		ext4_free_blocks(handle, inode, NULL,
3002 				 EXT4_C2B(sbi, partial.pclu),
3003 				 sbi->s_cluster_ratio, flags);
3004 		if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
3005 			ext4_rereserve_cluster(inode, partial.lblk);
3006 		partial.state = initial;
3007 	}
3008 
3009 	/* TODO: flexible tree reduction should be here */
3010 	if (path->p_hdr->eh_entries == 0) {
3011 		/*
3012 		 * truncating to zero freed the whole tree,
3013 		 * so we need to correct eh_depth
3014 		 */
3015 		err = ext4_ext_get_access(handle, inode, path);
3016 		if (err == 0) {
3017 			ext_inode_hdr(inode)->eh_depth = 0;
3018 			ext_inode_hdr(inode)->eh_max =
3019 				cpu_to_le16(ext4_ext_space_root(inode, 0));
3020 			err = ext4_ext_dirty(handle, inode, path);
3021 		}
3022 	}
3023 out:
3024 	ext4_ext_drop_refs(path);
3025 	kfree(path);
3026 	path = NULL;
3027 	if (err == -EAGAIN)
3028 		goto again;
3029 	ext4_journal_stop(handle);
3030 
3031 	return err;
3032 }
3033 
3034 /*
3035  * called at mount time
3036  */
3037 void ext4_ext_init(struct super_block *sb)
3038 {
3039 	/*
3040 	 * possible initialization would be here
3041 	 */
3042 
3043 	if (ext4_has_feature_extents(sb)) {
3044 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
3045 		printk(KERN_INFO "EXT4-fs: file extents enabled"
3046 #ifdef AGGRESSIVE_TEST
3047 		       ", aggressive tests"
3048 #endif
3049 #ifdef CHECK_BINSEARCH
3050 		       ", check binsearch"
3051 #endif
3052 #ifdef EXTENTS_STATS
3053 		       ", stats"
3054 #endif
3055 		       "\n");
3056 #endif
3057 #ifdef EXTENTS_STATS
3058 		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
3059 		EXT4_SB(sb)->s_ext_min = 1 << 30;
3060 		EXT4_SB(sb)->s_ext_max = 0;
3061 #endif
3062 	}
3063 }
3064 
3065 /*
3066  * called at umount time
3067  */
3068 void ext4_ext_release(struct super_block *sb)
3069 {
3070 	if (!ext4_has_feature_extents(sb))
3071 		return;
3072 
3073 #ifdef EXTENTS_STATS
3074 	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
3075 		struct ext4_sb_info *sbi = EXT4_SB(sb);
3076 		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
3077 			sbi->s_ext_blocks, sbi->s_ext_extents,
3078 			sbi->s_ext_blocks / sbi->s_ext_extents);
3079 		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
3080 			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
3081 	}
3082 #endif
3083 }
3084 
3085 static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
3086 {
3087 	ext4_lblk_t  ee_block;
3088 	ext4_fsblk_t ee_pblock;
3089 	unsigned int ee_len;
3090 
3091 	ee_block  = le32_to_cpu(ex->ee_block);
3092 	ee_len    = ext4_ext_get_actual_len(ex);
3093 	ee_pblock = ext4_ext_pblock(ex);
3094 
3095 	if (ee_len == 0)
3096 		return 0;
3097 
3098 	return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
3099 				     EXTENT_STATUS_WRITTEN);
3100 }
3101 
3102 /* FIXME!! we need to try to merge to left or right after zero-out  */
3103 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
3104 {
3105 	ext4_fsblk_t ee_pblock;
3106 	unsigned int ee_len;
3107 
3108 	ee_len    = ext4_ext_get_actual_len(ex);
3109 	ee_pblock = ext4_ext_pblock(ex);
3110 	return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock,
3111 				  ee_len);
3112 }
3113 
3114 /*
3115  * ext4_split_extent_at() splits an extent at given block.
3116  *
3117  * @handle: the journal handle
3118  * @inode: the file inode
3119  * @path: the path to the extent
3120  * @split: the logical block where the extent is split.
3121  * @split_flag: indicates whether the extent could be zeroed out if the split
3122  *		 fails, and the states (initialized or unwritten) of the new extents.
3123  * @flags: flags used to insert the new extent into the extent tree.
3124  *
3126  * Splits extent [a, b] into two extents [a, @split) and [@split, b], whose
3127  * states are determined by @split_flag.
3128  *
3129  * There are two cases:
3130  *  a> the extent is split into two extents.
3131  *  b> no split is needed, just mark the extent.
3132  *
3133  * return 0 on success.
3134  */
3135 static int ext4_split_extent_at(handle_t *handle,
3136 			     struct inode *inode,
3137 			     struct ext4_ext_path **ppath,
3138 			     ext4_lblk_t split,
3139 			     int split_flag,
3140 			     int flags)
3141 {
3142 	struct ext4_ext_path *path = *ppath;
3143 	ext4_fsblk_t newblock;
3144 	ext4_lblk_t ee_block;
3145 	struct ext4_extent *ex, newex, orig_ex, zero_ex;
3146 	struct ext4_extent *ex2 = NULL;
3147 	unsigned int ee_len, depth;
3148 	int err = 0;
3149 
3150 	BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
3151 	       (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
3152 
3153 	ext_debug(inode, "logical block %llu\n", (unsigned long long)split);
3154 
3155 	ext4_ext_show_leaf(inode, path);
3156 
3157 	depth = ext_depth(inode);
3158 	ex = path[depth].p_ext;
3159 	ee_block = le32_to_cpu(ex->ee_block);
3160 	ee_len = ext4_ext_get_actual_len(ex);
3161 	newblock = split - ee_block + ext4_ext_pblock(ex);
3162 
3163 	BUG_ON(split < ee_block || split >= (ee_block + ee_len));
3164 	BUG_ON(!ext4_ext_is_unwritten(ex) &&
3165 	       split_flag & (EXT4_EXT_MAY_ZEROOUT |
3166 			     EXT4_EXT_MARK_UNWRIT1 |
3167 			     EXT4_EXT_MARK_UNWRIT2));
3168 
3169 	err = ext4_ext_get_access(handle, inode, path + depth);
3170 	if (err)
3171 		goto out;
3172 
3173 	if (split == ee_block) {
3174 		/*
3175 		 * case b: block @split is the block that the extent begins with
3176 		 * then we just change the state of the extent, and splitting
3177 		 * is not needed.
3178 		 */
3179 		if (split_flag & EXT4_EXT_MARK_UNWRIT2)
3180 			ext4_ext_mark_unwritten(ex);
3181 		else
3182 			ext4_ext_mark_initialized(ex);
3183 
3184 		if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
3185 			ext4_ext_try_to_merge(handle, inode, path, ex);
3186 
3187 		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3188 		goto out;
3189 	}
3190 
3191 	/* case a */
3192 	memcpy(&orig_ex, ex, sizeof(orig_ex));
3193 	ex->ee_len = cpu_to_le16(split - ee_block);
3194 	if (split_flag & EXT4_EXT_MARK_UNWRIT1)
3195 		ext4_ext_mark_unwritten(ex);
3196 
3197 	/*
3198 	 * the path may lead to a new leaf, not to the original leaf any
3199 	 * more, after ext4_ext_insert_extent() returns.
3200 	 */
3201 	err = ext4_ext_dirty(handle, inode, path + depth);
3202 	if (err)
3203 		goto fix_extent_len;
3204 
3205 	ex2 = &newex;
3206 	ex2->ee_block = cpu_to_le32(split);
3207 	ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
3208 	ext4_ext_store_pblock(ex2, newblock);
3209 	if (split_flag & EXT4_EXT_MARK_UNWRIT2)
3210 		ext4_ext_mark_unwritten(ex2);
3211 
3212 	err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
3213 	if (err != -ENOSPC && err != -EDQUOT)
3214 		goto out;
3215 
3216 	if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
3217 		if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
3218 			if (split_flag & EXT4_EXT_DATA_VALID1) {
3219 				err = ext4_ext_zeroout(inode, ex2);
3220 				zero_ex.ee_block = ex2->ee_block;
3221 				zero_ex.ee_len = cpu_to_le16(
3222 						ext4_ext_get_actual_len(ex2));
3223 				ext4_ext_store_pblock(&zero_ex,
3224 						      ext4_ext_pblock(ex2));
3225 			} else {
3226 				err = ext4_ext_zeroout(inode, ex);
3227 				zero_ex.ee_block = ex->ee_block;
3228 				zero_ex.ee_len = cpu_to_le16(
3229 						ext4_ext_get_actual_len(ex));
3230 				ext4_ext_store_pblock(&zero_ex,
3231 						      ext4_ext_pblock(ex));
3232 			}
3233 		} else {
3234 			err = ext4_ext_zeroout(inode, &orig_ex);
3235 			zero_ex.ee_block = orig_ex.ee_block;
3236 			zero_ex.ee_len = cpu_to_le16(
3237 						ext4_ext_get_actual_len(&orig_ex));
3238 			ext4_ext_store_pblock(&zero_ex,
3239 					      ext4_ext_pblock(&orig_ex));
3240 		}
3241 
3242 		if (!err) {
3243 			/* update the extent length and mark as initialized */
3244 			ex->ee_len = cpu_to_le16(ee_len);
3245 			ext4_ext_try_to_merge(handle, inode, path, ex);
3246 			err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3247 			if (!err)
3248 				/* update extent status tree */
3249 				err = ext4_zeroout_es(inode, &zero_ex);
3250 			/* If we failed at this point, we don't know in which
3251 			 * state the extent tree exactly is so don't try to fix
3252 			 * length of the original extent as it may do even more
3253 			 * damage.
3254 			 */
3255 			goto out;
3256 		}
3257 	}
3258 
3259 fix_extent_len:
3260 	ex->ee_len = orig_ex.ee_len;
3261 	/*
3262 	 * Ignore ext4_ext_dirty return value since we are already in error path
3263 	 * and err is a non-zero error code.
3264 	 */
3265 	ext4_ext_dirty(handle, inode, path + path->p_depth);
3266 	return err;
3267 out:
3268 	ext4_ext_show_leaf(inode, path);
3269 	return err;
3270 }
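
/*
 * Worked example (illustrative): splitting an extent that maps logical
 * [100, 149] to physical 1000 at block 120 shrinks ex to 20 blocks
 * (100..119 -> 1000..1019) and creates ex2 covering 120..149 starting at
 * the physical block computed below, exactly the newblock above.
 */
static ext4_fsblk_t __maybe_unused example_split_newblock(void)
{
	ext4_lblk_t ee_block = 100, split = 120;
	ext4_fsblk_t pblk = 1000;

	return split - ee_block + pblk;		/* == 1020 */
}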
3271 
3272 /*
3273  * ext4_split_extent() splits an extent and marks the extent which is
3274  * covered by @map as @split_flag indicates.
3275  *
3276  * It may result in splitting the extent into multiple extents (up to three)
3277  * There are three possibilities:
3278  *   a> There is no split required
3279  *   b> Splits in two extents: Split is happening at either end of the extent
3280  *   c> Splits in three extents: Someone is splitting in the middle of the extent
3281  *
3282  */
3283 static int ext4_split_extent(handle_t *handle,
3284 			      struct inode *inode,
3285 			      struct ext4_ext_path **ppath,
3286 			      struct ext4_map_blocks *map,
3287 			      int split_flag,
3288 			      int flags)
3289 {
3290 	struct ext4_ext_path *path = *ppath;
3291 	ext4_lblk_t ee_block;
3292 	struct ext4_extent *ex;
3293 	unsigned int ee_len, depth;
3294 	int err = 0;
3295 	int unwritten;
3296 	int split_flag1, flags1;
3297 	int allocated = map->m_len;
3298 
3299 	depth = ext_depth(inode);
3300 	ex = path[depth].p_ext;
3301 	ee_block = le32_to_cpu(ex->ee_block);
3302 	ee_len = ext4_ext_get_actual_len(ex);
3303 	unwritten = ext4_ext_is_unwritten(ex);
3304 
3305 	if (map->m_lblk + map->m_len < ee_block + ee_len) {
3306 		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
3307 		flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
3308 		if (unwritten)
3309 			split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
3310 				       EXT4_EXT_MARK_UNWRIT2;
3311 		if (split_flag & EXT4_EXT_DATA_VALID2)
3312 			split_flag1 |= EXT4_EXT_DATA_VALID1;
3313 		err = ext4_split_extent_at(handle, inode, ppath,
3314 				map->m_lblk + map->m_len, split_flag1, flags1);
3315 		if (err)
3316 			goto out;
3317 	} else {
3318 		allocated = ee_len - (map->m_lblk - ee_block);
3319 	}
3320 	/*
3321 	 * Updating the path is required because the previous ext4_split_extent_at()
3322 	 * may have split the original leaf or zeroed out the extent.
3323 	 */
3324 	path = ext4_find_extent(inode, map->m_lblk, ppath, flags);
3325 	if (IS_ERR(path))
3326 		return PTR_ERR(path);
3327 	depth = ext_depth(inode);
3328 	ex = path[depth].p_ext;
3329 	if (!ex) {
3330 		EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3331 				 (unsigned long) map->m_lblk);
3332 		return -EFSCORRUPTED;
3333 	}
3334 	unwritten = ext4_ext_is_unwritten(ex);
3335 	split_flag1 = 0;
3336 
3337 	if (map->m_lblk >= ee_block) {
3338 		split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
3339 		if (unwritten) {
3340 			split_flag1 |= EXT4_EXT_MARK_UNWRIT1;
3341 			split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
3342 						     EXT4_EXT_MARK_UNWRIT2);
3343 		}
3344 		err = ext4_split_extent_at(handle, inode, ppath,
3345 				map->m_lblk, split_flag1, flags);
3346 		if (err)
3347 			goto out;
3348 	}
3349 
3350 	ext4_ext_show_leaf(inode, path);
3351 out:
3352 	return err ? err : allocated;
3353 }
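
/*
 * Illustrative sketch: how the cases a/b/c above translate into cuts.  A
 * mapped range strictly inside the extent needs two cuts (three pieces),
 * a range touching exactly one end needs one, and a range covering the
 * whole extent needs none.
 */
static int __maybe_unused example_split_count(ext4_lblk_t m_lblk,
					      unsigned int m_len,
					      ext4_lblk_t ee_block,
					      unsigned int ee_len)
{
	int cuts = 0;

	if (m_lblk + m_len < ee_block + ee_len)	/* a tail survives */
		cuts++;
	if (m_lblk > ee_block)			/* a head survives */
		cuts++;
	return cuts;				/* 0, 1, or 2 */
}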
3354 
3355 /*
3356  * This function is called by ext4_ext_map_blocks() if someone tries to write
3357  * to an unwritten extent. It may result in splitting the unwritten
3358  * extent into multiple extents (up to three - one initialized and two
3359  * unwritten).
3360  * There are three possibilities:
3361  *   a> There is no split required: Entire extent should be initialized
3362  *   b> Splits in two extents: Write is happening at either end of the extent
3363  *   c> Splits in three extents: Someone is writing in the middle of the extent
3364  *
3365  * Pre-conditions:
3366  *  - The extent pointed to by 'path' is unwritten.
3367  *  - The extent pointed to by 'path' contains a superset
3368  *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
3369  *
3370  * Post-conditions on success:
3371  *  - the returned value is the number of blocks beyond map->m_lblk
3372  *    that are allocated and initialized.
3373  *    It is guaranteed to be >= map->m_len.
3374  */
3375 static int ext4_ext_convert_to_initialized(handle_t *handle,
3376 					   struct inode *inode,
3377 					   struct ext4_map_blocks *map,
3378 					   struct ext4_ext_path **ppath,
3379 					   int flags)
3380 {
3381 	struct ext4_ext_path *path = *ppath;
3382 	struct ext4_sb_info *sbi;
3383 	struct ext4_extent_header *eh;
3384 	struct ext4_map_blocks split_map;
3385 	struct ext4_extent zero_ex1, zero_ex2;
3386 	struct ext4_extent *ex, *abut_ex;
3387 	ext4_lblk_t ee_block, eof_block;
3388 	unsigned int ee_len, depth, map_len = map->m_len;
3389 	int allocated = 0, max_zeroout = 0;
3390 	int err = 0;
3391 	int split_flag = EXT4_EXT_DATA_VALID2;
3392 
3393 	ext_debug(inode, "logical block %llu, max_blocks %u\n",
3394 		  (unsigned long long)map->m_lblk, map_len);
3395 
3396 	sbi = EXT4_SB(inode->i_sb);
3397 	eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
3398 			>> inode->i_sb->s_blocksize_bits;
3399 	if (eof_block < map->m_lblk + map_len)
3400 		eof_block = map->m_lblk + map_len;
3401 
3402 	depth = ext_depth(inode);
3403 	eh = path[depth].p_hdr;
3404 	ex = path[depth].p_ext;
3405 	ee_block = le32_to_cpu(ex->ee_block);
3406 	ee_len = ext4_ext_get_actual_len(ex);
3407 	zero_ex1.ee_len = 0;
3408 	zero_ex2.ee_len = 0;
3409 
3410 	trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
3411 
3412 	/* Pre-conditions */
3413 	BUG_ON(!ext4_ext_is_unwritten(ex));
3414 	BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
3415 
3416 	/*
3417 	 * Attempt to transfer newly initialized blocks from the currently
3418 	 * unwritten extent to its neighbor. This is much cheaper
3419 	 * than an insertion followed by a merge as those involve costly
3420 	 * memmove() calls. Transferring to the left is the common case in
3421 	 * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
3422 	 * followed by append writes.
3423 	 *
3424 	 * Limitations of the current logic:
3425 	 *  - L1: we do not deal with writes covering the whole extent.
3426 	 *    This would require removing the extent if the transfer
3427 	 *    is possible.
3428 	 *  - L2: we only attempt to merge with an extent stored in the
3429 	 *    same extent tree node.
3430 	 */
3431 	if ((map->m_lblk == ee_block) &&
3432 		/* See if we can merge left */
3433 		(map_len < ee_len) &&		/*L1*/
3434 		(ex > EXT_FIRST_EXTENT(eh))) {	/*L2*/
3435 		ext4_lblk_t prev_lblk;
3436 		ext4_fsblk_t prev_pblk, ee_pblk;
3437 		unsigned int prev_len;
3438 
3439 		abut_ex = ex - 1;
3440 		prev_lblk = le32_to_cpu(abut_ex->ee_block);
3441 		prev_len = ext4_ext_get_actual_len(abut_ex);
3442 		prev_pblk = ext4_ext_pblock(abut_ex);
3443 		ee_pblk = ext4_ext_pblock(ex);
3444 
3445 		/*
3446 		 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3447 		 * upon those conditions:
3448 		 * - C1: abut_ex is initialized,
3449 		 * - C2: abut_ex is logically abutting ex,
3450 		 * - C3: abut_ex is physically abutting ex,
3451 		 * - C4: abut_ex can receive the additional blocks without
3452 		 *   overflowing the (initialized) length limit.
3453 		 */
3454 		if ((!ext4_ext_is_unwritten(abut_ex)) &&		/*C1*/
3455 			((prev_lblk + prev_len) == ee_block) &&		/*C2*/
3456 			((prev_pblk + prev_len) == ee_pblk) &&		/*C3*/
3457 			(prev_len < (EXT_INIT_MAX_LEN - map_len))) {	/*C4*/
3458 			err = ext4_ext_get_access(handle, inode, path + depth);
3459 			if (err)
3460 				goto out;
3461 
3462 			trace_ext4_ext_convert_to_initialized_fastpath(inode,
3463 				map, ex, abut_ex);
3464 
3465 			/* Shift the start of ex by 'map_len' blocks */
3466 			ex->ee_block = cpu_to_le32(ee_block + map_len);
3467 			ext4_ext_store_pblock(ex, ee_pblk + map_len);
3468 			ex->ee_len = cpu_to_le16(ee_len - map_len);
3469 			ext4_ext_mark_unwritten(ex); /* Restore the flag */
3470 
3471 			/* Extend abut_ex by 'map_len' blocks */
3472 			abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
3473 
3474 			/* Result: number of initialized blocks past m_lblk */
3475 			allocated = map_len;
3476 		}
3477 	} else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
3478 		   (map_len < ee_len) &&	/*L1*/
3479 		   ex < EXT_LAST_EXTENT(eh)) {	/*L2*/
3480 		/* See if we can merge right */
3481 		ext4_lblk_t next_lblk;
3482 		ext4_fsblk_t next_pblk, ee_pblk;
3483 		unsigned int next_len;
3484 
3485 		abut_ex = ex + 1;
3486 		next_lblk = le32_to_cpu(abut_ex->ee_block);
3487 		next_len = ext4_ext_get_actual_len(abut_ex);
3488 		next_pblk = ext4_ext_pblock(abut_ex);
3489 		ee_pblk = ext4_ext_pblock(ex);
3490 
3491 		/*
3492 		 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3493 		 * upon those conditions:
3494 		 * - C1: abut_ex is initialized,
3495 		 * - C2: abut_ex is logically abutting ex,
3496 		 * - C3: abut_ex is physically abutting ex,
3497 		 * - C4: abut_ex can receive the additional blocks without
3498 		 *   overflowing the (initialized) length limit.
3499 		 */
3500 		if ((!ext4_ext_is_unwritten(abut_ex)) &&		/*C1*/
3501 		    ((map->m_lblk + map_len) == next_lblk) &&		/*C2*/
3502 		    ((ee_pblk + ee_len) == next_pblk) &&		/*C3*/
3503 		    (next_len < (EXT_INIT_MAX_LEN - map_len))) {	/*C4*/
3504 			err = ext4_ext_get_access(handle, inode, path + depth);
3505 			if (err)
3506 				goto out;
3507 
3508 			trace_ext4_ext_convert_to_initialized_fastpath(inode,
3509 				map, ex, abut_ex);
3510 
3511 			/* Shift the start of abut_ex by 'map_len' blocks */
3512 			abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
3513 			ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
3514 			ex->ee_len = cpu_to_le16(ee_len - map_len);
3515 			ext4_ext_mark_unwritten(ex); /* Restore the flag */
3516 
3517 			/* Extend abut_ex by 'map_len' blocks */
3518 			abut_ex->ee_len = cpu_to_le16(next_len + map_len);
3519 
3520 			/* Result: number of initialized blocks past m_lblk */
3521 			allocated = map_len;
3522 		}
3523 	}
3524 	if (allocated) {
3525 		/* Mark the block containing both extents as dirty */
3526 		err = ext4_ext_dirty(handle, inode, path + depth);
3527 
3528 		/* Update path to point to the right extent */
3529 		path[depth].p_ext = abut_ex;
3530 		goto out;
3531 	} else
3532 		allocated = ee_len - (map->m_lblk - ee_block);
3533 
3534 	WARN_ON(map->m_lblk < ee_block);
3535 	/*
3536 	 * It is safe to convert extent to initialized via explicit
3537 	 * zeroout only if extent is fully inside i_size or new_size.
3538 	 */
3539 	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3540 
3541 	if (EXT4_EXT_MAY_ZEROOUT & split_flag)
3542 		max_zeroout = sbi->s_extent_max_zeroout_kb >>
3543 			(inode->i_sb->s_blocksize_bits - 10);
3544 
3545 	/*
3546 	 * five cases:
3547 	 * 1. split the extent into three extents.
3548 	 * 2. split the extent into two extents, zeroout the head of the first
3549 	 *    extent.
3550 	 * 3. split the extent into two extents, zeroout the tail of the second
3551 	 *    extent.
3552 	 * 4. split the extent into two extents without zeroout.
3553 	 * 5. no splitting needed, just possibly zeroout the head and / or the
3554 	 *    tail of the extent.
3555 	 */
3556 	split_map.m_lblk = map->m_lblk;
3557 	split_map.m_len = map->m_len;
3558 
3559 	if (max_zeroout && (allocated > split_map.m_len)) {
3560 		if (allocated <= max_zeroout) {
3561 			/* case 3 or 5 */
3562 			zero_ex1.ee_block =
3563 				 cpu_to_le32(split_map.m_lblk +
3564 					     split_map.m_len);
3565 			zero_ex1.ee_len =
3566 				cpu_to_le16(allocated - split_map.m_len);
3567 			ext4_ext_store_pblock(&zero_ex1,
3568 				ext4_ext_pblock(ex) + split_map.m_lblk +
3569 				split_map.m_len - ee_block);
3570 			err = ext4_ext_zeroout(inode, &zero_ex1);
3571 			if (err)
3572 				goto out;
3573 			split_map.m_len = allocated;
3574 		}
3575 		if (split_map.m_lblk - ee_block + split_map.m_len <
3576 								max_zeroout) {
3577 			/* case 2 or 5 */
3578 			if (split_map.m_lblk != ee_block) {
3579 				zero_ex2.ee_block = ex->ee_block;
3580 				zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
3581 							ee_block);
3582 				ext4_ext_store_pblock(&zero_ex2,
3583 						      ext4_ext_pblock(ex));
3584 				err = ext4_ext_zeroout(inode, &zero_ex2);
3585 				if (err)
3586 					goto out;
3587 			}
3588 
3589 			split_map.m_len += split_map.m_lblk - ee_block;
3590 			split_map.m_lblk = ee_block;
3591 			allocated = map->m_len;
3592 		}
3593 	}
3594 
3595 	err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
3596 				flags);
3597 	if (err > 0)
3598 		err = 0;
3599 out:
3600 	/* If we have gotten a failure, don't update the extent status tree */
3601 	if (!err) {
3602 		err = ext4_zeroout_es(inode, &zero_ex1);
3603 		if (!err)
3604 			err = ext4_zeroout_es(inode, &zero_ex2);
3605 	}
3606 	return err ? err : allocated;
3607 }
3608 
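/*
 * Illustrative sketch (editor's addition, not part of extents.c): the
 * "merge left" fast path above, restated over a simplified in-memory
 * extent. struct toy_extent and toy_merge_left() are hypothetical
 * stand-ins; the real code operates on little-endian on-disk fields
 * via cpu_to_le32()/le32_to_cpu() and friends.
 */
struct toy_extent {
	unsigned int lblk;		/* first logical block */
	unsigned long long pblk;	/* first physical block */
	unsigned int len;		/* length in blocks */
	int unwritten;			/* extent is unwritten? */
};

/*
 * Transfer the first map_len blocks of the unwritten extent 'ex' to its
 * initialized left neighbour 'prev', mirroring checks C1-C4 above.
 * Returns the number of blocks transferred, or 0 if the fast path does
 * not apply.
 */
static unsigned int toy_merge_left(struct toy_extent *prev,
				   struct toy_extent *ex,
				   unsigned int map_len,
				   unsigned int init_max_len)
{
	if (prev->unwritten)				/* C1 */
		return 0;
	if (prev->lblk + prev->len != ex->lblk)		/* C2 */
		return 0;
	if (prev->pblk + prev->len != ex->pblk)		/* C3 */
		return 0;
	if (prev->len >= init_max_len - map_len)	/* C4 */
		return 0;

	/* Shift the start of ex forward by map_len blocks... */
	ex->lblk += map_len;
	ex->pblk += map_len;
	ex->len -= map_len;
	/* ...and grow the initialized neighbour by the same amount. */
	prev->len += map_len;
	return map_len;
}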
3609 /*
3610  * This function is called by ext4_ext_map_blocks() from
3611  * ext4_get_blocks_dio_write() when DIO is used to write
3612  * to an unwritten extent.
3613  *
3614  * Writing to an unwritten extent may result in splitting the unwritten
3615  * extent into multiple initialized/unwritten extents (up to three).
3616  * There are three possibilities:
3617  *   a> No split required: the entire extent should remain unwritten
3618  *   b> Split into two extents: the write happens at either end of the extent
3619  *   c> Split into three extents: someone is writing in the middle of the extent
3620  *
3621  * This works the same way in the case of initialized -> unwritten conversion.
3622  *
3623  * One or more index blocks may be needed if the extent tree grows after
3624  * the unwritten extent is split. To prevent ENOSPC from occurring when
3625  * the IO completes, we need to split the unwritten extent before the DIO
3626  * is submitted. The unwritten extent passed in will be split into at most
3627  * three unwritten extents. After the IO completes, the part being filled
3628  * will be converted to initialized by the end_io callback function
3629  * via ext4_convert_unwritten_extents().
3630  *
3631  * Returns the size of the unwritten extent to be written on success.
3632  */
3633 static int ext4_split_convert_extents(handle_t *handle,
3634 					struct inode *inode,
3635 					struct ext4_map_blocks *map,
3636 					struct ext4_ext_path **ppath,
3637 					int flags)
3638 {
3639 	struct ext4_ext_path *path = *ppath;
3640 	ext4_lblk_t eof_block;
3641 	ext4_lblk_t ee_block;
3642 	struct ext4_extent *ex;
3643 	unsigned int ee_len;
3644 	int split_flag = 0, depth;
3645 
3646 	ext_debug(inode, "logical block %llu, max_blocks %u\n",
3647 		  (unsigned long long)map->m_lblk, map->m_len);
3648 
3649 	eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
3650 			>> inode->i_sb->s_blocksize_bits;
3651 	if (eof_block < map->m_lblk + map->m_len)
3652 		eof_block = map->m_lblk + map->m_len;
3653 	/*
3654 	 * It is safe to convert extent to initialized via explicit
3655 	 * zeroout only if extent is fully inside i_size or new_size.
3656 	 */
3657 	depth = ext_depth(inode);
3658 	ex = path[depth].p_ext;
3659 	ee_block = le32_to_cpu(ex->ee_block);
3660 	ee_len = ext4_ext_get_actual_len(ex);
3661 
3662 	/* Convert to unwritten */
3663 	if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
3664 		split_flag |= EXT4_EXT_DATA_VALID1;
3665 	/* Convert to initialized */
3666 	} else if (flags & EXT4_GET_BLOCKS_CONVERT) {
3667 		split_flag |= ee_block + ee_len <= eof_block ?
3668 			      EXT4_EXT_MAY_ZEROOUT : 0;
3669 		split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
3670 	}
3671 	flags |= EXT4_GET_BLOCKS_PRE_IO;
3672 	return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
3673 }
3674 
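/*
 * Illustrative sketch (editor's addition): the split_flag selection in
 * ext4_split_convert_extents() above, restated as a pure function. The
 * numeric values mirror the EXT4_EXT_* split flags defined near the top
 * of this file; the function itself is a hypothetical stand-in.
 */
static int toy_split_flags(int convert_to_unwritten, int convert_to_written,
			   int extent_fits_under_eof)
{
	int split_flag = 0;

	if (convert_to_unwritten) {
		/* written -> unwritten: first half keeps valid data */
		split_flag |= 0x8;		/* EXT4_EXT_DATA_VALID1 */
	} else if (convert_to_written) {
		/* unwritten -> written at IO completion time */
		if (extent_fits_under_eof)
			split_flag |= 0x1;	/* EXT4_EXT_MAY_ZEROOUT */
		split_flag |= 0x4 | 0x10;	/* MARK_UNWRIT2 | DATA_VALID2 */
	}
	return split_flag;
}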
3675 static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3676 						struct inode *inode,
3677 						struct ext4_map_blocks *map,
3678 						struct ext4_ext_path **ppath)
3679 {
3680 	struct ext4_ext_path *path = *ppath;
3681 	struct ext4_extent *ex;
3682 	ext4_lblk_t ee_block;
3683 	unsigned int ee_len;
3684 	int depth;
3685 	int err = 0;
3686 
3687 	depth = ext_depth(inode);
3688 	ex = path[depth].p_ext;
3689 	ee_block = le32_to_cpu(ex->ee_block);
3690 	ee_len = ext4_ext_get_actual_len(ex);
3691 
3692 	ext_debug(inode, "logical block %llu, max_blocks %u\n",
3693 		  (unsigned long long)ee_block, ee_len);
3694 
3695 	/* If the extent is larger than requested, it is a clear sign that we
3696 	 * still have some extent state machine issues left. So an extent split
3697 	 * is still required.
3698 	 * TODO: Once all related issues are fixed, this situation should be
3699 	 * treated as illegal.
3700 	 */
3701 	if (ee_block != map->m_lblk || ee_len > map->m_len) {
3702 #ifdef CONFIG_EXT4_DEBUG
3703 		ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu,"
3704 			     " len %u; IO logical block %llu, len %u",
3705 			     inode->i_ino, (unsigned long long)ee_block, ee_len,
3706 			     (unsigned long long)map->m_lblk, map->m_len);
3707 #endif
3708 		err = ext4_split_convert_extents(handle, inode, map, ppath,
3709 						 EXT4_GET_BLOCKS_CONVERT);
3710 		if (err < 0)
3711 			return err;
3712 		path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3713 		if (IS_ERR(path))
3714 			return PTR_ERR(path);
3715 		depth = ext_depth(inode);
3716 		ex = path[depth].p_ext;
3717 	}
3718 
3719 	err = ext4_ext_get_access(handle, inode, path + depth);
3720 	if (err)
3721 		goto out;
3722 	/* first mark the extent as initialized */
3723 	ext4_ext_mark_initialized(ex);
3724 
3725 	/* note: ext4_ext_correct_indexes() isn't needed here because
3726 	 * borders are not changed
3727 	 */
3728 	ext4_ext_try_to_merge(handle, inode, path, ex);
3729 
3730 	/* Mark modified extent as dirty */
3731 	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3732 out:
3733 	ext4_ext_show_leaf(inode, path);
3734 	return err;
3735 }
3736 
3737 static int
3738 convert_initialized_extent(handle_t *handle, struct inode *inode,
3739 			   struct ext4_map_blocks *map,
3740 			   struct ext4_ext_path **ppath,
3741 			   unsigned int *allocated)
3742 {
3743 	struct ext4_ext_path *path = *ppath;
3744 	struct ext4_extent *ex;
3745 	ext4_lblk_t ee_block;
3746 	unsigned int ee_len;
3747 	int depth;
3748 	int err = 0;
3749 
3750 	/*
3751 	 * Make sure that the extent is no bigger than we support with
3752 	 * unwritten extents
3753 	 */
3754 	if (map->m_len > EXT_UNWRITTEN_MAX_LEN)
3755 		map->m_len = EXT_UNWRITTEN_MAX_LEN / 2;
3756 
3757 	depth = ext_depth(inode);
3758 	ex = path[depth].p_ext;
3759 	ee_block = le32_to_cpu(ex->ee_block);
3760 	ee_len = ext4_ext_get_actual_len(ex);
3761 
3762 	ext_debug(inode, "logical block %llu, max_blocks %u\n",
3763 		  (unsigned long long)ee_block, ee_len);
3764 
3765 	if (ee_block != map->m_lblk || ee_len > map->m_len) {
3766 		err = ext4_split_convert_extents(handle, inode, map, ppath,
3767 				EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
3768 		if (err < 0)
3769 			return err;
3770 		path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3771 		if (IS_ERR(path))
3772 			return PTR_ERR(path);
3773 		depth = ext_depth(inode);
3774 		ex = path[depth].p_ext;
3775 		if (!ex) {
3776 			EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3777 					 (unsigned long) map->m_lblk);
3778 			return -EFSCORRUPTED;
3779 		}
3780 	}
3781 
3782 	err = ext4_ext_get_access(handle, inode, path + depth);
3783 	if (err)
3784 		return err;
3785 	/* first mark the extent as unwritten */
3786 	ext4_ext_mark_unwritten(ex);
3787 
3788 	/* note: ext4_ext_correct_indexes() isn't needed here because
3789 	 * borders are not changed
3790 	 */
3791 	ext4_ext_try_to_merge(handle, inode, path, ex);
3792 
3793 	/* Mark modified extent as dirty */
3794 	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3795 	if (err)
3796 		return err;
3797 	ext4_ext_show_leaf(inode, path);
3798 
3799 	ext4_update_inode_fsync_trans(handle, inode, 1);
3800 
3801 	map->m_flags |= EXT4_MAP_UNWRITTEN;
3802 	if (*allocated > map->m_len)
3803 		*allocated = map->m_len;
3804 	map->m_len = *allocated;
3805 	return 0;
3806 }
3807 
3808 static int
3809 ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
3810 			struct ext4_map_blocks *map,
3811 			struct ext4_ext_path **ppath, int flags,
3812 			unsigned int allocated, ext4_fsblk_t newblock)
3813 {
3814 	struct ext4_ext_path __maybe_unused *path = *ppath;
3815 	int ret = 0;
3816 	int err = 0;
3817 
3818 	ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n",
3819 		  (unsigned long long)map->m_lblk, map->m_len, flags,
3820 		  allocated);
3821 	ext4_ext_show_leaf(inode, path);
3822 
3823 	/*
3824 	 * When writing into unwritten space, we should not fail to
3825 	 * allocate metadata blocks for the new extent block if needed.
3826 	 */
3827 	flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
3828 
3829 	trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
3830 						    allocated, newblock);
3831 
3832 	/* get_block() before submitting IO, split the extent */
3833 	if (flags & EXT4_GET_BLOCKS_PRE_IO) {
3834 		ret = ext4_split_convert_extents(handle, inode, map, ppath,
3835 					 flags | EXT4_GET_BLOCKS_CONVERT);
3836 		if (ret < 0) {
3837 			err = ret;
3838 			goto out2;
3839 		}
3840 		/*
3841 		 * shouldn't get a 0 return when splitting an extent unless
3842 		 * m_len is 0 (bug) or extent has been corrupted
3843 		 */
3844 		if (unlikely(ret == 0)) {
3845 			EXT4_ERROR_INODE(inode,
3846 					 "unexpected ret == 0, m_len = %u",
3847 					 map->m_len);
3848 			err = -EFSCORRUPTED;
3849 			goto out2;
3850 		}
3851 		map->m_flags |= EXT4_MAP_UNWRITTEN;
3852 		goto out;
3853 	}
3854 	/* IO end_io complete, convert the filled extent to written */
3855 	if (flags & EXT4_GET_BLOCKS_CONVERT) {
3856 		err = ext4_convert_unwritten_extents_endio(handle, inode, map,
3857 							   ppath);
3858 		if (err < 0)
3859 			goto out2;
3860 		ext4_update_inode_fsync_trans(handle, inode, 1);
3861 		goto map_out;
3862 	}
3863 	/* buffered IO cases */
3864 	/*
3865 	 * repeated fallocate creation request:
3866 	 * we already have an unwritten extent
3867 	 */
3868 	if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
3869 		map->m_flags |= EXT4_MAP_UNWRITTEN;
3870 		goto map_out;
3871 	}
3872 
3873 	/* buffered READ or buffered write_begin() lookup */
3874 	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3875 		/*
3876 		 * We have blocks reserved already.  We
3877 		 * return allocated blocks so that delalloc
3878 		 * won't do block reservation for us.  But
3879 		 * the buffer head will be unmapped so that
3880 		 * a read from the block returns 0s.
3881 		 */
3882 		map->m_flags |= EXT4_MAP_UNWRITTEN;
3883 		goto out1;
3884 	}
3885 
3886 	/*
3887 	 * Default case when (flags & EXT4_GET_BLOCKS_CREATE) == 1.
3888 	 * For buffered writes, at writepage time, etc.  Convert a
3889 	 * discovered unwritten extent to written.
3890 	 */
3891 	ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
3892 	if (ret < 0) {
3893 		err = ret;
3894 		goto out2;
3895 	}
3896 	ext4_update_inode_fsync_trans(handle, inode, 1);
3897 	/*
3898 	 * shouldn't get a 0 return when converting an unwritten extent
3899 	 * unless m_len is 0 (bug) or extent has been corrupted
3900 	 */
3901 	if (unlikely(ret == 0)) {
3902 		EXT4_ERROR_INODE(inode, "unexpected ret == 0, m_len = %u",
3903 				 map->m_len);
3904 		err = -EFSCORRUPTED;
3905 		goto out2;
3906 	}
3907 
3908 out:
3909 	allocated = ret;
3910 	map->m_flags |= EXT4_MAP_NEW;
3911 map_out:
3912 	map->m_flags |= EXT4_MAP_MAPPED;
3913 out1:
3914 	map->m_pblk = newblock;
3915 	if (allocated > map->m_len)
3916 		allocated = map->m_len;
3917 	map->m_len = allocated;
3918 	ext4_ext_show_leaf(inode, path);
3919 out2:
3920 	return err ? err : allocated;
3921 }
3922 
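/*
 * Illustrative sketch (editor's addition): the flag dispatch performed
 * by ext4_ext_handle_unwritten_extents() above, reduced to a decision
 * table. The enum and function are hypothetical stand-ins; the real
 * flags are the EXT4_GET_BLOCKS_* bits tested in the same order above.
 */
enum toy_action {
	TOY_SPLIT_PRE_IO,	/* split before submitting DIO */
	TOY_CONVERT_WRITTEN,	/* end_io: convert the filled part */
	TOY_KEEP_UNWRITTEN,	/* repeated fallocate: nothing to do */
	TOY_LOOKUP_ONLY,	/* buffered read / write_begin lookup */
	TOY_INIT_NOW		/* buffered write: convert to initialized */
};

static enum toy_action toy_unwritten_dispatch(int pre_io, int convert,
					      int unwrit_ext, int create)
{
	if (pre_io)
		return TOY_SPLIT_PRE_IO;
	if (convert)
		return TOY_CONVERT_WRITTEN;
	if (unwrit_ext)
		return TOY_KEEP_UNWRITTEN;
	if (!create)
		return TOY_LOOKUP_ONLY;
	return TOY_INIT_NOW;
}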
3923 /*
3924  * get_implied_cluster_alloc - check to see if the requested
3925  * allocation (in the map structure) overlaps with a cluster already
3926  * allocated in an extent.
3927  *	@sb	The filesystem superblock structure
3928  *	@map	The requested lblk->pblk mapping
3929  *	@ex	The extent structure which might contain an implied
3930  *			cluster allocation
3931  *
3932  * This function is called by ext4_ext_map_blocks() after we failed to
3933  * find blocks that were already in the inode's extent tree.  Hence,
3934  * we know that the beginning of the requested region cannot overlap
3935  * the extent from the inode's extent tree.  There are three cases we
3936  * want to catch.  The first is this case:
3937  *
3938  *		 |--- cluster # N--|
3939  *    |--- extent ---|	|---- requested region ---|
3940  *			|==========|
3941  *
3942  * The second case that we need to test for is this one:
3943  *
3944  *   |--------- cluster # N ----------------|
3945  *	   |--- requested region --|   |------- extent ----|
3946  *	   |=======================|
3947  *
3948  * The third case is when the requested region lies between two extents
3949  * within the same cluster:
3950  *          |------------- cluster # N-------------|
3951  * |----- ex -----|                  |---- ex_right ----|
3952  *                  |------ requested region ------|
3953  *                  |================|
3954  *
3955  * In each of the above cases, we need to set map->m_pblk and
3956  * map->m_len so that they correspond to the extent labelled as
3957  * "|====|" from cluster #N, since it is already in use for data in
3958  * cluster EXT4_B2C(sbi, map->m_lblk).	We will then return 1 to
3959  * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
3960  * as a new "allocated" block region.  Otherwise, we will return 0 and
3961  * ext4_ext_map_blocks() will then allocate one or more new clusters
3962  * by calling ext4_mb_new_blocks().
3963  */
3964 static int get_implied_cluster_alloc(struct super_block *sb,
3965 				     struct ext4_map_blocks *map,
3966 				     struct ext4_extent *ex,
3967 				     struct ext4_ext_path *path)
3968 {
3969 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3970 	ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
3971 	ext4_lblk_t ex_cluster_start, ex_cluster_end;
3972 	ext4_lblk_t rr_cluster_start;
3973 	ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3974 	ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
3975 	unsigned short ee_len = ext4_ext_get_actual_len(ex);
3976 
3977 	/* The extent passed in that we are trying to match */
3978 	ex_cluster_start = EXT4_B2C(sbi, ee_block);
3979 	ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
3980 
3981 	/* The requested region passed into ext4_map_blocks() */
3982 	rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
3983 
3984 	if ((rr_cluster_start == ex_cluster_end) ||
3985 	    (rr_cluster_start == ex_cluster_start)) {
3986 		if (rr_cluster_start == ex_cluster_end)
3987 			ee_start += ee_len - 1;
3988 		map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
3989 		map->m_len = min(map->m_len,
3990 				 (unsigned) sbi->s_cluster_ratio - c_offset);
3991 		/*
3992 		 * Check for and handle this case:
3993 		 *
3994 		 *   |--------- cluster # N-------------|
3995 		 *		       |------- extent ----|
3996 		 *	   |--- requested region ---|
3997 		 *	   |===========|
3998 		 */
3999 
4000 		if (map->m_lblk < ee_block)
4001 			map->m_len = min(map->m_len, ee_block - map->m_lblk);
4002 
4003 		/*
4004 		 * Check for the case where there is already another allocated
4005 		 * block to the right of 'ex' but before the end of the cluster.
4006 		 *
4007 		 *          |------------- cluster # N-------------|
4008 		 * |----- ex -----|                  |---- ex_right ----|
4009 		 *                  |------ requested region ------|
4010 		 *                  |================|
4011 		 */
4012 		if (map->m_lblk > ee_block) {
4013 			ext4_lblk_t next = ext4_ext_next_allocated_block(path);
4014 			map->m_len = min(map->m_len, next - map->m_lblk);
4015 		}
4016 
4017 		trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
4018 		return 1;
4019 	}
4020 
4021 	trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
4022 	return 0;
4023 }
4024 
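/*
 * Illustrative sketch (editor's addition): the bigalloc cluster
 * arithmetic used by get_implied_cluster_alloc(), with simplified
 * stand-ins for the EXT4_B2C() and EXT4_LBLK_COFF() macros. Assumes
 * the cluster ratio is a power of two, as it is in ext4.
 */
#include <assert.h>

static unsigned int toy_b2c(unsigned int blk, unsigned int cluster_bits)
{
	return blk >> cluster_bits;		 /* block -> cluster number */
}

static unsigned int toy_coff(unsigned int blk, unsigned int cluster_bits)
{
	return blk & ((1U << cluster_bits) - 1); /* offset within cluster */
}

static void toy_implied_cluster_example(void)
{
	unsigned int cluster_bits = 4;		 /* 16 blocks per cluster */
	unsigned int ee_block = 100, ee_len = 8; /* extent: blocks 100..107 */
	unsigned int lblk = 110;		 /* unmapped; first case above */

	/* cluster 6 covers blocks 96..111, so the extent's tail and the
	 * requested block share a cluster that is already allocated */
	assert(toy_b2c(ee_block + ee_len - 1, cluster_bits) == 6);
	assert(toy_b2c(lblk, cluster_bits) == 6);
	/* m_pblk would be placed at offset 14 within that cluster */
	assert(toy_coff(lblk, cluster_bits) == 14);
}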
4025 
4026 /*
4027  * Block allocation/map/preallocation routine for extent-based files
4028  *
4029  *
4030  * Needs to be called with
4031  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system blocks
4032  * (i.e., create is zero), otherwise with down_write(&EXT4_I(inode)->i_data_sem)
4033  *
4034  * return > 0, number of blocks already mapped/allocated
4035  *          if create == 0 and these are pre-allocated blocks
4036  *          	buffer head is unmapped
4037  *          otherwise blocks are mapped
4038  *
4039  * return = 0, if plain lookup failed (blocks have not been allocated)
4040  *          buffer head is unmapped
4041  *
4042  * return < 0, error case.
4043  */
4044 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4045 			struct ext4_map_blocks *map, int flags)
4046 {
4047 	struct ext4_ext_path *path = NULL;
4048 	struct ext4_extent newex, *ex, ex2;
4049 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4050 	ext4_fsblk_t newblock = 0, pblk;
4051 	int err = 0, depth, ret;
4052 	unsigned int allocated = 0, offset = 0;
4053 	unsigned int allocated_clusters = 0;
4054 	struct ext4_allocation_request ar;
4055 	ext4_lblk_t cluster_offset;
4056 
4057 	ext_debug(inode, "blocks %u/%u requested\n", map->m_lblk, map->m_len);
4058 	trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
4059 
4060 	/* find extent for this block */
4061 	path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
4062 	if (IS_ERR(path)) {
4063 		err = PTR_ERR(path);
4064 		path = NULL;
4065 		goto out;
4066 	}
4067 
4068 	depth = ext_depth(inode);
4069 
4070 	/*
4071 	 * a consistent leaf must not be empty;
4072 	 * this situation is possible, though, _during_ tree modification;
4073 	 * this is why the assertion can't be put in ext4_find_extent()
4074 	 */
4075 	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
4076 		EXT4_ERROR_INODE(inode, "bad extent address "
4077 				 "lblock: %lu, depth: %d pblock %lld",
4078 				 (unsigned long) map->m_lblk, depth,
4079 				 path[depth].p_block);
4080 		err = -EFSCORRUPTED;
4081 		goto out;
4082 	}
4083 
4084 	ex = path[depth].p_ext;
4085 	if (ex) {
4086 		ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4087 		ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4088 		unsigned short ee_len;
4089 
4090 
4091 		/*
4092 		 * unwritten extents are treated as holes, except that
4093 		 * we split out initialized portions during a write.
4094 		 */
4095 		ee_len = ext4_ext_get_actual_len(ex);
4096 
4097 		trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
4098 
4099 		/* if found extent covers block, simply return it */
4100 		if (in_range(map->m_lblk, ee_block, ee_len)) {
4101 			newblock = map->m_lblk - ee_block + ee_start;
4102 			/* number of remaining blocks in the extent */
4103 			allocated = ee_len - (map->m_lblk - ee_block);
4104 			ext_debug(inode, "%u fit into %u:%d -> %llu\n",
4105 				  map->m_lblk, ee_block, ee_len, newblock);
4106 
4107 			/*
4108 			 * If the extent is initialized check whether the
4109 			 * caller wants to convert it to unwritten.
4110 			 */
4111 			if ((!ext4_ext_is_unwritten(ex)) &&
4112 			    (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
4113 				err = convert_initialized_extent(handle,
4114 					inode, map, &path, &allocated);
4115 				goto out;
4116 			} else if (!ext4_ext_is_unwritten(ex)) {
4117 				map->m_flags |= EXT4_MAP_MAPPED;
4118 				map->m_pblk = newblock;
4119 				if (allocated > map->m_len)
4120 					allocated = map->m_len;
4121 				map->m_len = allocated;
4122 				ext4_ext_show_leaf(inode, path);
4123 				goto out;
4124 			}
4125 
4126 			ret = ext4_ext_handle_unwritten_extents(
4127 				handle, inode, map, &path, flags,
4128 				allocated, newblock);
4129 			if (ret < 0)
4130 				err = ret;
4131 			else
4132 				allocated = ret;
4133 			goto out;
4134 		}
4135 	}
4136 
4137 	/*
4138 	 * requested block isn't allocated yet;
4139 	 * we can't try to create blocks if the create flag is zero
4140 	 */
4141 	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
4142 		ext4_lblk_t hole_start, hole_len;
4143 
4144 		hole_start = map->m_lblk;
4145 		hole_len = ext4_ext_determine_hole(inode, path, &hole_start);
4146 		/*
4147 		 * put just found gap into cache to speed up
4148 		 * subsequent requests
4149 		 */
4150 		ext4_ext_put_gap_in_cache(inode, hole_start, hole_len);
4151 
4152 		/* Update hole_len to reflect hole size after map->m_lblk */
4153 		if (hole_start != map->m_lblk)
4154 			hole_len -= map->m_lblk - hole_start;
4155 		map->m_pblk = 0;
4156 		map->m_len = min_t(unsigned int, map->m_len, hole_len);
4157 
4158 		goto out;
4159 	}
4160 
4161 	/*
4162 	 * Okay, we need to do block allocation.
4163 	 */
4164 	newex.ee_block = cpu_to_le32(map->m_lblk);
4165 	cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4166 
4167 	/*
4168 	 * If we are doing bigalloc, check to see if the extent returned
4169 	 * by ext4_find_extent() implies a cluster we can use.
4170 	 */
4171 	if (cluster_offset && ex &&
4172 	    get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
4173 		ar.len = allocated = map->m_len;
4174 		newblock = map->m_pblk;
4175 		goto got_allocated_blocks;
4176 	}
4177 
4178 	/* find neighbour allocated blocks */
4179 	ar.lleft = map->m_lblk;
4180 	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
4181 	if (err)
4182 		goto out;
4183 	ar.lright = map->m_lblk;
4184 	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4185 	if (err < 0)
4186 		goto out;
4187 
4188 	/* Check if the extent after searching to the right implies a
4189 	 * cluster we can use. */
4190 	if ((sbi->s_cluster_ratio > 1) && err &&
4191 	    get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) {
4192 		ar.len = allocated = map->m_len;
4193 		newblock = map->m_pblk;
4194 		goto got_allocated_blocks;
4195 	}
4196 
4197 	/*
4198 	 * See if request is beyond maximum number of blocks we can have in
4199 	 * a single extent. For an initialized extent this limit is
4200 	 * EXT_INIT_MAX_LEN and for an unwritten extent this limit is
4201 	 * EXT_UNWRITTEN_MAX_LEN.
4202 	 */
4203 	if (map->m_len > EXT_INIT_MAX_LEN &&
4204 	    !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
4205 		map->m_len = EXT_INIT_MAX_LEN;
4206 	else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
4207 		 (flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
4208 		map->m_len = EXT_UNWRITTEN_MAX_LEN;
4209 
4210 	/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
4211 	newex.ee_len = cpu_to_le16(map->m_len);
4212 	err = ext4_ext_check_overlap(sbi, inode, &newex, path);
4213 	if (err)
4214 		allocated = ext4_ext_get_actual_len(&newex);
4215 	else
4216 		allocated = map->m_len;
4217 
4218 	/* allocate new block */
4219 	ar.inode = inode;
4220 	ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4221 	ar.logical = map->m_lblk;
4222 	/*
4223 	 * We calculate the offset from the beginning of the cluster
4224 	 * for the logical block number, since when we allocate a
4225 	 * physical cluster, the physical block should start at the
4226 	 * same offset from the beginning of the cluster.  This is
4227 	 * needed so that future calls to get_implied_cluster_alloc()
4228 	 * work correctly.
4229 	 */
4230 	offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4231 	ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
4232 	ar.goal -= offset;
4233 	ar.logical -= offset;
4234 	if (S_ISREG(inode->i_mode))
4235 		ar.flags = EXT4_MB_HINT_DATA;
4236 	else
4237 		/* disable in-core preallocation for non-regular files */
4238 		ar.flags = 0;
4239 	if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4240 		ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4241 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
4242 		ar.flags |= EXT4_MB_DELALLOC_RESERVED;
4243 	if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
4244 		ar.flags |= EXT4_MB_USE_RESERVED;
4245 	newblock = ext4_mb_new_blocks(handle, &ar, &err);
4246 	if (!newblock)
4247 		goto out;
4248 	allocated_clusters = ar.len;
4249 	ar.len = EXT4_C2B(sbi, ar.len) - offset;
4250 	ext_debug(inode, "allocate new block: goal %llu, found %llu/%u, requested %u\n",
4251 		  ar.goal, newblock, ar.len, allocated);
4252 	if (ar.len > allocated)
4253 		ar.len = allocated;
4254 
4255 got_allocated_blocks:
4256 	/* try to insert new extent into found leaf and return */
4257 	pblk = newblock + offset;
4258 	ext4_ext_store_pblock(&newex, pblk);
4259 	newex.ee_len = cpu_to_le16(ar.len);
4260 	/* Mark unwritten */
4261 	if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
4262 		ext4_ext_mark_unwritten(&newex);
4263 		map->m_flags |= EXT4_MAP_UNWRITTEN;
4264 	}
4265 
4266 	err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags);
4267 	if (err) {
4268 		if (allocated_clusters) {
4269 			int fb_flags = 0;
4270 
4271 			/*
4272 			 * free data blocks we just allocated.
4273 			 * not a good idea to call discard here directly,
4274 			 * but otherwise we'd need to call it on every free().
4275 			 */
4276 			ext4_discard_preallocations(inode, 0);
4277 			if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
4278 				fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE;
4279 			ext4_free_blocks(handle, inode, NULL, newblock,
4280 					 EXT4_C2B(sbi, allocated_clusters),
4281 					 fb_flags);
4282 		}
4283 		goto out;
4284 	}
4285 
4286 	/*
4287 	 * Reduce the reserved cluster count to reflect successful deferred
4288 	 * allocation of delayed allocated clusters or direct allocation of
4289 	 * clusters discovered to be delayed allocated.  Once allocated, a
4290 	 * cluster is not included in the reserved count.
4291 	 */
4292 	if (test_opt(inode->i_sb, DELALLOC) && allocated_clusters) {
4293 		if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
4294 			/*
4295 			 * When allocating delayed allocated clusters, simply
4296 			 * reduce the reserved cluster count and claim quota
4297 			 */
4298 			ext4_da_update_reserve_space(inode, allocated_clusters,
4299 							1);
4300 		} else {
4301 			ext4_lblk_t lblk, len;
4302 			unsigned int n;
4303 
4304 			/*
4305 			 * When allocating non-delayed allocated clusters
4306 			 * (from fallocate, filemap, DIO, or clusters
4307 			 * allocated when delalloc has been disabled by
4308 			 * ext4_nonda_switch), reduce the reserved cluster
4309 			 * count by the number of allocated clusters that
4310 			 * have previously been delayed allocated.  Quota
4311 			 * has been claimed by ext4_mb_new_blocks() above,
4312 			 * so release the quota reservations made for any
4313 			 * previously delayed allocated clusters.
4314 			 */
4315 			lblk = EXT4_LBLK_CMASK(sbi, map->m_lblk);
4316 			len = allocated_clusters << sbi->s_cluster_bits;
4317 			n = ext4_es_delayed_clu(inode, lblk, len);
4318 			if (n > 0)
4319 				ext4_da_update_reserve_space(inode, (int) n, 0);
4320 		}
4321 	}
4322 
4323 	/*
4324 	 * Cache the extent and update transaction to commit on fdatasync only
4325 	 * when it is _not_ an unwritten extent.
4326 	 */
4327 	if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
4328 		ext4_update_inode_fsync_trans(handle, inode, 1);
4329 	else
4330 		ext4_update_inode_fsync_trans(handle, inode, 0);
4331 
4332 	map->m_flags |= (EXT4_MAP_NEW | EXT4_MAP_MAPPED);
4333 	map->m_pblk = pblk;
4334 	map->m_len = ar.len;
4335 	allocated = map->m_len;
4336 	ext4_ext_show_leaf(inode, path);
4337 out:
4338 	ext4_ext_drop_refs(path);
4339 	kfree(path);
4340 
4341 	trace_ext4_ext_map_blocks_exit(inode, flags, map,
4342 				       err ? err : allocated);
4343 	return err ? err : allocated;
4344 }
4345 
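/*
 * Illustrative sketch (editor's addition): the per-extent length clamp
 * performed by ext4_ext_map_blocks() above. The numeric limits are the
 * EXT_INIT_MAX_LEN/EXT_UNWRITTEN_MAX_LEN values implied by the 16-bit
 * on-disk length field, whose high bit marks an extent as unwritten.
 */
static unsigned int toy_clamp_extent_len(unsigned int len, int unwritten)
{
	const unsigned int init_max = 32768;	/* EXT_INIT_MAX_LEN */
	const unsigned int unwrit_max = 32767;	/* EXT_UNWRITTEN_MAX_LEN */
	unsigned int max = unwritten ? unwrit_max : init_max;

	/* e.g. a 100000-block initialized request is clamped to 32768 */
	return len > max ? max : len;
}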
4346 int ext4_ext_truncate(handle_t *handle, struct inode *inode)
4347 {
4348 	struct super_block *sb = inode->i_sb;
4349 	ext4_lblk_t last_block;
4350 	int err = 0;
4351 
4352 	/*
4353 	 * TODO: optimization is possible here.
4354 	 * Probably we need not scan at all,
4355 	 * because page truncation is enough.
4356 	 */
4357 
4358 	/* we have to know where to truncate from in crash case */
4359 	EXT4_I(inode)->i_disksize = inode->i_size;
4360 	err = ext4_mark_inode_dirty(handle, inode);
4361 	if (err)
4362 		return err;
4363 
4364 	last_block = (inode->i_size + sb->s_blocksize - 1)
4365 			>> EXT4_BLOCK_SIZE_BITS(sb);
4366 retry:
4367 	err = ext4_es_remove_extent(inode, last_block,
4368 				    EXT_MAX_BLOCKS - last_block);
4369 	if (err == -ENOMEM) {
4370 		cond_resched();
4371 		congestion_wait(BLK_RW_ASYNC, HZ/50);
4372 		goto retry;
4373 	}
4374 	if (err)
4375 		return err;
4376 retry_remove_space:
4377 	err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
4378 	if (err == -ENOMEM) {
4379 		cond_resched();
4380 		congestion_wait(BLK_RW_ASYNC, HZ/50);
4381 		goto retry_remove_space;
4382 	}
4383 	return err;
4384 }
4385 
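/*
 * Illustrative sketch (editor's addition): the rounding used above to
 * find the first whole block past i_size, i.e. ceil(i_size / blocksize).
 */
static unsigned long long toy_first_block_past(unsigned long long i_size,
					       unsigned int blkbits)
{
	return (i_size + (1ULL << blkbits) - 1) >> blkbits;
}
/* e.g. with 4K blocks (blkbits == 12): i_size 4096 -> 1, 4097 -> 2 */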
4386 static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
4387 				  ext4_lblk_t len, loff_t new_size,
4388 				  int flags)
4389 {
4390 	struct inode *inode = file_inode(file);
4391 	handle_t *handle;
4392 	int ret = 0, ret2 = 0, ret3 = 0;
4393 	int retries = 0;
4394 	int depth = 0;
4395 	struct ext4_map_blocks map;
4396 	unsigned int credits;
4397 	loff_t epos;
4398 
4399 	BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS));
4400 	map.m_lblk = offset;
4401 	map.m_len = len;
4402 	/*
4403 	 * Don't normalize the request if it can fit in one extent so
4404 	 * that it doesn't get unnecessarily split into multiple
4405 	 * extents.
4406 	 */
4407 	if (len <= EXT_UNWRITTEN_MAX_LEN)
4408 		flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
4409 
4410 	/*
4411 	 * credits to insert 1 extent into extent tree
4412 	 */
4413 	credits = ext4_chunk_trans_blocks(inode, len);
4414 	depth = ext_depth(inode);
4415 
4416 retry:
4417 	while (len) {
4418 		/*
4419 		 * Recalculate credits when extent tree depth changes.
4420 		 */
4421 		if (depth != ext_depth(inode)) {
4422 			credits = ext4_chunk_trans_blocks(inode, len);
4423 			depth = ext_depth(inode);
4424 		}
4425 
4426 		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
4427 					    credits);
4428 		if (IS_ERR(handle)) {
4429 			ret = PTR_ERR(handle);
4430 			break;
4431 		}
4432 		ret = ext4_map_blocks(handle, inode, &map, flags);
4433 		if (ret <= 0) {
4434 			ext4_debug("inode #%lu: block %u: len %u: "
4435 				   "ext4_ext_map_blocks returned %d",
4436 				   inode->i_ino, map.m_lblk,
4437 				   map.m_len, ret);
4438 			ext4_mark_inode_dirty(handle, inode);
4439 			ext4_journal_stop(handle);
4440 			break;
4441 		}
4442 		/*
4443 		 * allow a full retry cycle for any remaining allocations
4444 		 */
4445 		retries = 0;
4446 		map.m_lblk += ret;
4447 		map.m_len = len = len - ret;
4448 		epos = (loff_t)map.m_lblk << inode->i_blkbits;
4449 		inode->i_ctime = current_time(inode);
4450 		if (new_size) {
4451 			if (epos > new_size)
4452 				epos = new_size;
4453 			if (ext4_update_inode_size(inode, epos) & 0x1)
4454 				inode->i_mtime = inode->i_ctime;
4455 		}
4456 		ret2 = ext4_mark_inode_dirty(handle, inode);
4457 		ext4_update_inode_fsync_trans(handle, inode, 1);
4458 		ret3 = ext4_journal_stop(handle);
4459 		ret2 = ret3 ? ret3 : ret2;
4460 		if (unlikely(ret2))
4461 			break;
4462 	}
4463 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
4464 		goto retry;
4465 
4466 	return ret > 0 ? ret2 : ret;
4467 }
4468 
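/*
 * Illustrative sketch (editor's addition): the control flow of the
 * chunked allocation loop in ext4_alloc_file_blocks() above.
 * toy_alloc_chunk() is a hypothetical stand-in for one journalled
 * ext4_map_blocks() call; it returns how many blocks it mapped.
 */
static unsigned int toy_alloc_chunk(unsigned int lblk, unsigned int len)
{
	(void)lblk;
	return len > 8 ? 8 : len;	/* pretend we map at most 8 blocks */
}

static int toy_alloc_file_blocks(unsigned int lblk, unsigned int len)
{
	while (len) {
		unsigned int done = toy_alloc_chunk(lblk, len);

		if (done == 0)
			return -1;	/* allocation failed */
		/* advance past the mapped chunk and shrink the request,
		 * as the real loop does with map.m_lblk and map.m_len */
		lblk += done;
		len -= done;
	}
	return 0;
}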
4469 static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len);
4470 
4471 static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len);
4472 
4473 static long ext4_zero_range(struct file *file, loff_t offset,
4474 			    loff_t len, int mode)
4475 {
4476 	struct inode *inode = file_inode(file);
4477 	handle_t *handle = NULL;
4478 	unsigned int max_blocks;
4479 	loff_t new_size = 0;
4480 	int ret = 0;
4481 	int flags;
4482 	int credits;
4483 	int partial_begin, partial_end;
4484 	loff_t start, end;
4485 	ext4_lblk_t lblk;
4486 	unsigned int blkbits = inode->i_blkbits;
4487 
4488 	trace_ext4_zero_range(inode, offset, len, mode);
4489 
4490 	/* Call ext4_force_commit to flush all data in case of data=journal. */
4491 	if (ext4_should_journal_data(inode)) {
4492 		ret = ext4_force_commit(inode->i_sb);
4493 		if (ret)
4494 			return ret;
4495 	}
4496 
4497 	/*
4498 	 * Round up the offset. This is not fallocate; we need to zero out
4499 	 * blocks, so convert the interior, block-aligned part of the range to
4500 	 * unwritten extents and possibly manually zero out the unaligned parts
4501 	 * of the range.
4502 	 */
4503 	start = round_up(offset, 1 << blkbits);
4504 	end = round_down((offset + len), 1 << blkbits);
4505 
4506 	if (start < offset || end > offset + len)
4507 		return -EINVAL;
4508 	partial_begin = offset & ((1 << blkbits) - 1);
4509 	partial_end = (offset + len) & ((1 << blkbits) - 1);
4510 
4511 	lblk = start >> blkbits;
4512 	max_blocks = (end >> blkbits);
4513 	if (max_blocks < lblk)
4514 		max_blocks = 0;
4515 	else
4516 		max_blocks -= lblk;
4517 
4518 	inode_lock(inode);
4519 
4520 	/*
4521 	 * Indirect files do not support unwritten extents
4522 	 */
4523 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4524 		ret = -EOPNOTSUPP;
4525 		goto out_mutex;
4526 	}
4527 
4528 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4529 	    (offset + len > inode->i_size ||
4530 	     offset + len > EXT4_I(inode)->i_disksize)) {
4531 		new_size = offset + len;
4532 		ret = inode_newsize_ok(inode, new_size);
4533 		if (ret)
4534 			goto out_mutex;
4535 	}
4536 
4537 	flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
4538 
4539 	/* Wait for all existing DIO workers; newcomers will block on i_mutex */
4540 	inode_dio_wait(inode);
4541 
4542 	/* Preallocate the range including the unaligned edges */
4543 	if (partial_begin || partial_end) {
4544 		ret = ext4_alloc_file_blocks(file,
4545 				round_down(offset, 1 << blkbits) >> blkbits,
4546 				(round_up((offset + len), 1 << blkbits) -
4547 				 round_down(offset, 1 << blkbits)) >> blkbits,
4548 				new_size, flags);
4549 		if (ret)
4550 			goto out_mutex;
4551 
4552 	}
4553 
4554 	/* Zero range excluding the unaligned edges */
4555 	if (max_blocks > 0) {
4556 		flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
4557 			  EXT4_EX_NOCACHE);
4558 
4559 		/*
4560 		 * Prevent page faults from reinstantiating pages we have
4561 		 * released from page cache.
4562 		 */
4563 		down_write(&EXT4_I(inode)->i_mmap_sem);
4564 
4565 		ret = ext4_break_layouts(inode);
4566 		if (ret) {
4567 			up_write(&EXT4_I(inode)->i_mmap_sem);
4568 			goto out_mutex;
4569 		}
4570 
4571 		ret = ext4_update_disksize_before_punch(inode, offset, len);
4572 		if (ret) {
4573 			up_write(&EXT4_I(inode)->i_mmap_sem);
4574 			goto out_mutex;
4575 		}
4576 		/* Now release the pages and zero block aligned part of pages */
4577 		truncate_pagecache_range(inode, start, end - 1);
4578 		inode->i_mtime = inode->i_ctime = current_time(inode);
4579 
4580 		ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
4581 					     flags);
4582 		up_write(&EXT4_I(inode)->i_mmap_sem);
4583 		if (ret)
4584 			goto out_mutex;
4585 	}
4586 	if (!partial_begin && !partial_end)
4587 		goto out_mutex;
4588 
4589 	/*
4590 	 * In the worst case we have to write out two nonadjacent unwritten
4591 	 * blocks and update the inode
4592 	 */
4593 	credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1;
4594 	if (ext4_should_journal_data(inode))
4595 		credits += 2;
4596 	handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
4597 	if (IS_ERR(handle)) {
4598 		ret = PTR_ERR(handle);
4599 		ext4_std_error(inode->i_sb, ret);
4600 		goto out_mutex;
4601 	}
4602 
4603 	inode->i_mtime = inode->i_ctime = current_time(inode);
4604 	if (new_size)
4605 		ext4_update_inode_size(inode, new_size);
4606 	ret = ext4_mark_inode_dirty(handle, inode);
4607 	if (unlikely(ret))
4608 		goto out_handle;
4609 	ext4_fc_track_range(handle, inode, offset >> inode->i_sb->s_blocksize_bits,
4610 			(offset + len - 1) >> inode->i_sb->s_blocksize_bits);
4611 	/* Zero out partial block at the edges of the range */
4612 	ret = ext4_zero_partial_blocks(handle, inode, offset, len);
4613 	if (ret >= 0)
4614 		ext4_update_inode_fsync_trans(handle, inode, 1);
4615 
4616 	if (file->f_flags & O_SYNC)
4617 		ext4_handle_sync(handle);
4618 
4619 out_handle:
4620 	ext4_journal_stop(handle);
4621 out_mutex:
4622 	inode_unlock(inode);
4623 	return ret;
4624 }
4625 
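/*
 * Illustrative sketch (editor's addition): the edge arithmetic used by
 * ext4_zero_range() above, with a worked example for 4K blocks.
 */
#include <assert.h>

static void toy_zero_range_edges(void)
{
	unsigned int blkbits = 12;		/* 4K blocks */
	unsigned long long bs = 1ULL << blkbits;
	unsigned long long offset = 1000, len = 10000;

	/* interior, block-aligned part: converted to unwritten extents */
	unsigned long long start = (offset + bs - 1) & ~(bs - 1); /* round_up */
	unsigned long long end = (offset + len) & ~(bs - 1);	  /* round_down */
	/* unaligned edges: zeroed out manually at the end */
	unsigned long long partial_begin = offset & (bs - 1);
	unsigned long long partial_end = (offset + len) & (bs - 1);

	assert(start == 4096 && end == 8192);	/* one interior block */
	assert(partial_begin == 1000 && partial_end == 2808);
}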
4626 /*
4627  * preallocate space for a file. This implements ext4's fallocate file
4628  * operation, which gets called from sys_fallocate system call.
4629  * For block-mapped files, posix_fallocate should fall back to the method
4630  * of writing zeroes to the required new blocks (the same behavior which is
4631  * expected for file systems which do not support fallocate() system call).
4632  */
4633 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4634 {
4635 	struct inode *inode = file_inode(file);
4636 	loff_t new_size = 0;
4637 	unsigned int max_blocks;
4638 	int ret = 0;
4639 	int flags;
4640 	ext4_lblk_t lblk;
4641 	unsigned int blkbits = inode->i_blkbits;
4642 
4643 	/*
4644 	 * Encrypted inodes can't handle collapse range or insert
4645 	 * range since we would need to re-encrypt blocks with a
4646 	 * different IV or XTS tweak (which are based on the logical
4647 	 * block number).
4648 	 */
4649 	if (IS_ENCRYPTED(inode) &&
4650 	    (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
4651 		return -EOPNOTSUPP;
4652 
4653 	/* Return error if mode is not supported */
4654 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
4655 		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
4656 		     FALLOC_FL_INSERT_RANGE))
4657 		return -EOPNOTSUPP;
4658 
4659 	ext4_fc_start_update(inode);
4660 
4661 	if (mode & FALLOC_FL_PUNCH_HOLE) {
4662 		ret = ext4_punch_hole(inode, offset, len);
4663 		goto exit;
4664 	}
4665 
4666 	ret = ext4_convert_inline_data(inode);
4667 	if (ret)
4668 		goto exit;
4669 
4670 	if (mode & FALLOC_FL_COLLAPSE_RANGE) {
4671 		ret = ext4_collapse_range(inode, offset, len);
4672 		goto exit;
4673 	}
4674 
4675 	if (mode & FALLOC_FL_INSERT_RANGE) {
4676 		ret = ext4_insert_range(inode, offset, len);
4677 		goto exit;
4678 	}
4679 
4680 	if (mode & FALLOC_FL_ZERO_RANGE) {
4681 		ret = ext4_zero_range(file, offset, len, mode);
4682 		goto exit;
4683 	}
4684 	trace_ext4_fallocate_enter(inode, offset, len, mode);
4685 	lblk = offset >> blkbits;
4686 
4687 	max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
4688 	flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
4689 
4690 	inode_lock(inode);
4691 
4692 	/*
4693 	 * We only support preallocation for extent-based files
4694 	 */
4695 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4696 		ret = -EOPNOTSUPP;
4697 		goto out;
4698 	}
4699 
4700 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4701 	    (offset + len > inode->i_size ||
4702 	     offset + len > EXT4_I(inode)->i_disksize)) {
4703 		new_size = offset + len;
4704 		ret = inode_newsize_ok(inode, new_size);
4705 		if (ret)
4706 			goto out;
4707 	}
4708 
4709 	/* Wait for all existing DIO workers; newcomers will block on i_mutex */
4710 	inode_dio_wait(inode);
4711 
4712 	ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
4713 	if (ret)
4714 		goto out;
4715 
4716 	if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
4717 		ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
4718 					EXT4_I(inode)->i_sync_tid);
4719 	}
4720 out:
4721 	inode_unlock(inode);
4722 	trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
4723 exit:
4724 	ext4_fc_stop_update(inode);
4725 	return ret;
4726 }
4727 
4728 /*
4729  * This function converts a range of blocks to written extents.
4730  * The caller of this function will pass the start offset and the size;
4731  * all unwritten extents within this range will be converted to
4732  * written extents.
4733  *
4734  * This function is called from the direct IO end_io callback
4735  * function, to convert the fallocated extents after IO is completed.
4736  * Returns 0 on success.
4737  */
4738 int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
4739 				   loff_t offset, ssize_t len)
4740 {
4741 	unsigned int max_blocks;
4742 	int ret = 0, ret2 = 0, ret3 = 0;
4743 	struct ext4_map_blocks map;
4744 	unsigned int blkbits = inode->i_blkbits;
4745 	unsigned int credits = 0;
4746 
4747 	map.m_lblk = offset >> blkbits;
4748 	max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
4749 
4750 	if (!handle) {
4751 		/*
4752 		 * credits to insert 1 extent into extent tree
4753 		 */
4754 		credits = ext4_chunk_trans_blocks(inode, max_blocks);
4755 	}
4756 	while (ret >= 0 && ret < max_blocks) {
4757 		map.m_lblk += ret;
4758 		map.m_len = (max_blocks -= ret);
4759 		if (credits) {
4760 			handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
4761 						    credits);
4762 			if (IS_ERR(handle)) {
4763 				ret = PTR_ERR(handle);
4764 				break;
4765 			}
4766 		}
4767 		ret = ext4_map_blocks(handle, inode, &map,
4768 				      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
4769 		if (ret <= 0)
4770 			ext4_warning(inode->i_sb,
4771 				     "inode #%lu: block %u: len %u: "
4772 				     "ext4_ext_map_blocks returned %d",
4773 				     inode->i_ino, map.m_lblk,
4774 				     map.m_len, ret);
4775 		ret2 = ext4_mark_inode_dirty(handle, inode);
4776 		if (credits) {
4777 			ret3 = ext4_journal_stop(handle);
4778 			if (unlikely(ret3))
4779 				ret2 = ret3;
4780 		}
4781 
4782 		if (ret <= 0 || ret2)
4783 			break;
4784 	}
4785 	return ret > 0 ? ret2 : ret;
4786 }
4787 
4788 int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end)
4789 {
4790 	int ret = 0, err = 0;
4791 	struct ext4_io_end_vec *io_end_vec;
4792 
4793 	/*
4794 	 * This is somewhat ugly but the idea is clear: when a transaction is
4795 	 * reserved, everything goes into it. Otherwise we prefer to start
4796 	 * several smaller transactions, converting each extent separately.
4797 	 */
4798 	if (handle) {
4799 		handle = ext4_journal_start_reserved(handle,
4800 						     EXT4_HT_EXT_CONVERT);
4801 		if (IS_ERR(handle))
4802 			return PTR_ERR(handle);
4803 	}
4804 
4805 	list_for_each_entry(io_end_vec, &io_end->list_vec, list) {
4806 		ret = ext4_convert_unwritten_extents(handle, io_end->inode,
4807 						     io_end_vec->offset,
4808 						     io_end_vec->size);
4809 		if (ret)
4810 			break;
4811 	}
4812 
4813 	if (handle)
4814 		err = ext4_journal_stop(handle);
4815 
4816 	return ret < 0 ? ret : err;
4817 }
4818 
4819 static int ext4_iomap_xattr_fiemap(struct inode *inode, struct iomap *iomap)
4820 {
4821 	__u64 physical = 0;
4822 	__u64 length = 0;
4823 	int blockbits = inode->i_sb->s_blocksize_bits;
4824 	int error = 0;
4825 	u16 iomap_type;
4826 
4827 	/* in-inode? */
4828 	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4829 		struct ext4_iloc iloc;
4830 		int offset;	/* offset of xattr in inode */
4831 
4832 		error = ext4_get_inode_loc(inode, &iloc);
4833 		if (error)
4834 			return error;
4835 		physical = (__u64)iloc.bh->b_blocknr << blockbits;
4836 		offset = EXT4_GOOD_OLD_INODE_SIZE +
4837 				EXT4_I(inode)->i_extra_isize;
4838 		physical += offset;
4839 		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
4840 		brelse(iloc.bh);
4841 		iomap_type = IOMAP_INLINE;
4842 	} else if (EXT4_I(inode)->i_file_acl) { /* external block */
4843 		physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
4844 		length = inode->i_sb->s_blocksize;
4845 		iomap_type = IOMAP_MAPPED;
4846 	} else {
4847 		/* no in-inode or external block for xattr, so return -ENOENT */
4848 		error = -ENOENT;
4849 		goto out;
4850 	}
4851 
4852 	iomap->addr = physical;
4853 	iomap->offset = 0;
4854 	iomap->length = length;
4855 	iomap->type = iomap_type;
4856 	iomap->flags = 0;
4857 out:
4858 	return error;
4859 }
4860 
4861 static int ext4_iomap_xattr_begin(struct inode *inode, loff_t offset,
4862 				  loff_t length, unsigned flags,
4863 				  struct iomap *iomap, struct iomap *srcmap)
4864 {
4865 	int error;
4866 
4867 	error = ext4_iomap_xattr_fiemap(inode, iomap);
4868 	if (error == 0 && (offset >= iomap->length))
4869 		error = -ENOENT;
4870 	return error;
4871 }
4872 
4873 static const struct iomap_ops ext4_iomap_xattr_ops = {
4874 	.iomap_begin		= ext4_iomap_xattr_begin,
4875 };
4876 
4877 static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len)
4878 {
4879 	u64 maxbytes;
4880 
4881 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4882 		maxbytes = inode->i_sb->s_maxbytes;
4883 	else
4884 		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
4885 
4886 	if (*len == 0)
4887 		return -EINVAL;
4888 	if (start > maxbytes)
4889 		return -EFBIG;
4890 
4891 	/*
4892 	 * Shrink request scope to what the fs can actually handle.
4893 	 */
4894 	if (*len > maxbytes || (maxbytes - *len) < start)
4895 		*len = maxbytes - start;
4896 	return 0;
4897 }
4898 
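/*
 * Illustrative sketch (editor's addition): the overflow-safe clamp in
 * ext4_fiemap_check_ranges() above. Writing the condition as
 * (maxbytes - *len) < start avoids overflowing start + *len.
 */
#include <errno.h>

static int toy_check_ranges(unsigned long long start,
			    unsigned long long *len,
			    unsigned long long maxbytes)
{
	if (*len == 0)
		return -EINVAL;
	if (start > maxbytes)
		return -EFBIG;
	/* shrink the request to what fits below maxbytes */
	if (*len > maxbytes || maxbytes - *len < start)
		*len = maxbytes - start;
	return 0;
}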
4899 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4900 		u64 start, u64 len)
4901 {
4902 	int error = 0;
4903 
4904 	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
4905 		error = ext4_ext_precache(inode);
4906 		if (error)
4907 			return error;
4908 		fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
4909 	}
4910 
4911 	/*
4912 	 * For block-mapped files the maximum size limit could be smaller than
4913 	 * s_maxbytes, so check len here manually instead of just relying on the
4914 	 * generic check.
4915 	 */
4916 	error = ext4_fiemap_check_ranges(inode, start, &len);
4917 	if (error)
4918 		return error;
4919 
4920 	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
4921 		fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
4922 		return iomap_fiemap(inode, fieinfo, start, len,
4923 				    &ext4_iomap_xattr_ops);
4924 	}
4925 
4926 	return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops);
4927 }
4928 
4929 int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
4930 		      __u64 start, __u64 len)
4931 {
4932 	ext4_lblk_t start_blk, len_blks;
4933 	__u64 last_blk;
4934 	int error = 0;
4935 
4936 	if (ext4_has_inline_data(inode)) {
4937 		int has_inline;
4938 
4939 		down_read(&EXT4_I(inode)->xattr_sem);
4940 		has_inline = ext4_has_inline_data(inode);
4941 		up_read(&EXT4_I(inode)->xattr_sem);
4942 		if (has_inline)
4943 			return 0;
4944 	}
4945 
4946 	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
4947 		error = ext4_ext_precache(inode);
4948 		if (error)
4949 			return error;
4950 		fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
4951 	}
4952 
4953 	error = fiemap_prep(inode, fieinfo, start, &len, 0);
4954 	if (error)
4955 		return error;
4956 
4957 	error = ext4_fiemap_check_ranges(inode, start, &len);
4958 	if (error)
4959 		return error;
4960 
4961 	start_blk = start >> inode->i_sb->s_blocksize_bits;
4962 	last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
4963 	if (last_blk >= EXT_MAX_BLOCKS)
4964 		last_blk = EXT_MAX_BLOCKS-1;
4965 	len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
4966 
4967 	/*
4968 	 * Walk the extent tree gathering extent information
4969 	 * and pushing extents back to the user.
4970 	 */
4971 	return ext4_fill_es_cache_info(inode, start_blk, len_blks, fieinfo);
4972 }
4973 
4974 /*
4975  * ext4_access_path:
4976  * Function to get access to the path buffer for marking it dirty.
4977  * It also checks if there are sufficient credits left in the journal handle
4978  * to update the path.
4979  */
4980 static int
4981 ext4_access_path(handle_t *handle, struct inode *inode,
4982 		struct ext4_ext_path *path)
4983 {
4984 	int credits, err;
4985 
4986 	if (!ext4_handle_valid(handle))
4987 		return 0;
4988 
4989 	/*
4990 	 * Check if we need to extend journal credits:
4991 	 * 3 for the leaf, sb, and inode, plus 2 (bitmap and group
4992 	 * descriptor) for each block group; assume two block
4993 	 * groups
4994 	 */
4995 	credits = ext4_writepage_trans_blocks(inode);
4996 	err = ext4_datasem_ensure_credits(handle, inode, 7, credits, 0);
4997 	if (err < 0)
4998 		return err;
4999 
5000 	err = ext4_ext_get_access(handle, inode, path);
5001 	return err;
5002 }
5003 
5004 /*
5005  * ext4_ext_shift_path_extents:
5006  * Shift the extents of a path structure lying between path[depth].p_ext
5007  * and EXT_LAST_EXTENT(path[depth].p_hdr), by @shift blocks. @SHIFT tells
5008  * whether it is a right or left shift operation.
5009  */
5010 static int
5011 ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
5012 			    struct inode *inode, handle_t *handle,
5013 			    enum SHIFT_DIRECTION SHIFT)
5014 {
5015 	int depth, err = 0;
5016 	struct ext4_extent *ex_start, *ex_last;
5017 	bool update = false;
5018 	depth = path->p_depth;
5019 
5020 	while (depth >= 0) {
5021 		if (depth == path->p_depth) {
5022 			ex_start = path[depth].p_ext;
5023 			if (!ex_start)
5024 				return -EFSCORRUPTED;
5025 
5026 			ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);
5027 
5028 			err = ext4_access_path(handle, inode, path + depth);
5029 			if (err)
5030 				goto out;
5031 
5032 			if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr))
5033 				update = true;
5034 
5035 			while (ex_start <= ex_last) {
5036 				if (SHIFT == SHIFT_LEFT) {
5037 					le32_add_cpu(&ex_start->ee_block,
5038 						-shift);
5039 					/* Try to merge to the left. */
5040 					if ((ex_start >
5041 					    EXT_FIRST_EXTENT(path[depth].p_hdr))
5042 					    &&
5043 					    ext4_ext_try_to_merge_right(inode,
5044 					    path, ex_start - 1))
5045 						ex_last--;
5046 					else
5047 						ex_start++;
5048 				} else {
5049 					le32_add_cpu(&ex_last->ee_block, shift);
5050 					ext4_ext_try_to_merge_right(inode, path,
5051 						ex_last);
5052 					ex_last--;
5053 				}
5054 			}
5055 			err = ext4_ext_dirty(handle, inode, path + depth);
5056 			if (err)
5057 				goto out;
5058 
5059 			if (--depth < 0 || !update)
5060 				break;
5061 		}
5062 
5063 		/* Update index too */
5064 		err = ext4_access_path(handle, inode, path + depth);
5065 		if (err)
5066 			goto out;
5067 
5068 		if (SHIFT == SHIFT_LEFT)
5069 			le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
5070 		else
5071 			le32_add_cpu(&path[depth].p_idx->ei_block, shift);
5072 		err = ext4_ext_dirty(handle, inode, path + depth);
5073 		if (err)
5074 			goto out;
5075 
5076 		/* we are done if current index is not a starting index */
5077 		if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
5078 			break;
5079 
5080 		depth--;
5081 	}
5082 
5083 out:
5084 	return err;
5085 }
5086 
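/*
 * Illustrative sketch (editor's addition): the per-extent update done
 * by ext4_ext_shift_path_extents() above, over a plain unsigned block
 * number. The real code adjusts the little-endian ee_block field with
 * le32_add_cpu() and then tries to merge neighbouring extents; the
 * bounds noted in the comments are validated by the caller,
 * ext4_ext_shift_extents().
 */
enum toy_shift_dir { TOY_SHIFT_LEFT, TOY_SHIFT_RIGHT };

static void toy_shift_block(unsigned int *lblk, unsigned int shift,
			    enum toy_shift_dir dir)
{
	if (dir == TOY_SHIFT_LEFT)
		*lblk -= shift;	/* hole on the left verified to fit */
	else
		*lblk += shift;	/* verified to stay below EXT_MAX_BLOCKS */
}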
5087 /*
5088  * ext4_ext_shift_extents:
5089  * All the extents which lie in the range from @start to the last allocated
5090  * block for the @inode are shifted either left or right (depending
5091  * upon @SHIFT) by @shift blocks.
5092  * On success, 0 is returned; an error code otherwise.
5093  */
5094 static int
5095 ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
5096 		       ext4_lblk_t start, ext4_lblk_t shift,
5097 		       enum SHIFT_DIRECTION SHIFT)
5098 {
5099 	struct ext4_ext_path *path;
5100 	int ret = 0, depth;
5101 	struct ext4_extent *extent;
5102 	ext4_lblk_t stop, *iterator, ex_start, ex_end;
5103 
5104 	/* Let path point to the last extent */
5105 	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
5106 				EXT4_EX_NOCACHE);
5107 	if (IS_ERR(path))
5108 		return PTR_ERR(path);
5109 
5110 	depth = path->p_depth;
5111 	extent = path[depth].p_ext;
5112 	if (!extent)
5113 		goto out;
5114 
5115 	stop = le32_to_cpu(extent->ee_block);
5116 
5117 	/*
5118 	 * For left shifts, make sure the hole on the left is big enough to
5119 	 * accommodate the shift.  For right shifts, make sure the last extent
5120 	 * won't be shifted beyond EXT_MAX_BLOCKS.
5121 	 */
5122 	if (SHIFT == SHIFT_LEFT) {
5123 		path = ext4_find_extent(inode, start - 1, &path,
5124 					EXT4_EX_NOCACHE);
5125 		if (IS_ERR(path))
5126 			return PTR_ERR(path);
5127 		depth = path->p_depth;
5128 		extent =  path[depth].p_ext;
5129 		if (extent) {
5130 			ex_start = le32_to_cpu(extent->ee_block);
5131 			ex_end = le32_to_cpu(extent->ee_block) +
5132 				ext4_ext_get_actual_len(extent);
5133 		} else {
5134 			ex_start = 0;
5135 			ex_end = 0;
5136 		}
5137 
5138 		if ((start == ex_start && shift > ex_start) ||
5139 		    (shift > start - ex_end)) {
5140 			ret = -EINVAL;
5141 			goto out;
5142 		}
5143 	} else {
5144 		if (shift > EXT_MAX_BLOCKS -
5145 		    (stop + ext4_ext_get_actual_len(extent))) {
5146 			ret = -EINVAL;
5147 			goto out;
5148 		}
5149 	}
5150 
5151 	/*
5152 	 * In case of left shift, iterator points to start and it is increased
5153 	 * till we reach stop. In case of right shift, iterator points to stop
5154 	 * and it is decreased till we reach start.
5155 	 */
5156 	if (SHIFT == SHIFT_LEFT)
5157 		iterator = &start;
5158 	else
5159 		iterator = &stop;
5160 
5161 	/*
5162 	 * It's safe to start updating extents.  Start and stop are unsigned, so
5163 	 * in the case of a right shift, if the extent at block 0 is reached, the
5164 	 * iterator becomes NULL to indicate the end of the loop.
5165 	 */
5166 	while (iterator && start <= stop) {
5167 		path = ext4_find_extent(inode, *iterator, &path,
5168 					EXT4_EX_NOCACHE);
5169 		if (IS_ERR(path))
5170 			return PTR_ERR(path);
5171 		depth = path->p_depth;
5172 		extent = path[depth].p_ext;
5173 		if (!extent) {
5174 			EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
5175 					 (unsigned long) *iterator);
5176 			return -EFSCORRUPTED;
5177 		}
5178 		if (SHIFT == SHIFT_LEFT && *iterator >
5179 		    le32_to_cpu(extent->ee_block)) {
5180 			/* Hole, move to the next extent */
5181 			if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
5182 				path[depth].p_ext++;
5183 			} else {
5184 				*iterator = ext4_ext_next_allocated_block(path);
5185 				continue;
5186 			}
5187 		}
5188 
5189 		if (SHIFT == SHIFT_LEFT) {
5190 			extent = EXT_LAST_EXTENT(path[depth].p_hdr);
5191 			*iterator = le32_to_cpu(extent->ee_block) +
5192 					ext4_ext_get_actual_len(extent);
5193 		} else {
5194 			extent = EXT_FIRST_EXTENT(path[depth].p_hdr);
5195 			if (le32_to_cpu(extent->ee_block) > 0)
5196 				*iterator = le32_to_cpu(extent->ee_block) - 1;
5197 			else
5198 				/* Beginning is reached, end of the loop */
5199 				iterator = NULL;
5200 			/* Update path extent in case we need to stop */
5201 			while (le32_to_cpu(extent->ee_block) < start)
5202 				extent++;
5203 			path[depth].p_ext = extent;
5204 		}
5205 		ret = ext4_ext_shift_path_extents(path, shift, inode,
5206 				handle, SHIFT);
5207 		if (ret)
5208 			break;
5209 	}
5210 out:
5211 	ext4_ext_drop_refs(path);
5212 	kfree(path);
5213 	return ret;
5214 }
5215 
5216 /*
5217  * ext4_collapse_range:
5218  * This implements fallocate's collapse range functionality for ext4.
5219  * Returns: 0 on success and non-zero on error.
5220  */
5221 static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
5222 {
5223 	struct super_block *sb = inode->i_sb;
5224 	ext4_lblk_t punch_start, punch_stop;
5225 	handle_t *handle;
5226 	unsigned int credits;
5227 	loff_t new_size, ioffset;
5228 	int ret;
5229 
5230 	/*
5231 	 * We need to test this early because xfstests assumes that a
5232 	 * collapse range of (0, 1) will return EOPNOTSUPP if the file
5233 	 * system does not support collapse range.
5234 	 */
5235 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
5236 		return -EOPNOTSUPP;
5237 
5238 	/* Collapse range works only on fs cluster size aligned regions. */
5239 	if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
5240 		return -EINVAL;
5241 
5242 	trace_ext4_collapse_range(inode, offset, len);
5243 
5244 	punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
5245 	punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
5246 
5247 	/* Call ext4_force_commit to flush all data in case of data=journal. */
5248 	if (ext4_should_journal_data(inode)) {
5249 		ret = ext4_force_commit(inode->i_sb);
5250 		if (ret)
5251 			return ret;
5252 	}
5253 
5254 	inode_lock(inode);
5255 	/*
5256 	 * The collapse range must not overlap or reach EOF; such a request
5257 	 * would effectively be a truncate operation.
5258 	 */
5259 	if (offset + len >= inode->i_size) {
5260 		ret = -EINVAL;
5261 		goto out_mutex;
5262 	}
5263 
5264 	/* Currently just for extent based files */
5265 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
5266 		ret = -EOPNOTSUPP;
5267 		goto out_mutex;
5268 	}
5269 
5270 	/* Wait for existing dio to complete */
5271 	inode_dio_wait(inode);
5272 
5273 	/*
5274 	 * Prevent page faults from reinstantiating pages we have released from
5275 	 * page cache.
5276 	 */
5277 	down_write(&EXT4_I(inode)->i_mmap_sem);
5278 
5279 	ret = ext4_break_layouts(inode);
5280 	if (ret)
5281 		goto out_mmap;
5282 
5283 	/*
5284 	 * Need to round offset down to a page size boundary, to handle
5285 	 * page size > block size.
5286 	 */
5287 	ioffset = round_down(offset, PAGE_SIZE);
5288 	/*
5289 	 * Write tail of the last page before removed range since it will get
5290 	 * removed from the page cache below.
5291 	 */
5292 	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset);
5293 	if (ret)
5294 		goto out_mmap;
5295 	/*
5296 	 * Write out the data that will be shifted so that it is preserved
5297 	 * when the page cache is discarded below. We are also protected from
5298 	 * pages becoming dirty by i_mmap_sem.
5299 	 */
5300 	ret = filemap_write_and_wait_range(inode->i_mapping, offset + len,
5301 					   LLONG_MAX);
5302 	if (ret)
5303 		goto out_mmap;
5304 	truncate_pagecache(inode, ioffset);
5305 
5306 	credits = ext4_writepage_trans_blocks(inode);
5307 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
5308 	if (IS_ERR(handle)) {
5309 		ret = PTR_ERR(handle);
5310 		goto out_mmap;
5311 	}
5312 	ext4_fc_start_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE);
5313 
5314 	down_write(&EXT4_I(inode)->i_data_sem);
5315 	ext4_discard_preallocations(inode, 0);
5316 
5317 	ret = ext4_es_remove_extent(inode, punch_start,
5318 				    EXT_MAX_BLOCKS - punch_start);
5319 	if (ret) {
5320 		up_write(&EXT4_I(inode)->i_data_sem);
5321 		goto out_stop;
5322 	}
5323 
5324 	ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
5325 	if (ret) {
5326 		up_write(&EXT4_I(inode)->i_data_sem);
5327 		goto out_stop;
5328 	}
5329 	ext4_discard_preallocations(inode, 0);
5330 
5331 	ret = ext4_ext_shift_extents(inode, handle, punch_stop,
5332 				     punch_stop - punch_start, SHIFT_LEFT);
5333 	if (ret) {
5334 		up_write(&EXT4_I(inode)->i_data_sem);
5335 		goto out_stop;
5336 	}
5337 
5338 	new_size = inode->i_size - len;
5339 	i_size_write(inode, new_size);
5340 	EXT4_I(inode)->i_disksize = new_size;
5341 
5342 	up_write(&EXT4_I(inode)->i_data_sem);
5343 	if (IS_SYNC(inode))
5344 		ext4_handle_sync(handle);
5345 	inode->i_mtime = inode->i_ctime = current_time(inode);
5346 	ret = ext4_mark_inode_dirty(handle, inode);
5347 	ext4_update_inode_fsync_trans(handle, inode, 1);
5348 
5349 out_stop:
5350 	ext4_journal_stop(handle);
5351 	ext4_fc_stop_ineligible(sb);
5352 out_mmap:
5353 	up_write(&EXT4_I(inode)->i_mmap_sem);
5354 out_mutex:
5355 	inode_unlock(inode);
5356 	return ret;
5357 }
5358 
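/*
 * For reference, a minimal userspace sketch (not kernel code) that
 * exercises ext4_collapse_range() via fallocate(2).  As checked above,
 * offset and len must be multiples of the filesystem cluster size and
 * the range must end before EOF, otherwise EINVAL is returned.
 */
#if 0	/* example only */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

static int collapse_example(const char *fname, off_t offset, off_t len)
{
	int fd = open(fname, O_RDWR);

	if (fd < 0)
		return -1;
	/* Remove [offset, offset + len) and shift the tail of the file left. */
	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, offset, len) < 0)
		perror("fallocate(FALLOC_FL_COLLAPSE_RANGE)");
	close(fd);
	return 0;
}
#endif
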
5359 /*
5360  * ext4_insert_range:
5361  * This function implements the FALLOC_FL_INSERT_RANGE flag of fallocate.
5362  * The data blocks starting from @offset to the EOF are shifted by @len
5363  * towards right to create a hole in the @inode. Inode size is increased
5364  * by len bytes.
5365  * Returns 0 on success, error otherwise.
5366  */
5367 static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
5368 {
5369 	struct super_block *sb = inode->i_sb;
5370 	handle_t *handle;
5371 	struct ext4_ext_path *path;
5372 	struct ext4_extent *extent;
5373 	ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0;
5374 	unsigned int credits, ee_len;
5375 	int ret = 0, depth, split_flag = 0;
5376 	loff_t ioffset;
5377 
5378 	/*
5379 	 * We need to test this early because xfstests assumes that an
5380 	 * insert range of (0, 1) will return EOPNOTSUPP if the file
5381 	 * system does not support insert range.
5382 	 */
5383 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
5384 		return -EOPNOTSUPP;
5385 
5386 	/* Insert range works only on fs cluster size aligned regions. */
5387 	if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
5388 		return -EINVAL;
5389 
5390 	trace_ext4_insert_range(inode, offset, len);
5391 
5392 	offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb);
5393 	len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb);
5394 
5395 	/* Call ext4_force_commit to flush all data in case of data=journal */
5396 	if (ext4_should_journal_data(inode)) {
5397 		ret = ext4_force_commit(inode->i_sb);
5398 		if (ret)
5399 			return ret;
5400 	}
5401 
5402 	inode_lock(inode);
5403 	/* Currently just for extent based files */
5404 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
5405 		ret = -EOPNOTSUPP;
5406 		goto out_mutex;
5407 	}
5408 
5409 	/* Check whether the maximum file size would be exceeded */
5410 	if (len > inode->i_sb->s_maxbytes - inode->i_size) {
5411 		ret = -EFBIG;
5412 		goto out_mutex;
5413 	}
5414 
5415 	/* Offset must be less than i_size */
5416 	if (offset >= inode->i_size) {
5417 		ret = -EINVAL;
5418 		goto out_mutex;
5419 	}
5420 
5421 	/* Wait for existing dio to complete */
5422 	inode_dio_wait(inode);
5423 
5424 	/*
5425 	 * Prevent page faults from reinstantiating pages we have released from
5426 	 * page cache.
5427 	 */
5428 	down_write(&EXT4_I(inode)->i_mmap_sem);
5429 
5430 	ret = ext4_break_layouts(inode);
5431 	if (ret)
5432 		goto out_mmap;
5433 
5434 	/*
5435 	 * Need to round the start offset down to a page size boundary, to
5436 	 * handle page size > block size.
5437 	 */
5438 	ioffset = round_down(offset, PAGE_SIZE);
5439 	/* Write out all dirty pages */
5440 	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
5441 			LLONG_MAX);
5442 	if (ret)
5443 		goto out_mmap;
5444 	truncate_pagecache(inode, ioffset);
5445 
5446 	credits = ext4_writepage_trans_blocks(inode);
5447 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
5448 	if (IS_ERR(handle)) {
5449 		ret = PTR_ERR(handle);
5450 		goto out_mmap;
5451 	}
5452 	ext4_fc_start_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE);
5453 
5454 	/* Expand the file to avoid data loss if there is an error while shifting */
5455 	inode->i_size += len;
5456 	EXT4_I(inode)->i_disksize += len;
5457 	inode->i_mtime = inode->i_ctime = current_time(inode);
5458 	ret = ext4_mark_inode_dirty(handle, inode);
5459 	if (ret)
5460 		goto out_stop;
5461 
5462 	down_write(&EXT4_I(inode)->i_data_sem);
5463 	ext4_discard_preallocations(inode, 0);
5464 
5465 	path = ext4_find_extent(inode, offset_lblk, NULL, 0);
5466 	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
5467 		up_write(&EXT4_I(inode)->i_data_sem);
5468 		goto out_stop;
5469 	}
5470 
5471 	depth = ext_depth(inode);
5472 	extent = path[depth].p_ext;
5473 	if (extent) {
5474 		ee_start_lblk = le32_to_cpu(extent->ee_block);
5475 		ee_len = ext4_ext_get_actual_len(extent);
5476 
5477 		/*
5478 		 * If offset_lblk is not the starting block of extent, split
5479 		 * the extent @offset_lblk
5480 		 */
5481 		if ((offset_lblk > ee_start_lblk) &&
5482 				(offset_lblk < (ee_start_lblk + ee_len))) {
5483 			if (ext4_ext_is_unwritten(extent))
5484 				split_flag = EXT4_EXT_MARK_UNWRIT1 |
5485 					EXT4_EXT_MARK_UNWRIT2;
5486 			ret = ext4_split_extent_at(handle, inode, &path,
5487 					offset_lblk, split_flag,
5488 					EXT4_EX_NOCACHE |
5489 					EXT4_GET_BLOCKS_PRE_IO |
5490 					EXT4_GET_BLOCKS_METADATA_NOFAIL);
5491 		}
5492 
5493 		ext4_ext_drop_refs(path);
5494 		kfree(path);
5495 		if (ret < 0) {
5496 			up_write(&EXT4_I(inode)->i_data_sem);
5497 			goto out_stop;
5498 		}
5499 	} else {
5500 		ext4_ext_drop_refs(path);
5501 		kfree(path);
5502 	}
5503 
5504 	ret = ext4_es_remove_extent(inode, offset_lblk,
5505 			EXT_MAX_BLOCKS - offset_lblk);
5506 	if (ret) {
5507 		up_write(&EXT4_I(inode)->i_data_sem);
5508 		goto out_stop;
5509 	}
5510 
5511 	/*
5512 	 * If offset_lblk lies in a hole at the start of the file, use
5513 	 * ee_start_lblk to shift extents
5514 	 */
5515 	ret = ext4_ext_shift_extents(inode, handle,
5516 		ee_start_lblk > offset_lblk ? ee_start_lblk : offset_lblk,
5517 		len_lblk, SHIFT_RIGHT);
5518 
5519 	up_write(&EXT4_I(inode)->i_data_sem);
5520 	if (IS_SYNC(inode))
5521 		ext4_handle_sync(handle);
5522 	if (ret >= 0)
5523 		ext4_update_inode_fsync_trans(handle, inode, 1);
5524 
5525 out_stop:
5526 	ext4_journal_stop(handle);
5527 	ext4_fc_stop_ineligible(sb);
5528 out_mmap:
5529 	up_write(&EXT4_I(inode)->i_mmap_sem);
5530 out_mutex:
5531 	inode_unlock(inode);
5532 	return ret;
5533 }
5534 
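/*
 * For reference, a minimal userspace sketch (not kernel code) that
 * exercises ext4_insert_range() via fallocate(2).  As with collapse
 * range, offset and len must be cluster-aligned, and offset must lie
 * strictly before EOF.
 */
#if 0	/* example only */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

static int insert_example(const char *fname, off_t offset, off_t len)
{
	int fd = open(fname, O_RDWR);

	if (fd < 0)
		return -1;
	/* Open a hole of len bytes at offset; i_size grows by len. */
	if (fallocate(fd, FALLOC_FL_INSERT_RANGE, offset, len) < 0)
		perror("fallocate(FALLOC_FL_INSERT_RANGE)");
	close(fd);
	return 0;
}
#endif
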
5535 /**
5536  * ext4_swap_extents() - Swap extents between two inodes
5537  * @handle: handle for this transaction
5538  * @inode1:	First inode
5539  * @inode2:	Second inode
5540  * @lblk1:	Start block for first inode
5541  * @lblk2:	Start block for second inode
5542  * @count:	Number of blocks to swap
5543  * @unwritten: Mark second inode's extents as unwritten after swap
5544  * @erp:	Pointer to save error value
5545  *
5546  * This helper routine does exactly what it promises: "swap extents". All
5547  * other concerns, such as page-cache locking consistency, bh mapping
5548  * consistency, and copying of extent data, must be handled by the caller.
5549  * Locking:
5550  * 		i_mutex is held for both inodes
5551  * 		i_data_sem is locked for write for both inodes
5552  * Assumptions:
5553  *		All pages from requested range are locked for both inodes
5554  */
5555 int
5556 ext4_swap_extents(handle_t *handle, struct inode *inode1,
5557 		  struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
5558 		  ext4_lblk_t count, int unwritten, int *erp)
5559 {
5560 	struct ext4_ext_path *path1 = NULL;
5561 	struct ext4_ext_path *path2 = NULL;
5562 	int replaced_count = 0;
5563 
5564 	BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
5565 	BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
5566 	BUG_ON(!inode_is_locked(inode1));
5567 	BUG_ON(!inode_is_locked(inode2));
5568 
5569 	*erp = ext4_es_remove_extent(inode1, lblk1, count);
5570 	if (unlikely(*erp))
5571 		return 0;
5572 	*erp = ext4_es_remove_extent(inode2, lblk2, count);
5573 	if (unlikely(*erp))
5574 		return 0;
5575 
5576 	while (count) {
5577 		struct ext4_extent *ex1, *ex2, tmp_ex;
5578 		ext4_lblk_t e1_blk, e2_blk;
5579 		int e1_len, e2_len, len;
5580 		int split = 0;
5581 
5582 		path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
5583 		if (IS_ERR(path1)) {
5584 			*erp = PTR_ERR(path1);
5585 			path1 = NULL;
5586 		finish:
5587 			count = 0;
5588 			goto repeat;
5589 		}
5590 		path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
5591 		if (IS_ERR(path2)) {
5592 			*erp = PTR_ERR(path2);
5593 			path2 = NULL;
5594 			goto finish;
5595 		}
5596 		ex1 = path1[path1->p_depth].p_ext;
5597 		ex2 = path2[path2->p_depth].p_ext;
5598 		/* Do we have something to swap ? */
5599 		if (unlikely(!ex2 || !ex1))
5600 			goto finish;
5601 
5602 		e1_blk = le32_to_cpu(ex1->ee_block);
5603 		e2_blk = le32_to_cpu(ex2->ee_block);
5604 		e1_len = ext4_ext_get_actual_len(ex1);
5605 		e2_len = ext4_ext_get_actual_len(ex2);
5606 
5607 		/* Hole handling */
5608 		if (!in_range(lblk1, e1_blk, e1_len) ||
5609 		    !in_range(lblk2, e2_blk, e2_len)) {
5610 			ext4_lblk_t next1, next2;
5611 
5612 			/* if hole after extent, then go to next extent */
5613 			next1 = ext4_ext_next_allocated_block(path1);
5614 			next2 = ext4_ext_next_allocated_block(path2);
5615 			/* If hole before extent, then shift to that extent */
5616 			if (e1_blk > lblk1)
5617 				next1 = e1_blk;
5618 			if (e2_blk > lblk2)
5619 				next2 = e2_blk;
5620 			/* Do we have something to swap */
5621 			if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
5622 				goto finish;
5623 			/* Move to the rightmost boundary */
5624 			len = next1 - lblk1;
5625 			if (len < next2 - lblk2)
5626 				len = next2 - lblk2;
5627 			if (len > count)
5628 				len = count;
5629 			lblk1 += len;
5630 			lblk2 += len;
5631 			count -= len;
5632 			goto repeat;
5633 		}
5634 
5635 		/* Prepare left boundary */
5636 		if (e1_blk < lblk1) {
5637 			split = 1;
5638 			*erp = ext4_force_split_extent_at(handle, inode1,
5639 						&path1, lblk1, 0);
5640 			if (unlikely(*erp))
5641 				goto finish;
5642 		}
5643 		if (e2_blk < lblk2) {
5644 			split = 1;
5645 			*erp = ext4_force_split_extent_at(handle, inode2,
5646 						&path2,  lblk2, 0);
5647 			if (unlikely(*erp))
5648 				goto finish;
5649 		}
5650 		/* ext4_split_extent_at() may result in a leaf extent split,
5651 		 * so the path must be revalidated. */
5652 		if (split)
5653 			goto repeat;
5654 
5655 		/* Prepare right boundary */
5656 		len = count;
5657 		if (len > e1_blk + e1_len - lblk1)
5658 			len = e1_blk + e1_len - lblk1;
5659 		if (len > e2_blk + e2_len - lblk2)
5660 			len = e2_blk + e2_len - lblk2;
5661 
5662 		if (len != e1_len) {
5663 			split = 1;
5664 			*erp = ext4_force_split_extent_at(handle, inode1,
5665 						&path1, lblk1 + len, 0);
5666 			if (unlikely(*erp))
5667 				goto finish;
5668 		}
5669 		if (len != e2_len) {
5670 			split = 1;
5671 			*erp = ext4_force_split_extent_at(handle, inode2,
5672 						&path2, lblk2 + len, 0);
5673 			if (*erp)
5674 				goto finish;
5675 		}
5676 		/* ext4_split_extent_at() may result in a leaf extent split,
5677 		 * so the path must be revalidated. */
5678 		if (split)
5679 			goto repeat;
5680 
5681 		BUG_ON(e2_len != e1_len);
5682 		*erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
5683 		if (unlikely(*erp))
5684 			goto finish;
5685 		*erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
5686 		if (unlikely(*erp))
5687 			goto finish;
5688 
5689 		/* Both extents are fully inside boundaries. Swap them now */
5690 		tmp_ex = *ex1;
5691 		ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
5692 		ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
5693 		ex1->ee_len = cpu_to_le16(e2_len);
5694 		ex2->ee_len = cpu_to_le16(e1_len);
5695 		if (unwritten)
5696 			ext4_ext_mark_unwritten(ex2);
5697 		if (ext4_ext_is_unwritten(&tmp_ex))
5698 			ext4_ext_mark_unwritten(ex1);
5699 
5700 		ext4_ext_try_to_merge(handle, inode2, path2, ex2);
5701 		ext4_ext_try_to_merge(handle, inode1, path1, ex1);
5702 		*erp = ext4_ext_dirty(handle, inode2, path2 +
5703 				      path2->p_depth);
5704 		if (unlikely(*erp))
5705 			goto finish;
5706 		*erp = ext4_ext_dirty(handle, inode1, path1 +
5707 				      path1->p_depth);
5708 		/*
5709 		 * Looks scary, eh? The second inode already points to the new
5710 		 * blocks and was successfully dirtied. Fortunately, an error
5711 		 * here can only be caused by a journal error, so the full
5712 		 * transaction will be aborted anyway.
5713 		 */
5714 		if (unlikely(*erp))
5715 			goto finish;
5716 		lblk1 += len;
5717 		lblk2 += len;
5718 		replaced_count += len;
5719 		count -= len;
5720 
5721 	repeat:
5722 		ext4_ext_drop_refs(path1);
5723 		kfree(path1);
5724 		ext4_ext_drop_refs(path2);
5725 		kfree(path2);
5726 		path1 = path2 = NULL;
5727 	}
5728 	return replaced_count;
5729 }
5730 
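/*
 * For reference: ext4_swap_extents() is driven by ext4_move_extents(),
 * i.e. the EXT4_IOC_MOVE_EXT ioctl used for online defragmentation
 * (e4defrag).  A hedged userspace sketch follows; struct move_extent
 * is not exported in a uapi header, so userspace tools carry their own
 * copy of this layout, which mirrors the definition in fs/ext4/ext4.h.
 */
#if 0	/* example only */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct move_extent {
	uint32_t reserved;	/* must be zero */
	uint32_t donor_fd;	/* fd of the donor file */
	uint64_t orig_start;	/* logical start of the original file, in blocks */
	uint64_t donor_start;	/* logical start of the donor file, in blocks */
	uint64_t len;		/* number of blocks to move */
	uint64_t moved_len;	/* out: number of blocks actually moved */
};

#define EXT4_IOC_MOVE_EXT	_IOWR('f', 15, struct move_extent)

static int move_blocks(int orig_fd, int donor_fd, uint64_t start, uint64_t len)
{
	struct move_extent me = {
		.donor_fd = donor_fd,
		.orig_start = start,
		.donor_start = start,
		.len = len,
	};

	return ioctl(orig_fd, EXT4_IOC_MOVE_EXT, &me);
}
#endif
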
5731 /*
5732  * ext4_clu_mapped - determine whether any block in a logical cluster has
5733  *                   been mapped to a physical cluster
5734  *
5735  * @inode - file containing the logical cluster
5736  * @lclu - logical cluster of interest
5737  *
5738  * Returns 1 if any block in the logical cluster is mapped, signifying
5739  * that a physical cluster has been allocated for it.  Otherwise,
5740  * returns 0.  Can also return negative error codes.  Derived from
5741  * ext4_ext_map_blocks().
5742  */
5743 int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
5744 {
5745 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5746 	struct ext4_ext_path *path;
5747 	int depth, mapped = 0, err = 0;
5748 	struct ext4_extent *extent;
5749 	ext4_lblk_t first_lblk, first_lclu, last_lclu;
5750 
5751 	/* search for the extent closest to the first block in the cluster */
5752 	path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
5753 	if (IS_ERR(path)) {
5754 		err = PTR_ERR(path);
5755 		path = NULL;
5756 		goto out;
5757 	}
5758 
5759 	depth = ext_depth(inode);
5760 
5761 	/*
5762 	 * A consistent leaf must not be empty.  This situation is possible,
5763 	 * though, _during_ tree modification, and it's why an assert can't
5764 	 * be put in ext4_find_extent().
5765 	 */
5766 	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
5767 		EXT4_ERROR_INODE(inode,
5768 		    "bad extent address - lblock: %lu, depth: %d, pblock: %lld",
5769 				 (unsigned long) EXT4_C2B(sbi, lclu),
5770 				 depth, path[depth].p_block);
5771 		err = -EFSCORRUPTED;
5772 		goto out;
5773 	}
5774 
5775 	extent = path[depth].p_ext;
5776 
5777 	/* can't be mapped if the extent tree is empty */
5778 	if (extent == NULL)
5779 		goto out;
5780 
5781 	first_lblk = le32_to_cpu(extent->ee_block);
5782 	first_lclu = EXT4_B2C(sbi, first_lblk);
5783 
5784 	/*
5785 	 * Three possible outcomes at this point - found extent spanning
5786 	 * the target cluster, to the left of the target cluster, or to the
5787 	 * right of the target cluster.  The first two cases are handled here.
5788 	 * The last case indicates the target cluster is not mapped.
5789 	 */
5790 	if (lclu >= first_lclu) {
5791 		last_lclu = EXT4_B2C(sbi, first_lblk +
5792 				     ext4_ext_get_actual_len(extent) - 1);
5793 		if (lclu <= last_lclu) {
5794 			mapped = 1;
5795 		} else {
5796 			first_lblk = ext4_ext_next_allocated_block(path);
5797 			first_lclu = EXT4_B2C(sbi, first_lblk);
5798 			if (lclu == first_lclu)
5799 				mapped = 1;
5800 		}
5801 	}
5802 
5803 out:
5804 	ext4_ext_drop_refs(path);
5805 	kfree(path);
5806 
5807 	return err ? err : mapped;
5808 }
5809 
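/*
 * Illustrative sketch (not part of the kernel build) of the
 * block/cluster arithmetic used above.  EXT4_B2C() and EXT4_C2B() are
 * shift-based conversions by sbi->s_cluster_bits; for example, with
 * bigalloc and 16 blocks per cluster, the cluster-bits value is 4.
 * The parameter below is a hypothetical stand-in for that field.
 */
#if 0	/* example only */
#include <stdint.h>

static inline uint32_t example_b2c(uint32_t lblk, unsigned int cluster_bits)
{
	return lblk >> cluster_bits;	/* cluster containing block lblk */
}

static inline uint32_t example_c2b(uint32_t lclu, unsigned int cluster_bits)
{
	return lclu << cluster_bits;	/* first block of cluster lclu */
}
#endif
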
5810 /*
5811  * Updates the physical block address and unwritten status of the
5812  * extent starting at logical block 'start' with length 'len'. If no
5813  * such extent exists, the extent tree is split appropriately to
5814  * create one.  This function is called in the fast commit replay
5815  * path.  Returns 0 on success and a negative error code on failure.
5816  */
5817 int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
5818 			      int len, int unwritten, ext4_fsblk_t pblk)
5819 {
5820 	struct ext4_ext_path *path = NULL, *ppath;
5821 	struct ext4_extent *ex;
5822 	int ret;
5823 
5824 	path = ext4_find_extent(inode, start, NULL, 0);
5825 	if (IS_ERR(path))
5826 		return PTR_ERR(path);
5827 	ex = path[path->p_depth].p_ext;
5828 	if (!ex) {
5829 		ret = -EFSCORRUPTED;
5830 		goto out;
5831 	}
5832 
5833 	if (le32_to_cpu(ex->ee_block) != start ||
5834 		ext4_ext_get_actual_len(ex) != len) {
5835 		/* We need to split this extent to match our extent first */
5836 		ppath = path;
5837 		down_write(&EXT4_I(inode)->i_data_sem);
5838 		ret = ext4_force_split_extent_at(NULL, inode, &ppath, start, 1);
5839 		up_write(&EXT4_I(inode)->i_data_sem);
5840 		if (ret)
5841 			goto out;
5842 		kfree(path);
5843 		path = ext4_find_extent(inode, start, NULL, 0);
5844 		if (IS_ERR(path))
5845 			return PTR_ERR(path);
5846 		ppath = path;
5847 		ex = path[path->p_depth].p_ext;
5848 		WARN_ON(le32_to_cpu(ex->ee_block) != start);
5849 		if (ext4_ext_get_actual_len(ex) != len) {
5850 			down_write(&EXT4_I(inode)->i_data_sem);
5851 			ret = ext4_force_split_extent_at(NULL, inode, &ppath,
5852 							 start + len, 1);
5853 			up_write(&EXT4_I(inode)->i_data_sem);
5854 			if (ret)
5855 				goto out;
5856 			kfree(path);
5857 			path = ext4_find_extent(inode, start, NULL, 0);
5858 			if (IS_ERR(path))
5859 				return PTR_ERR(path);
5860 			ex = path[path->p_depth].p_ext;
5861 		}
5862 	}
5863 	if (unwritten)
5864 		ext4_ext_mark_unwritten(ex);
5865 	else
5866 		ext4_ext_mark_initialized(ex);
5867 	ext4_ext_store_pblock(ex, pblk);
5868 	down_write(&EXT4_I(inode)->i_data_sem);
5869 	ret = ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
5870 	up_write(&EXT4_I(inode)->i_data_sem);
5871 out:
5872 	ext4_ext_drop_refs(path);
5873 	kfree(path);
5874 	ext4_mark_inode_dirty(NULL, inode);
5875 	return ret;
5876 }
5877 
5878 /* Try to shrink the extent tree */
5879 void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end)
5880 {
5881 	struct ext4_ext_path *path = NULL;
5882 	struct ext4_extent *ex;
5883 	ext4_lblk_t old_cur, cur = 0;
5884 
5885 	while (cur < end) {
5886 		path = ext4_find_extent(inode, cur, NULL, 0);
5887 		if (IS_ERR(path))
5888 			return;
5889 		ex = path[path->p_depth].p_ext;
5890 		if (!ex) {
5891 			ext4_ext_drop_refs(path);
5892 			kfree(path);
5893 			ext4_mark_inode_dirty(NULL, inode);
5894 			return;
5895 		}
5896 		old_cur = cur;
5897 		cur = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
5898 		if (cur <= old_cur)
5899 			cur = old_cur + 1;
5900 		ext4_ext_try_to_merge(NULL, inode, path, ex);
5901 		down_write(&EXT4_I(inode)->i_data_sem);
5902 		ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
5903 		up_write(&EXT4_I(inode)->i_data_sem);
5904 		ext4_mark_inode_dirty(NULL, inode);
5905 		ext4_ext_drop_refs(path);
5906 		kfree(path);
5907 	}
5908 }
5909 
5910 /* Check if *cur is a hole and, if so, skip past it */
5911 static void skip_hole(struct inode *inode, ext4_lblk_t *cur)
5912 {
5913 	int ret;
5914 	struct ext4_map_blocks map;
5915 
5916 	map.m_lblk = *cur;
5917 	map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;
5918 
5919 	ret = ext4_map_blocks(NULL, inode, &map, 0);
5920 	if (ret != 0)
5921 		return;
5922 	*cur = *cur + map.m_len;
5923 }
5924 
5925 /* Count number of blocks used by this inode and update i_blocks */
5926 int ext4_ext_replay_set_iblocks(struct inode *inode)
5927 {
5928 	struct ext4_ext_path *path = NULL, *path2 = NULL;
5929 	struct ext4_extent *ex;
5930 	ext4_lblk_t cur = 0, end;
5931 	int numblks = 0, i, ret = 0;
5932 	ext4_fsblk_t cmp1, cmp2;
5933 	struct ext4_map_blocks map;
5934 
5935 	/* Determine the size of the file first */
5936 	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
5937 					EXT4_EX_NOCACHE);
5938 	if (IS_ERR(path))
5939 		return PTR_ERR(path);
5940 	ex = path[path->p_depth].p_ext;
5941 	if (!ex) {
5942 		ext4_ext_drop_refs(path);
5943 		kfree(path);
5944 		goto out;
5945 	}
5946 	end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
5947 	ext4_ext_drop_refs(path);
5948 	kfree(path);
5949 
5950 	/* Count the number of data blocks */
5951 	cur = 0;
5952 	while (cur < end) {
5953 		map.m_lblk = cur;
5954 		map.m_len = end - cur;
5955 		ret = ext4_map_blocks(NULL, inode, &map, 0);
5956 		if (ret < 0)
5957 			break;
5958 		if (ret > 0)
5959 			numblks += ret;
5960 		cur = cur + map.m_len;
5961 	}
5962 
5963 	/*
5964 	 * Count the number of extent tree blocks. We do it by looking up
5965 	 * two successive extents and determining the difference between
5966 	 * their paths. When the paths differ for two successive extents,
5967 	 * we compare the blocks in the path at each level and increment
5968 	 * iblocks by the total number of differences found.
5969 	 */
5970 	cur = 0;
5971 	skip_hole(inode, &cur);
5972 	path = ext4_find_extent(inode, cur, NULL, 0);
5973 	if (IS_ERR(path))
5974 		goto out;
5975 	numblks += path->p_depth;
5976 	ext4_ext_drop_refs(path);
5977 	kfree(path);
5978 	while (cur < end) {
5979 		path = ext4_find_extent(inode, cur, NULL, 0);
5980 		if (IS_ERR(path))
5981 			break;
5982 		ex = path[path->p_depth].p_ext;
5983 		if (!ex) {
5984 			ext4_ext_drop_refs(path);
5985 			kfree(path);
5986 			return 0;
5987 		}
5988 		cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
5989 					ext4_ext_get_actual_len(ex));
5990 		skip_hole(inode, &cur);
5991 
5992 		path2 = ext4_find_extent(inode, cur, NULL, 0);
5993 		if (IS_ERR(path2)) {
5994 			ext4_ext_drop_refs(path);
5995 			kfree(path);
5996 			break;
5997 		}
5998 		for (i = 0; i <= max(path->p_depth, path2->p_depth); i++) {
5999 			cmp1 = cmp2 = 0;
6000 			if (i <= path->p_depth)
6001 				cmp1 = path[i].p_bh ?
6002 					path[i].p_bh->b_blocknr : 0;
6003 			if (i <= path2->p_depth)
6004 				cmp2 = path2[i].p_bh ?
6005 					path2[i].p_bh->b_blocknr : 0;
6006 			if (cmp1 != cmp2 && cmp2 != 0)
6007 				numblks++;
6008 		}
6009 		ext4_ext_drop_refs(path);
6010 		ext4_ext_drop_refs(path2);
6011 		kfree(path);
6012 		kfree(path2);
6013 	}
6014 
6015 out:
6016 	inode->i_blocks = numblks << (inode->i_sb->s_blocksize_bits - 9);
6017 	ext4_mark_inode_dirty(NULL, inode);
6018 	return 0;
6019 }
6020 
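/*
 * Illustrative sketch (not part of the kernel build): i_blocks is kept
 * in 512-byte sector units, hence the shift by (blocksize bits - 9)
 * above.  Worked example: with 4KiB blocks (s_blocksize_bits == 12),
 * each filesystem block accounts for 4096 / 512 == 8 sectors, i.e.
 * numblks << 3.
 */
#if 0	/* example only */
#include <stdint.h>

static inline uint64_t fs_blocks_to_i_blocks(uint64_t numblks,
					     unsigned int blkbits)
{
	return numblks << (blkbits - 9);	/* fs blocks -> 512B sectors */
}
#endif
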
6021 int ext4_ext_clear_bb(struct inode *inode)
6022 {
6023 	struct ext4_ext_path *path = NULL;
6024 	struct ext4_extent *ex;
6025 	ext4_lblk_t cur = 0, end;
6026 	int j, ret = 0;
6027 	struct ext4_map_blocks map;
6028 
6029 	/* Determine the size of the file first */
6030 	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
6031 					EXT4_EX_NOCACHE);
6032 	if (IS_ERR(path))
6033 		return PTR_ERR(path);
6034 	ex = path[path->p_depth].p_ext;
6035 	if (!ex) {
6036 		ext4_ext_drop_refs(path);
6037 		kfree(path);
6038 		return 0;
6039 	}
6040 	end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
6041 	ext4_ext_drop_refs(path);
6042 	kfree(path);
6043 
6044 	cur = 0;
6045 	while (cur < end) {
6046 		map.m_lblk = cur;
6047 		map.m_len = end - cur;
6048 		ret = ext4_map_blocks(NULL, inode, &map, 0);
6049 		if (ret < 0)
6050 			break;
6051 		if (ret > 0) {
6052 			path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
6053 			if (!IS_ERR_OR_NULL(path)) {
6054 				for (j = 0; j < path->p_depth; j++) {
6055 
6056 					ext4_mb_mark_bb(inode->i_sb,
6057 							path[j].p_block, 1, 0);
6058 				}
6059 				ext4_ext_drop_refs(path);
6060 				kfree(path);
6061 			}
6062 			ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0);
6063 		}
6064 		cur = cur + map.m_len;
6065 	}
6066 
6067 	return 0;
6068 }
6069