xref: /openbmc/linux/fs/ext4/extents.c (revision 96de0e252cedffad61b3cb5e05662c591898e69a)
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/ext4_jbd2.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <linux/ext4_fs_extents.h>
#include <asm/uaccess.h>

/*
 * ext_pblock:
 * combine low and high parts of physical block number into ext4_fsblk_t
 */
static ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ex->ee_start_lo);
	block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
	return block;
}
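
/*
 * Example: with ee_start_hi == 0x0001 and ee_start_lo == 0x00000000 the
 * combined physical block is 0x100000000 (4294967296).  Once the 16-bit
 * high part has been widened to the 64-bit ext4_fsblk_t, the double
 * shift ((x << 31) << 1) is just x << 32, so the low and high parts
 * together form a 48-bit physical block number.
 */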

/*
 * idx_pblock:
 * combine low and high parts of a leaf physical block number into ext4_fsblk_t
 */
static ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ix->ei_leaf_lo);
	block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
	return block;
}

/*
 * ext4_ext_store_pblock:
 * stores a large physical block number into an extent struct,
 * breaking it into parts
 */
static void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
{
	ex->ee_start_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}

/*
 * ext4_idx_store_pblock:
 * stores a large physical block number into an index struct,
 * breaking it into parts
 */
static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
{
	ix->ei_leaf_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}
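
/*
 * The two store helpers are the inverse of ext_pblock()/idx_pblock():
 * for pb == 0x123456789abc the low part becomes 0x56789abc and the
 * high part 0x1234, so a store followed by a load returns the low
 * 48 bits of pb unchanged.  Values wider than 48 bits cannot be
 * represented in the on-disk extent format.
 */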

static handle_t *ext4_ext_journal_restart(handle_t *handle, int needed)
{
	int err;

	if (handle->h_buffer_credits > needed)
		return handle;
	if (!ext4_journal_extend(handle, needed))
		return handle;
	err = ext4_journal_restart(handle, needed);
	if (err)
		return ERR_PTR(err);

	return handle;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	int err;
	if (path->p_bh) {
		/* path points to block */
		err = ext4_journal_dirty_metadata(handle, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_fsblk_t block)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t bg_start;
	ext4_grpblk_t colour;
	int depth;

	if (path) {
		struct ext4_extent *ex;
		depth = path->p_depth;

		/* try to predict block placement */
		ex = path[depth].p_ext;
		if (ex)
			return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));

		/* it looks like the index is empty;
		 * try to find the starting block from the index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	bg_start = (ei->i_block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) +
		le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block);
	colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour + block;
}
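
/*
 * The goal heuristic above prefers, in order: the block right after the
 * extent found in the path (offset by the logical distance), the block
 * holding the index/leaf itself, and finally the start of the inode's
 * block group plus a PID-based colour.  With the common 4KB block size
 * (32768 blocks per group) the colour works out to
 * (current->pid % 16) * 2048 blocks, which spreads concurrent
 * allocators across the group.
 */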

static ext4_fsblk_t
ext4_ext_new_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_block(handle, inode, goal, err);
	return newblock;
}

static int ext4_ext_space_block(struct inode *inode)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (size > 6)
		size = 6;
#endif
	return size;
}

static int ext4_ext_space_block_idx(struct inode *inode)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (size > 5)
		size = 5;
#endif
	return size;
}

static int ext4_ext_space_root(struct inode *inode)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (size > 3)
		size = 3;
#endif
	return size;
}

static int ext4_ext_space_root_idx(struct inode *inode)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (size > 4)
		size = 4;
#endif
	return size;
}
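
/*
 * The extent header, extent and index entries are each 12 bytes on
 * disk, so with the common 4KB block size a full index or leaf block
 * holds (4096 - 12) / 12 = 340 entries, while the 60-byte i_data area
 * in the inode holds (60 - 12) / 12 = 4 entries at the root.  This 340
 * is the figure the credit estimate in
 * ext4_ext_calc_credits_for_insert() below refers to.
 */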

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode);
		else
			max = ext4_ext_space_root_idx(inode);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode);
		else
			max = ext4_ext_space_block_idx(inode);
	}

	return max;
}

static int __ext4_ext_check_header(const char *function, struct inode *inode,
					struct ext4_extent_header *eh,
					int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error(inode->i_sb, function,
			"bad header in inode #%lu: %s - magic %x, "
			"entries %u, max %u(%u), depth %u(%u)",
			inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}

#define ext4_ext_check_header(inode, eh, depth)	\
	__ext4_ext_check_header(__FUNCTION__, inode, eh, depth)

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
				  idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_get_actual_len(ex), ext_pblock(ex));
	}
	ext_debug("\n");
}
#else
#define ext4_ext_show_path(inode,path)
#define ext4_ext_show_leaf(inode,path)
#endif

static void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, int block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug("binsearch for %d(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk("k=%d, ix=0x%p, first=0x%p\n", k,
					ix, EXT_FIRST_INDEX(eh));
				printk("%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}
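
/*
 * Example: with index entries whose ei_block values are {10, 100, 200},
 * a search for block 150 narrows l and r until l points just past the
 * entry for 100, so p_idx = l - 1 selects it -- the rightmost index
 * covering logical blocks <= 150.  Starting l at EXT_FIRST_INDEX() + 1
 * guarantees that l - 1 never underruns the array.
 */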

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %d:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext_pblock(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}
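
/*
 * The leaf-level search keeps the same invariant: p_ext ends up at the
 * last extent with ee_block <= block.  For an empty leaf (the split/add
 * case above) the early return leaves p_ext exactly as the caller
 * initialized it, i.e. NULL from ext4_ext_find_extent().
 */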

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}

struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);
	if (ext4_ext_check_header(inode, eh, depth))
		return ERR_PTR(-EIO);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;

	i = depth;
	/* walk through the tree */
	while (i) {
		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_bread(inode->i_sb, path[ppos].p_block);
		if (!bh)
			goto err;

		eh = ext_block_hdr(bh);
		ppos++;
		BUG_ON(ppos > depth);
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (ext4_ext_check_header(inode, eh, i))
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_hdr = eh;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}
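
/*
 * For a depth-2 tree the walk above leaves path[0] holding the root
 * header in the inode body, path[1] the level-1 index block chosen by
 * ext4_ext_binsearch_idx(), and path[2] the leaf in which
 * ext4_ext_binsearch() finally sets p_ext.  The path array is
 * allocated with depth + 2 slots so it can be reused after the tree
 * grows one level deeper.
 */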

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *curp,
				int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %llu. "
					"move %d from 0x%p to 0x%p\n",
					logical, ptr, len,
					(curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		}
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %llu. "
				"move %d from 0x%p to 0x%p\n",
				logical, ptr, len,
				curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
		ix = curp->p_idx;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1);

	BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
			     > le16_to_cpu(curp->p_hdr->eh_max));
	BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct ext4_extent *ex;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: for now the decision is the simplest one: at the current extent */

	/* if the current leaf is to be split, then we should use
	 * the border from the split point */
	BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If an error occurs, we break processing and mark the
	 * filesystem read-only.  The index won't be inserted and the
	 * tree will stay in a consistent state.  The next mount will
	 * repair buffers too.
	 */

	/*
	 * Get an array to track all the allocated blocks.
	 * We need this to handle errors and free the blocks
	 * upon failure.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	BUG_ON(newblock == 0);
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	ex = EXT_FIRST_EXTENT(neh);

	/* move the remainder of path[depth] to the new leaf */
	BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
	/* start copy from next extent */
	/* TODO: we could do it by single memmove */
	m = 0;
	path[depth].p_ext++;
	while (path[depth].p_ext <=
			EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:%d in new leaf %llu\n",
				le32_to_cpu(path[depth].p_ext->ee_block),
				ext_pblock(path[depth].p_ext),
				ext4_ext_get_actual_len(path[depth].p_ext),
				newblock);
		/*memmove(ex++, path[depth].p_ext++,
				sizeof(struct ext4_extent));
		neh->eh_entries++;*/
		path[depth].p_ext++;
		m++;
	}
	if (m) {
		memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
		neh->eh_entries = cpu_to_le16(le16_to_cpu(neh->eh_entries)+m);
	}

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_journal_dirty_metadata(handle, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		path[depth].p_hdr->eh_entries =
		     cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	BUG_ON(k < 0);
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, (ext4_fsblk_t)newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %lu -> %llu\n", i,
				newblock, (unsigned long) le32_to_cpu(border),
				oldblock);
		/* copy indexes */
		m = 0;
		path[i].p_idx++;

		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
				EXT_LAST_INDEX(path[i].p_hdr));
		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", i,
					le32_to_cpu(path[i].p_idx->ei_block),
					idx_pblock(path[i].p_idx),
					newblock);
			/*memmove(++fidx, path[i].p_idx++,
					sizeof(struct ext4_extent_idx));
			neh->eh_entries++;
			BUG_ON(neh->eh_entries > neh->eh_max);*/
			path[i].p_idx++;
			m++;
		}
		if (m) {
			memmove(++fidx, path[i].p_idx - m,
				sizeof(struct ext4_extent_idx) * m);
			neh->eh_entries =
				cpu_to_le16(le16_to_cpu(neh->eh_entries) + m);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_journal_dirty_metadata(handle, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			path[i].p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path[i].p_hdr->eh_entries)-m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, ablocks[i], 1);
		}
	}
	kfree(ablocks);

	return err;
}
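
/*
 * Example: splitting a depth-2 tree that has a free slot at level
 * at == 1 allocates depth - at == 1 block (the new leaf) and builds
 * k == depth - at - 1 == 0 intermediate index blocks; the border (the
 * first logical block moved right) is then inserted into the existing
 * level-1 index.  With at == 0 a second block is allocated for a new
 * level-1 index and the border goes into the root instead.
 */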

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* the old root could have indexes or leaves,
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_journal_dirty_metadata(handle, bh);
	if (err)
		goto out;

	/* create index in new top-level index: num,max,pointer */
	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		goto out;

	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode));
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);

	if (path[0].p_hdr->eh_depth)
		curp->p_idx->ei_block =
			EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
	else
		curp->p_idx->ei_block =
			EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	ext4_idx_store_pblock(curp->p_idx, newblock);

	neh = ext_inode_hdr(inode);
	fidx = EXT_FIRST_INDEX(neh);
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(fidx->ei_block), idx_pblock(fidx));

	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
out:
	brelse(bh);

	return err;
}
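
/*
 * Growing in depth copies the entire root (the 60-byte i_data area)
 * into a freshly allocated block, rewrites the root as a single index
 * entry pointing at that block, and bumps eh_depth by one.  The copy's
 * eh_max must be recomputed above because a whole block holds far more
 * entries than the inode body it was copied from.
 */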

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up the tree and look for a free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use the already allocated block for the index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found an index with a free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, path, newext, i);

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
					    le32_to_cpu(newext->ee_block),
					    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, path, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
					    le32_to_cpu(newext->ee_block),
					    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only the first grow (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static unsigned long
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCK;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCK;
}
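
/*
 * Example: in a depth-1 tree whose leaf holds extents starting at
 * logical blocks 0 and 50, a path positioned on the first extent makes
 * this return 50.  Positioned on the last extent of a leaf, the loop
 * walks up and returns the first block of the next index entry, or
 * EXT_MAX_BLOCK when the path sits on the rightmost edge of the tree.
 */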

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCK
 */
static unsigned ext4_ext_next_leaf_block(struct inode *inode,
					struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-depth tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCK;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_correct_indexes:
 * if the leaf gets modified and the modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct the tree in all cases?
 */
int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	BUG_ON(ex == NULL);
	BUG_ON(eh == NULL);

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct the tree only if the first extent
		 * in the leaf was modified */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}

static int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
				struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len, max_len;

	/*
	 * Make sure that either both extents are uninitialized, or
	 * both are _not_.
	 */
	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
		return 0;

	if (ext4_ext_is_uninitialized(ex1))
		max_len = EXT_UNINIT_MAX_LEN;
	else
		max_len = EXT_INIT_MAX_LEN;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > max_len)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (le16_to_cpu(ex1->ee_len) >= 4)
		return 0;
#endif

	if (ext_pblock(ex1) + ext1_ee_len == ext_pblock(ex2))
		return 1;
	return 0;
}
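
/*
 * Example: ex1 = {ee_block 100, len 8, pblock 500} and
 * ex2 = {ee_block 108, pblock 508} are logically and physically
 * contiguous, so if both share the same initialization state and the
 * combined length stays within max_len, the function returns 1.
 */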

/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
int ext4_ext_try_to_merge(struct inode *inode,
			  struct ext4_ext_path *path,
			  struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0;
	int uninitialized = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries) - 1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			ext4_error(inode->i_sb, "ext4_ext_try_to_merge",
			   "inode#%lu, eh->eh_entries = 0!", inode->i_ino);
	}

	return merge_done;
}

/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
unsigned int ext4_ext_check_overlap(struct inode *inode,
				    struct ext4_extent *newext,
				    struct ext4_ext_path *path)
{
	unsigned long b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = le32_to_cpu(path[depth].p_ext->ee_block);

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCK)
			goto out;
	}

	/* check for wrap through zero */
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCK - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}
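
/*
 * Example: inserting newext with ee_block 100 and length 50 while an
 * existing extent starts at block 120 gives b1 + len1 > b2, so ee_len
 * is trimmed to b2 - b1 = 20 and 1 is returned; the caller then only
 * covers the non-overlapping range [100, 119].
 */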

/*
 * ext4_ext_insert_extent:
 * tries to merge the requested extent into the existing extent or
 * inserts the requested extent as a new one into the tree,
 * creating a new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext)
{
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err, next;
	unsigned uninitialized = 0;

	BUG_ON(ext4_ext_get_actual_len(newext) == 0);
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	BUG_ON(path[depth].p_hdr == NULL);

	/* try to insert block into found extent and return */
	if (ex && ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append %d block to %d:%d (from %llu)\n",
				ext4_ext_get_actual_len(newext),
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex), ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			return err;

		/*
		 * ext4_can_extents_be_merged should have checked that either
		 * both extents are uninitialized, or both aren't. Thus we
		 * need to check only one of them here.
		 */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);
		eh = path[depth].p_hdr;
		nearex = ex;
		goto merge;
	}

repeat:
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably the next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = ext4_ext_next_leaf_block(inode, path);
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
	    && next != EXT_MAX_BLOCK) {
		ext_debug("next leaf block - %d\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto repeat;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	 */
	err = ext4_ext_create_new_leaf(handle, inode, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create the first one */
		ext_debug("first extent in the leaf: %d:%llu:%d\n",
				le32_to_cpu(newext->ee_block),
				ext_pblock(newext),
				ext4_ext_get_actual_len(newext));
		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
	} else if (le32_to_cpu(newext->ee_block)
			   > le32_to_cpu(nearex->ee_block)) {
/*		BUG_ON(newext->ee_block == nearex->ee_block); */
		if (nearex != EXT_LAST_EXTENT(eh)) {
			len = EXT_MAX_EXTENT(eh) - nearex;
			len = (len - 1) * sizeof(struct ext4_extent);
			len = len < 0 ? 0 : len;
			ext_debug("insert %d:%llu:%d after: nearest 0x%p, "
					"move %d from 0x%p to 0x%p\n",
					le32_to_cpu(newext->ee_block),
					ext_pblock(newext),
					ext4_ext_get_actual_len(newext),
					nearex, len, nearex + 1, nearex + 2);
			memmove(nearex + 2, nearex + 1, len);
		}
		path[depth].p_ext = nearex + 1;
	} else {
		BUG_ON(newext->ee_block == nearex->ee_block);
		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
		len = len < 0 ? 0 : len;
		ext_debug("insert %d:%llu:%d before: nearest 0x%p, "
				"move %d from 0x%p to 0x%p\n",
				le32_to_cpu(newext->ee_block),
				ext_pblock(newext),
				ext4_ext_get_actual_len(newext),
				nearex, len, nearex + 1, nearex + 2);
		memmove(nearex + 1, nearex, len);
		path[depth].p_ext = nearex;
	}

	eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)+1);
	nearex = path[depth].p_ext;
	nearex->ee_block = newext->ee_block;
	ext4_ext_store_pblock(nearex, ext_pblock(newext));
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents to the right */
	ext4_ext_try_to_merge(inode, path, nearex);

	/* try to merge extents to the left */

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		kfree(npath);
	}
	ext4_ext_tree_changed(inode);
	ext4_ext_invalidate_cache(inode);
	return err;
}

int ext4_ext_walk_space(struct inode *inode, unsigned long block,
			unsigned long num, ext_prepare_callback func,
			void *cbdata)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_ext_cache cbex;
	struct ext4_extent *ex;
	unsigned long next, start = 0, end = 0;
	unsigned long last = block + num;
	int depth, exists, err = 0;

	BUG_ON(func == NULL);
	BUG_ON(inode == NULL);

	while (block < last && block != EXT_MAX_BLOCK) {
		num = last - block;
		/* find extent for this block */
		path = ext4_ext_find_extent(inode, block, path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			path = NULL;
			break;
		}

		depth = ext_depth(inode);
		BUG_ON(path[depth].p_hdr == NULL);
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);

		exists = 0;
		if (!ex) {
			/* there is no extent yet, so try to allocate
			 * all requested space */
			start = block;
			end = block + num;
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			start = block;
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
				end = block + num;
		} else if (block >= le32_to_cpu(ex->ee_block)
					+ ext4_ext_get_actual_len(ex)) {
			/* need to allocate space after found extent */
			start = block;
			end = block + num;
			if (end >= next)
				end = next;
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 * by found extent
			 */
			start = block;
			end = le32_to_cpu(ex->ee_block)
				+ ext4_ext_get_actual_len(ex);
			if (block + num < end)
				end = block + num;
			exists = 1;
		} else {
			BUG();
		}
		BUG_ON(end <= start);

		if (!exists) {
			cbex.ec_block = start;
			cbex.ec_len = end - start;
			cbex.ec_start = 0;
			cbex.ec_type = EXT4_EXT_CACHE_GAP;
		} else {
			cbex.ec_block = le32_to_cpu(ex->ee_block);
			cbex.ec_len = ext4_ext_get_actual_len(ex);
			cbex.ec_start = ext_pblock(ex);
			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
		}

		BUG_ON(cbex.ec_len == 0);
		err = func(inode, path, &cbex, cbdata);
		ext4_ext_drop_refs(path);

		if (err < 0)
			break;
		if (err == EXT_REPEAT)
			continue;
		else if (err == EXT_BREAK) {
			err = 0;
			break;
		}

		if (ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */
			kfree(path);
			path = NULL;
		}

		block = cbex.ec_block + cbex.ec_len;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	return err;
}

static void
ext4_ext_put_in_cache(struct inode *inode, __u32 block,
			__u32 len, ext4_fsblk_t start, int type)
{
	struct ext4_ext_cache *cex;
	BUG_ON(len == 0);
	cex = &EXT4_I(inode)->i_cached_extent;
	cex->ec_type = type;
	cex->ec_block = block;
	cex->ec_len = len;
	cex->ec_start = start;
}

/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
static void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
				unsigned long block)
{
	int depth = ext_depth(inode);
	unsigned long lblock, len;
	struct ext4_extent *ex;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		lblock = 0;
		len = EXT_MAX_BLOCK;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		lblock = block;
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %lu [%lu:%lu]",
				(unsigned long) block,
				(unsigned long) le32_to_cpu(ex->ee_block),
				(unsigned long) ext4_ext_get_actual_len(ex));
	} else if (block >= le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex)) {
		lblock = le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex);
		len = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%lu:%lu] %lu",
				(unsigned long) le32_to_cpu(ex->ee_block),
				(unsigned long) ext4_ext_get_actual_len(ex),
				(unsigned long) block);
		BUG_ON(len == lblock);
		len = len - lblock;
	} else {
		lblock = len = 0;
		BUG();
	}

	ext_debug(" -> %lu:%lu\n", (unsigned long) lblock, len);
	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
}
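
/*
 * Example: if the leaf holds a single extent {ee_block 100, len 10},
 * a lookup of block 40 caches the gap [40, 99] (length 60), while a
 * lookup of block 115 caches the gap starting at 110 and running up to
 * the next allocated block.  Later negative lookups in those ranges
 * are answered from the cache without another tree walk.
 */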

static int
ext4_ext_in_cache(struct inode *inode, unsigned long block,
			struct ext4_extent *ex)
{
	struct ext4_ext_cache *cex;

	cex = &EXT4_I(inode)->i_cached_extent;

	/* has cache valid data? */
	if (cex->ec_type == EXT4_EXT_CACHE_NO)
		return EXT4_EXT_CACHE_NO;

	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
	if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
		ex->ee_block = cpu_to_le32(cex->ec_block);
		ext4_ext_store_pblock(ex, cex->ec_start);
		ex->ee_len = cpu_to_le16(cex->ec_len);
		ext_debug("%lu cached by %lu:%lu:%llu\n",
				(unsigned long) block,
				(unsigned long) cex->ec_block,
				(unsigned long) cex->ec_len,
				cex->ec_start);
		return cex->ec_type;
	}

	/* not in cache */
	return EXT4_EXT_CACHE_NO;
}

/*
 * ext4_ext_rm_idx:
 * removes index from the index block.
 * It's used in truncate case only, thus all requests are for
 * last index in the block only.
 */
int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path)
{
	struct buffer_head *bh;
	int err;
	ext4_fsblk_t leaf;

	/* free index block */
	path--;
	leaf = idx_pblock(path->p_idx);
	BUG_ON(path->p_hdr->eh_entries == 0);
	err = ext4_ext_get_access(handle, inode, path);
	if (err)
		return err;
	path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1);
	err = ext4_ext_dirty(handle, inode, path);
	if (err)
		return err;
	ext_debug("index is empty, remove it, free block %llu\n", leaf);
	bh = sb_find_get_block(inode->i_sb, leaf);
	ext4_forget(handle, 1, inode, bh, leaf);
	ext4_free_blocks(handle, inode, leaf, 1);
	return err;
}

/*
 * ext4_ext_calc_credits_for_insert:
 * This routine returns the max. credits that the extent tree can consume.
 * It should be OK for low-performance paths like ->writepage().
 * To allow many writing processes to fit into a single transaction,
 * the caller should calculate credits under truncate_mutex and
 * pass the actual path.
 */
int ext4_ext_calc_credits_for_insert(struct inode *inode,
						struct ext4_ext_path *path)
{
	int depth, needed;

	if (path) {
		/* probably there is space in the leaf? */
		depth = ext_depth(inode);
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max))
			return 1;
	}

	/*
	 * given a 32-bit logical block (4294967296 blocks), the max. tree
	 * can be 4 levels in depth -- 4 * 340^4 == 53453440000.
	 * Let's also add one more level for imbalance.
	 */
	depth = 5;

	/* allocation of new data block(s) */
	needed = 2;

	/*
	 * tree can be full, so it would need to grow in depth:
	 * we need one credit to modify the old root, credits for
	 * the new root will be added in split accounting
	 */
	needed += 1;

	/*
	 * Index split can happen, we would need:
	 *    allocate intermediate indexes (bitmap + group)
	 *  + change two blocks at each level, but root (already included)
	 */
	needed += (depth * 2) + (depth * 2);

	/* any allocation modifies superblock */
	needed += 1;

	return needed;
}
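
/*
 * Without a path the estimate above adds up to 24 credits:
 * 2 for new data block(s), 1 for growing the root, 4 per level for
 * intermediate index allocation and two modified blocks at each of
 * the 5 levels (20 total), and 1 for the superblock.
 */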

static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
				struct ext4_extent *ex,
				unsigned long from, unsigned long to)
{
	struct buffer_head *bh;
	unsigned short ee_len = ext4_ext_get_actual_len(ex);
	int i;

#ifdef EXTENTS_STATS
	{
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode);
		spin_unlock(&sbi->s_ext_stats_lock);
	}
#endif
	if (from >= le32_to_cpu(ex->ee_block)
	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
		/* tail removal */
		unsigned long num;
		ext4_fsblk_t start;
		num = le32_to_cpu(ex->ee_block) + ee_len - from;
		start = ext_pblock(ex) + ee_len - num;
		ext_debug("free last %lu blocks starting %llu\n", num, start);
		for (i = 0; i < num; i++) {
			bh = sb_find_get_block(inode->i_sb, start + i);
			ext4_forget(handle, 0, inode, bh, start + i);
		}
		ext4_free_blocks(handle, inode, start, num);
	} else if (from == le32_to_cpu(ex->ee_block)
		   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
		printk("strange request: removal %lu-%lu from %u:%u\n",
			from, to, le32_to_cpu(ex->ee_block), ee_len);
	} else {
		printk("strange request: removal(2) %lu-%lu from %u:%u\n",
			from, to, le32_to_cpu(ex->ee_block), ee_len);
	}
	return 0;
}

static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
		struct ext4_ext_path *path, unsigned long start)
{
	int err = 0, correct_index = 0;
	int depth = ext_depth(inode), credits;
	struct ext4_extent_header *eh;
	unsigned a, b, block, num;
	unsigned long ex_ee_block;
	unsigned short ex_ee_len;
	unsigned uninitialized = 0;
	struct ext4_extent *ex;

	/* the header must be checked already in ext4_ext_remove_space() */
	ext_debug("truncate since %lu in leaf\n", start);
	if (!path[depth].p_hdr)
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	eh = path[depth].p_hdr;
	BUG_ON(eh == NULL);

	/* find where to start removing */
	ex = EXT_LAST_EXTENT(eh);

	ex_ee_block = le32_to_cpu(ex->ee_block);
	if (ext4_ext_is_uninitialized(ex))
		uninitialized = 1;
	ex_ee_len = ext4_ext_get_actual_len(ex);

	while (ex >= EXT_FIRST_EXTENT(eh) &&
			ex_ee_block + ex_ee_len > start) {
		ext_debug("remove ext %lu:%u\n", ex_ee_block, ex_ee_len);
		path[depth].p_ext = ex;

		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
			ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;

		ext_debug("  border %u:%u\n", a, b);

		if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
			block = 0;
			num = 0;
			BUG();
		} else if (a != ex_ee_block) {
			/* remove tail of the extent */
			block = ex_ee_block;
			num = a - block;
		} else if (b != ex_ee_block + ex_ee_len - 1) {
			/* remove head of the extent */
			block = a;
			num = b - a;
			/* there is no "make a hole" API yet */
			BUG();
		} else {
			/* remove whole extent: excellent! */
			block = ex_ee_block;
			num = 0;
			BUG_ON(a != ex_ee_block);
			BUG_ON(b != ex_ee_block + ex_ee_len - 1);
		}

		/* at present, extent can't cross block group: */
		/* leaf + bitmap + group desc + sb + inode */
		credits = 5;
		if (ex == EXT_FIRST_EXTENT(eh)) {
			correct_index = 1;
			credits += (ext_depth(inode)) + 1;
		}
#ifdef CONFIG_QUOTA
		credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
#endif

		handle = ext4_ext_journal_restart(handle, credits);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto out;
		}

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		err = ext4_remove_blocks(handle, inode, ex, a, b);
		if (err)
			goto out;

		if (num == 0) {
			/* this extent is removed; mark slot entirely unused */
			ext4_ext_store_pblock(ex, 0);
			eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
		}

		ex->ee_block = cpu_to_le32(block);
		ex->ee_len = cpu_to_le16(num);
		/*
		 * Do not mark uninitialized if all the blocks in the
		 * extent have been removed.
		 */
		if (uninitialized && num)
			ext4_ext_mark_uninitialized(ex);

		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		ext_debug("new extent: %u:%u:%llu\n", block, num,
				ext_pblock(ex));
		ex--;
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = ext4_ext_get_actual_len(ex);
	}

	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);

	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path + depth);

out:
	return err;
}
1885 
1886 /*
1887  * ext4_ext_more_to_rm:
1888  * returns 1 if current index has to be freed (even partial)
1889  */
1890 static int
1891 ext4_ext_more_to_rm(struct ext4_ext_path *path)
1892 {
1893 	BUG_ON(path->p_idx == NULL);
1894 
1895 	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
1896 		return 0;
1897 
1898 	/*
1899 	 * if truncation at the deeper level happened, it wasn't partial,
1900 	 * so we have to consider the current index for truncation
1901 	 */
1902 	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
1903 		return 0;
1904 	return 1;
1905 }
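
/*
 * Illustrative note (not in the original source): path->p_block holds
 * the eh_entries count that was saved before descending into the child
 * block.  If entries were removed from this index block while the
 * subtree below was being freed, eh_entries no longer matches p_block
 * and the walk keeps going; once a deeper removal stops partway,
 * eh_entries == p_block and this function returns 0, so the caller
 * climbs back up a level.
 */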
1906 
1907 int ext4_ext_remove_space(struct inode *inode, unsigned long start)
1908 {
1909 	struct super_block *sb = inode->i_sb;
1910 	int depth = ext_depth(inode);
1911 	struct ext4_ext_path *path;
1912 	handle_t *handle;
1913 	int i = 0, err = 0;
1914 
1915 	ext_debug("truncate since %lu\n", start);
1916 
1917 	/* probably the first extent we free will be the last one in its block */
1918 	handle = ext4_journal_start(inode, depth + 1);
1919 	if (IS_ERR(handle))
1920 		return PTR_ERR(handle);
1921 
1922 	ext4_ext_invalidate_cache(inode);
1923 
1924 	/*
1925 	 * We start scanning from right side, freeing all the blocks
1926 	 * after i_size and walking into the tree depth-wise.
1927 	 */
1928 	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
1929 	if (path == NULL) {
1930 		ext4_journal_stop(handle);
1931 		return -ENOMEM;
1932 	}
1933 	path[0].p_hdr = ext_inode_hdr(inode);
1934 	if (ext4_ext_check_header(inode, path[0].p_hdr, depth)) {
1935 		err = -EIO;
1936 		goto out;
1937 	}
1938 	path[0].p_depth = depth;
1939 
1940 	while (i >= 0 && err == 0) {
1941 		if (i == depth) {
1942 			/* this is a leaf block */
1943 			err = ext4_ext_rm_leaf(handle, inode, path, start);
1944 			/* root level has p_bh == NULL, brelse() eats this */
1945 			brelse(path[i].p_bh);
1946 			path[i].p_bh = NULL;
1947 			i--;
1948 			continue;
1949 		}
1950 
1951 		/* this is an index block */
1952 		if (!path[i].p_hdr) {
1953 			ext_debug("initialize header\n");
1954 			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
1955 		}
1956 
1957 		if (!path[i].p_idx) {
1958 			/* this level hasn't been touched yet */
1959 			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
1960 			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
1961 			ext_debug("init index ptr: hdr 0x%p, num %d\n",
1962 				  path[i].p_hdr,
1963 				  le16_to_cpu(path[i].p_hdr->eh_entries));
1964 		} else {
1965 			/* we were already here, move on to the next index */
1966 			path[i].p_idx--;
1967 		}
1968 
1969 		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
1970 				i, EXT_FIRST_INDEX(path[i].p_hdr),
1971 				path[i].p_idx);
1972 		if (ext4_ext_more_to_rm(path + i)) {
1973 			struct buffer_head *bh;
1974 			/* go to the next level */
1975 			ext_debug("move to level %d (block %llu)\n",
1976 				  i + 1, idx_pblock(path[i].p_idx));
1977 			memset(path + i + 1, 0, sizeof(*path));
1978 			bh = sb_bread(sb, idx_pblock(path[i].p_idx));
1979 			if (!bh) {
1980 				/* should we reset i_size? */
1981 				err = -EIO;
1982 				break;
1983 			}
1984 			if (WARN_ON(i + 1 > depth)) {
1985 				err = -EIO;
1986 				break;
1987 			}
1988 			if (ext4_ext_check_header(inode, ext_block_hdr(bh),
1989 							depth - i - 1)) {
1990 				err = -EIO;
1991 				break;
1992 			}
1993 			path[i + 1].p_bh = bh;
1994 
1995 			/* save the actual number of index entries, since this
1996 			 * number changes at the next iteration */
1997 			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
1998 			i++;
1999 		} else {
2000 			/* we finished processing this index, go up */
2001 			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2002 				/* index is empty, remove it;
2003 				 * handle must already be prepared by
2004 				 * ext4_ext_rm_leaf() */
2005 				err = ext4_ext_rm_idx(handle, inode, path + i);
2006 			}
2007 			/* root level has p_bh == NULL, brelse() eats this */
2008 			brelse(path[i].p_bh);
2009 			path[i].p_bh = NULL;
2010 			i--;
2011 			ext_debug("return to level %d\n", i);
2012 		}
2013 	}
2014 
2015 	/* TODO: flexible tree reduction should be here */
2016 	if (path->p_hdr->eh_entries == 0) {
2017 		/*
2018 		 * truncate to zero freed all the tree,
2019 		 * so we need to correct eh_depth
2020 		 */
2021 		err = ext4_ext_get_access(handle, inode, path);
2022 		if (err == 0) {
2023 			ext_inode_hdr(inode)->eh_depth = 0;
2024 			ext_inode_hdr(inode)->eh_max =
2025 				cpu_to_le16(ext4_ext_space_root(inode));
2026 			err = ext4_ext_dirty(handle, inode, path);
2027 		}
2028 	}
2029 out:
2030 	ext4_ext_tree_changed(inode);
2031 	ext4_ext_drop_refs(path);
2032 	kfree(path);
2033 	ext4_journal_stop(handle);
2034 
2035 	return err;
2036 }
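
/*
 * Illustrative walk (not part of the original source): for a depth-1
 * tree the loop above starts at the root (i = 0), picks the rightmost
 * index, reads the leaf block it points to and descends (i = 1).
 * ext4_ext_rm_leaf() then frees what it can from that leaf, i drops
 * back to 0, and the next index to the left is tried, until
 * ext4_ext_more_to_rm() reports nothing more to free at the root.
 */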
2037 
2038 /*
2039  * called at mount time
2040  */
2041 void ext4_ext_init(struct super_block *sb)
2042 {
2043 	/*
2044 	 * possible initialization would be here
2045 	 */
2046 
2047 	if (test_opt(sb, EXTENTS)) {
2048 		printk(KERN_INFO "EXT4-fs: file extents enabled");
2049 #ifdef AGGRESSIVE_TEST
2050 		printk(", aggressive tests");
2051 #endif
2052 #ifdef CHECK_BINSEARCH
2053 		printk(", check binsearch");
2054 #endif
2055 #ifdef EXTENTS_STATS
2056 		printk(", stats");
2057 #endif
2058 		printk("\n");
2059 #ifdef EXTENTS_STATS
2060 		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
2061 		EXT4_SB(sb)->s_ext_min = 1 << 30;
2062 		EXT4_SB(sb)->s_ext_max = 0;
2063 #endif
2064 	}
2065 }
2066 
2067 /*
2068  * called at umount time
2069  */
2070 void ext4_ext_release(struct super_block *sb)
2071 {
2072 	if (!test_opt(sb, EXTENTS))
2073 		return;
2074 
2075 #ifdef EXTENTS_STATS
2076 	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
2077 		struct ext4_sb_info *sbi = EXT4_SB(sb);
2078 		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
2079 			sbi->s_ext_blocks, sbi->s_ext_extents,
2080 			sbi->s_ext_blocks / sbi->s_ext_extents);
2081 		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
2082 			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
2083 	}
2084 #endif
2085 }
2086 
2087 /*
2088  * This function is called by ext4_ext_get_blocks() if someone tries to write
2089  * to an uninitialized extent. It may result in splitting the uninitialized
2090  * extent into multiple extents (up to three: one initialized and two
2091  * uninitialized).
2092  * There are three possibilities:
2093  *   a> There is no split required: Entire extent should be initialized
2094  *   b> Splits in two extents: Write is happening at either end of the extent
2095  *   c> Splits in three extents: Someone is writing in the middle of the extent
2096  */
2097 int ext4_ext_convert_to_initialized(handle_t *handle, struct inode *inode,
2098 					struct ext4_ext_path *path,
2099 					ext4_fsblk_t iblock,
2100 					unsigned long max_blocks)
2101 {
2102 	struct ext4_extent *ex, newex;
2103 	struct ext4_extent *ex1 = NULL;
2104 	struct ext4_extent *ex2 = NULL;
2105 	struct ext4_extent *ex3 = NULL;
2106 	struct ext4_extent_header *eh;
2107 	unsigned int allocated, ee_block, ee_len, depth;
2108 	ext4_fsblk_t newblock;
2109 	int err = 0;
2110 	int ret = 0;
2111 
2112 	depth = ext_depth(inode);
2113 	eh = path[depth].p_hdr;
2114 	ex = path[depth].p_ext;
2115 	ee_block = le32_to_cpu(ex->ee_block);
2116 	ee_len = ext4_ext_get_actual_len(ex);
2117 	allocated = ee_len - (iblock - ee_block);
2118 	newblock = iblock - ee_block + ext_pblock(ex);
2119 	ex2 = ex;
2120 
2121 	/* ex1: ee_block to iblock - 1 : uninitialized */
2122 	if (iblock > ee_block) {
2123 		ex1 = ex;
2124 		ex1->ee_len = cpu_to_le16(iblock - ee_block);
2125 		ext4_ext_mark_uninitialized(ex1);
2126 		ex2 = &newex;
2127 	}
2128 	/*
2129 	 * for sanity, update the length of the ex2 extent before
2130 	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
2131 	 * overlap of blocks.
2132 	 */
2133 	if (!ex1 && allocated > max_blocks)
2134 		ex2->ee_len = cpu_to_le16(max_blocks);
2135 	/* ex3: iblock + max_blocks to ee_block + ee_len : uninitialized */
2136 	if (allocated > max_blocks) {
2137 		unsigned int newdepth;
2138 		ex3 = &newex;
2139 		ex3->ee_block = cpu_to_le32(iblock + max_blocks);
2140 		ext4_ext_store_pblock(ex3, newblock + max_blocks);
2141 		ex3->ee_len = cpu_to_le16(allocated - max_blocks);
2142 		ext4_ext_mark_uninitialized(ex3);
2143 		err = ext4_ext_insert_extent(handle, inode, path, ex3);
2144 		if (err)
2145 			goto out;
2146 		/*
2147 		 * The depth, and hence eh & ex might change
2148 		 * as part of the insert above.
2149 		 */
2150 		newdepth = ext_depth(inode);
2151 		if (newdepth != depth) {
2152 			depth = newdepth;
2153 			path = ext4_ext_find_extent(inode, iblock, NULL);
2154 			if (IS_ERR(path)) {
2155 				err = PTR_ERR(path);
2156 				path = NULL;
2157 				goto out;
2158 			}
2159 			eh = path[depth].p_hdr;
2160 			ex = path[depth].p_ext;
2161 			if (ex2 != &newex)
2162 				ex2 = ex;
2163 		}
2164 		allocated = max_blocks;
2165 	}
2166 	/*
2167 	 * If there was a change of depth as part of the
2168 	 * insertion of ex3 above, we need to update the length
2169 	 * of the ex1 extent again here
2170 	 */
2171 	if (ex1 && ex1 != ex) {
2172 		ex1 = ex;
2173 		ex1->ee_len = cpu_to_le16(iblock - ee_block);
2174 		ext4_ext_mark_uninitialized(ex1);
2175 		ex2 = &newex;
2176 	}
2177 	/* ex2: iblock to iblock + max_blocks - 1 : initialized */
2178 	ex2->ee_block = cpu_to_le32(iblock);
2179 	ext4_ext_store_pblock(ex2, newblock);
2180 	ex2->ee_len = cpu_to_le16(allocated);
2181 	if (ex2 != ex)
2182 		goto insert;
2183 	err = ext4_ext_get_access(handle, inode, path + depth);
2184 	if (err)
2185 		goto out;
2186 	/*
2187 	 * New (initialized) extent starts from the first block
2188 	 * in the current extent. i.e., ex2 == ex
2189 	 * We have to see if it can be merged with the extent
2190 	 * on the left.
2191 	 */
2192 	if (ex2 > EXT_FIRST_EXTENT(eh)) {
2193 		/*
2194 		 * To merge left, pass "ex2 - 1" to try_to_merge(),
2195 		 * since it merges towards right _only_.
2196 		 */
2197 		ret = ext4_ext_try_to_merge(inode, path, ex2 - 1);
2198 		if (ret) {
2199 			err = ext4_ext_correct_indexes(handle, inode, path);
2200 			if (err)
2201 				goto out;
2202 			depth = ext_depth(inode);
2203 			ex2--;
2204 		}
2205 	}
2206 	/*
2207 	 * Try to merge towards the right. This might be required
2208 	 * only when the whole extent is being written to,
2209 	 * i.e. ex2 == ex and ex3 == NULL.
2210 	 */
2211 	if (!ex3) {
2212 		ret = ext4_ext_try_to_merge(inode, path, ex2);
2213 		if (ret) {
2214 			err = ext4_ext_correct_indexes(handle, inode, path);
2215 			if (err)
2216 				goto out;
2217 		}
2218 	}
2219 	/* Mark modified extent as dirty */
2220 	err = ext4_ext_dirty(handle, inode, path + depth);
2221 	goto out;
2222 insert:
2223 	err = ext4_ext_insert_extent(handle, inode, path, &newex);
2224 out:
2225 	return err ? err : allocated;
2226 }
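
/*
 * A worked example of case c> in ext4_ext_convert_to_initialized()
 * (illustrative, not from the original source): an uninitialized extent
 * covers logical blocks 100..199 and a write arrives for blocks
 * 140..149 (iblock = 140, max_blocks = 10).  ex1 keeps 100..139
 * uninitialized, ex3 is inserted to keep 150..199 uninitialized, and
 * ex2 becomes the initialized extent 140..149; the function returns
 * allocated = 10.
 */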
2227 
2228 int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
2229 			ext4_fsblk_t iblock,
2230 			unsigned long max_blocks, struct buffer_head *bh_result,
2231 			int create, int extend_disksize)
2232 {
2233 	struct ext4_ext_path *path = NULL;
2234 	struct ext4_extent_header *eh;
2235 	struct ext4_extent newex, *ex;
2236 	ext4_fsblk_t goal, newblock;
2237 	int err = 0, depth, ret;
2238 	unsigned long allocated = 0;
2239 
2240 	__clear_bit(BH_New, &bh_result->b_state);
2241 	ext_debug("blocks %d/%lu requested for inode %u\n", (int) iblock,
2242 			max_blocks, (unsigned) inode->i_ino);
2243 	mutex_lock(&EXT4_I(inode)->truncate_mutex);
2244 
2245 	/* check in cache */
2246 	goal = ext4_ext_in_cache(inode, iblock, &newex);
2247 	if (goal) {
2248 		if (goal == EXT4_EXT_CACHE_GAP) {
2249 			if (!create) {
2250 				/*
2251 				 * block isn't allocated yet and
2252 				 * user doesn't want to allocate it
2253 				 */
2254 				goto out2;
2255 			}
2256 			/* we should allocate requested block */
2257 		} else if (goal == EXT4_EXT_CACHE_EXTENT) {
2258 			/* block is already allocated */
2259 			newblock = iblock
2260 				   - le32_to_cpu(newex.ee_block)
2261 				   + ext_pblock(&newex);
2262 			/* number of remaining blocks in the extent */
2263 			allocated = le16_to_cpu(newex.ee_len) -
2264 					(iblock - le32_to_cpu(newex.ee_block));
2265 			goto out;
2266 		} else {
2267 			BUG();
2268 		}
2269 	}
2270 
2271 	/* find extent for this block */
2272 	path = ext4_ext_find_extent(inode, iblock, NULL);
2273 	if (IS_ERR(path)) {
2274 		err = PTR_ERR(path);
2275 		path = NULL;
2276 		goto out2;
2277 	}
2278 
2279 	depth = ext_depth(inode);
2280 
2281 	/*
2282 	 * a consistent leaf must not be empty;
2283 	 * this situation is possible, though, _during_ tree modification;
2284 	 * this is why the assertion can't be put in ext4_ext_find_extent()
2285 	 */
2286 	BUG_ON(path[depth].p_ext == NULL && depth != 0);
2287 	eh = path[depth].p_hdr;
2288 
2289 	ex = path[depth].p_ext;
2290 	if (ex) {
2291 		unsigned long ee_block = le32_to_cpu(ex->ee_block);
2292 		ext4_fsblk_t ee_start = ext_pblock(ex);
2293 		unsigned short ee_len;
2294 
2295 		/*
2296 		 * Uninitialized extents are treated as holes, except that
2297 		 * we split out initialized portions during a write.
2298 		 */
2299 		ee_len = ext4_ext_get_actual_len(ex);
2300 		/* if found extent covers block, simply return it */
2301 		if (iblock >= ee_block && iblock < ee_block + ee_len) {
2302 			newblock = iblock - ee_block + ee_start;
2303 			/* number of remaining blocks in the extent */
2304 			allocated = ee_len - (iblock - ee_block);
2305 			ext_debug("%d fit into %lu:%d -> %llu\n", (int) iblock,
2306 					ee_block, ee_len, newblock);
2307 
2308 			/* Do not put uninitialized extent in the cache */
2309 			if (!ext4_ext_is_uninitialized(ex)) {
2310 				ext4_ext_put_in_cache(inode, ee_block,
2311 							ee_len, ee_start,
2312 							EXT4_EXT_CACHE_EXTENT);
2313 				goto out;
2314 			}
2315 			if (create == EXT4_CREATE_UNINITIALIZED_EXT)
2316 				goto out;
2317 			if (!create)
2318 				goto out2;
2319 
2320 			ret = ext4_ext_convert_to_initialized(handle, inode,
2321 								path, iblock,
2322 								max_blocks);
2323 			if (ret <= 0)
2324 				goto out2;
2325 			else
2326 				allocated = ret;
2327 			goto outnew;
2328 		}
2329 	}
2330 
2331 	/*
2332 	 * requested block isn't allocated yet;
2333 	 * we must not try to create the block if the create flag is zero
2334 	 */
2335 	if (!create) {
2336 		/*
2337 		 * put just found gap into cache to speed up
2338 		 * subsequent requests
2339 		 */
2340 		ext4_ext_put_gap_in_cache(inode, path, iblock);
2341 		goto out2;
2342 	}
2343 	/*
2344 	 * Okay, we need to do block allocation.  Lazily initialize the block
2345 	 * allocation info here if necessary.
2346 	 */
2347 	if (S_ISREG(inode->i_mode) && (!EXT4_I(inode)->i_block_alloc_info))
2348 		ext4_init_block_alloc_info(inode);
2349 
2350 	/* allocate new block */
2351 	goal = ext4_ext_find_goal(inode, path, iblock);
2352 
2353 	/*
2354 	 * See if request is beyond maximum number of blocks we can have in
2355 	 * a single extent. For an initialized extent this limit is
2356 	 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
2357 	 * EXT_UNINIT_MAX_LEN.
2358 	 */
2359 	if (max_blocks > EXT_INIT_MAX_LEN &&
2360 	    create != EXT4_CREATE_UNINITIALIZED_EXT)
2361 		max_blocks = EXT_INIT_MAX_LEN;
2362 	else if (max_blocks > EXT_UNINIT_MAX_LEN &&
2363 		 create == EXT4_CREATE_UNINITIALIZED_EXT)
2364 		max_blocks = EXT_UNINIT_MAX_LEN;
2365 
2366 	/* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
2367 	newex.ee_block = cpu_to_le32(iblock);
2368 	newex.ee_len = cpu_to_le16(max_blocks);
2369 	err = ext4_ext_check_overlap(inode, &newex, path);
2370 	if (err)
2371 		allocated = le16_to_cpu(newex.ee_len);
2372 	else
2373 		allocated = max_blocks;
2374 	newblock = ext4_new_blocks(handle, inode, goal, &allocated, &err);
2375 	if (!newblock)
2376 		goto out2;
2377 	ext_debug("allocate new block: goal %llu, found %llu/%lu\n",
2378 			goal, newblock, allocated);
2379 
2380 	/* try to insert new extent into found leaf and return */
2381 	ext4_ext_store_pblock(&newex, newblock);
2382 	newex.ee_len = cpu_to_le16(allocated);
2383 	if (create == EXT4_CREATE_UNINITIALIZED_EXT)  /* Mark uninitialized */
2384 		ext4_ext_mark_uninitialized(&newex);
2385 	err = ext4_ext_insert_extent(handle, inode, path, &newex);
2386 	if (err) {
2387 		/* free data blocks we just allocated */
2388 		ext4_free_blocks(handle, inode, ext_pblock(&newex),
2389 					le16_to_cpu(newex.ee_len));
2390 		goto out2;
2391 	}
2392 
2393 	if (extend_disksize && inode->i_size > EXT4_I(inode)->i_disksize)
2394 		EXT4_I(inode)->i_disksize = inode->i_size;
2395 
2396 	/* the previous routine could have used the block we allocated */
2397 	newblock = ext_pblock(&newex);
2398 outnew:
2399 	__set_bit(BH_New, &bh_result->b_state);
2400 
2401 	/* Cache only when it is _not_ an uninitialized extent */
2402 	if (create != EXT4_CREATE_UNINITIALIZED_EXT)
2403 		ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
2404 						EXT4_EXT_CACHE_EXTENT);
2405 out:
2406 	if (allocated > max_blocks)
2407 		allocated = max_blocks;
2408 	ext4_ext_show_leaf(inode, path);
2409 	__set_bit(BH_Mapped, &bh_result->b_state);
2410 	bh_result->b_bdev = inode->i_sb->s_bdev;
2411 	bh_result->b_blocknr = newblock;
2412 out2:
2413 	if (path) {
2414 		ext4_ext_drop_refs(path);
2415 		kfree(path);
2416 	}
2417 	mutex_unlock(&EXT4_I(inode)->truncate_mutex);
2418 
2419 	return err ? err : allocated;
2420 }
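
/*
 * Illustrative mapping arithmetic (not in the original source): if an
 * extent maps logical blocks 100..199 to physical blocks 5000..5099
 * (ee_block = 100, ee_len = 100, ee_start = 5000) and iblock = 150 is
 * requested, the code above computes newblock = 150 - 100 + 5000 = 5050
 * and allocated = 100 - (150 - 100) = 50 remaining blocks.
 */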
2421 
2422 void ext4_ext_truncate(struct inode * inode, struct page *page)
2423 {
2424 	struct address_space *mapping = inode->i_mapping;
2425 	struct super_block *sb = inode->i_sb;
2426 	unsigned long last_block;
2427 	handle_t *handle;
2428 	int err = 0;
2429 
2430 	/*
2431 	 * probably the first extent we free will be the last one in its block
2432 	 */
2433 	err = ext4_writepage_trans_blocks(inode) + 3;
2434 	handle = ext4_journal_start(inode, err);
2435 	if (IS_ERR(handle)) {
2436 		if (page) {
2437 			clear_highpage(page);
2438 			flush_dcache_page(page);
2439 			unlock_page(page);
2440 			page_cache_release(page);
2441 		}
2442 		return;
2443 	}
2444 
2445 	if (page)
2446 		ext4_block_truncate_page(handle, page, mapping, inode->i_size);
2447 
2448 	mutex_lock(&EXT4_I(inode)->truncate_mutex);
2449 	ext4_ext_invalidate_cache(inode);
2450 
2451 	/*
2452 	 * TODO: optimization is possible here.
2453 	 * Probably we need not scan at all,
2454 	 * because page truncation is enough.
2455 	 */
2456 	if (ext4_orphan_add(handle, inode))
2457 		goto out_stop;
2458 
2459 	/* we have to know where to truncate from in the crash case */
2460 	EXT4_I(inode)->i_disksize = inode->i_size;
2461 	ext4_mark_inode_dirty(handle, inode);
2462 
2463 	last_block = (inode->i_size + sb->s_blocksize - 1)
2464 			>> EXT4_BLOCK_SIZE_BITS(sb);
2465 	err = ext4_ext_remove_space(inode, last_block);
2466 
2467 	/* In a multi-transaction truncate, we only make the final
2468 	 * transaction synchronous.
2469 	 */
2470 	if (IS_SYNC(inode))
2471 		handle->h_sync = 1;
2472 
2473 out_stop:
2474 	/*
2475 	 * If this was a simple ftruncate() and the file will remain alive,
2476 	 * then we need to clear up the orphan record which we created above.
2477 	 * However, if this was a real unlink then we were called by
2478 	 * ext4_delete_inode(), and we allow that function to clean up the
2479 	 * orphan info for us.
2480 	 */
2481 	if (inode->i_nlink)
2482 		ext4_orphan_del(handle, inode);
2483 
2484 	mutex_unlock(&EXT4_I(inode)->truncate_mutex);
2485 	ext4_journal_stop(handle);
2486 }
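
/*
 * Illustrative arithmetic (not in the original source): the last_block
 * computation above rounds i_size up to a block boundary.  With 4096
 * byte blocks (EXT4_BLOCK_SIZE_BITS == 12) and i_size = 10000,
 * last_block = (10000 + 4095) >> 12 = 3, so removal starts at logical
 * block 3, the first block lying wholly beyond EOF.
 */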
2487 
2488 /*
2489  * ext4_ext_writepage_trans_blocks:
2490  * calculate the maximum number of blocks we could modify
2491  * in order to allocate a new block for an inode
2492  */
2493 int ext4_ext_writepage_trans_blocks(struct inode *inode, int num)
2494 {
2495 	int needed;
2496 
2497 	needed = ext4_ext_calc_credits_for_insert(inode, NULL);
2498 
2499 	/* caller wants to allocate num blocks; note that the count includes the sb */
2500 	needed = needed * num - (num - 1);
2501 
2502 #ifdef CONFIG_QUOTA
2503 	needed += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
2504 #endif
2505 
2506 	return needed;
2507 }
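
/*
 * Worked example (illustrative, not from the original source): if a
 * single insert is estimated at 8 credits, then for num = 3 blocks the
 * function above returns 8 * 3 - (3 - 1) = 22 credits (plus the quota
 * blocks when CONFIG_QUOTA is set), because the superblock counted in
 * each per-insert estimate only needs to be journaled once.
 */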
2508 
2509 /*
2510  * preallocate space for a file. This implements ext4's fallocate inode
2511  * operation, which gets called from the sys_fallocate system call.
2512  * For block-mapped files, posix_fallocate should fall back to the method
2513  * of writing zeroes to the required new blocks (the same behavior that is
2514  * expected for file systems which do not support the fallocate() system call).
2515  */
2516 long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
2517 {
2518 	handle_t *handle;
2519 	ext4_fsblk_t block, max_blocks;
2520 	ext4_fsblk_t nblocks = 0;
2521 	int ret = 0;
2522 	int ret2 = 0;
2523 	int retries = 0;
2524 	struct buffer_head map_bh;
2525 	unsigned int credits, blkbits = inode->i_blkbits;
2526 
2527 	/*
2528 	 * currently supporting (pre)allocate mode for extent-based
2529 	 * files _only_
2530 	 */
2531 	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
2532 		return -EOPNOTSUPP;
2533 
2534 	/* preallocation to directories is currently not supported */
2535 	if (S_ISDIR(inode->i_mode))
2536 		return -ENODEV;
2537 
2538 	block = offset >> blkbits;
2539 	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
2540 			- block;
2541 
2542 	/*
2543 	 * credits to insert 1 extent into extent tree + buffers to be able to
2544 	 * modify 1 super block, 1 block bitmap and 1 group descriptor.
2545 	 */
2546 	credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + 3;
2547 retry:
2548 	while (ret >= 0 && ret < max_blocks) {
2549 		block = block + ret;
2550 		max_blocks = max_blocks - ret;
2551 		handle = ext4_journal_start(inode, credits);
2552 		if (IS_ERR(handle)) {
2553 			ret = PTR_ERR(handle);
2554 			break;
2555 		}
2556 
2557 		ret = ext4_ext_get_blocks(handle, inode, block,
2558 					  max_blocks, &map_bh,
2559 					  EXT4_CREATE_UNINITIALIZED_EXT, 0);
2560 		WARN_ON(!ret);
2561 		if (!ret) {
2562 			ext4_error(inode->i_sb, "ext4_fallocate",
2563 				   "ext4_ext_get_blocks returned 0! inode#%lu"
2564 				   ", block=%llu, max_blocks=%llu",
2565 				   inode->i_ino, block, max_blocks);
2566 			ret = -EIO;
2567 			ext4_mark_inode_dirty(handle, inode);
2568 			ret2 = ext4_journal_stop(handle);
2569 			break;
2570 		}
2571 		if (ret > 0) {
2572 			/* check wrap through sign-bit/zero here */
2573 			if ((block + ret) < 0 || (block + ret) < block) {
2574 				ret = -EIO;
2575 				ext4_mark_inode_dirty(handle, inode);
2576 				ret2 = ext4_journal_stop(handle);
2577 				break;
2578 			}
2579 			if (buffer_new(&map_bh) && ((block + ret) >
2580 			    (EXT4_BLOCK_ALIGN(i_size_read(inode), blkbits)
2581 			    >> blkbits)))
2582 				nblocks = nblocks + ret;
2583 		}
2584 
2585 		/* Update ctime if new blocks get allocated */
2586 		if (nblocks) {
2587 			struct timespec now;
2588 
2589 			now = current_fs_time(inode->i_sb);
2590 			if (!timespec_equal(&inode->i_ctime, &now))
2591 				inode->i_ctime = now;
2592 		}
2593 
2594 		ext4_mark_inode_dirty(handle, inode);
2595 		ret2 = ext4_journal_stop(handle);
2596 		if (ret2)
2597 			break;
2598 	}
2599 
2600 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
2601 		goto retry;
2602 
2603 	/*
2604 	 * Time to update the file size.
2605 	 * Update only when preallocation was requested beyond the file size.
2606 	 */
2607 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
2608 	    (offset + len) > i_size_read(inode)) {
2609 		if (ret > 0) {
2610 			/*
2611 			 * if no error, we assume preallocation succeeded
2612 			 * completely
2613 			 */
2614 			mutex_lock(&inode->i_mutex);
2615 			i_size_write(inode, offset + len);
2616 			EXT4_I(inode)->i_disksize = i_size_read(inode);
2617 			mutex_unlock(&inode->i_mutex);
2618 		} else if (ret < 0 && nblocks) {
2619 			/* Handle partial allocation scenario */
2620 			loff_t newsize;
2621 
2622 			mutex_lock(&inode->i_mutex);
2623 			newsize = (nblocks << blkbits) + i_size_read(inode);
2624 			i_size_write(inode, EXT4_BLOCK_ALIGN(newsize, blkbits));
2625 			EXT4_I(inode)->i_disksize = i_size_read(inode);
2626 			mutex_unlock(&inode->i_mutex);
2627 		}
2628 	}
2629 
2630 	return ret > 0 ? ret2 : ret;
2631 }
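
/*
 * Hypothetical user-space sketch (not part of this file; assumes a file
 * on an extent-enabled ext4 filesystem): the preallocation path above is
 * reached through the fallocate machinery, for instance when an
 * application preallocates 1 MiB with posix_fallocate():
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("testfile", O_CREAT | O_WRONLY, 0644);
 *		int err;
 *
 *		if (fd < 0)
 *			return 1;
 *		err = posix_fallocate(fd, 0, 1 << 20);
 *		if (err)
 *			fprintf(stderr, "posix_fallocate: %d\n", err);
 *		close(fd);
 *		return 0;
 *	}
 */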
2632