xref: /openbmc/linux/fs/ext4/extents.c (revision 64c70b1c)
1 /*
2  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
3  * Written by Alex Tomas <alex@clusterfs.com>
4  *
5  * Architecture independence:
6  *   Copyright (c) 2005, Bull S.A.
7  *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
21  */
22 
23 /*
24  * Extents support for EXT4
25  *
26  * TODO:
27  *   - ext4*_error() should be used in some situations
28  *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
29  *   - smart tree reduction
30  */
31 
32 #include <linux/module.h>
33 #include <linux/fs.h>
34 #include <linux/time.h>
35 #include <linux/ext4_jbd2.h>
36 #include <linux/jbd.h>
37 #include <linux/highuid.h>
38 #include <linux/pagemap.h>
39 #include <linux/quotaops.h>
40 #include <linux/string.h>
41 #include <linux/slab.h>
42 #include <linux/ext4_fs_extents.h>
43 #include <asm/uaccess.h>
44 
45 
46 /*
47  * ext_pblock:
48  * combine low and high parts of physical block number into ext4_fsblk_t
49  */
50 static ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
51 {
52 	ext4_fsblk_t block;
53 
54 	block = le32_to_cpu(ex->ee_start);
55 	block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
56 	return block;
57 }
58 
59 /*
60  * idx_pblock:
61  * combine low and high parts of a leaf physical block number into ext4_fsblk_t
62  */
63 static ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
64 {
65 	ext4_fsblk_t block;
66 
67 	block = le32_to_cpu(ix->ei_leaf);
68 	block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
69 	return block;
70 }
71 
72 /*
73  * ext4_ext_store_pblock:
74  * stores a large physical block number into an extent struct,
75  * breaking it into parts
76  */
77 static void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
78 {
79 	ex->ee_start = cpu_to_le32((unsigned long) (pb & 0xffffffff));
80 	ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
81 }
82 
83 /*
84  * ext4_idx_store_pblock:
85  * stores a large physical block number into an index struct,
86  * breaking it into parts
87  */
88 static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
89 {
90 	ix->ei_leaf = cpu_to_le32((unsigned long) (pb & 0xffffffff));
91 	ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
92 }
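/*
 * Illustrative example (not part of the original source): the two
 * store helpers above split a 48-bit physical block number into a
 * 32-bit low word and a 16-bit high word.  For pb = 0x123456789:
 *
 *	ee_start    = pb & 0xffffffff  = 0x23456789	(low 32 bits)
 *	ee_start_hi = (pb >> 31) >> 1  = 0x1		(high 16 bits)
 *
 * ext_pblock()/idx_pblock() reassemble the value the same way.  The
 * split shift "<< 31 ... << 1" keeps the expression well-defined even
 * if the type being shifted were only 32 bits wide, where a single
 * shift by 32 would be undefined behaviour in C.
 */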
93 
94 static int ext4_ext_check_header(const char *function, struct inode *inode,
95 				struct ext4_extent_header *eh)
96 {
97 	const char *error_msg = NULL;
98 
99 	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
100 		error_msg = "invalid magic";
101 		goto corrupted;
102 	}
103 	if (unlikely(eh->eh_max == 0)) {
104 		error_msg = "invalid eh_max";
105 		goto corrupted;
106 	}
107 	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
108 		error_msg = "invalid eh_entries";
109 		goto corrupted;
110 	}
111 	return 0;
112 
113 corrupted:
114 	ext4_error(inode->i_sb, function,
115 			"bad header in inode #%lu: %s - magic %x, "
116 			"entries %u, max %u, depth %u",
117 			inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
118 			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
119 			le16_to_cpu(eh->eh_depth));
120 
121 	return -EIO;
122 }
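/*
 * Illustrative example: a freshly initialized root header, as written
 * by ext4_ext_tree_init() below, passes the checks above with
 * eh_magic = 0xF30A (EXT4_EXT_MAGIC), eh_entries = 0, eh_max = 4
 * (see ext4_ext_space_root(), without AGGRESSIVE_TEST) and
 * eh_depth = 0.  A violation reports the inode via ext4_error() and
 * returns -EIO instead of BUG()ing.
 */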
123 
124 static handle_t *ext4_ext_journal_restart(handle_t *handle, int needed)
125 {
126 	int err;
127 
128 	if (handle->h_buffer_credits > needed)
129 		return handle;
130 	if (!ext4_journal_extend(handle, needed))
131 		return handle;
132 	err = ext4_journal_restart(handle, needed);
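	/* note: any error from ext4_journal_restart() is ignored here;
	 * err is assigned but never checked, and the handle is returned
	 * to the caller regardless */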
133 
134 	return handle;
135 }
136 
137 /*
138  * could return:
139  *  - EROFS
140  *  - ENOMEM
141  */
142 static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
143 				struct ext4_ext_path *path)
144 {
145 	if (path->p_bh) {
146 		/* path points to block */
147 		return ext4_journal_get_write_access(handle, path->p_bh);
148 	}
149 	/* path points to leaf/index in inode body */
150 	/* we use in-core data, no need to protect them */
151 	return 0;
152 }
153 
154 /*
155  * could return:
156  *  - EROFS
157  *  - ENOMEM
158  *  - EIO
159  */
160 static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
161 				struct ext4_ext_path *path)
162 {
163 	int err;
164 	if (path->p_bh) {
165 		/* path points to block */
166 		err = ext4_journal_dirty_metadata(handle, path->p_bh);
167 	} else {
168 		/* path points to leaf/index in inode body */
169 		err = ext4_mark_inode_dirty(handle, inode);
170 	}
171 	return err;
172 }
173 
174 static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
175 			      struct ext4_ext_path *path,
176 			      ext4_fsblk_t block)
177 {
178 	struct ext4_inode_info *ei = EXT4_I(inode);
179 	ext4_fsblk_t bg_start;
180 	ext4_grpblk_t colour;
181 	int depth;
182 
183 	if (path) {
184 		struct ext4_extent *ex;
185 		depth = path->p_depth;
186 
187 		/* try to predict block placement */
188 		ex = path[depth].p_ext;
189 		if (ex)
190 			return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));
191 
192 		/* it looks like index is empty;
193 		 * try to find starting block from index itself */
194 		if (path[depth].p_bh)
195 			return path[depth].p_bh->b_blocknr;
196 	}
197 
198 	/* OK. use inode's group */
199 	bg_start = (ei->i_block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) +
200 		le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block);
201 	colour = (current->pid % 16) *
202 			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
203 	return bg_start + colour + block;
204 }
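/*
 * Worked example (hypothetical numbers): with 32768 blocks per group
 * and pid 1234, colour = (1234 % 16) * (32768 / 16) = 2 * 2048 = 4096,
 * so the goal becomes bg_start + 4096 + block.  Hashing on the pid
 * spreads concurrent writers across the inode's block group.
 */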
205 
206 static ext4_fsblk_t
207 ext4_ext_new_block(handle_t *handle, struct inode *inode,
208 			struct ext4_ext_path *path,
209 			struct ext4_extent *ex, int *err)
210 {
211 	ext4_fsblk_t goal, newblock;
212 
213 	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
214 	newblock = ext4_new_block(handle, inode, goal, err);
215 	return newblock;
216 }
217 
218 static int ext4_ext_space_block(struct inode *inode)
219 {
220 	int size;
221 
222 	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
223 			/ sizeof(struct ext4_extent);
224 #ifdef AGGRESSIVE_TEST
225 	if (size > 6)
226 		size = 6;
227 #endif
228 	return size;
229 }
230 
231 static int ext4_ext_space_block_idx(struct inode *inode)
232 {
233 	int size;
234 
235 	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
236 			/ sizeof(struct ext4_extent_idx);
237 #ifdef AGGRESSIVE_TEST
238 	if (size > 5)
239 		size = 5;
240 #endif
241 	return size;
242 }
243 
244 static int ext4_ext_space_root(struct inode *inode)
245 {
246 	int size;
247 
248 	size = sizeof(EXT4_I(inode)->i_data);
249 	size -= sizeof(struct ext4_extent_header);
250 	size /= sizeof(struct ext4_extent);
251 #ifdef AGGRESSIVE_TEST
252 	if (size > 3)
253 		size = 3;
254 #endif
255 	return size;
256 }
257 
258 static int ext4_ext_space_root_idx(struct inode *inode)
259 {
260 	int size;
261 
262 	size = sizeof(EXT4_I(inode)->i_data);
263 	size -= sizeof(struct ext4_extent_header);
264 	size /= sizeof(struct ext4_extent_idx);
265 #ifdef AGGRESSIVE_TEST
266 	if (size > 4)
267 		size = 4;
268 #endif
269 	return size;
270 }
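/*
 * Note on the four helpers above: the on-disk header, extent and index
 * records are 12 bytes each, so a 4096-byte block holds
 * (4096 - 12) / 12 = 340 index or leaf entries, and the 60-byte
 * i_data root holds (60 - 12) / 12 = 4.  The AGGRESSIVE_TEST caps
 * shrink these limits so deep trees can be exercised with small files.
 */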
271 
272 #ifdef EXT_DEBUG
273 static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
274 {
275 	int k, l = path->p_depth;
276 
277 	ext_debug("path:");
278 	for (k = 0; k <= l; k++, path++) {
279 		if (path->p_idx) {
280 		  ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
281 			    idx_pblock(path->p_idx));
282 		} else if (path->p_ext) {
283 			ext_debug("  %d:%d:%llu ",
284 				  le32_to_cpu(path->p_ext->ee_block),
285 				  le16_to_cpu(path->p_ext->ee_len),
286 				  ext_pblock(path->p_ext));
287 		} else
288 			ext_debug("  []");
289 	}
290 	ext_debug("\n");
291 }
292 
293 static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
294 {
295 	int depth = ext_depth(inode);
296 	struct ext4_extent_header *eh;
297 	struct ext4_extent *ex;
298 	int i;
299 
300 	if (!path)
301 		return;
302 
303 	eh = path[depth].p_hdr;
304 	ex = EXT_FIRST_EXTENT(eh);
305 
306 	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
307 		ext_debug("%d:%d:%llu ", le32_to_cpu(ex->ee_block),
308 			  le16_to_cpu(ex->ee_len), ext_pblock(ex));
309 	}
310 	ext_debug("\n");
311 }
312 #else
313 #define ext4_ext_show_path(inode,path)
314 #define ext4_ext_show_leaf(inode,path)
315 #endif
316 
317 static void ext4_ext_drop_refs(struct ext4_ext_path *path)
318 {
319 	int depth = path->p_depth;
320 	int i;
321 
322 	for (i = 0; i <= depth; i++, path++)
323 		if (path->p_bh) {
324 			brelse(path->p_bh);
325 			path->p_bh = NULL;
326 		}
327 }
328 
329 /*
330  * ext4_ext_binsearch_idx:
331  * binary search for the closest index of the given block
332  */
333 static void
334 ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, int block)
335 {
336 	struct ext4_extent_header *eh = path->p_hdr;
337 	struct ext4_extent_idx *r, *l, *m;
338 
339 	BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
340 	BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
341 	BUG_ON(le16_to_cpu(eh->eh_entries) <= 0);
342 
343 	ext_debug("binsearch for %d(idx):  ", block);
344 
345 	l = EXT_FIRST_INDEX(eh) + 1;
346 	r = EXT_FIRST_INDEX(eh) + le16_to_cpu(eh->eh_entries) - 1;
347 	while (l <= r) {
348 		m = l + (r - l) / 2;
349 		if (block < le32_to_cpu(m->ei_block))
350 			r = m - 1;
351 		else
352 			l = m + 1;
353 		ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ei_block,
354 				m, m->ei_block, r, r->ei_block);
355 	}
356 
357 	path->p_idx = l - 1;
358 	ext_debug("  -> %d->%llu ", le32_to_cpu(path->p_idx->ei_block),
359 		  idx_pblock(path->p_idx));
360 
361 #ifdef CHECK_BINSEARCH
362 	{
363 		struct ext4_extent_idx *chix, *ix;
364 		int k;
365 
366 		chix = ix = EXT_FIRST_INDEX(eh);
367 		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
368 		  if (k != 0 &&
369 		      le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
370 				printk("k=%d, ix=0x%p, first=0x%p\n", k,
371 					ix, EXT_FIRST_INDEX(eh));
372 				printk("%u <= %u\n",
373 				       le32_to_cpu(ix->ei_block),
374 				       le32_to_cpu(ix[-1].ei_block));
375 			}
376 			BUG_ON(k && le32_to_cpu(ix->ei_block)
377 					   <= le32_to_cpu(ix[-1].ei_block));
378 			if (block < le32_to_cpu(ix->ei_block))
379 				break;
380 			chix = ix;
381 		}
382 		BUG_ON(chix != path->p_idx);
383 	}
384 #endif
385 
386 }
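/*
 * Illustrative example: with index entries for logical blocks
 * {0, 100, 200}, searching for block 150 ends with l one past the
 * entry for 100, so p_idx = l - 1 selects 100 -- the rightmost entry
 * whose ei_block is <= the target.  ext4_ext_binsearch() below
 * maintains the same invariant over leaf extents.
 */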
387 
388 /*
389  * ext4_ext_binsearch:
390  * binary search for closest extent of the given block
391  */
392 static void
393 ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
394 {
395 	struct ext4_extent_header *eh = path->p_hdr;
396 	struct ext4_extent *r, *l, *m;
397 
398 	BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
399 	BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
400 
401 	if (eh->eh_entries == 0) {
402 		/*
403 		 * this leaf is empty:
404 		 * we get such a leaf in split/add case
405 		 */
406 		return;
407 	}
408 
409 	ext_debug("binsearch for %d:  ", block);
410 
411 	l = EXT_FIRST_EXTENT(eh) + 1;
412 	r = EXT_FIRST_EXTENT(eh) + le16_to_cpu(eh->eh_entries) - 1;
413 
414 	while (l <= r) {
415 		m = l + (r - l) / 2;
416 		if (block < le32_to_cpu(m->ee_block))
417 			r = m - 1;
418 		else
419 			l = m + 1;
420 		ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ee_block,
421 				m, m->ee_block, r, r->ee_block);
422 	}
423 
424 	path->p_ext = l - 1;
425 	ext_debug("  -> %d:%llu:%d ",
426 			le32_to_cpu(path->p_ext->ee_block),
427 			ext_pblock(path->p_ext),
428 			le16_to_cpu(path->p_ext->ee_len));
429 
430 #ifdef CHECK_BINSEARCH
431 	{
432 		struct ext4_extent *chex, *ex;
433 		int k;
434 
435 		chex = ex = EXT_FIRST_EXTENT(eh);
436 		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
437 			BUG_ON(k && le32_to_cpu(ex->ee_block)
438 					  <= le32_to_cpu(ex[-1].ee_block));
439 			if (block < le32_to_cpu(ex->ee_block))
440 				break;
441 			chex = ex;
442 		}
443 		BUG_ON(chex != path->p_ext);
444 	}
445 #endif
446 
447 }
448 
449 int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
450 {
451 	struct ext4_extent_header *eh;
452 
453 	eh = ext_inode_hdr(inode);
454 	eh->eh_depth = 0;
455 	eh->eh_entries = 0;
456 	eh->eh_magic = EXT4_EXT_MAGIC;
457 	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode));
458 	ext4_mark_inode_dirty(handle, inode);
459 	ext4_ext_invalidate_cache(inode);
460 	return 0;
461 }
462 
463 struct ext4_ext_path *
464 ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path)
465 {
466 	struct ext4_extent_header *eh;
467 	struct buffer_head *bh;
468 	short int depth, i, ppos = 0, alloc = 0;
469 
470 	eh = ext_inode_hdr(inode);
471 	BUG_ON(eh == NULL);
472 	if (ext4_ext_check_header(__FUNCTION__, inode, eh))
473 		return ERR_PTR(-EIO);
474 
475 	i = depth = ext_depth(inode);
476 
477 	/* account possible depth increase */
478 	if (!path) {
479 		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
480 				GFP_NOFS);
481 		if (!path)
482 			return ERR_PTR(-ENOMEM);
483 		alloc = 1;
484 	}
485 	path[0].p_hdr = eh;
486 
487 	/* walk through the tree */
488 	while (i) {
489 		ext_debug("depth %d: num %d, max %d\n",
490 			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
491 		ext4_ext_binsearch_idx(inode, path + ppos, block);
492 		path[ppos].p_block = idx_pblock(path[ppos].p_idx);
493 		path[ppos].p_depth = i;
494 		path[ppos].p_ext = NULL;
495 
496 		bh = sb_bread(inode->i_sb, path[ppos].p_block);
497 		if (!bh)
498 			goto err;
499 
500 		eh = ext_block_hdr(bh);
501 		ppos++;
502 		BUG_ON(ppos > depth);
503 		path[ppos].p_bh = bh;
504 		path[ppos].p_hdr = eh;
505 		i--;
506 
507 		if (ext4_ext_check_header(__FUNCTION__, inode, eh))
508 			goto err;
509 	}
510 
511 	path[ppos].p_depth = i;
512 	path[ppos].p_hdr = eh;
513 	path[ppos].p_ext = NULL;
514 	path[ppos].p_idx = NULL;
515 
516 	if (ext4_ext_check_header(__FUNCTION__, inode, eh))
517 		goto err;
518 
519 	/* find extent */
520 	ext4_ext_binsearch(inode, path + ppos, block);
521 
522 	ext4_ext_show_path(inode, path);
523 
524 	return path;
525 
526 err:
527 	ext4_ext_drop_refs(path);
528 	if (alloc)
529 		kfree(path);
530 	return ERR_PTR(-EIO);
531 }
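/*
 * Illustrative sketch: for a depth-2 tree the array returned above is
 *
 *	path[0]	root header in the inode body	(p_bh == NULL)
 *	path[1]	interior index block		(p_idx set, p_bh held)
 *	path[2]	leaf block			(p_ext set by binsearch)
 *
 * Callers must release the buffer references with ext4_ext_drop_refs()
 * and, if they passed path == NULL, kfree() the array afterwards.
 */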
532 
533 /*
534  * ext4_ext_insert_index:
535  * insert new index [@logical;@ptr] into the block at @curp;
536  * check where to insert: before @curp or after @curp
537  */
538 static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
539 				struct ext4_ext_path *curp,
540 				int logical, ext4_fsblk_t ptr)
541 {
542 	struct ext4_extent_idx *ix;
543 	int len, err;
544 
545 	err = ext4_ext_get_access(handle, inode, curp);
546 	if (err)
547 		return err;
548 
549 	BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
550 	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
551 	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
552 		/* insert after */
553 		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
554 			len = (len - 1) * sizeof(struct ext4_extent_idx);
555 			len = len < 0 ? 0 : len;
556 			ext_debug("insert new index %d after: %llu. "
557 					"move %d from 0x%p to 0x%p\n",
558 					logical, ptr, len,
559 					(curp->p_idx + 1), (curp->p_idx + 2));
560 			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
561 		}
562 		ix = curp->p_idx + 1;
563 	} else {
564 		/* insert before */
565 		len = len * sizeof(struct ext4_extent_idx);
566 		len = len < 0 ? 0 : len;
567 		ext_debug("insert new index %d before: %llu. "
568 				"move %d from 0x%p to 0x%p\n",
569 				logical, ptr, len,
570 				curp->p_idx, (curp->p_idx + 1));
571 		memmove(curp->p_idx + 1, curp->p_idx, len);
572 		ix = curp->p_idx;
573 	}
574 
575 	ix->ei_block = cpu_to_le32(logical);
576 	ext4_idx_store_pblock(ix, ptr);
577 	curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1);
578 
579 	BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
580 			     > le16_to_cpu(curp->p_hdr->eh_max));
581 	BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));
582 
583 	err = ext4_ext_dirty(handle, inode, curp);
584 	ext4_std_error(inode->i_sb, err);
585 
586 	return err;
587 }
588 
589 /*
590  * ext4_ext_split:
591  * inserts new subtree into the path, using free index entry
592  * at depth @at:
593  * - allocates all needed blocks (new leaf and all intermediate index blocks)
594  * - makes decision where to split
595  * - moves remaining extents and index entries (right to the split point)
596  *   into the newly allocated blocks
597  * - initializes subtree
598  */
599 static int ext4_ext_split(handle_t *handle, struct inode *inode,
600 				struct ext4_ext_path *path,
601 				struct ext4_extent *newext, int at)
602 {
603 	struct buffer_head *bh = NULL;
604 	int depth = ext_depth(inode);
605 	struct ext4_extent_header *neh;
606 	struct ext4_extent_idx *fidx;
607 	struct ext4_extent *ex;
608 	int i = at, k, m, a;
609 	ext4_fsblk_t newblock, oldblock;
610 	__le32 border;
611 	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
612 	int err = 0;
613 
614 	/* make decision: where to split? */
615 	/* FIXME: now decision is simplest: at current extent */
616 
617 	/* if current leaf will be split, then we should use
618 	 * border from split point */
619 	BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
620 	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
621 		border = path[depth].p_ext[1].ee_block;
622 		ext_debug("leaf will be split."
623 				" next leaf starts at %d\n",
624 				  le32_to_cpu(border));
625 	} else {
626 		border = newext->ee_block;
627 		ext_debug("leaf will be added."
628 				" next leaf starts at %d\n",
629 				le32_to_cpu(border));
630 	}
631 
632 	/*
633 	 * If an error occurs, we abort processing
634 	 * and mark the filesystem read-only. The index won't
635 	 * be inserted and the tree will stay in a consistent
636 	 * state. The next mount will repair the buffers too.
637 	 */
638 
639 	/*
640 	 * Get array to track all allocated blocks.
641 	 * We need this to handle errors and free blocks
642 	 * upon them.
643 	 */
644 	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
645 	if (!ablocks)
646 		return -ENOMEM;
647 
648 	/* allocate all needed blocks */
649 	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
650 	for (a = 0; a < depth - at; a++) {
651 		newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
652 		if (newblock == 0)
653 			goto cleanup;
654 		ablocks[a] = newblock;
655 	}
656 
657 	/* initialize new leaf */
658 	newblock = ablocks[--a];
659 	BUG_ON(newblock == 0);
660 	bh = sb_getblk(inode->i_sb, newblock);
661 	if (!bh) {
662 		err = -EIO;
663 		goto cleanup;
664 	}
665 	lock_buffer(bh);
666 
667 	err = ext4_journal_get_create_access(handle, bh);
668 	if (err)
669 		goto cleanup;
670 
671 	neh = ext_block_hdr(bh);
672 	neh->eh_entries = 0;
673 	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
674 	neh->eh_magic = EXT4_EXT_MAGIC;
675 	neh->eh_depth = 0;
676 	ex = EXT_FIRST_EXTENT(neh);
677 
678 	/* move remainder of path[depth] to the new leaf */
679 	BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
680 	/* start copy from next extent */
681 	/* TODO: we could do it by single memmove */
682 	m = 0;
683 	path[depth].p_ext++;
684 	while (path[depth].p_ext <=
685 			EXT_MAX_EXTENT(path[depth].p_hdr)) {
686 		ext_debug("move %d:%llu:%d in new leaf %llu\n",
687 				le32_to_cpu(path[depth].p_ext->ee_block),
688 				ext_pblock(path[depth].p_ext),
689 				le16_to_cpu(path[depth].p_ext->ee_len),
690 				newblock);
691 		/*memmove(ex++, path[depth].p_ext++,
692 				sizeof(struct ext4_extent));
693 		neh->eh_entries++;*/
694 		path[depth].p_ext++;
695 		m++;
696 	}
697 	if (m) {
698 		memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
699 		neh->eh_entries = cpu_to_le16(le16_to_cpu(neh->eh_entries)+m);
700 	}
701 
702 	set_buffer_uptodate(bh);
703 	unlock_buffer(bh);
704 
705 	err = ext4_journal_dirty_metadata(handle, bh);
706 	if (err)
707 		goto cleanup;
708 	brelse(bh);
709 	bh = NULL;
710 
711 	/* correct old leaf */
712 	if (m) {
713 		err = ext4_ext_get_access(handle, inode, path + depth);
714 		if (err)
715 			goto cleanup;
716 		path[depth].p_hdr->eh_entries =
717 		     cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m);
718 		err = ext4_ext_dirty(handle, inode, path + depth);
719 		if (err)
720 			goto cleanup;
721 
722 	}
723 
724 	/* create intermediate indexes */
725 	k = depth - at - 1;
726 	BUG_ON(k < 0);
727 	if (k)
728 		ext_debug("create %d intermediate indices\n", k);
729 	/* insert new index into current index block */
730 	/* current depth stored in i var */
731 	i = depth - 1;
732 	while (k--) {
733 		oldblock = newblock;
734 		newblock = ablocks[--a];
735 		bh = sb_getblk(inode->i_sb, (ext4_fsblk_t)newblock);
736 		if (!bh) {
737 			err = -EIO;
738 			goto cleanup;
739 		}
740 		lock_buffer(bh);
741 
742 		err = ext4_journal_get_create_access(handle, bh);
743 		if (err)
744 			goto cleanup;
745 
746 		neh = ext_block_hdr(bh);
747 		neh->eh_entries = cpu_to_le16(1);
748 		neh->eh_magic = EXT4_EXT_MAGIC;
749 		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
750 		neh->eh_depth = cpu_to_le16(depth - i);
751 		fidx = EXT_FIRST_INDEX(neh);
752 		fidx->ei_block = border;
753 		ext4_idx_store_pblock(fidx, oldblock);
754 
755 		ext_debug("int.index at %d (block %llu): %lu -> %llu\n", i,
756 				newblock, (unsigned long) le32_to_cpu(border),
757 				oldblock);
758 		/* copy indexes */
759 		m = 0;
760 		path[i].p_idx++;
761 
762 		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
763 				EXT_MAX_INDEX(path[i].p_hdr));
764 		BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
765 				EXT_LAST_INDEX(path[i].p_hdr));
766 		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
767 			ext_debug("%d: move %d:%llu in new index %llu\n", i,
768 					le32_to_cpu(path[i].p_idx->ei_block),
769 					idx_pblock(path[i].p_idx),
770 					newblock);
771 			/*memmove(++fidx, path[i].p_idx++,
772 					sizeof(struct ext4_extent_idx));
773 			neh->eh_entries++;
774 			BUG_ON(neh->eh_entries > neh->eh_max);*/
775 			path[i].p_idx++;
776 			m++;
777 		}
778 		if (m) {
779 			memmove(++fidx, path[i].p_idx - m,
780 				sizeof(struct ext4_extent_idx) * m);
781 			neh->eh_entries =
782 				cpu_to_le16(le16_to_cpu(neh->eh_entries) + m);
783 		}
784 		set_buffer_uptodate(bh);
785 		unlock_buffer(bh);
786 
787 		err = ext4_journal_dirty_metadata(handle, bh);
788 		if (err)
789 			goto cleanup;
790 		brelse(bh);
791 		bh = NULL;
792 
793 		/* correct old index */
794 		if (m) {
795 			err = ext4_ext_get_access(handle, inode, path + i);
796 			if (err)
797 				goto cleanup;
798 			path[i].p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path[i].p_hdr->eh_entries)-m);
799 			err = ext4_ext_dirty(handle, inode, path + i);
800 			if (err)
801 				goto cleanup;
802 		}
803 
804 		i--;
805 	}
806 
807 	/* insert new index */
808 	err = ext4_ext_insert_index(handle, inode, path + at,
809 				    le32_to_cpu(border), newblock);
810 
811 cleanup:
812 	if (bh) {
813 		if (buffer_locked(bh))
814 			unlock_buffer(bh);
815 		brelse(bh);
816 	}
817 
818 	if (err) {
819 		/* free all allocated blocks in error case */
820 		for (i = 0; i < depth; i++) {
821 			if (!ablocks[i])
822 				continue;
823 			ext4_free_blocks(handle, inode, ablocks[i], 1);
824 		}
825 	}
826 	kfree(ablocks);
827 
828 	return err;
829 }
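/*
 * Illustrative sketch of the simplest split (depth 1, free slot in the
 * root, so at == 0): one new leaf L' is allocated, the extents to the
 * right of the split point move from the full leaf L into L', and an
 * index for L' keyed by `border' is inserted into the root:
 *
 *	before:	root -> [ L(full) ]	after:	root -> [ L | L' ]
 */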
830 
831 /*
832  * ext4_ext_grow_indepth:
833  * implements tree growing procedure:
834  * - allocates new block
835  * - moves top-level data (index block or leaf) into the new block
836  * - initializes new top-level, creating index that points to the
837  *   just created block
838  */
839 static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
840 					struct ext4_ext_path *path,
841 					struct ext4_extent *newext)
842 {
843 	struct ext4_ext_path *curp = path;
844 	struct ext4_extent_header *neh;
845 	struct ext4_extent_idx *fidx;
846 	struct buffer_head *bh;
847 	ext4_fsblk_t newblock;
848 	int err = 0;
849 
850 	newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
851 	if (newblock == 0)
852 		return err;
853 
854 	bh = sb_getblk(inode->i_sb, newblock);
855 	if (!bh) {
856 		err = -EIO;
857 		ext4_std_error(inode->i_sb, err);
858 		return err;
859 	}
860 	lock_buffer(bh);
861 
862 	err = ext4_journal_get_create_access(handle, bh);
863 	if (err) {
864 		unlock_buffer(bh);
865 		goto out;
866 	}
867 
868 	/* move top-level index/leaf into new block */
869 	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));
870 
871 	/* set size of new block */
872 	neh = ext_block_hdr(bh);
873 	/* the old root could have indexes or leaves,
874 	 * so calculate eh_max the right way */
875 	if (ext_depth(inode))
876 	  neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
877 	else
878 	  neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
879 	neh->eh_magic = EXT4_EXT_MAGIC;
880 	set_buffer_uptodate(bh);
881 	unlock_buffer(bh);
882 
883 	err = ext4_journal_dirty_metadata(handle, bh);
884 	if (err)
885 		goto out;
886 
887 	/* create index in new top-level index: num,max,pointer */
888 	err = ext4_ext_get_access(handle, inode, curp);
889 	if (err)
890 		goto out;
891 
892 	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
893 	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode));
894 	curp->p_hdr->eh_entries = cpu_to_le16(1);
895 	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
896 	/* FIXME: it works, but actually path[0] can be index */
897 	curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
898 	ext4_idx_store_pblock(curp->p_idx, newblock);
899 
900 	neh = ext_inode_hdr(inode);
901 	fidx = EXT_FIRST_INDEX(neh);
902 	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
903 		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
904 		  le32_to_cpu(fidx->ei_block), idx_pblock(fidx));
905 
906 	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
907 	err = ext4_ext_dirty(handle, inode, curp);
908 out:
909 	brelse(bh);
910 
911 	return err;
912 }
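/*
 * Illustrative sketch: growing a depth-0 tree.  The 60 bytes of i_data
 * are copied verbatim into a newly allocated block B and the root is
 * rewritten as a one-entry index:
 *
 *	before:	i_data = leaf extents	after:	i_data = [ idx -> B ]
 *
 * eh_depth becomes 1, and a subsequent ext4_ext_split() can use the
 * index slots freed up in the root.
 */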
913 
914 /*
915  * ext4_ext_create_new_leaf:
916  * finds empty index and adds new leaf.
917  * if no free index is found, then it requests growing the tree in depth.
918  */
919 static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
920 					struct ext4_ext_path *path,
921 					struct ext4_extent *newext)
922 {
923 	struct ext4_ext_path *curp;
924 	int depth, i, err = 0;
925 
926 repeat:
927 	i = depth = ext_depth(inode);
928 
929 	/* walk up the tree and look for a free index entry */
930 	curp = path + depth;
931 	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
932 		i--;
933 		curp--;
934 	}
935 
936 	/* we use already allocated block for index block,
937 	 * so subsequent data blocks should be contiguous */
938 	if (EXT_HAS_FREE_INDEX(curp)) {
939 		/* if we found index with free entry, then use that
940 		 * entry: create all needed subtree and add new leaf */
941 		err = ext4_ext_split(handle, inode, path, newext, i);
942 
943 		/* refill path */
944 		ext4_ext_drop_refs(path);
945 		path = ext4_ext_find_extent(inode,
946 					    le32_to_cpu(newext->ee_block),
947 					    path);
948 		if (IS_ERR(path))
949 			err = PTR_ERR(path);
950 	} else {
951 		/* tree is full, time to grow in depth */
952 		err = ext4_ext_grow_indepth(handle, inode, path, newext);
953 		if (err)
954 			goto out;
955 
956 		/* refill path */
957 		ext4_ext_drop_refs(path);
958 		path = ext4_ext_find_extent(inode,
959 					    le32_to_cpu(newext->ee_block),
960 					    path);
961 		if (IS_ERR(path)) {
962 			err = PTR_ERR(path);
963 			goto out;
964 		}
965 
966 		/*
967 		 * only first (depth 0 -> 1) produces free space;
968 		 * in all other cases we have to split the grown tree
969 		 */
970 		depth = ext_depth(inode);
971 		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
972 			/* now we need to split */
973 			goto repeat;
974 		}
975 	}
976 
977 out:
978 	return err;
979 }
980 
981 /*
982  * ext4_ext_next_allocated_block:
983  * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
984  * NOTE: it considers block number from index entry as
985  * allocated block. Thus, index entries have to be consistent
986  * with leaves.
987  */
988 static unsigned long
989 ext4_ext_next_allocated_block(struct ext4_ext_path *path)
990 {
991 	int depth;
992 
993 	BUG_ON(path == NULL);
994 	depth = path->p_depth;
995 
996 	if (depth == 0 && path->p_ext == NULL)
997 		return EXT_MAX_BLOCK;
998 
999 	while (depth >= 0) {
1000 		if (depth == path->p_depth) {
1001 			/* leaf */
1002 			if (path[depth].p_ext !=
1003 					EXT_LAST_EXTENT(path[depth].p_hdr))
1004 			  return le32_to_cpu(path[depth].p_ext[1].ee_block);
1005 		} else {
1006 			/* index */
1007 			if (path[depth].p_idx !=
1008 					EXT_LAST_INDEX(path[depth].p_hdr))
1009 			  return le32_to_cpu(path[depth].p_idx[1].ei_block);
1010 		}
1011 		depth--;
1012 	}
1013 
1014 	return EXT_MAX_BLOCK;
1015 }
1016 
1017 /*
1018  * ext4_ext_next_leaf_block:
1019  * returns first allocated block from next leaf or EXT_MAX_BLOCK
1020  */
1021 static unsigned ext4_ext_next_leaf_block(struct inode *inode,
1022 					struct ext4_ext_path *path)
1023 {
1024 	int depth;
1025 
1026 	BUG_ON(path == NULL);
1027 	depth = path->p_depth;
1028 
1029 	/* zero-tree has no leaf blocks at all */
1030 	if (depth == 0)
1031 		return EXT_MAX_BLOCK;
1032 
1033 	/* go to index block */
1034 	depth--;
1035 
1036 	while (depth >= 0) {
1037 		if (path[depth].p_idx !=
1038 				EXT_LAST_INDEX(path[depth].p_hdr))
1039 		  return le32_to_cpu(path[depth].p_idx[1].ei_block);
1040 		depth--;
1041 	}
1042 
1043 	return EXT_MAX_BLOCK;
1044 }
1045 
1046 /*
1047  * ext4_ext_correct_indexes:
1048  * if leaf gets modified and modified extent is first in the leaf,
1049  * then we have to correct all indexes above.
1050  * TODO: do we need to correct tree in all cases?
1051  */
1052 int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1053 				struct ext4_ext_path *path)
1054 {
1055 	struct ext4_extent_header *eh;
1056 	int depth = ext_depth(inode);
1057 	struct ext4_extent *ex;
1058 	__le32 border;
1059 	int k, err = 0;
1060 
1061 	eh = path[depth].p_hdr;
1062 	ex = path[depth].p_ext;
1063 	BUG_ON(ex == NULL);
1064 	BUG_ON(eh == NULL);
1065 
1066 	if (depth == 0) {
1067 		/* there is no tree at all */
1068 		return 0;
1069 	}
1070 
1071 	if (ex != EXT_FIRST_EXTENT(eh)) {
1072 		/* we correct tree if first leaf got modified only */
1073 		return 0;
1074 	}
1075 
1076 	/*
1077 	 * TODO: we need correction if border is smaller than current one
1078 	 */
1079 	k = depth - 1;
1080 	border = path[depth].p_ext->ee_block;
1081 	err = ext4_ext_get_access(handle, inode, path + k);
1082 	if (err)
1083 		return err;
1084 	path[k].p_idx->ei_block = border;
1085 	err = ext4_ext_dirty(handle, inode, path + k);
1086 	if (err)
1087 		return err;
1088 
1089 	while (k--) {
1090 		/* change all left-side indexes */
1091 		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1092 			break;
1093 		err = ext4_ext_get_access(handle, inode, path + k);
1094 		if (err)
1095 			break;
1096 		path[k].p_idx->ei_block = border;
1097 		err = ext4_ext_dirty(handle, inode, path + k);
1098 		if (err)
1099 			break;
1100 	}
1101 
1102 	return err;
1103 }
1104 
1105 static int
1106 ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
1107 				struct ext4_extent *ex2)
1108 {
1109 	if (le32_to_cpu(ex1->ee_block) + le16_to_cpu(ex1->ee_len) !=
1110 			le32_to_cpu(ex2->ee_block))
1111 		return 0;
1112 
1113 	/*
1114 	 * To allow future support for preallocated extents to be added
1115 	 * as an RO_COMPAT feature, refuse to merge two extents if
1116 	 * this can result in the top bit of ee_len being set.
1117 	 */
1118 	if (le16_to_cpu(ex1->ee_len) + le16_to_cpu(ex2->ee_len) > EXT_MAX_LEN)
1119 		return 0;
1120 #ifdef AGGRESSIVE_TEST
1121 	if (le16_to_cpu(ex1->ee_len) >= 4)
1122 		return 0;
1123 #endif
1124 
1125 	if (ext_pblock(ex1) + le16_to_cpu(ex1->ee_len) == ext_pblock(ex2))
1126 		return 1;
1127 	return 0;
1128 }
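/*
 * Illustrative example: ex1 = {ee_block 100, ee_len 10, pblock 5000}
 * and ex2 = {ee_block 110, ee_len 5, pblock 5010} are mergeable:
 * logically adjacent (100 + 10 == 110), physically adjacent
 * (5000 + 10 == 5010), and the combined length 15 stays within the
 * EXT_MAX_LEN cap.  The same ex2 placed at pblock 6000 would not
 * merge.
 */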
1129 
1130 /*
1131  * check if a portion of the "newext" extent overlaps with an
1132  * existing extent.
1133  *
1134  * If there is an overlap discovered, it updates the length of the newext
1135  * such that there will be no overlap, and then returns 1.
1136  * If there is no overlap found, it returns 0.
1137  */
1138 unsigned int ext4_ext_check_overlap(struct inode *inode,
1139 				    struct ext4_extent *newext,
1140 				    struct ext4_ext_path *path)
1141 {
1142 	unsigned long b1, b2;
1143 	unsigned int depth, len1;
1144 	unsigned int ret = 0;
1145 
1146 	b1 = le32_to_cpu(newext->ee_block);
1147 	len1 = le16_to_cpu(newext->ee_len);
1148 	depth = ext_depth(inode);
1149 	if (!path[depth].p_ext)
1150 		goto out;
1151 	b2 = le32_to_cpu(path[depth].p_ext->ee_block);
1152 
1153 	/*
1154 	 * get the next allocated block if the extent in the path
1155 	 * is before the requested block(s)
1156 	 */
1157 	if (b2 < b1) {
1158 		b2 = ext4_ext_next_allocated_block(path);
1159 		if (b2 == EXT_MAX_BLOCK)
1160 			goto out;
1161 	}
1162 
1163 	/* check for wrap through zero */
1164 	if (b1 + len1 < b1) {
1165 		len1 = EXT_MAX_BLOCK - b1;
1166 		newext->ee_len = cpu_to_le16(len1);
1167 		ret = 1;
1168 	}
1169 
1170 	/* check for overlap */
1171 	if (b1 + len1 > b2) {
1172 		newext->ee_len = cpu_to_le16(b2 - b1);
1173 		ret = 1;
1174 	}
1175 out:
1176 	return ret;
1177 }
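/*
 * Illustrative example: newext = {ee_block 100, ee_len 50} against an
 * existing extent starting at block 120 overlaps by 30 blocks, so
 * ee_len is trimmed to 120 - 100 = 20 and 1 is returned; the caller
 * then allocates only the non-overlapping prefix.
 */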
1178 
1179 /*
1180  * ext4_ext_insert_extent:
1181  * tries to merge requested extent into the existing extent or
1182  * inserts requested extent as new one into the tree,
1183  * creating new leaf in the no-space case.
1184  */
1185 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1186 				struct ext4_ext_path *path,
1187 				struct ext4_extent *newext)
1188 {
1189 	struct ext4_extent_header * eh;
1190 	struct ext4_extent *ex, *fex;
1191 	struct ext4_extent *nearex; /* nearest extent */
1192 	struct ext4_ext_path *npath = NULL;
1193 	int depth, len, err, next;
1194 
1195 	BUG_ON(newext->ee_len == 0);
1196 	depth = ext_depth(inode);
1197 	ex = path[depth].p_ext;
1198 	BUG_ON(path[depth].p_hdr == NULL);
1199 
1200 	/* try to insert block into found extent and return */
1201 	if (ex && ext4_can_extents_be_merged(inode, ex, newext)) {
1202 		ext_debug("append %d block to %d:%d (from %llu)\n",
1203 				le16_to_cpu(newext->ee_len),
1204 				le32_to_cpu(ex->ee_block),
1205 				le16_to_cpu(ex->ee_len), ext_pblock(ex));
1206 		err = ext4_ext_get_access(handle, inode, path + depth);
1207 		if (err)
1208 			return err;
1209 		ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len)
1210 					 + le16_to_cpu(newext->ee_len));
1211 		eh = path[depth].p_hdr;
1212 		nearex = ex;
1213 		goto merge;
1214 	}
1215 
1216 repeat:
1217 	depth = ext_depth(inode);
1218 	eh = path[depth].p_hdr;
1219 	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
1220 		goto has_space;
1221 
1222 	/* probably next leaf has space for us? */
1223 	fex = EXT_LAST_EXTENT(eh);
1224 	next = ext4_ext_next_leaf_block(inode, path);
1225 	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
1226 	    && next != EXT_MAX_BLOCK) {
1227 		ext_debug("next leaf block - %d\n", next);
1228 		BUG_ON(npath != NULL);
1229 		npath = ext4_ext_find_extent(inode, next, NULL);
1230 		if (IS_ERR(npath))
1231 			return PTR_ERR(npath);
1232 		BUG_ON(npath->p_depth != path->p_depth);
1233 		eh = npath[depth].p_hdr;
1234 		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
1235 			ext_debug("next leaf isn't full(%d)\n",
1236 				  le16_to_cpu(eh->eh_entries));
1237 			path = npath;
1238 			goto repeat;
1239 		}
1240 		ext_debug("next leaf has no free space(%d,%d)\n",
1241 			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
1242 	}
1243 
1244 	/*
1245 	 * There is no free space in the found leaf.
1246 	 * We're gonna add a new leaf in the tree.
1247 	 */
1248 	err = ext4_ext_create_new_leaf(handle, inode, path, newext);
1249 	if (err)
1250 		goto cleanup;
1251 	depth = ext_depth(inode);
1252 	eh = path[depth].p_hdr;
1253 
1254 has_space:
1255 	nearex = path[depth].p_ext;
1256 
1257 	err = ext4_ext_get_access(handle, inode, path + depth);
1258 	if (err)
1259 		goto cleanup;
1260 
1261 	if (!nearex) {
1262 		/* there is no extent in this leaf, create first one */
1263 		ext_debug("first extent in the leaf: %d:%llu:%d\n",
1264 				le32_to_cpu(newext->ee_block),
1265 				ext_pblock(newext),
1266 				le16_to_cpu(newext->ee_len));
1267 		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
1268 	} else if (le32_to_cpu(newext->ee_block)
1269 			   > le32_to_cpu(nearex->ee_block)) {
1270 /*		BUG_ON(newext->ee_block == nearex->ee_block); */
1271 		if (nearex != EXT_LAST_EXTENT(eh)) {
1272 			len = EXT_MAX_EXTENT(eh) - nearex;
1273 			len = (len - 1) * sizeof(struct ext4_extent);
1274 			len = len < 0 ? 0 : len;
1275 			ext_debug("insert %d:%llu:%d after: nearest 0x%p, "
1276 					"move %d from 0x%p to 0x%p\n",
1277 					le32_to_cpu(newext->ee_block),
1278 					ext_pblock(newext),
1279 					le16_to_cpu(newext->ee_len),
1280 					nearex, len, nearex + 1, nearex + 2);
1281 			memmove(nearex + 2, nearex + 1, len);
1282 		}
1283 		path[depth].p_ext = nearex + 1;
1284 	} else {
1285 		BUG_ON(newext->ee_block == nearex->ee_block);
1286 		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
1287 		len = len < 0 ? 0 : len;
1288 		ext_debug("insert %d:%llu:%d before: nearest 0x%p, "
1289 				"move %d from 0x%p to 0x%p\n",
1290 				le32_to_cpu(newext->ee_block),
1291 				ext_pblock(newext),
1292 				le16_to_cpu(newext->ee_len),
1293 				nearex, len, nearex + 1, nearex + 2);
1294 		memmove(nearex + 1, nearex, len);
1295 		path[depth].p_ext = nearex;
1296 	}
1297 
1298 	eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)+1);
1299 	nearex = path[depth].p_ext;
1300 	nearex->ee_block = newext->ee_block;
1301 	nearex->ee_start = newext->ee_start;
1302 	nearex->ee_start_hi = newext->ee_start_hi;
1303 	nearex->ee_len = newext->ee_len;
1304 
1305 merge:
1306 	/* try to merge extents to the right */
1307 	while (nearex < EXT_LAST_EXTENT(eh)) {
1308 		if (!ext4_can_extents_be_merged(inode, nearex, nearex + 1))
1309 			break;
1310 		/* merge with next extent! */
1311 		nearex->ee_len = cpu_to_le16(le16_to_cpu(nearex->ee_len)
1312 					     + le16_to_cpu(nearex[1].ee_len));
1313 		if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
1314 			len = (EXT_LAST_EXTENT(eh) - nearex - 1)
1315 					* sizeof(struct ext4_extent);
1316 			memmove(nearex + 1, nearex + 2, len);
1317 		}
1318 		eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
1319 		BUG_ON(eh->eh_entries == 0);
1320 	}
1321 
1322 	/* try to merge extents to the left */
1323 
1324 	/* time to correct all indexes above */
1325 	err = ext4_ext_correct_indexes(handle, inode, path);
1326 	if (err)
1327 		goto cleanup;
1328 
1329 	err = ext4_ext_dirty(handle, inode, path + depth);
1330 
1331 cleanup:
1332 	if (npath) {
1333 		ext4_ext_drop_refs(npath);
1334 		kfree(npath);
1335 	}
1336 	ext4_ext_tree_changed(inode);
1337 	ext4_ext_invalidate_cache(inode);
1338 	return err;
1339 }
1340 
1341 int ext4_ext_walk_space(struct inode *inode, unsigned long block,
1342 			unsigned long num, ext_prepare_callback func,
1343 			void *cbdata)
1344 {
1345 	struct ext4_ext_path *path = NULL;
1346 	struct ext4_ext_cache cbex;
1347 	struct ext4_extent *ex;
1348 	unsigned long next, start = 0, end = 0;
1349 	unsigned long last = block + num;
1350 	int depth, exists, err = 0;
1351 
1352 	BUG_ON(func == NULL);
1353 	BUG_ON(inode == NULL);
1354 
1355 	while (block < last && block != EXT_MAX_BLOCK) {
1356 		num = last - block;
1357 		/* find extent for this block */
1358 		path = ext4_ext_find_extent(inode, block, path);
1359 		if (IS_ERR(path)) {
1360 			err = PTR_ERR(path);
1361 			path = NULL;
1362 			break;
1363 		}
1364 
1365 		depth = ext_depth(inode);
1366 		BUG_ON(path[depth].p_hdr == NULL);
1367 		ex = path[depth].p_ext;
1368 		next = ext4_ext_next_allocated_block(path);
1369 
1370 		exists = 0;
1371 		if (!ex) {
1372 			/* there is no extent yet, so try to allocate
1373 			 * all requested space */
1374 			start = block;
1375 			end = block + num;
1376 		} else if (le32_to_cpu(ex->ee_block) > block) {
1377 			/* need to allocate space before found extent */
1378 			start = block;
1379 			end = le32_to_cpu(ex->ee_block);
1380 			if (block + num < end)
1381 				end = block + num;
1382 		} else if (block >=
1383 			     le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len)) {
1384 			/* need to allocate space after found extent */
1385 			start = block;
1386 			end = block + num;
1387 			if (end >= next)
1388 				end = next;
1389 		} else if (block >= le32_to_cpu(ex->ee_block)) {
1390 			/*
1391 			 * some part of requested space is covered
1392 			 * by found extent
1393 			 */
1394 			start = block;
1395 			end = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len);
1396 			if (block + num < end)
1397 				end = block + num;
1398 			exists = 1;
1399 		} else {
1400 			BUG();
1401 		}
1402 		BUG_ON(end <= start);
1403 
1404 		if (!exists) {
1405 			cbex.ec_block = start;
1406 			cbex.ec_len = end - start;
1407 			cbex.ec_start = 0;
1408 			cbex.ec_type = EXT4_EXT_CACHE_GAP;
1409 		} else {
1410 			cbex.ec_block = le32_to_cpu(ex->ee_block);
1411 			cbex.ec_len = le16_to_cpu(ex->ee_len);
1412 			cbex.ec_start = ext_pblock(ex);
1413 			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
1414 		}
1415 
1416 		BUG_ON(cbex.ec_len == 0);
1417 		err = func(inode, path, &cbex, cbdata);
1418 		ext4_ext_drop_refs(path);
1419 
1420 		if (err < 0)
1421 			break;
1422 		if (err == EXT_REPEAT)
1423 			continue;
1424 		else if (err == EXT_BREAK) {
1425 			err = 0;
1426 			break;
1427 		}
1428 
1429 		if (ext_depth(inode) != depth) {
1430 			/* depth was changed. we have to realloc path */
1431 			kfree(path);
1432 			path = NULL;
1433 		}
1434 
1435 		block = cbex.ec_block + cbex.ec_len;
1436 	}
1437 
1438 	if (path) {
1439 		ext4_ext_drop_refs(path);
1440 		kfree(path);
1441 	}
1442 
1443 	return err;
1444 }
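/*
 * Note on the walk above: the callback is invoked for each region of
 * [block, block + num), either as an allocated extent
 * (EXT4_EXT_CACHE_EXTENT) or as a hole (EXT4_EXT_CACHE_GAP).  A
 * negative return aborts the walk with that error, EXT_BREAK stops it
 * cleanly, EXT_REPEAT revisits the same region, and any other value
 * advances to the next region.
 */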
1445 
1446 static void
1447 ext4_ext_put_in_cache(struct inode *inode, __u32 block,
1448 			__u32 len, __u32 start, int type)
1449 {
1450 	struct ext4_ext_cache *cex;
1451 	BUG_ON(len == 0);
1452 	cex = &EXT4_I(inode)->i_cached_extent;
1453 	cex->ec_type = type;
1454 	cex->ec_block = block;
1455 	cex->ec_len = len;
1456 	cex->ec_start = start;
1457 }
1458 
1459 /*
1460  * ext4_ext_put_gap_in_cache:
1461  * calculate boundaries of the gap that the requested block fits into
1462  * and cache this gap
1463  */
1464 static void
1465 ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
1466 				unsigned long block)
1467 {
1468 	int depth = ext_depth(inode);
1469 	unsigned long lblock, len;
1470 	struct ext4_extent *ex;
1471 
1472 	ex = path[depth].p_ext;
1473 	if (ex == NULL) {
1474 		/* there is no extent yet, so gap is [0;-] */
1475 		lblock = 0;
1476 		len = EXT_MAX_BLOCK;
1477 		ext_debug("cache gap(whole file):");
1478 	} else if (block < le32_to_cpu(ex->ee_block)) {
1479 		lblock = block;
1480 		len = le32_to_cpu(ex->ee_block) - block;
1481 		ext_debug("cache gap(before): %lu [%lu:%lu]",
1482 				(unsigned long) block,
1483 				(unsigned long) le32_to_cpu(ex->ee_block),
1484 				(unsigned long) le16_to_cpu(ex->ee_len));
1485 	} else if (block >= le32_to_cpu(ex->ee_block)
1486 			    + le16_to_cpu(ex->ee_len)) {
1487 		lblock = le32_to_cpu(ex->ee_block)
1488 			 + le16_to_cpu(ex->ee_len);
1489 		len = ext4_ext_next_allocated_block(path);
1490 		ext_debug("cache gap(after): [%lu:%lu] %lu",
1491 				(unsigned long) le32_to_cpu(ex->ee_block),
1492 				(unsigned long) le16_to_cpu(ex->ee_len),
1493 				(unsigned long) block);
1494 		BUG_ON(len == lblock);
1495 		len = len - lblock;
1496 	} else {
1497 		lblock = len = 0;
1498 		BUG();
1499 	}
1500 
1501 	ext_debug(" -> %lu:%lu\n", (unsigned long) lblock, len);
1502 	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
1503 }
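/*
 * Illustrative example: with leaf extents [0..9] and [50..59], a
 * lookup for block 20 takes the "after" branch: lblock = 0 + 10 = 10,
 * ext4_ext_next_allocated_block() returns 50, and the cached gap
 * becomes [10; len 40].
 */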
1504 
1505 static int
1506 ext4_ext_in_cache(struct inode *inode, unsigned long block,
1507 			struct ext4_extent *ex)
1508 {
1509 	struct ext4_ext_cache *cex;
1510 
1511 	cex = &EXT4_I(inode)->i_cached_extent;
1512 
1513 	/* has cache valid data? */
1514 	if (cex->ec_type == EXT4_EXT_CACHE_NO)
1515 		return EXT4_EXT_CACHE_NO;
1516 
1517 	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
1518 			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
1519 	if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
1520 		ex->ee_block = cpu_to_le32(cex->ec_block);
1521 		ext4_ext_store_pblock(ex, cex->ec_start);
1522 		ex->ee_len = cpu_to_le16(cex->ec_len);
1523 		ext_debug("%lu cached by %lu:%lu:%llu\n",
1524 				(unsigned long) block,
1525 				(unsigned long) cex->ec_block,
1526 				(unsigned long) cex->ec_len,
1527 				cex->ec_start);
1528 		return cex->ec_type;
1529 	}
1530 
1531 	/* not in cache */
1532 	return EXT4_EXT_CACHE_NO;
1533 }
1534 
1535 /*
1536  * ext4_ext_rm_idx:
1537  * removes index from the index block.
1538  * It's used in truncate case only, thus all requests are for
1539  * last index in the block only.
1540  */
1541 int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
1542 			struct ext4_ext_path *path)
1543 {
1544 	struct buffer_head *bh;
1545 	int err;
1546 	ext4_fsblk_t leaf;
1547 
1548 	/* free index block */
1549 	path--;
1550 	leaf = idx_pblock(path->p_idx);
1551 	BUG_ON(path->p_hdr->eh_entries == 0);
1552 	err = ext4_ext_get_access(handle, inode, path);
1553 	if (err)
1554 		return err;
1555 	path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1);
1556 	err = ext4_ext_dirty(handle, inode, path);
1557 	if (err)
1558 		return err;
1559 	ext_debug("index is empty, remove it, free block %llu\n", leaf);
1560 	bh = sb_find_get_block(inode->i_sb, leaf);
1561 	ext4_forget(handle, 1, inode, bh, leaf);
1562 	ext4_free_blocks(handle, inode, leaf, 1);
1563 	return err;
1564 }
1565 
1566 /*
1567  * ext4_ext_calc_credits_for_insert:
1568  * This routine returns max. credits that the extent tree can consume.
1569  * It should be OK for low-performance paths like ->writepage()
1570  * To allow many writing processes to fit into a single transaction,
1571  * the caller should calculate credits under truncate_mutex and
1572  * pass the actual path.
1573  */
1574 int ext4_ext_calc_credits_for_insert(struct inode *inode,
1575 						struct ext4_ext_path *path)
1576 {
1577 	int depth, needed;
1578 
1579 	if (path) {
1580 		/* probably there is space in leaf? */
1581 		depth = ext_depth(inode);
1582 		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
1583 				< le16_to_cpu(path[depth].p_hdr->eh_max))
1584 			return 1;
1585 	}
1586 
1587 	/*
1588 	 * given 32-bit logical block (4294967296 blocks), max. tree
1589 	 * can be 4 levels in depth -- 4 * 340^4 == 53453440000.
1590 	 * Let's also add one more level for imbalance.
1591 	 */
1592 	depth = 5;
1593 
1594 	/* allocation of new data block(s) */
1595 	needed = 2;
1596 
1597 	/*
1598 	 * tree can be full, so it would need to grow in depth:
1599 	 * we need one credit to modify old root, credits for
1600 	 * new root will be added in split accounting
1601 	 */
1602 	needed += 1;
1603 
1604 	/*
1605 	 * Index split can happen, we would need:
1606 	 *    allocate intermediate indexes (bitmap + group)
1607 	 *  + change two blocks at each level, but root (already included)
1608 	 */
1609 	needed += (depth * 2) + (depth * 2);
1610 
1611 	/* any allocation modifies superblock */
1612 	needed += 1;
1613 
1614 	return needed;
1615 }
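/*
 * Worked arithmetic for the no-path case above: depth = 5 gives
 * 2 (data blocks) + 1 (old root) + (5 * 2 + 5 * 2) (intermediate
 * index allocation plus two modified blocks per level) + 1
 * (superblock) = 24 credits.
 */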
1616 
1617 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
1618 				struct ext4_extent *ex,
1619 				unsigned long from, unsigned long to)
1620 {
1621 	struct buffer_head *bh;
1622 	int i;
1623 
1624 #ifdef EXTENTS_STATS
1625 	{
1626 		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1627 		unsigned short ee_len =  le16_to_cpu(ex->ee_len);
1628 		spin_lock(&sbi->s_ext_stats_lock);
1629 		sbi->s_ext_blocks += ee_len;
1630 		sbi->s_ext_extents++;
1631 		if (ee_len < sbi->s_ext_min)
1632 			sbi->s_ext_min = ee_len;
1633 		if (ee_len > sbi->s_ext_max)
1634 			sbi->s_ext_max = ee_len;
1635 		if (ext_depth(inode) > sbi->s_depth_max)
1636 			sbi->s_depth_max = ext_depth(inode);
1637 		spin_unlock(&sbi->s_ext_stats_lock);
1638 	}
1639 #endif
1640 	if (from >= le32_to_cpu(ex->ee_block)
1641 	    && to == le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
1642 		/* tail removal */
1643 		unsigned long num;
1644 		ext4_fsblk_t start;
1645 		num = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - from;
1646 		start = ext_pblock(ex) + le16_to_cpu(ex->ee_len) - num;
1647 		ext_debug("free last %lu blocks starting %llu\n", num, start);
1648 		for (i = 0; i < num; i++) {
1649 			bh = sb_find_get_block(inode->i_sb, start + i);
1650 			ext4_forget(handle, 0, inode, bh, start + i);
1651 		}
1652 		ext4_free_blocks(handle, inode, start, num);
1653 	} else if (from == le32_to_cpu(ex->ee_block)
1654 		   && to <= le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
1655 		printk("strange request: removal %lu-%lu from %u:%u\n",
1656 		       from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
1657 	} else {
1658 		printk("strange request: removal(2) %lu-%lu from %u:%u\n",
1659 		       from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
1660 	}
1661 	return 0;
1662 }
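/*
 * Illustrative example of the tail-removal branch above: truncating
 * the extent {ee_block 100, ee_len 10, pblock 5000} with from == 104
 * and to == 109 frees num = 100 + 10 - 104 = 6 blocks starting at
 * pblock 5000 + 10 - 6 = 5004.  Head removal and punching a hole in
 * the middle are only reported as "strange request"s.
 */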
1663 
1664 static int
1665 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
1666 		struct ext4_ext_path *path, unsigned long start)
1667 {
1668 	int err = 0, correct_index = 0;
1669 	int depth = ext_depth(inode), credits;
1670 	struct ext4_extent_header *eh;
1671 	unsigned a, b, block, num;
1672 	unsigned long ex_ee_block;
1673 	unsigned short ex_ee_len;
1674 	struct ext4_extent *ex;
1675 
1676 	ext_debug("truncate since %lu in leaf\n", start);
1677 	if (!path[depth].p_hdr)
1678 		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
1679 	eh = path[depth].p_hdr;
1680 	BUG_ON(eh == NULL);
1681 	BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
1682 	BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
1683 
1684 	/* find where to start removing */
1685 	ex = EXT_LAST_EXTENT(eh);
1686 
1687 	ex_ee_block = le32_to_cpu(ex->ee_block);
1688 	ex_ee_len = le16_to_cpu(ex->ee_len);
1689 
1690 	while (ex >= EXT_FIRST_EXTENT(eh) &&
1691 			ex_ee_block + ex_ee_len > start) {
1692 		ext_debug("remove ext %lu:%u\n", ex_ee_block, ex_ee_len);
1693 		path[depth].p_ext = ex;
1694 
1695 		a = ex_ee_block > start ? ex_ee_block : start;
1696 		b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
1697 			ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;
1698 
1699 		ext_debug("  border %u:%u\n", a, b);
1700 
1701 		if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
1702 			block = 0;
1703 			num = 0;
1704 			BUG();
1705 		} else if (a != ex_ee_block) {
1706 			/* remove tail of the extent */
1707 			block = ex_ee_block;
1708 			num = a - block;
1709 		} else if (b != ex_ee_block + ex_ee_len - 1) {
1710 			/* remove head of the extent */
1711 			block = a;
1712 			num = b - a;
1713 			/* there is no "make a hole" API yet */
1714 			BUG();
1715 		} else {
1716 			/* remove whole extent: excellent! */
1717 			block = ex_ee_block;
1718 			num = 0;
1719 			BUG_ON(a != ex_ee_block);
1720 			BUG_ON(b != ex_ee_block + ex_ee_len - 1);
1721 		}
1722 
1723 		/* at present, extent can't cross block group: */
1724 		/* leaf + bitmap + group desc + sb + inode */
1725 		credits = 5;
1726 		if (ex == EXT_FIRST_EXTENT(eh)) {
1727 			correct_index = 1;
1728 			credits += (ext_depth(inode)) + 1;
1729 		}
1730 #ifdef CONFIG_QUOTA
1731 		credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
1732 #endif
1733 
1734 		handle = ext4_ext_journal_restart(handle, credits);
1735 		if (IS_ERR(handle)) {
1736 			err = PTR_ERR(handle);
1737 			goto out;
1738 		}
1739 
1740 		err = ext4_ext_get_access(handle, inode, path + depth);
1741 		if (err)
1742 			goto out;
1743 
1744 		err = ext4_remove_blocks(handle, inode, ex, a, b);
1745 		if (err)
1746 			goto out;
1747 
1748 		if (num == 0) {
1749 			/* this extent is removed; mark slot entirely unused */
1750 			ext4_ext_store_pblock(ex, 0);
1751 			eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
1752 		}
1753 
1754 		ex->ee_block = cpu_to_le32(block);
1755 		ex->ee_len = cpu_to_le16(num);
1756 
1757 		err = ext4_ext_dirty(handle, inode, path + depth);
1758 		if (err)
1759 			goto out;
1760 
1761 		ext_debug("new extent: %u:%u:%llu\n", block, num,
1762 				ext_pblock(ex));
1763 		ex--;
1764 		ex_ee_block = le32_to_cpu(ex->ee_block);
1765 		ex_ee_len = le16_to_cpu(ex->ee_len);
1766 	}
1767 
1768 	if (correct_index && eh->eh_entries)
1769 		err = ext4_ext_correct_indexes(handle, inode, path);
1770 
1771 	/* if this leaf is free, then we should
1772 	 * remove it from index block above */
1773 	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
1774 		err = ext4_ext_rm_idx(handle, inode, path + depth);
1775 
1776 out:
1777 	return err;
1778 }
1779 
1780 /*
1781  * ext4_ext_more_to_rm:
1782  * returns 1 if current index has to be freed (even partial)
1783  */
1784 static int
1785 ext4_ext_more_to_rm(struct ext4_ext_path *path)
1786 {
1787 	BUG_ON(path->p_idx == NULL);
1788 
1789 	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
1790 		return 0;
1791 
1792 	/*
1793 	 * if truncate on deeper level happened, it wasn't partial,
1794 	 * so we have to consider current index for truncation
1795 	 */
1796 	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
1797 		return 0;
1798 	return 1;
1799 }
1800 
1801 int ext4_ext_remove_space(struct inode *inode, unsigned long start)
1802 {
1803 	struct super_block *sb = inode->i_sb;
1804 	int depth = ext_depth(inode);
1805 	struct ext4_ext_path *path;
1806 	handle_t *handle;
1807 	int i = 0, err = 0;
1808 
1809 	ext_debug("truncate since %lu\n", start);
1810 
1811 	/* probably first extent we're gonna free will be last in block */
1812 	handle = ext4_journal_start(inode, depth + 1);
1813 	if (IS_ERR(handle))
1814 		return PTR_ERR(handle);
1815 
1816 	ext4_ext_invalidate_cache(inode);
1817 
1818 	/*
1819 	 * We start scanning from right side, freeing all the blocks
1820 	 * after i_size and walking into the tree depth-wise.
1821 	 */
1822 	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
1823 	if (path == NULL) {
1824 		ext4_journal_stop(handle);
1825 		return -ENOMEM;
1826 	}
1827 	path[0].p_hdr = ext_inode_hdr(inode);
1828 	if (ext4_ext_check_header(__FUNCTION__, inode, path[0].p_hdr)) {
1829 		err = -EIO;
1830 		goto out;
1831 	}
1832 	path[0].p_depth = depth;
1833 
1834 	while (i >= 0 && err == 0) {
1835 		if (i == depth) {
1836 			/* this is leaf block */
1837 			err = ext4_ext_rm_leaf(handle, inode, path, start);
1838 			/* root level has p_bh == NULL, brelse() eats this */
1839 			brelse(path[i].p_bh);
1840 			path[i].p_bh = NULL;
1841 			i--;
1842 			continue;
1843 		}
1844 
1845 		/* this is index block */
1846 		if (!path[i].p_hdr) {
1847 			ext_debug("initialize header\n");
1848 			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
1849 			if (ext4_ext_check_header(__FUNCTION__, inode,
1850 							path[i].p_hdr)) {
1851 				err = -EIO;
1852 				goto out;
1853 			}
1854 		}
1855 
1856 		BUG_ON(le16_to_cpu(path[i].p_hdr->eh_entries)
1857 			   > le16_to_cpu(path[i].p_hdr->eh_max));
1858 		BUG_ON(path[i].p_hdr->eh_magic != EXT4_EXT_MAGIC);
1859 
1860 		if (!path[i].p_idx) {
1861 			/* this level hasn't been touched yet */
1862 			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
1863 			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
1864 			ext_debug("init index ptr: hdr 0x%p, num %d\n",
1865 				  path[i].p_hdr,
1866 				  le16_to_cpu(path[i].p_hdr->eh_entries));
1867 		} else {
1868 			/* we were already here, see at next index */
1869 			path[i].p_idx--;
1870 		}
1871 
1872 		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
1873 				i, EXT_FIRST_INDEX(path[i].p_hdr),
1874 				path[i].p_idx);
1875 		if (ext4_ext_more_to_rm(path + i)) {
1876 			/* go to the next level */
1877 			ext_debug("move to level %d (block %llu)\n",
1878 				  i + 1, idx_pblock(path[i].p_idx));
1879 			memset(path + i + 1, 0, sizeof(*path));
1880 			path[i+1].p_bh =
1881 				sb_bread(sb, idx_pblock(path[i].p_idx));
1882 			if (!path[i+1].p_bh) {
1883 				/* should we reset i_size? */
1884 				err = -EIO;
1885 				break;
1886 			}
1887 
			/* save the current number of index entries: it may
			 * change during the next iteration */
1890 			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
1891 			i++;
1892 		} else {
1893 			/* we finished processing this index, go up */
1894 			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
				/* index is empty, remove it;
				 * the handle must already be prepared by
				 * ext4_ext_rm_leaf() */
1898 				err = ext4_ext_rm_idx(handle, inode, path + i);
1899 			}
1900 			/* root level has p_bh == NULL, brelse() eats this */
1901 			brelse(path[i].p_bh);
1902 			path[i].p_bh = NULL;
1903 			i--;
1904 			ext_debug("return to level %d\n", i);
1905 		}
1906 	}
1907 
1908 	/* TODO: flexible tree reduction should be here */
1909 	if (path->p_hdr->eh_entries == 0) {
		/*
		 * truncation to zero freed the whole tree, so reset
		 * eh_depth and eh_max on the in-inode root
		 */
1914 		err = ext4_ext_get_access(handle, inode, path);
1915 		if (err == 0) {
1916 			ext_inode_hdr(inode)->eh_depth = 0;
1917 			ext_inode_hdr(inode)->eh_max =
1918 				cpu_to_le16(ext4_ext_space_root(inode));
1919 			err = ext4_ext_dirty(handle, inode, path);
1920 		}
1921 	}
1922 out:
1923 	ext4_ext_tree_changed(inode);
1924 	ext4_ext_drop_refs(path);
1925 	kfree(path);
1926 	ext4_journal_stop(handle);
1927 
1928 	return err;
1929 }
1930 
1931 /*
1932  * called at mount time
1933  */
1934 void ext4_ext_init(struct super_block *sb)
1935 {
1936 	/*
1937 	 * possible initialization would be here
1938 	 */
1939 
1940 	if (test_opt(sb, EXTENTS)) {
		printk(KERN_INFO "EXT4-fs: file extents enabled");
1942 #ifdef AGGRESSIVE_TEST
1943 		printk(", aggressive tests");
1944 #endif
1945 #ifdef CHECK_BINSEARCH
1946 		printk(", check binsearch");
1947 #endif
1948 #ifdef EXTENTS_STATS
1949 		printk(", stats");
1950 #endif
1951 		printk("\n");
1952 #ifdef EXTENTS_STATS
1953 		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
1954 		EXT4_SB(sb)->s_ext_min = 1 << 30;
1955 		EXT4_SB(sb)->s_ext_max = 0;
1956 #endif
1957 	}
1958 }
1959 
1960 /*
1961  * called at umount time
1962  */
1963 void ext4_ext_release(struct super_block *sb)
1964 {
1965 	if (!test_opt(sb, EXTENTS))
1966 		return;
1967 
1968 #ifdef EXTENTS_STATS
1969 	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
1970 		struct ext4_sb_info *sbi = EXT4_SB(sb);
1971 		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
1972 			sbi->s_ext_blocks, sbi->s_ext_extents,
1973 			sbi->s_ext_blocks / sbi->s_ext_extents);
1974 		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
1975 			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
1976 	}
1977 #endif
1978 }
1979 
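/*
 * ext4_ext_get_blocks:
 * map the logical block @iblock of @inode to a physical block.
 * With create == 0 this is a pure lookup.  Returns the number of
 * contiguous blocks mapped starting at @iblock, 0 for a hole when
 * create == 0, or a negative error code.
 */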
1980 int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
1981 			ext4_fsblk_t iblock,
1982 			unsigned long max_blocks, struct buffer_head *bh_result,
1983 			int create, int extend_disksize)
1984 {
1985 	struct ext4_ext_path *path = NULL;
1986 	struct ext4_extent newex, *ex;
1987 	ext4_fsblk_t goal, newblock;
1988 	int err = 0, depth;
1989 	unsigned long allocated = 0;
1990 
1991 	__clear_bit(BH_New, &bh_result->b_state);
1992 	ext_debug("blocks %d/%lu requested for inode %u\n", (int) iblock,
1993 			max_blocks, (unsigned) inode->i_ino);
1994 	mutex_lock(&EXT4_I(inode)->truncate_mutex);
1995 
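	/*
	 * the extent cache remembers the result of the last lookup:
	 * either a mapped extent (EXT4_EXT_CACHE_EXTENT) or a hole
	 * between two extents (EXT4_EXT_CACHE_GAP)
	 */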
1996 	/* check in cache */
1997 	goal = ext4_ext_in_cache(inode, iblock, &newex);
1998 	if (goal) {
1999 		if (goal == EXT4_EXT_CACHE_GAP) {
2000 			if (!create) {
				/* the block isn't allocated yet and the
				 * caller doesn't want to allocate it */
2003 				goto out2;
2004 			}
2005 			/* we should allocate requested block */
2006 		} else if (goal == EXT4_EXT_CACHE_EXTENT) {
2007 			/* block is already allocated */
2008 			newblock = iblock
2009 				   - le32_to_cpu(newex.ee_block)
2010 				   + ext_pblock(&newex);
2011 			/* number of remaining blocks in the extent */
2012 			allocated = le16_to_cpu(newex.ee_len) -
2013 					(iblock - le32_to_cpu(newex.ee_block));
2014 			goto out;
2015 		} else {
2016 			BUG();
2017 		}
2018 	}
2019 
2020 	/* find extent for this block */
2021 	path = ext4_ext_find_extent(inode, iblock, NULL);
2022 	if (IS_ERR(path)) {
2023 		err = PTR_ERR(path);
2024 		path = NULL;
2025 		goto out2;
2026 	}
2027 
2028 	depth = ext_depth(inode);
2029 
2030 	/*
	 * a consistent leaf must not be empty;
	 * this situation is possible, though, _during_ tree modification;
	 * this is why the assert can't be put in ext4_ext_find_extent()
2034 	 */
2035 	BUG_ON(path[depth].p_ext == NULL && depth != 0);
2036 
2037 	ex = path[depth].p_ext;
2038 	if (ex) {
2039 		unsigned long ee_block = le32_to_cpu(ex->ee_block);
2040 		ext4_fsblk_t ee_start = ext_pblock(ex);
2041 		unsigned short ee_len  = le16_to_cpu(ex->ee_len);
2042 
2043 		/*
2044 		 * Allow future support for preallocated extents to be added
2045 		 * as an RO_COMPAT feature:
2046 		 * Uninitialized extents are treated as holes, except that
2047 		 * we avoid (fail) allocating new blocks during a write.
2048 		 */
2049 		if (ee_len > EXT_MAX_LEN)
2050 			goto out2;
2051 		/* if found extent covers block, simply return it */
2052 		if (iblock >= ee_block && iblock < ee_block + ee_len) {
2053 			newblock = iblock - ee_block + ee_start;
2054 			/* number of remaining blocks in the extent */
2055 			allocated = ee_len - (iblock - ee_block);
2056 			ext_debug("%d fit into %lu:%d -> %llu\n", (int) iblock,
2057 					ee_block, ee_len, newblock);
2058 			ext4_ext_put_in_cache(inode, ee_block, ee_len,
2059 						ee_start, EXT4_EXT_CACHE_EXTENT);
2060 			goto out;
2061 		}
2062 	}
2063 
2064 	/*
2065 	 * requested block isn't allocated yet;
	 * we must not try to create the block if the create flag is zero
2067 	 */
2068 	if (!create) {
		/* put the gap we just found into the cache to speed up
		 * subsequent requests */
2071 		ext4_ext_put_gap_in_cache(inode, path, iblock);
2072 		goto out2;
2073 	}
2074 	/*
2075 	 * Okay, we need to do block allocation.  Lazily initialize the block
2076 	 * allocation info here if necessary.
2077 	 */
2078 	if (S_ISREG(inode->i_mode) && (!EXT4_I(inode)->i_block_alloc_info))
2079 		ext4_init_block_alloc_info(inode);
2080 
2081 	/* allocate new block */
2082 	goal = ext4_ext_find_goal(inode, path, iblock);
2083 
2084 	/* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
2085 	newex.ee_block = cpu_to_le32(iblock);
2086 	newex.ee_len = cpu_to_le16(max_blocks);
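	/* if the requested range overlaps the next extent,
	 * ext4_ext_check_overlap() trims newex.ee_len so the new
	 * extent stops right in front of it */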
2087 	err = ext4_ext_check_overlap(inode, &newex, path);
2088 	if (err)
2089 		allocated = le16_to_cpu(newex.ee_len);
2090 	else
2091 		allocated = max_blocks;
2092 	newblock = ext4_new_blocks(handle, inode, goal, &allocated, &err);
2093 	if (!newblock)
2094 		goto out2;
2095 	ext_debug("allocate new block: goal %llu, found %llu/%lu\n",
2096 			goal, newblock, allocated);
2097 
2098 	/* try to insert new extent into found leaf and return */
2099 	ext4_ext_store_pblock(&newex, newblock);
2100 	newex.ee_len = cpu_to_le16(allocated);
2101 	err = ext4_ext_insert_extent(handle, inode, path, &newex);
2102 	if (err) {
2103 		/* free data blocks we just allocated */
2104 		ext4_free_blocks(handle, inode, ext_pblock(&newex),
2105 					le16_to_cpu(newex.ee_len));
2106 		goto out2;
2107 	}
2108 
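	/* i_disksize is the size that is safe against a crash:
	 * extend it only now that the new extent is in the tree */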
2109 	if (extend_disksize && inode->i_size > EXT4_I(inode)->i_disksize)
2110 		EXT4_I(inode)->i_disksize = inode->i_size;
2111 
	/* the previous routine could have used the block we allocated */
2113 	newblock = ext_pblock(&newex);
2114 	__set_bit(BH_New, &bh_result->b_state);
2115 
2116 	ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
2117 				EXT4_EXT_CACHE_EXTENT);
2118 out:
2119 	if (allocated > max_blocks)
2120 		allocated = max_blocks;
2121 	ext4_ext_show_leaf(inode, path);
2122 	__set_bit(BH_Mapped, &bh_result->b_state);
2123 	bh_result->b_bdev = inode->i_sb->s_bdev;
2124 	bh_result->b_blocknr = newblock;
2125 out2:
2126 	if (path) {
2127 		ext4_ext_drop_refs(path);
2128 		kfree(path);
2129 	}
2130 	mutex_unlock(&EXT4_I(inode)->truncate_mutex);
2131 
2132 	return err ? err : allocated;
2133 }
2134 
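/*
 * ext4_ext_truncate:
 * truncate an extent-mapped inode down to i_size: zero the tail of
 * the partial page (if any) and remove every extent beyond the last
 * block covered by i_size
 */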
void ext4_ext_truncate(struct inode *inode, struct page *page)
2136 {
2137 	struct address_space *mapping = inode->i_mapping;
2138 	struct super_block *sb = inode->i_sb;
2139 	unsigned long last_block;
2140 	handle_t *handle;
2141 	int err = 0;
2142 
	/*
	 * probably the first extent we free will be the last one in its
	 * block; err temporarily holds the journal credit estimate
	 */
2146 	err = ext4_writepage_trans_blocks(inode) + 3;
2147 	handle = ext4_journal_start(inode, err);
2148 	if (IS_ERR(handle)) {
2149 		if (page) {
2150 			clear_highpage(page);
2151 			flush_dcache_page(page);
2152 			unlock_page(page);
2153 			page_cache_release(page);
2154 		}
2155 		return;
2156 	}
2157 
2158 	if (page)
2159 		ext4_block_truncate_page(handle, page, mapping, inode->i_size);
2160 
2161 	mutex_lock(&EXT4_I(inode)->truncate_mutex);
2162 	ext4_ext_invalidate_cache(inode);
2163 
2164 	/*
2165 	 * TODO: optimization is possible here.
2166 	 * Probably we need not scan at all,
2167 	 * because page truncation is enough.
2168 	 */
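	/*
	 * put the inode on the orphan list first, so that if we crash
	 * in the middle of this multi-transaction truncate, recovery
	 * can finish the job
	 */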
2169 	if (ext4_orphan_add(handle, inode))
2170 		goto out_stop;
2171 
	/* record where to truncate from, in case we crash part-way */
2173 	EXT4_I(inode)->i_disksize = inode->i_size;
2174 	ext4_mark_inode_dirty(handle, inode);
2175 
2176 	last_block = (inode->i_size + sb->s_blocksize - 1)
2177 			>> EXT4_BLOCK_SIZE_BITS(sb);
2178 	err = ext4_ext_remove_space(inode, last_block);
2179 
2180 	/* In a multi-transaction truncate, we only make the final
2181 	 * transaction synchronous. */
2182 	if (IS_SYNC(inode))
2183 		handle->h_sync = 1;
2184 
2185 out_stop:
2186 	/*
2187 	 * If this was a simple ftruncate() and the file will remain alive,
2188 	 * then we need to clear up the orphan record which we created above.
2189 	 * However, if this was a real unlink then we were called by
2190 	 * ext4_delete_inode(), and we allow that function to clean up the
2191 	 * orphan info for us.
2192 	 */
2193 	if (inode->i_nlink)
2194 		ext4_orphan_del(handle, inode);
2195 
2196 	mutex_unlock(&EXT4_I(inode)->truncate_mutex);
2197 	ext4_journal_stop(handle);
2198 }
2199 
2200 /*
2201  * ext4_ext_writepage_trans_blocks:
 * calculate the maximum number of blocks we could modify
 * in order to allocate num new blocks for an inode
2204  */
2205 int ext4_ext_writepage_trans_blocks(struct inode *inode, int num)
2206 {
2207 	int needed;
2208 
2209 	needed = ext4_ext_calc_credits_for_insert(inode, NULL);
2210 
	/* the caller wants to allocate num blocks; each per-block
	 * estimate counts the superblock, but it is modified only once */
2212 	needed = needed * num - (num - 1);
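	/* e.g. with needed == 4 and num == 3 this gives
	 * 3 * 4 - (3 - 1) = 10 credits rather than 12 */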
2213 
2214 #ifdef CONFIG_QUOTA
2215 	needed += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
2216 #endif
2217 
2218 	return needed;
2219 }
2220