1 /* 2 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com 3 * Written by Alex Tomas <alex@clusterfs.com> 4 * 5 * Architecture independence: 6 * Copyright (c) 2005, Bull S.A. 7 * Written by Pierre Peiffer <pierre.peiffer@bull.net> 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public Licens 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- 21 */ 22 23 /* 24 * Extents support for EXT4 25 * 26 * TODO: 27 * - ext4*_error() should be used in some situations 28 * - analyze all BUG()/BUG_ON(), use -EIO where appropriate 29 * - smart tree reduction 30 */ 31 32 #include <linux/module.h> 33 #include <linux/fs.h> 34 #include <linux/time.h> 35 #include <linux/jbd2.h> 36 #include <linux/highuid.h> 37 #include <linux/pagemap.h> 38 #include <linux/quotaops.h> 39 #include <linux/string.h> 40 #include <linux/slab.h> 41 #include <linux/falloc.h> 42 #include <asm/uaccess.h> 43 #include <linux/fiemap.h> 44 #include "ext4_jbd2.h" 45 #include "ext4_extents.h" 46 47 48 /* 49 * ext_pblock: 50 * combine low and high parts of physical block number into ext4_fsblk_t 51 */ 52 static ext4_fsblk_t ext_pblock(struct ext4_extent *ex) 53 { 54 ext4_fsblk_t block; 55 56 block = le32_to_cpu(ex->ee_start_lo); 57 block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1; 58 return block; 59 } 60 61 /* 62 * idx_pblock: 63 * combine low and high parts of a leaf physical block number into ext4_fsblk_t 64 */ 65 ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix) 66 { 67 ext4_fsblk_t block; 68 69 block = le32_to_cpu(ix->ei_leaf_lo); 70 block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1; 71 return block; 72 } 73 74 /* 75 * ext4_ext_store_pblock: 76 * stores a large physical block number into an extent struct, 77 * breaking it into parts 78 */ 79 void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb) 80 { 81 ex->ee_start_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff)); 82 ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff); 83 } 84 85 /* 86 * ext4_idx_store_pblock: 87 * stores a large physical block number into an index struct, 88 * breaking it into parts 89 */ 90 static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb) 91 { 92 ix->ei_leaf_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff)); 93 ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff); 94 } 95 96 static int ext4_ext_journal_restart(handle_t *handle, int needed) 97 { 98 int err; 99 100 if (!ext4_handle_valid(handle)) 101 return 0; 102 if (handle->h_buffer_credits > needed) 103 return 0; 104 err = ext4_journal_extend(handle, needed); 105 if (err <= 0) 106 return err; 107 return ext4_journal_restart(handle, needed); 108 } 109 110 /* 111 * could return: 112 * - EROFS 113 * - ENOMEM 114 */ 115 static int ext4_ext_get_access(handle_t *handle, struct inode *inode, 116 struct ext4_ext_path *path) 117 { 118 if (path->p_bh) { 119 /* path points to block */ 120 return ext4_journal_get_write_access(handle, path->p_bh); 121 } 122 /* path points to leaf/index in inode body */ 123 /* we use in-core data, no need to protect them */ 124 return 0; 125 } 126 127 /* 128 * could return: 129 * - EROFS 130 * - ENOMEM 131 * - EIO 132 */ 133 static int ext4_ext_dirty(handle_t *handle, struct inode *inode, 134 struct ext4_ext_path *path) 135 { 136 int err; 137 if (path->p_bh) { 138 /* path points to block */ 139 err = ext4_handle_dirty_metadata(handle, inode, path->p_bh); 140 } else { 141 /* path points to leaf/index in inode body */ 142 err = ext4_mark_inode_dirty(handle, inode); 143 } 144 return err; 145 } 146 147 static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode, 148 struct ext4_ext_path *path, 149 ext4_lblk_t block) 150 { 151 struct ext4_inode_info *ei = EXT4_I(inode); 152 ext4_fsblk_t bg_start; 153 ext4_fsblk_t last_block; 154 ext4_grpblk_t colour; 155 ext4_group_t block_group; 156 int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb)); 157 int depth; 158 159 if (path) { 160 struct ext4_extent *ex; 161 depth = path->p_depth; 162 163 /* try to predict block placement */ 164 ex = path[depth].p_ext; 165 if (ex) 166 return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block)); 167 168 /* it looks like index is empty; 169 * try to find starting block from index itself */ 170 if (path[depth].p_bh) 171 return path[depth].p_bh->b_blocknr; 172 } 173 174 /* OK. use inode's group */ 175 block_group = ei->i_block_group; 176 if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) { 177 /* 178 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME 179 * block groups per flexgroup, reserve the first block 180 * group for directories and special files. Regular 181 * files will start at the second block group. This 182 * tends to speed up directory access and improves 183 * fsck times. 184 */ 185 block_group &= ~(flex_size-1); 186 if (S_ISREG(inode->i_mode)) 187 block_group++; 188 } 189 bg_start = (block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) + 190 le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block); 191 last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1; 192 193 /* 194 * If we are doing delayed allocation, we don't need take 195 * colour into account. 196 */ 197 if (test_opt(inode->i_sb, DELALLOC)) 198 return bg_start; 199 200 if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block) 201 colour = (current->pid % 16) * 202 (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16); 203 else 204 colour = (current->pid % 16) * ((last_block - bg_start) / 16); 205 return bg_start + colour + block; 206 } 207 208 /* 209 * Allocation for a meta data block 210 */ 211 static ext4_fsblk_t 212 ext4_ext_new_meta_block(handle_t *handle, struct inode *inode, 213 struct ext4_ext_path *path, 214 struct ext4_extent *ex, int *err) 215 { 216 ext4_fsblk_t goal, newblock; 217 218 goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block)); 219 newblock = ext4_new_meta_blocks(handle, inode, goal, NULL, err); 220 return newblock; 221 } 222 223 static int ext4_ext_space_block(struct inode *inode) 224 { 225 int size; 226 227 size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) 228 / sizeof(struct ext4_extent); 229 #ifdef AGGRESSIVE_TEST 230 if (size > 6) 231 size = 6; 232 #endif 233 return size; 234 } 235 236 static int ext4_ext_space_block_idx(struct inode *inode) 237 { 238 int size; 239 240 size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) 241 / sizeof(struct ext4_extent_idx); 242 #ifdef AGGRESSIVE_TEST 243 if (size > 5) 244 size = 5; 245 #endif 246 return size; 247 } 248 249 static int ext4_ext_space_root(struct inode *inode) 250 { 251 int size; 252 253 size = sizeof(EXT4_I(inode)->i_data); 254 size -= sizeof(struct ext4_extent_header); 255 size /= sizeof(struct ext4_extent); 256 #ifdef AGGRESSIVE_TEST 257 if (size > 3) 258 size = 3; 259 #endif 260 return size; 261 } 262 263 static int ext4_ext_space_root_idx(struct inode *inode) 264 { 265 int size; 266 267 size = sizeof(EXT4_I(inode)->i_data); 268 size -= sizeof(struct ext4_extent_header); 269 size /= sizeof(struct ext4_extent_idx); 270 #ifdef AGGRESSIVE_TEST 271 if (size > 4) 272 size = 4; 273 #endif 274 return size; 275 } 276 277 /* 278 * Calculate the number of metadata blocks needed 279 * to allocate @blocks 280 * Worse case is one block per extent 281 */ 282 int ext4_ext_calc_metadata_amount(struct inode *inode, int blocks) 283 { 284 int lcap, icap, rcap, leafs, idxs, num; 285 int newextents = blocks; 286 287 rcap = ext4_ext_space_root_idx(inode); 288 lcap = ext4_ext_space_block(inode); 289 icap = ext4_ext_space_block_idx(inode); 290 291 /* number of new leaf blocks needed */ 292 num = leafs = (newextents + lcap - 1) / lcap; 293 294 /* 295 * Worse case, we need separate index block(s) 296 * to link all new leaf blocks 297 */ 298 idxs = (leafs + icap - 1) / icap; 299 do { 300 num += idxs; 301 idxs = (idxs + icap - 1) / icap; 302 } while (idxs > rcap); 303 304 return num; 305 } 306 307 static int 308 ext4_ext_max_entries(struct inode *inode, int depth) 309 { 310 int max; 311 312 if (depth == ext_depth(inode)) { 313 if (depth == 0) 314 max = ext4_ext_space_root(inode); 315 else 316 max = ext4_ext_space_root_idx(inode); 317 } else { 318 if (depth == 0) 319 max = ext4_ext_space_block(inode); 320 else 321 max = ext4_ext_space_block_idx(inode); 322 } 323 324 return max; 325 } 326 327 static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) 328 { 329 ext4_fsblk_t block = ext_pblock(ext), valid_block; 330 int len = ext4_ext_get_actual_len(ext); 331 struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; 332 333 valid_block = le32_to_cpu(es->s_first_data_block) + 334 EXT4_SB(inode->i_sb)->s_gdb_count; 335 if (unlikely(block <= valid_block || 336 ((block + len) > ext4_blocks_count(es)))) 337 return 0; 338 else 339 return 1; 340 } 341 342 static int ext4_valid_extent_idx(struct inode *inode, 343 struct ext4_extent_idx *ext_idx) 344 { 345 ext4_fsblk_t block = idx_pblock(ext_idx), valid_block; 346 struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; 347 348 valid_block = le32_to_cpu(es->s_first_data_block) + 349 EXT4_SB(inode->i_sb)->s_gdb_count; 350 if (unlikely(block <= valid_block || 351 (block >= ext4_blocks_count(es)))) 352 return 0; 353 else 354 return 1; 355 } 356 357 static int ext4_valid_extent_entries(struct inode *inode, 358 struct ext4_extent_header *eh, 359 int depth) 360 { 361 struct ext4_extent *ext; 362 struct ext4_extent_idx *ext_idx; 363 unsigned short entries; 364 if (eh->eh_entries == 0) 365 return 1; 366 367 entries = le16_to_cpu(eh->eh_entries); 368 369 if (depth == 0) { 370 /* leaf entries */ 371 ext = EXT_FIRST_EXTENT(eh); 372 while (entries) { 373 if (!ext4_valid_extent(inode, ext)) 374 return 0; 375 ext++; 376 entries--; 377 } 378 } else { 379 ext_idx = EXT_FIRST_INDEX(eh); 380 while (entries) { 381 if (!ext4_valid_extent_idx(inode, ext_idx)) 382 return 0; 383 ext_idx++; 384 entries--; 385 } 386 } 387 return 1; 388 } 389 390 static int __ext4_ext_check(const char *function, struct inode *inode, 391 struct ext4_extent_header *eh, 392 int depth) 393 { 394 const char *error_msg; 395 int max = 0; 396 397 if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) { 398 error_msg = "invalid magic"; 399 goto corrupted; 400 } 401 if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) { 402 error_msg = "unexpected eh_depth"; 403 goto corrupted; 404 } 405 if (unlikely(eh->eh_max == 0)) { 406 error_msg = "invalid eh_max"; 407 goto corrupted; 408 } 409 max = ext4_ext_max_entries(inode, depth); 410 if (unlikely(le16_to_cpu(eh->eh_max) > max)) { 411 error_msg = "too large eh_max"; 412 goto corrupted; 413 } 414 if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) { 415 error_msg = "invalid eh_entries"; 416 goto corrupted; 417 } 418 if (!ext4_valid_extent_entries(inode, eh, depth)) { 419 error_msg = "invalid extent entries"; 420 goto corrupted; 421 } 422 return 0; 423 424 corrupted: 425 ext4_error(inode->i_sb, function, 426 "bad header/extent in inode #%lu: %s - magic %x, " 427 "entries %u, max %u(%u), depth %u(%u)", 428 inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic), 429 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max), 430 max, le16_to_cpu(eh->eh_depth), depth); 431 432 return -EIO; 433 } 434 435 #define ext4_ext_check(inode, eh, depth) \ 436 __ext4_ext_check(__func__, inode, eh, depth) 437 438 int ext4_ext_check_inode(struct inode *inode) 439 { 440 return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode)); 441 } 442 443 #ifdef EXT_DEBUG 444 static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path) 445 { 446 int k, l = path->p_depth; 447 448 ext_debug("path:"); 449 for (k = 0; k <= l; k++, path++) { 450 if (path->p_idx) { 451 ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block), 452 idx_pblock(path->p_idx)); 453 } else if (path->p_ext) { 454 ext_debug(" %d:%d:%llu ", 455 le32_to_cpu(path->p_ext->ee_block), 456 ext4_ext_get_actual_len(path->p_ext), 457 ext_pblock(path->p_ext)); 458 } else 459 ext_debug(" []"); 460 } 461 ext_debug("\n"); 462 } 463 464 static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path) 465 { 466 int depth = ext_depth(inode); 467 struct ext4_extent_header *eh; 468 struct ext4_extent *ex; 469 int i; 470 471 if (!path) 472 return; 473 474 eh = path[depth].p_hdr; 475 ex = EXT_FIRST_EXTENT(eh); 476 477 for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) { 478 ext_debug("%d:%d:%llu ", le32_to_cpu(ex->ee_block), 479 ext4_ext_get_actual_len(ex), ext_pblock(ex)); 480 } 481 ext_debug("\n"); 482 } 483 #else 484 #define ext4_ext_show_path(inode, path) 485 #define ext4_ext_show_leaf(inode, path) 486 #endif 487 488 void ext4_ext_drop_refs(struct ext4_ext_path *path) 489 { 490 int depth = path->p_depth; 491 int i; 492 493 for (i = 0; i <= depth; i++, path++) 494 if (path->p_bh) { 495 brelse(path->p_bh); 496 path->p_bh = NULL; 497 } 498 } 499 500 /* 501 * ext4_ext_binsearch_idx: 502 * binary search for the closest index of the given block 503 * the header must be checked before calling this 504 */ 505 static void 506 ext4_ext_binsearch_idx(struct inode *inode, 507 struct ext4_ext_path *path, ext4_lblk_t block) 508 { 509 struct ext4_extent_header *eh = path->p_hdr; 510 struct ext4_extent_idx *r, *l, *m; 511 512 513 ext_debug("binsearch for %u(idx): ", block); 514 515 l = EXT_FIRST_INDEX(eh) + 1; 516 r = EXT_LAST_INDEX(eh); 517 while (l <= r) { 518 m = l + (r - l) / 2; 519 if (block < le32_to_cpu(m->ei_block)) 520 r = m - 1; 521 else 522 l = m + 1; 523 ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block), 524 m, le32_to_cpu(m->ei_block), 525 r, le32_to_cpu(r->ei_block)); 526 } 527 528 path->p_idx = l - 1; 529 ext_debug(" -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block), 530 idx_pblock(path->p_idx)); 531 532 #ifdef CHECK_BINSEARCH 533 { 534 struct ext4_extent_idx *chix, *ix; 535 int k; 536 537 chix = ix = EXT_FIRST_INDEX(eh); 538 for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) { 539 if (k != 0 && 540 le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) { 541 printk(KERN_DEBUG "k=%d, ix=0x%p, " 542 "first=0x%p\n", k, 543 ix, EXT_FIRST_INDEX(eh)); 544 printk(KERN_DEBUG "%u <= %u\n", 545 le32_to_cpu(ix->ei_block), 546 le32_to_cpu(ix[-1].ei_block)); 547 } 548 BUG_ON(k && le32_to_cpu(ix->ei_block) 549 <= le32_to_cpu(ix[-1].ei_block)); 550 if (block < le32_to_cpu(ix->ei_block)) 551 break; 552 chix = ix; 553 } 554 BUG_ON(chix != path->p_idx); 555 } 556 #endif 557 558 } 559 560 /* 561 * ext4_ext_binsearch: 562 * binary search for closest extent of the given block 563 * the header must be checked before calling this 564 */ 565 static void 566 ext4_ext_binsearch(struct inode *inode, 567 struct ext4_ext_path *path, ext4_lblk_t block) 568 { 569 struct ext4_extent_header *eh = path->p_hdr; 570 struct ext4_extent *r, *l, *m; 571 572 if (eh->eh_entries == 0) { 573 /* 574 * this leaf is empty: 575 * we get such a leaf in split/add case 576 */ 577 return; 578 } 579 580 ext_debug("binsearch for %u: ", block); 581 582 l = EXT_FIRST_EXTENT(eh) + 1; 583 r = EXT_LAST_EXTENT(eh); 584 585 while (l <= r) { 586 m = l + (r - l) / 2; 587 if (block < le32_to_cpu(m->ee_block)) 588 r = m - 1; 589 else 590 l = m + 1; 591 ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block), 592 m, le32_to_cpu(m->ee_block), 593 r, le32_to_cpu(r->ee_block)); 594 } 595 596 path->p_ext = l - 1; 597 ext_debug(" -> %d:%llu:%d ", 598 le32_to_cpu(path->p_ext->ee_block), 599 ext_pblock(path->p_ext), 600 ext4_ext_get_actual_len(path->p_ext)); 601 602 #ifdef CHECK_BINSEARCH 603 { 604 struct ext4_extent *chex, *ex; 605 int k; 606 607 chex = ex = EXT_FIRST_EXTENT(eh); 608 for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) { 609 BUG_ON(k && le32_to_cpu(ex->ee_block) 610 <= le32_to_cpu(ex[-1].ee_block)); 611 if (block < le32_to_cpu(ex->ee_block)) 612 break; 613 chex = ex; 614 } 615 BUG_ON(chex != path->p_ext); 616 } 617 #endif 618 619 } 620 621 int ext4_ext_tree_init(handle_t *handle, struct inode *inode) 622 { 623 struct ext4_extent_header *eh; 624 625 eh = ext_inode_hdr(inode); 626 eh->eh_depth = 0; 627 eh->eh_entries = 0; 628 eh->eh_magic = EXT4_EXT_MAGIC; 629 eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode)); 630 ext4_mark_inode_dirty(handle, inode); 631 ext4_ext_invalidate_cache(inode); 632 return 0; 633 } 634 635 struct ext4_ext_path * 636 ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, 637 struct ext4_ext_path *path) 638 { 639 struct ext4_extent_header *eh; 640 struct buffer_head *bh; 641 short int depth, i, ppos = 0, alloc = 0; 642 643 eh = ext_inode_hdr(inode); 644 depth = ext_depth(inode); 645 646 /* account possible depth increase */ 647 if (!path) { 648 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2), 649 GFP_NOFS); 650 if (!path) 651 return ERR_PTR(-ENOMEM); 652 alloc = 1; 653 } 654 path[0].p_hdr = eh; 655 path[0].p_bh = NULL; 656 657 i = depth; 658 /* walk through the tree */ 659 while (i) { 660 int need_to_validate = 0; 661 662 ext_debug("depth %d: num %d, max %d\n", 663 ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); 664 665 ext4_ext_binsearch_idx(inode, path + ppos, block); 666 path[ppos].p_block = idx_pblock(path[ppos].p_idx); 667 path[ppos].p_depth = i; 668 path[ppos].p_ext = NULL; 669 670 bh = sb_getblk(inode->i_sb, path[ppos].p_block); 671 if (unlikely(!bh)) 672 goto err; 673 if (!bh_uptodate_or_lock(bh)) { 674 if (bh_submit_read(bh) < 0) { 675 put_bh(bh); 676 goto err; 677 } 678 /* validate the extent entries */ 679 need_to_validate = 1; 680 } 681 eh = ext_block_hdr(bh); 682 ppos++; 683 BUG_ON(ppos > depth); 684 path[ppos].p_bh = bh; 685 path[ppos].p_hdr = eh; 686 i--; 687 688 if (need_to_validate && ext4_ext_check(inode, eh, i)) 689 goto err; 690 } 691 692 path[ppos].p_depth = i; 693 path[ppos].p_ext = NULL; 694 path[ppos].p_idx = NULL; 695 696 /* find extent */ 697 ext4_ext_binsearch(inode, path + ppos, block); 698 /* if not an empty leaf */ 699 if (path[ppos].p_ext) 700 path[ppos].p_block = ext_pblock(path[ppos].p_ext); 701 702 ext4_ext_show_path(inode, path); 703 704 return path; 705 706 err: 707 ext4_ext_drop_refs(path); 708 if (alloc) 709 kfree(path); 710 return ERR_PTR(-EIO); 711 } 712 713 /* 714 * ext4_ext_insert_index: 715 * insert new index [@logical;@ptr] into the block at @curp; 716 * check where to insert: before @curp or after @curp 717 */ 718 static int ext4_ext_insert_index(handle_t *handle, struct inode *inode, 719 struct ext4_ext_path *curp, 720 int logical, ext4_fsblk_t ptr) 721 { 722 struct ext4_extent_idx *ix; 723 int len, err; 724 725 err = ext4_ext_get_access(handle, inode, curp); 726 if (err) 727 return err; 728 729 BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block)); 730 len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx; 731 if (logical > le32_to_cpu(curp->p_idx->ei_block)) { 732 /* insert after */ 733 if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) { 734 len = (len - 1) * sizeof(struct ext4_extent_idx); 735 len = len < 0 ? 0 : len; 736 ext_debug("insert new index %d after: %llu. " 737 "move %d from 0x%p to 0x%p\n", 738 logical, ptr, len, 739 (curp->p_idx + 1), (curp->p_idx + 2)); 740 memmove(curp->p_idx + 2, curp->p_idx + 1, len); 741 } 742 ix = curp->p_idx + 1; 743 } else { 744 /* insert before */ 745 len = len * sizeof(struct ext4_extent_idx); 746 len = len < 0 ? 0 : len; 747 ext_debug("insert new index %d before: %llu. " 748 "move %d from 0x%p to 0x%p\n", 749 logical, ptr, len, 750 curp->p_idx, (curp->p_idx + 1)); 751 memmove(curp->p_idx + 1, curp->p_idx, len); 752 ix = curp->p_idx; 753 } 754 755 ix->ei_block = cpu_to_le32(logical); 756 ext4_idx_store_pblock(ix, ptr); 757 le16_add_cpu(&curp->p_hdr->eh_entries, 1); 758 759 BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries) 760 > le16_to_cpu(curp->p_hdr->eh_max)); 761 BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr)); 762 763 err = ext4_ext_dirty(handle, inode, curp); 764 ext4_std_error(inode->i_sb, err); 765 766 return err; 767 } 768 769 /* 770 * ext4_ext_split: 771 * inserts new subtree into the path, using free index entry 772 * at depth @at: 773 * - allocates all needed blocks (new leaf and all intermediate index blocks) 774 * - makes decision where to split 775 * - moves remaining extents and index entries (right to the split point) 776 * into the newly allocated blocks 777 * - initializes subtree 778 */ 779 static int ext4_ext_split(handle_t *handle, struct inode *inode, 780 struct ext4_ext_path *path, 781 struct ext4_extent *newext, int at) 782 { 783 struct buffer_head *bh = NULL; 784 int depth = ext_depth(inode); 785 struct ext4_extent_header *neh; 786 struct ext4_extent_idx *fidx; 787 struct ext4_extent *ex; 788 int i = at, k, m, a; 789 ext4_fsblk_t newblock, oldblock; 790 __le32 border; 791 ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */ 792 int err = 0; 793 794 /* make decision: where to split? */ 795 /* FIXME: now decision is simplest: at current extent */ 796 797 /* if current leaf will be split, then we should use 798 * border from split point */ 799 BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr)); 800 if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) { 801 border = path[depth].p_ext[1].ee_block; 802 ext_debug("leaf will be split." 803 " next leaf starts at %d\n", 804 le32_to_cpu(border)); 805 } else { 806 border = newext->ee_block; 807 ext_debug("leaf will be added." 808 " next leaf starts at %d\n", 809 le32_to_cpu(border)); 810 } 811 812 /* 813 * If error occurs, then we break processing 814 * and mark filesystem read-only. index won't 815 * be inserted and tree will be in consistent 816 * state. Next mount will repair buffers too. 817 */ 818 819 /* 820 * Get array to track all allocated blocks. 821 * We need this to handle errors and free blocks 822 * upon them. 823 */ 824 ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS); 825 if (!ablocks) 826 return -ENOMEM; 827 828 /* allocate all needed blocks */ 829 ext_debug("allocate %d blocks for indexes/leaf\n", depth - at); 830 for (a = 0; a < depth - at; a++) { 831 newblock = ext4_ext_new_meta_block(handle, inode, path, 832 newext, &err); 833 if (newblock == 0) 834 goto cleanup; 835 ablocks[a] = newblock; 836 } 837 838 /* initialize new leaf */ 839 newblock = ablocks[--a]; 840 BUG_ON(newblock == 0); 841 bh = sb_getblk(inode->i_sb, newblock); 842 if (!bh) { 843 err = -EIO; 844 goto cleanup; 845 } 846 lock_buffer(bh); 847 848 err = ext4_journal_get_create_access(handle, bh); 849 if (err) 850 goto cleanup; 851 852 neh = ext_block_hdr(bh); 853 neh->eh_entries = 0; 854 neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode)); 855 neh->eh_magic = EXT4_EXT_MAGIC; 856 neh->eh_depth = 0; 857 ex = EXT_FIRST_EXTENT(neh); 858 859 /* move remainder of path[depth] to the new leaf */ 860 BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max); 861 /* start copy from next extent */ 862 /* TODO: we could do it by single memmove */ 863 m = 0; 864 path[depth].p_ext++; 865 while (path[depth].p_ext <= 866 EXT_MAX_EXTENT(path[depth].p_hdr)) { 867 ext_debug("move %d:%llu:%d in new leaf %llu\n", 868 le32_to_cpu(path[depth].p_ext->ee_block), 869 ext_pblock(path[depth].p_ext), 870 ext4_ext_get_actual_len(path[depth].p_ext), 871 newblock); 872 /*memmove(ex++, path[depth].p_ext++, 873 sizeof(struct ext4_extent)); 874 neh->eh_entries++;*/ 875 path[depth].p_ext++; 876 m++; 877 } 878 if (m) { 879 memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m); 880 le16_add_cpu(&neh->eh_entries, m); 881 } 882 883 set_buffer_uptodate(bh); 884 unlock_buffer(bh); 885 886 err = ext4_handle_dirty_metadata(handle, inode, bh); 887 if (err) 888 goto cleanup; 889 brelse(bh); 890 bh = NULL; 891 892 /* correct old leaf */ 893 if (m) { 894 err = ext4_ext_get_access(handle, inode, path + depth); 895 if (err) 896 goto cleanup; 897 le16_add_cpu(&path[depth].p_hdr->eh_entries, -m); 898 err = ext4_ext_dirty(handle, inode, path + depth); 899 if (err) 900 goto cleanup; 901 902 } 903 904 /* create intermediate indexes */ 905 k = depth - at - 1; 906 BUG_ON(k < 0); 907 if (k) 908 ext_debug("create %d intermediate indices\n", k); 909 /* insert new index into current index block */ 910 /* current depth stored in i var */ 911 i = depth - 1; 912 while (k--) { 913 oldblock = newblock; 914 newblock = ablocks[--a]; 915 bh = sb_getblk(inode->i_sb, newblock); 916 if (!bh) { 917 err = -EIO; 918 goto cleanup; 919 } 920 lock_buffer(bh); 921 922 err = ext4_journal_get_create_access(handle, bh); 923 if (err) 924 goto cleanup; 925 926 neh = ext_block_hdr(bh); 927 neh->eh_entries = cpu_to_le16(1); 928 neh->eh_magic = EXT4_EXT_MAGIC; 929 neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode)); 930 neh->eh_depth = cpu_to_le16(depth - i); 931 fidx = EXT_FIRST_INDEX(neh); 932 fidx->ei_block = border; 933 ext4_idx_store_pblock(fidx, oldblock); 934 935 ext_debug("int.index at %d (block %llu): %u -> %llu\n", 936 i, newblock, le32_to_cpu(border), oldblock); 937 /* copy indexes */ 938 m = 0; 939 path[i].p_idx++; 940 941 ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, 942 EXT_MAX_INDEX(path[i].p_hdr)); 943 BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) != 944 EXT_LAST_INDEX(path[i].p_hdr)); 945 while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) { 946 ext_debug("%d: move %d:%llu in new index %llu\n", i, 947 le32_to_cpu(path[i].p_idx->ei_block), 948 idx_pblock(path[i].p_idx), 949 newblock); 950 /*memmove(++fidx, path[i].p_idx++, 951 sizeof(struct ext4_extent_idx)); 952 neh->eh_entries++; 953 BUG_ON(neh->eh_entries > neh->eh_max);*/ 954 path[i].p_idx++; 955 m++; 956 } 957 if (m) { 958 memmove(++fidx, path[i].p_idx - m, 959 sizeof(struct ext4_extent_idx) * m); 960 le16_add_cpu(&neh->eh_entries, m); 961 } 962 set_buffer_uptodate(bh); 963 unlock_buffer(bh); 964 965 err = ext4_handle_dirty_metadata(handle, inode, bh); 966 if (err) 967 goto cleanup; 968 brelse(bh); 969 bh = NULL; 970 971 /* correct old index */ 972 if (m) { 973 err = ext4_ext_get_access(handle, inode, path + i); 974 if (err) 975 goto cleanup; 976 le16_add_cpu(&path[i].p_hdr->eh_entries, -m); 977 err = ext4_ext_dirty(handle, inode, path + i); 978 if (err) 979 goto cleanup; 980 } 981 982 i--; 983 } 984 985 /* insert new index */ 986 err = ext4_ext_insert_index(handle, inode, path + at, 987 le32_to_cpu(border), newblock); 988 989 cleanup: 990 if (bh) { 991 if (buffer_locked(bh)) 992 unlock_buffer(bh); 993 brelse(bh); 994 } 995 996 if (err) { 997 /* free all allocated blocks in error case */ 998 for (i = 0; i < depth; i++) { 999 if (!ablocks[i]) 1000 continue; 1001 ext4_free_blocks(handle, inode, ablocks[i], 1, 1); 1002 } 1003 } 1004 kfree(ablocks); 1005 1006 return err; 1007 } 1008 1009 /* 1010 * ext4_ext_grow_indepth: 1011 * implements tree growing procedure: 1012 * - allocates new block 1013 * - moves top-level data (index block or leaf) into the new block 1014 * - initializes new top-level, creating index that points to the 1015 * just created block 1016 */ 1017 static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, 1018 struct ext4_ext_path *path, 1019 struct ext4_extent *newext) 1020 { 1021 struct ext4_ext_path *curp = path; 1022 struct ext4_extent_header *neh; 1023 struct ext4_extent_idx *fidx; 1024 struct buffer_head *bh; 1025 ext4_fsblk_t newblock; 1026 int err = 0; 1027 1028 newblock = ext4_ext_new_meta_block(handle, inode, path, newext, &err); 1029 if (newblock == 0) 1030 return err; 1031 1032 bh = sb_getblk(inode->i_sb, newblock); 1033 if (!bh) { 1034 err = -EIO; 1035 ext4_std_error(inode->i_sb, err); 1036 return err; 1037 } 1038 lock_buffer(bh); 1039 1040 err = ext4_journal_get_create_access(handle, bh); 1041 if (err) { 1042 unlock_buffer(bh); 1043 goto out; 1044 } 1045 1046 /* move top-level index/leaf into new block */ 1047 memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data)); 1048 1049 /* set size of new block */ 1050 neh = ext_block_hdr(bh); 1051 /* old root could have indexes or leaves 1052 * so calculate e_max right way */ 1053 if (ext_depth(inode)) 1054 neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode)); 1055 else 1056 neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode)); 1057 neh->eh_magic = EXT4_EXT_MAGIC; 1058 set_buffer_uptodate(bh); 1059 unlock_buffer(bh); 1060 1061 err = ext4_handle_dirty_metadata(handle, inode, bh); 1062 if (err) 1063 goto out; 1064 1065 /* create index in new top-level index: num,max,pointer */ 1066 err = ext4_ext_get_access(handle, inode, curp); 1067 if (err) 1068 goto out; 1069 1070 curp->p_hdr->eh_magic = EXT4_EXT_MAGIC; 1071 curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode)); 1072 curp->p_hdr->eh_entries = cpu_to_le16(1); 1073 curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr); 1074 1075 if (path[0].p_hdr->eh_depth) 1076 curp->p_idx->ei_block = 1077 EXT_FIRST_INDEX(path[0].p_hdr)->ei_block; 1078 else 1079 curp->p_idx->ei_block = 1080 EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block; 1081 ext4_idx_store_pblock(curp->p_idx, newblock); 1082 1083 neh = ext_inode_hdr(inode); 1084 fidx = EXT_FIRST_INDEX(neh); 1085 ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n", 1086 le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max), 1087 le32_to_cpu(fidx->ei_block), idx_pblock(fidx)); 1088 1089 neh->eh_depth = cpu_to_le16(path->p_depth + 1); 1090 err = ext4_ext_dirty(handle, inode, curp); 1091 out: 1092 brelse(bh); 1093 1094 return err; 1095 } 1096 1097 /* 1098 * ext4_ext_create_new_leaf: 1099 * finds empty index and adds new leaf. 1100 * if no free index is found, then it requests in-depth growing. 1101 */ 1102 static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, 1103 struct ext4_ext_path *path, 1104 struct ext4_extent *newext) 1105 { 1106 struct ext4_ext_path *curp; 1107 int depth, i, err = 0; 1108 1109 repeat: 1110 i = depth = ext_depth(inode); 1111 1112 /* walk up to the tree and look for free index entry */ 1113 curp = path + depth; 1114 while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) { 1115 i--; 1116 curp--; 1117 } 1118 1119 /* we use already allocated block for index block, 1120 * so subsequent data blocks should be contiguous */ 1121 if (EXT_HAS_FREE_INDEX(curp)) { 1122 /* if we found index with free entry, then use that 1123 * entry: create all needed subtree and add new leaf */ 1124 err = ext4_ext_split(handle, inode, path, newext, i); 1125 if (err) 1126 goto out; 1127 1128 /* refill path */ 1129 ext4_ext_drop_refs(path); 1130 path = ext4_ext_find_extent(inode, 1131 (ext4_lblk_t)le32_to_cpu(newext->ee_block), 1132 path); 1133 if (IS_ERR(path)) 1134 err = PTR_ERR(path); 1135 } else { 1136 /* tree is full, time to grow in depth */ 1137 err = ext4_ext_grow_indepth(handle, inode, path, newext); 1138 if (err) 1139 goto out; 1140 1141 /* refill path */ 1142 ext4_ext_drop_refs(path); 1143 path = ext4_ext_find_extent(inode, 1144 (ext4_lblk_t)le32_to_cpu(newext->ee_block), 1145 path); 1146 if (IS_ERR(path)) { 1147 err = PTR_ERR(path); 1148 goto out; 1149 } 1150 1151 /* 1152 * only first (depth 0 -> 1) produces free space; 1153 * in all other cases we have to split the grown tree 1154 */ 1155 depth = ext_depth(inode); 1156 if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) { 1157 /* now we need to split */ 1158 goto repeat; 1159 } 1160 } 1161 1162 out: 1163 return err; 1164 } 1165 1166 /* 1167 * search the closest allocated block to the left for *logical 1168 * and returns it at @logical + it's physical address at @phys 1169 * if *logical is the smallest allocated block, the function 1170 * returns 0 at @phys 1171 * return value contains 0 (success) or error code 1172 */ 1173 int 1174 ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path, 1175 ext4_lblk_t *logical, ext4_fsblk_t *phys) 1176 { 1177 struct ext4_extent_idx *ix; 1178 struct ext4_extent *ex; 1179 int depth, ee_len; 1180 1181 BUG_ON(path == NULL); 1182 depth = path->p_depth; 1183 *phys = 0; 1184 1185 if (depth == 0 && path->p_ext == NULL) 1186 return 0; 1187 1188 /* usually extent in the path covers blocks smaller 1189 * then *logical, but it can be that extent is the 1190 * first one in the file */ 1191 1192 ex = path[depth].p_ext; 1193 ee_len = ext4_ext_get_actual_len(ex); 1194 if (*logical < le32_to_cpu(ex->ee_block)) { 1195 BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex); 1196 while (--depth >= 0) { 1197 ix = path[depth].p_idx; 1198 BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr)); 1199 } 1200 return 0; 1201 } 1202 1203 BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len)); 1204 1205 *logical = le32_to_cpu(ex->ee_block) + ee_len - 1; 1206 *phys = ext_pblock(ex) + ee_len - 1; 1207 return 0; 1208 } 1209 1210 /* 1211 * search the closest allocated block to the right for *logical 1212 * and returns it at @logical + it's physical address at @phys 1213 * if *logical is the smallest allocated block, the function 1214 * returns 0 at @phys 1215 * return value contains 0 (success) or error code 1216 */ 1217 int 1218 ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path, 1219 ext4_lblk_t *logical, ext4_fsblk_t *phys) 1220 { 1221 struct buffer_head *bh = NULL; 1222 struct ext4_extent_header *eh; 1223 struct ext4_extent_idx *ix; 1224 struct ext4_extent *ex; 1225 ext4_fsblk_t block; 1226 int depth; /* Note, NOT eh_depth; depth from top of tree */ 1227 int ee_len; 1228 1229 BUG_ON(path == NULL); 1230 depth = path->p_depth; 1231 *phys = 0; 1232 1233 if (depth == 0 && path->p_ext == NULL) 1234 return 0; 1235 1236 /* usually extent in the path covers blocks smaller 1237 * then *logical, but it can be that extent is the 1238 * first one in the file */ 1239 1240 ex = path[depth].p_ext; 1241 ee_len = ext4_ext_get_actual_len(ex); 1242 if (*logical < le32_to_cpu(ex->ee_block)) { 1243 BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex); 1244 while (--depth >= 0) { 1245 ix = path[depth].p_idx; 1246 BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr)); 1247 } 1248 *logical = le32_to_cpu(ex->ee_block); 1249 *phys = ext_pblock(ex); 1250 return 0; 1251 } 1252 1253 BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len)); 1254 1255 if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { 1256 /* next allocated block in this leaf */ 1257 ex++; 1258 *logical = le32_to_cpu(ex->ee_block); 1259 *phys = ext_pblock(ex); 1260 return 0; 1261 } 1262 1263 /* go up and search for index to the right */ 1264 while (--depth >= 0) { 1265 ix = path[depth].p_idx; 1266 if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) 1267 goto got_index; 1268 } 1269 1270 /* we've gone up to the root and found no index to the right */ 1271 return 0; 1272 1273 got_index: 1274 /* we've found index to the right, let's 1275 * follow it and find the closest allocated 1276 * block to the right */ 1277 ix++; 1278 block = idx_pblock(ix); 1279 while (++depth < path->p_depth) { 1280 bh = sb_bread(inode->i_sb, block); 1281 if (bh == NULL) 1282 return -EIO; 1283 eh = ext_block_hdr(bh); 1284 /* subtract from p_depth to get proper eh_depth */ 1285 if (ext4_ext_check(inode, eh, path->p_depth - depth)) { 1286 put_bh(bh); 1287 return -EIO; 1288 } 1289 ix = EXT_FIRST_INDEX(eh); 1290 block = idx_pblock(ix); 1291 put_bh(bh); 1292 } 1293 1294 bh = sb_bread(inode->i_sb, block); 1295 if (bh == NULL) 1296 return -EIO; 1297 eh = ext_block_hdr(bh); 1298 if (ext4_ext_check(inode, eh, path->p_depth - depth)) { 1299 put_bh(bh); 1300 return -EIO; 1301 } 1302 ex = EXT_FIRST_EXTENT(eh); 1303 *logical = le32_to_cpu(ex->ee_block); 1304 *phys = ext_pblock(ex); 1305 put_bh(bh); 1306 return 0; 1307 } 1308 1309 /* 1310 * ext4_ext_next_allocated_block: 1311 * returns allocated block in subsequent extent or EXT_MAX_BLOCK. 1312 * NOTE: it considers block number from index entry as 1313 * allocated block. Thus, index entries have to be consistent 1314 * with leaves. 1315 */ 1316 static ext4_lblk_t 1317 ext4_ext_next_allocated_block(struct ext4_ext_path *path) 1318 { 1319 int depth; 1320 1321 BUG_ON(path == NULL); 1322 depth = path->p_depth; 1323 1324 if (depth == 0 && path->p_ext == NULL) 1325 return EXT_MAX_BLOCK; 1326 1327 while (depth >= 0) { 1328 if (depth == path->p_depth) { 1329 /* leaf */ 1330 if (path[depth].p_ext != 1331 EXT_LAST_EXTENT(path[depth].p_hdr)) 1332 return le32_to_cpu(path[depth].p_ext[1].ee_block); 1333 } else { 1334 /* index */ 1335 if (path[depth].p_idx != 1336 EXT_LAST_INDEX(path[depth].p_hdr)) 1337 return le32_to_cpu(path[depth].p_idx[1].ei_block); 1338 } 1339 depth--; 1340 } 1341 1342 return EXT_MAX_BLOCK; 1343 } 1344 1345 /* 1346 * ext4_ext_next_leaf_block: 1347 * returns first allocated block from next leaf or EXT_MAX_BLOCK 1348 */ 1349 static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode, 1350 struct ext4_ext_path *path) 1351 { 1352 int depth; 1353 1354 BUG_ON(path == NULL); 1355 depth = path->p_depth; 1356 1357 /* zero-tree has no leaf blocks at all */ 1358 if (depth == 0) 1359 return EXT_MAX_BLOCK; 1360 1361 /* go to index block */ 1362 depth--; 1363 1364 while (depth >= 0) { 1365 if (path[depth].p_idx != 1366 EXT_LAST_INDEX(path[depth].p_hdr)) 1367 return (ext4_lblk_t) 1368 le32_to_cpu(path[depth].p_idx[1].ei_block); 1369 depth--; 1370 } 1371 1372 return EXT_MAX_BLOCK; 1373 } 1374 1375 /* 1376 * ext4_ext_correct_indexes: 1377 * if leaf gets modified and modified extent is first in the leaf, 1378 * then we have to correct all indexes above. 1379 * TODO: do we need to correct tree in all cases? 1380 */ 1381 static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, 1382 struct ext4_ext_path *path) 1383 { 1384 struct ext4_extent_header *eh; 1385 int depth = ext_depth(inode); 1386 struct ext4_extent *ex; 1387 __le32 border; 1388 int k, err = 0; 1389 1390 eh = path[depth].p_hdr; 1391 ex = path[depth].p_ext; 1392 BUG_ON(ex == NULL); 1393 BUG_ON(eh == NULL); 1394 1395 if (depth == 0) { 1396 /* there is no tree at all */ 1397 return 0; 1398 } 1399 1400 if (ex != EXT_FIRST_EXTENT(eh)) { 1401 /* we correct tree if first leaf got modified only */ 1402 return 0; 1403 } 1404 1405 /* 1406 * TODO: we need correction if border is smaller than current one 1407 */ 1408 k = depth - 1; 1409 border = path[depth].p_ext->ee_block; 1410 err = ext4_ext_get_access(handle, inode, path + k); 1411 if (err) 1412 return err; 1413 path[k].p_idx->ei_block = border; 1414 err = ext4_ext_dirty(handle, inode, path + k); 1415 if (err) 1416 return err; 1417 1418 while (k--) { 1419 /* change all left-side indexes */ 1420 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) 1421 break; 1422 err = ext4_ext_get_access(handle, inode, path + k); 1423 if (err) 1424 break; 1425 path[k].p_idx->ei_block = border; 1426 err = ext4_ext_dirty(handle, inode, path + k); 1427 if (err) 1428 break; 1429 } 1430 1431 return err; 1432 } 1433 1434 static int 1435 ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, 1436 struct ext4_extent *ex2) 1437 { 1438 unsigned short ext1_ee_len, ext2_ee_len, max_len; 1439 1440 /* 1441 * Make sure that either both extents are uninitialized, or 1442 * both are _not_. 1443 */ 1444 if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2)) 1445 return 0; 1446 1447 if (ext4_ext_is_uninitialized(ex1)) 1448 max_len = EXT_UNINIT_MAX_LEN; 1449 else 1450 max_len = EXT_INIT_MAX_LEN; 1451 1452 ext1_ee_len = ext4_ext_get_actual_len(ex1); 1453 ext2_ee_len = ext4_ext_get_actual_len(ex2); 1454 1455 if (le32_to_cpu(ex1->ee_block) + ext1_ee_len != 1456 le32_to_cpu(ex2->ee_block)) 1457 return 0; 1458 1459 /* 1460 * To allow future support for preallocated extents to be added 1461 * as an RO_COMPAT feature, refuse to merge to extents if 1462 * this can result in the top bit of ee_len being set. 1463 */ 1464 if (ext1_ee_len + ext2_ee_len > max_len) 1465 return 0; 1466 #ifdef AGGRESSIVE_TEST 1467 if (ext1_ee_len >= 4) 1468 return 0; 1469 #endif 1470 1471 if (ext_pblock(ex1) + ext1_ee_len == ext_pblock(ex2)) 1472 return 1; 1473 return 0; 1474 } 1475 1476 /* 1477 * This function tries to merge the "ex" extent to the next extent in the tree. 1478 * It always tries to merge towards right. If you want to merge towards 1479 * left, pass "ex - 1" as argument instead of "ex". 1480 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns 1481 * 1 if they got merged. 1482 */ 1483 int ext4_ext_try_to_merge(struct inode *inode, 1484 struct ext4_ext_path *path, 1485 struct ext4_extent *ex) 1486 { 1487 struct ext4_extent_header *eh; 1488 unsigned int depth, len; 1489 int merge_done = 0; 1490 int uninitialized = 0; 1491 1492 depth = ext_depth(inode); 1493 BUG_ON(path[depth].p_hdr == NULL); 1494 eh = path[depth].p_hdr; 1495 1496 while (ex < EXT_LAST_EXTENT(eh)) { 1497 if (!ext4_can_extents_be_merged(inode, ex, ex + 1)) 1498 break; 1499 /* merge with next extent! */ 1500 if (ext4_ext_is_uninitialized(ex)) 1501 uninitialized = 1; 1502 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 1503 + ext4_ext_get_actual_len(ex + 1)); 1504 if (uninitialized) 1505 ext4_ext_mark_uninitialized(ex); 1506 1507 if (ex + 1 < EXT_LAST_EXTENT(eh)) { 1508 len = (EXT_LAST_EXTENT(eh) - ex - 1) 1509 * sizeof(struct ext4_extent); 1510 memmove(ex + 1, ex + 2, len); 1511 } 1512 le16_add_cpu(&eh->eh_entries, -1); 1513 merge_done = 1; 1514 WARN_ON(eh->eh_entries == 0); 1515 if (!eh->eh_entries) 1516 ext4_error(inode->i_sb, "ext4_ext_try_to_merge", 1517 "inode#%lu, eh->eh_entries = 0!", inode->i_ino); 1518 } 1519 1520 return merge_done; 1521 } 1522 1523 /* 1524 * check if a portion of the "newext" extent overlaps with an 1525 * existing extent. 1526 * 1527 * If there is an overlap discovered, it updates the length of the newext 1528 * such that there will be no overlap, and then returns 1. 1529 * If there is no overlap found, it returns 0. 1530 */ 1531 unsigned int ext4_ext_check_overlap(struct inode *inode, 1532 struct ext4_extent *newext, 1533 struct ext4_ext_path *path) 1534 { 1535 ext4_lblk_t b1, b2; 1536 unsigned int depth, len1; 1537 unsigned int ret = 0; 1538 1539 b1 = le32_to_cpu(newext->ee_block); 1540 len1 = ext4_ext_get_actual_len(newext); 1541 depth = ext_depth(inode); 1542 if (!path[depth].p_ext) 1543 goto out; 1544 b2 = le32_to_cpu(path[depth].p_ext->ee_block); 1545 1546 /* 1547 * get the next allocated block if the extent in the path 1548 * is before the requested block(s) 1549 */ 1550 if (b2 < b1) { 1551 b2 = ext4_ext_next_allocated_block(path); 1552 if (b2 == EXT_MAX_BLOCK) 1553 goto out; 1554 } 1555 1556 /* check for wrap through zero on extent logical start block*/ 1557 if (b1 + len1 < b1) { 1558 len1 = EXT_MAX_BLOCK - b1; 1559 newext->ee_len = cpu_to_le16(len1); 1560 ret = 1; 1561 } 1562 1563 /* check for overlap */ 1564 if (b1 + len1 > b2) { 1565 newext->ee_len = cpu_to_le16(b2 - b1); 1566 ret = 1; 1567 } 1568 out: 1569 return ret; 1570 } 1571 1572 /* 1573 * ext4_ext_insert_extent: 1574 * tries to merge requsted extent into the existing extent or 1575 * inserts requested extent as new one into the tree, 1576 * creating new leaf in the no-space case. 1577 */ 1578 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, 1579 struct ext4_ext_path *path, 1580 struct ext4_extent *newext) 1581 { 1582 struct ext4_extent_header *eh; 1583 struct ext4_extent *ex, *fex; 1584 struct ext4_extent *nearex; /* nearest extent */ 1585 struct ext4_ext_path *npath = NULL; 1586 int depth, len, err; 1587 ext4_lblk_t next; 1588 unsigned uninitialized = 0; 1589 1590 BUG_ON(ext4_ext_get_actual_len(newext) == 0); 1591 depth = ext_depth(inode); 1592 ex = path[depth].p_ext; 1593 BUG_ON(path[depth].p_hdr == NULL); 1594 1595 /* try to insert block into found extent and return */ 1596 if (ex && ext4_can_extents_be_merged(inode, ex, newext)) { 1597 ext_debug("append %d block to %d:%d (from %llu)\n", 1598 ext4_ext_get_actual_len(newext), 1599 le32_to_cpu(ex->ee_block), 1600 ext4_ext_get_actual_len(ex), ext_pblock(ex)); 1601 err = ext4_ext_get_access(handle, inode, path + depth); 1602 if (err) 1603 return err; 1604 1605 /* 1606 * ext4_can_extents_be_merged should have checked that either 1607 * both extents are uninitialized, or both aren't. Thus we 1608 * need to check only one of them here. 1609 */ 1610 if (ext4_ext_is_uninitialized(ex)) 1611 uninitialized = 1; 1612 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 1613 + ext4_ext_get_actual_len(newext)); 1614 if (uninitialized) 1615 ext4_ext_mark_uninitialized(ex); 1616 eh = path[depth].p_hdr; 1617 nearex = ex; 1618 goto merge; 1619 } 1620 1621 repeat: 1622 depth = ext_depth(inode); 1623 eh = path[depth].p_hdr; 1624 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) 1625 goto has_space; 1626 1627 /* probably next leaf has space for us? */ 1628 fex = EXT_LAST_EXTENT(eh); 1629 next = ext4_ext_next_leaf_block(inode, path); 1630 if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block) 1631 && next != EXT_MAX_BLOCK) { 1632 ext_debug("next leaf block - %d\n", next); 1633 BUG_ON(npath != NULL); 1634 npath = ext4_ext_find_extent(inode, next, NULL); 1635 if (IS_ERR(npath)) 1636 return PTR_ERR(npath); 1637 BUG_ON(npath->p_depth != path->p_depth); 1638 eh = npath[depth].p_hdr; 1639 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { 1640 ext_debug("next leaf isnt full(%d)\n", 1641 le16_to_cpu(eh->eh_entries)); 1642 path = npath; 1643 goto repeat; 1644 } 1645 ext_debug("next leaf has no free space(%d,%d)\n", 1646 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); 1647 } 1648 1649 /* 1650 * There is no free space in the found leaf. 1651 * We're gonna add a new leaf in the tree. 1652 */ 1653 err = ext4_ext_create_new_leaf(handle, inode, path, newext); 1654 if (err) 1655 goto cleanup; 1656 depth = ext_depth(inode); 1657 eh = path[depth].p_hdr; 1658 1659 has_space: 1660 nearex = path[depth].p_ext; 1661 1662 err = ext4_ext_get_access(handle, inode, path + depth); 1663 if (err) 1664 goto cleanup; 1665 1666 if (!nearex) { 1667 /* there is no extent in this leaf, create first one */ 1668 ext_debug("first extent in the leaf: %d:%llu:%d\n", 1669 le32_to_cpu(newext->ee_block), 1670 ext_pblock(newext), 1671 ext4_ext_get_actual_len(newext)); 1672 path[depth].p_ext = EXT_FIRST_EXTENT(eh); 1673 } else if (le32_to_cpu(newext->ee_block) 1674 > le32_to_cpu(nearex->ee_block)) { 1675 /* BUG_ON(newext->ee_block == nearex->ee_block); */ 1676 if (nearex != EXT_LAST_EXTENT(eh)) { 1677 len = EXT_MAX_EXTENT(eh) - nearex; 1678 len = (len - 1) * sizeof(struct ext4_extent); 1679 len = len < 0 ? 0 : len; 1680 ext_debug("insert %d:%llu:%d after: nearest 0x%p, " 1681 "move %d from 0x%p to 0x%p\n", 1682 le32_to_cpu(newext->ee_block), 1683 ext_pblock(newext), 1684 ext4_ext_get_actual_len(newext), 1685 nearex, len, nearex + 1, nearex + 2); 1686 memmove(nearex + 2, nearex + 1, len); 1687 } 1688 path[depth].p_ext = nearex + 1; 1689 } else { 1690 BUG_ON(newext->ee_block == nearex->ee_block); 1691 len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent); 1692 len = len < 0 ? 0 : len; 1693 ext_debug("insert %d:%llu:%d before: nearest 0x%p, " 1694 "move %d from 0x%p to 0x%p\n", 1695 le32_to_cpu(newext->ee_block), 1696 ext_pblock(newext), 1697 ext4_ext_get_actual_len(newext), 1698 nearex, len, nearex + 1, nearex + 2); 1699 memmove(nearex + 1, nearex, len); 1700 path[depth].p_ext = nearex; 1701 } 1702 1703 le16_add_cpu(&eh->eh_entries, 1); 1704 nearex = path[depth].p_ext; 1705 nearex->ee_block = newext->ee_block; 1706 ext4_ext_store_pblock(nearex, ext_pblock(newext)); 1707 nearex->ee_len = newext->ee_len; 1708 1709 merge: 1710 /* try to merge extents to the right */ 1711 ext4_ext_try_to_merge(inode, path, nearex); 1712 1713 /* try to merge extents to the left */ 1714 1715 /* time to correct all indexes above */ 1716 err = ext4_ext_correct_indexes(handle, inode, path); 1717 if (err) 1718 goto cleanup; 1719 1720 err = ext4_ext_dirty(handle, inode, path + depth); 1721 1722 cleanup: 1723 if (npath) { 1724 ext4_ext_drop_refs(npath); 1725 kfree(npath); 1726 } 1727 ext4_ext_invalidate_cache(inode); 1728 return err; 1729 } 1730 1731 int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block, 1732 ext4_lblk_t num, ext_prepare_callback func, 1733 void *cbdata) 1734 { 1735 struct ext4_ext_path *path = NULL; 1736 struct ext4_ext_cache cbex; 1737 struct ext4_extent *ex; 1738 ext4_lblk_t next, start = 0, end = 0; 1739 ext4_lblk_t last = block + num; 1740 int depth, exists, err = 0; 1741 1742 BUG_ON(func == NULL); 1743 BUG_ON(inode == NULL); 1744 1745 while (block < last && block != EXT_MAX_BLOCK) { 1746 num = last - block; 1747 /* find extent for this block */ 1748 path = ext4_ext_find_extent(inode, block, path); 1749 if (IS_ERR(path)) { 1750 err = PTR_ERR(path); 1751 path = NULL; 1752 break; 1753 } 1754 1755 depth = ext_depth(inode); 1756 BUG_ON(path[depth].p_hdr == NULL); 1757 ex = path[depth].p_ext; 1758 next = ext4_ext_next_allocated_block(path); 1759 1760 exists = 0; 1761 if (!ex) { 1762 /* there is no extent yet, so try to allocate 1763 * all requested space */ 1764 start = block; 1765 end = block + num; 1766 } else if (le32_to_cpu(ex->ee_block) > block) { 1767 /* need to allocate space before found extent */ 1768 start = block; 1769 end = le32_to_cpu(ex->ee_block); 1770 if (block + num < end) 1771 end = block + num; 1772 } else if (block >= le32_to_cpu(ex->ee_block) 1773 + ext4_ext_get_actual_len(ex)) { 1774 /* need to allocate space after found extent */ 1775 start = block; 1776 end = block + num; 1777 if (end >= next) 1778 end = next; 1779 } else if (block >= le32_to_cpu(ex->ee_block)) { 1780 /* 1781 * some part of requested space is covered 1782 * by found extent 1783 */ 1784 start = block; 1785 end = le32_to_cpu(ex->ee_block) 1786 + ext4_ext_get_actual_len(ex); 1787 if (block + num < end) 1788 end = block + num; 1789 exists = 1; 1790 } else { 1791 BUG(); 1792 } 1793 BUG_ON(end <= start); 1794 1795 if (!exists) { 1796 cbex.ec_block = start; 1797 cbex.ec_len = end - start; 1798 cbex.ec_start = 0; 1799 cbex.ec_type = EXT4_EXT_CACHE_GAP; 1800 } else { 1801 cbex.ec_block = le32_to_cpu(ex->ee_block); 1802 cbex.ec_len = ext4_ext_get_actual_len(ex); 1803 cbex.ec_start = ext_pblock(ex); 1804 cbex.ec_type = EXT4_EXT_CACHE_EXTENT; 1805 } 1806 1807 BUG_ON(cbex.ec_len == 0); 1808 err = func(inode, path, &cbex, ex, cbdata); 1809 ext4_ext_drop_refs(path); 1810 1811 if (err < 0) 1812 break; 1813 1814 if (err == EXT_REPEAT) 1815 continue; 1816 else if (err == EXT_BREAK) { 1817 err = 0; 1818 break; 1819 } 1820 1821 if (ext_depth(inode) != depth) { 1822 /* depth was changed. we have to realloc path */ 1823 kfree(path); 1824 path = NULL; 1825 } 1826 1827 block = cbex.ec_block + cbex.ec_len; 1828 } 1829 1830 if (path) { 1831 ext4_ext_drop_refs(path); 1832 kfree(path); 1833 } 1834 1835 return err; 1836 } 1837 1838 static void 1839 ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block, 1840 __u32 len, ext4_fsblk_t start, int type) 1841 { 1842 struct ext4_ext_cache *cex; 1843 BUG_ON(len == 0); 1844 cex = &EXT4_I(inode)->i_cached_extent; 1845 cex->ec_type = type; 1846 cex->ec_block = block; 1847 cex->ec_len = len; 1848 cex->ec_start = start; 1849 } 1850 1851 /* 1852 * ext4_ext_put_gap_in_cache: 1853 * calculate boundaries of the gap that the requested block fits into 1854 * and cache this gap 1855 */ 1856 static void 1857 ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, 1858 ext4_lblk_t block) 1859 { 1860 int depth = ext_depth(inode); 1861 unsigned long len; 1862 ext4_lblk_t lblock; 1863 struct ext4_extent *ex; 1864 1865 ex = path[depth].p_ext; 1866 if (ex == NULL) { 1867 /* there is no extent yet, so gap is [0;-] */ 1868 lblock = 0; 1869 len = EXT_MAX_BLOCK; 1870 ext_debug("cache gap(whole file):"); 1871 } else if (block < le32_to_cpu(ex->ee_block)) { 1872 lblock = block; 1873 len = le32_to_cpu(ex->ee_block) - block; 1874 ext_debug("cache gap(before): %u [%u:%u]", 1875 block, 1876 le32_to_cpu(ex->ee_block), 1877 ext4_ext_get_actual_len(ex)); 1878 } else if (block >= le32_to_cpu(ex->ee_block) 1879 + ext4_ext_get_actual_len(ex)) { 1880 ext4_lblk_t next; 1881 lblock = le32_to_cpu(ex->ee_block) 1882 + ext4_ext_get_actual_len(ex); 1883 1884 next = ext4_ext_next_allocated_block(path); 1885 ext_debug("cache gap(after): [%u:%u] %u", 1886 le32_to_cpu(ex->ee_block), 1887 ext4_ext_get_actual_len(ex), 1888 block); 1889 BUG_ON(next == lblock); 1890 len = next - lblock; 1891 } else { 1892 lblock = len = 0; 1893 BUG(); 1894 } 1895 1896 ext_debug(" -> %u:%lu\n", lblock, len); 1897 ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP); 1898 } 1899 1900 static int 1901 ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, 1902 struct ext4_extent *ex) 1903 { 1904 struct ext4_ext_cache *cex; 1905 1906 cex = &EXT4_I(inode)->i_cached_extent; 1907 1908 /* has cache valid data? */ 1909 if (cex->ec_type == EXT4_EXT_CACHE_NO) 1910 return EXT4_EXT_CACHE_NO; 1911 1912 BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP && 1913 cex->ec_type != EXT4_EXT_CACHE_EXTENT); 1914 if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) { 1915 ex->ee_block = cpu_to_le32(cex->ec_block); 1916 ext4_ext_store_pblock(ex, cex->ec_start); 1917 ex->ee_len = cpu_to_le16(cex->ec_len); 1918 ext_debug("%u cached by %u:%u:%llu\n", 1919 block, 1920 cex->ec_block, cex->ec_len, cex->ec_start); 1921 return cex->ec_type; 1922 } 1923 1924 /* not in cache */ 1925 return EXT4_EXT_CACHE_NO; 1926 } 1927 1928 /* 1929 * ext4_ext_rm_idx: 1930 * removes index from the index block. 1931 * It's used in truncate case only, thus all requests are for 1932 * last index in the block only. 1933 */ 1934 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, 1935 struct ext4_ext_path *path) 1936 { 1937 struct buffer_head *bh; 1938 int err; 1939 ext4_fsblk_t leaf; 1940 1941 /* free index block */ 1942 path--; 1943 leaf = idx_pblock(path->p_idx); 1944 BUG_ON(path->p_hdr->eh_entries == 0); 1945 err = ext4_ext_get_access(handle, inode, path); 1946 if (err) 1947 return err; 1948 le16_add_cpu(&path->p_hdr->eh_entries, -1); 1949 err = ext4_ext_dirty(handle, inode, path); 1950 if (err) 1951 return err; 1952 ext_debug("index is empty, remove it, free block %llu\n", leaf); 1953 bh = sb_find_get_block(inode->i_sb, leaf); 1954 ext4_forget(handle, 1, inode, bh, leaf); 1955 ext4_free_blocks(handle, inode, leaf, 1, 1); 1956 return err; 1957 } 1958 1959 /* 1960 * ext4_ext_calc_credits_for_single_extent: 1961 * This routine returns max. credits that needed to insert an extent 1962 * to the extent tree. 1963 * When pass the actual path, the caller should calculate credits 1964 * under i_data_sem. 1965 */ 1966 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, 1967 struct ext4_ext_path *path) 1968 { 1969 if (path) { 1970 int depth = ext_depth(inode); 1971 int ret = 0; 1972 1973 /* probably there is space in leaf? */ 1974 if (le16_to_cpu(path[depth].p_hdr->eh_entries) 1975 < le16_to_cpu(path[depth].p_hdr->eh_max)) { 1976 1977 /* 1978 * There are some space in the leaf tree, no 1979 * need to account for leaf block credit 1980 * 1981 * bitmaps and block group descriptor blocks 1982 * and other metadat blocks still need to be 1983 * accounted. 1984 */ 1985 /* 1 bitmap, 1 block group descriptor */ 1986 ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); 1987 } 1988 } 1989 1990 return ext4_chunk_trans_blocks(inode, nrblocks); 1991 } 1992 1993 /* 1994 * How many index/leaf blocks need to change/allocate to modify nrblocks? 1995 * 1996 * if nrblocks are fit in a single extent (chunk flag is 1), then 1997 * in the worse case, each tree level index/leaf need to be changed 1998 * if the tree split due to insert a new extent, then the old tree 1999 * index/leaf need to be updated too 2000 * 2001 * If the nrblocks are discontiguous, they could cause 2002 * the whole tree split more than once, but this is really rare. 2003 */ 2004 int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) 2005 { 2006 int index; 2007 int depth = ext_depth(inode); 2008 2009 if (chunk) 2010 index = depth * 2; 2011 else 2012 index = depth * 3; 2013 2014 return index; 2015 } 2016 2017 static int ext4_remove_blocks(handle_t *handle, struct inode *inode, 2018 struct ext4_extent *ex, 2019 ext4_lblk_t from, ext4_lblk_t to) 2020 { 2021 struct buffer_head *bh; 2022 unsigned short ee_len = ext4_ext_get_actual_len(ex); 2023 int i, metadata = 0; 2024 2025 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 2026 metadata = 1; 2027 #ifdef EXTENTS_STATS 2028 { 2029 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2030 spin_lock(&sbi->s_ext_stats_lock); 2031 sbi->s_ext_blocks += ee_len; 2032 sbi->s_ext_extents++; 2033 if (ee_len < sbi->s_ext_min) 2034 sbi->s_ext_min = ee_len; 2035 if (ee_len > sbi->s_ext_max) 2036 sbi->s_ext_max = ee_len; 2037 if (ext_depth(inode) > sbi->s_depth_max) 2038 sbi->s_depth_max = ext_depth(inode); 2039 spin_unlock(&sbi->s_ext_stats_lock); 2040 } 2041 #endif 2042 if (from >= le32_to_cpu(ex->ee_block) 2043 && to == le32_to_cpu(ex->ee_block) + ee_len - 1) { 2044 /* tail removal */ 2045 ext4_lblk_t num; 2046 ext4_fsblk_t start; 2047 2048 num = le32_to_cpu(ex->ee_block) + ee_len - from; 2049 start = ext_pblock(ex) + ee_len - num; 2050 ext_debug("free last %u blocks starting %llu\n", num, start); 2051 for (i = 0; i < num; i++) { 2052 bh = sb_find_get_block(inode->i_sb, start + i); 2053 ext4_forget(handle, 0, inode, bh, start + i); 2054 } 2055 ext4_free_blocks(handle, inode, start, num, metadata); 2056 } else if (from == le32_to_cpu(ex->ee_block) 2057 && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) { 2058 printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n", 2059 from, to, le32_to_cpu(ex->ee_block), ee_len); 2060 } else { 2061 printk(KERN_INFO "strange request: removal(2) " 2062 "%u-%u from %u:%u\n", 2063 from, to, le32_to_cpu(ex->ee_block), ee_len); 2064 } 2065 return 0; 2066 } 2067 2068 static int 2069 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, 2070 struct ext4_ext_path *path, ext4_lblk_t start) 2071 { 2072 int err = 0, correct_index = 0; 2073 int depth = ext_depth(inode), credits; 2074 struct ext4_extent_header *eh; 2075 ext4_lblk_t a, b, block; 2076 unsigned num; 2077 ext4_lblk_t ex_ee_block; 2078 unsigned short ex_ee_len; 2079 unsigned uninitialized = 0; 2080 struct ext4_extent *ex; 2081 2082 /* the header must be checked already in ext4_ext_remove_space() */ 2083 ext_debug("truncate since %u in leaf\n", start); 2084 if (!path[depth].p_hdr) 2085 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); 2086 eh = path[depth].p_hdr; 2087 BUG_ON(eh == NULL); 2088 2089 /* find where to start removing */ 2090 ex = EXT_LAST_EXTENT(eh); 2091 2092 ex_ee_block = le32_to_cpu(ex->ee_block); 2093 if (ext4_ext_is_uninitialized(ex)) 2094 uninitialized = 1; 2095 ex_ee_len = ext4_ext_get_actual_len(ex); 2096 2097 while (ex >= EXT_FIRST_EXTENT(eh) && 2098 ex_ee_block + ex_ee_len > start) { 2099 ext_debug("remove ext %lu:%u\n", ex_ee_block, ex_ee_len); 2100 path[depth].p_ext = ex; 2101 2102 a = ex_ee_block > start ? ex_ee_block : start; 2103 b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ? 2104 ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK; 2105 2106 ext_debug(" border %u:%u\n", a, b); 2107 2108 if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) { 2109 block = 0; 2110 num = 0; 2111 BUG(); 2112 } else if (a != ex_ee_block) { 2113 /* remove tail of the extent */ 2114 block = ex_ee_block; 2115 num = a - block; 2116 } else if (b != ex_ee_block + ex_ee_len - 1) { 2117 /* remove head of the extent */ 2118 block = a; 2119 num = b - a; 2120 /* there is no "make a hole" API yet */ 2121 BUG(); 2122 } else { 2123 /* remove whole extent: excellent! */ 2124 block = ex_ee_block; 2125 num = 0; 2126 BUG_ON(a != ex_ee_block); 2127 BUG_ON(b != ex_ee_block + ex_ee_len - 1); 2128 } 2129 2130 /* 2131 * 3 for leaf, sb, and inode plus 2 (bmap and group 2132 * descriptor) for each block group; assume two block 2133 * groups plus ex_ee_len/blocks_per_block_group for 2134 * the worst case 2135 */ 2136 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); 2137 if (ex == EXT_FIRST_EXTENT(eh)) { 2138 correct_index = 1; 2139 credits += (ext_depth(inode)) + 1; 2140 } 2141 credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb); 2142 2143 err = ext4_ext_journal_restart(handle, credits); 2144 if (err) 2145 goto out; 2146 2147 err = ext4_ext_get_access(handle, inode, path + depth); 2148 if (err) 2149 goto out; 2150 2151 err = ext4_remove_blocks(handle, inode, ex, a, b); 2152 if (err) 2153 goto out; 2154 2155 if (num == 0) { 2156 /* this extent is removed; mark slot entirely unused */ 2157 ext4_ext_store_pblock(ex, 0); 2158 le16_add_cpu(&eh->eh_entries, -1); 2159 } 2160 2161 ex->ee_block = cpu_to_le32(block); 2162 ex->ee_len = cpu_to_le16(num); 2163 /* 2164 * Do not mark uninitialized if all the blocks in the 2165 * extent have been removed. 2166 */ 2167 if (uninitialized && num) 2168 ext4_ext_mark_uninitialized(ex); 2169 2170 err = ext4_ext_dirty(handle, inode, path + depth); 2171 if (err) 2172 goto out; 2173 2174 ext_debug("new extent: %u:%u:%llu\n", block, num, 2175 ext_pblock(ex)); 2176 ex--; 2177 ex_ee_block = le32_to_cpu(ex->ee_block); 2178 ex_ee_len = ext4_ext_get_actual_len(ex); 2179 } 2180 2181 if (correct_index && eh->eh_entries) 2182 err = ext4_ext_correct_indexes(handle, inode, path); 2183 2184 /* if this leaf is free, then we should 2185 * remove it from index block above */ 2186 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) 2187 err = ext4_ext_rm_idx(handle, inode, path + depth); 2188 2189 out: 2190 return err; 2191 } 2192 2193 /* 2194 * ext4_ext_more_to_rm: 2195 * returns 1 if current index has to be freed (even partial) 2196 */ 2197 static int 2198 ext4_ext_more_to_rm(struct ext4_ext_path *path) 2199 { 2200 BUG_ON(path->p_idx == NULL); 2201 2202 if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) 2203 return 0; 2204 2205 /* 2206 * if truncate on deeper level happened, it wasn't partial, 2207 * so we have to consider current index for truncation 2208 */ 2209 if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) 2210 return 0; 2211 return 1; 2212 } 2213 2214 static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start) 2215 { 2216 struct super_block *sb = inode->i_sb; 2217 int depth = ext_depth(inode); 2218 struct ext4_ext_path *path; 2219 handle_t *handle; 2220 int i = 0, err = 0; 2221 2222 ext_debug("truncate since %u\n", start); 2223 2224 /* probably first extent we're gonna free will be last in block */ 2225 handle = ext4_journal_start(inode, depth + 1); 2226 if (IS_ERR(handle)) 2227 return PTR_ERR(handle); 2228 2229 ext4_ext_invalidate_cache(inode); 2230 2231 /* 2232 * We start scanning from right side, freeing all the blocks 2233 * after i_size and walking into the tree depth-wise. 2234 */ 2235 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS); 2236 if (path == NULL) { 2237 ext4_journal_stop(handle); 2238 return -ENOMEM; 2239 } 2240 path[0].p_hdr = ext_inode_hdr(inode); 2241 if (ext4_ext_check(inode, path[0].p_hdr, depth)) { 2242 err = -EIO; 2243 goto out; 2244 } 2245 path[0].p_depth = depth; 2246 2247 while (i >= 0 && err == 0) { 2248 if (i == depth) { 2249 /* this is leaf block */ 2250 err = ext4_ext_rm_leaf(handle, inode, path, start); 2251 /* root level has p_bh == NULL, brelse() eats this */ 2252 brelse(path[i].p_bh); 2253 path[i].p_bh = NULL; 2254 i--; 2255 continue; 2256 } 2257 2258 /* this is index block */ 2259 if (!path[i].p_hdr) { 2260 ext_debug("initialize header\n"); 2261 path[i].p_hdr = ext_block_hdr(path[i].p_bh); 2262 } 2263 2264 if (!path[i].p_idx) { 2265 /* this level hasn't been touched yet */ 2266 path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); 2267 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; 2268 ext_debug("init index ptr: hdr 0x%p, num %d\n", 2269 path[i].p_hdr, 2270 le16_to_cpu(path[i].p_hdr->eh_entries)); 2271 } else { 2272 /* we were already here, see at next index */ 2273 path[i].p_idx--; 2274 } 2275 2276 ext_debug("level %d - index, first 0x%p, cur 0x%p\n", 2277 i, EXT_FIRST_INDEX(path[i].p_hdr), 2278 path[i].p_idx); 2279 if (ext4_ext_more_to_rm(path + i)) { 2280 struct buffer_head *bh; 2281 /* go to the next level */ 2282 ext_debug("move to level %d (block %llu)\n", 2283 i + 1, idx_pblock(path[i].p_idx)); 2284 memset(path + i + 1, 0, sizeof(*path)); 2285 bh = sb_bread(sb, idx_pblock(path[i].p_idx)); 2286 if (!bh) { 2287 /* should we reset i_size? */ 2288 err = -EIO; 2289 break; 2290 } 2291 if (WARN_ON(i + 1 > depth)) { 2292 err = -EIO; 2293 break; 2294 } 2295 if (ext4_ext_check(inode, ext_block_hdr(bh), 2296 depth - i - 1)) { 2297 err = -EIO; 2298 break; 2299 } 2300 path[i + 1].p_bh = bh; 2301 2302 /* save actual number of indexes since this 2303 * number is changed at the next iteration */ 2304 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); 2305 i++; 2306 } else { 2307 /* we finished processing this index, go up */ 2308 if (path[i].p_hdr->eh_entries == 0 && i > 0) { 2309 /* index is empty, remove it; 2310 * handle must be already prepared by the 2311 * truncatei_leaf() */ 2312 err = ext4_ext_rm_idx(handle, inode, path + i); 2313 } 2314 /* root level has p_bh == NULL, brelse() eats this */ 2315 brelse(path[i].p_bh); 2316 path[i].p_bh = NULL; 2317 i--; 2318 ext_debug("return to level %d\n", i); 2319 } 2320 } 2321 2322 /* TODO: flexible tree reduction should be here */ 2323 if (path->p_hdr->eh_entries == 0) { 2324 /* 2325 * truncate to zero freed all the tree, 2326 * so we need to correct eh_depth 2327 */ 2328 err = ext4_ext_get_access(handle, inode, path); 2329 if (err == 0) { 2330 ext_inode_hdr(inode)->eh_depth = 0; 2331 ext_inode_hdr(inode)->eh_max = 2332 cpu_to_le16(ext4_ext_space_root(inode)); 2333 err = ext4_ext_dirty(handle, inode, path); 2334 } 2335 } 2336 out: 2337 ext4_ext_drop_refs(path); 2338 kfree(path); 2339 ext4_journal_stop(handle); 2340 2341 return err; 2342 } 2343 2344 /* 2345 * called at mount time 2346 */ 2347 void ext4_ext_init(struct super_block *sb) 2348 { 2349 /* 2350 * possible initialization would be here 2351 */ 2352 2353 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) { 2354 printk(KERN_INFO "EXT4-fs: file extents enabled"); 2355 #ifdef AGGRESSIVE_TEST 2356 printk(", aggressive tests"); 2357 #endif 2358 #ifdef CHECK_BINSEARCH 2359 printk(", check binsearch"); 2360 #endif 2361 #ifdef EXTENTS_STATS 2362 printk(", stats"); 2363 #endif 2364 printk("\n"); 2365 #ifdef EXTENTS_STATS 2366 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock); 2367 EXT4_SB(sb)->s_ext_min = 1 << 30; 2368 EXT4_SB(sb)->s_ext_max = 0; 2369 #endif 2370 } 2371 } 2372 2373 /* 2374 * called at umount time 2375 */ 2376 void ext4_ext_release(struct super_block *sb) 2377 { 2378 if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) 2379 return; 2380 2381 #ifdef EXTENTS_STATS 2382 if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) { 2383 struct ext4_sb_info *sbi = EXT4_SB(sb); 2384 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n", 2385 sbi->s_ext_blocks, sbi->s_ext_extents, 2386 sbi->s_ext_blocks / sbi->s_ext_extents); 2387 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n", 2388 sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max); 2389 } 2390 #endif 2391 } 2392 2393 static void bi_complete(struct bio *bio, int error) 2394 { 2395 complete((struct completion *)bio->bi_private); 2396 } 2397 2398 /* FIXME!! we need to try to merge to left or right after zero-out */ 2399 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) 2400 { 2401 int ret = -EIO; 2402 struct bio *bio; 2403 int blkbits, blocksize; 2404 sector_t ee_pblock; 2405 struct completion event; 2406 unsigned int ee_len, len, done, offset; 2407 2408 2409 blkbits = inode->i_blkbits; 2410 blocksize = inode->i_sb->s_blocksize; 2411 ee_len = ext4_ext_get_actual_len(ex); 2412 ee_pblock = ext_pblock(ex); 2413 2414 /* convert ee_pblock to 512 byte sectors */ 2415 ee_pblock = ee_pblock << (blkbits - 9); 2416 2417 while (ee_len > 0) { 2418 2419 if (ee_len > BIO_MAX_PAGES) 2420 len = BIO_MAX_PAGES; 2421 else 2422 len = ee_len; 2423 2424 bio = bio_alloc(GFP_NOIO, len); 2425 bio->bi_sector = ee_pblock; 2426 bio->bi_bdev = inode->i_sb->s_bdev; 2427 2428 done = 0; 2429 offset = 0; 2430 while (done < len) { 2431 ret = bio_add_page(bio, ZERO_PAGE(0), 2432 blocksize, offset); 2433 if (ret != blocksize) { 2434 /* 2435 * We can't add any more pages because of 2436 * hardware limitations. Start a new bio. 2437 */ 2438 break; 2439 } 2440 done++; 2441 offset += blocksize; 2442 if (offset >= PAGE_CACHE_SIZE) 2443 offset = 0; 2444 } 2445 2446 init_completion(&event); 2447 bio->bi_private = &event; 2448 bio->bi_end_io = bi_complete; 2449 submit_bio(WRITE, bio); 2450 wait_for_completion(&event); 2451 2452 if (test_bit(BIO_UPTODATE, &bio->bi_flags)) 2453 ret = 0; 2454 else { 2455 ret = -EIO; 2456 break; 2457 } 2458 bio_put(bio); 2459 ee_len -= done; 2460 ee_pblock += done << (blkbits - 9); 2461 } 2462 return ret; 2463 } 2464 2465 #define EXT4_EXT_ZERO_LEN 7 2466 2467 /* 2468 * This function is called by ext4_ext_get_blocks() if someone tries to write 2469 * to an uninitialized extent. It may result in splitting the uninitialized 2470 * extent into multiple extents (upto three - one initialized and two 2471 * uninitialized). 2472 * There are three possibilities: 2473 * a> There is no split required: Entire extent should be initialized 2474 * b> Splits in two extents: Write is happening at either end of the extent 2475 * c> Splits in three extents: Somone is writing in middle of the extent 2476 */ 2477 static int ext4_ext_convert_to_initialized(handle_t *handle, 2478 struct inode *inode, 2479 struct ext4_ext_path *path, 2480 ext4_lblk_t iblock, 2481 unsigned int max_blocks) 2482 { 2483 struct ext4_extent *ex, newex, orig_ex; 2484 struct ext4_extent *ex1 = NULL; 2485 struct ext4_extent *ex2 = NULL; 2486 struct ext4_extent *ex3 = NULL; 2487 struct ext4_extent_header *eh; 2488 ext4_lblk_t ee_block; 2489 unsigned int allocated, ee_len, depth; 2490 ext4_fsblk_t newblock; 2491 int err = 0; 2492 int ret = 0; 2493 2494 depth = ext_depth(inode); 2495 eh = path[depth].p_hdr; 2496 ex = path[depth].p_ext; 2497 ee_block = le32_to_cpu(ex->ee_block); 2498 ee_len = ext4_ext_get_actual_len(ex); 2499 allocated = ee_len - (iblock - ee_block); 2500 newblock = iblock - ee_block + ext_pblock(ex); 2501 ex2 = ex; 2502 orig_ex.ee_block = ex->ee_block; 2503 orig_ex.ee_len = cpu_to_le16(ee_len); 2504 ext4_ext_store_pblock(&orig_ex, ext_pblock(ex)); 2505 2506 err = ext4_ext_get_access(handle, inode, path + depth); 2507 if (err) 2508 goto out; 2509 /* If extent has less than 2*EXT4_EXT_ZERO_LEN zerout directly */ 2510 if (ee_len <= 2*EXT4_EXT_ZERO_LEN) { 2511 err = ext4_ext_zeroout(inode, &orig_ex); 2512 if (err) 2513 goto fix_extent_len; 2514 /* update the extent length and mark as initialized */ 2515 ex->ee_block = orig_ex.ee_block; 2516 ex->ee_len = orig_ex.ee_len; 2517 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); 2518 ext4_ext_dirty(handle, inode, path + depth); 2519 /* zeroed the full extent */ 2520 return allocated; 2521 } 2522 2523 /* ex1: ee_block to iblock - 1 : uninitialized */ 2524 if (iblock > ee_block) { 2525 ex1 = ex; 2526 ex1->ee_len = cpu_to_le16(iblock - ee_block); 2527 ext4_ext_mark_uninitialized(ex1); 2528 ex2 = &newex; 2529 } 2530 /* 2531 * for sanity, update the length of the ex2 extent before 2532 * we insert ex3, if ex1 is NULL. This is to avoid temporary 2533 * overlap of blocks. 2534 */ 2535 if (!ex1 && allocated > max_blocks) 2536 ex2->ee_len = cpu_to_le16(max_blocks); 2537 /* ex3: to ee_block + ee_len : uninitialised */ 2538 if (allocated > max_blocks) { 2539 unsigned int newdepth; 2540 /* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */ 2541 if (allocated <= EXT4_EXT_ZERO_LEN) { 2542 /* 2543 * iblock == ee_block is handled by the zerouout 2544 * at the beginning. 2545 * Mark first half uninitialized. 2546 * Mark second half initialized and zero out the 2547 * initialized extent 2548 */ 2549 ex->ee_block = orig_ex.ee_block; 2550 ex->ee_len = cpu_to_le16(ee_len - allocated); 2551 ext4_ext_mark_uninitialized(ex); 2552 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); 2553 ext4_ext_dirty(handle, inode, path + depth); 2554 2555 ex3 = &newex; 2556 ex3->ee_block = cpu_to_le32(iblock); 2557 ext4_ext_store_pblock(ex3, newblock); 2558 ex3->ee_len = cpu_to_le16(allocated); 2559 err = ext4_ext_insert_extent(handle, inode, path, ex3); 2560 if (err == -ENOSPC) { 2561 err = ext4_ext_zeroout(inode, &orig_ex); 2562 if (err) 2563 goto fix_extent_len; 2564 ex->ee_block = orig_ex.ee_block; 2565 ex->ee_len = orig_ex.ee_len; 2566 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); 2567 ext4_ext_dirty(handle, inode, path + depth); 2568 /* blocks available from iblock */ 2569 return allocated; 2570 2571 } else if (err) 2572 goto fix_extent_len; 2573 2574 /* 2575 * We need to zero out the second half because 2576 * an fallocate request can update file size and 2577 * converting the second half to initialized extent 2578 * implies that we can leak some junk data to user 2579 * space. 2580 */ 2581 err = ext4_ext_zeroout(inode, ex3); 2582 if (err) { 2583 /* 2584 * We should actually mark the 2585 * second half as uninit and return error 2586 * Insert would have changed the extent 2587 */ 2588 depth = ext_depth(inode); 2589 ext4_ext_drop_refs(path); 2590 path = ext4_ext_find_extent(inode, 2591 iblock, path); 2592 if (IS_ERR(path)) { 2593 err = PTR_ERR(path); 2594 return err; 2595 } 2596 /* get the second half extent details */ 2597 ex = path[depth].p_ext; 2598 err = ext4_ext_get_access(handle, inode, 2599 path + depth); 2600 if (err) 2601 return err; 2602 ext4_ext_mark_uninitialized(ex); 2603 ext4_ext_dirty(handle, inode, path + depth); 2604 return err; 2605 } 2606 2607 /* zeroed the second half */ 2608 return allocated; 2609 } 2610 ex3 = &newex; 2611 ex3->ee_block = cpu_to_le32(iblock + max_blocks); 2612 ext4_ext_store_pblock(ex3, newblock + max_blocks); 2613 ex3->ee_len = cpu_to_le16(allocated - max_blocks); 2614 ext4_ext_mark_uninitialized(ex3); 2615 err = ext4_ext_insert_extent(handle, inode, path, ex3); 2616 if (err == -ENOSPC) { 2617 err = ext4_ext_zeroout(inode, &orig_ex); 2618 if (err) 2619 goto fix_extent_len; 2620 /* update the extent length and mark as initialized */ 2621 ex->ee_block = orig_ex.ee_block; 2622 ex->ee_len = orig_ex.ee_len; 2623 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); 2624 ext4_ext_dirty(handle, inode, path + depth); 2625 /* zeroed the full extent */ 2626 /* blocks available from iblock */ 2627 return allocated; 2628 2629 } else if (err) 2630 goto fix_extent_len; 2631 /* 2632 * The depth, and hence eh & ex might change 2633 * as part of the insert above. 2634 */ 2635 newdepth = ext_depth(inode); 2636 /* 2637 * update the extent length after successful insert of the 2638 * split extent 2639 */ 2640 orig_ex.ee_len = cpu_to_le16(ee_len - 2641 ext4_ext_get_actual_len(ex3)); 2642 depth = newdepth; 2643 ext4_ext_drop_refs(path); 2644 path = ext4_ext_find_extent(inode, iblock, path); 2645 if (IS_ERR(path)) { 2646 err = PTR_ERR(path); 2647 goto out; 2648 } 2649 eh = path[depth].p_hdr; 2650 ex = path[depth].p_ext; 2651 if (ex2 != &newex) 2652 ex2 = ex; 2653 2654 err = ext4_ext_get_access(handle, inode, path + depth); 2655 if (err) 2656 goto out; 2657 2658 allocated = max_blocks; 2659 2660 /* If extent has less than EXT4_EXT_ZERO_LEN and we are trying 2661 * to insert a extent in the middle zerout directly 2662 * otherwise give the extent a chance to merge to left 2663 */ 2664 if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN && 2665 iblock != ee_block) { 2666 err = ext4_ext_zeroout(inode, &orig_ex); 2667 if (err) 2668 goto fix_extent_len; 2669 /* update the extent length and mark as initialized */ 2670 ex->ee_block = orig_ex.ee_block; 2671 ex->ee_len = orig_ex.ee_len; 2672 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); 2673 ext4_ext_dirty(handle, inode, path + depth); 2674 /* zero out the first half */ 2675 /* blocks available from iblock */ 2676 return allocated; 2677 } 2678 } 2679 /* 2680 * If there was a change of depth as part of the 2681 * insertion of ex3 above, we need to update the length 2682 * of the ex1 extent again here 2683 */ 2684 if (ex1 && ex1 != ex) { 2685 ex1 = ex; 2686 ex1->ee_len = cpu_to_le16(iblock - ee_block); 2687 ext4_ext_mark_uninitialized(ex1); 2688 ex2 = &newex; 2689 } 2690 /* ex2: iblock to iblock + maxblocks-1 : initialised */ 2691 ex2->ee_block = cpu_to_le32(iblock); 2692 ext4_ext_store_pblock(ex2, newblock); 2693 ex2->ee_len = cpu_to_le16(allocated); 2694 if (ex2 != ex) 2695 goto insert; 2696 /* 2697 * New (initialized) extent starts from the first block 2698 * in the current extent. i.e., ex2 == ex 2699 * We have to see if it can be merged with the extent 2700 * on the left. 2701 */ 2702 if (ex2 > EXT_FIRST_EXTENT(eh)) { 2703 /* 2704 * To merge left, pass "ex2 - 1" to try_to_merge(), 2705 * since it merges towards right _only_. 2706 */ 2707 ret = ext4_ext_try_to_merge(inode, path, ex2 - 1); 2708 if (ret) { 2709 err = ext4_ext_correct_indexes(handle, inode, path); 2710 if (err) 2711 goto out; 2712 depth = ext_depth(inode); 2713 ex2--; 2714 } 2715 } 2716 /* 2717 * Try to Merge towards right. This might be required 2718 * only when the whole extent is being written to. 2719 * i.e. ex2 == ex and ex3 == NULL. 2720 */ 2721 if (!ex3) { 2722 ret = ext4_ext_try_to_merge(inode, path, ex2); 2723 if (ret) { 2724 err = ext4_ext_correct_indexes(handle, inode, path); 2725 if (err) 2726 goto out; 2727 } 2728 } 2729 /* Mark modified extent as dirty */ 2730 err = ext4_ext_dirty(handle, inode, path + depth); 2731 goto out; 2732 insert: 2733 err = ext4_ext_insert_extent(handle, inode, path, &newex); 2734 if (err == -ENOSPC) { 2735 err = ext4_ext_zeroout(inode, &orig_ex); 2736 if (err) 2737 goto fix_extent_len; 2738 /* update the extent length and mark as initialized */ 2739 ex->ee_block = orig_ex.ee_block; 2740 ex->ee_len = orig_ex.ee_len; 2741 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); 2742 ext4_ext_dirty(handle, inode, path + depth); 2743 /* zero out the first half */ 2744 return allocated; 2745 } else if (err) 2746 goto fix_extent_len; 2747 out: 2748 return err ? err : allocated; 2749 2750 fix_extent_len: 2751 ex->ee_block = orig_ex.ee_block; 2752 ex->ee_len = orig_ex.ee_len; 2753 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); 2754 ext4_ext_mark_uninitialized(ex); 2755 ext4_ext_dirty(handle, inode, path + depth); 2756 return err; 2757 } 2758 2759 /* 2760 * Block allocation/map/preallocation routine for extents based files 2761 * 2762 * 2763 * Need to be called with 2764 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block 2765 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem) 2766 * 2767 * return > 0, number of of blocks already mapped/allocated 2768 * if create == 0 and these are pre-allocated blocks 2769 * buffer head is unmapped 2770 * otherwise blocks are mapped 2771 * 2772 * return = 0, if plain look up failed (blocks have not been allocated) 2773 * buffer head is unmapped 2774 * 2775 * return < 0, error case. 2776 */ 2777 int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, 2778 ext4_lblk_t iblock, 2779 unsigned int max_blocks, struct buffer_head *bh_result, 2780 int create, int extend_disksize) 2781 { 2782 struct ext4_ext_path *path = NULL; 2783 struct ext4_extent_header *eh; 2784 struct ext4_extent newex, *ex; 2785 ext4_fsblk_t newblock; 2786 int err = 0, depth, ret, cache_type; 2787 unsigned int allocated = 0; 2788 struct ext4_allocation_request ar; 2789 loff_t disksize; 2790 2791 __clear_bit(BH_New, &bh_result->b_state); 2792 ext_debug("blocks %u/%u requested for inode %u\n", 2793 iblock, max_blocks, inode->i_ino); 2794 2795 /* check in cache */ 2796 cache_type = ext4_ext_in_cache(inode, iblock, &newex); 2797 if (cache_type) { 2798 if (cache_type == EXT4_EXT_CACHE_GAP) { 2799 if (!create) { 2800 /* 2801 * block isn't allocated yet and 2802 * user doesn't want to allocate it 2803 */ 2804 goto out2; 2805 } 2806 /* we should allocate requested block */ 2807 } else if (cache_type == EXT4_EXT_CACHE_EXTENT) { 2808 /* block is already allocated */ 2809 newblock = iblock 2810 - le32_to_cpu(newex.ee_block) 2811 + ext_pblock(&newex); 2812 /* number of remaining blocks in the extent */ 2813 allocated = ext4_ext_get_actual_len(&newex) - 2814 (iblock - le32_to_cpu(newex.ee_block)); 2815 goto out; 2816 } else { 2817 BUG(); 2818 } 2819 } 2820 2821 /* find extent for this block */ 2822 path = ext4_ext_find_extent(inode, iblock, NULL); 2823 if (IS_ERR(path)) { 2824 err = PTR_ERR(path); 2825 path = NULL; 2826 goto out2; 2827 } 2828 2829 depth = ext_depth(inode); 2830 2831 /* 2832 * consistent leaf must not be empty; 2833 * this situation is possible, though, _during_ tree modification; 2834 * this is why assert can't be put in ext4_ext_find_extent() 2835 */ 2836 BUG_ON(path[depth].p_ext == NULL && depth != 0); 2837 eh = path[depth].p_hdr; 2838 2839 ex = path[depth].p_ext; 2840 if (ex) { 2841 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 2842 ext4_fsblk_t ee_start = ext_pblock(ex); 2843 unsigned short ee_len; 2844 2845 /* 2846 * Uninitialized extents are treated as holes, except that 2847 * we split out initialized portions during a write. 2848 */ 2849 ee_len = ext4_ext_get_actual_len(ex); 2850 /* if found extent covers block, simply return it */ 2851 if (iblock >= ee_block && iblock < ee_block + ee_len) { 2852 newblock = iblock - ee_block + ee_start; 2853 /* number of remaining blocks in the extent */ 2854 allocated = ee_len - (iblock - ee_block); 2855 ext_debug("%u fit into %lu:%d -> %llu\n", iblock, 2856 ee_block, ee_len, newblock); 2857 2858 /* Do not put uninitialized extent in the cache */ 2859 if (!ext4_ext_is_uninitialized(ex)) { 2860 ext4_ext_put_in_cache(inode, ee_block, 2861 ee_len, ee_start, 2862 EXT4_EXT_CACHE_EXTENT); 2863 goto out; 2864 } 2865 if (create == EXT4_CREATE_UNINITIALIZED_EXT) 2866 goto out; 2867 if (!create) { 2868 /* 2869 * We have blocks reserved already. We 2870 * return allocated blocks so that delalloc 2871 * won't do block reservation for us. But 2872 * the buffer head will be unmapped so that 2873 * a read from the block returns 0s. 2874 */ 2875 if (allocated > max_blocks) 2876 allocated = max_blocks; 2877 set_buffer_unwritten(bh_result); 2878 goto out2; 2879 } 2880 2881 ret = ext4_ext_convert_to_initialized(handle, inode, 2882 path, iblock, 2883 max_blocks); 2884 if (ret <= 0) { 2885 err = ret; 2886 goto out2; 2887 } else 2888 allocated = ret; 2889 goto outnew; 2890 } 2891 } 2892 2893 /* 2894 * requested block isn't allocated yet; 2895 * we couldn't try to create block if create flag is zero 2896 */ 2897 if (!create) { 2898 /* 2899 * put just found gap into cache to speed up 2900 * subsequent requests 2901 */ 2902 ext4_ext_put_gap_in_cache(inode, path, iblock); 2903 goto out2; 2904 } 2905 /* 2906 * Okay, we need to do block allocation. 2907 */ 2908 2909 /* find neighbour allocated blocks */ 2910 ar.lleft = iblock; 2911 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); 2912 if (err) 2913 goto out2; 2914 ar.lright = iblock; 2915 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright); 2916 if (err) 2917 goto out2; 2918 2919 /* 2920 * See if request is beyond maximum number of blocks we can have in 2921 * a single extent. For an initialized extent this limit is 2922 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is 2923 * EXT_UNINIT_MAX_LEN. 2924 */ 2925 if (max_blocks > EXT_INIT_MAX_LEN && 2926 create != EXT4_CREATE_UNINITIALIZED_EXT) 2927 max_blocks = EXT_INIT_MAX_LEN; 2928 else if (max_blocks > EXT_UNINIT_MAX_LEN && 2929 create == EXT4_CREATE_UNINITIALIZED_EXT) 2930 max_blocks = EXT_UNINIT_MAX_LEN; 2931 2932 /* Check if we can really insert (iblock)::(iblock+max_blocks) extent */ 2933 newex.ee_block = cpu_to_le32(iblock); 2934 newex.ee_len = cpu_to_le16(max_blocks); 2935 err = ext4_ext_check_overlap(inode, &newex, path); 2936 if (err) 2937 allocated = ext4_ext_get_actual_len(&newex); 2938 else 2939 allocated = max_blocks; 2940 2941 /* allocate new block */ 2942 ar.inode = inode; 2943 ar.goal = ext4_ext_find_goal(inode, path, iblock); 2944 ar.logical = iblock; 2945 ar.len = allocated; 2946 if (S_ISREG(inode->i_mode)) 2947 ar.flags = EXT4_MB_HINT_DATA; 2948 else 2949 /* disable in-core preallocation for non-regular files */ 2950 ar.flags = 0; 2951 newblock = ext4_mb_new_blocks(handle, &ar, &err); 2952 if (!newblock) 2953 goto out2; 2954 ext_debug("allocate new block: goal %llu, found %llu/%lu\n", 2955 ar.goal, newblock, allocated); 2956 2957 /* try to insert new extent into found leaf and return */ 2958 ext4_ext_store_pblock(&newex, newblock); 2959 newex.ee_len = cpu_to_le16(ar.len); 2960 if (create == EXT4_CREATE_UNINITIALIZED_EXT) /* Mark uninitialized */ 2961 ext4_ext_mark_uninitialized(&newex); 2962 err = ext4_ext_insert_extent(handle, inode, path, &newex); 2963 if (err) { 2964 /* free data blocks we just allocated */ 2965 /* not a good idea to call discard here directly, 2966 * but otherwise we'd need to call it every free() */ 2967 ext4_discard_preallocations(inode); 2968 ext4_free_blocks(handle, inode, ext_pblock(&newex), 2969 ext4_ext_get_actual_len(&newex), 0); 2970 goto out2; 2971 } 2972 2973 /* previous routine could use block we allocated */ 2974 newblock = ext_pblock(&newex); 2975 allocated = ext4_ext_get_actual_len(&newex); 2976 outnew: 2977 if (extend_disksize) { 2978 disksize = ((loff_t) iblock + ar.len) << inode->i_blkbits; 2979 if (disksize > i_size_read(inode)) 2980 disksize = i_size_read(inode); 2981 if (disksize > EXT4_I(inode)->i_disksize) 2982 EXT4_I(inode)->i_disksize = disksize; 2983 } 2984 2985 set_buffer_new(bh_result); 2986 2987 /* Cache only when it is _not_ an uninitialized extent */ 2988 if (create != EXT4_CREATE_UNINITIALIZED_EXT) 2989 ext4_ext_put_in_cache(inode, iblock, allocated, newblock, 2990 EXT4_EXT_CACHE_EXTENT); 2991 out: 2992 if (allocated > max_blocks) 2993 allocated = max_blocks; 2994 ext4_ext_show_leaf(inode, path); 2995 set_buffer_mapped(bh_result); 2996 bh_result->b_bdev = inode->i_sb->s_bdev; 2997 bh_result->b_blocknr = newblock; 2998 out2: 2999 if (path) { 3000 ext4_ext_drop_refs(path); 3001 kfree(path); 3002 } 3003 return err ? err : allocated; 3004 } 3005 3006 void ext4_ext_truncate(struct inode *inode) 3007 { 3008 struct address_space *mapping = inode->i_mapping; 3009 struct super_block *sb = inode->i_sb; 3010 ext4_lblk_t last_block; 3011 handle_t *handle; 3012 int err = 0; 3013 3014 /* 3015 * probably first extent we're gonna free will be last in block 3016 */ 3017 err = ext4_writepage_trans_blocks(inode); 3018 handle = ext4_journal_start(inode, err); 3019 if (IS_ERR(handle)) 3020 return; 3021 3022 if (inode->i_size & (sb->s_blocksize - 1)) 3023 ext4_block_truncate_page(handle, mapping, inode->i_size); 3024 3025 if (ext4_orphan_add(handle, inode)) 3026 goto out_stop; 3027 3028 down_write(&EXT4_I(inode)->i_data_sem); 3029 ext4_ext_invalidate_cache(inode); 3030 3031 ext4_discard_preallocations(inode); 3032 3033 /* 3034 * TODO: optimization is possible here. 3035 * Probably we need not scan at all, 3036 * because page truncation is enough. 3037 */ 3038 3039 /* we have to know where to truncate from in crash case */ 3040 EXT4_I(inode)->i_disksize = inode->i_size; 3041 ext4_mark_inode_dirty(handle, inode); 3042 3043 last_block = (inode->i_size + sb->s_blocksize - 1) 3044 >> EXT4_BLOCK_SIZE_BITS(sb); 3045 err = ext4_ext_remove_space(inode, last_block); 3046 3047 /* In a multi-transaction truncate, we only make the final 3048 * transaction synchronous. 3049 */ 3050 if (IS_SYNC(inode)) 3051 ext4_handle_sync(handle); 3052 3053 out_stop: 3054 up_write(&EXT4_I(inode)->i_data_sem); 3055 /* 3056 * If this was a simple ftruncate() and the file will remain alive, 3057 * then we need to clear up the orphan record which we created above. 3058 * However, if this was a real unlink then we were called by 3059 * ext4_delete_inode(), and we allow that function to clean up the 3060 * orphan info for us. 3061 */ 3062 if (inode->i_nlink) 3063 ext4_orphan_del(handle, inode); 3064 3065 inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 3066 ext4_mark_inode_dirty(handle, inode); 3067 ext4_journal_stop(handle); 3068 } 3069 3070 static void ext4_falloc_update_inode(struct inode *inode, 3071 int mode, loff_t new_size, int update_ctime) 3072 { 3073 struct timespec now; 3074 3075 if (update_ctime) { 3076 now = current_fs_time(inode->i_sb); 3077 if (!timespec_equal(&inode->i_ctime, &now)) 3078 inode->i_ctime = now; 3079 } 3080 /* 3081 * Update only when preallocation was requested beyond 3082 * the file size. 3083 */ 3084 if (!(mode & FALLOC_FL_KEEP_SIZE)) { 3085 if (new_size > i_size_read(inode)) 3086 i_size_write(inode, new_size); 3087 if (new_size > EXT4_I(inode)->i_disksize) 3088 ext4_update_i_disksize(inode, new_size); 3089 } 3090 3091 } 3092 3093 /* 3094 * preallocate space for a file. This implements ext4's fallocate inode 3095 * operation, which gets called from sys_fallocate system call. 3096 * For block-mapped files, posix_fallocate should fall back to the method 3097 * of writing zeroes to the required new blocks (the same behavior which is 3098 * expected for file systems which do not support fallocate() system call). 3099 */ 3100 long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len) 3101 { 3102 handle_t *handle; 3103 ext4_lblk_t block; 3104 loff_t new_size; 3105 unsigned int max_blocks; 3106 int ret = 0; 3107 int ret2 = 0; 3108 int retries = 0; 3109 struct buffer_head map_bh; 3110 unsigned int credits, blkbits = inode->i_blkbits; 3111 3112 /* 3113 * currently supporting (pre)allocate mode for extent-based 3114 * files _only_ 3115 */ 3116 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) 3117 return -EOPNOTSUPP; 3118 3119 /* preallocation to directories is currently not supported */ 3120 if (S_ISDIR(inode->i_mode)) 3121 return -ENODEV; 3122 3123 block = offset >> blkbits; 3124 /* 3125 * We can't just convert len to max_blocks because 3126 * If blocksize = 4096 offset = 3072 and len = 2048 3127 */ 3128 max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) 3129 - block; 3130 /* 3131 * credits to insert 1 extent into extent tree 3132 */ 3133 credits = ext4_chunk_trans_blocks(inode, max_blocks); 3134 mutex_lock(&inode->i_mutex); 3135 retry: 3136 while (ret >= 0 && ret < max_blocks) { 3137 block = block + ret; 3138 max_blocks = max_blocks - ret; 3139 handle = ext4_journal_start(inode, credits); 3140 if (IS_ERR(handle)) { 3141 ret = PTR_ERR(handle); 3142 break; 3143 } 3144 ret = ext4_get_blocks_wrap(handle, inode, block, 3145 max_blocks, &map_bh, 3146 EXT4_CREATE_UNINITIALIZED_EXT, 0, 0); 3147 if (ret <= 0) { 3148 #ifdef EXT4FS_DEBUG 3149 WARN_ON(ret <= 0); 3150 printk(KERN_ERR "%s: ext4_ext_get_blocks " 3151 "returned error inode#%lu, block=%u, " 3152 "max_blocks=%u", __func__, 3153 inode->i_ino, block, max_blocks); 3154 #endif 3155 ext4_mark_inode_dirty(handle, inode); 3156 ret2 = ext4_journal_stop(handle); 3157 break; 3158 } 3159 if ((block + ret) >= (EXT4_BLOCK_ALIGN(offset + len, 3160 blkbits) >> blkbits)) 3161 new_size = offset + len; 3162 else 3163 new_size = (block + ret) << blkbits; 3164 3165 ext4_falloc_update_inode(inode, mode, new_size, 3166 buffer_new(&map_bh)); 3167 ext4_mark_inode_dirty(handle, inode); 3168 ret2 = ext4_journal_stop(handle); 3169 if (ret2) 3170 break; 3171 } 3172 if (ret == -ENOSPC && 3173 ext4_should_retry_alloc(inode->i_sb, &retries)) { 3174 ret = 0; 3175 goto retry; 3176 } 3177 mutex_unlock(&inode->i_mutex); 3178 return ret > 0 ? ret2 : ret; 3179 } 3180 3181 /* 3182 * Callback function called for each extent to gather FIEMAP information. 3183 */ 3184 static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path, 3185 struct ext4_ext_cache *newex, struct ext4_extent *ex, 3186 void *data) 3187 { 3188 struct fiemap_extent_info *fieinfo = data; 3189 unsigned long blksize_bits = inode->i_sb->s_blocksize_bits; 3190 __u64 logical; 3191 __u64 physical; 3192 __u64 length; 3193 __u32 flags = 0; 3194 int error; 3195 3196 logical = (__u64)newex->ec_block << blksize_bits; 3197 3198 if (newex->ec_type == EXT4_EXT_CACHE_GAP) { 3199 pgoff_t offset; 3200 struct page *page; 3201 struct buffer_head *bh = NULL; 3202 3203 offset = logical >> PAGE_SHIFT; 3204 page = find_get_page(inode->i_mapping, offset); 3205 if (!page || !page_has_buffers(page)) 3206 return EXT_CONTINUE; 3207 3208 bh = page_buffers(page); 3209 3210 if (!bh) 3211 return EXT_CONTINUE; 3212 3213 if (buffer_delay(bh)) { 3214 flags |= FIEMAP_EXTENT_DELALLOC; 3215 page_cache_release(page); 3216 } else { 3217 page_cache_release(page); 3218 return EXT_CONTINUE; 3219 } 3220 } 3221 3222 physical = (__u64)newex->ec_start << blksize_bits; 3223 length = (__u64)newex->ec_len << blksize_bits; 3224 3225 if (ex && ext4_ext_is_uninitialized(ex)) 3226 flags |= FIEMAP_EXTENT_UNWRITTEN; 3227 3228 /* 3229 * If this extent reaches EXT_MAX_BLOCK, it must be last. 3230 * 3231 * Or if ext4_ext_next_allocated_block is EXT_MAX_BLOCK, 3232 * this also indicates no more allocated blocks. 3233 * 3234 * XXX this might miss a single-block extent at EXT_MAX_BLOCK 3235 */ 3236 if (logical + length - 1 == EXT_MAX_BLOCK || 3237 ext4_ext_next_allocated_block(path) == EXT_MAX_BLOCK) 3238 flags |= FIEMAP_EXTENT_LAST; 3239 3240 error = fiemap_fill_next_extent(fieinfo, logical, physical, 3241 length, flags); 3242 if (error < 0) 3243 return error; 3244 if (error == 1) 3245 return EXT_BREAK; 3246 3247 return EXT_CONTINUE; 3248 } 3249 3250 /* fiemap flags we can handle specified here */ 3251 #define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) 3252 3253 static int ext4_xattr_fiemap(struct inode *inode, 3254 struct fiemap_extent_info *fieinfo) 3255 { 3256 __u64 physical = 0; 3257 __u64 length; 3258 __u32 flags = FIEMAP_EXTENT_LAST; 3259 int blockbits = inode->i_sb->s_blocksize_bits; 3260 int error = 0; 3261 3262 /* in-inode? */ 3263 if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) { 3264 struct ext4_iloc iloc; 3265 int offset; /* offset of xattr in inode */ 3266 3267 error = ext4_get_inode_loc(inode, &iloc); 3268 if (error) 3269 return error; 3270 physical = iloc.bh->b_blocknr << blockbits; 3271 offset = EXT4_GOOD_OLD_INODE_SIZE + 3272 EXT4_I(inode)->i_extra_isize; 3273 physical += offset; 3274 length = EXT4_SB(inode->i_sb)->s_inode_size - offset; 3275 flags |= FIEMAP_EXTENT_DATA_INLINE; 3276 } else { /* external block */ 3277 physical = EXT4_I(inode)->i_file_acl << blockbits; 3278 length = inode->i_sb->s_blocksize; 3279 } 3280 3281 if (physical) 3282 error = fiemap_fill_next_extent(fieinfo, 0, physical, 3283 length, flags); 3284 return (error < 0 ? error : 0); 3285 } 3286 3287 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 3288 __u64 start, __u64 len) 3289 { 3290 ext4_lblk_t start_blk; 3291 ext4_lblk_t len_blks; 3292 int error = 0; 3293 3294 /* fallback to generic here if not in extents fmt */ 3295 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) 3296 return generic_block_fiemap(inode, fieinfo, start, len, 3297 ext4_get_block); 3298 3299 if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS)) 3300 return -EBADR; 3301 3302 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { 3303 error = ext4_xattr_fiemap(inode, fieinfo); 3304 } else { 3305 start_blk = start >> inode->i_sb->s_blocksize_bits; 3306 len_blks = len >> inode->i_sb->s_blocksize_bits; 3307 3308 /* 3309 * Walk the extent tree gathering extent information. 3310 * ext4_ext_fiemap_cb will push extents back to user. 3311 */ 3312 down_write(&EXT4_I(inode)->i_data_sem); 3313 error = ext4_ext_walk_space(inode, start_blk, len_blks, 3314 ext4_ext_fiemap_cb, fieinfo); 3315 up_write(&EXT4_I(inode)->i_data_sem); 3316 } 3317 3318 return error; 3319 } 3320 3321