/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/ext4_jbd2.h>
#include <linux/jbd.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/ext4_fs_extents.h>
#include <asm/uaccess.h>

/*
 * ext_pblock:
 * combine low and high parts of physical block number into ext4_fsblk_t
 */
static ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ex->ee_start);
	block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
	return block;
}

/*
 * idx_pblock:
 * combine low and high parts of a leaf physical block number into ext4_fsblk_t
 */
static ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ix->ei_leaf);
	block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
	return block;
}

/*
 * ext4_ext_store_pblock:
 * stores a large physical block number into an extent struct,
 * breaking it into parts
 */
static void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
{
	ex->ee_start = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}

/*
 * ext4_idx_store_pblock:
 * stores a large physical block number into an index struct,
 * breaking it into parts
 */
static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
{
	ix->ei_leaf = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}
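
/*
 * For illustration, a round trip through the helpers above on a
 * hypothetical 48-bit physical block number (value made up):
 *
 *	ext4_fsblk_t pb = 0x0123456789abULL;
 *
 *	ext4_ext_store_pblock(&ex, pb);
 *		ex.ee_start    == cpu_to_le32(0x456789ab)   (low 32 bits)
 *		ex.ee_start_hi == cpu_to_le16(0x0123)       (high 16 bits)
 *
 *	ext_pblock(&ex) == 0x0123456789abULL again.
 *
 * The shifts are written as (x << 31) << 1 and (x >> 31) >> 1 rather
 * than a single 32-bit shift, presumably so the expressions stay
 * well-defined even in configurations where ext4_fsblk_t is only
 * 32 bits wide.
 */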

static int ext4_ext_check_header(const char *function, struct inode *inode,
				 struct ext4_extent_header *eh)
{
	const char *error_msg = NULL;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error(inode->i_sb, function,
		   "bad header in inode #%lu: %s - magic %x, "
		   "entries %u, max %u, depth %u",
		   inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
		   le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
		   le16_to_cpu(eh->eh_depth));

	return -EIO;
}

static handle_t *ext4_ext_journal_restart(handle_t *handle, int needed)
{
	int err;

	if (handle->h_buffer_credits > needed)
		return handle;
	if (!ext4_journal_extend(handle, needed))
		return handle;
	err = ext4_journal_restart(handle, needed);
	/* propagate a restart failure to the caller, which checks IS_ERR() */
	if (err)
		return ERR_PTR(err);

	return handle;
}

/*
 * could return:
 * - EROFS
 * - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
			       struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 * - EROFS
 * - ENOMEM
 * - EIO
 */
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
			  struct ext4_ext_path *path)
{
	int err;

	if (path->p_bh) {
		/* path points to block */
		err = ext4_journal_dirty_metadata(handle, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
				       struct ext4_ext_path *path,
				       ext4_fsblk_t block)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t bg_start;
	ext4_grpblk_t colour;
	int depth;

	if (path) {
		struct ext4_extent *ex;
		depth = path->p_depth;

		/* try to predict block placement */
		ex = path[depth].p_ext;
		if (ex)
			return ext_pblock(ex) + (block - le32_to_cpu(ex->ee_block));

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	bg_start = (ei->i_block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) +
		le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block);
	colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour + block;
}
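
/*
 * A worked example of the allocation goal above (all numbers made up):
 * with 32768 blocks per group, s_first_data_block == 1 and
 * i_block_group == 3, bg_start is 3 * 32768 + 1 == 98305.  A process
 * with pid 42 gets colour (42 % 16) * (32768 / 16) == 10 * 2048 ==
 * 20480, so for logical block 100 the goal is 98305 + 20480 + 100 ==
 * 118885.  The colour term merely spreads concurrent writers within
 * the group; the allocator is free to return a different block.
 */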

static ext4_fsblk_t
ext4_ext_new_block(handle_t *handle, struct inode *inode,
		   struct ext4_ext_path *path,
		   struct ext4_extent *ex, int *err)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_block(handle, inode, goal, err);
	return newblock;
}

static int ext4_ext_space_block(struct inode *inode)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (size > 6)
		size = 6;
#endif
	return size;
}

static int ext4_ext_space_block_idx(struct inode *inode)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (size > 5)
		size = 5;
#endif
	return size;
}

static int ext4_ext_space_root(struct inode *inode)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (size > 3)
		size = 3;
#endif
	return size;
}

static int ext4_ext_space_root_idx(struct inode *inode)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (size > 4)
		size = 4;
#endif
	return size;
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block),
				  idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug(" %d:%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  le16_to_cpu(path->p_ext->ee_len),
				  ext_pblock(path->p_ext));
		} else
			ext_debug(" []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:%d:%llu ", le32_to_cpu(ex->ee_block),
			  le16_to_cpu(ex->ee_len), ext_pblock(ex));
	}
	ext_debug("\n");
}
#else
#define ext4_ext_show_path(inode,path)
#define ext4_ext_show_leaf(inode,path)
#endif

static void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 */
static void
ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, int block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
	BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
	BUG_ON(le16_to_cpu(eh->eh_entries) <= 0);

	ext_debug("binsearch for %d(idx): ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_FIRST_INDEX(eh) + le16_to_cpu(eh->eh_entries) - 1;
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ei_block,
			  m, m->ei_block, r, r->ei_block);
	}

	path->p_idx = l - 1;
	ext_debug("  -> %d->%llu ", le32_to_cpu(path->p_idx->ei_block),
		  idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk("k=%d, ix=0x%p, first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk("%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
			       <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}
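
/*
 * An example trace of the search above, on a hypothetical index block
 * whose entries are keyed 0, 100, 200, 300 (eh_entries == 4):
 *
 *	block = 150:  l starts at key 100, r at key 300;
 *	              150 < 200, so r moves to key 100;
 *	              150 >= 100, so l moves to key 200;
 *	              loop ends, p_idx = l - 1 = the entry keyed 100.
 *
 * I.e. the result is the last index whose key is <= the searched
 * block, which is why the search may start at the second entry:
 * the first one is the fallback when every other key is greater.
 */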

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 */
static void
ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
	BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %d: ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_FIRST_EXTENT(eh) + le16_to_cpu(eh->eh_entries) - 1;

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ee_block,
			  m, m->ee_block, r, r->ee_block);
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:%d ",
		  le32_to_cpu(path->p_ext->ee_block),
		  ext_pblock(path->p_ext),
		  le16_to_cpu(path->p_ext->ee_len));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
			       <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}

struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	BUG_ON(eh == NULL);
	if (ext4_ext_check_header(__FUNCTION__, inode, eh))
		return ERR_PTR(-EIO);

	i = depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
			       GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;

	/* walk through the tree */
	while (i) {
		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_bread(inode->i_sb, path[ppos].p_block);
		if (!bh)
			goto err;

		eh = ext_block_hdr(bh);
		ppos++;
		BUG_ON(ppos > depth);
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (ext4_ext_check_header(__FUNCTION__, inode, eh))
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_hdr = eh;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	if (ext4_ext_check_header(__FUNCTION__, inode, eh))
		goto err;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}
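
/*
 * Typical usage of ext4_ext_find_extent(), sketched; this is the same
 * pattern the callers further down follow:
 *
 *	path = ext4_ext_find_extent(inode, block, NULL);
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	ex = path[ext_depth(inode)].p_ext;	(may be NULL)
 *	...
 *	ext4_ext_drop_refs(path);
 *	kfree(path);
 *
 * An existing path may be passed back in to be refilled after the tree
 * changed; if the depth grew, the caller frees it and passes NULL so
 * that a large enough array is reallocated (hence the depth + 2 above).
 */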

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %llu. "
				  "move %d from 0x%p to 0x%p\n",
				  logical, ptr, len,
				  (curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		}
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %llu. "
			  "move %d from 0x%p to 0x%p\n",
			  logical, ptr, len,
			  curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
		ix = curp->p_idx;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries) + 1);

	BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
	       > le16_to_cpu(curp->p_hdr->eh_max));
	BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}
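
/*
 * The memmove() arithmetic above, illustrated on a hypothetical block
 * with index keys [10, 30, 50] and p_idx at key 30:
 *
 *	insert key 40 (after):  entries right of p_idx move one slot
 *	                        [10, 30, 50] -> [10, 30, 40, 50]
 *	insert key 20 (before): p_idx itself moves as well
 *	                        [10, 30, 50] -> [10, 20, 30, 50]
 *
 * len counts the entries between p_idx and the end of the block, so
 * it is clamped at 0 for an insert after the last index.
 */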
" 568 "move %d from 0x%p to 0x%p\n", 569 logical, ptr, len, 570 curp->p_idx, (curp->p_idx + 1)); 571 memmove(curp->p_idx + 1, curp->p_idx, len); 572 ix = curp->p_idx; 573 } 574 575 ix->ei_block = cpu_to_le32(logical); 576 ext4_idx_store_pblock(ix, ptr); 577 curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1); 578 579 BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries) 580 > le16_to_cpu(curp->p_hdr->eh_max)); 581 BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr)); 582 583 err = ext4_ext_dirty(handle, inode, curp); 584 ext4_std_error(inode->i_sb, err); 585 586 return err; 587 } 588 589 /* 590 * ext4_ext_split: 591 * inserts new subtree into the path, using free index entry 592 * at depth @at: 593 * - allocates all needed blocks (new leaf and all intermediate index blocks) 594 * - makes decision where to split 595 * - moves remaining extents and index entries (right to the split point) 596 * into the newly allocated blocks 597 * - initializes subtree 598 */ 599 static int ext4_ext_split(handle_t *handle, struct inode *inode, 600 struct ext4_ext_path *path, 601 struct ext4_extent *newext, int at) 602 { 603 struct buffer_head *bh = NULL; 604 int depth = ext_depth(inode); 605 struct ext4_extent_header *neh; 606 struct ext4_extent_idx *fidx; 607 struct ext4_extent *ex; 608 int i = at, k, m, a; 609 ext4_fsblk_t newblock, oldblock; 610 __le32 border; 611 ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */ 612 int err = 0; 613 614 /* make decision: where to split? */ 615 /* FIXME: now decision is simplest: at current extent */ 616 617 /* if current leaf will be split, then we should use 618 * border from split point */ 619 BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr)); 620 if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) { 621 border = path[depth].p_ext[1].ee_block; 622 ext_debug("leaf will be split." 623 " next leaf starts at %d\n", 624 le32_to_cpu(border)); 625 } else { 626 border = newext->ee_block; 627 ext_debug("leaf will be added." 628 " next leaf starts at %d\n", 629 le32_to_cpu(border)); 630 } 631 632 /* 633 * If error occurs, then we break processing 634 * and mark filesystem read-only. index won't 635 * be inserted and tree will be in consistent 636 * state. Next mount will repair buffers too. 637 */ 638 639 /* 640 * Get array to track all allocated blocks. 641 * We need this to handle errors and free blocks 642 * upon them. 

	/*
	 * If error occurs, then we break processing
	 * and mark filesystem read-only. index won't
	 * be inserted and tree will be in consistent
	 * state. Next mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	BUG_ON(newblock == 0);
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	ex = EXT_FIRST_EXTENT(neh);

	/* move remainder of path[depth] to the new leaf */
	BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
	/* start copy from next extent */
	/* TODO: we could do it by single memmove */
	m = 0;
	path[depth].p_ext++;
	while (path[depth].p_ext <=
			EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:%d in new leaf %llu\n",
			  le32_to_cpu(path[depth].p_ext->ee_block),
			  ext_pblock(path[depth].p_ext),
			  le16_to_cpu(path[depth].p_ext->ee_len),
			  newblock);
		/*memmove(ex++, path[depth].p_ext++,
				sizeof(struct ext4_extent));
		neh->eh_entries++;*/
		path[depth].p_ext++;
		m++;
	}
	if (m) {
		memmove(ex, path[depth].p_ext - m, sizeof(struct ext4_extent) * m);
		neh->eh_entries = cpu_to_le16(le16_to_cpu(neh->eh_entries) + m);
	}

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_journal_dirty_metadata(handle, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		path[depth].p_hdr->eh_entries =
			cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries) - m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;
	}

	/* create intermediate indexes */
	k = depth - at - 1;
	BUG_ON(k < 0);
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, (ext4_fsblk_t)newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %lu -> %llu\n", i,
			  newblock, (unsigned long) le32_to_cpu(border),
			  oldblock);
		/* copy indexes */
		m = 0;
		path[i].p_idx++;

		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
			  EXT_MAX_INDEX(path[i].p_hdr));
		BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
		       EXT_LAST_INDEX(path[i].p_hdr));
		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", i,
				  le32_to_cpu(path[i].p_idx->ei_block),
				  idx_pblock(path[i].p_idx),
				  newblock);
			/*memmove(++fidx, path[i].p_idx++,
					sizeof(struct ext4_extent_idx));
			neh->eh_entries++;
			BUG_ON(neh->eh_entries > neh->eh_max);*/
			path[i].p_idx++;
			m++;
		}
		if (m) {
			memmove(++fidx, path[i].p_idx - m,
				sizeof(struct ext4_extent_idx) * m);
			neh->eh_entries =
				cpu_to_le16(le16_to_cpu(neh->eh_entries) + m);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_journal_dirty_metadata(handle, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			path[i].p_hdr->eh_entries =
				cpu_to_le16(le16_to_cpu(path[i].p_hdr->eh_entries) - m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, ablocks[i], 1);
		}
	}
	kfree(ablocks);

	return err;
}
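
/*
 * Shape of a split, sketched for depth == 2 and at == 0 (the root
 * still has a free slot, so depth - at == 2 new blocks are allocated;
 * the picture is schematic):
 *
 *	before:  root -> [full index] -> [full leaf]
 *
 *	after:   root -> [index] ------> [leaf]
 *	              \-> [new index] -> [new leaf]
 *
 * Extents and index entries to the right of the split point migrate
 * into the new branch; border is the first logical block it serves,
 * and is what ext4_ext_insert_index() records in the parent.
 */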

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *path,
				 struct ext4_extent *newext)
{
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_journal_dirty_metadata(handle, bh);
	if (err)
		goto out;

	/* create index in new top-level index: num,max,pointer */
	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		goto out;

	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode));
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
	/* FIXME: it works, but actually path[0] can be index */
	curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	ext4_idx_store_pblock(curp->p_idx, newblock);

	neh = ext_inode_hdr(inode);
	fidx = EXT_FIRST_INDEX(neh);
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(fidx->ei_block), idx_pblock(fidx));

	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
out:
	brelse(bh);

	return err;
}
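
/*
 * Growing in depth, sketched: the root stays in the inode body (only
 * its contents move), so no index entry pointing at the root ever has
 * to change.
 *
 *	before:  inode.i_data = header + N entries          (depth d)
 *	after:   inode.i_data = header + 1 index  ----+     (depth d+1)
 *	                                              v
 *	                     new block: header + the N old entries
 *
 * The freshly emptied root then has room for ext4_ext_space_root_idx()
 * index entries; as the caller below notes, only the depth 0 -> 1 case
 * yields free leaf space directly, deeper trees still need a split.
 */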

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    struct ext4_ext_path *path,
				    struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up the tree and look for free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, path, newext, i);

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
					    le32_to_cpu(newext->ee_block),
					    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, path, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
					    le32_to_cpu(newext->ee_block),
					    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static unsigned long
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCK;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext !=
			    EXT_LAST_EXTENT(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
			    EXT_LAST_INDEX(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCK
 */
static unsigned ext4_ext_next_leaf_block(struct inode *inode,
					 struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCK;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
		    EXT_LAST_INDEX(path[depth].p_hdr))
			return le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
			     struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	BUG_ON(ex == NULL);
	BUG_ON(eh == NULL);

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}

static int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
			   struct ext4_extent *ex2)
{
	if (le32_to_cpu(ex1->ee_block) + le16_to_cpu(ex1->ee_len) !=
	    le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (le16_to_cpu(ex1->ee_len) + le16_to_cpu(ex2->ee_len) > EXT_MAX_LEN)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (le16_to_cpu(ex1->ee_len) >= 4)
		return 0;
#endif

	if (ext_pblock(ex1) + le16_to_cpu(ex1->ee_len) == ext_pblock(ex2))
		return 1;
	return 0;
}
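
/*
 * Example: ex1 = [lblock 100, len 8, pblock 500] and
 * ex2 = [lblock 108, len 4, pblock 508] are mergeable -- both the
 * logical and the physical ranges are back to back -- and would
 * collapse into [100, 12, 500].  If ex2 instead started at pblock 600,
 * the logical check would pass but the physical one would not, and
 * the two extents stay separate.
 */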

/*
 * ext4_ext_insert_extent:
 * tries to merge requested extent into the existing extent or
 * inserts requested extent as new one into the tree,
 * creating new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path *path,
			   struct ext4_extent *newext)
{
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err, next;

	BUG_ON(newext->ee_len == 0);
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	BUG_ON(path[depth].p_hdr == NULL);

	/* try to insert block into found extent and return */
	if (ex && ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append %d block to %d:%d (from %llu)\n",
			  le16_to_cpu(newext->ee_len),
			  le32_to_cpu(ex->ee_block),
			  le16_to_cpu(ex->ee_len), ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			return err;
		ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len)
					 + le16_to_cpu(newext->ee_len));
		eh = path[depth].p_hdr;
		nearex = ex;
		goto merge;
	}

repeat:
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = ext4_ext_next_leaf_block(inode, path);
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
	    && next != EXT_MAX_BLOCK) {
		ext_debug("next leaf block - %d\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto repeat;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	 */
	err = ext4_ext_create_new_leaf(handle, inode, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %d:%llu:%d\n",
			  le32_to_cpu(newext->ee_block),
			  ext_pblock(newext),
			  le16_to_cpu(newext->ee_len));
		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
	} else if (le32_to_cpu(newext->ee_block)
		   > le32_to_cpu(nearex->ee_block)) {
		/* BUG_ON(newext->ee_block == nearex->ee_block); */
		if (nearex != EXT_LAST_EXTENT(eh)) {
			len = EXT_MAX_EXTENT(eh) - nearex;
			len = (len - 1) * sizeof(struct ext4_extent);
			len = len < 0 ? 0 : len;
			ext_debug("insert %d:%llu:%d after: nearest 0x%p, "
				  "move %d from 0x%p to 0x%p\n",
				  le32_to_cpu(newext->ee_block),
				  ext_pblock(newext),
				  le16_to_cpu(newext->ee_len),
				  nearex, len, nearex + 1, nearex + 2);
			memmove(nearex + 2, nearex + 1, len);
		}
		path[depth].p_ext = nearex + 1;
	} else {
		BUG_ON(newext->ee_block == nearex->ee_block);
		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
		len = len < 0 ? 0 : len;
		ext_debug("insert %d:%llu:%d before: nearest 0x%p, "
			  "move %d from 0x%p to 0x%p\n",
			  le32_to_cpu(newext->ee_block),
			  ext_pblock(newext),
			  le16_to_cpu(newext->ee_len),
			  nearex, len, nearex + 1, nearex + 2);
		memmove(nearex + 1, nearex, len);
		path[depth].p_ext = nearex;
	}

	eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries) + 1);
	nearex = path[depth].p_ext;
	nearex->ee_block = newext->ee_block;
	nearex->ee_start = newext->ee_start;
	nearex->ee_start_hi = newext->ee_start_hi;
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents to the right */
	while (nearex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, nearex, nearex + 1))
			break;
		/* merge with next extent! */
		nearex->ee_len = cpu_to_le16(le16_to_cpu(nearex->ee_len)
					     + le16_to_cpu(nearex[1].ee_len));
		if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - nearex - 1)
					* sizeof(struct ext4_extent);
			memmove(nearex + 1, nearex + 2, len);
		}
		eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries) - 1);
		BUG_ON(eh->eh_entries == 0);
	}

	/* try to merge extents to the left */

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		kfree(npath);
	}
	ext4_ext_tree_changed(inode);
	ext4_ext_invalidate_cache(inode);
	return err;
}

int ext4_ext_walk_space(struct inode *inode, unsigned long block,
			unsigned long num, ext_prepare_callback func,
			void *cbdata)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_ext_cache cbex;
	struct ext4_extent *ex;
	unsigned long next, start = 0, end = 0;
	unsigned long last = block + num;
	int depth, exists, err = 0;

	BUG_ON(func == NULL);
	BUG_ON(inode == NULL);

	while (block < last && block != EXT_MAX_BLOCK) {
		num = last - block;
		/* find extent for this block */
		path = ext4_ext_find_extent(inode, block, path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			path = NULL;
			break;
		}

		depth = ext_depth(inode);
		BUG_ON(path[depth].p_hdr == NULL);
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);

		exists = 0;
		if (!ex) {
			/* there is no extent yet, so try to allocate
			 * all requested space */
			start = block;
			end = block + num;
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			start = block;
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
				end = block + num;
		} else if (block >=
			   le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len)) {
			/* need to allocate space after found extent */
			start = block;
			end = block + num;
			if (end >= next)
				end = next;
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 * by found extent
			 */
			start = block;
			end = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len);
			if (block + num < end)
				end = block + num;
			exists = 1;
		} else {
			BUG();
		}
		BUG_ON(end <= start);

		if (!exists) {
			cbex.ec_block = start;
			cbex.ec_len = end - start;
			cbex.ec_start = 0;
			cbex.ec_type = EXT4_EXT_CACHE_GAP;
		} else {
			cbex.ec_block = le32_to_cpu(ex->ee_block);
			cbex.ec_len = le16_to_cpu(ex->ee_len);
			cbex.ec_start = ext_pblock(ex);
			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
		}

		BUG_ON(cbex.ec_len == 0);
		err = func(inode, path, &cbex, cbdata);
		ext4_ext_drop_refs(path);

		if (err < 0)
			break;
		if (err == EXT_REPEAT)
			continue;
		else if (err == EXT_BREAK) {
			err = 0;
			break;
		}

		if (ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */
			kfree(path);
			path = NULL;
		}

		block = cbex.ec_block + cbex.ec_len;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	return err;
}
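
/*
 * A minimal callback sketch for ext4_ext_walk_space() (assumes the
 * EXT_CONTINUE/EXT_BREAK/EXT_REPEAT return codes declared alongside
 * ext_prepare_callback; the function name and counter are made up):
 *
 *	static int count_allocated(struct inode *inode,
 *				   struct ext4_ext_path *path,
 *				   struct ext4_ext_cache *ec, void *cbdata)
 *	{
 *		unsigned long *blocks = cbdata;
 *
 *		if (ec->ec_type == EXT4_EXT_CACHE_EXTENT)
 *			*blocks += ec->ec_len;	(gaps arrive as CACHE_GAP)
 *		return EXT_CONTINUE;
 *	}
 *
 * Returning EXT_BREAK stops the walk, EXT_REPEAT revisits the same
 * range, and a negative value is propagated back as an error.
 */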

static void
ext4_ext_put_in_cache(struct inode *inode, __u32 block,
		      __u32 len, __u32 start, int type)
{
	struct ext4_ext_cache *cex;

	BUG_ON(len == 0);
	cex = &EXT4_I(inode)->i_cached_extent;
	cex->ec_type = type;
	cex->ec_block = block;
	cex->ec_len = len;
	cex->ec_start = start;
}

/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
static void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
			  unsigned long block)
{
	int depth = ext_depth(inode);
	unsigned long lblock, len;
	struct ext4_extent *ex;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		lblock = 0;
		len = EXT_MAX_BLOCK;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		lblock = block;
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %lu [%lu:%lu]",
			  (unsigned long) block,
			  (unsigned long) le32_to_cpu(ex->ee_block),
			  (unsigned long) le16_to_cpu(ex->ee_len));
	} else if (block >= le32_to_cpu(ex->ee_block)
		   + le16_to_cpu(ex->ee_len)) {
		lblock = le32_to_cpu(ex->ee_block)
			 + le16_to_cpu(ex->ee_len);
		len = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%lu:%lu] %lu",
			  (unsigned long) le32_to_cpu(ex->ee_block),
			  (unsigned long) le16_to_cpu(ex->ee_len),
			  (unsigned long) block);
		BUG_ON(len == lblock);
		len = len - lblock;
	} else {
		lblock = len = 0;
		BUG();
	}

	ext_debug(" -> %lu:%lu\n", (unsigned long) lblock, len);
	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
}

static int
ext4_ext_in_cache(struct inode *inode, unsigned long block,
		  struct ext4_extent *ex)
{
	struct ext4_ext_cache *cex;

	cex = &EXT4_I(inode)->i_cached_extent;

	/* has cache valid data? */
	if (cex->ec_type == EXT4_EXT_CACHE_NO)
		return EXT4_EXT_CACHE_NO;

	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
	       cex->ec_type != EXT4_EXT_CACHE_EXTENT);
	if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
		ex->ee_block = cpu_to_le32(cex->ec_block);
		ext4_ext_store_pblock(ex, cex->ec_start);
		ex->ee_len = cpu_to_le16(cex->ec_len);
		ext_debug("%lu cached by %lu:%lu:%llu\n",
			  (unsigned long) block,
			  (unsigned long) cex->ec_block,
			  (unsigned long) cex->ec_len,
			  cex->ec_start);
		return cex->ec_type;
	}

	/* not in cache */
	return EXT4_EXT_CACHE_NO;
}
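
/*
 * The one-entry cache above remembers either a mapped extent or a gap.
 * Example: if a lookup of block 5 finds no extent before block 100,
 * ext4_ext_put_gap_in_cache() stores [0, 100) as EXT4_EXT_CACHE_GAP;
 * a subsequent lookup of any block below 100 is then answered from the
 * cache without walking the tree, which is how ext4_ext_get_blocks()
 * uses it below.
 */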

/*
 * ext4_ext_rm_idx:
 * removes index from the index block.
 * It's used in truncate case only, thus all requests are for
 * last index in the block only.
 */
int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
		    struct ext4_ext_path *path)
{
	struct buffer_head *bh;
	int err;
	ext4_fsblk_t leaf;

	/* free index block */
	path--;
	leaf = idx_pblock(path->p_idx);
	BUG_ON(path->p_hdr->eh_entries == 0);
	err = ext4_ext_get_access(handle, inode, path);
	if (err)
		return err;
	path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries) - 1);
	err = ext4_ext_dirty(handle, inode, path);
	if (err)
		return err;
	ext_debug("index is empty, remove it, free block %llu\n", leaf);
	bh = sb_find_get_block(inode->i_sb, leaf);
	ext4_forget(handle, 1, inode, bh, leaf);
	ext4_free_blocks(handle, inode, leaf, 1);
	return err;
}

/*
 * ext4_ext_calc_credits_for_insert:
 * This routine returns max. credits that the extent tree can consume.
 * It should be OK for low-performance paths like ->writepage().
 * To allow many writing processes to fit into a single transaction,
 * the caller should calculate credits under truncate_mutex and
 * pass the actual path.
 */
int ext4_ext_calc_credits_for_insert(struct inode *inode,
				     struct ext4_ext_path *path)
{
	int depth, needed;

	if (path) {
		/* probably there is space in leaf? */
		depth = ext_depth(inode);
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
		    < le16_to_cpu(path[depth].p_hdr->eh_max))
			return 1;
	}

	/*
	 * given 32-bit logical block (4294967296 blocks), max. tree
	 * can be 4 levels in depth -- 4 * 340^4 == 53453440000.
	 * Let's also add one more level for imbalance.
	 */
	depth = 5;

	/* allocation of new data block(s) */
	needed = 2;

	/*
	 * tree can be full, so it would need to grow in depth:
	 * we need one credit to modify old root, credits for
	 * new root will be added in split accounting
	 */
	needed += 1;

	/*
	 * Index split can happen, we would need:
	 *    allocate intermediate indexes (bitmap + group)
	 *  + change two blocks at each level, but root (already included)
	 */
	needed += (depth * 2) + (depth * 2);

	/* any allocation modifies superblock */
	needed += 1;

	return needed;
}
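
/*
 * Worked out for the no-path case above: depth is pinned at 5, so
 *
 *	needed = 2 (data blocks) + 1 (root modification)
 *	       + 5 * 2 + 5 * 2 (index splits: allocations plus two
 *	                        blocks changed per level)
 *	       + 1 (superblock)
 *	       = 24 credits per insert.
 */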

static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
			      struct ext4_extent *ex,
			      unsigned long from, unsigned long to)
{
	struct buffer_head *bh;
	int i;

#ifdef EXTENTS_STATS
	{
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		unsigned short ee_len = le16_to_cpu(ex->ee_len);
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode);
		spin_unlock(&sbi->s_ext_stats_lock);
	}
#endif
	if (from >= le32_to_cpu(ex->ee_block)
	    && to == le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
		/* tail removal */
		unsigned long num;
		ext4_fsblk_t start;
		num = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - from;
		start = ext_pblock(ex) + le16_to_cpu(ex->ee_len) - num;
		ext_debug("free last %lu blocks starting %llu\n", num, start);
		for (i = 0; i < num; i++) {
			bh = sb_find_get_block(inode->i_sb, start + i);
			ext4_forget(handle, 0, inode, bh, start + i);
		}
		ext4_free_blocks(handle, inode, start, num);
	} else if (from == le32_to_cpu(ex->ee_block)
		   && to <= le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
		printk("strange request: removal %lu-%lu from %u:%u\n",
		       from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
	} else {
		printk("strange request: removal(2) %lu-%lu from %u:%u\n",
		       from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
	}
	return 0;
}

static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
		 struct ext4_ext_path *path, unsigned long start)
{
	int err = 0, correct_index = 0;
	int depth = ext_depth(inode), credits;
	struct ext4_extent_header *eh;
	unsigned a, b, block, num;
	unsigned long ex_ee_block;
	unsigned short ex_ee_len;
	struct ext4_extent *ex;

	ext_debug("truncate since %lu in leaf\n", start);
	if (!path[depth].p_hdr)
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	eh = path[depth].p_hdr;
	BUG_ON(eh == NULL);
	BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
	BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);

	/* find where to start removing */
	ex = EXT_LAST_EXTENT(eh);

	ex_ee_block = le32_to_cpu(ex->ee_block);
	ex_ee_len = le16_to_cpu(ex->ee_len);

	while (ex >= EXT_FIRST_EXTENT(eh) &&
	       ex_ee_block + ex_ee_len > start) {
		ext_debug("remove ext %lu:%u\n", ex_ee_block, ex_ee_len);
		path[depth].p_ext = ex;

		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
			ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;

		ext_debug("  border %u:%u\n", a, b);

		if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
			block = 0;
			num = 0;
			BUG();
		} else if (a != ex_ee_block) {
			/* remove tail of the extent */
			block = ex_ee_block;
			num = a - block;
		} else if (b != ex_ee_block + ex_ee_len - 1) {
			/* remove head of the extent */
			block = a;
			num = b - a;
			/* there is no "make a hole" API yet */
			BUG();
		} else {
			/* remove whole extent: excellent! */
			block = ex_ee_block;
			num = 0;
			BUG_ON(a != ex_ee_block);
			BUG_ON(b != ex_ee_block + ex_ee_len - 1);
		}

		/* at present, extent can't cross block group: */
		/* leaf + bitmap + group desc + sb + inode */
		credits = 5;
		if (ex == EXT_FIRST_EXTENT(eh)) {
			correct_index = 1;
			credits += (ext_depth(inode)) + 1;
		}
#ifdef CONFIG_QUOTA
		credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
#endif

		handle = ext4_ext_journal_restart(handle, credits);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto out;
		}

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		err = ext4_remove_blocks(handle, inode, ex, a, b);
		if (err)
			goto out;

		if (num == 0) {
			/* this extent is removed; mark slot entirely unused */
			ext4_ext_store_pblock(ex, 0);
			eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries) - 1);
		}

		ex->ee_block = cpu_to_le32(block);
		ex->ee_len = cpu_to_le16(num);

		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		ext_debug("new extent: %u:%u:%llu\n", block, num,
			  ext_pblock(ex));
		ex--;
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = le16_to_cpu(ex->ee_len);
	}

	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);

	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path + depth);

out:
	return err;
}
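
/*
 * The a/b borders above, on an example: truncating from start == 105
 * an extent [ee_block 100, len 10] gives a == 105, b == 109, i.e. a
 * tail removal; blocks 105..109 are freed and the extent is shortened
 * to [100, 5].  Truncating from start <= 100 removes the whole extent
 * (num == 0 and the slot is released).  Head removal would amount to
 * punching a hole, which this code does not support yet (BUG()).
 */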

/*
 * ext4_ext_more_to_rm:
 * returns 1 if current index has to be freed (even partial)
 */
static int
ext4_ext_more_to_rm(struct ext4_ext_path *path)
{
	BUG_ON(path->p_idx == NULL);

	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
		return 0;

	/*
	 * if truncate on deeper level happened, it wasn't partial,
	 * so we have to consider current index for truncation
	 */
	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
		return 0;
	return 1;
}

int ext4_ext_remove_space(struct inode *inode, unsigned long start)
{
	struct super_block *sb = inode->i_sb;
	int depth = ext_depth(inode);
	struct ext4_ext_path *path;
	handle_t *handle;
	int i = 0, err = 0;

	ext_debug("truncate since %lu\n", start);

	/* probably first extent we're gonna free will be last in block */
	handle = ext4_journal_start(inode, depth + 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	ext4_ext_invalidate_cache(inode);

	/*
	 * We start scanning from right side, freeing all the blocks
	 * after i_size and walking into the tree depth-wise.
	 */
	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
	if (path == NULL) {
		ext4_journal_stop(handle);
		return -ENOMEM;
	}
	path[0].p_hdr = ext_inode_hdr(inode);
	if (ext4_ext_check_header(__FUNCTION__, inode, path[0].p_hdr)) {
		err = -EIO;
		goto out;
	}
	path[0].p_depth = depth;

	while (i >= 0 && err == 0) {
		if (i == depth) {
			/* this is leaf block */
			err = ext4_ext_rm_leaf(handle, inode, path, start);
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}

		/* this is index block */
		if (!path[i].p_hdr) {
			ext_debug("initialize header\n");
			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
			if (ext4_ext_check_header(__FUNCTION__, inode,
						  path[i].p_hdr)) {
				err = -EIO;
				goto out;
			}
		}

		BUG_ON(le16_to_cpu(path[i].p_hdr->eh_entries)
		       > le16_to_cpu(path[i].p_hdr->eh_max));
		BUG_ON(path[i].p_hdr->eh_magic != EXT4_EXT_MAGIC);

		if (!path[i].p_idx) {
			/* this level hasn't been touched yet */
			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries) + 1;
			ext_debug("init index ptr: hdr 0x%p, num %d\n",
				  path[i].p_hdr,
				  le16_to_cpu(path[i].p_hdr->eh_entries));
		} else {
			/* we were already here, see at next index */
			path[i].p_idx--;
		}

		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
			  i, EXT_FIRST_INDEX(path[i].p_hdr),
			  path[i].p_idx);
		if (ext4_ext_more_to_rm(path + i)) {
			/* go to the next level */
			ext_debug("move to level %d (block %llu)\n",
				  i + 1, idx_pblock(path[i].p_idx));
			memset(path + i + 1, 0, sizeof(*path));
			path[i+1].p_bh =
				sb_bread(sb, idx_pblock(path[i].p_idx));
			if (!path[i+1].p_bh) {
				/* should we reset i_size? */
				err = -EIO;
				break;
			}

			/* save actual number of indexes since this
			 * number is changed at the next iteration */
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
			i++;
		} else {
			/* we finished processing this index, go up */
			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
				/* index is empty, remove it;
				 * handle must be already prepared by
				 * ext4_ext_rm_leaf() */
				err = ext4_ext_rm_idx(handle, inode, path + i);
			}
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			ext_debug("return to level %d\n", i);
		}
	}

	/* TODO: flexible tree reduction should be here */
	if (path->p_hdr->eh_entries == 0) {
		/*
		 * truncate to zero freed all the tree,
		 * so we need to correct eh_depth
		 */
		err = ext4_ext_get_access(handle, inode, path);
		if (err == 0) {
			ext_inode_hdr(inode)->eh_depth = 0;
			ext_inode_hdr(inode)->eh_max =
				cpu_to_le16(ext4_ext_space_root(inode));
			err = ext4_ext_dirty(handle, inode, path);
		}
	}
out:
	ext4_ext_tree_changed(inode);
	ext4_ext_drop_refs(path);
	kfree(path);
	ext4_journal_stop(handle);

	return err;
}

/*
 * called at mount time
 */
void ext4_ext_init(struct super_block *sb)
{
	/*
	 * possible initialization would be here
	 */

	if (test_opt(sb, EXTENTS)) {
		printk("EXT4-fs: file extents enabled");
#ifdef AGGRESSIVE_TEST
		printk(", aggressive tests");
#endif
#ifdef CHECK_BINSEARCH
		printk(", check binsearch");
#endif
#ifdef EXTENTS_STATS
		printk(", stats");
#endif
		printk("\n");
#ifdef EXTENTS_STATS
		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
		EXT4_SB(sb)->s_ext_min = 1 << 30;
		EXT4_SB(sb)->s_ext_max = 0;
#endif
	}
}

/*
 * called at umount time
 */
void ext4_ext_release(struct super_block *sb)
{
	if (!test_opt(sb, EXTENTS))
		return;

#ifdef EXTENTS_STATS
	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
		       sbi->s_ext_blocks, sbi->s_ext_extents,
		       sbi->s_ext_blocks / sbi->s_ext_extents);
		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
		       sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
	}
#endif
}

int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
			ext4_fsblk_t iblock,
			unsigned long max_blocks, struct buffer_head *bh_result,
			int create, int extend_disksize)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent newex, *ex;
	ext4_fsblk_t goal, newblock;
	int err = 0, depth;
	unsigned long allocated = 0;

	__clear_bit(BH_New, &bh_result->b_state);
	ext_debug("blocks %d/%lu requested for inode %u\n", (int) iblock,
		  max_blocks, (unsigned) inode->i_ino);
	mutex_lock(&EXT4_I(inode)->truncate_mutex);

	/* check in cache */
	goal = ext4_ext_in_cache(inode, iblock, &newex);
	if (goal) {
		if (goal == EXT4_EXT_CACHE_GAP) {
			if (!create) {
				/* block isn't allocated yet and
				 * user doesn't want to allocate it */
				goto out2;
			}
			/* we should allocate requested block */
		} else if (goal == EXT4_EXT_CACHE_EXTENT) {
			/* block is already allocated */
			newblock = iblock
				   - le32_to_cpu(newex.ee_block)
				   + ext_pblock(&newex);
			/* number of remaining blocks in the extent */
			allocated = le16_to_cpu(newex.ee_len) -
				    (iblock - le32_to_cpu(newex.ee_block));
			goto out;
		} else {
			BUG();
		}
	}

	/* find extent for this block */
	path = ext4_ext_find_extent(inode, iblock, NULL);
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;
		goto out2;
	}

	depth = ext_depth(inode);

	/*
	 * consistent leaf must not be empty;
	 * this situation is possible, though, _during_ tree modification;
	 * this is why assert can't be put in ext4_ext_find_extent()
	 */
	BUG_ON(path[depth].p_ext == NULL && depth != 0);

	ex = path[depth].p_ext;
	if (ex) {
		unsigned long ee_block = le32_to_cpu(ex->ee_block);
		ext4_fsblk_t ee_start = ext_pblock(ex);
		unsigned short ee_len = le16_to_cpu(ex->ee_len);

		/*
		 * Allow future support for preallocated extents to be added
		 * as an RO_COMPAT feature:
		 * Uninitialized extents are treated as holes, except that
		 * we avoid (fail) allocating new blocks during a write.
		 */
		if (ee_len > EXT_MAX_LEN)
			goto out2;
		/* if found extent covers block, simply return it */
		if (iblock >= ee_block && iblock < ee_block + ee_len) {
			newblock = iblock - ee_block + ee_start;
			/* number of remaining blocks in the extent */
			allocated = ee_len - (iblock - ee_block);
			ext_debug("%d fit into %lu:%d -> %llu\n", (int) iblock,
				  ee_block, ee_len, newblock);
			ext4_ext_put_in_cache(inode, ee_block, ee_len,
					      ee_start, EXT4_EXT_CACHE_EXTENT);
			goto out;
		}
	}

	/*
	 * requested block isn't allocated yet;
	 * we shouldn't try to create a block if the create flag is zero
	 */
	if (!create) {
		/* put just found gap into cache to speed up
		 * subsequent requests */
		ext4_ext_put_gap_in_cache(inode, path, iblock);
		goto out2;
	}
	/*
	 * Okay, we need to do block allocation.  Lazily initialize the block
	 * allocation info here if necessary.
	 */
	if (S_ISREG(inode->i_mode) && (!EXT4_I(inode)->i_block_alloc_info))
		ext4_init_block_alloc_info(inode);

	/* allocate new block */
	goal = ext4_ext_find_goal(inode, path, iblock);
	allocated = max_blocks;
	newblock = ext4_new_blocks(handle, inode, goal, &allocated, &err);
	if (!newblock)
		goto out2;
	ext_debug("allocate new block: goal %llu, found %llu/%lu\n",
		  goal, newblock, allocated);

	/* try to insert new extent into found leaf and return */
	newex.ee_block = cpu_to_le32(iblock);
	ext4_ext_store_pblock(&newex, newblock);
	newex.ee_len = cpu_to_le16(allocated);
	err = ext4_ext_insert_extent(handle, inode, path, &newex);
	if (err)
		goto out2;

	if (extend_disksize && inode->i_size > EXT4_I(inode)->i_disksize)
		EXT4_I(inode)->i_disksize = inode->i_size;

	/* previous routine could use block we allocated */
	newblock = ext_pblock(&newex);
	__set_bit(BH_New, &bh_result->b_state);

	ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
			      EXT4_EXT_CACHE_EXTENT);
out:
	if (allocated > max_blocks)
		allocated = max_blocks;
	ext4_ext_show_leaf(inode, path);
	__set_bit(BH_Mapped, &bh_result->b_state);
	bh_result->b_bdev = inode->i_sb->s_bdev;
	bh_result->b_blocknr = newblock;
out2:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	mutex_unlock(&EXT4_I(inode)->truncate_mutex);

	return err ? err : allocated;
}
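
/*
 * Calling convention of ext4_ext_get_blocks(), sketched for a
 * hypothetical caller (the real ones live in the block-mapping paths):
 *
 *	ret = ext4_ext_get_blocks(handle, inode, iblock, max_blocks,
 *				  &bh, create, extend_disksize);
 *	ret > 0:   ret contiguous blocks starting at bh.b_blocknr are
 *	           mapped; buffer_new() on bh tells whether they were
 *	           freshly allocated;
 *	ret == 0:  the block is a hole (and create was 0);
 *	ret < 0:   -errno.
 */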

void ext4_ext_truncate(struct inode *inode, struct page *page)
{
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	unsigned long last_block;
	handle_t *handle;
	int err = 0;

	/*
	 * probably first extent we're gonna free will be last in block
	 */
	err = ext4_writepage_trans_blocks(inode) + 3;
	handle = ext4_journal_start(inode, err);
	if (IS_ERR(handle)) {
		if (page) {
			clear_highpage(page);
			flush_dcache_page(page);
			unlock_page(page);
			page_cache_release(page);
		}
		return;
	}

	if (page)
		ext4_block_truncate_page(handle, page, mapping, inode->i_size);

	mutex_lock(&EXT4_I(inode)->truncate_mutex);
	ext4_ext_invalidate_cache(inode);

	/*
	 * TODO: optimization is possible here.
	 * Probably we need not scan at all,
	 * because page truncation is enough.
	 */
	if (ext4_orphan_add(handle, inode))
		goto out_stop;

	/* we have to know where to truncate from in crash case */
	EXT4_I(inode)->i_disksize = inode->i_size;
	ext4_mark_inode_dirty(handle, inode);

	last_block = (inode->i_size + sb->s_blocksize - 1)
			>> EXT4_BLOCK_SIZE_BITS(sb);
	err = ext4_ext_remove_space(inode, last_block);

	/* In a multi-transaction truncate, we only make the final
	 * transaction synchronous. */
	if (IS_SYNC(inode))
		handle->h_sync = 1;

out_stop:
	/*
	 * If this was a simple ftruncate() and the file will remain alive,
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	mutex_unlock(&EXT4_I(inode)->truncate_mutex);
	ext4_journal_stop(handle);
}

/*
 * ext4_ext_writepage_trans_blocks:
 * calculate max number of blocks we could modify
 * in order to allocate new block for an inode
 */
int ext4_ext_writepage_trans_blocks(struct inode *inode, int num)
{
	int needed;

	needed = ext4_ext_calc_credits_for_insert(inode, NULL);

	/* caller wants to allocate num blocks, but note it includes sb */
	needed = needed * num - (num - 1);

#ifdef CONFIG_QUOTA
	needed += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
#endif

	return needed;
}

EXPORT_SYMBOL(ext4_mark_inode_dirty);
EXPORT_SYMBOL(ext4_ext_invalidate_cache);
EXPORT_SYMBOL(ext4_ext_insert_extent);
EXPORT_SYMBOL(ext4_ext_walk_space);
EXPORT_SYMBOL(ext4_ext_find_goal);
EXPORT_SYMBOL(ext4_ext_calc_credits_for_insert);