1 /* 2 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com 3 * Written by Alex Tomas <alex@clusterfs.com> 4 * 5 * Architecture independence: 6 * Copyright (c) 2005, Bull S.A. 7 * Written by Pierre Peiffer <pierre.peiffer@bull.net> 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public Licens 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- 21 */ 22 23 /* 24 * Extents support for EXT4 25 * 26 * TODO: 27 * - ext4*_error() should be used in some situations 28 * - analyze all BUG()/BUG_ON(), use -EIO where appropriate 29 * - smart tree reduction 30 */ 31 32 #include <linux/fs.h> 33 #include <linux/time.h> 34 #include <linux/jbd2.h> 35 #include <linux/highuid.h> 36 #include <linux/pagemap.h> 37 #include <linux/quotaops.h> 38 #include <linux/string.h> 39 #include <linux/slab.h> 40 #include <asm/uaccess.h> 41 #include <linux/fiemap.h> 42 #include "ext4_jbd2.h" 43 #include "ext4_extents.h" 44 #include "xattr.h" 45 46 #include <trace/events/ext4.h> 47 48 /* 49 * used by extent splitting. 50 */ 51 #define EXT4_EXT_MAY_ZEROOUT 0x1 /* safe to zeroout if split fails \ 52 due to ENOSPC */ 53 #define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */ 54 #define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */ 55 56 #define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */ 57 #define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */ 58 59 static __le32 ext4_extent_block_csum(struct inode *inode, 60 struct ext4_extent_header *eh) 61 { 62 struct ext4_inode_info *ei = EXT4_I(inode); 63 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 64 __u32 csum; 65 66 csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh, 67 EXT4_EXTENT_TAIL_OFFSET(eh)); 68 return cpu_to_le32(csum); 69 } 70 71 static int ext4_extent_block_csum_verify(struct inode *inode, 72 struct ext4_extent_header *eh) 73 { 74 struct ext4_extent_tail *et; 75 76 if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, 77 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) 78 return 1; 79 80 et = find_ext4_extent_tail(eh); 81 if (et->et_checksum != ext4_extent_block_csum(inode, eh)) 82 return 0; 83 return 1; 84 } 85 86 static void ext4_extent_block_csum_set(struct inode *inode, 87 struct ext4_extent_header *eh) 88 { 89 struct ext4_extent_tail *et; 90 91 if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, 92 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) 93 return; 94 95 et = find_ext4_extent_tail(eh); 96 et->et_checksum = ext4_extent_block_csum(inode, eh); 97 } 98 99 static int ext4_split_extent(handle_t *handle, 100 struct inode *inode, 101 struct ext4_ext_path *path, 102 struct ext4_map_blocks *map, 103 int split_flag, 104 int flags); 105 106 static int ext4_split_extent_at(handle_t *handle, 107 struct inode *inode, 108 struct ext4_ext_path *path, 109 ext4_lblk_t split, 110 int split_flag, 111 int flags); 112 113 static int ext4_find_delayed_extent(struct inode *inode, 114 struct extent_status *newes); 115 116 static int ext4_ext_truncate_extend_restart(handle_t *handle, 117 struct inode *inode, 118 int needed) 119 { 120 int err; 121 122 if (!ext4_handle_valid(handle)) 123 return 0; 124 if (handle->h_buffer_credits > needed) 125 return 0; 126 err = ext4_journal_extend(handle, needed); 127 if (err <= 0) 128 return err; 129 err = ext4_truncate_restart_trans(handle, inode, needed); 130 if (err == 0) 131 err = -EAGAIN; 132 133 return err; 134 } 135 136 /* 137 * could return: 138 * - EROFS 139 * - ENOMEM 140 */ 141 static int ext4_ext_get_access(handle_t *handle, struct inode *inode, 142 struct ext4_ext_path *path) 143 { 144 if (path->p_bh) { 145 /* path points to block */ 146 return ext4_journal_get_write_access(handle, path->p_bh); 147 } 148 /* path points to leaf/index in inode body */ 149 /* we use in-core data, no need to protect them */ 150 return 0; 151 } 152 153 /* 154 * could return: 155 * - EROFS 156 * - ENOMEM 157 * - EIO 158 */ 159 int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle, 160 struct inode *inode, struct ext4_ext_path *path) 161 { 162 int err; 163 if (path->p_bh) { 164 ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh)); 165 /* path points to block */ 166 err = __ext4_handle_dirty_metadata(where, line, handle, 167 inode, path->p_bh); 168 } else { 169 /* path points to leaf/index in inode body */ 170 err = ext4_mark_inode_dirty(handle, inode); 171 } 172 return err; 173 } 174 175 static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode, 176 struct ext4_ext_path *path, 177 ext4_lblk_t block) 178 { 179 if (path) { 180 int depth = path->p_depth; 181 struct ext4_extent *ex; 182 183 /* 184 * Try to predict block placement assuming that we are 185 * filling in a file which will eventually be 186 * non-sparse --- i.e., in the case of libbfd writing 187 * an ELF object sections out-of-order but in a way 188 * the eventually results in a contiguous object or 189 * executable file, or some database extending a table 190 * space file. However, this is actually somewhat 191 * non-ideal if we are writing a sparse file such as 192 * qemu or KVM writing a raw image file that is going 193 * to stay fairly sparse, since it will end up 194 * fragmenting the file system's free space. Maybe we 195 * should have some hueristics or some way to allow 196 * userspace to pass a hint to file system, 197 * especially if the latter case turns out to be 198 * common. 199 */ 200 ex = path[depth].p_ext; 201 if (ex) { 202 ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex); 203 ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block); 204 205 if (block > ext_block) 206 return ext_pblk + (block - ext_block); 207 else 208 return ext_pblk - (ext_block - block); 209 } 210 211 /* it looks like index is empty; 212 * try to find starting block from index itself */ 213 if (path[depth].p_bh) 214 return path[depth].p_bh->b_blocknr; 215 } 216 217 /* OK. use inode's group */ 218 return ext4_inode_to_goal_block(inode); 219 } 220 221 /* 222 * Allocation for a meta data block 223 */ 224 static ext4_fsblk_t 225 ext4_ext_new_meta_block(handle_t *handle, struct inode *inode, 226 struct ext4_ext_path *path, 227 struct ext4_extent *ex, int *err, unsigned int flags) 228 { 229 ext4_fsblk_t goal, newblock; 230 231 goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block)); 232 newblock = ext4_new_meta_blocks(handle, inode, goal, flags, 233 NULL, err); 234 return newblock; 235 } 236 237 static inline int ext4_ext_space_block(struct inode *inode, int check) 238 { 239 int size; 240 241 size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) 242 / sizeof(struct ext4_extent); 243 #ifdef AGGRESSIVE_TEST 244 if (!check && size > 6) 245 size = 6; 246 #endif 247 return size; 248 } 249 250 static inline int ext4_ext_space_block_idx(struct inode *inode, int check) 251 { 252 int size; 253 254 size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) 255 / sizeof(struct ext4_extent_idx); 256 #ifdef AGGRESSIVE_TEST 257 if (!check && size > 5) 258 size = 5; 259 #endif 260 return size; 261 } 262 263 static inline int ext4_ext_space_root(struct inode *inode, int check) 264 { 265 int size; 266 267 size = sizeof(EXT4_I(inode)->i_data); 268 size -= sizeof(struct ext4_extent_header); 269 size /= sizeof(struct ext4_extent); 270 #ifdef AGGRESSIVE_TEST 271 if (!check && size > 3) 272 size = 3; 273 #endif 274 return size; 275 } 276 277 static inline int ext4_ext_space_root_idx(struct inode *inode, int check) 278 { 279 int size; 280 281 size = sizeof(EXT4_I(inode)->i_data); 282 size -= sizeof(struct ext4_extent_header); 283 size /= sizeof(struct ext4_extent_idx); 284 #ifdef AGGRESSIVE_TEST 285 if (!check && size > 4) 286 size = 4; 287 #endif 288 return size; 289 } 290 291 /* 292 * Calculate the number of metadata blocks needed 293 * to allocate @blocks 294 * Worse case is one block per extent 295 */ 296 int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock) 297 { 298 struct ext4_inode_info *ei = EXT4_I(inode); 299 int idxs; 300 301 idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) 302 / sizeof(struct ext4_extent_idx)); 303 304 /* 305 * If the new delayed allocation block is contiguous with the 306 * previous da block, it can share index blocks with the 307 * previous block, so we only need to allocate a new index 308 * block every idxs leaf blocks. At ldxs**2 blocks, we need 309 * an additional index block, and at ldxs**3 blocks, yet 310 * another index blocks. 311 */ 312 if (ei->i_da_metadata_calc_len && 313 ei->i_da_metadata_calc_last_lblock+1 == lblock) { 314 int num = 0; 315 316 if ((ei->i_da_metadata_calc_len % idxs) == 0) 317 num++; 318 if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0) 319 num++; 320 if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) { 321 num++; 322 ei->i_da_metadata_calc_len = 0; 323 } else 324 ei->i_da_metadata_calc_len++; 325 ei->i_da_metadata_calc_last_lblock++; 326 return num; 327 } 328 329 /* 330 * In the worst case we need a new set of index blocks at 331 * every level of the inode's extent tree. 332 */ 333 ei->i_da_metadata_calc_len = 1; 334 ei->i_da_metadata_calc_last_lblock = lblock; 335 return ext_depth(inode) + 1; 336 } 337 338 static int 339 ext4_ext_max_entries(struct inode *inode, int depth) 340 { 341 int max; 342 343 if (depth == ext_depth(inode)) { 344 if (depth == 0) 345 max = ext4_ext_space_root(inode, 1); 346 else 347 max = ext4_ext_space_root_idx(inode, 1); 348 } else { 349 if (depth == 0) 350 max = ext4_ext_space_block(inode, 1); 351 else 352 max = ext4_ext_space_block_idx(inode, 1); 353 } 354 355 return max; 356 } 357 358 static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) 359 { 360 ext4_fsblk_t block = ext4_ext_pblock(ext); 361 int len = ext4_ext_get_actual_len(ext); 362 ext4_lblk_t lblock = le32_to_cpu(ext->ee_block); 363 ext4_lblk_t last = lblock + len - 1; 364 365 if (lblock > last) 366 return 0; 367 return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); 368 } 369 370 static int ext4_valid_extent_idx(struct inode *inode, 371 struct ext4_extent_idx *ext_idx) 372 { 373 ext4_fsblk_t block = ext4_idx_pblock(ext_idx); 374 375 return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1); 376 } 377 378 static int ext4_valid_extent_entries(struct inode *inode, 379 struct ext4_extent_header *eh, 380 int depth) 381 { 382 unsigned short entries; 383 if (eh->eh_entries == 0) 384 return 1; 385 386 entries = le16_to_cpu(eh->eh_entries); 387 388 if (depth == 0) { 389 /* leaf entries */ 390 struct ext4_extent *ext = EXT_FIRST_EXTENT(eh); 391 struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; 392 ext4_fsblk_t pblock = 0; 393 ext4_lblk_t lblock = 0; 394 ext4_lblk_t prev = 0; 395 int len = 0; 396 while (entries) { 397 if (!ext4_valid_extent(inode, ext)) 398 return 0; 399 400 /* Check for overlapping extents */ 401 lblock = le32_to_cpu(ext->ee_block); 402 len = ext4_ext_get_actual_len(ext); 403 if ((lblock <= prev) && prev) { 404 pblock = ext4_ext_pblock(ext); 405 es->s_last_error_block = cpu_to_le64(pblock); 406 return 0; 407 } 408 ext++; 409 entries--; 410 prev = lblock + len - 1; 411 } 412 } else { 413 struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh); 414 while (entries) { 415 if (!ext4_valid_extent_idx(inode, ext_idx)) 416 return 0; 417 ext_idx++; 418 entries--; 419 } 420 } 421 return 1; 422 } 423 424 static int __ext4_ext_check(const char *function, unsigned int line, 425 struct inode *inode, struct ext4_extent_header *eh, 426 int depth, ext4_fsblk_t pblk) 427 { 428 const char *error_msg; 429 int max = 0; 430 431 if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) { 432 error_msg = "invalid magic"; 433 goto corrupted; 434 } 435 if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) { 436 error_msg = "unexpected eh_depth"; 437 goto corrupted; 438 } 439 if (unlikely(eh->eh_max == 0)) { 440 error_msg = "invalid eh_max"; 441 goto corrupted; 442 } 443 max = ext4_ext_max_entries(inode, depth); 444 if (unlikely(le16_to_cpu(eh->eh_max) > max)) { 445 error_msg = "too large eh_max"; 446 goto corrupted; 447 } 448 if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) { 449 error_msg = "invalid eh_entries"; 450 goto corrupted; 451 } 452 if (!ext4_valid_extent_entries(inode, eh, depth)) { 453 error_msg = "invalid extent entries"; 454 goto corrupted; 455 } 456 /* Verify checksum on non-root extent tree nodes */ 457 if (ext_depth(inode) != depth && 458 !ext4_extent_block_csum_verify(inode, eh)) { 459 error_msg = "extent tree corrupted"; 460 goto corrupted; 461 } 462 return 0; 463 464 corrupted: 465 ext4_error_inode(inode, function, line, 0, 466 "pblk %llu bad header/extent: %s - magic %x, " 467 "entries %u, max %u(%u), depth %u(%u)", 468 (unsigned long long) pblk, error_msg, 469 le16_to_cpu(eh->eh_magic), 470 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max), 471 max, le16_to_cpu(eh->eh_depth), depth); 472 return -EIO; 473 } 474 475 #define ext4_ext_check(inode, eh, depth, pblk) \ 476 __ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk)) 477 478 int ext4_ext_check_inode(struct inode *inode) 479 { 480 return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0); 481 } 482 483 static struct buffer_head * 484 __read_extent_tree_block(const char *function, unsigned int line, 485 struct inode *inode, ext4_fsblk_t pblk, int depth, 486 int flags) 487 { 488 struct buffer_head *bh; 489 int err; 490 491 bh = sb_getblk(inode->i_sb, pblk); 492 if (unlikely(!bh)) 493 return ERR_PTR(-ENOMEM); 494 495 if (!bh_uptodate_or_lock(bh)) { 496 trace_ext4_ext_load_extent(inode, pblk, _RET_IP_); 497 err = bh_submit_read(bh); 498 if (err < 0) 499 goto errout; 500 } 501 if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE)) 502 return bh; 503 err = __ext4_ext_check(function, line, inode, 504 ext_block_hdr(bh), depth, pblk); 505 if (err) 506 goto errout; 507 set_buffer_verified(bh); 508 /* 509 * If this is a leaf block, cache all of its entries 510 */ 511 if (!(flags & EXT4_EX_NOCACHE) && depth == 0) { 512 struct ext4_extent_header *eh = ext_block_hdr(bh); 513 struct ext4_extent *ex = EXT_FIRST_EXTENT(eh); 514 ext4_lblk_t prev = 0; 515 int i; 516 517 for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) { 518 unsigned int status = EXTENT_STATUS_WRITTEN; 519 ext4_lblk_t lblk = le32_to_cpu(ex->ee_block); 520 int len = ext4_ext_get_actual_len(ex); 521 522 if (prev && (prev != lblk)) 523 ext4_es_cache_extent(inode, prev, 524 lblk - prev, ~0, 525 EXTENT_STATUS_HOLE); 526 527 if (ext4_ext_is_uninitialized(ex)) 528 status = EXTENT_STATUS_UNWRITTEN; 529 ext4_es_cache_extent(inode, lblk, len, 530 ext4_ext_pblock(ex), status); 531 prev = lblk + len; 532 } 533 } 534 return bh; 535 errout: 536 put_bh(bh); 537 return ERR_PTR(err); 538 539 } 540 541 #define read_extent_tree_block(inode, pblk, depth, flags) \ 542 __read_extent_tree_block(__func__, __LINE__, (inode), (pblk), \ 543 (depth), (flags)) 544 545 /* 546 * This function is called to cache a file's extent information in the 547 * extent status tree 548 */ 549 int ext4_ext_precache(struct inode *inode) 550 { 551 struct ext4_inode_info *ei = EXT4_I(inode); 552 struct ext4_ext_path *path = NULL; 553 struct buffer_head *bh; 554 int i = 0, depth, ret = 0; 555 556 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 557 return 0; /* not an extent-mapped inode */ 558 559 down_read(&ei->i_data_sem); 560 depth = ext_depth(inode); 561 562 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), 563 GFP_NOFS); 564 if (path == NULL) { 565 up_read(&ei->i_data_sem); 566 return -ENOMEM; 567 } 568 569 /* Don't cache anything if there are no external extent blocks */ 570 if (depth == 0) 571 goto out; 572 path[0].p_hdr = ext_inode_hdr(inode); 573 ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0); 574 if (ret) 575 goto out; 576 path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr); 577 while (i >= 0) { 578 /* 579 * If this is a leaf block or we've reached the end of 580 * the index block, go up 581 */ 582 if ((i == depth) || 583 path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) { 584 brelse(path[i].p_bh); 585 path[i].p_bh = NULL; 586 i--; 587 continue; 588 } 589 bh = read_extent_tree_block(inode, 590 ext4_idx_pblock(path[i].p_idx++), 591 depth - i - 1, 592 EXT4_EX_FORCE_CACHE); 593 if (IS_ERR(bh)) { 594 ret = PTR_ERR(bh); 595 break; 596 } 597 i++; 598 path[i].p_bh = bh; 599 path[i].p_hdr = ext_block_hdr(bh); 600 path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr); 601 } 602 ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED); 603 out: 604 up_read(&ei->i_data_sem); 605 ext4_ext_drop_refs(path); 606 kfree(path); 607 return ret; 608 } 609 610 #ifdef EXT_DEBUG 611 static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path) 612 { 613 int k, l = path->p_depth; 614 615 ext_debug("path:"); 616 for (k = 0; k <= l; k++, path++) { 617 if (path->p_idx) { 618 ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block), 619 ext4_idx_pblock(path->p_idx)); 620 } else if (path->p_ext) { 621 ext_debug(" %d:[%d]%d:%llu ", 622 le32_to_cpu(path->p_ext->ee_block), 623 ext4_ext_is_uninitialized(path->p_ext), 624 ext4_ext_get_actual_len(path->p_ext), 625 ext4_ext_pblock(path->p_ext)); 626 } else 627 ext_debug(" []"); 628 } 629 ext_debug("\n"); 630 } 631 632 static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path) 633 { 634 int depth = ext_depth(inode); 635 struct ext4_extent_header *eh; 636 struct ext4_extent *ex; 637 int i; 638 639 if (!path) 640 return; 641 642 eh = path[depth].p_hdr; 643 ex = EXT_FIRST_EXTENT(eh); 644 645 ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino); 646 647 for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) { 648 ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block), 649 ext4_ext_is_uninitialized(ex), 650 ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex)); 651 } 652 ext_debug("\n"); 653 } 654 655 static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path, 656 ext4_fsblk_t newblock, int level) 657 { 658 int depth = ext_depth(inode); 659 struct ext4_extent *ex; 660 661 if (depth != level) { 662 struct ext4_extent_idx *idx; 663 idx = path[level].p_idx; 664 while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) { 665 ext_debug("%d: move %d:%llu in new index %llu\n", level, 666 le32_to_cpu(idx->ei_block), 667 ext4_idx_pblock(idx), 668 newblock); 669 idx++; 670 } 671 672 return; 673 } 674 675 ex = path[depth].p_ext; 676 while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) { 677 ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n", 678 le32_to_cpu(ex->ee_block), 679 ext4_ext_pblock(ex), 680 ext4_ext_is_uninitialized(ex), 681 ext4_ext_get_actual_len(ex), 682 newblock); 683 ex++; 684 } 685 } 686 687 #else 688 #define ext4_ext_show_path(inode, path) 689 #define ext4_ext_show_leaf(inode, path) 690 #define ext4_ext_show_move(inode, path, newblock, level) 691 #endif 692 693 void ext4_ext_drop_refs(struct ext4_ext_path *path) 694 { 695 int depth = path->p_depth; 696 int i; 697 698 for (i = 0; i <= depth; i++, path++) 699 if (path->p_bh) { 700 brelse(path->p_bh); 701 path->p_bh = NULL; 702 } 703 } 704 705 /* 706 * ext4_ext_binsearch_idx: 707 * binary search for the closest index of the given block 708 * the header must be checked before calling this 709 */ 710 static void 711 ext4_ext_binsearch_idx(struct inode *inode, 712 struct ext4_ext_path *path, ext4_lblk_t block) 713 { 714 struct ext4_extent_header *eh = path->p_hdr; 715 struct ext4_extent_idx *r, *l, *m; 716 717 718 ext_debug("binsearch for %u(idx): ", block); 719 720 l = EXT_FIRST_INDEX(eh) + 1; 721 r = EXT_LAST_INDEX(eh); 722 while (l <= r) { 723 m = l + (r - l) / 2; 724 if (block < le32_to_cpu(m->ei_block)) 725 r = m - 1; 726 else 727 l = m + 1; 728 ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block), 729 m, le32_to_cpu(m->ei_block), 730 r, le32_to_cpu(r->ei_block)); 731 } 732 733 path->p_idx = l - 1; 734 ext_debug(" -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block), 735 ext4_idx_pblock(path->p_idx)); 736 737 #ifdef CHECK_BINSEARCH 738 { 739 struct ext4_extent_idx *chix, *ix; 740 int k; 741 742 chix = ix = EXT_FIRST_INDEX(eh); 743 for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) { 744 if (k != 0 && 745 le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) { 746 printk(KERN_DEBUG "k=%d, ix=0x%p, " 747 "first=0x%p\n", k, 748 ix, EXT_FIRST_INDEX(eh)); 749 printk(KERN_DEBUG "%u <= %u\n", 750 le32_to_cpu(ix->ei_block), 751 le32_to_cpu(ix[-1].ei_block)); 752 } 753 BUG_ON(k && le32_to_cpu(ix->ei_block) 754 <= le32_to_cpu(ix[-1].ei_block)); 755 if (block < le32_to_cpu(ix->ei_block)) 756 break; 757 chix = ix; 758 } 759 BUG_ON(chix != path->p_idx); 760 } 761 #endif 762 763 } 764 765 /* 766 * ext4_ext_binsearch: 767 * binary search for closest extent of the given block 768 * the header must be checked before calling this 769 */ 770 static void 771 ext4_ext_binsearch(struct inode *inode, 772 struct ext4_ext_path *path, ext4_lblk_t block) 773 { 774 struct ext4_extent_header *eh = path->p_hdr; 775 struct ext4_extent *r, *l, *m; 776 777 if (eh->eh_entries == 0) { 778 /* 779 * this leaf is empty: 780 * we get such a leaf in split/add case 781 */ 782 return; 783 } 784 785 ext_debug("binsearch for %u: ", block); 786 787 l = EXT_FIRST_EXTENT(eh) + 1; 788 r = EXT_LAST_EXTENT(eh); 789 790 while (l <= r) { 791 m = l + (r - l) / 2; 792 if (block < le32_to_cpu(m->ee_block)) 793 r = m - 1; 794 else 795 l = m + 1; 796 ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block), 797 m, le32_to_cpu(m->ee_block), 798 r, le32_to_cpu(r->ee_block)); 799 } 800 801 path->p_ext = l - 1; 802 ext_debug(" -> %d:%llu:[%d]%d ", 803 le32_to_cpu(path->p_ext->ee_block), 804 ext4_ext_pblock(path->p_ext), 805 ext4_ext_is_uninitialized(path->p_ext), 806 ext4_ext_get_actual_len(path->p_ext)); 807 808 #ifdef CHECK_BINSEARCH 809 { 810 struct ext4_extent *chex, *ex; 811 int k; 812 813 chex = ex = EXT_FIRST_EXTENT(eh); 814 for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) { 815 BUG_ON(k && le32_to_cpu(ex->ee_block) 816 <= le32_to_cpu(ex[-1].ee_block)); 817 if (block < le32_to_cpu(ex->ee_block)) 818 break; 819 chex = ex; 820 } 821 BUG_ON(chex != path->p_ext); 822 } 823 #endif 824 825 } 826 827 int ext4_ext_tree_init(handle_t *handle, struct inode *inode) 828 { 829 struct ext4_extent_header *eh; 830 831 eh = ext_inode_hdr(inode); 832 eh->eh_depth = 0; 833 eh->eh_entries = 0; 834 eh->eh_magic = EXT4_EXT_MAGIC; 835 eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0)); 836 ext4_mark_inode_dirty(handle, inode); 837 return 0; 838 } 839 840 struct ext4_ext_path * 841 ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, 842 struct ext4_ext_path *path, int flags) 843 { 844 struct ext4_extent_header *eh; 845 struct buffer_head *bh; 846 short int depth, i, ppos = 0, alloc = 0; 847 int ret; 848 849 eh = ext_inode_hdr(inode); 850 depth = ext_depth(inode); 851 852 /* account possible depth increase */ 853 if (!path) { 854 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2), 855 GFP_NOFS); 856 if (!path) 857 return ERR_PTR(-ENOMEM); 858 alloc = 1; 859 } 860 path[0].p_hdr = eh; 861 path[0].p_bh = NULL; 862 863 i = depth; 864 /* walk through the tree */ 865 while (i) { 866 ext_debug("depth %d: num %d, max %d\n", 867 ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); 868 869 ext4_ext_binsearch_idx(inode, path + ppos, block); 870 path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx); 871 path[ppos].p_depth = i; 872 path[ppos].p_ext = NULL; 873 874 bh = read_extent_tree_block(inode, path[ppos].p_block, --i, 875 flags); 876 if (IS_ERR(bh)) { 877 ret = PTR_ERR(bh); 878 goto err; 879 } 880 881 eh = ext_block_hdr(bh); 882 ppos++; 883 if (unlikely(ppos > depth)) { 884 put_bh(bh); 885 EXT4_ERROR_INODE(inode, 886 "ppos %d > depth %d", ppos, depth); 887 ret = -EIO; 888 goto err; 889 } 890 path[ppos].p_bh = bh; 891 path[ppos].p_hdr = eh; 892 } 893 894 path[ppos].p_depth = i; 895 path[ppos].p_ext = NULL; 896 path[ppos].p_idx = NULL; 897 898 /* find extent */ 899 ext4_ext_binsearch(inode, path + ppos, block); 900 /* if not an empty leaf */ 901 if (path[ppos].p_ext) 902 path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext); 903 904 ext4_ext_show_path(inode, path); 905 906 return path; 907 908 err: 909 ext4_ext_drop_refs(path); 910 if (alloc) 911 kfree(path); 912 return ERR_PTR(ret); 913 } 914 915 /* 916 * ext4_ext_insert_index: 917 * insert new index [@logical;@ptr] into the block at @curp; 918 * check where to insert: before @curp or after @curp 919 */ 920 static int ext4_ext_insert_index(handle_t *handle, struct inode *inode, 921 struct ext4_ext_path *curp, 922 int logical, ext4_fsblk_t ptr) 923 { 924 struct ext4_extent_idx *ix; 925 int len, err; 926 927 err = ext4_ext_get_access(handle, inode, curp); 928 if (err) 929 return err; 930 931 if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) { 932 EXT4_ERROR_INODE(inode, 933 "logical %d == ei_block %d!", 934 logical, le32_to_cpu(curp->p_idx->ei_block)); 935 return -EIO; 936 } 937 938 if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries) 939 >= le16_to_cpu(curp->p_hdr->eh_max))) { 940 EXT4_ERROR_INODE(inode, 941 "eh_entries %d >= eh_max %d!", 942 le16_to_cpu(curp->p_hdr->eh_entries), 943 le16_to_cpu(curp->p_hdr->eh_max)); 944 return -EIO; 945 } 946 947 if (logical > le32_to_cpu(curp->p_idx->ei_block)) { 948 /* insert after */ 949 ext_debug("insert new index %d after: %llu\n", logical, ptr); 950 ix = curp->p_idx + 1; 951 } else { 952 /* insert before */ 953 ext_debug("insert new index %d before: %llu\n", logical, ptr); 954 ix = curp->p_idx; 955 } 956 957 len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1; 958 BUG_ON(len < 0); 959 if (len > 0) { 960 ext_debug("insert new index %d: " 961 "move %d indices from 0x%p to 0x%p\n", 962 logical, len, ix, ix + 1); 963 memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx)); 964 } 965 966 if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) { 967 EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!"); 968 return -EIO; 969 } 970 971 ix->ei_block = cpu_to_le32(logical); 972 ext4_idx_store_pblock(ix, ptr); 973 le16_add_cpu(&curp->p_hdr->eh_entries, 1); 974 975 if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) { 976 EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!"); 977 return -EIO; 978 } 979 980 err = ext4_ext_dirty(handle, inode, curp); 981 ext4_std_error(inode->i_sb, err); 982 983 return err; 984 } 985 986 /* 987 * ext4_ext_split: 988 * inserts new subtree into the path, using free index entry 989 * at depth @at: 990 * - allocates all needed blocks (new leaf and all intermediate index blocks) 991 * - makes decision where to split 992 * - moves remaining extents and index entries (right to the split point) 993 * into the newly allocated blocks 994 * - initializes subtree 995 */ 996 static int ext4_ext_split(handle_t *handle, struct inode *inode, 997 unsigned int flags, 998 struct ext4_ext_path *path, 999 struct ext4_extent *newext, int at) 1000 { 1001 struct buffer_head *bh = NULL; 1002 int depth = ext_depth(inode); 1003 struct ext4_extent_header *neh; 1004 struct ext4_extent_idx *fidx; 1005 int i = at, k, m, a; 1006 ext4_fsblk_t newblock, oldblock; 1007 __le32 border; 1008 ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */ 1009 int err = 0; 1010 1011 /* make decision: where to split? */ 1012 /* FIXME: now decision is simplest: at current extent */ 1013 1014 /* if current leaf will be split, then we should use 1015 * border from split point */ 1016 if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) { 1017 EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!"); 1018 return -EIO; 1019 } 1020 if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) { 1021 border = path[depth].p_ext[1].ee_block; 1022 ext_debug("leaf will be split." 1023 " next leaf starts at %d\n", 1024 le32_to_cpu(border)); 1025 } else { 1026 border = newext->ee_block; 1027 ext_debug("leaf will be added." 1028 " next leaf starts at %d\n", 1029 le32_to_cpu(border)); 1030 } 1031 1032 /* 1033 * If error occurs, then we break processing 1034 * and mark filesystem read-only. index won't 1035 * be inserted and tree will be in consistent 1036 * state. Next mount will repair buffers too. 1037 */ 1038 1039 /* 1040 * Get array to track all allocated blocks. 1041 * We need this to handle errors and free blocks 1042 * upon them. 1043 */ 1044 ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS); 1045 if (!ablocks) 1046 return -ENOMEM; 1047 1048 /* allocate all needed blocks */ 1049 ext_debug("allocate %d blocks for indexes/leaf\n", depth - at); 1050 for (a = 0; a < depth - at; a++) { 1051 newblock = ext4_ext_new_meta_block(handle, inode, path, 1052 newext, &err, flags); 1053 if (newblock == 0) 1054 goto cleanup; 1055 ablocks[a] = newblock; 1056 } 1057 1058 /* initialize new leaf */ 1059 newblock = ablocks[--a]; 1060 if (unlikely(newblock == 0)) { 1061 EXT4_ERROR_INODE(inode, "newblock == 0!"); 1062 err = -EIO; 1063 goto cleanup; 1064 } 1065 bh = sb_getblk(inode->i_sb, newblock); 1066 if (unlikely(!bh)) { 1067 err = -ENOMEM; 1068 goto cleanup; 1069 } 1070 lock_buffer(bh); 1071 1072 err = ext4_journal_get_create_access(handle, bh); 1073 if (err) 1074 goto cleanup; 1075 1076 neh = ext_block_hdr(bh); 1077 neh->eh_entries = 0; 1078 neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); 1079 neh->eh_magic = EXT4_EXT_MAGIC; 1080 neh->eh_depth = 0; 1081 1082 /* move remainder of path[depth] to the new leaf */ 1083 if (unlikely(path[depth].p_hdr->eh_entries != 1084 path[depth].p_hdr->eh_max)) { 1085 EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!", 1086 path[depth].p_hdr->eh_entries, 1087 path[depth].p_hdr->eh_max); 1088 err = -EIO; 1089 goto cleanup; 1090 } 1091 /* start copy from next extent */ 1092 m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++; 1093 ext4_ext_show_move(inode, path, newblock, depth); 1094 if (m) { 1095 struct ext4_extent *ex; 1096 ex = EXT_FIRST_EXTENT(neh); 1097 memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m); 1098 le16_add_cpu(&neh->eh_entries, m); 1099 } 1100 1101 ext4_extent_block_csum_set(inode, neh); 1102 set_buffer_uptodate(bh); 1103 unlock_buffer(bh); 1104 1105 err = ext4_handle_dirty_metadata(handle, inode, bh); 1106 if (err) 1107 goto cleanup; 1108 brelse(bh); 1109 bh = NULL; 1110 1111 /* correct old leaf */ 1112 if (m) { 1113 err = ext4_ext_get_access(handle, inode, path + depth); 1114 if (err) 1115 goto cleanup; 1116 le16_add_cpu(&path[depth].p_hdr->eh_entries, -m); 1117 err = ext4_ext_dirty(handle, inode, path + depth); 1118 if (err) 1119 goto cleanup; 1120 1121 } 1122 1123 /* create intermediate indexes */ 1124 k = depth - at - 1; 1125 if (unlikely(k < 0)) { 1126 EXT4_ERROR_INODE(inode, "k %d < 0!", k); 1127 err = -EIO; 1128 goto cleanup; 1129 } 1130 if (k) 1131 ext_debug("create %d intermediate indices\n", k); 1132 /* insert new index into current index block */ 1133 /* current depth stored in i var */ 1134 i = depth - 1; 1135 while (k--) { 1136 oldblock = newblock; 1137 newblock = ablocks[--a]; 1138 bh = sb_getblk(inode->i_sb, newblock); 1139 if (unlikely(!bh)) { 1140 err = -ENOMEM; 1141 goto cleanup; 1142 } 1143 lock_buffer(bh); 1144 1145 err = ext4_journal_get_create_access(handle, bh); 1146 if (err) 1147 goto cleanup; 1148 1149 neh = ext_block_hdr(bh); 1150 neh->eh_entries = cpu_to_le16(1); 1151 neh->eh_magic = EXT4_EXT_MAGIC; 1152 neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); 1153 neh->eh_depth = cpu_to_le16(depth - i); 1154 fidx = EXT_FIRST_INDEX(neh); 1155 fidx->ei_block = border; 1156 ext4_idx_store_pblock(fidx, oldblock); 1157 1158 ext_debug("int.index at %d (block %llu): %u -> %llu\n", 1159 i, newblock, le32_to_cpu(border), oldblock); 1160 1161 /* move remainder of path[i] to the new index block */ 1162 if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) != 1163 EXT_LAST_INDEX(path[i].p_hdr))) { 1164 EXT4_ERROR_INODE(inode, 1165 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!", 1166 le32_to_cpu(path[i].p_ext->ee_block)); 1167 err = -EIO; 1168 goto cleanup; 1169 } 1170 /* start copy indexes */ 1171 m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++; 1172 ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, 1173 EXT_MAX_INDEX(path[i].p_hdr)); 1174 ext4_ext_show_move(inode, path, newblock, i); 1175 if (m) { 1176 memmove(++fidx, path[i].p_idx, 1177 sizeof(struct ext4_extent_idx) * m); 1178 le16_add_cpu(&neh->eh_entries, m); 1179 } 1180 ext4_extent_block_csum_set(inode, neh); 1181 set_buffer_uptodate(bh); 1182 unlock_buffer(bh); 1183 1184 err = ext4_handle_dirty_metadata(handle, inode, bh); 1185 if (err) 1186 goto cleanup; 1187 brelse(bh); 1188 bh = NULL; 1189 1190 /* correct old index */ 1191 if (m) { 1192 err = ext4_ext_get_access(handle, inode, path + i); 1193 if (err) 1194 goto cleanup; 1195 le16_add_cpu(&path[i].p_hdr->eh_entries, -m); 1196 err = ext4_ext_dirty(handle, inode, path + i); 1197 if (err) 1198 goto cleanup; 1199 } 1200 1201 i--; 1202 } 1203 1204 /* insert new index */ 1205 err = ext4_ext_insert_index(handle, inode, path + at, 1206 le32_to_cpu(border), newblock); 1207 1208 cleanup: 1209 if (bh) { 1210 if (buffer_locked(bh)) 1211 unlock_buffer(bh); 1212 brelse(bh); 1213 } 1214 1215 if (err) { 1216 /* free all allocated blocks in error case */ 1217 for (i = 0; i < depth; i++) { 1218 if (!ablocks[i]) 1219 continue; 1220 ext4_free_blocks(handle, inode, NULL, ablocks[i], 1, 1221 EXT4_FREE_BLOCKS_METADATA); 1222 } 1223 } 1224 kfree(ablocks); 1225 1226 return err; 1227 } 1228 1229 /* 1230 * ext4_ext_grow_indepth: 1231 * implements tree growing procedure: 1232 * - allocates new block 1233 * - moves top-level data (index block or leaf) into the new block 1234 * - initializes new top-level, creating index that points to the 1235 * just created block 1236 */ 1237 static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, 1238 unsigned int flags, 1239 struct ext4_extent *newext) 1240 { 1241 struct ext4_extent_header *neh; 1242 struct buffer_head *bh; 1243 ext4_fsblk_t newblock; 1244 int err = 0; 1245 1246 newblock = ext4_ext_new_meta_block(handle, inode, NULL, 1247 newext, &err, flags); 1248 if (newblock == 0) 1249 return err; 1250 1251 bh = sb_getblk(inode->i_sb, newblock); 1252 if (unlikely(!bh)) 1253 return -ENOMEM; 1254 lock_buffer(bh); 1255 1256 err = ext4_journal_get_create_access(handle, bh); 1257 if (err) { 1258 unlock_buffer(bh); 1259 goto out; 1260 } 1261 1262 /* move top-level index/leaf into new block */ 1263 memmove(bh->b_data, EXT4_I(inode)->i_data, 1264 sizeof(EXT4_I(inode)->i_data)); 1265 1266 /* set size of new block */ 1267 neh = ext_block_hdr(bh); 1268 /* old root could have indexes or leaves 1269 * so calculate e_max right way */ 1270 if (ext_depth(inode)) 1271 neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); 1272 else 1273 neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); 1274 neh->eh_magic = EXT4_EXT_MAGIC; 1275 ext4_extent_block_csum_set(inode, neh); 1276 set_buffer_uptodate(bh); 1277 unlock_buffer(bh); 1278 1279 err = ext4_handle_dirty_metadata(handle, inode, bh); 1280 if (err) 1281 goto out; 1282 1283 /* Update top-level index: num,max,pointer */ 1284 neh = ext_inode_hdr(inode); 1285 neh->eh_entries = cpu_to_le16(1); 1286 ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock); 1287 if (neh->eh_depth == 0) { 1288 /* Root extent block becomes index block */ 1289 neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0)); 1290 EXT_FIRST_INDEX(neh)->ei_block = 1291 EXT_FIRST_EXTENT(neh)->ee_block; 1292 } 1293 ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n", 1294 le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max), 1295 le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block), 1296 ext4_idx_pblock(EXT_FIRST_INDEX(neh))); 1297 1298 le16_add_cpu(&neh->eh_depth, 1); 1299 ext4_mark_inode_dirty(handle, inode); 1300 out: 1301 brelse(bh); 1302 1303 return err; 1304 } 1305 1306 /* 1307 * ext4_ext_create_new_leaf: 1308 * finds empty index and adds new leaf. 1309 * if no free index is found, then it requests in-depth growing. 1310 */ 1311 static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, 1312 unsigned int mb_flags, 1313 unsigned int gb_flags, 1314 struct ext4_ext_path *path, 1315 struct ext4_extent *newext) 1316 { 1317 struct ext4_ext_path *curp; 1318 int depth, i, err = 0; 1319 1320 repeat: 1321 i = depth = ext_depth(inode); 1322 1323 /* walk up to the tree and look for free index entry */ 1324 curp = path + depth; 1325 while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) { 1326 i--; 1327 curp--; 1328 } 1329 1330 /* we use already allocated block for index block, 1331 * so subsequent data blocks should be contiguous */ 1332 if (EXT_HAS_FREE_INDEX(curp)) { 1333 /* if we found index with free entry, then use that 1334 * entry: create all needed subtree and add new leaf */ 1335 err = ext4_ext_split(handle, inode, mb_flags, path, newext, i); 1336 if (err) 1337 goto out; 1338 1339 /* refill path */ 1340 ext4_ext_drop_refs(path); 1341 path = ext4_ext_find_extent(inode, 1342 (ext4_lblk_t)le32_to_cpu(newext->ee_block), 1343 path, gb_flags); 1344 if (IS_ERR(path)) 1345 err = PTR_ERR(path); 1346 } else { 1347 /* tree is full, time to grow in depth */ 1348 err = ext4_ext_grow_indepth(handle, inode, mb_flags, newext); 1349 if (err) 1350 goto out; 1351 1352 /* refill path */ 1353 ext4_ext_drop_refs(path); 1354 path = ext4_ext_find_extent(inode, 1355 (ext4_lblk_t)le32_to_cpu(newext->ee_block), 1356 path, gb_flags); 1357 if (IS_ERR(path)) { 1358 err = PTR_ERR(path); 1359 goto out; 1360 } 1361 1362 /* 1363 * only first (depth 0 -> 1) produces free space; 1364 * in all other cases we have to split the grown tree 1365 */ 1366 depth = ext_depth(inode); 1367 if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) { 1368 /* now we need to split */ 1369 goto repeat; 1370 } 1371 } 1372 1373 out: 1374 return err; 1375 } 1376 1377 /* 1378 * search the closest allocated block to the left for *logical 1379 * and returns it at @logical + it's physical address at @phys 1380 * if *logical is the smallest allocated block, the function 1381 * returns 0 at @phys 1382 * return value contains 0 (success) or error code 1383 */ 1384 static int ext4_ext_search_left(struct inode *inode, 1385 struct ext4_ext_path *path, 1386 ext4_lblk_t *logical, ext4_fsblk_t *phys) 1387 { 1388 struct ext4_extent_idx *ix; 1389 struct ext4_extent *ex; 1390 int depth, ee_len; 1391 1392 if (unlikely(path == NULL)) { 1393 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); 1394 return -EIO; 1395 } 1396 depth = path->p_depth; 1397 *phys = 0; 1398 1399 if (depth == 0 && path->p_ext == NULL) 1400 return 0; 1401 1402 /* usually extent in the path covers blocks smaller 1403 * then *logical, but it can be that extent is the 1404 * first one in the file */ 1405 1406 ex = path[depth].p_ext; 1407 ee_len = ext4_ext_get_actual_len(ex); 1408 if (*logical < le32_to_cpu(ex->ee_block)) { 1409 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { 1410 EXT4_ERROR_INODE(inode, 1411 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!", 1412 *logical, le32_to_cpu(ex->ee_block)); 1413 return -EIO; 1414 } 1415 while (--depth >= 0) { 1416 ix = path[depth].p_idx; 1417 if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { 1418 EXT4_ERROR_INODE(inode, 1419 "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!", 1420 ix != NULL ? le32_to_cpu(ix->ei_block) : 0, 1421 EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ? 1422 le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0, 1423 depth); 1424 return -EIO; 1425 } 1426 } 1427 return 0; 1428 } 1429 1430 if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { 1431 EXT4_ERROR_INODE(inode, 1432 "logical %d < ee_block %d + ee_len %d!", 1433 *logical, le32_to_cpu(ex->ee_block), ee_len); 1434 return -EIO; 1435 } 1436 1437 *logical = le32_to_cpu(ex->ee_block) + ee_len - 1; 1438 *phys = ext4_ext_pblock(ex) + ee_len - 1; 1439 return 0; 1440 } 1441 1442 /* 1443 * search the closest allocated block to the right for *logical 1444 * and returns it at @logical + it's physical address at @phys 1445 * if *logical is the largest allocated block, the function 1446 * returns 0 at @phys 1447 * return value contains 0 (success) or error code 1448 */ 1449 static int ext4_ext_search_right(struct inode *inode, 1450 struct ext4_ext_path *path, 1451 ext4_lblk_t *logical, ext4_fsblk_t *phys, 1452 struct ext4_extent **ret_ex) 1453 { 1454 struct buffer_head *bh = NULL; 1455 struct ext4_extent_header *eh; 1456 struct ext4_extent_idx *ix; 1457 struct ext4_extent *ex; 1458 ext4_fsblk_t block; 1459 int depth; /* Note, NOT eh_depth; depth from top of tree */ 1460 int ee_len; 1461 1462 if (unlikely(path == NULL)) { 1463 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); 1464 return -EIO; 1465 } 1466 depth = path->p_depth; 1467 *phys = 0; 1468 1469 if (depth == 0 && path->p_ext == NULL) 1470 return 0; 1471 1472 /* usually extent in the path covers blocks smaller 1473 * then *logical, but it can be that extent is the 1474 * first one in the file */ 1475 1476 ex = path[depth].p_ext; 1477 ee_len = ext4_ext_get_actual_len(ex); 1478 if (*logical < le32_to_cpu(ex->ee_block)) { 1479 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { 1480 EXT4_ERROR_INODE(inode, 1481 "first_extent(path[%d].p_hdr) != ex", 1482 depth); 1483 return -EIO; 1484 } 1485 while (--depth >= 0) { 1486 ix = path[depth].p_idx; 1487 if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { 1488 EXT4_ERROR_INODE(inode, 1489 "ix != EXT_FIRST_INDEX *logical %d!", 1490 *logical); 1491 return -EIO; 1492 } 1493 } 1494 goto found_extent; 1495 } 1496 1497 if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { 1498 EXT4_ERROR_INODE(inode, 1499 "logical %d < ee_block %d + ee_len %d!", 1500 *logical, le32_to_cpu(ex->ee_block), ee_len); 1501 return -EIO; 1502 } 1503 1504 if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { 1505 /* next allocated block in this leaf */ 1506 ex++; 1507 goto found_extent; 1508 } 1509 1510 /* go up and search for index to the right */ 1511 while (--depth >= 0) { 1512 ix = path[depth].p_idx; 1513 if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) 1514 goto got_index; 1515 } 1516 1517 /* we've gone up to the root and found no index to the right */ 1518 return 0; 1519 1520 got_index: 1521 /* we've found index to the right, let's 1522 * follow it and find the closest allocated 1523 * block to the right */ 1524 ix++; 1525 block = ext4_idx_pblock(ix); 1526 while (++depth < path->p_depth) { 1527 /* subtract from p_depth to get proper eh_depth */ 1528 bh = read_extent_tree_block(inode, block, 1529 path->p_depth - depth, 0); 1530 if (IS_ERR(bh)) 1531 return PTR_ERR(bh); 1532 eh = ext_block_hdr(bh); 1533 ix = EXT_FIRST_INDEX(eh); 1534 block = ext4_idx_pblock(ix); 1535 put_bh(bh); 1536 } 1537 1538 bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0); 1539 if (IS_ERR(bh)) 1540 return PTR_ERR(bh); 1541 eh = ext_block_hdr(bh); 1542 ex = EXT_FIRST_EXTENT(eh); 1543 found_extent: 1544 *logical = le32_to_cpu(ex->ee_block); 1545 *phys = ext4_ext_pblock(ex); 1546 *ret_ex = ex; 1547 if (bh) 1548 put_bh(bh); 1549 return 0; 1550 } 1551 1552 /* 1553 * ext4_ext_next_allocated_block: 1554 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS. 1555 * NOTE: it considers block number from index entry as 1556 * allocated block. Thus, index entries have to be consistent 1557 * with leaves. 1558 */ 1559 static ext4_lblk_t 1560 ext4_ext_next_allocated_block(struct ext4_ext_path *path) 1561 { 1562 int depth; 1563 1564 BUG_ON(path == NULL); 1565 depth = path->p_depth; 1566 1567 if (depth == 0 && path->p_ext == NULL) 1568 return EXT_MAX_BLOCKS; 1569 1570 while (depth >= 0) { 1571 if (depth == path->p_depth) { 1572 /* leaf */ 1573 if (path[depth].p_ext && 1574 path[depth].p_ext != 1575 EXT_LAST_EXTENT(path[depth].p_hdr)) 1576 return le32_to_cpu(path[depth].p_ext[1].ee_block); 1577 } else { 1578 /* index */ 1579 if (path[depth].p_idx != 1580 EXT_LAST_INDEX(path[depth].p_hdr)) 1581 return le32_to_cpu(path[depth].p_idx[1].ei_block); 1582 } 1583 depth--; 1584 } 1585 1586 return EXT_MAX_BLOCKS; 1587 } 1588 1589 /* 1590 * ext4_ext_next_leaf_block: 1591 * returns first allocated block from next leaf or EXT_MAX_BLOCKS 1592 */ 1593 static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path) 1594 { 1595 int depth; 1596 1597 BUG_ON(path == NULL); 1598 depth = path->p_depth; 1599 1600 /* zero-tree has no leaf blocks at all */ 1601 if (depth == 0) 1602 return EXT_MAX_BLOCKS; 1603 1604 /* go to index block */ 1605 depth--; 1606 1607 while (depth >= 0) { 1608 if (path[depth].p_idx != 1609 EXT_LAST_INDEX(path[depth].p_hdr)) 1610 return (ext4_lblk_t) 1611 le32_to_cpu(path[depth].p_idx[1].ei_block); 1612 depth--; 1613 } 1614 1615 return EXT_MAX_BLOCKS; 1616 } 1617 1618 /* 1619 * ext4_ext_correct_indexes: 1620 * if leaf gets modified and modified extent is first in the leaf, 1621 * then we have to correct all indexes above. 1622 * TODO: do we need to correct tree in all cases? 1623 */ 1624 static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, 1625 struct ext4_ext_path *path) 1626 { 1627 struct ext4_extent_header *eh; 1628 int depth = ext_depth(inode); 1629 struct ext4_extent *ex; 1630 __le32 border; 1631 int k, err = 0; 1632 1633 eh = path[depth].p_hdr; 1634 ex = path[depth].p_ext; 1635 1636 if (unlikely(ex == NULL || eh == NULL)) { 1637 EXT4_ERROR_INODE(inode, 1638 "ex %p == NULL or eh %p == NULL", ex, eh); 1639 return -EIO; 1640 } 1641 1642 if (depth == 0) { 1643 /* there is no tree at all */ 1644 return 0; 1645 } 1646 1647 if (ex != EXT_FIRST_EXTENT(eh)) { 1648 /* we correct tree if first leaf got modified only */ 1649 return 0; 1650 } 1651 1652 /* 1653 * TODO: we need correction if border is smaller than current one 1654 */ 1655 k = depth - 1; 1656 border = path[depth].p_ext->ee_block; 1657 err = ext4_ext_get_access(handle, inode, path + k); 1658 if (err) 1659 return err; 1660 path[k].p_idx->ei_block = border; 1661 err = ext4_ext_dirty(handle, inode, path + k); 1662 if (err) 1663 return err; 1664 1665 while (k--) { 1666 /* change all left-side indexes */ 1667 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) 1668 break; 1669 err = ext4_ext_get_access(handle, inode, path + k); 1670 if (err) 1671 break; 1672 path[k].p_idx->ei_block = border; 1673 err = ext4_ext_dirty(handle, inode, path + k); 1674 if (err) 1675 break; 1676 } 1677 1678 return err; 1679 } 1680 1681 int 1682 ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, 1683 struct ext4_extent *ex2) 1684 { 1685 unsigned short ext1_ee_len, ext2_ee_len; 1686 1687 /* 1688 * Make sure that both extents are initialized. We don't merge 1689 * uninitialized extents so that we can be sure that end_io code has 1690 * the extent that was written properly split out and conversion to 1691 * initialized is trivial. 1692 */ 1693 if (ext4_ext_is_uninitialized(ex1) != ext4_ext_is_uninitialized(ex2)) 1694 return 0; 1695 1696 ext1_ee_len = ext4_ext_get_actual_len(ex1); 1697 ext2_ee_len = ext4_ext_get_actual_len(ex2); 1698 1699 if (le32_to_cpu(ex1->ee_block) + ext1_ee_len != 1700 le32_to_cpu(ex2->ee_block)) 1701 return 0; 1702 1703 /* 1704 * To allow future support for preallocated extents to be added 1705 * as an RO_COMPAT feature, refuse to merge to extents if 1706 * this can result in the top bit of ee_len being set. 1707 */ 1708 if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN) 1709 return 0; 1710 if (ext4_ext_is_uninitialized(ex1) && 1711 (ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN) || 1712 atomic_read(&EXT4_I(inode)->i_unwritten) || 1713 (ext1_ee_len + ext2_ee_len > EXT_UNINIT_MAX_LEN))) 1714 return 0; 1715 #ifdef AGGRESSIVE_TEST 1716 if (ext1_ee_len >= 4) 1717 return 0; 1718 #endif 1719 1720 if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2)) 1721 return 1; 1722 return 0; 1723 } 1724 1725 /* 1726 * This function tries to merge the "ex" extent to the next extent in the tree. 1727 * It always tries to merge towards right. If you want to merge towards 1728 * left, pass "ex - 1" as argument instead of "ex". 1729 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns 1730 * 1 if they got merged. 1731 */ 1732 static int ext4_ext_try_to_merge_right(struct inode *inode, 1733 struct ext4_ext_path *path, 1734 struct ext4_extent *ex) 1735 { 1736 struct ext4_extent_header *eh; 1737 unsigned int depth, len; 1738 int merge_done = 0, uninit; 1739 1740 depth = ext_depth(inode); 1741 BUG_ON(path[depth].p_hdr == NULL); 1742 eh = path[depth].p_hdr; 1743 1744 while (ex < EXT_LAST_EXTENT(eh)) { 1745 if (!ext4_can_extents_be_merged(inode, ex, ex + 1)) 1746 break; 1747 /* merge with next extent! */ 1748 uninit = ext4_ext_is_uninitialized(ex); 1749 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 1750 + ext4_ext_get_actual_len(ex + 1)); 1751 if (uninit) 1752 ext4_ext_mark_uninitialized(ex); 1753 1754 if (ex + 1 < EXT_LAST_EXTENT(eh)) { 1755 len = (EXT_LAST_EXTENT(eh) - ex - 1) 1756 * sizeof(struct ext4_extent); 1757 memmove(ex + 1, ex + 2, len); 1758 } 1759 le16_add_cpu(&eh->eh_entries, -1); 1760 merge_done = 1; 1761 WARN_ON(eh->eh_entries == 0); 1762 if (!eh->eh_entries) 1763 EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!"); 1764 } 1765 1766 return merge_done; 1767 } 1768 1769 /* 1770 * This function does a very simple check to see if we can collapse 1771 * an extent tree with a single extent tree leaf block into the inode. 1772 */ 1773 static void ext4_ext_try_to_merge_up(handle_t *handle, 1774 struct inode *inode, 1775 struct ext4_ext_path *path) 1776 { 1777 size_t s; 1778 unsigned max_root = ext4_ext_space_root(inode, 0); 1779 ext4_fsblk_t blk; 1780 1781 if ((path[0].p_depth != 1) || 1782 (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) || 1783 (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root)) 1784 return; 1785 1786 /* 1787 * We need to modify the block allocation bitmap and the block 1788 * group descriptor to release the extent tree block. If we 1789 * can't get the journal credits, give up. 1790 */ 1791 if (ext4_journal_extend(handle, 2)) 1792 return; 1793 1794 /* 1795 * Copy the extent data up to the inode 1796 */ 1797 blk = ext4_idx_pblock(path[0].p_idx); 1798 s = le16_to_cpu(path[1].p_hdr->eh_entries) * 1799 sizeof(struct ext4_extent_idx); 1800 s += sizeof(struct ext4_extent_header); 1801 1802 memcpy(path[0].p_hdr, path[1].p_hdr, s); 1803 path[0].p_depth = 0; 1804 path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) + 1805 (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr)); 1806 path[0].p_hdr->eh_max = cpu_to_le16(max_root); 1807 1808 brelse(path[1].p_bh); 1809 ext4_free_blocks(handle, inode, NULL, blk, 1, 1810 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET | 1811 EXT4_FREE_BLOCKS_RESERVE); 1812 } 1813 1814 /* 1815 * This function tries to merge the @ex extent to neighbours in the tree. 1816 * return 1 if merge left else 0. 1817 */ 1818 static void ext4_ext_try_to_merge(handle_t *handle, 1819 struct inode *inode, 1820 struct ext4_ext_path *path, 1821 struct ext4_extent *ex) { 1822 struct ext4_extent_header *eh; 1823 unsigned int depth; 1824 int merge_done = 0; 1825 1826 depth = ext_depth(inode); 1827 BUG_ON(path[depth].p_hdr == NULL); 1828 eh = path[depth].p_hdr; 1829 1830 if (ex > EXT_FIRST_EXTENT(eh)) 1831 merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1); 1832 1833 if (!merge_done) 1834 (void) ext4_ext_try_to_merge_right(inode, path, ex); 1835 1836 ext4_ext_try_to_merge_up(handle, inode, path); 1837 } 1838 1839 /* 1840 * check if a portion of the "newext" extent overlaps with an 1841 * existing extent. 1842 * 1843 * If there is an overlap discovered, it updates the length of the newext 1844 * such that there will be no overlap, and then returns 1. 1845 * If there is no overlap found, it returns 0. 1846 */ 1847 static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, 1848 struct inode *inode, 1849 struct ext4_extent *newext, 1850 struct ext4_ext_path *path) 1851 { 1852 ext4_lblk_t b1, b2; 1853 unsigned int depth, len1; 1854 unsigned int ret = 0; 1855 1856 b1 = le32_to_cpu(newext->ee_block); 1857 len1 = ext4_ext_get_actual_len(newext); 1858 depth = ext_depth(inode); 1859 if (!path[depth].p_ext) 1860 goto out; 1861 b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block)); 1862 1863 /* 1864 * get the next allocated block if the extent in the path 1865 * is before the requested block(s) 1866 */ 1867 if (b2 < b1) { 1868 b2 = ext4_ext_next_allocated_block(path); 1869 if (b2 == EXT_MAX_BLOCKS) 1870 goto out; 1871 b2 = EXT4_LBLK_CMASK(sbi, b2); 1872 } 1873 1874 /* check for wrap through zero on extent logical start block*/ 1875 if (b1 + len1 < b1) { 1876 len1 = EXT_MAX_BLOCKS - b1; 1877 newext->ee_len = cpu_to_le16(len1); 1878 ret = 1; 1879 } 1880 1881 /* check for overlap */ 1882 if (b1 + len1 > b2) { 1883 newext->ee_len = cpu_to_le16(b2 - b1); 1884 ret = 1; 1885 } 1886 out: 1887 return ret; 1888 } 1889 1890 /* 1891 * ext4_ext_insert_extent: 1892 * tries to merge requsted extent into the existing extent or 1893 * inserts requested extent as new one into the tree, 1894 * creating new leaf in the no-space case. 1895 */ 1896 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, 1897 struct ext4_ext_path *path, 1898 struct ext4_extent *newext, int gb_flags) 1899 { 1900 struct ext4_extent_header *eh; 1901 struct ext4_extent *ex, *fex; 1902 struct ext4_extent *nearex; /* nearest extent */ 1903 struct ext4_ext_path *npath = NULL; 1904 int depth, len, err; 1905 ext4_lblk_t next; 1906 int mb_flags = 0, uninit; 1907 1908 if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { 1909 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); 1910 return -EIO; 1911 } 1912 depth = ext_depth(inode); 1913 ex = path[depth].p_ext; 1914 eh = path[depth].p_hdr; 1915 if (unlikely(path[depth].p_hdr == NULL)) { 1916 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 1917 return -EIO; 1918 } 1919 1920 /* try to insert block into found extent and return */ 1921 if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) { 1922 1923 /* 1924 * Try to see whether we should rather test the extent on 1925 * right from ex, or from the left of ex. This is because 1926 * ext4_ext_find_extent() can return either extent on the 1927 * left, or on the right from the searched position. This 1928 * will make merging more effective. 1929 */ 1930 if (ex < EXT_LAST_EXTENT(eh) && 1931 (le32_to_cpu(ex->ee_block) + 1932 ext4_ext_get_actual_len(ex) < 1933 le32_to_cpu(newext->ee_block))) { 1934 ex += 1; 1935 goto prepend; 1936 } else if ((ex > EXT_FIRST_EXTENT(eh)) && 1937 (le32_to_cpu(newext->ee_block) + 1938 ext4_ext_get_actual_len(newext) < 1939 le32_to_cpu(ex->ee_block))) 1940 ex -= 1; 1941 1942 /* Try to append newex to the ex */ 1943 if (ext4_can_extents_be_merged(inode, ex, newext)) { 1944 ext_debug("append [%d]%d block to %u:[%d]%d" 1945 "(from %llu)\n", 1946 ext4_ext_is_uninitialized(newext), 1947 ext4_ext_get_actual_len(newext), 1948 le32_to_cpu(ex->ee_block), 1949 ext4_ext_is_uninitialized(ex), 1950 ext4_ext_get_actual_len(ex), 1951 ext4_ext_pblock(ex)); 1952 err = ext4_ext_get_access(handle, inode, 1953 path + depth); 1954 if (err) 1955 return err; 1956 uninit = ext4_ext_is_uninitialized(ex); 1957 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 1958 + ext4_ext_get_actual_len(newext)); 1959 if (uninit) 1960 ext4_ext_mark_uninitialized(ex); 1961 eh = path[depth].p_hdr; 1962 nearex = ex; 1963 goto merge; 1964 } 1965 1966 prepend: 1967 /* Try to prepend newex to the ex */ 1968 if (ext4_can_extents_be_merged(inode, newext, ex)) { 1969 ext_debug("prepend %u[%d]%d block to %u:[%d]%d" 1970 "(from %llu)\n", 1971 le32_to_cpu(newext->ee_block), 1972 ext4_ext_is_uninitialized(newext), 1973 ext4_ext_get_actual_len(newext), 1974 le32_to_cpu(ex->ee_block), 1975 ext4_ext_is_uninitialized(ex), 1976 ext4_ext_get_actual_len(ex), 1977 ext4_ext_pblock(ex)); 1978 err = ext4_ext_get_access(handle, inode, 1979 path + depth); 1980 if (err) 1981 return err; 1982 1983 uninit = ext4_ext_is_uninitialized(ex); 1984 ex->ee_block = newext->ee_block; 1985 ext4_ext_store_pblock(ex, ext4_ext_pblock(newext)); 1986 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 1987 + ext4_ext_get_actual_len(newext)); 1988 if (uninit) 1989 ext4_ext_mark_uninitialized(ex); 1990 eh = path[depth].p_hdr; 1991 nearex = ex; 1992 goto merge; 1993 } 1994 } 1995 1996 depth = ext_depth(inode); 1997 eh = path[depth].p_hdr; 1998 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) 1999 goto has_space; 2000 2001 /* probably next leaf has space for us? */ 2002 fex = EXT_LAST_EXTENT(eh); 2003 next = EXT_MAX_BLOCKS; 2004 if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)) 2005 next = ext4_ext_next_leaf_block(path); 2006 if (next != EXT_MAX_BLOCKS) { 2007 ext_debug("next leaf block - %u\n", next); 2008 BUG_ON(npath != NULL); 2009 npath = ext4_ext_find_extent(inode, next, NULL, 0); 2010 if (IS_ERR(npath)) 2011 return PTR_ERR(npath); 2012 BUG_ON(npath->p_depth != path->p_depth); 2013 eh = npath[depth].p_hdr; 2014 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { 2015 ext_debug("next leaf isn't full(%d)\n", 2016 le16_to_cpu(eh->eh_entries)); 2017 path = npath; 2018 goto has_space; 2019 } 2020 ext_debug("next leaf has no free space(%d,%d)\n", 2021 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); 2022 } 2023 2024 /* 2025 * There is no free space in the found leaf. 2026 * We're gonna add a new leaf in the tree. 2027 */ 2028 if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) 2029 mb_flags = EXT4_MB_USE_RESERVED; 2030 err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags, 2031 path, newext); 2032 if (err) 2033 goto cleanup; 2034 depth = ext_depth(inode); 2035 eh = path[depth].p_hdr; 2036 2037 has_space: 2038 nearex = path[depth].p_ext; 2039 2040 err = ext4_ext_get_access(handle, inode, path + depth); 2041 if (err) 2042 goto cleanup; 2043 2044 if (!nearex) { 2045 /* there is no extent in this leaf, create first one */ 2046 ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n", 2047 le32_to_cpu(newext->ee_block), 2048 ext4_ext_pblock(newext), 2049 ext4_ext_is_uninitialized(newext), 2050 ext4_ext_get_actual_len(newext)); 2051 nearex = EXT_FIRST_EXTENT(eh); 2052 } else { 2053 if (le32_to_cpu(newext->ee_block) 2054 > le32_to_cpu(nearex->ee_block)) { 2055 /* Insert after */ 2056 ext_debug("insert %u:%llu:[%d]%d before: " 2057 "nearest %p\n", 2058 le32_to_cpu(newext->ee_block), 2059 ext4_ext_pblock(newext), 2060 ext4_ext_is_uninitialized(newext), 2061 ext4_ext_get_actual_len(newext), 2062 nearex); 2063 nearex++; 2064 } else { 2065 /* Insert before */ 2066 BUG_ON(newext->ee_block == nearex->ee_block); 2067 ext_debug("insert %u:%llu:[%d]%d after: " 2068 "nearest %p\n", 2069 le32_to_cpu(newext->ee_block), 2070 ext4_ext_pblock(newext), 2071 ext4_ext_is_uninitialized(newext), 2072 ext4_ext_get_actual_len(newext), 2073 nearex); 2074 } 2075 len = EXT_LAST_EXTENT(eh) - nearex + 1; 2076 if (len > 0) { 2077 ext_debug("insert %u:%llu:[%d]%d: " 2078 "move %d extents from 0x%p to 0x%p\n", 2079 le32_to_cpu(newext->ee_block), 2080 ext4_ext_pblock(newext), 2081 ext4_ext_is_uninitialized(newext), 2082 ext4_ext_get_actual_len(newext), 2083 len, nearex, nearex + 1); 2084 memmove(nearex + 1, nearex, 2085 len * sizeof(struct ext4_extent)); 2086 } 2087 } 2088 2089 le16_add_cpu(&eh->eh_entries, 1); 2090 path[depth].p_ext = nearex; 2091 nearex->ee_block = newext->ee_block; 2092 ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext)); 2093 nearex->ee_len = newext->ee_len; 2094 2095 merge: 2096 /* try to merge extents */ 2097 if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) 2098 ext4_ext_try_to_merge(handle, inode, path, nearex); 2099 2100 2101 /* time to correct all indexes above */ 2102 err = ext4_ext_correct_indexes(handle, inode, path); 2103 if (err) 2104 goto cleanup; 2105 2106 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 2107 2108 cleanup: 2109 if (npath) { 2110 ext4_ext_drop_refs(npath); 2111 kfree(npath); 2112 } 2113 return err; 2114 } 2115 2116 static int ext4_fill_fiemap_extents(struct inode *inode, 2117 ext4_lblk_t block, ext4_lblk_t num, 2118 struct fiemap_extent_info *fieinfo) 2119 { 2120 struct ext4_ext_path *path = NULL; 2121 struct ext4_extent *ex; 2122 struct extent_status es; 2123 ext4_lblk_t next, next_del, start = 0, end = 0; 2124 ext4_lblk_t last = block + num; 2125 int exists, depth = 0, err = 0; 2126 unsigned int flags = 0; 2127 unsigned char blksize_bits = inode->i_sb->s_blocksize_bits; 2128 2129 while (block < last && block != EXT_MAX_BLOCKS) { 2130 num = last - block; 2131 /* find extent for this block */ 2132 down_read(&EXT4_I(inode)->i_data_sem); 2133 2134 if (path && ext_depth(inode) != depth) { 2135 /* depth was changed. we have to realloc path */ 2136 kfree(path); 2137 path = NULL; 2138 } 2139 2140 path = ext4_ext_find_extent(inode, block, path, 0); 2141 if (IS_ERR(path)) { 2142 up_read(&EXT4_I(inode)->i_data_sem); 2143 err = PTR_ERR(path); 2144 path = NULL; 2145 break; 2146 } 2147 2148 depth = ext_depth(inode); 2149 if (unlikely(path[depth].p_hdr == NULL)) { 2150 up_read(&EXT4_I(inode)->i_data_sem); 2151 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 2152 err = -EIO; 2153 break; 2154 } 2155 ex = path[depth].p_ext; 2156 next = ext4_ext_next_allocated_block(path); 2157 ext4_ext_drop_refs(path); 2158 2159 flags = 0; 2160 exists = 0; 2161 if (!ex) { 2162 /* there is no extent yet, so try to allocate 2163 * all requested space */ 2164 start = block; 2165 end = block + num; 2166 } else if (le32_to_cpu(ex->ee_block) > block) { 2167 /* need to allocate space before found extent */ 2168 start = block; 2169 end = le32_to_cpu(ex->ee_block); 2170 if (block + num < end) 2171 end = block + num; 2172 } else if (block >= le32_to_cpu(ex->ee_block) 2173 + ext4_ext_get_actual_len(ex)) { 2174 /* need to allocate space after found extent */ 2175 start = block; 2176 end = block + num; 2177 if (end >= next) 2178 end = next; 2179 } else if (block >= le32_to_cpu(ex->ee_block)) { 2180 /* 2181 * some part of requested space is covered 2182 * by found extent 2183 */ 2184 start = block; 2185 end = le32_to_cpu(ex->ee_block) 2186 + ext4_ext_get_actual_len(ex); 2187 if (block + num < end) 2188 end = block + num; 2189 exists = 1; 2190 } else { 2191 BUG(); 2192 } 2193 BUG_ON(end <= start); 2194 2195 if (!exists) { 2196 es.es_lblk = start; 2197 es.es_len = end - start; 2198 es.es_pblk = 0; 2199 } else { 2200 es.es_lblk = le32_to_cpu(ex->ee_block); 2201 es.es_len = ext4_ext_get_actual_len(ex); 2202 es.es_pblk = ext4_ext_pblock(ex); 2203 if (ext4_ext_is_uninitialized(ex)) 2204 flags |= FIEMAP_EXTENT_UNWRITTEN; 2205 } 2206 2207 /* 2208 * Find delayed extent and update es accordingly. We call 2209 * it even in !exists case to find out whether es is the 2210 * last existing extent or not. 2211 */ 2212 next_del = ext4_find_delayed_extent(inode, &es); 2213 if (!exists && next_del) { 2214 exists = 1; 2215 flags |= (FIEMAP_EXTENT_DELALLOC | 2216 FIEMAP_EXTENT_UNKNOWN); 2217 } 2218 up_read(&EXT4_I(inode)->i_data_sem); 2219 2220 if (unlikely(es.es_len == 0)) { 2221 EXT4_ERROR_INODE(inode, "es.es_len == 0"); 2222 err = -EIO; 2223 break; 2224 } 2225 2226 /* 2227 * This is possible iff next == next_del == EXT_MAX_BLOCKS. 2228 * we need to check next == EXT_MAX_BLOCKS because it is 2229 * possible that an extent is with unwritten and delayed 2230 * status due to when an extent is delayed allocated and 2231 * is allocated by fallocate status tree will track both of 2232 * them in a extent. 2233 * 2234 * So we could return a unwritten and delayed extent, and 2235 * its block is equal to 'next'. 2236 */ 2237 if (next == next_del && next == EXT_MAX_BLOCKS) { 2238 flags |= FIEMAP_EXTENT_LAST; 2239 if (unlikely(next_del != EXT_MAX_BLOCKS || 2240 next != EXT_MAX_BLOCKS)) { 2241 EXT4_ERROR_INODE(inode, 2242 "next extent == %u, next " 2243 "delalloc extent = %u", 2244 next, next_del); 2245 err = -EIO; 2246 break; 2247 } 2248 } 2249 2250 if (exists) { 2251 err = fiemap_fill_next_extent(fieinfo, 2252 (__u64)es.es_lblk << blksize_bits, 2253 (__u64)es.es_pblk << blksize_bits, 2254 (__u64)es.es_len << blksize_bits, 2255 flags); 2256 if (err < 0) 2257 break; 2258 if (err == 1) { 2259 err = 0; 2260 break; 2261 } 2262 } 2263 2264 block = es.es_lblk + es.es_len; 2265 } 2266 2267 if (path) { 2268 ext4_ext_drop_refs(path); 2269 kfree(path); 2270 } 2271 2272 return err; 2273 } 2274 2275 /* 2276 * ext4_ext_put_gap_in_cache: 2277 * calculate boundaries of the gap that the requested block fits into 2278 * and cache this gap 2279 */ 2280 static void 2281 ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, 2282 ext4_lblk_t block) 2283 { 2284 int depth = ext_depth(inode); 2285 unsigned long len = 0; 2286 ext4_lblk_t lblock = 0; 2287 struct ext4_extent *ex; 2288 2289 ex = path[depth].p_ext; 2290 if (ex == NULL) { 2291 /* 2292 * there is no extent yet, so gap is [0;-] and we 2293 * don't cache it 2294 */ 2295 ext_debug("cache gap(whole file):"); 2296 } else if (block < le32_to_cpu(ex->ee_block)) { 2297 lblock = block; 2298 len = le32_to_cpu(ex->ee_block) - block; 2299 ext_debug("cache gap(before): %u [%u:%u]", 2300 block, 2301 le32_to_cpu(ex->ee_block), 2302 ext4_ext_get_actual_len(ex)); 2303 if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1)) 2304 ext4_es_insert_extent(inode, lblock, len, ~0, 2305 EXTENT_STATUS_HOLE); 2306 } else if (block >= le32_to_cpu(ex->ee_block) 2307 + ext4_ext_get_actual_len(ex)) { 2308 ext4_lblk_t next; 2309 lblock = le32_to_cpu(ex->ee_block) 2310 + ext4_ext_get_actual_len(ex); 2311 2312 next = ext4_ext_next_allocated_block(path); 2313 ext_debug("cache gap(after): [%u:%u] %u", 2314 le32_to_cpu(ex->ee_block), 2315 ext4_ext_get_actual_len(ex), 2316 block); 2317 BUG_ON(next == lblock); 2318 len = next - lblock; 2319 if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1)) 2320 ext4_es_insert_extent(inode, lblock, len, ~0, 2321 EXTENT_STATUS_HOLE); 2322 } else { 2323 BUG(); 2324 } 2325 2326 ext_debug(" -> %u:%lu\n", lblock, len); 2327 } 2328 2329 /* 2330 * ext4_ext_rm_idx: 2331 * removes index from the index block. 2332 */ 2333 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, 2334 struct ext4_ext_path *path, int depth) 2335 { 2336 int err; 2337 ext4_fsblk_t leaf; 2338 2339 /* free index block */ 2340 depth--; 2341 path = path + depth; 2342 leaf = ext4_idx_pblock(path->p_idx); 2343 if (unlikely(path->p_hdr->eh_entries == 0)) { 2344 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); 2345 return -EIO; 2346 } 2347 err = ext4_ext_get_access(handle, inode, path); 2348 if (err) 2349 return err; 2350 2351 if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) { 2352 int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx; 2353 len *= sizeof(struct ext4_extent_idx); 2354 memmove(path->p_idx, path->p_idx + 1, len); 2355 } 2356 2357 le16_add_cpu(&path->p_hdr->eh_entries, -1); 2358 err = ext4_ext_dirty(handle, inode, path); 2359 if (err) 2360 return err; 2361 ext_debug("index is empty, remove it, free block %llu\n", leaf); 2362 trace_ext4_ext_rm_idx(inode, leaf); 2363 2364 ext4_free_blocks(handle, inode, NULL, leaf, 1, 2365 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 2366 2367 while (--depth >= 0) { 2368 if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr)) 2369 break; 2370 path--; 2371 err = ext4_ext_get_access(handle, inode, path); 2372 if (err) 2373 break; 2374 path->p_idx->ei_block = (path+1)->p_idx->ei_block; 2375 err = ext4_ext_dirty(handle, inode, path); 2376 if (err) 2377 break; 2378 } 2379 return err; 2380 } 2381 2382 /* 2383 * ext4_ext_calc_credits_for_single_extent: 2384 * This routine returns max. credits that needed to insert an extent 2385 * to the extent tree. 2386 * When pass the actual path, the caller should calculate credits 2387 * under i_data_sem. 2388 */ 2389 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, 2390 struct ext4_ext_path *path) 2391 { 2392 if (path) { 2393 int depth = ext_depth(inode); 2394 int ret = 0; 2395 2396 /* probably there is space in leaf? */ 2397 if (le16_to_cpu(path[depth].p_hdr->eh_entries) 2398 < le16_to_cpu(path[depth].p_hdr->eh_max)) { 2399 2400 /* 2401 * There are some space in the leaf tree, no 2402 * need to account for leaf block credit 2403 * 2404 * bitmaps and block group descriptor blocks 2405 * and other metadata blocks still need to be 2406 * accounted. 2407 */ 2408 /* 1 bitmap, 1 block group descriptor */ 2409 ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); 2410 return ret; 2411 } 2412 } 2413 2414 return ext4_chunk_trans_blocks(inode, nrblocks); 2415 } 2416 2417 /* 2418 * How many index/leaf blocks need to change/allocate to add @extents extents? 2419 * 2420 * If we add a single extent, then in the worse case, each tree level 2421 * index/leaf need to be changed in case of the tree split. 2422 * 2423 * If more extents are inserted, they could cause the whole tree split more 2424 * than once, but this is really rare. 2425 */ 2426 int ext4_ext_index_trans_blocks(struct inode *inode, int extents) 2427 { 2428 int index; 2429 int depth; 2430 2431 /* If we are converting the inline data, only one is needed here. */ 2432 if (ext4_has_inline_data(inode)) 2433 return 1; 2434 2435 depth = ext_depth(inode); 2436 2437 if (extents <= 1) 2438 index = depth * 2; 2439 else 2440 index = depth * 3; 2441 2442 return index; 2443 } 2444 2445 static inline int get_default_free_blocks_flags(struct inode *inode) 2446 { 2447 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 2448 return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET; 2449 else if (ext4_should_journal_data(inode)) 2450 return EXT4_FREE_BLOCKS_FORGET; 2451 return 0; 2452 } 2453 2454 static int ext4_remove_blocks(handle_t *handle, struct inode *inode, 2455 struct ext4_extent *ex, 2456 long long *partial_cluster, 2457 ext4_lblk_t from, ext4_lblk_t to) 2458 { 2459 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2460 unsigned short ee_len = ext4_ext_get_actual_len(ex); 2461 ext4_fsblk_t pblk; 2462 int flags = get_default_free_blocks_flags(inode); 2463 2464 /* 2465 * For bigalloc file systems, we never free a partial cluster 2466 * at the beginning of the extent. Instead, we make a note 2467 * that we tried freeing the cluster, and check to see if we 2468 * need to free it on a subsequent call to ext4_remove_blocks, 2469 * or at the end of the ext4_truncate() operation. 2470 */ 2471 flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER; 2472 2473 trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster); 2474 /* 2475 * If we have a partial cluster, and it's different from the 2476 * cluster of the last block, we need to explicitly free the 2477 * partial cluster here. 2478 */ 2479 pblk = ext4_ext_pblock(ex) + ee_len - 1; 2480 if ((*partial_cluster > 0) && 2481 (EXT4_B2C(sbi, pblk) != *partial_cluster)) { 2482 ext4_free_blocks(handle, inode, NULL, 2483 EXT4_C2B(sbi, *partial_cluster), 2484 sbi->s_cluster_ratio, flags); 2485 *partial_cluster = 0; 2486 } 2487 2488 #ifdef EXTENTS_STATS 2489 { 2490 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2491 spin_lock(&sbi->s_ext_stats_lock); 2492 sbi->s_ext_blocks += ee_len; 2493 sbi->s_ext_extents++; 2494 if (ee_len < sbi->s_ext_min) 2495 sbi->s_ext_min = ee_len; 2496 if (ee_len > sbi->s_ext_max) 2497 sbi->s_ext_max = ee_len; 2498 if (ext_depth(inode) > sbi->s_depth_max) 2499 sbi->s_depth_max = ext_depth(inode); 2500 spin_unlock(&sbi->s_ext_stats_lock); 2501 } 2502 #endif 2503 if (from >= le32_to_cpu(ex->ee_block) 2504 && to == le32_to_cpu(ex->ee_block) + ee_len - 1) { 2505 /* tail removal */ 2506 ext4_lblk_t num; 2507 unsigned int unaligned; 2508 2509 num = le32_to_cpu(ex->ee_block) + ee_len - from; 2510 pblk = ext4_ext_pblock(ex) + ee_len - num; 2511 /* 2512 * Usually we want to free partial cluster at the end of the 2513 * extent, except for the situation when the cluster is still 2514 * used by any other extent (partial_cluster is negative). 2515 */ 2516 if (*partial_cluster < 0 && 2517 -(*partial_cluster) == EXT4_B2C(sbi, pblk + num - 1)) 2518 flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER; 2519 2520 ext_debug("free last %u blocks starting %llu partial %lld\n", 2521 num, pblk, *partial_cluster); 2522 ext4_free_blocks(handle, inode, NULL, pblk, num, flags); 2523 /* 2524 * If the block range to be freed didn't start at the 2525 * beginning of a cluster, and we removed the entire 2526 * extent and the cluster is not used by any other extent, 2527 * save the partial cluster here, since we might need to 2528 * delete if we determine that the truncate operation has 2529 * removed all of the blocks in the cluster. 2530 * 2531 * On the other hand, if we did not manage to free the whole 2532 * extent, we have to mark the cluster as used (store negative 2533 * cluster number in partial_cluster). 2534 */ 2535 unaligned = EXT4_PBLK_COFF(sbi, pblk); 2536 if (unaligned && (ee_len == num) && 2537 (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk)))) 2538 *partial_cluster = EXT4_B2C(sbi, pblk); 2539 else if (unaligned) 2540 *partial_cluster = -((long long)EXT4_B2C(sbi, pblk)); 2541 else if (*partial_cluster > 0) 2542 *partial_cluster = 0; 2543 } else 2544 ext4_error(sbi->s_sb, "strange request: removal(2) " 2545 "%u-%u from %u:%u\n", 2546 from, to, le32_to_cpu(ex->ee_block), ee_len); 2547 return 0; 2548 } 2549 2550 2551 /* 2552 * ext4_ext_rm_leaf() Removes the extents associated with the 2553 * blocks appearing between "start" and "end", and splits the extents 2554 * if "start" and "end" appear in the same extent 2555 * 2556 * @handle: The journal handle 2557 * @inode: The files inode 2558 * @path: The path to the leaf 2559 * @partial_cluster: The cluster which we'll have to free if all extents 2560 * has been released from it. It gets negative in case 2561 * that the cluster is still used. 2562 * @start: The first block to remove 2563 * @end: The last block to remove 2564 */ 2565 static int 2566 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, 2567 struct ext4_ext_path *path, 2568 long long *partial_cluster, 2569 ext4_lblk_t start, ext4_lblk_t end) 2570 { 2571 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2572 int err = 0, correct_index = 0; 2573 int depth = ext_depth(inode), credits; 2574 struct ext4_extent_header *eh; 2575 ext4_lblk_t a, b; 2576 unsigned num; 2577 ext4_lblk_t ex_ee_block; 2578 unsigned short ex_ee_len; 2579 unsigned uninitialized = 0; 2580 struct ext4_extent *ex; 2581 ext4_fsblk_t pblk; 2582 2583 /* the header must be checked already in ext4_ext_remove_space() */ 2584 ext_debug("truncate since %u in leaf to %u\n", start, end); 2585 if (!path[depth].p_hdr) 2586 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); 2587 eh = path[depth].p_hdr; 2588 if (unlikely(path[depth].p_hdr == NULL)) { 2589 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 2590 return -EIO; 2591 } 2592 /* find where to start removing */ 2593 ex = path[depth].p_ext; 2594 if (!ex) 2595 ex = EXT_LAST_EXTENT(eh); 2596 2597 ex_ee_block = le32_to_cpu(ex->ee_block); 2598 ex_ee_len = ext4_ext_get_actual_len(ex); 2599 2600 /* 2601 * If we're starting with an extent other than the last one in the 2602 * node, we need to see if it shares a cluster with the extent to 2603 * the right (towards the end of the file). If its leftmost cluster 2604 * is this extent's rightmost cluster and it is not cluster aligned, 2605 * we'll mark it as a partial that is not to be deallocated. 2606 */ 2607 2608 if (ex != EXT_LAST_EXTENT(eh)) { 2609 ext4_fsblk_t current_pblk, right_pblk; 2610 long long current_cluster, right_cluster; 2611 2612 current_pblk = ext4_ext_pblock(ex) + ex_ee_len - 1; 2613 current_cluster = (long long)EXT4_B2C(sbi, current_pblk); 2614 right_pblk = ext4_ext_pblock(ex + 1); 2615 right_cluster = (long long)EXT4_B2C(sbi, right_pblk); 2616 if (current_cluster == right_cluster && 2617 EXT4_PBLK_COFF(sbi, right_pblk)) 2618 *partial_cluster = -right_cluster; 2619 } 2620 2621 trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster); 2622 2623 while (ex >= EXT_FIRST_EXTENT(eh) && 2624 ex_ee_block + ex_ee_len > start) { 2625 2626 if (ext4_ext_is_uninitialized(ex)) 2627 uninitialized = 1; 2628 else 2629 uninitialized = 0; 2630 2631 ext_debug("remove ext %u:[%d]%d\n", ex_ee_block, 2632 uninitialized, ex_ee_len); 2633 path[depth].p_ext = ex; 2634 2635 a = ex_ee_block > start ? ex_ee_block : start; 2636 b = ex_ee_block+ex_ee_len - 1 < end ? 2637 ex_ee_block+ex_ee_len - 1 : end; 2638 2639 ext_debug(" border %u:%u\n", a, b); 2640 2641 /* If this extent is beyond the end of the hole, skip it */ 2642 if (end < ex_ee_block) { 2643 /* 2644 * We're going to skip this extent and move to another, 2645 * so if this extent is not cluster aligned we have 2646 * to mark the current cluster as used to avoid 2647 * accidentally freeing it later on 2648 */ 2649 pblk = ext4_ext_pblock(ex); 2650 if (EXT4_PBLK_COFF(sbi, pblk)) 2651 *partial_cluster = 2652 -((long long)EXT4_B2C(sbi, pblk)); 2653 ex--; 2654 ex_ee_block = le32_to_cpu(ex->ee_block); 2655 ex_ee_len = ext4_ext_get_actual_len(ex); 2656 continue; 2657 } else if (b != ex_ee_block + ex_ee_len - 1) { 2658 EXT4_ERROR_INODE(inode, 2659 "can not handle truncate %u:%u " 2660 "on extent %u:%u", 2661 start, end, ex_ee_block, 2662 ex_ee_block + ex_ee_len - 1); 2663 err = -EIO; 2664 goto out; 2665 } else if (a != ex_ee_block) { 2666 /* remove tail of the extent */ 2667 num = a - ex_ee_block; 2668 } else { 2669 /* remove whole extent: excellent! */ 2670 num = 0; 2671 } 2672 /* 2673 * 3 for leaf, sb, and inode plus 2 (bmap and group 2674 * descriptor) for each block group; assume two block 2675 * groups plus ex_ee_len/blocks_per_block_group for 2676 * the worst case 2677 */ 2678 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); 2679 if (ex == EXT_FIRST_EXTENT(eh)) { 2680 correct_index = 1; 2681 credits += (ext_depth(inode)) + 1; 2682 } 2683 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); 2684 2685 err = ext4_ext_truncate_extend_restart(handle, inode, credits); 2686 if (err) 2687 goto out; 2688 2689 err = ext4_ext_get_access(handle, inode, path + depth); 2690 if (err) 2691 goto out; 2692 2693 err = ext4_remove_blocks(handle, inode, ex, partial_cluster, 2694 a, b); 2695 if (err) 2696 goto out; 2697 2698 if (num == 0) 2699 /* this extent is removed; mark slot entirely unused */ 2700 ext4_ext_store_pblock(ex, 0); 2701 2702 ex->ee_len = cpu_to_le16(num); 2703 /* 2704 * Do not mark uninitialized if all the blocks in the 2705 * extent have been removed. 2706 */ 2707 if (uninitialized && num) 2708 ext4_ext_mark_uninitialized(ex); 2709 /* 2710 * If the extent was completely released, 2711 * we need to remove it from the leaf 2712 */ 2713 if (num == 0) { 2714 if (end != EXT_MAX_BLOCKS - 1) { 2715 /* 2716 * For hole punching, we need to scoot all the 2717 * extents up when an extent is removed so that 2718 * we dont have blank extents in the middle 2719 */ 2720 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) * 2721 sizeof(struct ext4_extent)); 2722 2723 /* Now get rid of the one at the end */ 2724 memset(EXT_LAST_EXTENT(eh), 0, 2725 sizeof(struct ext4_extent)); 2726 } 2727 le16_add_cpu(&eh->eh_entries, -1); 2728 } else if (*partial_cluster > 0) 2729 *partial_cluster = 0; 2730 2731 err = ext4_ext_dirty(handle, inode, path + depth); 2732 if (err) 2733 goto out; 2734 2735 ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num, 2736 ext4_ext_pblock(ex)); 2737 ex--; 2738 ex_ee_block = le32_to_cpu(ex->ee_block); 2739 ex_ee_len = ext4_ext_get_actual_len(ex); 2740 } 2741 2742 if (correct_index && eh->eh_entries) 2743 err = ext4_ext_correct_indexes(handle, inode, path); 2744 2745 /* 2746 * If there's a partial cluster and at least one extent remains in 2747 * the leaf, free the partial cluster if it isn't shared with the 2748 * current extent. If there's a partial cluster and no extents 2749 * remain in the leaf, it can't be freed here. It can only be 2750 * freed when it's possible to determine if it's not shared with 2751 * any other extent - when the next leaf is processed or when space 2752 * removal is complete. 2753 */ 2754 if (*partial_cluster > 0 && eh->eh_entries && 2755 (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) != 2756 *partial_cluster)) { 2757 int flags = get_default_free_blocks_flags(inode); 2758 2759 ext4_free_blocks(handle, inode, NULL, 2760 EXT4_C2B(sbi, *partial_cluster), 2761 sbi->s_cluster_ratio, flags); 2762 *partial_cluster = 0; 2763 } 2764 2765 /* if this leaf is free, then we should 2766 * remove it from index block above */ 2767 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) 2768 err = ext4_ext_rm_idx(handle, inode, path, depth); 2769 2770 out: 2771 return err; 2772 } 2773 2774 /* 2775 * ext4_ext_more_to_rm: 2776 * returns 1 if current index has to be freed (even partial) 2777 */ 2778 static int 2779 ext4_ext_more_to_rm(struct ext4_ext_path *path) 2780 { 2781 BUG_ON(path->p_idx == NULL); 2782 2783 if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) 2784 return 0; 2785 2786 /* 2787 * if truncate on deeper level happened, it wasn't partial, 2788 * so we have to consider current index for truncation 2789 */ 2790 if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) 2791 return 0; 2792 return 1; 2793 } 2794 2795 int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, 2796 ext4_lblk_t end) 2797 { 2798 struct super_block *sb = inode->i_sb; 2799 int depth = ext_depth(inode); 2800 struct ext4_ext_path *path = NULL; 2801 long long partial_cluster = 0; 2802 handle_t *handle; 2803 int i = 0, err = 0; 2804 2805 ext_debug("truncate since %u to %u\n", start, end); 2806 2807 /* probably first extent we're gonna free will be last in block */ 2808 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1); 2809 if (IS_ERR(handle)) 2810 return PTR_ERR(handle); 2811 2812 again: 2813 trace_ext4_ext_remove_space(inode, start, end, depth); 2814 2815 /* 2816 * Check if we are removing extents inside the extent tree. If that 2817 * is the case, we are going to punch a hole inside the extent tree 2818 * so we have to check whether we need to split the extent covering 2819 * the last block to remove so we can easily remove the part of it 2820 * in ext4_ext_rm_leaf(). 2821 */ 2822 if (end < EXT_MAX_BLOCKS - 1) { 2823 struct ext4_extent *ex; 2824 ext4_lblk_t ee_block; 2825 2826 /* find extent for this block */ 2827 path = ext4_ext_find_extent(inode, end, NULL, EXT4_EX_NOCACHE); 2828 if (IS_ERR(path)) { 2829 ext4_journal_stop(handle); 2830 return PTR_ERR(path); 2831 } 2832 depth = ext_depth(inode); 2833 /* Leaf not may not exist only if inode has no blocks at all */ 2834 ex = path[depth].p_ext; 2835 if (!ex) { 2836 if (depth) { 2837 EXT4_ERROR_INODE(inode, 2838 "path[%d].p_hdr == NULL", 2839 depth); 2840 err = -EIO; 2841 } 2842 goto out; 2843 } 2844 2845 ee_block = le32_to_cpu(ex->ee_block); 2846 2847 /* 2848 * See if the last block is inside the extent, if so split 2849 * the extent at 'end' block so we can easily remove the 2850 * tail of the first part of the split extent in 2851 * ext4_ext_rm_leaf(). 2852 */ 2853 if (end >= ee_block && 2854 end < ee_block + ext4_ext_get_actual_len(ex) - 1) { 2855 int split_flag = 0; 2856 2857 if (ext4_ext_is_uninitialized(ex)) 2858 split_flag = EXT4_EXT_MARK_UNINIT1 | 2859 EXT4_EXT_MARK_UNINIT2; 2860 2861 /* 2862 * Split the extent in two so that 'end' is the last 2863 * block in the first new extent. Also we should not 2864 * fail removing space due to ENOSPC so try to use 2865 * reserved block if that happens. 2866 */ 2867 err = ext4_split_extent_at(handle, inode, path, 2868 end + 1, split_flag, 2869 EXT4_EX_NOCACHE | 2870 EXT4_GET_BLOCKS_PRE_IO | 2871 EXT4_GET_BLOCKS_METADATA_NOFAIL); 2872 2873 if (err < 0) 2874 goto out; 2875 } 2876 } 2877 /* 2878 * We start scanning from right side, freeing all the blocks 2879 * after i_size and walking into the tree depth-wise. 2880 */ 2881 depth = ext_depth(inode); 2882 if (path) { 2883 int k = i = depth; 2884 while (--k > 0) 2885 path[k].p_block = 2886 le16_to_cpu(path[k].p_hdr->eh_entries)+1; 2887 } else { 2888 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), 2889 GFP_NOFS); 2890 if (path == NULL) { 2891 ext4_journal_stop(handle); 2892 return -ENOMEM; 2893 } 2894 path[0].p_depth = depth; 2895 path[0].p_hdr = ext_inode_hdr(inode); 2896 i = 0; 2897 2898 if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) { 2899 err = -EIO; 2900 goto out; 2901 } 2902 } 2903 err = 0; 2904 2905 while (i >= 0 && err == 0) { 2906 if (i == depth) { 2907 /* this is leaf block */ 2908 err = ext4_ext_rm_leaf(handle, inode, path, 2909 &partial_cluster, start, 2910 end); 2911 /* root level has p_bh == NULL, brelse() eats this */ 2912 brelse(path[i].p_bh); 2913 path[i].p_bh = NULL; 2914 i--; 2915 continue; 2916 } 2917 2918 /* this is index block */ 2919 if (!path[i].p_hdr) { 2920 ext_debug("initialize header\n"); 2921 path[i].p_hdr = ext_block_hdr(path[i].p_bh); 2922 } 2923 2924 if (!path[i].p_idx) { 2925 /* this level hasn't been touched yet */ 2926 path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); 2927 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; 2928 ext_debug("init index ptr: hdr 0x%p, num %d\n", 2929 path[i].p_hdr, 2930 le16_to_cpu(path[i].p_hdr->eh_entries)); 2931 } else { 2932 /* we were already here, see at next index */ 2933 path[i].p_idx--; 2934 } 2935 2936 ext_debug("level %d - index, first 0x%p, cur 0x%p\n", 2937 i, EXT_FIRST_INDEX(path[i].p_hdr), 2938 path[i].p_idx); 2939 if (ext4_ext_more_to_rm(path + i)) { 2940 struct buffer_head *bh; 2941 /* go to the next level */ 2942 ext_debug("move to level %d (block %llu)\n", 2943 i + 1, ext4_idx_pblock(path[i].p_idx)); 2944 memset(path + i + 1, 0, sizeof(*path)); 2945 bh = read_extent_tree_block(inode, 2946 ext4_idx_pblock(path[i].p_idx), depth - i - 1, 2947 EXT4_EX_NOCACHE); 2948 if (IS_ERR(bh)) { 2949 /* should we reset i_size? */ 2950 err = PTR_ERR(bh); 2951 break; 2952 } 2953 /* Yield here to deal with large extent trees. 2954 * Should be a no-op if we did IO above. */ 2955 cond_resched(); 2956 if (WARN_ON(i + 1 > depth)) { 2957 err = -EIO; 2958 break; 2959 } 2960 path[i + 1].p_bh = bh; 2961 2962 /* save actual number of indexes since this 2963 * number is changed at the next iteration */ 2964 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); 2965 i++; 2966 } else { 2967 /* we finished processing this index, go up */ 2968 if (path[i].p_hdr->eh_entries == 0 && i > 0) { 2969 /* index is empty, remove it; 2970 * handle must be already prepared by the 2971 * truncatei_leaf() */ 2972 err = ext4_ext_rm_idx(handle, inode, path, i); 2973 } 2974 /* root level has p_bh == NULL, brelse() eats this */ 2975 brelse(path[i].p_bh); 2976 path[i].p_bh = NULL; 2977 i--; 2978 ext_debug("return to level %d\n", i); 2979 } 2980 } 2981 2982 trace_ext4_ext_remove_space_done(inode, start, end, depth, 2983 partial_cluster, path->p_hdr->eh_entries); 2984 2985 /* If we still have something in the partial cluster and we have removed 2986 * even the first extent, then we should free the blocks in the partial 2987 * cluster as well. */ 2988 if (partial_cluster > 0 && path->p_hdr->eh_entries == 0) { 2989 int flags = get_default_free_blocks_flags(inode); 2990 2991 ext4_free_blocks(handle, inode, NULL, 2992 EXT4_C2B(EXT4_SB(sb), partial_cluster), 2993 EXT4_SB(sb)->s_cluster_ratio, flags); 2994 partial_cluster = 0; 2995 } 2996 2997 /* TODO: flexible tree reduction should be here */ 2998 if (path->p_hdr->eh_entries == 0) { 2999 /* 3000 * truncate to zero freed all the tree, 3001 * so we need to correct eh_depth 3002 */ 3003 err = ext4_ext_get_access(handle, inode, path); 3004 if (err == 0) { 3005 ext_inode_hdr(inode)->eh_depth = 0; 3006 ext_inode_hdr(inode)->eh_max = 3007 cpu_to_le16(ext4_ext_space_root(inode, 0)); 3008 err = ext4_ext_dirty(handle, inode, path); 3009 } 3010 } 3011 out: 3012 ext4_ext_drop_refs(path); 3013 kfree(path); 3014 if (err == -EAGAIN) { 3015 path = NULL; 3016 goto again; 3017 } 3018 ext4_journal_stop(handle); 3019 3020 return err; 3021 } 3022 3023 /* 3024 * called at mount time 3025 */ 3026 void ext4_ext_init(struct super_block *sb) 3027 { 3028 /* 3029 * possible initialization would be here 3030 */ 3031 3032 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) { 3033 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS) 3034 printk(KERN_INFO "EXT4-fs: file extents enabled" 3035 #ifdef AGGRESSIVE_TEST 3036 ", aggressive tests" 3037 #endif 3038 #ifdef CHECK_BINSEARCH 3039 ", check binsearch" 3040 #endif 3041 #ifdef EXTENTS_STATS 3042 ", stats" 3043 #endif 3044 "\n"); 3045 #endif 3046 #ifdef EXTENTS_STATS 3047 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock); 3048 EXT4_SB(sb)->s_ext_min = 1 << 30; 3049 EXT4_SB(sb)->s_ext_max = 0; 3050 #endif 3051 } 3052 } 3053 3054 /* 3055 * called at umount time 3056 */ 3057 void ext4_ext_release(struct super_block *sb) 3058 { 3059 if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) 3060 return; 3061 3062 #ifdef EXTENTS_STATS 3063 if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) { 3064 struct ext4_sb_info *sbi = EXT4_SB(sb); 3065 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n", 3066 sbi->s_ext_blocks, sbi->s_ext_extents, 3067 sbi->s_ext_blocks / sbi->s_ext_extents); 3068 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n", 3069 sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max); 3070 } 3071 #endif 3072 } 3073 3074 static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex) 3075 { 3076 ext4_lblk_t ee_block; 3077 ext4_fsblk_t ee_pblock; 3078 unsigned int ee_len; 3079 3080 ee_block = le32_to_cpu(ex->ee_block); 3081 ee_len = ext4_ext_get_actual_len(ex); 3082 ee_pblock = ext4_ext_pblock(ex); 3083 3084 if (ee_len == 0) 3085 return 0; 3086 3087 return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock, 3088 EXTENT_STATUS_WRITTEN); 3089 } 3090 3091 /* FIXME!! we need to try to merge to left or right after zero-out */ 3092 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) 3093 { 3094 ext4_fsblk_t ee_pblock; 3095 unsigned int ee_len; 3096 int ret; 3097 3098 ee_len = ext4_ext_get_actual_len(ex); 3099 ee_pblock = ext4_ext_pblock(ex); 3100 3101 ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS); 3102 if (ret > 0) 3103 ret = 0; 3104 3105 return ret; 3106 } 3107 3108 /* 3109 * ext4_split_extent_at() splits an extent at given block. 3110 * 3111 * @handle: the journal handle 3112 * @inode: the file inode 3113 * @path: the path to the extent 3114 * @split: the logical block where the extent is splitted. 3115 * @split_flags: indicates if the extent could be zeroout if split fails, and 3116 * the states(init or uninit) of new extents. 3117 * @flags: flags used to insert new extent to extent tree. 3118 * 3119 * 3120 * Splits extent [a, b] into two extents [a, @split) and [@split, b], states 3121 * of which are deterimined by split_flag. 3122 * 3123 * There are two cases: 3124 * a> the extent are splitted into two extent. 3125 * b> split is not needed, and just mark the extent. 3126 * 3127 * return 0 on success. 3128 */ 3129 static int ext4_split_extent_at(handle_t *handle, 3130 struct inode *inode, 3131 struct ext4_ext_path *path, 3132 ext4_lblk_t split, 3133 int split_flag, 3134 int flags) 3135 { 3136 ext4_fsblk_t newblock; 3137 ext4_lblk_t ee_block; 3138 struct ext4_extent *ex, newex, orig_ex, zero_ex; 3139 struct ext4_extent *ex2 = NULL; 3140 unsigned int ee_len, depth; 3141 int err = 0; 3142 3143 BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) == 3144 (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)); 3145 3146 ext_debug("ext4_split_extents_at: inode %lu, logical" 3147 "block %llu\n", inode->i_ino, (unsigned long long)split); 3148 3149 ext4_ext_show_leaf(inode, path); 3150 3151 depth = ext_depth(inode); 3152 ex = path[depth].p_ext; 3153 ee_block = le32_to_cpu(ex->ee_block); 3154 ee_len = ext4_ext_get_actual_len(ex); 3155 newblock = split - ee_block + ext4_ext_pblock(ex); 3156 3157 BUG_ON(split < ee_block || split >= (ee_block + ee_len)); 3158 BUG_ON(!ext4_ext_is_uninitialized(ex) && 3159 split_flag & (EXT4_EXT_MAY_ZEROOUT | 3160 EXT4_EXT_MARK_UNINIT1 | 3161 EXT4_EXT_MARK_UNINIT2)); 3162 3163 err = ext4_ext_get_access(handle, inode, path + depth); 3164 if (err) 3165 goto out; 3166 3167 if (split == ee_block) { 3168 /* 3169 * case b: block @split is the block that the extent begins with 3170 * then we just change the state of the extent, and splitting 3171 * is not needed. 3172 */ 3173 if (split_flag & EXT4_EXT_MARK_UNINIT2) 3174 ext4_ext_mark_uninitialized(ex); 3175 else 3176 ext4_ext_mark_initialized(ex); 3177 3178 if (!(flags & EXT4_GET_BLOCKS_PRE_IO)) 3179 ext4_ext_try_to_merge(handle, inode, path, ex); 3180 3181 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3182 goto out; 3183 } 3184 3185 /* case a */ 3186 memcpy(&orig_ex, ex, sizeof(orig_ex)); 3187 ex->ee_len = cpu_to_le16(split - ee_block); 3188 if (split_flag & EXT4_EXT_MARK_UNINIT1) 3189 ext4_ext_mark_uninitialized(ex); 3190 3191 /* 3192 * path may lead to new leaf, not to original leaf any more 3193 * after ext4_ext_insert_extent() returns, 3194 */ 3195 err = ext4_ext_dirty(handle, inode, path + depth); 3196 if (err) 3197 goto fix_extent_len; 3198 3199 ex2 = &newex; 3200 ex2->ee_block = cpu_to_le32(split); 3201 ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block)); 3202 ext4_ext_store_pblock(ex2, newblock); 3203 if (split_flag & EXT4_EXT_MARK_UNINIT2) 3204 ext4_ext_mark_uninitialized(ex2); 3205 3206 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); 3207 if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { 3208 if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { 3209 if (split_flag & EXT4_EXT_DATA_VALID1) { 3210 err = ext4_ext_zeroout(inode, ex2); 3211 zero_ex.ee_block = ex2->ee_block; 3212 zero_ex.ee_len = cpu_to_le16( 3213 ext4_ext_get_actual_len(ex2)); 3214 ext4_ext_store_pblock(&zero_ex, 3215 ext4_ext_pblock(ex2)); 3216 } else { 3217 err = ext4_ext_zeroout(inode, ex); 3218 zero_ex.ee_block = ex->ee_block; 3219 zero_ex.ee_len = cpu_to_le16( 3220 ext4_ext_get_actual_len(ex)); 3221 ext4_ext_store_pblock(&zero_ex, 3222 ext4_ext_pblock(ex)); 3223 } 3224 } else { 3225 err = ext4_ext_zeroout(inode, &orig_ex); 3226 zero_ex.ee_block = orig_ex.ee_block; 3227 zero_ex.ee_len = cpu_to_le16( 3228 ext4_ext_get_actual_len(&orig_ex)); 3229 ext4_ext_store_pblock(&zero_ex, 3230 ext4_ext_pblock(&orig_ex)); 3231 } 3232 3233 if (err) 3234 goto fix_extent_len; 3235 /* update the extent length and mark as initialized */ 3236 ex->ee_len = cpu_to_le16(ee_len); 3237 ext4_ext_try_to_merge(handle, inode, path, ex); 3238 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3239 if (err) 3240 goto fix_extent_len; 3241 3242 /* update extent status tree */ 3243 err = ext4_zeroout_es(inode, &zero_ex); 3244 3245 goto out; 3246 } else if (err) 3247 goto fix_extent_len; 3248 3249 out: 3250 ext4_ext_show_leaf(inode, path); 3251 return err; 3252 3253 fix_extent_len: 3254 ex->ee_len = orig_ex.ee_len; 3255 ext4_ext_dirty(handle, inode, path + depth); 3256 return err; 3257 } 3258 3259 /* 3260 * ext4_split_extents() splits an extent and mark extent which is covered 3261 * by @map as split_flags indicates 3262 * 3263 * It may result in splitting the extent into multiple extents (up to three) 3264 * There are three possibilities: 3265 * a> There is no split required 3266 * b> Splits in two extents: Split is happening at either end of the extent 3267 * c> Splits in three extents: Somone is splitting in middle of the extent 3268 * 3269 */ 3270 static int ext4_split_extent(handle_t *handle, 3271 struct inode *inode, 3272 struct ext4_ext_path *path, 3273 struct ext4_map_blocks *map, 3274 int split_flag, 3275 int flags) 3276 { 3277 ext4_lblk_t ee_block; 3278 struct ext4_extent *ex; 3279 unsigned int ee_len, depth; 3280 int err = 0; 3281 int uninitialized; 3282 int split_flag1, flags1; 3283 int allocated = map->m_len; 3284 3285 depth = ext_depth(inode); 3286 ex = path[depth].p_ext; 3287 ee_block = le32_to_cpu(ex->ee_block); 3288 ee_len = ext4_ext_get_actual_len(ex); 3289 uninitialized = ext4_ext_is_uninitialized(ex); 3290 3291 if (map->m_lblk + map->m_len < ee_block + ee_len) { 3292 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT; 3293 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO; 3294 if (uninitialized) 3295 split_flag1 |= EXT4_EXT_MARK_UNINIT1 | 3296 EXT4_EXT_MARK_UNINIT2; 3297 if (split_flag & EXT4_EXT_DATA_VALID2) 3298 split_flag1 |= EXT4_EXT_DATA_VALID1; 3299 err = ext4_split_extent_at(handle, inode, path, 3300 map->m_lblk + map->m_len, split_flag1, flags1); 3301 if (err) 3302 goto out; 3303 } else { 3304 allocated = ee_len - (map->m_lblk - ee_block); 3305 } 3306 /* 3307 * Update path is required because previous ext4_split_extent_at() may 3308 * result in split of original leaf or extent zeroout. 3309 */ 3310 ext4_ext_drop_refs(path); 3311 path = ext4_ext_find_extent(inode, map->m_lblk, path, 0); 3312 if (IS_ERR(path)) 3313 return PTR_ERR(path); 3314 depth = ext_depth(inode); 3315 ex = path[depth].p_ext; 3316 if (!ex) { 3317 EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 3318 (unsigned long) map->m_lblk); 3319 return -EIO; 3320 } 3321 uninitialized = ext4_ext_is_uninitialized(ex); 3322 split_flag1 = 0; 3323 3324 if (map->m_lblk >= ee_block) { 3325 split_flag1 = split_flag & EXT4_EXT_DATA_VALID2; 3326 if (uninitialized) { 3327 split_flag1 |= EXT4_EXT_MARK_UNINIT1; 3328 split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT | 3329 EXT4_EXT_MARK_UNINIT2); 3330 } 3331 err = ext4_split_extent_at(handle, inode, path, 3332 map->m_lblk, split_flag1, flags); 3333 if (err) 3334 goto out; 3335 } 3336 3337 ext4_ext_show_leaf(inode, path); 3338 out: 3339 return err ? err : allocated; 3340 } 3341 3342 /* 3343 * This function is called by ext4_ext_map_blocks() if someone tries to write 3344 * to an uninitialized extent. It may result in splitting the uninitialized 3345 * extent into multiple extents (up to three - one initialized and two 3346 * uninitialized). 3347 * There are three possibilities: 3348 * a> There is no split required: Entire extent should be initialized 3349 * b> Splits in two extents: Write is happening at either end of the extent 3350 * c> Splits in three extents: Somone is writing in middle of the extent 3351 * 3352 * Pre-conditions: 3353 * - The extent pointed to by 'path' is uninitialized. 3354 * - The extent pointed to by 'path' contains a superset 3355 * of the logical span [map->m_lblk, map->m_lblk + map->m_len). 3356 * 3357 * Post-conditions on success: 3358 * - the returned value is the number of blocks beyond map->l_lblk 3359 * that are allocated and initialized. 3360 * It is guaranteed to be >= map->m_len. 3361 */ 3362 static int ext4_ext_convert_to_initialized(handle_t *handle, 3363 struct inode *inode, 3364 struct ext4_map_blocks *map, 3365 struct ext4_ext_path *path, 3366 int flags) 3367 { 3368 struct ext4_sb_info *sbi; 3369 struct ext4_extent_header *eh; 3370 struct ext4_map_blocks split_map; 3371 struct ext4_extent zero_ex; 3372 struct ext4_extent *ex, *abut_ex; 3373 ext4_lblk_t ee_block, eof_block; 3374 unsigned int ee_len, depth, map_len = map->m_len; 3375 int allocated = 0, max_zeroout = 0; 3376 int err = 0; 3377 int split_flag = 0; 3378 3379 ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" 3380 "block %llu, max_blocks %u\n", inode->i_ino, 3381 (unsigned long long)map->m_lblk, map_len); 3382 3383 sbi = EXT4_SB(inode->i_sb); 3384 eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> 3385 inode->i_sb->s_blocksize_bits; 3386 if (eof_block < map->m_lblk + map_len) 3387 eof_block = map->m_lblk + map_len; 3388 3389 depth = ext_depth(inode); 3390 eh = path[depth].p_hdr; 3391 ex = path[depth].p_ext; 3392 ee_block = le32_to_cpu(ex->ee_block); 3393 ee_len = ext4_ext_get_actual_len(ex); 3394 zero_ex.ee_len = 0; 3395 3396 trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); 3397 3398 /* Pre-conditions */ 3399 BUG_ON(!ext4_ext_is_uninitialized(ex)); 3400 BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); 3401 3402 /* 3403 * Attempt to transfer newly initialized blocks from the currently 3404 * uninitialized extent to its neighbor. This is much cheaper 3405 * than an insertion followed by a merge as those involve costly 3406 * memmove() calls. Transferring to the left is the common case in 3407 * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE) 3408 * followed by append writes. 3409 * 3410 * Limitations of the current logic: 3411 * - L1: we do not deal with writes covering the whole extent. 3412 * This would require removing the extent if the transfer 3413 * is possible. 3414 * - L2: we only attempt to merge with an extent stored in the 3415 * same extent tree node. 3416 */ 3417 if ((map->m_lblk == ee_block) && 3418 /* See if we can merge left */ 3419 (map_len < ee_len) && /*L1*/ 3420 (ex > EXT_FIRST_EXTENT(eh))) { /*L2*/ 3421 ext4_lblk_t prev_lblk; 3422 ext4_fsblk_t prev_pblk, ee_pblk; 3423 unsigned int prev_len; 3424 3425 abut_ex = ex - 1; 3426 prev_lblk = le32_to_cpu(abut_ex->ee_block); 3427 prev_len = ext4_ext_get_actual_len(abut_ex); 3428 prev_pblk = ext4_ext_pblock(abut_ex); 3429 ee_pblk = ext4_ext_pblock(ex); 3430 3431 /* 3432 * A transfer of blocks from 'ex' to 'abut_ex' is allowed 3433 * upon those conditions: 3434 * - C1: abut_ex is initialized, 3435 * - C2: abut_ex is logically abutting ex, 3436 * - C3: abut_ex is physically abutting ex, 3437 * - C4: abut_ex can receive the additional blocks without 3438 * overflowing the (initialized) length limit. 3439 */ 3440 if ((!ext4_ext_is_uninitialized(abut_ex)) && /*C1*/ 3441 ((prev_lblk + prev_len) == ee_block) && /*C2*/ 3442 ((prev_pblk + prev_len) == ee_pblk) && /*C3*/ 3443 (prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ 3444 err = ext4_ext_get_access(handle, inode, path + depth); 3445 if (err) 3446 goto out; 3447 3448 trace_ext4_ext_convert_to_initialized_fastpath(inode, 3449 map, ex, abut_ex); 3450 3451 /* Shift the start of ex by 'map_len' blocks */ 3452 ex->ee_block = cpu_to_le32(ee_block + map_len); 3453 ext4_ext_store_pblock(ex, ee_pblk + map_len); 3454 ex->ee_len = cpu_to_le16(ee_len - map_len); 3455 ext4_ext_mark_uninitialized(ex); /* Restore the flag */ 3456 3457 /* Extend abut_ex by 'map_len' blocks */ 3458 abut_ex->ee_len = cpu_to_le16(prev_len + map_len); 3459 3460 /* Result: number of initialized blocks past m_lblk */ 3461 allocated = map_len; 3462 } 3463 } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) && 3464 (map_len < ee_len) && /*L1*/ 3465 ex < EXT_LAST_EXTENT(eh)) { /*L2*/ 3466 /* See if we can merge right */ 3467 ext4_lblk_t next_lblk; 3468 ext4_fsblk_t next_pblk, ee_pblk; 3469 unsigned int next_len; 3470 3471 abut_ex = ex + 1; 3472 next_lblk = le32_to_cpu(abut_ex->ee_block); 3473 next_len = ext4_ext_get_actual_len(abut_ex); 3474 next_pblk = ext4_ext_pblock(abut_ex); 3475 ee_pblk = ext4_ext_pblock(ex); 3476 3477 /* 3478 * A transfer of blocks from 'ex' to 'abut_ex' is allowed 3479 * upon those conditions: 3480 * - C1: abut_ex is initialized, 3481 * - C2: abut_ex is logically abutting ex, 3482 * - C3: abut_ex is physically abutting ex, 3483 * - C4: abut_ex can receive the additional blocks without 3484 * overflowing the (initialized) length limit. 3485 */ 3486 if ((!ext4_ext_is_uninitialized(abut_ex)) && /*C1*/ 3487 ((map->m_lblk + map_len) == next_lblk) && /*C2*/ 3488 ((ee_pblk + ee_len) == next_pblk) && /*C3*/ 3489 (next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ 3490 err = ext4_ext_get_access(handle, inode, path + depth); 3491 if (err) 3492 goto out; 3493 3494 trace_ext4_ext_convert_to_initialized_fastpath(inode, 3495 map, ex, abut_ex); 3496 3497 /* Shift the start of abut_ex by 'map_len' blocks */ 3498 abut_ex->ee_block = cpu_to_le32(next_lblk - map_len); 3499 ext4_ext_store_pblock(abut_ex, next_pblk - map_len); 3500 ex->ee_len = cpu_to_le16(ee_len - map_len); 3501 ext4_ext_mark_uninitialized(ex); /* Restore the flag */ 3502 3503 /* Extend abut_ex by 'map_len' blocks */ 3504 abut_ex->ee_len = cpu_to_le16(next_len + map_len); 3505 3506 /* Result: number of initialized blocks past m_lblk */ 3507 allocated = map_len; 3508 } 3509 } 3510 if (allocated) { 3511 /* Mark the block containing both extents as dirty */ 3512 ext4_ext_dirty(handle, inode, path + depth); 3513 3514 /* Update path to point to the right extent */ 3515 path[depth].p_ext = abut_ex; 3516 goto out; 3517 } else 3518 allocated = ee_len - (map->m_lblk - ee_block); 3519 3520 WARN_ON(map->m_lblk < ee_block); 3521 /* 3522 * It is safe to convert extent to initialized via explicit 3523 * zeroout only if extent is fully inside i_size or new_size. 3524 */ 3525 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; 3526 3527 if (EXT4_EXT_MAY_ZEROOUT & split_flag) 3528 max_zeroout = sbi->s_extent_max_zeroout_kb >> 3529 (inode->i_sb->s_blocksize_bits - 10); 3530 3531 /* If extent is less than s_max_zeroout_kb, zeroout directly */ 3532 if (max_zeroout && (ee_len <= max_zeroout)) { 3533 err = ext4_ext_zeroout(inode, ex); 3534 if (err) 3535 goto out; 3536 zero_ex.ee_block = ex->ee_block; 3537 zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)); 3538 ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex)); 3539 3540 err = ext4_ext_get_access(handle, inode, path + depth); 3541 if (err) 3542 goto out; 3543 ext4_ext_mark_initialized(ex); 3544 ext4_ext_try_to_merge(handle, inode, path, ex); 3545 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3546 goto out; 3547 } 3548 3549 /* 3550 * four cases: 3551 * 1. split the extent into three extents. 3552 * 2. split the extent into two extents, zeroout the first half. 3553 * 3. split the extent into two extents, zeroout the second half. 3554 * 4. split the extent into two extents with out zeroout. 3555 */ 3556 split_map.m_lblk = map->m_lblk; 3557 split_map.m_len = map->m_len; 3558 3559 if (max_zeroout && (allocated > map->m_len)) { 3560 if (allocated <= max_zeroout) { 3561 /* case 3 */ 3562 zero_ex.ee_block = 3563 cpu_to_le32(map->m_lblk); 3564 zero_ex.ee_len = cpu_to_le16(allocated); 3565 ext4_ext_store_pblock(&zero_ex, 3566 ext4_ext_pblock(ex) + map->m_lblk - ee_block); 3567 err = ext4_ext_zeroout(inode, &zero_ex); 3568 if (err) 3569 goto out; 3570 split_map.m_lblk = map->m_lblk; 3571 split_map.m_len = allocated; 3572 } else if (map->m_lblk - ee_block + map->m_len < max_zeroout) { 3573 /* case 2 */ 3574 if (map->m_lblk != ee_block) { 3575 zero_ex.ee_block = ex->ee_block; 3576 zero_ex.ee_len = cpu_to_le16(map->m_lblk - 3577 ee_block); 3578 ext4_ext_store_pblock(&zero_ex, 3579 ext4_ext_pblock(ex)); 3580 err = ext4_ext_zeroout(inode, &zero_ex); 3581 if (err) 3582 goto out; 3583 } 3584 3585 split_map.m_lblk = ee_block; 3586 split_map.m_len = map->m_lblk - ee_block + map->m_len; 3587 allocated = map->m_len; 3588 } 3589 } 3590 3591 allocated = ext4_split_extent(handle, inode, path, 3592 &split_map, split_flag, flags); 3593 if (allocated < 0) 3594 err = allocated; 3595 3596 out: 3597 /* If we have gotten a failure, don't zero out status tree */ 3598 if (!err) 3599 err = ext4_zeroout_es(inode, &zero_ex); 3600 return err ? err : allocated; 3601 } 3602 3603 /* 3604 * This function is called by ext4_ext_map_blocks() from 3605 * ext4_get_blocks_dio_write() when DIO to write 3606 * to an uninitialized extent. 3607 * 3608 * Writing to an uninitialized extent may result in splitting the uninitialized 3609 * extent into multiple initialized/uninitialized extents (up to three) 3610 * There are three possibilities: 3611 * a> There is no split required: Entire extent should be uninitialized 3612 * b> Splits in two extents: Write is happening at either end of the extent 3613 * c> Splits in three extents: Somone is writing in middle of the extent 3614 * 3615 * This works the same way in the case of initialized -> unwritten conversion. 3616 * 3617 * One of more index blocks maybe needed if the extent tree grow after 3618 * the uninitialized extent split. To prevent ENOSPC occur at the IO 3619 * complete, we need to split the uninitialized extent before DIO submit 3620 * the IO. The uninitialized extent called at this time will be split 3621 * into three uninitialized extent(at most). After IO complete, the part 3622 * being filled will be convert to initialized by the end_io callback function 3623 * via ext4_convert_unwritten_extents(). 3624 * 3625 * Returns the size of uninitialized extent to be written on success. 3626 */ 3627 static int ext4_split_convert_extents(handle_t *handle, 3628 struct inode *inode, 3629 struct ext4_map_blocks *map, 3630 struct ext4_ext_path *path, 3631 int flags) 3632 { 3633 ext4_lblk_t eof_block; 3634 ext4_lblk_t ee_block; 3635 struct ext4_extent *ex; 3636 unsigned int ee_len; 3637 int split_flag = 0, depth; 3638 3639 ext_debug("%s: inode %lu, logical block %llu, max_blocks %u\n", 3640 __func__, inode->i_ino, 3641 (unsigned long long)map->m_lblk, map->m_len); 3642 3643 eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> 3644 inode->i_sb->s_blocksize_bits; 3645 if (eof_block < map->m_lblk + map->m_len) 3646 eof_block = map->m_lblk + map->m_len; 3647 /* 3648 * It is safe to convert extent to initialized via explicit 3649 * zeroout only if extent is fully insde i_size or new_size. 3650 */ 3651 depth = ext_depth(inode); 3652 ex = path[depth].p_ext; 3653 ee_block = le32_to_cpu(ex->ee_block); 3654 ee_len = ext4_ext_get_actual_len(ex); 3655 3656 /* Convert to unwritten */ 3657 if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) { 3658 split_flag |= EXT4_EXT_DATA_VALID1; 3659 /* Convert to initialized */ 3660 } else if (flags & EXT4_GET_BLOCKS_CONVERT) { 3661 split_flag |= ee_block + ee_len <= eof_block ? 3662 EXT4_EXT_MAY_ZEROOUT : 0; 3663 split_flag |= (EXT4_EXT_MARK_UNINIT2 | EXT4_EXT_DATA_VALID2); 3664 } 3665 flags |= EXT4_GET_BLOCKS_PRE_IO; 3666 return ext4_split_extent(handle, inode, path, map, split_flag, flags); 3667 } 3668 3669 static int ext4_convert_initialized_extents(handle_t *handle, 3670 struct inode *inode, 3671 struct ext4_map_blocks *map, 3672 struct ext4_ext_path *path) 3673 { 3674 struct ext4_extent *ex; 3675 ext4_lblk_t ee_block; 3676 unsigned int ee_len; 3677 int depth; 3678 int err = 0; 3679 3680 depth = ext_depth(inode); 3681 ex = path[depth].p_ext; 3682 ee_block = le32_to_cpu(ex->ee_block); 3683 ee_len = ext4_ext_get_actual_len(ex); 3684 3685 ext_debug("%s: inode %lu, logical" 3686 "block %llu, max_blocks %u\n", __func__, inode->i_ino, 3687 (unsigned long long)ee_block, ee_len); 3688 3689 if (ee_block != map->m_lblk || ee_len > map->m_len) { 3690 err = ext4_split_convert_extents(handle, inode, map, path, 3691 EXT4_GET_BLOCKS_CONVERT_UNWRITTEN); 3692 if (err < 0) 3693 goto out; 3694 ext4_ext_drop_refs(path); 3695 path = ext4_ext_find_extent(inode, map->m_lblk, path, 0); 3696 if (IS_ERR(path)) { 3697 err = PTR_ERR(path); 3698 goto out; 3699 } 3700 depth = ext_depth(inode); 3701 ex = path[depth].p_ext; 3702 if (!ex) { 3703 EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 3704 (unsigned long) map->m_lblk); 3705 err = -EIO; 3706 goto out; 3707 } 3708 } 3709 3710 err = ext4_ext_get_access(handle, inode, path + depth); 3711 if (err) 3712 goto out; 3713 /* first mark the extent as uninitialized */ 3714 ext4_ext_mark_uninitialized(ex); 3715 3716 /* note: ext4_ext_correct_indexes() isn't needed here because 3717 * borders are not changed 3718 */ 3719 ext4_ext_try_to_merge(handle, inode, path, ex); 3720 3721 /* Mark modified extent as dirty */ 3722 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3723 out: 3724 ext4_ext_show_leaf(inode, path); 3725 return err; 3726 } 3727 3728 3729 static int ext4_convert_unwritten_extents_endio(handle_t *handle, 3730 struct inode *inode, 3731 struct ext4_map_blocks *map, 3732 struct ext4_ext_path *path) 3733 { 3734 struct ext4_extent *ex; 3735 ext4_lblk_t ee_block; 3736 unsigned int ee_len; 3737 int depth; 3738 int err = 0; 3739 3740 depth = ext_depth(inode); 3741 ex = path[depth].p_ext; 3742 ee_block = le32_to_cpu(ex->ee_block); 3743 ee_len = ext4_ext_get_actual_len(ex); 3744 3745 ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical" 3746 "block %llu, max_blocks %u\n", inode->i_ino, 3747 (unsigned long long)ee_block, ee_len); 3748 3749 /* If extent is larger than requested it is a clear sign that we still 3750 * have some extent state machine issues left. So extent_split is still 3751 * required. 3752 * TODO: Once all related issues will be fixed this situation should be 3753 * illegal. 3754 */ 3755 if (ee_block != map->m_lblk || ee_len > map->m_len) { 3756 #ifdef EXT4_DEBUG 3757 ext4_warning("Inode (%ld) finished: extent logical block %llu," 3758 " len %u; IO logical block %llu, len %u\n", 3759 inode->i_ino, (unsigned long long)ee_block, ee_len, 3760 (unsigned long long)map->m_lblk, map->m_len); 3761 #endif 3762 err = ext4_split_convert_extents(handle, inode, map, path, 3763 EXT4_GET_BLOCKS_CONVERT); 3764 if (err < 0) 3765 goto out; 3766 ext4_ext_drop_refs(path); 3767 path = ext4_ext_find_extent(inode, map->m_lblk, path, 0); 3768 if (IS_ERR(path)) { 3769 err = PTR_ERR(path); 3770 goto out; 3771 } 3772 depth = ext_depth(inode); 3773 ex = path[depth].p_ext; 3774 } 3775 3776 err = ext4_ext_get_access(handle, inode, path + depth); 3777 if (err) 3778 goto out; 3779 /* first mark the extent as initialized */ 3780 ext4_ext_mark_initialized(ex); 3781 3782 /* note: ext4_ext_correct_indexes() isn't needed here because 3783 * borders are not changed 3784 */ 3785 ext4_ext_try_to_merge(handle, inode, path, ex); 3786 3787 /* Mark modified extent as dirty */ 3788 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3789 out: 3790 ext4_ext_show_leaf(inode, path); 3791 return err; 3792 } 3793 3794 static void unmap_underlying_metadata_blocks(struct block_device *bdev, 3795 sector_t block, int count) 3796 { 3797 int i; 3798 for (i = 0; i < count; i++) 3799 unmap_underlying_metadata(bdev, block + i); 3800 } 3801 3802 /* 3803 * Handle EOFBLOCKS_FL flag, clearing it if necessary 3804 */ 3805 static int check_eofblocks_fl(handle_t *handle, struct inode *inode, 3806 ext4_lblk_t lblk, 3807 struct ext4_ext_path *path, 3808 unsigned int len) 3809 { 3810 int i, depth; 3811 struct ext4_extent_header *eh; 3812 struct ext4_extent *last_ex; 3813 3814 if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)) 3815 return 0; 3816 3817 depth = ext_depth(inode); 3818 eh = path[depth].p_hdr; 3819 3820 /* 3821 * We're going to remove EOFBLOCKS_FL entirely in future so we 3822 * do not care for this case anymore. Simply remove the flag 3823 * if there are no extents. 3824 */ 3825 if (unlikely(!eh->eh_entries)) 3826 goto out; 3827 last_ex = EXT_LAST_EXTENT(eh); 3828 /* 3829 * We should clear the EOFBLOCKS_FL flag if we are writing the 3830 * last block in the last extent in the file. We test this by 3831 * first checking to see if the caller to 3832 * ext4_ext_get_blocks() was interested in the last block (or 3833 * a block beyond the last block) in the current extent. If 3834 * this turns out to be false, we can bail out from this 3835 * function immediately. 3836 */ 3837 if (lblk + len < le32_to_cpu(last_ex->ee_block) + 3838 ext4_ext_get_actual_len(last_ex)) 3839 return 0; 3840 /* 3841 * If the caller does appear to be planning to write at or 3842 * beyond the end of the current extent, we then test to see 3843 * if the current extent is the last extent in the file, by 3844 * checking to make sure it was reached via the rightmost node 3845 * at each level of the tree. 3846 */ 3847 for (i = depth-1; i >= 0; i--) 3848 if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr)) 3849 return 0; 3850 out: 3851 ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 3852 return ext4_mark_inode_dirty(handle, inode); 3853 } 3854 3855 /** 3856 * ext4_find_delalloc_range: find delayed allocated block in the given range. 3857 * 3858 * Return 1 if there is a delalloc block in the range, otherwise 0. 3859 */ 3860 int ext4_find_delalloc_range(struct inode *inode, 3861 ext4_lblk_t lblk_start, 3862 ext4_lblk_t lblk_end) 3863 { 3864 struct extent_status es; 3865 3866 ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es); 3867 if (es.es_len == 0) 3868 return 0; /* there is no delay extent in this tree */ 3869 else if (es.es_lblk <= lblk_start && 3870 lblk_start < es.es_lblk + es.es_len) 3871 return 1; 3872 else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end) 3873 return 1; 3874 else 3875 return 0; 3876 } 3877 3878 int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk) 3879 { 3880 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 3881 ext4_lblk_t lblk_start, lblk_end; 3882 lblk_start = EXT4_LBLK_CMASK(sbi, lblk); 3883 lblk_end = lblk_start + sbi->s_cluster_ratio - 1; 3884 3885 return ext4_find_delalloc_range(inode, lblk_start, lblk_end); 3886 } 3887 3888 /** 3889 * Determines how many complete clusters (out of those specified by the 'map') 3890 * are under delalloc and were reserved quota for. 3891 * This function is called when we are writing out the blocks that were 3892 * originally written with their allocation delayed, but then the space was 3893 * allocated using fallocate() before the delayed allocation could be resolved. 3894 * The cases to look for are: 3895 * ('=' indicated delayed allocated blocks 3896 * '-' indicates non-delayed allocated blocks) 3897 * (a) partial clusters towards beginning and/or end outside of allocated range 3898 * are not delalloc'ed. 3899 * Ex: 3900 * |----c---=|====c====|====c====|===-c----| 3901 * |++++++ allocated ++++++| 3902 * ==> 4 complete clusters in above example 3903 * 3904 * (b) partial cluster (outside of allocated range) towards either end is 3905 * marked for delayed allocation. In this case, we will exclude that 3906 * cluster. 3907 * Ex: 3908 * |----====c========|========c========| 3909 * |++++++ allocated ++++++| 3910 * ==> 1 complete clusters in above example 3911 * 3912 * Ex: 3913 * |================c================| 3914 * |++++++ allocated ++++++| 3915 * ==> 0 complete clusters in above example 3916 * 3917 * The ext4_da_update_reserve_space will be called only if we 3918 * determine here that there were some "entire" clusters that span 3919 * this 'allocated' range. 3920 * In the non-bigalloc case, this function will just end up returning num_blks 3921 * without ever calling ext4_find_delalloc_range. 3922 */ 3923 static unsigned int 3924 get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, 3925 unsigned int num_blks) 3926 { 3927 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 3928 ext4_lblk_t alloc_cluster_start, alloc_cluster_end; 3929 ext4_lblk_t lblk_from, lblk_to, c_offset; 3930 unsigned int allocated_clusters = 0; 3931 3932 alloc_cluster_start = EXT4_B2C(sbi, lblk_start); 3933 alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1); 3934 3935 /* max possible clusters for this allocation */ 3936 allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1; 3937 3938 trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks); 3939 3940 /* Check towards left side */ 3941 c_offset = EXT4_LBLK_COFF(sbi, lblk_start); 3942 if (c_offset) { 3943 lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start); 3944 lblk_to = lblk_from + c_offset - 1; 3945 3946 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) 3947 allocated_clusters--; 3948 } 3949 3950 /* Now check towards right. */ 3951 c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks); 3952 if (allocated_clusters && c_offset) { 3953 lblk_from = lblk_start + num_blks; 3954 lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1; 3955 3956 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) 3957 allocated_clusters--; 3958 } 3959 3960 return allocated_clusters; 3961 } 3962 3963 static int 3964 ext4_ext_convert_initialized_extent(handle_t *handle, struct inode *inode, 3965 struct ext4_map_blocks *map, 3966 struct ext4_ext_path *path, int flags, 3967 unsigned int allocated, ext4_fsblk_t newblock) 3968 { 3969 int ret = 0; 3970 int err = 0; 3971 3972 /* 3973 * Make sure that the extent is no bigger than we support with 3974 * uninitialized extent 3975 */ 3976 if (map->m_len > EXT_UNINIT_MAX_LEN) 3977 map->m_len = EXT_UNINIT_MAX_LEN / 2; 3978 3979 ret = ext4_convert_initialized_extents(handle, inode, map, 3980 path); 3981 if (ret >= 0) { 3982 ext4_update_inode_fsync_trans(handle, inode, 1); 3983 err = check_eofblocks_fl(handle, inode, map->m_lblk, 3984 path, map->m_len); 3985 } else 3986 err = ret; 3987 map->m_flags |= EXT4_MAP_UNWRITTEN; 3988 if (allocated > map->m_len) 3989 allocated = map->m_len; 3990 map->m_len = allocated; 3991 3992 return err ? err : allocated; 3993 } 3994 3995 static int 3996 ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, 3997 struct ext4_map_blocks *map, 3998 struct ext4_ext_path *path, int flags, 3999 unsigned int allocated, ext4_fsblk_t newblock) 4000 { 4001 int ret = 0; 4002 int err = 0; 4003 ext4_io_end_t *io = ext4_inode_aio(inode); 4004 4005 ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical " 4006 "block %llu, max_blocks %u, flags %x, allocated %u\n", 4007 inode->i_ino, (unsigned long long)map->m_lblk, map->m_len, 4008 flags, allocated); 4009 ext4_ext_show_leaf(inode, path); 4010 4011 /* 4012 * When writing into uninitialized space, we should not fail to 4013 * allocate metadata blocks for the new extent block if needed. 4014 */ 4015 flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL; 4016 4017 trace_ext4_ext_handle_uninitialized_extents(inode, map, flags, 4018 allocated, newblock); 4019 4020 /* get_block() before submit the IO, split the extent */ 4021 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { 4022 ret = ext4_split_convert_extents(handle, inode, map, 4023 path, flags | EXT4_GET_BLOCKS_CONVERT); 4024 if (ret <= 0) 4025 goto out; 4026 /* 4027 * Flag the inode(non aio case) or end_io struct (aio case) 4028 * that this IO needs to conversion to written when IO is 4029 * completed 4030 */ 4031 if (io) 4032 ext4_set_io_unwritten_flag(inode, io); 4033 else 4034 ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); 4035 map->m_flags |= EXT4_MAP_UNWRITTEN; 4036 if (ext4_should_dioread_nolock(inode)) 4037 map->m_flags |= EXT4_MAP_UNINIT; 4038 goto out; 4039 } 4040 /* IO end_io complete, convert the filled extent to written */ 4041 if ((flags & EXT4_GET_BLOCKS_CONVERT)) { 4042 ret = ext4_convert_unwritten_extents_endio(handle, inode, map, 4043 path); 4044 if (ret >= 0) { 4045 ext4_update_inode_fsync_trans(handle, inode, 1); 4046 err = check_eofblocks_fl(handle, inode, map->m_lblk, 4047 path, map->m_len); 4048 } else 4049 err = ret; 4050 map->m_flags |= EXT4_MAP_MAPPED; 4051 map->m_pblk = newblock; 4052 if (allocated > map->m_len) 4053 allocated = map->m_len; 4054 map->m_len = allocated; 4055 goto out2; 4056 } 4057 /* buffered IO case */ 4058 /* 4059 * repeat fallocate creation request 4060 * we already have an unwritten extent 4061 */ 4062 if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) { 4063 map->m_flags |= EXT4_MAP_UNWRITTEN; 4064 goto map_out; 4065 } 4066 4067 /* buffered READ or buffered write_begin() lookup */ 4068 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 4069 /* 4070 * We have blocks reserved already. We 4071 * return allocated blocks so that delalloc 4072 * won't do block reservation for us. But 4073 * the buffer head will be unmapped so that 4074 * a read from the block returns 0s. 4075 */ 4076 map->m_flags |= EXT4_MAP_UNWRITTEN; 4077 goto out1; 4078 } 4079 4080 /* buffered write, writepage time, convert*/ 4081 ret = ext4_ext_convert_to_initialized(handle, inode, map, path, flags); 4082 if (ret >= 0) 4083 ext4_update_inode_fsync_trans(handle, inode, 1); 4084 out: 4085 if (ret <= 0) { 4086 err = ret; 4087 goto out2; 4088 } else 4089 allocated = ret; 4090 map->m_flags |= EXT4_MAP_NEW; 4091 /* 4092 * if we allocated more blocks than requested 4093 * we need to make sure we unmap the extra block 4094 * allocated. The actual needed block will get 4095 * unmapped later when we find the buffer_head marked 4096 * new. 4097 */ 4098 if (allocated > map->m_len) { 4099 unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, 4100 newblock + map->m_len, 4101 allocated - map->m_len); 4102 allocated = map->m_len; 4103 } 4104 map->m_len = allocated; 4105 4106 /* 4107 * If we have done fallocate with the offset that is already 4108 * delayed allocated, we would have block reservation 4109 * and quota reservation done in the delayed write path. 4110 * But fallocate would have already updated quota and block 4111 * count for this offset. So cancel these reservation 4112 */ 4113 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { 4114 unsigned int reserved_clusters; 4115 reserved_clusters = get_reserved_cluster_alloc(inode, 4116 map->m_lblk, map->m_len); 4117 if (reserved_clusters) 4118 ext4_da_update_reserve_space(inode, 4119 reserved_clusters, 4120 0); 4121 } 4122 4123 map_out: 4124 map->m_flags |= EXT4_MAP_MAPPED; 4125 if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) { 4126 err = check_eofblocks_fl(handle, inode, map->m_lblk, path, 4127 map->m_len); 4128 if (err < 0) 4129 goto out2; 4130 } 4131 out1: 4132 if (allocated > map->m_len) 4133 allocated = map->m_len; 4134 ext4_ext_show_leaf(inode, path); 4135 map->m_pblk = newblock; 4136 map->m_len = allocated; 4137 out2: 4138 return err ? err : allocated; 4139 } 4140 4141 /* 4142 * get_implied_cluster_alloc - check to see if the requested 4143 * allocation (in the map structure) overlaps with a cluster already 4144 * allocated in an extent. 4145 * @sb The filesystem superblock structure 4146 * @map The requested lblk->pblk mapping 4147 * @ex The extent structure which might contain an implied 4148 * cluster allocation 4149 * 4150 * This function is called by ext4_ext_map_blocks() after we failed to 4151 * find blocks that were already in the inode's extent tree. Hence, 4152 * we know that the beginning of the requested region cannot overlap 4153 * the extent from the inode's extent tree. There are three cases we 4154 * want to catch. The first is this case: 4155 * 4156 * |--- cluster # N--| 4157 * |--- extent ---| |---- requested region ---| 4158 * |==========| 4159 * 4160 * The second case that we need to test for is this one: 4161 * 4162 * |--------- cluster # N ----------------| 4163 * |--- requested region --| |------- extent ----| 4164 * |=======================| 4165 * 4166 * The third case is when the requested region lies between two extents 4167 * within the same cluster: 4168 * |------------- cluster # N-------------| 4169 * |----- ex -----| |---- ex_right ----| 4170 * |------ requested region ------| 4171 * |================| 4172 * 4173 * In each of the above cases, we need to set the map->m_pblk and 4174 * map->m_len so it corresponds to the return the extent labelled as 4175 * "|====|" from cluster #N, since it is already in use for data in 4176 * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to 4177 * signal to ext4_ext_map_blocks() that map->m_pblk should be treated 4178 * as a new "allocated" block region. Otherwise, we will return 0 and 4179 * ext4_ext_map_blocks() will then allocate one or more new clusters 4180 * by calling ext4_mb_new_blocks(). 4181 */ 4182 static int get_implied_cluster_alloc(struct super_block *sb, 4183 struct ext4_map_blocks *map, 4184 struct ext4_extent *ex, 4185 struct ext4_ext_path *path) 4186 { 4187 struct ext4_sb_info *sbi = EXT4_SB(sb); 4188 ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 4189 ext4_lblk_t ex_cluster_start, ex_cluster_end; 4190 ext4_lblk_t rr_cluster_start; 4191 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 4192 ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 4193 unsigned short ee_len = ext4_ext_get_actual_len(ex); 4194 4195 /* The extent passed in that we are trying to match */ 4196 ex_cluster_start = EXT4_B2C(sbi, ee_block); 4197 ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1); 4198 4199 /* The requested region passed into ext4_map_blocks() */ 4200 rr_cluster_start = EXT4_B2C(sbi, map->m_lblk); 4201 4202 if ((rr_cluster_start == ex_cluster_end) || 4203 (rr_cluster_start == ex_cluster_start)) { 4204 if (rr_cluster_start == ex_cluster_end) 4205 ee_start += ee_len - 1; 4206 map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset; 4207 map->m_len = min(map->m_len, 4208 (unsigned) sbi->s_cluster_ratio - c_offset); 4209 /* 4210 * Check for and handle this case: 4211 * 4212 * |--------- cluster # N-------------| 4213 * |------- extent ----| 4214 * |--- requested region ---| 4215 * |===========| 4216 */ 4217 4218 if (map->m_lblk < ee_block) 4219 map->m_len = min(map->m_len, ee_block - map->m_lblk); 4220 4221 /* 4222 * Check for the case where there is already another allocated 4223 * block to the right of 'ex' but before the end of the cluster. 4224 * 4225 * |------------- cluster # N-------------| 4226 * |----- ex -----| |---- ex_right ----| 4227 * |------ requested region ------| 4228 * |================| 4229 */ 4230 if (map->m_lblk > ee_block) { 4231 ext4_lblk_t next = ext4_ext_next_allocated_block(path); 4232 map->m_len = min(map->m_len, next - map->m_lblk); 4233 } 4234 4235 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1); 4236 return 1; 4237 } 4238 4239 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0); 4240 return 0; 4241 } 4242 4243 4244 /* 4245 * Block allocation/map/preallocation routine for extents based files 4246 * 4247 * 4248 * Need to be called with 4249 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block 4250 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem) 4251 * 4252 * return > 0, number of of blocks already mapped/allocated 4253 * if create == 0 and these are pre-allocated blocks 4254 * buffer head is unmapped 4255 * otherwise blocks are mapped 4256 * 4257 * return = 0, if plain look up failed (blocks have not been allocated) 4258 * buffer head is unmapped 4259 * 4260 * return < 0, error case. 4261 */ 4262 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, 4263 struct ext4_map_blocks *map, int flags) 4264 { 4265 struct ext4_ext_path *path = NULL; 4266 struct ext4_extent newex, *ex, *ex2; 4267 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4268 ext4_fsblk_t newblock = 0; 4269 int free_on_err = 0, err = 0, depth, ret; 4270 unsigned int allocated = 0, offset = 0; 4271 unsigned int allocated_clusters = 0; 4272 struct ext4_allocation_request ar; 4273 ext4_io_end_t *io = ext4_inode_aio(inode); 4274 ext4_lblk_t cluster_offset; 4275 int set_unwritten = 0; 4276 4277 ext_debug("blocks %u/%u requested for inode %lu\n", 4278 map->m_lblk, map->m_len, inode->i_ino); 4279 trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); 4280 4281 /* find extent for this block */ 4282 path = ext4_ext_find_extent(inode, map->m_lblk, NULL, 0); 4283 if (IS_ERR(path)) { 4284 err = PTR_ERR(path); 4285 path = NULL; 4286 goto out2; 4287 } 4288 4289 depth = ext_depth(inode); 4290 4291 /* 4292 * consistent leaf must not be empty; 4293 * this situation is possible, though, _during_ tree modification; 4294 * this is why assert can't be put in ext4_ext_find_extent() 4295 */ 4296 if (unlikely(path[depth].p_ext == NULL && depth != 0)) { 4297 EXT4_ERROR_INODE(inode, "bad extent address " 4298 "lblock: %lu, depth: %d pblock %lld", 4299 (unsigned long) map->m_lblk, depth, 4300 path[depth].p_block); 4301 err = -EIO; 4302 goto out2; 4303 } 4304 4305 ex = path[depth].p_ext; 4306 if (ex) { 4307 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 4308 ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 4309 unsigned short ee_len; 4310 4311 4312 /* 4313 * Uninitialized extents are treated as holes, except that 4314 * we split out initialized portions during a write. 4315 */ 4316 ee_len = ext4_ext_get_actual_len(ex); 4317 4318 trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len); 4319 4320 /* if found extent covers block, simply return it */ 4321 if (in_range(map->m_lblk, ee_block, ee_len)) { 4322 newblock = map->m_lblk - ee_block + ee_start; 4323 /* number of remaining blocks in the extent */ 4324 allocated = ee_len - (map->m_lblk - ee_block); 4325 ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, 4326 ee_block, ee_len, newblock); 4327 4328 /* 4329 * If the extent is initialized check whether the 4330 * caller wants to convert it to unwritten. 4331 */ 4332 if ((!ext4_ext_is_uninitialized(ex)) && 4333 (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) { 4334 allocated = ext4_ext_convert_initialized_extent( 4335 handle, inode, map, path, flags, 4336 allocated, newblock); 4337 goto out2; 4338 } else if (!ext4_ext_is_uninitialized(ex)) 4339 goto out; 4340 4341 ret = ext4_ext_handle_uninitialized_extents( 4342 handle, inode, map, path, flags, 4343 allocated, newblock); 4344 if (ret < 0) 4345 err = ret; 4346 else 4347 allocated = ret; 4348 goto out2; 4349 } 4350 } 4351 4352 if ((sbi->s_cluster_ratio > 1) && 4353 ext4_find_delalloc_cluster(inode, map->m_lblk)) 4354 map->m_flags |= EXT4_MAP_FROM_CLUSTER; 4355 4356 /* 4357 * requested block isn't allocated yet; 4358 * we couldn't try to create block if create flag is zero 4359 */ 4360 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 4361 /* 4362 * put just found gap into cache to speed up 4363 * subsequent requests 4364 */ 4365 if ((flags & EXT4_GET_BLOCKS_NO_PUT_HOLE) == 0) 4366 ext4_ext_put_gap_in_cache(inode, path, map->m_lblk); 4367 goto out2; 4368 } 4369 4370 /* 4371 * Okay, we need to do block allocation. 4372 */ 4373 map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; 4374 newex.ee_block = cpu_to_le32(map->m_lblk); 4375 cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 4376 4377 /* 4378 * If we are doing bigalloc, check to see if the extent returned 4379 * by ext4_ext_find_extent() implies a cluster we can use. 4380 */ 4381 if (cluster_offset && ex && 4382 get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { 4383 ar.len = allocated = map->m_len; 4384 newblock = map->m_pblk; 4385 map->m_flags |= EXT4_MAP_FROM_CLUSTER; 4386 goto got_allocated_blocks; 4387 } 4388 4389 /* find neighbour allocated blocks */ 4390 ar.lleft = map->m_lblk; 4391 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); 4392 if (err) 4393 goto out2; 4394 ar.lright = map->m_lblk; 4395 ex2 = NULL; 4396 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); 4397 if (err) 4398 goto out2; 4399 4400 /* Check if the extent after searching to the right implies a 4401 * cluster we can use. */ 4402 if ((sbi->s_cluster_ratio > 1) && ex2 && 4403 get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) { 4404 ar.len = allocated = map->m_len; 4405 newblock = map->m_pblk; 4406 map->m_flags |= EXT4_MAP_FROM_CLUSTER; 4407 goto got_allocated_blocks; 4408 } 4409 4410 /* 4411 * See if request is beyond maximum number of blocks we can have in 4412 * a single extent. For an initialized extent this limit is 4413 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is 4414 * EXT_UNINIT_MAX_LEN. 4415 */ 4416 if (map->m_len > EXT_INIT_MAX_LEN && 4417 !(flags & EXT4_GET_BLOCKS_UNINIT_EXT)) 4418 map->m_len = EXT_INIT_MAX_LEN; 4419 else if (map->m_len > EXT_UNINIT_MAX_LEN && 4420 (flags & EXT4_GET_BLOCKS_UNINIT_EXT)) 4421 map->m_len = EXT_UNINIT_MAX_LEN; 4422 4423 /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ 4424 newex.ee_len = cpu_to_le16(map->m_len); 4425 err = ext4_ext_check_overlap(sbi, inode, &newex, path); 4426 if (err) 4427 allocated = ext4_ext_get_actual_len(&newex); 4428 else 4429 allocated = map->m_len; 4430 4431 /* allocate new block */ 4432 ar.inode = inode; 4433 ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); 4434 ar.logical = map->m_lblk; 4435 /* 4436 * We calculate the offset from the beginning of the cluster 4437 * for the logical block number, since when we allocate a 4438 * physical cluster, the physical block should start at the 4439 * same offset from the beginning of the cluster. This is 4440 * needed so that future calls to get_implied_cluster_alloc() 4441 * work correctly. 4442 */ 4443 offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 4444 ar.len = EXT4_NUM_B2C(sbi, offset+allocated); 4445 ar.goal -= offset; 4446 ar.logical -= offset; 4447 if (S_ISREG(inode->i_mode)) 4448 ar.flags = EXT4_MB_HINT_DATA; 4449 else 4450 /* disable in-core preallocation for non-regular files */ 4451 ar.flags = 0; 4452 if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE) 4453 ar.flags |= EXT4_MB_HINT_NOPREALLOC; 4454 newblock = ext4_mb_new_blocks(handle, &ar, &err); 4455 if (!newblock) 4456 goto out2; 4457 ext_debug("allocate new block: goal %llu, found %llu/%u\n", 4458 ar.goal, newblock, allocated); 4459 free_on_err = 1; 4460 allocated_clusters = ar.len; 4461 ar.len = EXT4_C2B(sbi, ar.len) - offset; 4462 if (ar.len > allocated) 4463 ar.len = allocated; 4464 4465 got_allocated_blocks: 4466 /* try to insert new extent into found leaf and return */ 4467 ext4_ext_store_pblock(&newex, newblock + offset); 4468 newex.ee_len = cpu_to_le16(ar.len); 4469 /* Mark uninitialized */ 4470 if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){ 4471 ext4_ext_mark_uninitialized(&newex); 4472 map->m_flags |= EXT4_MAP_UNWRITTEN; 4473 /* 4474 * io_end structure was created for every IO write to an 4475 * uninitialized extent. To avoid unnecessary conversion, 4476 * here we flag the IO that really needs the conversion. 4477 * For non asycn direct IO case, flag the inode state 4478 * that we need to perform conversion when IO is done. 4479 */ 4480 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) 4481 set_unwritten = 1; 4482 if (ext4_should_dioread_nolock(inode)) 4483 map->m_flags |= EXT4_MAP_UNINIT; 4484 } 4485 4486 err = 0; 4487 if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) 4488 err = check_eofblocks_fl(handle, inode, map->m_lblk, 4489 path, ar.len); 4490 if (!err) 4491 err = ext4_ext_insert_extent(handle, inode, path, 4492 &newex, flags); 4493 4494 if (!err && set_unwritten) { 4495 if (io) 4496 ext4_set_io_unwritten_flag(inode, io); 4497 else 4498 ext4_set_inode_state(inode, 4499 EXT4_STATE_DIO_UNWRITTEN); 4500 } 4501 4502 if (err && free_on_err) { 4503 int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ? 4504 EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0; 4505 /* free data blocks we just allocated */ 4506 /* not a good idea to call discard here directly, 4507 * but otherwise we'd need to call it every free() */ 4508 ext4_discard_preallocations(inode); 4509 ext4_free_blocks(handle, inode, NULL, newblock, 4510 EXT4_C2B(sbi, allocated_clusters), fb_flags); 4511 goto out2; 4512 } 4513 4514 /* previous routine could use block we allocated */ 4515 newblock = ext4_ext_pblock(&newex); 4516 allocated = ext4_ext_get_actual_len(&newex); 4517 if (allocated > map->m_len) 4518 allocated = map->m_len; 4519 map->m_flags |= EXT4_MAP_NEW; 4520 4521 /* 4522 * Update reserved blocks/metadata blocks after successful 4523 * block allocation which had been deferred till now. 4524 */ 4525 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { 4526 unsigned int reserved_clusters; 4527 /* 4528 * Check how many clusters we had reserved this allocated range 4529 */ 4530 reserved_clusters = get_reserved_cluster_alloc(inode, 4531 map->m_lblk, allocated); 4532 if (map->m_flags & EXT4_MAP_FROM_CLUSTER) { 4533 if (reserved_clusters) { 4534 /* 4535 * We have clusters reserved for this range. 4536 * But since we are not doing actual allocation 4537 * and are simply using blocks from previously 4538 * allocated cluster, we should release the 4539 * reservation and not claim quota. 4540 */ 4541 ext4_da_update_reserve_space(inode, 4542 reserved_clusters, 0); 4543 } 4544 } else { 4545 BUG_ON(allocated_clusters < reserved_clusters); 4546 if (reserved_clusters < allocated_clusters) { 4547 struct ext4_inode_info *ei = EXT4_I(inode); 4548 int reservation = allocated_clusters - 4549 reserved_clusters; 4550 /* 4551 * It seems we claimed few clusters outside of 4552 * the range of this allocation. We should give 4553 * it back to the reservation pool. This can 4554 * happen in the following case: 4555 * 4556 * * Suppose s_cluster_ratio is 4 (i.e., each 4557 * cluster has 4 blocks. Thus, the clusters 4558 * are [0-3],[4-7],[8-11]... 4559 * * First comes delayed allocation write for 4560 * logical blocks 10 & 11. Since there were no 4561 * previous delayed allocated blocks in the 4562 * range [8-11], we would reserve 1 cluster 4563 * for this write. 4564 * * Next comes write for logical blocks 3 to 8. 4565 * In this case, we will reserve 2 clusters 4566 * (for [0-3] and [4-7]; and not for [8-11] as 4567 * that range has a delayed allocated blocks. 4568 * Thus total reserved clusters now becomes 3. 4569 * * Now, during the delayed allocation writeout 4570 * time, we will first write blocks [3-8] and 4571 * allocate 3 clusters for writing these 4572 * blocks. Also, we would claim all these 4573 * three clusters above. 4574 * * Now when we come here to writeout the 4575 * blocks [10-11], we would expect to claim 4576 * the reservation of 1 cluster we had made 4577 * (and we would claim it since there are no 4578 * more delayed allocated blocks in the range 4579 * [8-11]. But our reserved cluster count had 4580 * already gone to 0. 4581 * 4582 * Thus, at the step 4 above when we determine 4583 * that there are still some unwritten delayed 4584 * allocated blocks outside of our current 4585 * block range, we should increment the 4586 * reserved clusters count so that when the 4587 * remaining blocks finally gets written, we 4588 * could claim them. 4589 */ 4590 dquot_reserve_block(inode, 4591 EXT4_C2B(sbi, reservation)); 4592 spin_lock(&ei->i_block_reservation_lock); 4593 ei->i_reserved_data_blocks += reservation; 4594 spin_unlock(&ei->i_block_reservation_lock); 4595 } 4596 /* 4597 * We will claim quota for all newly allocated blocks. 4598 * We're updating the reserved space *after* the 4599 * correction above so we do not accidentally free 4600 * all the metadata reservation because we might 4601 * actually need it later on. 4602 */ 4603 ext4_da_update_reserve_space(inode, allocated_clusters, 4604 1); 4605 } 4606 } 4607 4608 /* 4609 * Cache the extent and update transaction to commit on fdatasync only 4610 * when it is _not_ an uninitialized extent. 4611 */ 4612 if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) 4613 ext4_update_inode_fsync_trans(handle, inode, 1); 4614 else 4615 ext4_update_inode_fsync_trans(handle, inode, 0); 4616 out: 4617 if (allocated > map->m_len) 4618 allocated = map->m_len; 4619 ext4_ext_show_leaf(inode, path); 4620 map->m_flags |= EXT4_MAP_MAPPED; 4621 map->m_pblk = newblock; 4622 map->m_len = allocated; 4623 out2: 4624 if (path) { 4625 ext4_ext_drop_refs(path); 4626 kfree(path); 4627 } 4628 4629 trace_ext4_ext_map_blocks_exit(inode, flags, map, 4630 err ? err : allocated); 4631 ext4_es_lru_add(inode); 4632 return err ? err : allocated; 4633 } 4634 4635 void ext4_ext_truncate(handle_t *handle, struct inode *inode) 4636 { 4637 struct super_block *sb = inode->i_sb; 4638 ext4_lblk_t last_block; 4639 int err = 0; 4640 4641 /* 4642 * TODO: optimization is possible here. 4643 * Probably we need not scan at all, 4644 * because page truncation is enough. 4645 */ 4646 4647 /* we have to know where to truncate from in crash case */ 4648 EXT4_I(inode)->i_disksize = inode->i_size; 4649 ext4_mark_inode_dirty(handle, inode); 4650 4651 last_block = (inode->i_size + sb->s_blocksize - 1) 4652 >> EXT4_BLOCK_SIZE_BITS(sb); 4653 retry: 4654 err = ext4_es_remove_extent(inode, last_block, 4655 EXT_MAX_BLOCKS - last_block); 4656 if (err == -ENOMEM) { 4657 cond_resched(); 4658 congestion_wait(BLK_RW_ASYNC, HZ/50); 4659 goto retry; 4660 } 4661 if (err) { 4662 ext4_std_error(inode->i_sb, err); 4663 return; 4664 } 4665 err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1); 4666 ext4_std_error(inode->i_sb, err); 4667 } 4668 4669 static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset, 4670 ext4_lblk_t len, int flags, int mode) 4671 { 4672 struct inode *inode = file_inode(file); 4673 handle_t *handle; 4674 int ret = 0; 4675 int ret2 = 0; 4676 int retries = 0; 4677 struct ext4_map_blocks map; 4678 unsigned int credits; 4679 4680 map.m_lblk = offset; 4681 /* 4682 * Don't normalize the request if it can fit in one extent so 4683 * that it doesn't get unnecessarily split into multiple 4684 * extents. 4685 */ 4686 if (len <= EXT_UNINIT_MAX_LEN) 4687 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; 4688 4689 /* 4690 * credits to insert 1 extent into extent tree 4691 */ 4692 credits = ext4_chunk_trans_blocks(inode, len); 4693 4694 retry: 4695 while (ret >= 0 && ret < len) { 4696 map.m_lblk = map.m_lblk + ret; 4697 map.m_len = len = len - ret; 4698 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 4699 credits); 4700 if (IS_ERR(handle)) { 4701 ret = PTR_ERR(handle); 4702 break; 4703 } 4704 ret = ext4_map_blocks(handle, inode, &map, flags); 4705 if (ret <= 0) { 4706 ext4_debug("inode #%lu: block %u: len %u: " 4707 "ext4_ext_map_blocks returned %d", 4708 inode->i_ino, map.m_lblk, 4709 map.m_len, ret); 4710 ext4_mark_inode_dirty(handle, inode); 4711 ret2 = ext4_journal_stop(handle); 4712 break; 4713 } 4714 ret2 = ext4_journal_stop(handle); 4715 if (ret2) 4716 break; 4717 } 4718 if (ret == -ENOSPC && 4719 ext4_should_retry_alloc(inode->i_sb, &retries)) { 4720 ret = 0; 4721 goto retry; 4722 } 4723 4724 return ret > 0 ? ret2 : ret; 4725 } 4726 4727 static long ext4_zero_range(struct file *file, loff_t offset, 4728 loff_t len, int mode) 4729 { 4730 struct inode *inode = file_inode(file); 4731 handle_t *handle = NULL; 4732 unsigned int max_blocks; 4733 loff_t new_size = 0; 4734 int ret = 0; 4735 int flags; 4736 int partial; 4737 loff_t start, end; 4738 ext4_lblk_t lblk; 4739 struct address_space *mapping = inode->i_mapping; 4740 unsigned int blkbits = inode->i_blkbits; 4741 4742 trace_ext4_zero_range(inode, offset, len, mode); 4743 4744 if (!S_ISREG(inode->i_mode)) 4745 return -EINVAL; 4746 4747 /* 4748 * Write out all dirty pages to avoid race conditions 4749 * Then release them. 4750 */ 4751 if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { 4752 ret = filemap_write_and_wait_range(mapping, offset, 4753 offset + len - 1); 4754 if (ret) 4755 return ret; 4756 } 4757 4758 /* 4759 * Round up offset. This is not fallocate, we neet to zero out 4760 * blocks, so convert interior block aligned part of the range to 4761 * unwritten and possibly manually zero out unaligned parts of the 4762 * range. 4763 */ 4764 start = round_up(offset, 1 << blkbits); 4765 end = round_down((offset + len), 1 << blkbits); 4766 4767 if (start < offset || end > offset + len) 4768 return -EINVAL; 4769 partial = (offset + len) & ((1 << blkbits) - 1); 4770 4771 lblk = start >> blkbits; 4772 max_blocks = (end >> blkbits); 4773 if (max_blocks < lblk) 4774 max_blocks = 0; 4775 else 4776 max_blocks -= lblk; 4777 4778 flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT | 4779 EXT4_GET_BLOCKS_CONVERT_UNWRITTEN; 4780 if (mode & FALLOC_FL_KEEP_SIZE) 4781 flags |= EXT4_GET_BLOCKS_KEEP_SIZE; 4782 4783 mutex_lock(&inode->i_mutex); 4784 4785 /* 4786 * Indirect files do not support unwritten extnets 4787 */ 4788 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 4789 ret = -EOPNOTSUPP; 4790 goto out_mutex; 4791 } 4792 4793 if (!(mode & FALLOC_FL_KEEP_SIZE) && 4794 offset + len > i_size_read(inode)) { 4795 new_size = offset + len; 4796 ret = inode_newsize_ok(inode, new_size); 4797 if (ret) 4798 goto out_mutex; 4799 /* 4800 * If we have a partial block after EOF we have to allocate 4801 * the entire block. 4802 */ 4803 if (partial) 4804 max_blocks += 1; 4805 } 4806 4807 if (max_blocks > 0) { 4808 4809 /* Now release the pages and zero block aligned part of pages*/ 4810 truncate_pagecache_range(inode, start, end - 1); 4811 4812 /* Wait all existing dio workers, newcomers will block on i_mutex */ 4813 ext4_inode_block_unlocked_dio(inode); 4814 inode_dio_wait(inode); 4815 4816 /* 4817 * Remove entire range from the extent status tree. 4818 */ 4819 ret = ext4_es_remove_extent(inode, lblk, max_blocks); 4820 if (ret) 4821 goto out_dio; 4822 4823 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, flags, 4824 mode); 4825 if (ret) 4826 goto out_dio; 4827 } 4828 4829 handle = ext4_journal_start(inode, EXT4_HT_MISC, 4); 4830 if (IS_ERR(handle)) { 4831 ret = PTR_ERR(handle); 4832 ext4_std_error(inode->i_sb, ret); 4833 goto out_dio; 4834 } 4835 4836 inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 4837 4838 if (new_size) { 4839 if (new_size > i_size_read(inode)) 4840 i_size_write(inode, new_size); 4841 if (new_size > EXT4_I(inode)->i_disksize) 4842 ext4_update_i_disksize(inode, new_size); 4843 } else { 4844 /* 4845 * Mark that we allocate beyond EOF so the subsequent truncate 4846 * can proceed even if the new size is the same as i_size. 4847 */ 4848 if ((offset + len) > i_size_read(inode)) 4849 ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 4850 } 4851 4852 ext4_mark_inode_dirty(handle, inode); 4853 4854 /* Zero out partial block at the edges of the range */ 4855 ret = ext4_zero_partial_blocks(handle, inode, offset, len); 4856 4857 if (file->f_flags & O_SYNC) 4858 ext4_handle_sync(handle); 4859 4860 ext4_journal_stop(handle); 4861 out_dio: 4862 ext4_inode_resume_unlocked_dio(inode); 4863 out_mutex: 4864 mutex_unlock(&inode->i_mutex); 4865 return ret; 4866 } 4867 4868 /* 4869 * preallocate space for a file. This implements ext4's fallocate file 4870 * operation, which gets called from sys_fallocate system call. 4871 * For block-mapped files, posix_fallocate should fall back to the method 4872 * of writing zeroes to the required new blocks (the same behavior which is 4873 * expected for file systems which do not support fallocate() system call). 4874 */ 4875 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) 4876 { 4877 struct inode *inode = file_inode(file); 4878 handle_t *handle; 4879 loff_t new_size = 0; 4880 unsigned int max_blocks; 4881 int ret = 0; 4882 int flags; 4883 ext4_lblk_t lblk; 4884 struct timespec tv; 4885 unsigned int blkbits = inode->i_blkbits; 4886 4887 /* Return error if mode is not supported */ 4888 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | 4889 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE)) 4890 return -EOPNOTSUPP; 4891 4892 if (mode & FALLOC_FL_PUNCH_HOLE) 4893 return ext4_punch_hole(inode, offset, len); 4894 4895 ret = ext4_convert_inline_data(inode); 4896 if (ret) 4897 return ret; 4898 4899 /* 4900 * currently supporting (pre)allocate mode for extent-based 4901 * files _only_ 4902 */ 4903 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 4904 return -EOPNOTSUPP; 4905 4906 if (mode & FALLOC_FL_COLLAPSE_RANGE) 4907 return ext4_collapse_range(inode, offset, len); 4908 4909 if (mode & FALLOC_FL_ZERO_RANGE) 4910 return ext4_zero_range(file, offset, len, mode); 4911 4912 trace_ext4_fallocate_enter(inode, offset, len, mode); 4913 lblk = offset >> blkbits; 4914 /* 4915 * We can't just convert len to max_blocks because 4916 * If blocksize = 4096 offset = 3072 and len = 2048 4917 */ 4918 max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) 4919 - lblk; 4920 4921 flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT; 4922 if (mode & FALLOC_FL_KEEP_SIZE) 4923 flags |= EXT4_GET_BLOCKS_KEEP_SIZE; 4924 4925 mutex_lock(&inode->i_mutex); 4926 4927 if (!(mode & FALLOC_FL_KEEP_SIZE) && 4928 offset + len > i_size_read(inode)) { 4929 new_size = offset + len; 4930 ret = inode_newsize_ok(inode, new_size); 4931 if (ret) 4932 goto out; 4933 } 4934 4935 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, flags, mode); 4936 if (ret) 4937 goto out; 4938 4939 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); 4940 if (IS_ERR(handle)) 4941 goto out; 4942 4943 tv = inode->i_ctime = ext4_current_time(inode); 4944 4945 if (new_size) { 4946 if (new_size > i_size_read(inode)) { 4947 i_size_write(inode, new_size); 4948 inode->i_mtime = tv; 4949 } 4950 if (new_size > EXT4_I(inode)->i_disksize) 4951 ext4_update_i_disksize(inode, new_size); 4952 } else { 4953 /* 4954 * Mark that we allocate beyond EOF so the subsequent truncate 4955 * can proceed even if the new size is the same as i_size. 4956 */ 4957 if ((offset + len) > i_size_read(inode)) 4958 ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 4959 } 4960 ext4_mark_inode_dirty(handle, inode); 4961 if (file->f_flags & O_SYNC) 4962 ext4_handle_sync(handle); 4963 4964 ext4_journal_stop(handle); 4965 out: 4966 mutex_unlock(&inode->i_mutex); 4967 trace_ext4_fallocate_exit(inode, offset, max_blocks, ret); 4968 return ret; 4969 } 4970 4971 /* 4972 * This function convert a range of blocks to written extents 4973 * The caller of this function will pass the start offset and the size. 4974 * all unwritten extents within this range will be converted to 4975 * written extents. 4976 * 4977 * This function is called from the direct IO end io call back 4978 * function, to convert the fallocated extents after IO is completed. 4979 * Returns 0 on success. 4980 */ 4981 int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode, 4982 loff_t offset, ssize_t len) 4983 { 4984 unsigned int max_blocks; 4985 int ret = 0; 4986 int ret2 = 0; 4987 struct ext4_map_blocks map; 4988 unsigned int credits, blkbits = inode->i_blkbits; 4989 4990 map.m_lblk = offset >> blkbits; 4991 /* 4992 * We can't just convert len to max_blocks because 4993 * If blocksize = 4096 offset = 3072 and len = 2048 4994 */ 4995 max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) - 4996 map.m_lblk); 4997 /* 4998 * This is somewhat ugly but the idea is clear: When transaction is 4999 * reserved, everything goes into it. Otherwise we rather start several 5000 * smaller transactions for conversion of each extent separately. 5001 */ 5002 if (handle) { 5003 handle = ext4_journal_start_reserved(handle, 5004 EXT4_HT_EXT_CONVERT); 5005 if (IS_ERR(handle)) 5006 return PTR_ERR(handle); 5007 credits = 0; 5008 } else { 5009 /* 5010 * credits to insert 1 extent into extent tree 5011 */ 5012 credits = ext4_chunk_trans_blocks(inode, max_blocks); 5013 } 5014 while (ret >= 0 && ret < max_blocks) { 5015 map.m_lblk += ret; 5016 map.m_len = (max_blocks -= ret); 5017 if (credits) { 5018 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 5019 credits); 5020 if (IS_ERR(handle)) { 5021 ret = PTR_ERR(handle); 5022 break; 5023 } 5024 } 5025 ret = ext4_map_blocks(handle, inode, &map, 5026 EXT4_GET_BLOCKS_IO_CONVERT_EXT); 5027 if (ret <= 0) 5028 ext4_warning(inode->i_sb, 5029 "inode #%lu: block %u: len %u: " 5030 "ext4_ext_map_blocks returned %d", 5031 inode->i_ino, map.m_lblk, 5032 map.m_len, ret); 5033 ext4_mark_inode_dirty(handle, inode); 5034 if (credits) 5035 ret2 = ext4_journal_stop(handle); 5036 if (ret <= 0 || ret2) 5037 break; 5038 } 5039 if (!credits) 5040 ret2 = ext4_journal_stop(handle); 5041 return ret > 0 ? ret2 : ret; 5042 } 5043 5044 /* 5045 * If newes is not existing extent (newes->ec_pblk equals zero) find 5046 * delayed extent at start of newes and update newes accordingly and 5047 * return start of the next delayed extent. 5048 * 5049 * If newes is existing extent (newes->ec_pblk is not equal zero) 5050 * return start of next delayed extent or EXT_MAX_BLOCKS if no delayed 5051 * extent found. Leave newes unmodified. 5052 */ 5053 static int ext4_find_delayed_extent(struct inode *inode, 5054 struct extent_status *newes) 5055 { 5056 struct extent_status es; 5057 ext4_lblk_t block, next_del; 5058 5059 if (newes->es_pblk == 0) { 5060 ext4_es_find_delayed_extent_range(inode, newes->es_lblk, 5061 newes->es_lblk + newes->es_len - 1, &es); 5062 5063 /* 5064 * No extent in extent-tree contains block @newes->es_pblk, 5065 * then the block may stay in 1)a hole or 2)delayed-extent. 5066 */ 5067 if (es.es_len == 0) 5068 /* A hole found. */ 5069 return 0; 5070 5071 if (es.es_lblk > newes->es_lblk) { 5072 /* A hole found. */ 5073 newes->es_len = min(es.es_lblk - newes->es_lblk, 5074 newes->es_len); 5075 return 0; 5076 } 5077 5078 newes->es_len = es.es_lblk + es.es_len - newes->es_lblk; 5079 } 5080 5081 block = newes->es_lblk + newes->es_len; 5082 ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es); 5083 if (es.es_len == 0) 5084 next_del = EXT_MAX_BLOCKS; 5085 else 5086 next_del = es.es_lblk; 5087 5088 return next_del; 5089 } 5090 /* fiemap flags we can handle specified here */ 5091 #define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) 5092 5093 static int ext4_xattr_fiemap(struct inode *inode, 5094 struct fiemap_extent_info *fieinfo) 5095 { 5096 __u64 physical = 0; 5097 __u64 length; 5098 __u32 flags = FIEMAP_EXTENT_LAST; 5099 int blockbits = inode->i_sb->s_blocksize_bits; 5100 int error = 0; 5101 5102 /* in-inode? */ 5103 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { 5104 struct ext4_iloc iloc; 5105 int offset; /* offset of xattr in inode */ 5106 5107 error = ext4_get_inode_loc(inode, &iloc); 5108 if (error) 5109 return error; 5110 physical = (__u64)iloc.bh->b_blocknr << blockbits; 5111 offset = EXT4_GOOD_OLD_INODE_SIZE + 5112 EXT4_I(inode)->i_extra_isize; 5113 physical += offset; 5114 length = EXT4_SB(inode->i_sb)->s_inode_size - offset; 5115 flags |= FIEMAP_EXTENT_DATA_INLINE; 5116 brelse(iloc.bh); 5117 } else { /* external block */ 5118 physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits; 5119 length = inode->i_sb->s_blocksize; 5120 } 5121 5122 if (physical) 5123 error = fiemap_fill_next_extent(fieinfo, 0, physical, 5124 length, flags); 5125 return (error < 0 ? error : 0); 5126 } 5127 5128 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 5129 __u64 start, __u64 len) 5130 { 5131 ext4_lblk_t start_blk; 5132 int error = 0; 5133 5134 if (ext4_has_inline_data(inode)) { 5135 int has_inline = 1; 5136 5137 error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline); 5138 5139 if (has_inline) 5140 return error; 5141 } 5142 5143 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { 5144 error = ext4_ext_precache(inode); 5145 if (error) 5146 return error; 5147 } 5148 5149 /* fallback to generic here if not in extents fmt */ 5150 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 5151 return generic_block_fiemap(inode, fieinfo, start, len, 5152 ext4_get_block); 5153 5154 if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS)) 5155 return -EBADR; 5156 5157 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { 5158 error = ext4_xattr_fiemap(inode, fieinfo); 5159 } else { 5160 ext4_lblk_t len_blks; 5161 __u64 last_blk; 5162 5163 start_blk = start >> inode->i_sb->s_blocksize_bits; 5164 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; 5165 if (last_blk >= EXT_MAX_BLOCKS) 5166 last_blk = EXT_MAX_BLOCKS-1; 5167 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; 5168 5169 /* 5170 * Walk the extent tree gathering extent information 5171 * and pushing extents back to the user. 5172 */ 5173 error = ext4_fill_fiemap_extents(inode, start_blk, 5174 len_blks, fieinfo); 5175 } 5176 ext4_es_lru_add(inode); 5177 return error; 5178 } 5179 5180 /* 5181 * ext4_access_path: 5182 * Function to access the path buffer for marking it dirty. 5183 * It also checks if there are sufficient credits left in the journal handle 5184 * to update path. 5185 */ 5186 static int 5187 ext4_access_path(handle_t *handle, struct inode *inode, 5188 struct ext4_ext_path *path) 5189 { 5190 int credits, err; 5191 5192 if (!ext4_handle_valid(handle)) 5193 return 0; 5194 5195 /* 5196 * Check if need to extend journal credits 5197 * 3 for leaf, sb, and inode plus 2 (bmap and group 5198 * descriptor) for each block group; assume two block 5199 * groups 5200 */ 5201 if (handle->h_buffer_credits < 7) { 5202 credits = ext4_writepage_trans_blocks(inode); 5203 err = ext4_ext_truncate_extend_restart(handle, inode, credits); 5204 /* EAGAIN is success */ 5205 if (err && err != -EAGAIN) 5206 return err; 5207 } 5208 5209 err = ext4_ext_get_access(handle, inode, path); 5210 return err; 5211 } 5212 5213 /* 5214 * ext4_ext_shift_path_extents: 5215 * Shift the extents of a path structure lying between path[depth].p_ext 5216 * and EXT_LAST_EXTENT(path[depth].p_hdr) downwards, by subtracting shift 5217 * from starting block for each extent. 5218 */ 5219 static int 5220 ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift, 5221 struct inode *inode, handle_t *handle, 5222 ext4_lblk_t *start) 5223 { 5224 int depth, err = 0; 5225 struct ext4_extent *ex_start, *ex_last; 5226 bool update = 0; 5227 depth = path->p_depth; 5228 5229 while (depth >= 0) { 5230 if (depth == path->p_depth) { 5231 ex_start = path[depth].p_ext; 5232 if (!ex_start) 5233 return -EIO; 5234 5235 ex_last = EXT_LAST_EXTENT(path[depth].p_hdr); 5236 if (!ex_last) 5237 return -EIO; 5238 5239 err = ext4_access_path(handle, inode, path + depth); 5240 if (err) 5241 goto out; 5242 5243 if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) 5244 update = 1; 5245 5246 *start = le32_to_cpu(ex_last->ee_block) + 5247 ext4_ext_get_actual_len(ex_last); 5248 5249 while (ex_start <= ex_last) { 5250 le32_add_cpu(&ex_start->ee_block, -shift); 5251 /* Try to merge to the left. */ 5252 if ((ex_start > 5253 EXT_FIRST_EXTENT(path[depth].p_hdr)) && 5254 ext4_ext_try_to_merge_right(inode, 5255 path, ex_start - 1)) 5256 ex_last--; 5257 else 5258 ex_start++; 5259 } 5260 err = ext4_ext_dirty(handle, inode, path + depth); 5261 if (err) 5262 goto out; 5263 5264 if (--depth < 0 || !update) 5265 break; 5266 } 5267 5268 /* Update index too */ 5269 err = ext4_access_path(handle, inode, path + depth); 5270 if (err) 5271 goto out; 5272 5273 le32_add_cpu(&path[depth].p_idx->ei_block, -shift); 5274 err = ext4_ext_dirty(handle, inode, path + depth); 5275 if (err) 5276 goto out; 5277 5278 /* we are done if current index is not a starting index */ 5279 if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr)) 5280 break; 5281 5282 depth--; 5283 } 5284 5285 out: 5286 return err; 5287 } 5288 5289 /* 5290 * ext4_ext_shift_extents: 5291 * All the extents which lies in the range from start to the last allocated 5292 * block for the file are shifted downwards by shift blocks. 5293 * On success, 0 is returned, error otherwise. 5294 */ 5295 static int 5296 ext4_ext_shift_extents(struct inode *inode, handle_t *handle, 5297 ext4_lblk_t start, ext4_lblk_t shift) 5298 { 5299 struct ext4_ext_path *path; 5300 int ret = 0, depth; 5301 struct ext4_extent *extent; 5302 ext4_lblk_t stop_block, current_block; 5303 ext4_lblk_t ex_start, ex_end; 5304 5305 /* Let path point to the last extent */ 5306 path = ext4_ext_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0); 5307 if (IS_ERR(path)) 5308 return PTR_ERR(path); 5309 5310 depth = path->p_depth; 5311 extent = path[depth].p_ext; 5312 if (!extent) { 5313 ext4_ext_drop_refs(path); 5314 kfree(path); 5315 return ret; 5316 } 5317 5318 stop_block = le32_to_cpu(extent->ee_block) + 5319 ext4_ext_get_actual_len(extent); 5320 ext4_ext_drop_refs(path); 5321 kfree(path); 5322 5323 /* Nothing to shift, if hole is at the end of file */ 5324 if (start >= stop_block) 5325 return ret; 5326 5327 /* 5328 * Don't start shifting extents until we make sure the hole is big 5329 * enough to accomodate the shift. 5330 */ 5331 path = ext4_ext_find_extent(inode, start - 1, NULL, 0); 5332 if (IS_ERR(path)) 5333 return PTR_ERR(path); 5334 depth = path->p_depth; 5335 extent = path[depth].p_ext; 5336 if (extent) { 5337 ex_start = le32_to_cpu(extent->ee_block); 5338 ex_end = le32_to_cpu(extent->ee_block) + 5339 ext4_ext_get_actual_len(extent); 5340 } else { 5341 ex_start = 0; 5342 ex_end = 0; 5343 } 5344 ext4_ext_drop_refs(path); 5345 kfree(path); 5346 5347 if ((start == ex_start && shift > ex_start) || 5348 (shift > start - ex_end)) 5349 return -EINVAL; 5350 5351 /* Its safe to start updating extents */ 5352 while (start < stop_block) { 5353 path = ext4_ext_find_extent(inode, start, NULL, 0); 5354 if (IS_ERR(path)) 5355 return PTR_ERR(path); 5356 depth = path->p_depth; 5357 extent = path[depth].p_ext; 5358 if (!extent) { 5359 EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 5360 (unsigned long) start); 5361 return -EIO; 5362 } 5363 5364 current_block = le32_to_cpu(extent->ee_block); 5365 if (start > current_block) { 5366 /* Hole, move to the next extent */ 5367 ret = mext_next_extent(inode, path, &extent); 5368 if (ret != 0) { 5369 ext4_ext_drop_refs(path); 5370 kfree(path); 5371 if (ret == 1) 5372 ret = 0; 5373 break; 5374 } 5375 } 5376 ret = ext4_ext_shift_path_extents(path, shift, inode, 5377 handle, &start); 5378 ext4_ext_drop_refs(path); 5379 kfree(path); 5380 if (ret) 5381 break; 5382 } 5383 5384 return ret; 5385 } 5386 5387 /* 5388 * ext4_collapse_range: 5389 * This implements the fallocate's collapse range functionality for ext4 5390 * Returns: 0 and non-zero on error. 5391 */ 5392 int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) 5393 { 5394 struct super_block *sb = inode->i_sb; 5395 ext4_lblk_t punch_start, punch_stop; 5396 handle_t *handle; 5397 unsigned int credits; 5398 loff_t new_size, ioffset; 5399 int ret; 5400 5401 /* Collapse range works only on fs block size aligned offsets. */ 5402 if (offset & (EXT4_BLOCK_SIZE(sb) - 1) || 5403 len & (EXT4_BLOCK_SIZE(sb) - 1)) 5404 return -EINVAL; 5405 5406 if (!S_ISREG(inode->i_mode)) 5407 return -EINVAL; 5408 5409 if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) 5410 return -EOPNOTSUPP; 5411 5412 trace_ext4_collapse_range(inode, offset, len); 5413 5414 punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb); 5415 punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb); 5416 5417 /* Call ext4_force_commit to flush all data in case of data=journal. */ 5418 if (ext4_should_journal_data(inode)) { 5419 ret = ext4_force_commit(inode->i_sb); 5420 if (ret) 5421 return ret; 5422 } 5423 5424 /* 5425 * Need to round down offset to be aligned with page size boundary 5426 * for page size > block size. 5427 */ 5428 ioffset = round_down(offset, PAGE_SIZE); 5429 5430 /* Write out all dirty pages */ 5431 ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, 5432 LLONG_MAX); 5433 if (ret) 5434 return ret; 5435 5436 /* Take mutex lock */ 5437 mutex_lock(&inode->i_mutex); 5438 5439 /* 5440 * There is no need to overlap collapse range with EOF, in which case 5441 * it is effectively a truncate operation 5442 */ 5443 if (offset + len >= i_size_read(inode)) { 5444 ret = -EINVAL; 5445 goto out_mutex; 5446 } 5447 5448 /* Currently just for extent based files */ 5449 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 5450 ret = -EOPNOTSUPP; 5451 goto out_mutex; 5452 } 5453 5454 truncate_pagecache(inode, ioffset); 5455 5456 /* Wait for existing dio to complete */ 5457 ext4_inode_block_unlocked_dio(inode); 5458 inode_dio_wait(inode); 5459 5460 credits = ext4_writepage_trans_blocks(inode); 5461 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 5462 if (IS_ERR(handle)) { 5463 ret = PTR_ERR(handle); 5464 goto out_dio; 5465 } 5466 5467 down_write(&EXT4_I(inode)->i_data_sem); 5468 ext4_discard_preallocations(inode); 5469 5470 ret = ext4_es_remove_extent(inode, punch_start, 5471 EXT_MAX_BLOCKS - punch_start); 5472 if (ret) { 5473 up_write(&EXT4_I(inode)->i_data_sem); 5474 goto out_stop; 5475 } 5476 5477 ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1); 5478 if (ret) { 5479 up_write(&EXT4_I(inode)->i_data_sem); 5480 goto out_stop; 5481 } 5482 ext4_discard_preallocations(inode); 5483 5484 ret = ext4_ext_shift_extents(inode, handle, punch_stop, 5485 punch_stop - punch_start); 5486 if (ret) { 5487 up_write(&EXT4_I(inode)->i_data_sem); 5488 goto out_stop; 5489 } 5490 5491 new_size = i_size_read(inode) - len; 5492 i_size_write(inode, new_size); 5493 EXT4_I(inode)->i_disksize = new_size; 5494 5495 up_write(&EXT4_I(inode)->i_data_sem); 5496 if (IS_SYNC(inode)) 5497 ext4_handle_sync(handle); 5498 inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 5499 ext4_mark_inode_dirty(handle, inode); 5500 5501 out_stop: 5502 ext4_journal_stop(handle); 5503 out_dio: 5504 ext4_inode_resume_unlocked_dio(inode); 5505 out_mutex: 5506 mutex_unlock(&inode->i_mutex); 5507 return ret; 5508 } 5509