// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fiemap.h>
#include <linux/iomap.h>
#include <linux/sched/mm.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails
					due to ENOSPC */
#define EXT4_EXT_MARK_UNWRIT1	0x2  /* mark first half unwritten */
#define EXT4_EXT_MARK_UNWRIT2	0x4  /* mark second half unwritten */

#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */

static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}

static int ext4_split_extent_at(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path **ppath,
			     ext4_lblk_t split,
			     int split_flag,
			     int flags);

static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
{
	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_rwsem. So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	ext4_discard_preallocations(inode, 0);
	up_write(&EXT4_I(inode)->i_data_sem);
	*dropped = 1;
	return 0;
}

static void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth, i;

	if (!path)
		return;
	depth = path->p_depth;
	for (i = 0; i <= depth; i++, path++) {
		brelse(path->p_bh);
		path->p_bh = NULL;
	}
}

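/*
 * ext4_free_ext_path:
 * drop the buffer references held by @path (via ext4_ext_drop_refs()
 * above) and free the path array itself; @path may be NULL.
 */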
void ext4_free_ext_path(struct ext4_ext_path *path)
{
	ext4_ext_drop_refs(path);
	kfree(path);
}

/*
 * Make sure 'handle' has at least 'check_cred' credits. If not, restart
 * transaction with 'restart_cred' credits. The function drops i_data_sem
 * when restarting transaction and gets it after transaction is restarted.
 *
 * The function returns 0 on success, 1 if transaction had to be restarted,
 * and < 0 in case of fatal error.
 */
int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
				int check_cred, int restart_cred,
				int revoke_cred)
{
	int ret;
	int dropped = 0;

	ret = ext4_journal_ensure_credits_fn(handle, check_cred, restart_cred,
		revoke_cred, ext4_ext_trunc_restart_fn(inode, &dropped));
	if (dropped)
		down_write(&EXT4_I(inode)->i_data_sem);
	return ret;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	int err = 0;

	if (path->p_bh) {
		/* path points to block */
		BUFFER_TRACE(path->p_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, inode->i_sb,
						    path->p_bh, EXT4_JTR_NONE);
		/*
		 * The extent buffer's verified bit will be set again in
		 * __ext4_ext_dirty(). We could leave an inconsistent
		 * buffer if the extents updating procedure breaks off due
		 * to some error, so force it to be checked again.
		 */
		if (!err)
			clear_buffer_verified(path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int __ext4_ext_dirty(const char *where, unsigned int line,
			    handle_t *handle, struct inode *inode,
			    struct ext4_ext_path *path)
{
	int err;

	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
		/* Extents updating done, re-set verified flag */
		if (!err)
			set_buffer_verified(path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

#define ext4_ext_dirty(handle, inode, path) \
		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
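		/*
		 * E.g. if the extent found in the path maps logical block
		 * 100 to physical block 500 and @block is 110, the goal
		 * computed below is physical block 510.
		 */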
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}

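/*
 * Force a split of the extent covering @lblk.  The unwritten state of the
 * extent being split is preserved on both halves; with @nofail set, the
 * reserved metadata block pool is used so the split is not expected to
 * fail with ENOSPC.
 */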
static inline int
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path **ppath, ext4_lblk_t lblk,
			   int nofail)
{
	struct ext4_ext_path *path = *ppath;
	int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
	int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO;

	if (nofail)
		flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;

	return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
			EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
			flags);
}

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);
	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);

	/*
	 * We allow neither:
	 *  - zero length
	 *  - overflow/wrap-around
	 */
	if (lblock + len <= lblock)
		return 0;
	return ext4_inode_block_valid(inode, block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				 struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_inode_block_valid(inode, block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				     struct ext4_extent_header *eh,
				     ext4_lblk_t lblk, ext4_fsblk_t *pblk,
				     int depth)
{
	unsigned short entries;
	ext4_lblk_t lblock = 0;
	ext4_lblk_t cur = 0;

	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);

		/*
		 * The logical block in the first entry should equal
		 * the number in the index block.
		 */
		if (depth != ext_depth(inode) &&
		    lblk != le32_to_cpu(ext->ee_block))
			return 0;
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;

			/* Check for overlapping extents */
			lblock = le32_to_cpu(ext->ee_block);
			if (lblock < cur) {
				*pblk = ext4_ext_pblock(ext);
				return 0;
			}
			cur = lblock + ext4_ext_get_actual_len(ext);
			ext++;
			entries--;
		}
	} else {
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);

		/*
		 * The logical block in the first entry should equal
		 * the number in the parent index block.
		 */
		if (depth != ext_depth(inode) &&
		    lblk != le32_to_cpu(ext_idx->ei_block))
			return 0;
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;

			/* Check for overlapping index extents */
			lblock = le32_to_cpu(ext_idx->ei_block);
			if (lblock < cur) {
				*pblk = ext4_idx_pblock(ext_idx);
				return 0;
			}
			ext_idx++;
			entries--;
			cur = lblock + 1;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth, ext4_fsblk_t pblk, ext4_lblk_t lblk)
{
	const char *error_msg;
	int max = 0, err = -EFSCORRUPTED;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (unlikely((eh->eh_entries == 0) && (depth > 0))) {
		error_msg = "eh_entries is 0 but eh_depth is > 0";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	if (unlikely(depth > 32)) {
		error_msg = "too large eh_depth";
		goto corrupted;
	}
	/* Verify checksum on non-root extent tree nodes */
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
		err = -EFSBADCRC;
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode_err(inode, function, line, 0, -err,
			     "pblk %llu bad header/extent: %s - magic %x, "
			     "entries %u, max %u(%u), depth %u(%u)",
			     (unsigned long long) pblk, error_msg,
			     le16_to_cpu(eh->eh_magic),
			     le16_to_cpu(eh->eh_entries),
			     le16_to_cpu(eh->eh_max),
			     max, le16_to_cpu(eh->eh_depth), depth);
	return err;
}

#define ext4_ext_check(inode, eh, depth, pblk)			\
	__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk), 0)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}

static void ext4_cache_extents(struct inode *inode,
			       struct ext4_extent_header *eh)
{
	struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
	ext4_lblk_t prev = 0;
	int i;

	for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
		unsigned int status = EXTENT_STATUS_WRITTEN;
		ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
		int len = ext4_ext_get_actual_len(ex);

		if (prev && (prev != lblk))
			ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
					     EXTENT_STATUS_HOLE);

		if (ext4_ext_is_unwritten(ex))
			status = EXTENT_STATUS_UNWRITTEN;
		ext4_es_cache_extent(inode, lblk, len,
				     ext4_ext_pblock(ex), status);
		prev = lblk + len;
	}
}

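/*
 * Read an extent tree node (index or leaf) from the buffer cache or from
 * disk, verify it with __ext4_ext_check() unless the buffer is already
 * verified, and, for leaf nodes, optionally populate the extent status
 * cache.  Returns the buffer head or an ERR_PTR() on failure.
 */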
static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
			 struct inode *inode, struct ext4_extent_idx *idx,
			 int depth, int flags)
{
	struct buffer_head *bh;
	int err;
	gfp_t gfp_flags = __GFP_MOVABLE | GFP_NOFS;
	ext4_fsblk_t pblk;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	pblk = ext4_idx_pblock(idx);
	bh = sb_getblk_gfp(inode->i_sb, pblk, gfp_flags);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);

	if (!bh_uptodate_or_lock(bh)) {
		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
		err = ext4_read_bh(bh, 0, NULL);
		if (err < 0)
			goto errout;
	}
	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
		return bh;
	err = __ext4_ext_check(function, line, inode, ext_block_hdr(bh),
			       depth, pblk, le32_to_cpu(idx->ei_block));
	if (err)
		goto errout;
	set_buffer_verified(bh);
	/*
	 * If this is a leaf block, cache all of its entries
	 */
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
		struct ext4_extent_header *eh = ext_block_hdr(bh);
		ext4_cache_extents(inode, eh);
	}
	return bh;
errout:
	put_bh(bh);
	return ERR_PTR(err);
}

#define read_extent_tree_block(inode, idx, depth, flags)		\
	__read_extent_tree_block(__func__, __LINE__, (inode), (idx),	\
				 (depth), (flags))

/*
 * This function is called to cache a file's extent information in the
 * extent status tree
 */
int ext4_ext_precache(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_ext_path *path = NULL;
	struct buffer_head *bh;
	int i = 0, depth, ret = 0;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return 0;	/* not an extent-mapped inode */

	down_read(&ei->i_data_sem);
	depth = ext_depth(inode);

	/* Don't cache anything if there are no external extent blocks */
	if (!depth) {
		up_read(&ei->i_data_sem);
		return ret;
	}

	path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
		       GFP_NOFS);
	if (path == NULL) {
		up_read(&ei->i_data_sem);
		return -ENOMEM;
	}

	path[0].p_hdr = ext_inode_hdr(inode);
	ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
	if (ret)
		goto out;
	path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
	while (i >= 0) {
		/*
		 * If this is a leaf block or we've reached the end of
		 * the index block, go up
		 */
		if ((i == depth) ||
		    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}
		bh = read_extent_tree_block(inode, path[i].p_idx++,
					    depth - i - 1,
					    EXT4_EX_FORCE_CACHE);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			break;
		}
		i++;
		path[i].p_bh = bh;
		path[i].p_hdr = ext_block_hdr(bh);
		path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
	}
	ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
	up_read(&ei->i_data_sem);
	ext4_free_ext_path(path);
	return ret;
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug(inode, "path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug(inode, " %d->%llu",
				  le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug(inode, " %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_unwritten(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug(inode, " []");
	}
	ext_debug(inode, "\n");
}

static void ext4_ext_show_leaf(struct inode *inode,
			       struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug(inode, "Displaying leaf extents\n");

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug(inode, "%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug(inode, "\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			       ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;
		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug(inode, "%d: move %d:%llu in new index %llu\n",
				  level, le32_to_cpu(idx->ei_block),
				  ext4_idx_pblock(idx), newblock);
			idx++;
		}

		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug(inode, "move %d:%llu:[%d]%d in new leaf %llu\n",
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_pblock(ex),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex),
			  newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
		       struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug(inode, "binsearch for %u(idx): ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
			  le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block),
			  r, le32_to_cpu(r->ei_block));

		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
	}

	path->p_idx = l - 1;
	ext_debug(inode, " -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 && le32_to_cpu(ix->ei_block) <=
			    le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
				    <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}

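/*
 * Note: as with ext4_ext_binsearch_idx() above, the search below settles
 * on the last entry whose starting block is <= @block (or on the first
 * entry when @block precedes them all); e.g. in a leaf with extents
 * starting at 0, 100 and 200, block 150 resolves to the extent at 100.
 */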
/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		   struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug(inode, "binsearch for %u: ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
			  le32_to_cpu(l->ee_block), m, le32_to_cpu(m->ee_block),
			  r, le32_to_cpu(r->ee_block));

		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
	}

	path->p_ext = l - 1;
	ext_debug(inode, " -> %d:%llu:[%d]%d ",
		  le32_to_cpu(path->p_ext->ee_block),
		  ext4_ext_pblock(path->p_ext),
		  ext4_ext_is_unwritten(path->p_ext),
		  ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
				    <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}

void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	eh->eh_generation = 0;
	ext4_mark_inode_dirty(handle, inode);
}

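/*
 * ext4_find_extent:
 * walk the tree from the root down to the leaf covering @block, filling
 * in one ext4_ext_path entry per level.  *orig_path is reused when it is
 * deep enough, otherwise it is freed and reallocated.  Returns the path
 * array or an ERR_PTR(); on error *orig_path is cleared.
 */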
struct ext4_ext_path *
ext4_find_extent(struct inode *inode, ext4_lblk_t block,
		 struct ext4_ext_path **orig_path, int flags)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
	short int depth, i, ppos = 0;
	int ret;
	gfp_t gfp_flags = GFP_NOFS;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);
	if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
		EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
				 depth);
		ret = -EFSCORRUPTED;
		goto err;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		if (depth > path[0].p_maxdepth) {
			kfree(path);
			*orig_path = path = NULL;
		}
	}
	if (!path) {
		/* account possible depth increase */
		path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
				gfp_flags);
		if (unlikely(!path))
			return ERR_PTR(-ENOMEM);
		path[0].p_maxdepth = depth + 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
		ext4_cache_extents(inode, eh);
	/* walk through the tree */
	while (i) {
		ext_debug(inode, "depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries),
			  le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = read_extent_tree_block(inode, path[ppos].p_idx, --i,
					    flags);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			goto err;
		}

		eh = ext_block_hdr(bh);
		ppos++;
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_free_ext_path(path);
	if (orig_path)
		*orig_path = NULL;
	return ERR_PTR(ret);
}

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EFSCORRUPTED;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
		     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EFSCORRUPTED;
	}

	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		ext_debug(inode, "insert new index %d after: %llu\n",
			  logical, ptr);
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		ext_debug(inode, "insert new index %d before: %llu\n",
			  logical, ptr);
		ix = curp->p_idx;
	}

	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
	BUG_ON(len < 0);
	if (len > 0) {
		ext_debug(inode, "insert new index %d: "
				"move %d indices from 0x%p to 0x%p\n",
				logical, len, ix, ix + 1);
		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
	}

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
		return -EFSCORRUPTED;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EFSCORRUPTED;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

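/*
 * Example for ext4_ext_insert_index(): inserting logical block 300 into
 * an index block holding entries for 0 and 200 needs no shifting (insert
 * after the last entry), while inserting 100 first memmove()s the entry
 * for 200 one slot to the right.
 */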
/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	gfp_t gfp_flags = GFP_NOFS;
	int err = 0;
	size_t ext_size = 0;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EFSCORRUPTED;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug(inode, "leaf will be split."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug(inode, "leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If an error occurs, we break off processing and mark the
	 * filesystem read-only.  The index won't be inserted and the
	 * tree will remain in a consistent state.  The next mount will
	 * repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), gfp_flags);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh)) {
		err = -ENOMEM;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
					     EXT4_JTR_NONE);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	neh->eh_generation = 0;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	/* start copy from next extent */
	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
	if (m) {
		struct ext4_extent *ex;
		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	/* zero out unused area in the extent block */
	ext_size = sizeof(struct ext4_extent_header) +
		sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

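	/*
	 * At this point the new leaf and the trimmed old leaf are both
	 * consistent on disk; next, build the chain of index blocks
	 * (bottom-up) that will point at the new leaf.
	 */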
	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	if (k)
		ext_debug(inode, "create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
						     EXT4_JTR_NONE);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		neh->eh_generation = 0;
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug(inode, "int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);

		/* move remainder of path[i] to the new index block */
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EFSCORRUPTED;
			goto cleanup;
		}
		/* start copy indexes */
		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
		ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		ext4_ext_show_move(inode, path, newblock, i);
		if (m) {
			memmove(++fidx, path[i].p_idx,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		/* zero out unused area in the extent block */
		ext_size = sizeof(struct ext4_extent_header) +
		   (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
		memset(bh->b_data + ext_size, 0,
			inode->i_sb->s_blocksize - ext_size);
		ext4_extent_block_csum_set(inode, neh);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 unsigned int flags)
{
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock, goal = 0;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
	int err = 0;
	size_t ext_size = 0;

	/* Try to prepend new index to old one */
	if (ext_depth(inode))
		goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
	if (goal > le32_to_cpu(es->s_first_data_block)) {
		flags |= EXT4_MB_HINT_TRY_GOAL;
		goal--;
	} else
		goal = ext4_inode_to_goal_block(inode);
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh))
		return -ENOMEM;
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
					     EXT4_JTR_NONE);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	ext_size = sizeof(EXT4_I(inode)->i_data);
	/* move top-level index/leaf into new block */
	memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
	/* zero out unused area in the extent block */
	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	set_buffer_verified(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* Update top-level index: num,max,pointer */
	neh = ext_inode_hdr(inode);
	neh->eh_entries = cpu_to_le16(1);
	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
	if (neh->eh_depth == 0) {
		/* Root extent block becomes index block */
		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
		EXT_FIRST_INDEX(neh)->ei_block =
			EXT_FIRST_EXTENT(neh)->ee_block;
	}
	ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

	le16_add_cpu(&neh->eh_depth, 1);
	err = ext4_mark_inode_dirty(handle, inode);
out:
	brelse(bh);

	return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
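/*
 * Note: both the split and the grow paths below leave the cached path
 * stale, so *ppath is refreshed with ext4_find_extent() before the
 * function returns.
 */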
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    unsigned int mb_flags,
				    unsigned int gb_flags,
				    struct ext4_ext_path **ppath,
				    struct ext4_extent *newext)
{
	struct ext4_ext_path *path = *ppath;
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up the tree and look for free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		path = ext4_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    ppath, gb_flags);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, mb_flags);
		if (err)
			goto out;

		/* refill path */
		path = ext4_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    ppath, gb_flags);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}

/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_left(struct inode *inode,
				struct ext4_ext_path *path,
				ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EFSCORRUPTED;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EFSCORRUPTED;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
				  le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block),
				  depth);
				return -EFSCORRUPTED;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EFSCORRUPTED;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
	return 0;
}

/*
 * Search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys.
 * If no such block exists, return 0 and set @phys to 0. Return 1
 * when an allocated block was found, in which case *ret_ex is valid.
 * Otherwise return a (< 0) error code.
 */
static int ext4_ext_search_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 ext4_lblk_t *logical, ext4_fsblk_t *phys,
				 struct ext4_extent *ret_ex)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EFSCORRUPTED;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
					 depth);
			return -EFSCORRUPTED;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
						 *logical);
				return -EFSCORRUPTED;
			}
		}
		goto found_extent;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EFSCORRUPTED;
	}

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		goto found_extent;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	while (++depth < path->p_depth) {
		/* subtract from p_depth to get proper eh_depth */
		bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		eh = ext_block_hdr(bh);
		ix = EXT_FIRST_INDEX(eh);
		put_bh(bh);
	}

	bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	eh = ext_block_hdr(bh);
	ex = EXT_FIRST_EXTENT(eh);

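	/*
	 * Here @ex points either into the leaf already referenced by
	 * @path (bh == NULL) or into the sibling leaf just read from
	 * disk (bh != NULL), which is released below.
	 */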
found_extent:
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext4_ext_pblock(ex);
	if (ret_ex)
		*ret_ex = *ex;
	if (bh)
		put_bh(bh);
	return 1;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCKS;

	while (depth >= 0) {
		struct ext4_ext_path *p = &path[depth];

		if (depth == path->p_depth) {
			/* leaf */
			if (p->p_ext && p->p_ext != EXT_LAST_EXTENT(p->p_hdr))
				return le32_to_cpu(p->p_ext[1].ee_block);
		} else {
			/* index */
			if (p->p_idx != EXT_LAST_INDEX(p->p_hdr))
				return le32_to_cpu(p->p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCKS;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
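/*
 * Example: if the first extent of a leaf moves from logical block 100 to
 * 96, every index entry on the path that still records 100 as the leaf's
 * starting block must be rewritten to 96 so lookups keep working.
 */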
1704 */ 1705 static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, 1706 struct ext4_ext_path *path) 1707 { 1708 struct ext4_extent_header *eh; 1709 int depth = ext_depth(inode); 1710 struct ext4_extent *ex; 1711 __le32 border; 1712 int k, err = 0; 1713 1714 eh = path[depth].p_hdr; 1715 ex = path[depth].p_ext; 1716 1717 if (unlikely(ex == NULL || eh == NULL)) { 1718 EXT4_ERROR_INODE(inode, 1719 "ex %p == NULL or eh %p == NULL", ex, eh); 1720 return -EFSCORRUPTED; 1721 } 1722 1723 if (depth == 0) { 1724 /* there is no tree at all */ 1725 return 0; 1726 } 1727 1728 if (ex != EXT_FIRST_EXTENT(eh)) { 1729 /* we correct tree if first leaf got modified only */ 1730 return 0; 1731 } 1732 1733 /* 1734 * TODO: we need correction if border is smaller than current one 1735 */ 1736 k = depth - 1; 1737 border = path[depth].p_ext->ee_block; 1738 err = ext4_ext_get_access(handle, inode, path + k); 1739 if (err) 1740 return err; 1741 path[k].p_idx->ei_block = border; 1742 err = ext4_ext_dirty(handle, inode, path + k); 1743 if (err) 1744 return err; 1745 1746 while (k--) { 1747 /* change all left-side indexes */ 1748 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) 1749 break; 1750 err = ext4_ext_get_access(handle, inode, path + k); 1751 if (err) 1752 break; 1753 path[k].p_idx->ei_block = border; 1754 err = ext4_ext_dirty(handle, inode, path + k); 1755 if (err) 1756 break; 1757 } 1758 1759 return err; 1760 } 1761 1762 static int ext4_can_extents_be_merged(struct inode *inode, 1763 struct ext4_extent *ex1, 1764 struct ext4_extent *ex2) 1765 { 1766 unsigned short ext1_ee_len, ext2_ee_len; 1767 1768 if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2)) 1769 return 0; 1770 1771 ext1_ee_len = ext4_ext_get_actual_len(ex1); 1772 ext2_ee_len = ext4_ext_get_actual_len(ex2); 1773 1774 if (le32_to_cpu(ex1->ee_block) + ext1_ee_len != 1775 le32_to_cpu(ex2->ee_block)) 1776 return 0; 1777 1778 if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN) 1779 return 0; 1780 1781 if (ext4_ext_is_unwritten(ex1) && 1782 ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN) 1783 return 0; 1784 #ifdef AGGRESSIVE_TEST 1785 if (ext1_ee_len >= 4) 1786 return 0; 1787 #endif 1788 1789 if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2)) 1790 return 1; 1791 return 0; 1792 } 1793 1794 /* 1795 * This function tries to merge the "ex" extent to the next extent in the tree. 1796 * It always tries to merge towards right. If you want to merge towards 1797 * left, pass "ex - 1" as argument instead of "ex". 1798 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns 1799 * 1 if they got merged. 1800 */ 1801 static int ext4_ext_try_to_merge_right(struct inode *inode, 1802 struct ext4_ext_path *path, 1803 struct ext4_extent *ex) 1804 { 1805 struct ext4_extent_header *eh; 1806 unsigned int depth, len; 1807 int merge_done = 0, unwritten; 1808 1809 depth = ext_depth(inode); 1810 BUG_ON(path[depth].p_hdr == NULL); 1811 eh = path[depth].p_hdr; 1812 1813 while (ex < EXT_LAST_EXTENT(eh)) { 1814 if (!ext4_can_extents_be_merged(inode, ex, ex + 1)) 1815 break; 1816 /* merge with next extent! 
/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards the right. If you want to merge towards
 * the left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
static int ext4_ext_try_to_merge_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0, unwritten;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		unwritten = ext4_ext_is_unwritten(ex);
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (unwritten)
			ext4_ext_mark_unwritten(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
	}

	return merge_done;
}

/*
 * This function does a very simple check to see if we can collapse
 * an extent tree with a single extent tree leaf block into the inode.
 */
static void ext4_ext_try_to_merge_up(handle_t *handle,
				     struct inode *inode,
				     struct ext4_ext_path *path)
{
	size_t s;
	unsigned max_root = ext4_ext_space_root(inode, 0);
	ext4_fsblk_t blk;

	if ((path[0].p_depth != 1) ||
	    (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
	    (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
		return;

	/*
	 * We need to modify the block allocation bitmap and the block
	 * group descriptor to release the extent tree block.  If we
	 * can't get the journal credits, give up.
	 */
	if (ext4_journal_extend(handle, 2,
			ext4_free_metadata_revoke_credits(inode->i_sb, 1)))
		return;

	/*
	 * Copy the extent data up to the inode
	 */
	blk = ext4_idx_pblock(path[0].p_idx);
	s = le16_to_cpu(path[1].p_hdr->eh_entries) *
		sizeof(struct ext4_extent_idx);
	s += sizeof(struct ext4_extent_header);

	path[1].p_maxdepth = path[0].p_maxdepth;
	memcpy(path[0].p_hdr, path[1].p_hdr, s);
	path[0].p_depth = 0;
	path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
		(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
	path[0].p_hdr->eh_max = cpu_to_le16(max_root);

	brelse(path[1].p_bh);
	ext4_free_blocks(handle, inode, NULL, blk, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
}

/*
 * This function tries to merge the @ex extent to neighbours in the tree, then
 * tries to collapse the extent tree into the inode.
 */
static void ext4_ext_try_to_merge(handle_t *handle,
				  struct inode *inode,
				  struct ext4_ext_path *path,
				  struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth;
	int merge_done = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	if (ex > EXT_FIRST_EXTENT(eh))
		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);

	if (!merge_done)
		(void) ext4_ext_try_to_merge_right(inode, path, ex);

	ext4_ext_try_to_merge_up(handle, inode, path);
}

/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
1917 */ 1918 static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, 1919 struct inode *inode, 1920 struct ext4_extent *newext, 1921 struct ext4_ext_path *path) 1922 { 1923 ext4_lblk_t b1, b2; 1924 unsigned int depth, len1; 1925 unsigned int ret = 0; 1926 1927 b1 = le32_to_cpu(newext->ee_block); 1928 len1 = ext4_ext_get_actual_len(newext); 1929 depth = ext_depth(inode); 1930 if (!path[depth].p_ext) 1931 goto out; 1932 b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block)); 1933 1934 /* 1935 * get the next allocated block if the extent in the path 1936 * is before the requested block(s) 1937 */ 1938 if (b2 < b1) { 1939 b2 = ext4_ext_next_allocated_block(path); 1940 if (b2 == EXT_MAX_BLOCKS) 1941 goto out; 1942 b2 = EXT4_LBLK_CMASK(sbi, b2); 1943 } 1944 1945 /* check for wrap through zero on extent logical start block*/ 1946 if (b1 + len1 < b1) { 1947 len1 = EXT_MAX_BLOCKS - b1; 1948 newext->ee_len = cpu_to_le16(len1); 1949 ret = 1; 1950 } 1951 1952 /* check for overlap */ 1953 if (b1 + len1 > b2) { 1954 newext->ee_len = cpu_to_le16(b2 - b1); 1955 ret = 1; 1956 } 1957 out: 1958 return ret; 1959 } 1960 1961 /* 1962 * ext4_ext_insert_extent: 1963 * tries to merge requested extent into the existing extent or 1964 * inserts requested extent as new one into the tree, 1965 * creating new leaf in the no-space case. 1966 */ 1967 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, 1968 struct ext4_ext_path **ppath, 1969 struct ext4_extent *newext, int gb_flags) 1970 { 1971 struct ext4_ext_path *path = *ppath; 1972 struct ext4_extent_header *eh; 1973 struct ext4_extent *ex, *fex; 1974 struct ext4_extent *nearex; /* nearest extent */ 1975 struct ext4_ext_path *npath = NULL; 1976 int depth, len, err; 1977 ext4_lblk_t next; 1978 int mb_flags = 0, unwritten; 1979 1980 if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 1981 mb_flags |= EXT4_MB_DELALLOC_RESERVED; 1982 if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { 1983 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); 1984 return -EFSCORRUPTED; 1985 } 1986 depth = ext_depth(inode); 1987 ex = path[depth].p_ext; 1988 eh = path[depth].p_hdr; 1989 if (unlikely(path[depth].p_hdr == NULL)) { 1990 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 1991 return -EFSCORRUPTED; 1992 } 1993 1994 /* try to insert block into found extent and return */ 1995 if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) { 1996 1997 /* 1998 * Try to see whether we should rather test the extent on 1999 * right from ex, or from the left of ex. This is because 2000 * ext4_find_extent() can return either extent on the 2001 * left, or on the right from the searched position. This 2002 * will make merging more effective. 
2003 */ 2004 if (ex < EXT_LAST_EXTENT(eh) && 2005 (le32_to_cpu(ex->ee_block) + 2006 ext4_ext_get_actual_len(ex) < 2007 le32_to_cpu(newext->ee_block))) { 2008 ex += 1; 2009 goto prepend; 2010 } else if ((ex > EXT_FIRST_EXTENT(eh)) && 2011 (le32_to_cpu(newext->ee_block) + 2012 ext4_ext_get_actual_len(newext) < 2013 le32_to_cpu(ex->ee_block))) 2014 ex -= 1; 2015 2016 /* Try to append newex to the ex */ 2017 if (ext4_can_extents_be_merged(inode, ex, newext)) { 2018 ext_debug(inode, "append [%d]%d block to %u:[%d]%d" 2019 "(from %llu)\n", 2020 ext4_ext_is_unwritten(newext), 2021 ext4_ext_get_actual_len(newext), 2022 le32_to_cpu(ex->ee_block), 2023 ext4_ext_is_unwritten(ex), 2024 ext4_ext_get_actual_len(ex), 2025 ext4_ext_pblock(ex)); 2026 err = ext4_ext_get_access(handle, inode, 2027 path + depth); 2028 if (err) 2029 return err; 2030 unwritten = ext4_ext_is_unwritten(ex); 2031 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 2032 + ext4_ext_get_actual_len(newext)); 2033 if (unwritten) 2034 ext4_ext_mark_unwritten(ex); 2035 nearex = ex; 2036 goto merge; 2037 } 2038 2039 prepend: 2040 /* Try to prepend newex to the ex */ 2041 if (ext4_can_extents_be_merged(inode, newext, ex)) { 2042 ext_debug(inode, "prepend %u[%d]%d block to %u:[%d]%d" 2043 "(from %llu)\n", 2044 le32_to_cpu(newext->ee_block), 2045 ext4_ext_is_unwritten(newext), 2046 ext4_ext_get_actual_len(newext), 2047 le32_to_cpu(ex->ee_block), 2048 ext4_ext_is_unwritten(ex), 2049 ext4_ext_get_actual_len(ex), 2050 ext4_ext_pblock(ex)); 2051 err = ext4_ext_get_access(handle, inode, 2052 path + depth); 2053 if (err) 2054 return err; 2055 2056 unwritten = ext4_ext_is_unwritten(ex); 2057 ex->ee_block = newext->ee_block; 2058 ext4_ext_store_pblock(ex, ext4_ext_pblock(newext)); 2059 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 2060 + ext4_ext_get_actual_len(newext)); 2061 if (unwritten) 2062 ext4_ext_mark_unwritten(ex); 2063 nearex = ex; 2064 goto merge; 2065 } 2066 } 2067 2068 depth = ext_depth(inode); 2069 eh = path[depth].p_hdr; 2070 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) 2071 goto has_space; 2072 2073 /* probably next leaf has space for us? */ 2074 fex = EXT_LAST_EXTENT(eh); 2075 next = EXT_MAX_BLOCKS; 2076 if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)) 2077 next = ext4_ext_next_leaf_block(path); 2078 if (next != EXT_MAX_BLOCKS) { 2079 ext_debug(inode, "next leaf block - %u\n", next); 2080 BUG_ON(npath != NULL); 2081 npath = ext4_find_extent(inode, next, NULL, gb_flags); 2082 if (IS_ERR(npath)) 2083 return PTR_ERR(npath); 2084 BUG_ON(npath->p_depth != path->p_depth); 2085 eh = npath[depth].p_hdr; 2086 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { 2087 ext_debug(inode, "next leaf isn't full(%d)\n", 2088 le16_to_cpu(eh->eh_entries)); 2089 path = npath; 2090 goto has_space; 2091 } 2092 ext_debug(inode, "next leaf has no free space(%d,%d)\n", 2093 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); 2094 } 2095 2096 /* 2097 * There is no free space in the found leaf. 2098 * We're gonna add a new leaf in the tree. 
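	 * (If even the next leaf is full, ext4_ext_create_new_leaf() below
	 * either splits the path at the deepest non-full node or, when the
	 * whole path is full, grows the tree by one level.)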
2099 */ 2100 if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) 2101 mb_flags |= EXT4_MB_USE_RESERVED; 2102 err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags, 2103 ppath, newext); 2104 if (err) 2105 goto cleanup; 2106 depth = ext_depth(inode); 2107 eh = path[depth].p_hdr; 2108 2109 has_space: 2110 nearex = path[depth].p_ext; 2111 2112 err = ext4_ext_get_access(handle, inode, path + depth); 2113 if (err) 2114 goto cleanup; 2115 2116 if (!nearex) { 2117 /* there is no extent in this leaf, create first one */ 2118 ext_debug(inode, "first extent in the leaf: %u:%llu:[%d]%d\n", 2119 le32_to_cpu(newext->ee_block), 2120 ext4_ext_pblock(newext), 2121 ext4_ext_is_unwritten(newext), 2122 ext4_ext_get_actual_len(newext)); 2123 nearex = EXT_FIRST_EXTENT(eh); 2124 } else { 2125 if (le32_to_cpu(newext->ee_block) 2126 > le32_to_cpu(nearex->ee_block)) { 2127 /* Insert after */ 2128 ext_debug(inode, "insert %u:%llu:[%d]%d before: " 2129 "nearest %p\n", 2130 le32_to_cpu(newext->ee_block), 2131 ext4_ext_pblock(newext), 2132 ext4_ext_is_unwritten(newext), 2133 ext4_ext_get_actual_len(newext), 2134 nearex); 2135 nearex++; 2136 } else { 2137 /* Insert before */ 2138 BUG_ON(newext->ee_block == nearex->ee_block); 2139 ext_debug(inode, "insert %u:%llu:[%d]%d after: " 2140 "nearest %p\n", 2141 le32_to_cpu(newext->ee_block), 2142 ext4_ext_pblock(newext), 2143 ext4_ext_is_unwritten(newext), 2144 ext4_ext_get_actual_len(newext), 2145 nearex); 2146 } 2147 len = EXT_LAST_EXTENT(eh) - nearex + 1; 2148 if (len > 0) { 2149 ext_debug(inode, "insert %u:%llu:[%d]%d: " 2150 "move %d extents from 0x%p to 0x%p\n", 2151 le32_to_cpu(newext->ee_block), 2152 ext4_ext_pblock(newext), 2153 ext4_ext_is_unwritten(newext), 2154 ext4_ext_get_actual_len(newext), 2155 len, nearex, nearex + 1); 2156 memmove(nearex + 1, nearex, 2157 len * sizeof(struct ext4_extent)); 2158 } 2159 } 2160 2161 le16_add_cpu(&eh->eh_entries, 1); 2162 path[depth].p_ext = nearex; 2163 nearex->ee_block = newext->ee_block; 2164 ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext)); 2165 nearex->ee_len = newext->ee_len; 2166 2167 merge: 2168 /* try to merge extents */ 2169 if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) 2170 ext4_ext_try_to_merge(handle, inode, path, nearex); 2171 2172 2173 /* time to correct all indexes above */ 2174 err = ext4_ext_correct_indexes(handle, inode, path); 2175 if (err) 2176 goto cleanup; 2177 2178 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 2179 2180 cleanup: 2181 ext4_free_ext_path(npath); 2182 return err; 2183 } 2184 2185 static int ext4_fill_es_cache_info(struct inode *inode, 2186 ext4_lblk_t block, ext4_lblk_t num, 2187 struct fiemap_extent_info *fieinfo) 2188 { 2189 ext4_lblk_t next, end = block + num - 1; 2190 struct extent_status es; 2191 unsigned char blksize_bits = inode->i_sb->s_blocksize_bits; 2192 unsigned int flags; 2193 int err; 2194 2195 while (block <= end) { 2196 next = 0; 2197 flags = 0; 2198 if (!ext4_es_lookup_extent(inode, block, &next, &es)) 2199 break; 2200 if (ext4_es_is_unwritten(&es)) 2201 flags |= FIEMAP_EXTENT_UNWRITTEN; 2202 if (ext4_es_is_delayed(&es)) 2203 flags |= (FIEMAP_EXTENT_DELALLOC | 2204 FIEMAP_EXTENT_UNKNOWN); 2205 if (ext4_es_is_hole(&es)) 2206 flags |= EXT4_FIEMAP_EXTENT_HOLE; 2207 if (next == 0) 2208 flags |= FIEMAP_EXTENT_LAST; 2209 if (flags & (FIEMAP_EXTENT_DELALLOC| 2210 EXT4_FIEMAP_EXTENT_HOLE)) 2211 es.es_pblk = 0; 2212 else 2213 es.es_pblk = ext4_es_pblock(&es); 2214 err = fiemap_fill_next_extent(fieinfo, 2215 (__u64)es.es_lblk << blksize_bits, 2216 (__u64)es.es_pblk 
				(__u64)es.es_len << blksize_bits,
				flags);
		if (next == 0)
			break;
		block = next;
		if (err < 0)
			return err;
		if (err == 1)
			return 0;
	}
	return 0;
}

/*
 * ext4_ext_determine_hole - determine hole around given block
 * @inode:	inode we lookup in
 * @path:	path in extent tree to @lblk
 * @lblk:	pointer to logical block around which we want to determine hole
 *
 * Determine hole length (and start if easily possible) around given logical
 * block. We don't try too hard to find the beginning of the hole, but when
 * @path points to the extent just before @lblk we can provide it.
 *
 * The function returns the length of a hole starting at @lblk. We update @lblk
 * to the beginning of the hole if we managed to find it.
 */
static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
					   struct ext4_ext_path *path,
					   ext4_lblk_t *lblk)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	ext4_lblk_t len;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		*lblk = 0;
		len = EXT_MAX_BLOCKS;
	} else if (*lblk < le32_to_cpu(ex->ee_block)) {
		len = le32_to_cpu(ex->ee_block) - *lblk;
	} else if (*lblk >= le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex)) {
		ext4_lblk_t next;

		*lblk = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
		next = ext4_ext_next_allocated_block(path);
		BUG_ON(next == *lblk);
		len = next - *lblk;
	} else {
		BUG();
	}
	return len;
}

/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
static void
ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start,
			  ext4_lblk_t hole_len)
{
	struct extent_status es;

	ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
				  hole_start + hole_len - 1, &es);
	if (es.es_len) {
		/* Is there a delayed extent containing hole_start? */
		if (es.es_lblk <= hole_start)
			return;
		hole_len = min(es.es_lblk - hole_start, hole_len);
	}
	ext_debug(inode, " -> %u:%u\n", hole_start, hole_len);
	ext4_es_insert_extent(inode, hole_start, hole_len, ~0,
			      EXTENT_STATUS_HOLE);
}

/*
 * ext4_ext_rm_idx:
 * removes index from the index block.
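 * When the removed entry was the leftmost one in its node, the loop at
 * the bottom also copies the new first logical block into each parent
 * index, so the logical start recorded in the upper levels stays
 * consistent with the leaf.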
2300 */ 2301 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, 2302 struct ext4_ext_path *path, int depth) 2303 { 2304 int err; 2305 ext4_fsblk_t leaf; 2306 2307 /* free index block */ 2308 depth--; 2309 path = path + depth; 2310 leaf = ext4_idx_pblock(path->p_idx); 2311 if (unlikely(path->p_hdr->eh_entries == 0)) { 2312 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); 2313 return -EFSCORRUPTED; 2314 } 2315 err = ext4_ext_get_access(handle, inode, path); 2316 if (err) 2317 return err; 2318 2319 if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) { 2320 int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx; 2321 len *= sizeof(struct ext4_extent_idx); 2322 memmove(path->p_idx, path->p_idx + 1, len); 2323 } 2324 2325 le16_add_cpu(&path->p_hdr->eh_entries, -1); 2326 err = ext4_ext_dirty(handle, inode, path); 2327 if (err) 2328 return err; 2329 ext_debug(inode, "index is empty, remove it, free block %llu\n", leaf); 2330 trace_ext4_ext_rm_idx(inode, leaf); 2331 2332 ext4_free_blocks(handle, inode, NULL, leaf, 1, 2333 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 2334 2335 while (--depth >= 0) { 2336 if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr)) 2337 break; 2338 path--; 2339 err = ext4_ext_get_access(handle, inode, path); 2340 if (err) 2341 break; 2342 path->p_idx->ei_block = (path+1)->p_idx->ei_block; 2343 err = ext4_ext_dirty(handle, inode, path); 2344 if (err) 2345 break; 2346 } 2347 return err; 2348 } 2349 2350 /* 2351 * ext4_ext_calc_credits_for_single_extent: 2352 * This routine returns max. credits that needed to insert an extent 2353 * to the extent tree. 2354 * When pass the actual path, the caller should calculate credits 2355 * under i_data_sem. 2356 */ 2357 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, 2358 struct ext4_ext_path *path) 2359 { 2360 if (path) { 2361 int depth = ext_depth(inode); 2362 int ret = 0; 2363 2364 /* probably there is space in leaf? */ 2365 if (le16_to_cpu(path[depth].p_hdr->eh_entries) 2366 < le16_to_cpu(path[depth].p_hdr->eh_max)) { 2367 2368 /* 2369 * There are some space in the leaf tree, no 2370 * need to account for leaf block credit 2371 * 2372 * bitmaps and block group descriptor blocks 2373 * and other metadata blocks still need to be 2374 * accounted. 2375 */ 2376 /* 1 bitmap, 1 block group descriptor */ 2377 ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); 2378 return ret; 2379 } 2380 } 2381 2382 return ext4_chunk_trans_blocks(inode, nrblocks); 2383 } 2384 2385 /* 2386 * How many index/leaf blocks need to change/allocate to add @extents extents? 2387 * 2388 * If we add a single extent, then in the worse case, each tree level 2389 * index/leaf need to be changed in case of the tree split. 2390 * 2391 * If more extents are inserted, they could cause the whole tree split more 2392 * than once, but this is really rare. 2393 */ 2394 int ext4_ext_index_trans_blocks(struct inode *inode, int extents) 2395 { 2396 int index; 2397 int depth; 2398 2399 /* If we are converting the inline data, only one is needed here. 
*/ 2400 if (ext4_has_inline_data(inode)) 2401 return 1; 2402 2403 depth = ext_depth(inode); 2404 2405 if (extents <= 1) 2406 index = depth * 2; 2407 else 2408 index = depth * 3; 2409 2410 return index; 2411 } 2412 2413 static inline int get_default_free_blocks_flags(struct inode *inode) 2414 { 2415 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) || 2416 ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE)) 2417 return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET; 2418 else if (ext4_should_journal_data(inode)) 2419 return EXT4_FREE_BLOCKS_FORGET; 2420 return 0; 2421 } 2422 2423 /* 2424 * ext4_rereserve_cluster - increment the reserved cluster count when 2425 * freeing a cluster with a pending reservation 2426 * 2427 * @inode - file containing the cluster 2428 * @lblk - logical block in cluster to be reserved 2429 * 2430 * Increments the reserved cluster count and adjusts quota in a bigalloc 2431 * file system when freeing a partial cluster containing at least one 2432 * delayed and unwritten block. A partial cluster meeting that 2433 * requirement will have a pending reservation. If so, the 2434 * RERESERVE_CLUSTER flag is used when calling ext4_free_blocks() to 2435 * defer reserved and allocated space accounting to a subsequent call 2436 * to this function. 2437 */ 2438 static void ext4_rereserve_cluster(struct inode *inode, ext4_lblk_t lblk) 2439 { 2440 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2441 struct ext4_inode_info *ei = EXT4_I(inode); 2442 2443 dquot_reclaim_block(inode, EXT4_C2B(sbi, 1)); 2444 2445 spin_lock(&ei->i_block_reservation_lock); 2446 ei->i_reserved_data_blocks++; 2447 percpu_counter_add(&sbi->s_dirtyclusters_counter, 1); 2448 spin_unlock(&ei->i_block_reservation_lock); 2449 2450 percpu_counter_add(&sbi->s_freeclusters_counter, 1); 2451 ext4_remove_pending(inode, lblk); 2452 } 2453 2454 static int ext4_remove_blocks(handle_t *handle, struct inode *inode, 2455 struct ext4_extent *ex, 2456 struct partial_cluster *partial, 2457 ext4_lblk_t from, ext4_lblk_t to) 2458 { 2459 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2460 unsigned short ee_len = ext4_ext_get_actual_len(ex); 2461 ext4_fsblk_t last_pblk, pblk; 2462 ext4_lblk_t num; 2463 int flags; 2464 2465 /* only extent tail removal is allowed */ 2466 if (from < le32_to_cpu(ex->ee_block) || 2467 to != le32_to_cpu(ex->ee_block) + ee_len - 1) { 2468 ext4_error(sbi->s_sb, 2469 "strange request: removal(2) %u-%u from %u:%u", 2470 from, to, le32_to_cpu(ex->ee_block), ee_len); 2471 return 0; 2472 } 2473 2474 #ifdef EXTENTS_STATS 2475 spin_lock(&sbi->s_ext_stats_lock); 2476 sbi->s_ext_blocks += ee_len; 2477 sbi->s_ext_extents++; 2478 if (ee_len < sbi->s_ext_min) 2479 sbi->s_ext_min = ee_len; 2480 if (ee_len > sbi->s_ext_max) 2481 sbi->s_ext_max = ee_len; 2482 if (ext_depth(inode) > sbi->s_depth_max) 2483 sbi->s_depth_max = ext_depth(inode); 2484 spin_unlock(&sbi->s_ext_stats_lock); 2485 #endif 2486 2487 trace_ext4_remove_blocks(inode, ex, from, to, partial); 2488 2489 /* 2490 * if we have a partial cluster, and it's different from the 2491 * cluster of the last block in the extent, we free it 2492 */ 2493 last_pblk = ext4_ext_pblock(ex) + ee_len - 1; 2494 2495 if (partial->state != initial && 2496 partial->pclu != EXT4_B2C(sbi, last_pblk)) { 2497 if (partial->state == tofree) { 2498 flags = get_default_free_blocks_flags(inode); 2499 if (ext4_is_pending(inode, partial->lblk)) 2500 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 2501 ext4_free_blocks(handle, inode, NULL, 2502 EXT4_C2B(sbi, partial->pclu), 2503 
					 sbi->s_cluster_ratio, flags);
			if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
				ext4_rereserve_cluster(inode, partial->lblk);
		}
		partial->state = initial;
	}

	num = le32_to_cpu(ex->ee_block) + ee_len - from;
	pblk = ext4_ext_pblock(ex) + ee_len - num;

	/*
	 * We free the partial cluster at the end of the extent (if any),
	 * unless the cluster is used by another extent (partial_cluster
	 * state is nofree). If a partial cluster exists here, it must be
	 * shared with the last block in the extent.
	 */
	flags = get_default_free_blocks_flags(inode);

	/* partial, left end cluster aligned, right end unaligned */
	if ((EXT4_LBLK_COFF(sbi, to) != sbi->s_cluster_ratio - 1) &&
	    (EXT4_LBLK_CMASK(sbi, to) >= from) &&
	    (partial->state != nofree)) {
		if (ext4_is_pending(inode, to))
			flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
		ext4_free_blocks(handle, inode, NULL,
				 EXT4_PBLK_CMASK(sbi, last_pblk),
				 sbi->s_cluster_ratio, flags);
		if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
			ext4_rereserve_cluster(inode, to);
		partial->state = initial;
		flags = get_default_free_blocks_flags(inode);
	}

	flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;

	/*
	 * For bigalloc file systems, we never free a partial cluster
	 * at the beginning of the extent. Instead, we check to see if we
	 * need to free it on a subsequent call to ext4_remove_blocks,
	 * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space.
	 */
	flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
	ext4_free_blocks(handle, inode, NULL, pblk, num, flags);

	/* reset the partial cluster if we've freed past it */
	if (partial->state != initial && partial->pclu != EXT4_B2C(sbi, pblk))
		partial->state = initial;

	/*
	 * If we've freed the entire extent but the beginning is not left
	 * cluster aligned and is not marked as ineligible for freeing, we
	 * record the partial cluster at the beginning of the extent. It
	 * wasn't freed by the preceding ext4_free_blocks() call, and we
	 * need to look farther to the left to determine if it's to be freed
	 * (not shared with another extent). Else, reset the partial
	 * cluster - we're either done freeing or the beginning of the
	 * extent is left cluster aligned.
	 */
	if (EXT4_LBLK_COFF(sbi, from) && num == ee_len) {
		if (partial->state == initial) {
			partial->pclu = EXT4_B2C(sbi, pblk);
			partial->lblk = from;
			partial->state = tofree;
		}
	} else {
		partial->state = initial;
	}

	return 0;
}

/*
 * ext4_ext_rm_leaf() removes the extents associated with the
 * blocks appearing between "start" and "end". Both "start"
 * and "end" must appear in the same extent or EFSCORRUPTED is returned.
 *
 * @handle: The journal handle
 * @inode:  The file's inode
 * @path:   The path to the leaf
 * @partial: The cluster which we'll have to free if all extents
 *           have been released from it. However, if its state is
 *           nofree, it's a cluster just to the right of the
 *           punched region and it must not be freed.
2586 * @start: The first block to remove 2587 * @end: The last block to remove 2588 */ 2589 static int 2590 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, 2591 struct ext4_ext_path *path, 2592 struct partial_cluster *partial, 2593 ext4_lblk_t start, ext4_lblk_t end) 2594 { 2595 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2596 int err = 0, correct_index = 0; 2597 int depth = ext_depth(inode), credits, revoke_credits; 2598 struct ext4_extent_header *eh; 2599 ext4_lblk_t a, b; 2600 unsigned num; 2601 ext4_lblk_t ex_ee_block; 2602 unsigned short ex_ee_len; 2603 unsigned unwritten = 0; 2604 struct ext4_extent *ex; 2605 ext4_fsblk_t pblk; 2606 2607 /* the header must be checked already in ext4_ext_remove_space() */ 2608 ext_debug(inode, "truncate since %u in leaf to %u\n", start, end); 2609 if (!path[depth].p_hdr) 2610 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); 2611 eh = path[depth].p_hdr; 2612 if (unlikely(path[depth].p_hdr == NULL)) { 2613 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 2614 return -EFSCORRUPTED; 2615 } 2616 /* find where to start removing */ 2617 ex = path[depth].p_ext; 2618 if (!ex) 2619 ex = EXT_LAST_EXTENT(eh); 2620 2621 ex_ee_block = le32_to_cpu(ex->ee_block); 2622 ex_ee_len = ext4_ext_get_actual_len(ex); 2623 2624 trace_ext4_ext_rm_leaf(inode, start, ex, partial); 2625 2626 while (ex >= EXT_FIRST_EXTENT(eh) && 2627 ex_ee_block + ex_ee_len > start) { 2628 2629 if (ext4_ext_is_unwritten(ex)) 2630 unwritten = 1; 2631 else 2632 unwritten = 0; 2633 2634 ext_debug(inode, "remove ext %u:[%d]%d\n", ex_ee_block, 2635 unwritten, ex_ee_len); 2636 path[depth].p_ext = ex; 2637 2638 a = max(ex_ee_block, start); 2639 b = min(ex_ee_block + ex_ee_len - 1, end); 2640 2641 ext_debug(inode, " border %u:%u\n", a, b); 2642 2643 /* If this extent is beyond the end of the hole, skip it */ 2644 if (end < ex_ee_block) { 2645 /* 2646 * We're going to skip this extent and move to another, 2647 * so note that its first cluster is in use to avoid 2648 * freeing it when removing blocks. Eventually, the 2649 * right edge of the truncated/punched region will 2650 * be just to the left. 2651 */ 2652 if (sbi->s_cluster_ratio > 1) { 2653 pblk = ext4_ext_pblock(ex); 2654 partial->pclu = EXT4_B2C(sbi, pblk); 2655 partial->state = nofree; 2656 } 2657 ex--; 2658 ex_ee_block = le32_to_cpu(ex->ee_block); 2659 ex_ee_len = ext4_ext_get_actual_len(ex); 2660 continue; 2661 } else if (b != ex_ee_block + ex_ee_len - 1) { 2662 EXT4_ERROR_INODE(inode, 2663 "can not handle truncate %u:%u " 2664 "on extent %u:%u", 2665 start, end, ex_ee_block, 2666 ex_ee_block + ex_ee_len - 1); 2667 err = -EFSCORRUPTED; 2668 goto out; 2669 } else if (a != ex_ee_block) { 2670 /* remove tail of the extent */ 2671 num = a - ex_ee_block; 2672 } else { 2673 /* remove whole extent: excellent! */ 2674 num = 0; 2675 } 2676 /* 2677 * 3 for leaf, sb, and inode plus 2 (bmap and group 2678 * descriptor) for each block group; assume two block 2679 * groups plus ex_ee_len/blocks_per_block_group for 2680 * the worst case 2681 */ 2682 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); 2683 if (ex == EXT_FIRST_EXTENT(eh)) { 2684 correct_index = 1; 2685 credits += (ext_depth(inode)) + 1; 2686 } 2687 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); 2688 /* 2689 * We may end up freeing some index blocks and data from the 2690 * punched range. Note that partial clusters are accounted for 2691 * by ext4_free_data_revoke_credits(). 
2692 */ 2693 revoke_credits = 2694 ext4_free_metadata_revoke_credits(inode->i_sb, 2695 ext_depth(inode)) + 2696 ext4_free_data_revoke_credits(inode, b - a + 1); 2697 2698 err = ext4_datasem_ensure_credits(handle, inode, credits, 2699 credits, revoke_credits); 2700 if (err) { 2701 if (err > 0) 2702 err = -EAGAIN; 2703 goto out; 2704 } 2705 2706 err = ext4_ext_get_access(handle, inode, path + depth); 2707 if (err) 2708 goto out; 2709 2710 err = ext4_remove_blocks(handle, inode, ex, partial, a, b); 2711 if (err) 2712 goto out; 2713 2714 if (num == 0) 2715 /* this extent is removed; mark slot entirely unused */ 2716 ext4_ext_store_pblock(ex, 0); 2717 2718 ex->ee_len = cpu_to_le16(num); 2719 /* 2720 * Do not mark unwritten if all the blocks in the 2721 * extent have been removed. 2722 */ 2723 if (unwritten && num) 2724 ext4_ext_mark_unwritten(ex); 2725 /* 2726 * If the extent was completely released, 2727 * we need to remove it from the leaf 2728 */ 2729 if (num == 0) { 2730 if (end != EXT_MAX_BLOCKS - 1) { 2731 /* 2732 * For hole punching, we need to scoot all the 2733 * extents up when an extent is removed so that 2734 * we dont have blank extents in the middle 2735 */ 2736 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) * 2737 sizeof(struct ext4_extent)); 2738 2739 /* Now get rid of the one at the end */ 2740 memset(EXT_LAST_EXTENT(eh), 0, 2741 sizeof(struct ext4_extent)); 2742 } 2743 le16_add_cpu(&eh->eh_entries, -1); 2744 } 2745 2746 err = ext4_ext_dirty(handle, inode, path + depth); 2747 if (err) 2748 goto out; 2749 2750 ext_debug(inode, "new extent: %u:%u:%llu\n", ex_ee_block, num, 2751 ext4_ext_pblock(ex)); 2752 ex--; 2753 ex_ee_block = le32_to_cpu(ex->ee_block); 2754 ex_ee_len = ext4_ext_get_actual_len(ex); 2755 } 2756 2757 if (correct_index && eh->eh_entries) 2758 err = ext4_ext_correct_indexes(handle, inode, path); 2759 2760 /* 2761 * If there's a partial cluster and at least one extent remains in 2762 * the leaf, free the partial cluster if it isn't shared with the 2763 * current extent. If it is shared with the current extent 2764 * we reset the partial cluster because we've reached the start of the 2765 * truncated/punched region and we're done removing blocks. 
2766 */ 2767 if (partial->state == tofree && ex >= EXT_FIRST_EXTENT(eh)) { 2768 pblk = ext4_ext_pblock(ex) + ex_ee_len - 1; 2769 if (partial->pclu != EXT4_B2C(sbi, pblk)) { 2770 int flags = get_default_free_blocks_flags(inode); 2771 2772 if (ext4_is_pending(inode, partial->lblk)) 2773 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 2774 ext4_free_blocks(handle, inode, NULL, 2775 EXT4_C2B(sbi, partial->pclu), 2776 sbi->s_cluster_ratio, flags); 2777 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 2778 ext4_rereserve_cluster(inode, partial->lblk); 2779 } 2780 partial->state = initial; 2781 } 2782 2783 /* if this leaf is free, then we should 2784 * remove it from index block above */ 2785 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) 2786 err = ext4_ext_rm_idx(handle, inode, path, depth); 2787 2788 out: 2789 return err; 2790 } 2791 2792 /* 2793 * ext4_ext_more_to_rm: 2794 * returns 1 if current index has to be freed (even partial) 2795 */ 2796 static int 2797 ext4_ext_more_to_rm(struct ext4_ext_path *path) 2798 { 2799 BUG_ON(path->p_idx == NULL); 2800 2801 if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) 2802 return 0; 2803 2804 /* 2805 * if truncate on deeper level happened, it wasn't partial, 2806 * so we have to consider current index for truncation 2807 */ 2808 if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) 2809 return 0; 2810 return 1; 2811 } 2812 2813 int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, 2814 ext4_lblk_t end) 2815 { 2816 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2817 int depth = ext_depth(inode); 2818 struct ext4_ext_path *path = NULL; 2819 struct partial_cluster partial; 2820 handle_t *handle; 2821 int i = 0, err = 0; 2822 2823 partial.pclu = 0; 2824 partial.lblk = 0; 2825 partial.state = initial; 2826 2827 ext_debug(inode, "truncate since %u to %u\n", start, end); 2828 2829 /* probably first extent we're gonna free will be last in block */ 2830 handle = ext4_journal_start_with_revoke(inode, EXT4_HT_TRUNCATE, 2831 depth + 1, 2832 ext4_free_metadata_revoke_credits(inode->i_sb, depth)); 2833 if (IS_ERR(handle)) 2834 return PTR_ERR(handle); 2835 2836 again: 2837 trace_ext4_ext_remove_space(inode, start, end, depth); 2838 2839 /* 2840 * Check if we are removing extents inside the extent tree. If that 2841 * is the case, we are going to punch a hole inside the extent tree 2842 * so we have to check whether we need to split the extent covering 2843 * the last block to remove so we can easily remove the part of it 2844 * in ext4_ext_rm_leaf(). 2845 */ 2846 if (end < EXT_MAX_BLOCKS - 1) { 2847 struct ext4_extent *ex; 2848 ext4_lblk_t ee_block, ex_end, lblk; 2849 ext4_fsblk_t pblk; 2850 2851 /* find extent for or closest extent to this block */ 2852 path = ext4_find_extent(inode, end, NULL, 2853 EXT4_EX_NOCACHE | EXT4_EX_NOFAIL); 2854 if (IS_ERR(path)) { 2855 ext4_journal_stop(handle); 2856 return PTR_ERR(path); 2857 } 2858 depth = ext_depth(inode); 2859 /* Leaf not may not exist only if inode has no blocks at all */ 2860 ex = path[depth].p_ext; 2861 if (!ex) { 2862 if (depth) { 2863 EXT4_ERROR_INODE(inode, 2864 "path[%d].p_hdr == NULL", 2865 depth); 2866 err = -EFSCORRUPTED; 2867 } 2868 goto out; 2869 } 2870 2871 ee_block = le32_to_cpu(ex->ee_block); 2872 ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1; 2873 2874 /* 2875 * See if the last block is inside the extent, if so split 2876 * the extent at 'end' block so we can easily remove the 2877 * tail of the first part of the split extent in 2878 * ext4_ext_rm_leaf(). 
2879 */ 2880 if (end >= ee_block && end < ex_end) { 2881 2882 /* 2883 * If we're going to split the extent, note that 2884 * the cluster containing the block after 'end' is 2885 * in use to avoid freeing it when removing blocks. 2886 */ 2887 if (sbi->s_cluster_ratio > 1) { 2888 pblk = ext4_ext_pblock(ex) + end - ee_block + 1; 2889 partial.pclu = EXT4_B2C(sbi, pblk); 2890 partial.state = nofree; 2891 } 2892 2893 /* 2894 * Split the extent in two so that 'end' is the last 2895 * block in the first new extent. Also we should not 2896 * fail removing space due to ENOSPC so try to use 2897 * reserved block if that happens. 2898 */ 2899 err = ext4_force_split_extent_at(handle, inode, &path, 2900 end + 1, 1); 2901 if (err < 0) 2902 goto out; 2903 2904 } else if (sbi->s_cluster_ratio > 1 && end >= ex_end && 2905 partial.state == initial) { 2906 /* 2907 * If we're punching, there's an extent to the right. 2908 * If the partial cluster hasn't been set, set it to 2909 * that extent's first cluster and its state to nofree 2910 * so it won't be freed should it contain blocks to be 2911 * removed. If it's already set (tofree/nofree), we're 2912 * retrying and keep the original partial cluster info 2913 * so a cluster marked tofree as a result of earlier 2914 * extent removal is not lost. 2915 */ 2916 lblk = ex_end + 1; 2917 err = ext4_ext_search_right(inode, path, &lblk, &pblk, 2918 NULL); 2919 if (err < 0) 2920 goto out; 2921 if (pblk) { 2922 partial.pclu = EXT4_B2C(sbi, pblk); 2923 partial.state = nofree; 2924 } 2925 } 2926 } 2927 /* 2928 * We start scanning from right side, freeing all the blocks 2929 * after i_size and walking into the tree depth-wise. 2930 */ 2931 depth = ext_depth(inode); 2932 if (path) { 2933 int k = i = depth; 2934 while (--k > 0) 2935 path[k].p_block = 2936 le16_to_cpu(path[k].p_hdr->eh_entries)+1; 2937 } else { 2938 path = kcalloc(depth + 1, sizeof(struct ext4_ext_path), 2939 GFP_NOFS | __GFP_NOFAIL); 2940 if (path == NULL) { 2941 ext4_journal_stop(handle); 2942 return -ENOMEM; 2943 } 2944 path[0].p_maxdepth = path[0].p_depth = depth; 2945 path[0].p_hdr = ext_inode_hdr(inode); 2946 i = 0; 2947 2948 if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) { 2949 err = -EFSCORRUPTED; 2950 goto out; 2951 } 2952 } 2953 err = 0; 2954 2955 while (i >= 0 && err == 0) { 2956 if (i == depth) { 2957 /* this is leaf block */ 2958 err = ext4_ext_rm_leaf(handle, inode, path, 2959 &partial, start, end); 2960 /* root level has p_bh == NULL, brelse() eats this */ 2961 brelse(path[i].p_bh); 2962 path[i].p_bh = NULL; 2963 i--; 2964 continue; 2965 } 2966 2967 /* this is index block */ 2968 if (!path[i].p_hdr) { 2969 ext_debug(inode, "initialize header\n"); 2970 path[i].p_hdr = ext_block_hdr(path[i].p_bh); 2971 } 2972 2973 if (!path[i].p_idx) { 2974 /* this level hasn't been touched yet */ 2975 path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); 2976 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; 2977 ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n", 2978 path[i].p_hdr, 2979 le16_to_cpu(path[i].p_hdr->eh_entries)); 2980 } else { 2981 /* we were already here, see at next index */ 2982 path[i].p_idx--; 2983 } 2984 2985 ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n", 2986 i, EXT_FIRST_INDEX(path[i].p_hdr), 2987 path[i].p_idx); 2988 if (ext4_ext_more_to_rm(path + i)) { 2989 struct buffer_head *bh; 2990 /* go to the next level */ 2991 ext_debug(inode, "move to level %d (block %llu)\n", 2992 i + 1, ext4_idx_pblock(path[i].p_idx)); 2993 memset(path + i + 1, 0, sizeof(*path)); 2994 
bh = read_extent_tree_block(inode, path[i].p_idx, 2995 depth - i - 1, 2996 EXT4_EX_NOCACHE); 2997 if (IS_ERR(bh)) { 2998 /* should we reset i_size? */ 2999 err = PTR_ERR(bh); 3000 break; 3001 } 3002 /* Yield here to deal with large extent trees. 3003 * Should be a no-op if we did IO above. */ 3004 cond_resched(); 3005 if (WARN_ON(i + 1 > depth)) { 3006 err = -EFSCORRUPTED; 3007 break; 3008 } 3009 path[i + 1].p_bh = bh; 3010 3011 /* save actual number of indexes since this 3012 * number is changed at the next iteration */ 3013 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); 3014 i++; 3015 } else { 3016 /* we finished processing this index, go up */ 3017 if (path[i].p_hdr->eh_entries == 0 && i > 0) { 3018 /* index is empty, remove it; 3019 * handle must be already prepared by the 3020 * truncatei_leaf() */ 3021 err = ext4_ext_rm_idx(handle, inode, path, i); 3022 } 3023 /* root level has p_bh == NULL, brelse() eats this */ 3024 brelse(path[i].p_bh); 3025 path[i].p_bh = NULL; 3026 i--; 3027 ext_debug(inode, "return to level %d\n", i); 3028 } 3029 } 3030 3031 trace_ext4_ext_remove_space_done(inode, start, end, depth, &partial, 3032 path->p_hdr->eh_entries); 3033 3034 /* 3035 * if there's a partial cluster and we have removed the first extent 3036 * in the file, then we also free the partial cluster, if any 3037 */ 3038 if (partial.state == tofree && err == 0) { 3039 int flags = get_default_free_blocks_flags(inode); 3040 3041 if (ext4_is_pending(inode, partial.lblk)) 3042 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 3043 ext4_free_blocks(handle, inode, NULL, 3044 EXT4_C2B(sbi, partial.pclu), 3045 sbi->s_cluster_ratio, flags); 3046 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 3047 ext4_rereserve_cluster(inode, partial.lblk); 3048 partial.state = initial; 3049 } 3050 3051 /* TODO: flexible tree reduction should be here */ 3052 if (path->p_hdr->eh_entries == 0) { 3053 /* 3054 * truncate to zero freed all the tree, 3055 * so we need to correct eh_depth 3056 */ 3057 err = ext4_ext_get_access(handle, inode, path); 3058 if (err == 0) { 3059 ext_inode_hdr(inode)->eh_depth = 0; 3060 ext_inode_hdr(inode)->eh_max = 3061 cpu_to_le16(ext4_ext_space_root(inode, 0)); 3062 err = ext4_ext_dirty(handle, inode, path); 3063 } 3064 } 3065 out: 3066 ext4_free_ext_path(path); 3067 path = NULL; 3068 if (err == -EAGAIN) 3069 goto again; 3070 ext4_journal_stop(handle); 3071 3072 return err; 3073 } 3074 3075 /* 3076 * called at mount time 3077 */ 3078 void ext4_ext_init(struct super_block *sb) 3079 { 3080 /* 3081 * possible initialization would be here 3082 */ 3083 3084 if (ext4_has_feature_extents(sb)) { 3085 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS) 3086 printk(KERN_INFO "EXT4-fs: file extents enabled" 3087 #ifdef AGGRESSIVE_TEST 3088 ", aggressive tests" 3089 #endif 3090 #ifdef CHECK_BINSEARCH 3091 ", check binsearch" 3092 #endif 3093 #ifdef EXTENTS_STATS 3094 ", stats" 3095 #endif 3096 "\n"); 3097 #endif 3098 #ifdef EXTENTS_STATS 3099 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock); 3100 EXT4_SB(sb)->s_ext_min = 1 << 30; 3101 EXT4_SB(sb)->s_ext_max = 0; 3102 #endif 3103 } 3104 } 3105 3106 /* 3107 * called at umount time 3108 */ 3109 void ext4_ext_release(struct super_block *sb) 3110 { 3111 if (!ext4_has_feature_extents(sb)) 3112 return; 3113 3114 #ifdef EXTENTS_STATS 3115 if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) { 3116 struct ext4_sb_info *sbi = EXT4_SB(sb); 3117 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n", 3118 
		       sbi->s_ext_blocks, sbi->s_ext_extents,
		       sbi->s_ext_blocks / sbi->s_ext_extents);
		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
		       sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
	}
#endif
}

static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
{
	ext4_lblk_t  ee_block;
	ext4_fsblk_t ee_pblock;
	unsigned int ee_len;

	ee_block  = le32_to_cpu(ex->ee_block);
	ee_len    = ext4_ext_get_actual_len(ex);
	ee_pblock = ext4_ext_pblock(ex);

	if (ee_len == 0)
		return 0;

	return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
				     EXTENT_STATUS_WRITTEN);
}

/* FIXME!! we need to try to merge to left or right after zero-out */
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	ext4_fsblk_t ee_pblock;
	unsigned int ee_len;

	ee_len    = ext4_ext_get_actual_len(ex);
	ee_pblock = ext4_ext_pblock(ex);
	return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock,
				  ee_len);
}

/*
 * ext4_split_extent_at() splits an extent at the given block.
 *
 * @handle: the journal handle
 * @inode: the file inode
 * @path: the path to the extent
 * @split: the logical block where the extent is split.
 * @split_flag: indicates whether the extent can be zeroed out if the split
 *		fails, and the states (initialized or unwritten) of the new
 *		extents.
 * @flags: flags used to insert the new extent into the extent tree.
 *
 * Splits extent [a, b] into two extents [a, @split) and [@split, b], the
 * states of which are determined by @split_flag.
 *
 * There are two cases:
 *  a> the extent is split into two extents.
 *  b> no split is needed, just mark the extent.
 *
 * Returns 0 on success.
 */
static int ext4_split_extent_at(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path **ppath,
			     ext4_lblk_t split,
			     int split_flag,
			     int flags)
{
	struct ext4_ext_path *path = *ppath;
	ext4_fsblk_t newblock;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex, newex, orig_ex, zero_ex;
	struct ext4_extent *ex2 = NULL;
	unsigned int ee_len, depth;
	int err = 0;

	BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
	       (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));

	ext_debug(inode, "logical block %llu\n", (unsigned long long)split);

	ext4_ext_show_leaf(inode, path);

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	newblock = split - ee_block + ext4_ext_pblock(ex);

	BUG_ON(split < ee_block || split >= (ee_block + ee_len));
	BUG_ON(!ext4_ext_is_unwritten(ex) &&
	       split_flag & (EXT4_EXT_MAY_ZEROOUT |
			     EXT4_EXT_MARK_UNWRIT1 |
			     EXT4_EXT_MARK_UNWRIT2));

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;

	if (split == ee_block) {
		/*
		 * case b: block @split is the first block of the extent;
		 * we just change the state of the extent and no split is
		 * needed.
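		 * (E.g. splitting an unwritten extent 100..199 at block 100
		 * with EXT4_EXT_MARK_UNWRIT2 cleared just marks the whole
		 * extent initialized; no new extent is inserted.)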
3219 */ 3220 if (split_flag & EXT4_EXT_MARK_UNWRIT2) 3221 ext4_ext_mark_unwritten(ex); 3222 else 3223 ext4_ext_mark_initialized(ex); 3224 3225 if (!(flags & EXT4_GET_BLOCKS_PRE_IO)) 3226 ext4_ext_try_to_merge(handle, inode, path, ex); 3227 3228 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3229 goto out; 3230 } 3231 3232 /* case a */ 3233 memcpy(&orig_ex, ex, sizeof(orig_ex)); 3234 ex->ee_len = cpu_to_le16(split - ee_block); 3235 if (split_flag & EXT4_EXT_MARK_UNWRIT1) 3236 ext4_ext_mark_unwritten(ex); 3237 3238 /* 3239 * path may lead to new leaf, not to original leaf any more 3240 * after ext4_ext_insert_extent() returns, 3241 */ 3242 err = ext4_ext_dirty(handle, inode, path + depth); 3243 if (err) 3244 goto fix_extent_len; 3245 3246 ex2 = &newex; 3247 ex2->ee_block = cpu_to_le32(split); 3248 ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block)); 3249 ext4_ext_store_pblock(ex2, newblock); 3250 if (split_flag & EXT4_EXT_MARK_UNWRIT2) 3251 ext4_ext_mark_unwritten(ex2); 3252 3253 err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags); 3254 if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM) 3255 goto out; 3256 3257 if (EXT4_EXT_MAY_ZEROOUT & split_flag) { 3258 if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { 3259 if (split_flag & EXT4_EXT_DATA_VALID1) { 3260 err = ext4_ext_zeroout(inode, ex2); 3261 zero_ex.ee_block = ex2->ee_block; 3262 zero_ex.ee_len = cpu_to_le16( 3263 ext4_ext_get_actual_len(ex2)); 3264 ext4_ext_store_pblock(&zero_ex, 3265 ext4_ext_pblock(ex2)); 3266 } else { 3267 err = ext4_ext_zeroout(inode, ex); 3268 zero_ex.ee_block = ex->ee_block; 3269 zero_ex.ee_len = cpu_to_le16( 3270 ext4_ext_get_actual_len(ex)); 3271 ext4_ext_store_pblock(&zero_ex, 3272 ext4_ext_pblock(ex)); 3273 } 3274 } else { 3275 err = ext4_ext_zeroout(inode, &orig_ex); 3276 zero_ex.ee_block = orig_ex.ee_block; 3277 zero_ex.ee_len = cpu_to_le16( 3278 ext4_ext_get_actual_len(&orig_ex)); 3279 ext4_ext_store_pblock(&zero_ex, 3280 ext4_ext_pblock(&orig_ex)); 3281 } 3282 3283 if (!err) { 3284 /* update the extent length and mark as initialized */ 3285 ex->ee_len = cpu_to_le16(ee_len); 3286 ext4_ext_try_to_merge(handle, inode, path, ex); 3287 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3288 if (!err) 3289 /* update extent status tree */ 3290 err = ext4_zeroout_es(inode, &zero_ex); 3291 /* If we failed at this point, we don't know in which 3292 * state the extent tree exactly is so don't try to fix 3293 * length of the original extent as it may do even more 3294 * damage. 3295 */ 3296 goto out; 3297 } 3298 } 3299 3300 fix_extent_len: 3301 ex->ee_len = orig_ex.ee_len; 3302 /* 3303 * Ignore ext4_ext_dirty return value since we are already in error path 3304 * and err is a non-zero error code. 
3305 */ 3306 ext4_ext_dirty(handle, inode, path + path->p_depth); 3307 return err; 3308 out: 3309 ext4_ext_show_leaf(inode, path); 3310 return err; 3311 } 3312 3313 /* 3314 * ext4_split_extents() splits an extent and mark extent which is covered 3315 * by @map as split_flags indicates 3316 * 3317 * It may result in splitting the extent into multiple extents (up to three) 3318 * There are three possibilities: 3319 * a> There is no split required 3320 * b> Splits in two extents: Split is happening at either end of the extent 3321 * c> Splits in three extents: Somone is splitting in middle of the extent 3322 * 3323 */ 3324 static int ext4_split_extent(handle_t *handle, 3325 struct inode *inode, 3326 struct ext4_ext_path **ppath, 3327 struct ext4_map_blocks *map, 3328 int split_flag, 3329 int flags) 3330 { 3331 struct ext4_ext_path *path = *ppath; 3332 ext4_lblk_t ee_block; 3333 struct ext4_extent *ex; 3334 unsigned int ee_len, depth; 3335 int err = 0; 3336 int unwritten; 3337 int split_flag1, flags1; 3338 int allocated = map->m_len; 3339 3340 depth = ext_depth(inode); 3341 ex = path[depth].p_ext; 3342 ee_block = le32_to_cpu(ex->ee_block); 3343 ee_len = ext4_ext_get_actual_len(ex); 3344 unwritten = ext4_ext_is_unwritten(ex); 3345 3346 if (map->m_lblk + map->m_len < ee_block + ee_len) { 3347 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT; 3348 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO; 3349 if (unwritten) 3350 split_flag1 |= EXT4_EXT_MARK_UNWRIT1 | 3351 EXT4_EXT_MARK_UNWRIT2; 3352 if (split_flag & EXT4_EXT_DATA_VALID2) 3353 split_flag1 |= EXT4_EXT_DATA_VALID1; 3354 err = ext4_split_extent_at(handle, inode, ppath, 3355 map->m_lblk + map->m_len, split_flag1, flags1); 3356 if (err) 3357 goto out; 3358 } else { 3359 allocated = ee_len - (map->m_lblk - ee_block); 3360 } 3361 /* 3362 * Update path is required because previous ext4_split_extent_at() may 3363 * result in split of original leaf or extent zeroout. 3364 */ 3365 path = ext4_find_extent(inode, map->m_lblk, ppath, flags); 3366 if (IS_ERR(path)) 3367 return PTR_ERR(path); 3368 depth = ext_depth(inode); 3369 ex = path[depth].p_ext; 3370 if (!ex) { 3371 EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 3372 (unsigned long) map->m_lblk); 3373 return -EFSCORRUPTED; 3374 } 3375 unwritten = ext4_ext_is_unwritten(ex); 3376 3377 if (map->m_lblk >= ee_block) { 3378 split_flag1 = split_flag & EXT4_EXT_DATA_VALID2; 3379 if (unwritten) { 3380 split_flag1 |= EXT4_EXT_MARK_UNWRIT1; 3381 split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT | 3382 EXT4_EXT_MARK_UNWRIT2); 3383 } 3384 err = ext4_split_extent_at(handle, inode, ppath, 3385 map->m_lblk, split_flag1, flags); 3386 if (err) 3387 goto out; 3388 } 3389 3390 ext4_ext_show_leaf(inode, path); 3391 out: 3392 return err ? err : allocated; 3393 } 3394 3395 /* 3396 * This function is called by ext4_ext_map_blocks() if someone tries to write 3397 * to an unwritten extent. It may result in splitting the unwritten 3398 * extent into multiple extents (up to three - one initialized and two 3399 * unwritten). 3400 * There are three possibilities: 3401 * a> There is no split required: Entire extent should be initialized 3402 * b> Splits in two extents: Write is happening at either end of the extent 3403 * c> Splits in three extents: Somone is writing in middle of the extent 3404 * 3405 * Pre-conditions: 3406 * - The extent pointed to by 'path' is unwritten. 3407 * - The extent pointed to by 'path' contains a superset 3408 * of the logical span [map->m_lblk, map->m_lblk + map->m_len). 
3409 * 3410 * Post-conditions on success: 3411 * - the returned value is the number of blocks beyond map->l_lblk 3412 * that are allocated and initialized. 3413 * It is guaranteed to be >= map->m_len. 3414 */ 3415 static int ext4_ext_convert_to_initialized(handle_t *handle, 3416 struct inode *inode, 3417 struct ext4_map_blocks *map, 3418 struct ext4_ext_path **ppath, 3419 int flags) 3420 { 3421 struct ext4_ext_path *path = *ppath; 3422 struct ext4_sb_info *sbi; 3423 struct ext4_extent_header *eh; 3424 struct ext4_map_blocks split_map; 3425 struct ext4_extent zero_ex1, zero_ex2; 3426 struct ext4_extent *ex, *abut_ex; 3427 ext4_lblk_t ee_block, eof_block; 3428 unsigned int ee_len, depth, map_len = map->m_len; 3429 int allocated = 0, max_zeroout = 0; 3430 int err = 0; 3431 int split_flag = EXT4_EXT_DATA_VALID2; 3432 3433 ext_debug(inode, "logical block %llu, max_blocks %u\n", 3434 (unsigned long long)map->m_lblk, map_len); 3435 3436 sbi = EXT4_SB(inode->i_sb); 3437 eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1) 3438 >> inode->i_sb->s_blocksize_bits; 3439 if (eof_block < map->m_lblk + map_len) 3440 eof_block = map->m_lblk + map_len; 3441 3442 depth = ext_depth(inode); 3443 eh = path[depth].p_hdr; 3444 ex = path[depth].p_ext; 3445 ee_block = le32_to_cpu(ex->ee_block); 3446 ee_len = ext4_ext_get_actual_len(ex); 3447 zero_ex1.ee_len = 0; 3448 zero_ex2.ee_len = 0; 3449 3450 trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); 3451 3452 /* Pre-conditions */ 3453 BUG_ON(!ext4_ext_is_unwritten(ex)); 3454 BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); 3455 3456 /* 3457 * Attempt to transfer newly initialized blocks from the currently 3458 * unwritten extent to its neighbor. This is much cheaper 3459 * than an insertion followed by a merge as those involve costly 3460 * memmove() calls. Transferring to the left is the common case in 3461 * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE) 3462 * followed by append writes. 3463 * 3464 * Limitations of the current logic: 3465 * - L1: we do not deal with writes covering the whole extent. 3466 * This would require removing the extent if the transfer 3467 * is possible. 3468 * - L2: we only attempt to merge with an extent stored in the 3469 * same extent tree node. 3470 */ 3471 if ((map->m_lblk == ee_block) && 3472 /* See if we can merge left */ 3473 (map_len < ee_len) && /*L1*/ 3474 (ex > EXT_FIRST_EXTENT(eh))) { /*L2*/ 3475 ext4_lblk_t prev_lblk; 3476 ext4_fsblk_t prev_pblk, ee_pblk; 3477 unsigned int prev_len; 3478 3479 abut_ex = ex - 1; 3480 prev_lblk = le32_to_cpu(abut_ex->ee_block); 3481 prev_len = ext4_ext_get_actual_len(abut_ex); 3482 prev_pblk = ext4_ext_pblock(abut_ex); 3483 ee_pblk = ext4_ext_pblock(ex); 3484 3485 /* 3486 * A transfer of blocks from 'ex' to 'abut_ex' is allowed 3487 * upon those conditions: 3488 * - C1: abut_ex is initialized, 3489 * - C2: abut_ex is logically abutting ex, 3490 * - C3: abut_ex is physically abutting ex, 3491 * - C4: abut_ex can receive the additional blocks without 3492 * overflowing the (initialized) length limit. 
3493 */ 3494 if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/ 3495 ((prev_lblk + prev_len) == ee_block) && /*C2*/ 3496 ((prev_pblk + prev_len) == ee_pblk) && /*C3*/ 3497 (prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ 3498 err = ext4_ext_get_access(handle, inode, path + depth); 3499 if (err) 3500 goto out; 3501 3502 trace_ext4_ext_convert_to_initialized_fastpath(inode, 3503 map, ex, abut_ex); 3504 3505 /* Shift the start of ex by 'map_len' blocks */ 3506 ex->ee_block = cpu_to_le32(ee_block + map_len); 3507 ext4_ext_store_pblock(ex, ee_pblk + map_len); 3508 ex->ee_len = cpu_to_le16(ee_len - map_len); 3509 ext4_ext_mark_unwritten(ex); /* Restore the flag */ 3510 3511 /* Extend abut_ex by 'map_len' blocks */ 3512 abut_ex->ee_len = cpu_to_le16(prev_len + map_len); 3513 3514 /* Result: number of initialized blocks past m_lblk */ 3515 allocated = map_len; 3516 } 3517 } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) && 3518 (map_len < ee_len) && /*L1*/ 3519 ex < EXT_LAST_EXTENT(eh)) { /*L2*/ 3520 /* See if we can merge right */ 3521 ext4_lblk_t next_lblk; 3522 ext4_fsblk_t next_pblk, ee_pblk; 3523 unsigned int next_len; 3524 3525 abut_ex = ex + 1; 3526 next_lblk = le32_to_cpu(abut_ex->ee_block); 3527 next_len = ext4_ext_get_actual_len(abut_ex); 3528 next_pblk = ext4_ext_pblock(abut_ex); 3529 ee_pblk = ext4_ext_pblock(ex); 3530 3531 /* 3532 * A transfer of blocks from 'ex' to 'abut_ex' is allowed 3533 * upon those conditions: 3534 * - C1: abut_ex is initialized, 3535 * - C2: abut_ex is logically abutting ex, 3536 * - C3: abut_ex is physically abutting ex, 3537 * - C4: abut_ex can receive the additional blocks without 3538 * overflowing the (initialized) length limit. 3539 */ 3540 if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/ 3541 ((map->m_lblk + map_len) == next_lblk) && /*C2*/ 3542 ((ee_pblk + ee_len) == next_pblk) && /*C3*/ 3543 (next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ 3544 err = ext4_ext_get_access(handle, inode, path + depth); 3545 if (err) 3546 goto out; 3547 3548 trace_ext4_ext_convert_to_initialized_fastpath(inode, 3549 map, ex, abut_ex); 3550 3551 /* Shift the start of abut_ex by 'map_len' blocks */ 3552 abut_ex->ee_block = cpu_to_le32(next_lblk - map_len); 3553 ext4_ext_store_pblock(abut_ex, next_pblk - map_len); 3554 ex->ee_len = cpu_to_le16(ee_len - map_len); 3555 ext4_ext_mark_unwritten(ex); /* Restore the flag */ 3556 3557 /* Extend abut_ex by 'map_len' blocks */ 3558 abut_ex->ee_len = cpu_to_le16(next_len + map_len); 3559 3560 /* Result: number of initialized blocks past m_lblk */ 3561 allocated = map_len; 3562 } 3563 } 3564 if (allocated) { 3565 /* Mark the block containing both extents as dirty */ 3566 err = ext4_ext_dirty(handle, inode, path + depth); 3567 3568 /* Update path to point to the right extent */ 3569 path[depth].p_ext = abut_ex; 3570 goto out; 3571 } else 3572 allocated = ee_len - (map->m_lblk - ee_block); 3573 3574 WARN_ON(map->m_lblk < ee_block); 3575 /* 3576 * It is safe to convert extent to initialized via explicit 3577 * zeroout only if extent is fully inside i_size or new_size. 3578 */ 3579 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; 3580 3581 if (EXT4_EXT_MAY_ZEROOUT & split_flag) 3582 max_zeroout = sbi->s_extent_max_zeroout_kb >> 3583 (inode->i_sb->s_blocksize_bits - 10); 3584 3585 /* 3586 * five cases: 3587 * 1. split the extent into three extents. 3588 * 2. split the extent into two extents, zeroout the head of the first 3589 * extent. 3590 * 3. 
split the extent into two extents, zeroout the tail of the second
	 *    extent.
	 * 4. split the extent into two extents without zeroout.
	 * 5. no splitting needed, just possibly zeroout the head and / or the
	 *    tail of the extent.
	 */
	split_map.m_lblk = map->m_lblk;
	split_map.m_len = map->m_len;

	if (max_zeroout && (allocated > split_map.m_len)) {
		if (allocated <= max_zeroout) {
			/* case 3 or 5 */
			zero_ex1.ee_block =
				 cpu_to_le32(split_map.m_lblk +
					     split_map.m_len);
			zero_ex1.ee_len =
				cpu_to_le16(allocated - split_map.m_len);
			ext4_ext_store_pblock(&zero_ex1,
				ext4_ext_pblock(ex) + split_map.m_lblk +
				split_map.m_len - ee_block);
			err = ext4_ext_zeroout(inode, &zero_ex1);
			if (err)
				goto fallback;
			split_map.m_len = allocated;
		}
		if (split_map.m_lblk - ee_block + split_map.m_len <
								max_zeroout) {
			/* case 2 or 5 */
			if (split_map.m_lblk != ee_block) {
				zero_ex2.ee_block = ex->ee_block;
				zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
							      ee_block);
				ext4_ext_store_pblock(&zero_ex2,
						      ext4_ext_pblock(ex));
				err = ext4_ext_zeroout(inode, &zero_ex2);
				if (err)
					goto fallback;
			}

			split_map.m_len += split_map.m_lblk - ee_block;
			split_map.m_lblk = ee_block;
			allocated = map->m_len;
		}
	}

fallback:
	err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
				flags);
	if (err > 0)
		err = 0;
out:
	/* If we have gotten a failure, don't zero out status tree */
	if (!err) {
		err = ext4_zeroout_es(inode, &zero_ex1);
		if (!err)
			err = ext4_zeroout_es(inode, &zero_ex2);
	}
	return err ? err : allocated;
}

/*
 * This function is called by ext4_ext_map_blocks() from
 * ext4_get_blocks_dio_write() when DIO writes
 * to an unwritten extent.
 *
 * Writing to an unwritten extent may result in splitting the unwritten
 * extent into multiple initialized/unwritten extents (up to three).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be unwritten
 *   b> Splits into two extents: Write is happening at either end of the extent
 *   c> Splits into three extents: Someone is writing in the middle of the
 *      extent
 *
 * This works the same way in the case of initialized -> unwritten conversion.
 *
 * One or more index blocks may be needed if the extent tree grows after
 * the unwritten extent is split. To prevent ENOSPC from occurring when the
 * IO completes, we split the unwritten extent before submitting the DIO.
 * The unwritten extent will be split into (at most) three unwritten
 * extents. After the IO completes, the part that was filled will be
 * converted to initialized by the end_io callback function
 * via ext4_convert_unwritten_extents().
 *
 * Returns the size of the unwritten extent to be written on success.
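 *
 * For example, a DIO write to blocks 10..19 of an unwritten extent
 * covering 0..29 first splits it into 0..9, 10..19 and 20..29, all
 * still unwritten; only 10..19 is converted once the IO completes.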
3673 */ 3674 static int ext4_split_convert_extents(handle_t *handle, 3675 struct inode *inode, 3676 struct ext4_map_blocks *map, 3677 struct ext4_ext_path **ppath, 3678 int flags) 3679 { 3680 struct ext4_ext_path *path = *ppath; 3681 ext4_lblk_t eof_block; 3682 ext4_lblk_t ee_block; 3683 struct ext4_extent *ex; 3684 unsigned int ee_len; 3685 int split_flag = 0, depth; 3686 3687 ext_debug(inode, "logical block %llu, max_blocks %u\n", 3688 (unsigned long long)map->m_lblk, map->m_len); 3689 3690 eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1) 3691 >> inode->i_sb->s_blocksize_bits; 3692 if (eof_block < map->m_lblk + map->m_len) 3693 eof_block = map->m_lblk + map->m_len; 3694 /* 3695 * It is safe to convert extent to initialized via explicit 3696 * zeroout only if extent is fully inside i_size or new_size. 3697 */ 3698 depth = ext_depth(inode); 3699 ex = path[depth].p_ext; 3700 ee_block = le32_to_cpu(ex->ee_block); 3701 ee_len = ext4_ext_get_actual_len(ex); 3702 3703 /* Convert to unwritten */ 3704 if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) { 3705 split_flag |= EXT4_EXT_DATA_VALID1; 3706 /* Convert to initialized */ 3707 } else if (flags & EXT4_GET_BLOCKS_CONVERT) { 3708 split_flag |= ee_block + ee_len <= eof_block ? 3709 EXT4_EXT_MAY_ZEROOUT : 0; 3710 split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2); 3711 } 3712 flags |= EXT4_GET_BLOCKS_PRE_IO; 3713 return ext4_split_extent(handle, inode, ppath, map, split_flag, flags); 3714 } 3715 3716 static int ext4_convert_unwritten_extents_endio(handle_t *handle, 3717 struct inode *inode, 3718 struct ext4_map_blocks *map, 3719 struct ext4_ext_path **ppath) 3720 { 3721 struct ext4_ext_path *path = *ppath; 3722 struct ext4_extent *ex; 3723 ext4_lblk_t ee_block; 3724 unsigned int ee_len; 3725 int depth; 3726 int err = 0; 3727 3728 depth = ext_depth(inode); 3729 ex = path[depth].p_ext; 3730 ee_block = le32_to_cpu(ex->ee_block); 3731 ee_len = ext4_ext_get_actual_len(ex); 3732 3733 ext_debug(inode, "logical block %llu, max_blocks %u\n", 3734 (unsigned long long)ee_block, ee_len); 3735 3736 /* If extent is larger than requested it is a clear sign that we still 3737 * have some extent state machine issues left. So extent_split is still 3738 * required. 3739 * TODO: Once all related issues will be fixed this situation should be 3740 * illegal. 
3741 */ 3742 if (ee_block != map->m_lblk || ee_len > map->m_len) { 3743 #ifdef CONFIG_EXT4_DEBUG 3744 ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu," 3745 " len %u; IO logical block %llu, len %u", 3746 inode->i_ino, (unsigned long long)ee_block, ee_len, 3747 (unsigned long long)map->m_lblk, map->m_len); 3748 #endif 3749 err = ext4_split_convert_extents(handle, inode, map, ppath, 3750 EXT4_GET_BLOCKS_CONVERT); 3751 if (err < 0) 3752 return err; 3753 path = ext4_find_extent(inode, map->m_lblk, ppath, 0); 3754 if (IS_ERR(path)) 3755 return PTR_ERR(path); 3756 depth = ext_depth(inode); 3757 ex = path[depth].p_ext; 3758 } 3759 3760 err = ext4_ext_get_access(handle, inode, path + depth); 3761 if (err) 3762 goto out; 3763 /* first mark the extent as initialized */ 3764 ext4_ext_mark_initialized(ex); 3765 3766 /* note: ext4_ext_correct_indexes() isn't needed here because 3767 * borders are not changed 3768 */ 3769 ext4_ext_try_to_merge(handle, inode, path, ex); 3770 3771 /* Mark modified extent as dirty */ 3772 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3773 out: 3774 ext4_ext_show_leaf(inode, path); 3775 return err; 3776 } 3777 3778 static int 3779 convert_initialized_extent(handle_t *handle, struct inode *inode, 3780 struct ext4_map_blocks *map, 3781 struct ext4_ext_path **ppath, 3782 unsigned int *allocated) 3783 { 3784 struct ext4_ext_path *path = *ppath; 3785 struct ext4_extent *ex; 3786 ext4_lblk_t ee_block; 3787 unsigned int ee_len; 3788 int depth; 3789 int err = 0; 3790 3791 /* 3792 * Make sure that the extent is no bigger than we support with 3793 * unwritten extent 3794 */ 3795 if (map->m_len > EXT_UNWRITTEN_MAX_LEN) 3796 map->m_len = EXT_UNWRITTEN_MAX_LEN / 2; 3797 3798 depth = ext_depth(inode); 3799 ex = path[depth].p_ext; 3800 ee_block = le32_to_cpu(ex->ee_block); 3801 ee_len = ext4_ext_get_actual_len(ex); 3802 3803 ext_debug(inode, "logical block %llu, max_blocks %u\n", 3804 (unsigned long long)ee_block, ee_len); 3805 3806 if (ee_block != map->m_lblk || ee_len > map->m_len) { 3807 err = ext4_split_convert_extents(handle, inode, map, ppath, 3808 EXT4_GET_BLOCKS_CONVERT_UNWRITTEN); 3809 if (err < 0) 3810 return err; 3811 path = ext4_find_extent(inode, map->m_lblk, ppath, 0); 3812 if (IS_ERR(path)) 3813 return PTR_ERR(path); 3814 depth = ext_depth(inode); 3815 ex = path[depth].p_ext; 3816 if (!ex) { 3817 EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 3818 (unsigned long) map->m_lblk); 3819 return -EFSCORRUPTED; 3820 } 3821 } 3822 3823 err = ext4_ext_get_access(handle, inode, path + depth); 3824 if (err) 3825 return err; 3826 /* first mark the extent as unwritten */ 3827 ext4_ext_mark_unwritten(ex); 3828 3829 /* note: ext4_ext_correct_indexes() isn't needed here because 3830 * borders are not changed 3831 */ 3832 ext4_ext_try_to_merge(handle, inode, path, ex); 3833 3834 /* Mark modified extent as dirty */ 3835 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3836 if (err) 3837 return err; 3838 ext4_ext_show_leaf(inode, path); 3839 3840 ext4_update_inode_fsync_trans(handle, inode, 1); 3841 3842 map->m_flags |= EXT4_MAP_UNWRITTEN; 3843 if (*allocated > map->m_len) 3844 *allocated = map->m_len; 3845 map->m_len = *allocated; 3846 return 0; 3847 } 3848 3849 static int 3850 ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode, 3851 struct ext4_map_blocks *map, 3852 struct ext4_ext_path **ppath, int flags, 3853 unsigned int allocated, ext4_fsblk_t newblock) 3854 { 3855 struct ext4_ext_path __maybe_unused *path = 
*ppath; 3856 int ret = 0; 3857 int err = 0; 3858 3859 ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n", 3860 (unsigned long long)map->m_lblk, map->m_len, flags, 3861 allocated); 3862 ext4_ext_show_leaf(inode, path); 3863 3864 /* 3865 * When writing into unwritten space, we should not fail to 3866 * allocate metadata blocks for the new extent block if needed. 3867 */ 3868 flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL; 3869 3870 trace_ext4_ext_handle_unwritten_extents(inode, map, flags, 3871 allocated, newblock); 3872 3873 /* get_block() before submitting IO, split the extent */ 3874 if (flags & EXT4_GET_BLOCKS_PRE_IO) { 3875 ret = ext4_split_convert_extents(handle, inode, map, ppath, 3876 flags | EXT4_GET_BLOCKS_CONVERT); 3877 if (ret < 0) { 3878 err = ret; 3879 goto out2; 3880 } 3881 /* 3882 * shouldn't get a 0 return when splitting an extent unless 3883 * m_len is 0 (bug) or extent has been corrupted 3884 */ 3885 if (unlikely(ret == 0)) { 3886 EXT4_ERROR_INODE(inode, 3887 "unexpected ret == 0, m_len = %u", 3888 map->m_len); 3889 err = -EFSCORRUPTED; 3890 goto out2; 3891 } 3892 map->m_flags |= EXT4_MAP_UNWRITTEN; 3893 goto out; 3894 } 3895 /* IO end_io complete, convert the filled extent to written */ 3896 if (flags & EXT4_GET_BLOCKS_CONVERT) { 3897 err = ext4_convert_unwritten_extents_endio(handle, inode, map, 3898 ppath); 3899 if (err < 0) 3900 goto out2; 3901 ext4_update_inode_fsync_trans(handle, inode, 1); 3902 goto map_out; 3903 } 3904 /* buffered IO cases */ 3905 /* 3906 * repeat fallocate creation request 3907 * we already have an unwritten extent 3908 */ 3909 if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) { 3910 map->m_flags |= EXT4_MAP_UNWRITTEN; 3911 goto map_out; 3912 } 3913 3914 /* buffered READ or buffered write_begin() lookup */ 3915 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 3916 /* 3917 * We have blocks reserved already. We 3918 * return allocated blocks so that delalloc 3919 * won't do block reservation for us. But 3920 * the buffer head will be unmapped so that 3921 * a read from the block returns 0s. 3922 */ 3923 map->m_flags |= EXT4_MAP_UNWRITTEN; 3924 goto out1; 3925 } 3926 3927 /* 3928 * Default case when (flags & EXT4_GET_BLOCKS_CREATE) == 1. 3929 * For buffered writes, at writepage time, etc. Convert a 3930 * discovered unwritten extent to written. 3931 */ 3932 ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags); 3933 if (ret < 0) { 3934 err = ret; 3935 goto out2; 3936 } 3937 ext4_update_inode_fsync_trans(handle, inode, 1); 3938 /* 3939 * shouldn't get a 0 return when converting an unwritten extent 3940 * unless m_len is 0 (bug) or extent has been corrupted 3941 */ 3942 if (unlikely(ret == 0)) { 3943 EXT4_ERROR_INODE(inode, "unexpected ret == 0, m_len = %u", 3944 map->m_len); 3945 err = -EFSCORRUPTED; 3946 goto out2; 3947 } 3948 3949 out: 3950 allocated = ret; 3951 map->m_flags |= EXT4_MAP_NEW; 3952 map_out: 3953 map->m_flags |= EXT4_MAP_MAPPED; 3954 out1: 3955 map->m_pblk = newblock; 3956 if (allocated > map->m_len) 3957 allocated = map->m_len; 3958 map->m_len = allocated; 3959 ext4_ext_show_leaf(inode, path); 3960 out2: 3961 return err ? err : allocated; 3962 } 3963 3964 /* 3965 * get_implied_cluster_alloc - check to see if the requested 3966 * allocation (in the map structure) overlaps with a cluster already 3967 * allocated in an extent. 
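 *
 * As a worked example, assuming a bigalloc file system with 16 blocks
 * per cluster: logical block 35 lives in cluster EXT4_B2C(sbi, 35) = 2
 * at intra-cluster offset EXT4_LBLK_COFF(sbi, 35) = 3, so a request
 * starting at block 35 may be able to reuse a physical cluster that an
 * extent in the tree has already pinned for cluster #2.
 *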
3968 * @sb The filesystem superblock structure 3969 * @map The requested lblk->pblk mapping 3970 * @ex The extent structure which might contain an implied 3971 * cluster allocation 3972 * 3973 * This function is called by ext4_ext_map_blocks() after we failed to 3974 * find blocks that were already in the inode's extent tree. Hence, 3975 * we know that the beginning of the requested region cannot overlap 3976 * the extent from the inode's extent tree. There are three cases we 3977 * want to catch. The first is this case: 3978 * 3979 * |--- cluster # N--| 3980 * |--- extent ---| |---- requested region ---| 3981 * |==========| 3982 * 3983 * The second case that we need to test for is this one: 3984 * 3985 * |--------- cluster # N ----------------| 3986 * |--- requested region --| |------- extent ----| 3987 * |=======================| 3988 * 3989 * The third case is when the requested region lies between two extents 3990 * within the same cluster: 3991 * |------------- cluster # N-------------| 3992 * |----- ex -----| |---- ex_right ----| 3993 * |------ requested region ------| 3994 * |================| 3995 * 3996 * In each of the above cases, we need to set map->m_pblk and 3997 * map->m_len so that they correspond to the extent labelled 3998 * "|====|" from cluster #N, since it is already in use for data in 3999 * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to 4000 * signal to ext4_ext_map_blocks() that map->m_pblk should be treated 4001 * as a new "allocated" block region. Otherwise, we will return 0 and 4002 * ext4_ext_map_blocks() will then allocate one or more new clusters 4003 * by calling ext4_mb_new_blocks(). 4004 */ 4005 static int get_implied_cluster_alloc(struct super_block *sb, 4006 struct ext4_map_blocks *map, 4007 struct ext4_extent *ex, 4008 struct ext4_ext_path *path) 4009 { 4010 struct ext4_sb_info *sbi = EXT4_SB(sb); 4011 ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 4012 ext4_lblk_t ex_cluster_start, ex_cluster_end; 4013 ext4_lblk_t rr_cluster_start; 4014 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 4015 ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 4016 unsigned short ee_len = ext4_ext_get_actual_len(ex); 4017 4018 /* The extent passed in that we are trying to match */ 4019 ex_cluster_start = EXT4_B2C(sbi, ee_block); 4020 ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1); 4021 4022 /* The requested region passed into ext4_map_blocks() */ 4023 rr_cluster_start = EXT4_B2C(sbi, map->m_lblk); 4024 4025 if ((rr_cluster_start == ex_cluster_end) || 4026 (rr_cluster_start == ex_cluster_start)) { 4027 if (rr_cluster_start == ex_cluster_end) 4028 ee_start += ee_len - 1; 4029 map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset; 4030 map->m_len = min(map->m_len, 4031 (unsigned) sbi->s_cluster_ratio - c_offset); 4032 /* 4033 * Check for and handle this case: 4034 * 4035 * |--------- cluster # N-------------| 4036 * |------- extent ----| 4037 * |--- requested region ---| 4038 * |===========| 4039 */ 4040 4041 if (map->m_lblk < ee_block) 4042 map->m_len = min(map->m_len, ee_block - map->m_lblk); 4043 4044 /* 4045 * Check for the case where there is already another allocated 4046 * block to the right of 'ex' but before the end of the cluster.
4047 * 4048 * |------------- cluster # N-------------| 4049 * |----- ex -----| |---- ex_right ----| 4050 * |------ requested region ------| 4051 * |================| 4052 */ 4053 if (map->m_lblk > ee_block) { 4054 ext4_lblk_t next = ext4_ext_next_allocated_block(path); 4055 map->m_len = min(map->m_len, next - map->m_lblk); 4056 } 4057 4058 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1); 4059 return 1; 4060 } 4061 4062 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0); 4063 return 0; 4064 } 4065 4066 4067 /* 4068 * Block allocation/map/preallocation routine for extents based files 4069 * 4070 * 4071 * Need to be called with 4072 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block 4073 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem) 4074 * 4075 * return > 0, number of blocks already mapped/allocated 4076 * if create == 0 and these are pre-allocated blocks 4077 * buffer head is unmapped 4078 * otherwise blocks are mapped 4079 * 4080 * return = 0, if plain look up failed (blocks have not been allocated) 4081 * buffer head is unmapped 4082 * 4083 * return < 0, error case. 4084 */ 4085 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, 4086 struct ext4_map_blocks *map, int flags) 4087 { 4088 struct ext4_ext_path *path = NULL; 4089 struct ext4_extent newex, *ex, ex2; 4090 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4091 ext4_fsblk_t newblock = 0, pblk; 4092 int err = 0, depth, ret; 4093 unsigned int allocated = 0, offset = 0; 4094 unsigned int allocated_clusters = 0; 4095 struct ext4_allocation_request ar; 4096 ext4_lblk_t cluster_offset; 4097 4098 ext_debug(inode, "blocks %u/%u requested\n", map->m_lblk, map->m_len); 4099 trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); 4100 4101 /* find extent for this block */ 4102 path = ext4_find_extent(inode, map->m_lblk, NULL, 0); 4103 if (IS_ERR(path)) { 4104 err = PTR_ERR(path); 4105 path = NULL; 4106 goto out; 4107 } 4108 4109 depth = ext_depth(inode); 4110 4111 /* 4112 * consistent leaf must not be empty; 4113 * this situation is possible, though, _during_ tree modification; 4114 * this is why assert can't be put in ext4_find_extent() 4115 */ 4116 if (unlikely(path[depth].p_ext == NULL && depth != 0)) { 4117 EXT4_ERROR_INODE(inode, "bad extent address " 4118 "lblock: %lu, depth: %d pblock %lld", 4119 (unsigned long) map->m_lblk, depth, 4120 path[depth].p_block); 4121 err = -EFSCORRUPTED; 4122 goto out; 4123 } 4124 4125 ex = path[depth].p_ext; 4126 if (ex) { 4127 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 4128 ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 4129 unsigned short ee_len; 4130 4131 4132 /* 4133 * unwritten extents are treated as holes, except that 4134 * we split out initialized portions during a write. 4135 */ 4136 ee_len = ext4_ext_get_actual_len(ex); 4137 4138 trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len); 4139 4140 /* if found extent covers block, simply return it */ 4141 if (in_range(map->m_lblk, ee_block, ee_len)) { 4142 newblock = map->m_lblk - ee_block + ee_start; 4143 /* number of remaining blocks in the extent */ 4144 allocated = ee_len - (map->m_lblk - ee_block); 4145 ext_debug(inode, "%u fit into %u:%d -> %llu\n", 4146 map->m_lblk, ee_block, ee_len, newblock); 4147 4148 /* 4149 * If the extent is initialized check whether the 4150 * caller wants to convert it to unwritten. 
4151 */ 4152 if ((!ext4_ext_is_unwritten(ex)) && 4153 (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) { 4154 err = convert_initialized_extent(handle, 4155 inode, map, &path, &allocated); 4156 goto out; 4157 } else if (!ext4_ext_is_unwritten(ex)) { 4158 map->m_flags |= EXT4_MAP_MAPPED; 4159 map->m_pblk = newblock; 4160 if (allocated > map->m_len) 4161 allocated = map->m_len; 4162 map->m_len = allocated; 4163 ext4_ext_show_leaf(inode, path); 4164 goto out; 4165 } 4166 4167 ret = ext4_ext_handle_unwritten_extents( 4168 handle, inode, map, &path, flags, 4169 allocated, newblock); 4170 if (ret < 0) 4171 err = ret; 4172 else 4173 allocated = ret; 4174 goto out; 4175 } 4176 } 4177 4178 /* 4179 * requested block isn't allocated yet; 4180 * we couldn't try to create block if create flag is zero 4181 */ 4182 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 4183 ext4_lblk_t hole_start, hole_len; 4184 4185 hole_start = map->m_lblk; 4186 hole_len = ext4_ext_determine_hole(inode, path, &hole_start); 4187 /* 4188 * put just found gap into cache to speed up 4189 * subsequent requests 4190 */ 4191 ext4_ext_put_gap_in_cache(inode, hole_start, hole_len); 4192 4193 /* Update hole_len to reflect hole size after map->m_lblk */ 4194 if (hole_start != map->m_lblk) 4195 hole_len -= map->m_lblk - hole_start; 4196 map->m_pblk = 0; 4197 map->m_len = min_t(unsigned int, map->m_len, hole_len); 4198 4199 goto out; 4200 } 4201 4202 /* 4203 * Okay, we need to do block allocation. 4204 */ 4205 newex.ee_block = cpu_to_le32(map->m_lblk); 4206 cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 4207 4208 /* 4209 * If we are doing bigalloc, check to see if the extent returned 4210 * by ext4_find_extent() implies a cluster we can use. 4211 */ 4212 if (cluster_offset && ex && 4213 get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { 4214 ar.len = allocated = map->m_len; 4215 newblock = map->m_pblk; 4216 goto got_allocated_blocks; 4217 } 4218 4219 /* find neighbour allocated blocks */ 4220 ar.lleft = map->m_lblk; 4221 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); 4222 if (err) 4223 goto out; 4224 ar.lright = map->m_lblk; 4225 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); 4226 if (err < 0) 4227 goto out; 4228 4229 /* Check if the extent after searching to the right implies a 4230 * cluster we can use. */ 4231 if ((sbi->s_cluster_ratio > 1) && err && 4232 get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) { 4233 ar.len = allocated = map->m_len; 4234 newblock = map->m_pblk; 4235 goto got_allocated_blocks; 4236 } 4237 4238 /* 4239 * See if request is beyond maximum number of blocks we can have in 4240 * a single extent. For an initialized extent this limit is 4241 * EXT_INIT_MAX_LEN and for an unwritten extent this limit is 4242 * EXT_UNWRITTEN_MAX_LEN. 
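 * (EXT_INIT_MAX_LEN is 1 << 15 == 32768 blocks; EXT_UNWRITTEN_MAX_LEN
 * is one block less, because the high bit of ee_len is reserved as the
 * unwritten flag. With 4KiB blocks that caps a single extent at
 * 128 MiB, or 128 MiB minus one block when unwritten.)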
4243 */ 4244 if (map->m_len > EXT_INIT_MAX_LEN && 4245 !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) 4246 map->m_len = EXT_INIT_MAX_LEN; 4247 else if (map->m_len > EXT_UNWRITTEN_MAX_LEN && 4248 (flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) 4249 map->m_len = EXT_UNWRITTEN_MAX_LEN; 4250 4251 /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ 4252 newex.ee_len = cpu_to_le16(map->m_len); 4253 err = ext4_ext_check_overlap(sbi, inode, &newex, path); 4254 if (err) 4255 allocated = ext4_ext_get_actual_len(&newex); 4256 else 4257 allocated = map->m_len; 4258 4259 /* allocate new block */ 4260 ar.inode = inode; 4261 ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); 4262 ar.logical = map->m_lblk; 4263 /* 4264 * We calculate the offset from the beginning of the cluster 4265 * for the logical block number, since when we allocate a 4266 * physical cluster, the physical block should start at the 4267 * same offset from the beginning of the cluster. This is 4268 * needed so that future calls to get_implied_cluster_alloc() 4269 * work correctly. 4270 */ 4271 offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 4272 ar.len = EXT4_NUM_B2C(sbi, offset+allocated); 4273 ar.goal -= offset; 4274 ar.logical -= offset; 4275 if (S_ISREG(inode->i_mode)) 4276 ar.flags = EXT4_MB_HINT_DATA; 4277 else 4278 /* disable in-core preallocation for non-regular files */ 4279 ar.flags = 0; 4280 if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE) 4281 ar.flags |= EXT4_MB_HINT_NOPREALLOC; 4282 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 4283 ar.flags |= EXT4_MB_DELALLOC_RESERVED; 4284 if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) 4285 ar.flags |= EXT4_MB_USE_RESERVED; 4286 newblock = ext4_mb_new_blocks(handle, &ar, &err); 4287 if (!newblock) 4288 goto out; 4289 allocated_clusters = ar.len; 4290 ar.len = EXT4_C2B(sbi, ar.len) - offset; 4291 ext_debug(inode, "allocate new block: goal %llu, found %llu/%u, requested %u\n", 4292 ar.goal, newblock, ar.len, allocated); 4293 if (ar.len > allocated) 4294 ar.len = allocated; 4295 4296 got_allocated_blocks: 4297 /* try to insert new extent into found leaf and return */ 4298 pblk = newblock + offset; 4299 ext4_ext_store_pblock(&newex, pblk); 4300 newex.ee_len = cpu_to_le16(ar.len); 4301 /* Mark unwritten */ 4302 if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) { 4303 ext4_ext_mark_unwritten(&newex); 4304 map->m_flags |= EXT4_MAP_UNWRITTEN; 4305 } 4306 4307 err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags); 4308 if (err) { 4309 if (allocated_clusters) { 4310 int fb_flags = 0; 4311 4312 /* 4313 * free data blocks we just allocated. 4314 * not a good idea to call discard here directly, 4315 * but otherwise we'd need to call it every free(). 4316 */ 4317 ext4_discard_preallocations(inode, 0); 4318 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 4319 fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE; 4320 ext4_free_blocks(handle, inode, NULL, newblock, 4321 EXT4_C2B(sbi, allocated_clusters), 4322 fb_flags); 4323 } 4324 goto out; 4325 } 4326 4327 /* 4328 * Reduce the reserved cluster count to reflect successful deferred 4329 * allocation of delayed allocated clusters or direct allocation of 4330 * clusters discovered to be delayed allocated. Once allocated, a 4331 * cluster is not included in the reserved count. 
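 *
 * For example, if three clusters were allocated here and two of them
 * had outstanding delayed-allocation reservations, ext4_es_delayed_clu()
 * below reports 2, and the reserved cluster count and the matching
 * quota reservation are reduced by that amount.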
4332 */ 4333 if (test_opt(inode->i_sb, DELALLOC) && allocated_clusters) { 4334 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { 4335 /* 4336 * When allocating delayed allocated clusters, simply 4337 * reduce the reserved cluster count and claim quota 4338 */ 4339 ext4_da_update_reserve_space(inode, allocated_clusters, 4340 1); 4341 } else { 4342 ext4_lblk_t lblk, len; 4343 unsigned int n; 4344 4345 /* 4346 * When allocating non-delayed allocated clusters 4347 * (from fallocate, filemap, DIO, or clusters 4348 * allocated when delalloc has been disabled by 4349 * ext4_nonda_switch), reduce the reserved cluster 4350 * count by the number of allocated clusters that 4351 * have previously been delayed allocated. Quota 4352 * has been claimed by ext4_mb_new_blocks() above, 4353 * so release the quota reservations made for any 4354 * previously delayed allocated clusters. 4355 */ 4356 lblk = EXT4_LBLK_CMASK(sbi, map->m_lblk); 4357 len = allocated_clusters << sbi->s_cluster_bits; 4358 n = ext4_es_delayed_clu(inode, lblk, len); 4359 if (n > 0) 4360 ext4_da_update_reserve_space(inode, (int) n, 0); 4361 } 4362 } 4363 4364 /* 4365 * Cache the extent and update transaction to commit on fdatasync only 4366 * when it is _not_ an unwritten extent. 4367 */ 4368 if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0) 4369 ext4_update_inode_fsync_trans(handle, inode, 1); 4370 else 4371 ext4_update_inode_fsync_trans(handle, inode, 0); 4372 4373 map->m_flags |= (EXT4_MAP_NEW | EXT4_MAP_MAPPED); 4374 map->m_pblk = pblk; 4375 map->m_len = ar.len; 4376 allocated = map->m_len; 4377 ext4_ext_show_leaf(inode, path); 4378 out: 4379 ext4_free_ext_path(path); 4380 4381 trace_ext4_ext_map_blocks_exit(inode, flags, map, 4382 err ? err : allocated); 4383 return err ? err : allocated; 4384 } 4385 4386 int ext4_ext_truncate(handle_t *handle, struct inode *inode) 4387 { 4388 struct super_block *sb = inode->i_sb; 4389 ext4_lblk_t last_block; 4390 int err = 0; 4391 4392 /* 4393 * TODO: optimization is possible here. 4394 * Probably we need not scan at all, 4395 * because page truncation is enough. 4396 */ 4397 4398 /* we have to know where to truncate from in crash case */ 4399 EXT4_I(inode)->i_disksize = inode->i_size; 4400 err = ext4_mark_inode_dirty(handle, inode); 4401 if (err) 4402 return err; 4403 4404 last_block = (inode->i_size + sb->s_blocksize - 1) 4405 >> EXT4_BLOCK_SIZE_BITS(sb); 4406 retry: 4407 err = ext4_es_remove_extent(inode, last_block, 4408 EXT_MAX_BLOCKS - last_block); 4409 if (err == -ENOMEM) { 4410 memalloc_retry_wait(GFP_ATOMIC); 4411 goto retry; 4412 } 4413 if (err) 4414 return err; 4415 retry_remove_space: 4416 err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1); 4417 if (err == -ENOMEM) { 4418 memalloc_retry_wait(GFP_ATOMIC); 4419 goto retry_remove_space; 4420 } 4421 return err; 4422 } 4423 4424 static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset, 4425 ext4_lblk_t len, loff_t new_size, 4426 int flags) 4427 { 4428 struct inode *inode = file_inode(file); 4429 handle_t *handle; 4430 int ret = 0, ret2 = 0, ret3 = 0; 4431 int retries = 0; 4432 int depth = 0; 4433 struct ext4_map_blocks map; 4434 unsigned int credits; 4435 loff_t epos; 4436 4437 BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)); 4438 map.m_lblk = offset; 4439 map.m_len = len; 4440 /* 4441 * Don't normalize the request if it can fit in one extent so 4442 * that it doesn't get unnecessarily split into multiple 4443 * extents. 
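 * (With 4KiB blocks, EXT_UNWRITTEN_MAX_LEN is 32767 blocks, so any
 * request up to just under 128 MiB takes this path.)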
4444 */ 4445 if (len <= EXT_UNWRITTEN_MAX_LEN) 4446 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; 4447 4448 /* 4449 * credits to insert 1 extent into extent tree 4450 */ 4451 credits = ext4_chunk_trans_blocks(inode, len); 4452 depth = ext_depth(inode); 4453 4454 retry: 4455 while (len) { 4456 /* 4457 * Recalculate credits when extent tree depth changes. 4458 */ 4459 if (depth != ext_depth(inode)) { 4460 credits = ext4_chunk_trans_blocks(inode, len); 4461 depth = ext_depth(inode); 4462 } 4463 4464 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 4465 credits); 4466 if (IS_ERR(handle)) { 4467 ret = PTR_ERR(handle); 4468 break; 4469 } 4470 ret = ext4_map_blocks(handle, inode, &map, flags); 4471 if (ret <= 0) { 4472 ext4_debug("inode #%lu: block %u: len %u: " 4473 "ext4_ext_map_blocks returned %d", 4474 inode->i_ino, map.m_lblk, 4475 map.m_len, ret); 4476 ext4_mark_inode_dirty(handle, inode); 4477 ext4_journal_stop(handle); 4478 break; 4479 } 4480 /* 4481 * allow a full retry cycle for any remaining allocations 4482 */ 4483 retries = 0; 4484 map.m_lblk += ret; 4485 map.m_len = len = len - ret; 4486 epos = (loff_t)map.m_lblk << inode->i_blkbits; 4487 inode->i_ctime = current_time(inode); 4488 if (new_size) { 4489 if (epos > new_size) 4490 epos = new_size; 4491 if (ext4_update_inode_size(inode, epos) & 0x1) 4492 inode->i_mtime = inode->i_ctime; 4493 } 4494 ret2 = ext4_mark_inode_dirty(handle, inode); 4495 ext4_update_inode_fsync_trans(handle, inode, 1); 4496 ret3 = ext4_journal_stop(handle); 4497 ret2 = ret3 ? ret3 : ret2; 4498 if (unlikely(ret2)) 4499 break; 4500 } 4501 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 4502 goto retry; 4503 4504 return ret > 0 ? ret2 : ret; 4505 } 4506 4507 static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len); 4508 4509 static int ext4_insert_range(struct file *file, loff_t offset, loff_t len); 4510 4511 static long ext4_zero_range(struct file *file, loff_t offset, 4512 loff_t len, int mode) 4513 { 4514 struct inode *inode = file_inode(file); 4515 struct address_space *mapping = file->f_mapping; 4516 handle_t *handle = NULL; 4517 unsigned int max_blocks; 4518 loff_t new_size = 0; 4519 int ret = 0; 4520 int flags; 4521 int credits; 4522 int partial_begin, partial_end; 4523 loff_t start, end; 4524 ext4_lblk_t lblk; 4525 unsigned int blkbits = inode->i_blkbits; 4526 4527 trace_ext4_zero_range(inode, offset, len, mode); 4528 4529 /* Call ext4_force_commit to flush all data in case of data=journal. */ 4530 if (ext4_should_journal_data(inode)) { 4531 ret = ext4_force_commit(inode->i_sb); 4532 if (ret) 4533 return ret; 4534 } 4535 4536 /* 4537 * Round up offset. This is not fallocate, we need to zero out 4538 * blocks, so convert interior block aligned part of the range to 4539 * unwritten and possibly manually zero out unaligned parts of the 4540 * range. 
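 *
 * For example, with 4KiB blocks, offset 1024 and len 8192 yield
 * start 4096 and end 8192: block 1 is converted to unwritten below,
 * while the partial head (bytes 1024..4095) and tail (8192..9215) are
 * zeroed out by ext4_zero_partial_blocks() at the end.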
4541 */ 4542 start = round_up(offset, 1 << blkbits); 4543 end = round_down((offset + len), 1 << blkbits); 4544 4545 if (start < offset || end > offset + len) 4546 return -EINVAL; 4547 partial_begin = offset & ((1 << blkbits) - 1); 4548 partial_end = (offset + len) & ((1 << blkbits) - 1); 4549 4550 lblk = start >> blkbits; 4551 max_blocks = (end >> blkbits); 4552 if (max_blocks < lblk) 4553 max_blocks = 0; 4554 else 4555 max_blocks -= lblk; 4556 4557 inode_lock(inode); 4558 4559 /* 4560 * Indirect files do not support unwritten extents 4561 */ 4562 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 4563 ret = -EOPNOTSUPP; 4564 goto out_mutex; 4565 } 4566 4567 if (!(mode & FALLOC_FL_KEEP_SIZE) && 4568 (offset + len > inode->i_size || 4569 offset + len > EXT4_I(inode)->i_disksize)) { 4570 new_size = offset + len; 4571 ret = inode_newsize_ok(inode, new_size); 4572 if (ret) 4573 goto out_mutex; 4574 } 4575 4576 flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT; 4577 4578 /* Wait for all existing dio workers; newcomers will block on i_rwsem */ 4579 inode_dio_wait(inode); 4580 4581 ret = file_modified(file); 4582 if (ret) 4583 goto out_mutex; 4584 4585 /* Preallocate the range including the unaligned edges */ 4586 if (partial_begin || partial_end) { 4587 ret = ext4_alloc_file_blocks(file, 4588 round_down(offset, 1 << blkbits) >> blkbits, 4589 (round_up((offset + len), 1 << blkbits) - 4590 round_down(offset, 1 << blkbits)) >> blkbits, 4591 new_size, flags); 4592 if (ret) 4593 goto out_mutex; 4594 4595 } 4596 4597 /* Zero range excluding the unaligned edges */ 4598 if (max_blocks > 0) { 4599 flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN | 4600 EXT4_EX_NOCACHE); 4601 4602 /* 4603 * Prevent page faults from reinstantiating pages we have 4604 * released from page cache.
*/ 4606 filemap_invalidate_lock(mapping); 4607 4608 ret = ext4_break_layouts(inode); 4609 if (ret) { 4610 filemap_invalidate_unlock(mapping); 4611 goto out_mutex; 4612 } 4613 4614 ret = ext4_update_disksize_before_punch(inode, offset, len); 4615 if (ret) { 4616 filemap_invalidate_unlock(mapping); 4617 goto out_mutex; 4618 } 4619 /* Now release the pages and zero block aligned part of pages */ 4620 truncate_pagecache_range(inode, start, end - 1); 4621 inode->i_mtime = inode->i_ctime = current_time(inode); 4622 4623 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, 4624 flags); 4625 filemap_invalidate_unlock(mapping); 4626 if (ret) 4627 goto out_mutex; 4628 } 4629 if (!partial_begin && !partial_end) 4630 goto out_mutex; 4631 4632 /* 4633 * In the worst case we have to write out two nonadjacent unwritten 4634 * blocks and update the inode 4635 */ 4636 credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1; 4637 if (ext4_should_journal_data(inode)) 4638 credits += 2; 4639 handle = ext4_journal_start(inode, EXT4_HT_MISC, credits); 4640 if (IS_ERR(handle)) { 4641 ret = PTR_ERR(handle); 4642 ext4_std_error(inode->i_sb, ret); 4643 goto out_mutex; 4644 } 4645 4646 inode->i_mtime = inode->i_ctime = current_time(inode); 4647 if (new_size) 4648 ext4_update_inode_size(inode, new_size); 4649 ret = ext4_mark_inode_dirty(handle, inode); 4650 if (unlikely(ret)) 4651 goto out_handle; 4652 /* Zero out the partial blocks at the edges of the range */ 4653 ret = ext4_zero_partial_blocks(handle, inode, offset, len); 4654 if (ret >= 0) 4655 ext4_update_inode_fsync_trans(handle, inode, 1); 4656 4657 if (file->f_flags & O_SYNC) 4658 ext4_handle_sync(handle); 4659 4660 out_handle: 4661 ext4_journal_stop(handle); 4662 out_mutex: 4663 inode_unlock(inode); 4664 return ret; 4665 } 4666 4667 /* 4668 * Preallocate space for a file. This implements ext4's fallocate file 4669 * operation, which gets called from the sys_fallocate system call. 4670 * For block-mapped files, posix_fallocate should fall back to the method 4671 * of writing zeroes to the required new blocks (the same behavior that is 4672 * expected of file systems which do not support the fallocate() system call). 4673 */ 4674 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) 4675 { 4676 struct inode *inode = file_inode(file); 4677 loff_t new_size = 0; 4678 unsigned int max_blocks; 4679 int ret = 0; 4680 int flags; 4681 ext4_lblk_t lblk; 4682 unsigned int blkbits = inode->i_blkbits; 4683 4684 /* 4685 * Encrypted inodes can't handle collapse range or insert 4686 * range since we would need to re-encrypt blocks with a 4687 * different IV or XTS tweak (which are based on the logical 4688 * block number).
*/ 4690 if (IS_ENCRYPTED(inode) && 4691 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE))) 4692 return -EOPNOTSUPP; 4693 4694 /* Return error if mode is not supported */ 4695 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | 4696 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | 4697 FALLOC_FL_INSERT_RANGE)) 4698 return -EOPNOTSUPP; 4699 4700 inode_lock(inode); 4701 ret = ext4_convert_inline_data(inode); 4702 inode_unlock(inode); 4703 if (ret) 4704 goto exit; 4705 4706 if (mode & FALLOC_FL_PUNCH_HOLE) { 4707 ret = ext4_punch_hole(file, offset, len); 4708 goto exit; 4709 } 4710 4711 if (mode & FALLOC_FL_COLLAPSE_RANGE) { 4712 ret = ext4_collapse_range(file, offset, len); 4713 goto exit; 4714 } 4715 4716 if (mode & FALLOC_FL_INSERT_RANGE) { 4717 ret = ext4_insert_range(file, offset, len); 4718 goto exit; 4719 } 4720 4721 if (mode & FALLOC_FL_ZERO_RANGE) { 4722 ret = ext4_zero_range(file, offset, len, mode); 4723 goto exit; 4724 } 4725 trace_ext4_fallocate_enter(inode, offset, len, mode); 4726 lblk = offset >> blkbits; 4727 4728 max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits); 4729 flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT; 4730 4731 inode_lock(inode); 4732 4733 /* 4734 * We only support preallocation for extent-based files 4735 */ 4736 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 4737 ret = -EOPNOTSUPP; 4738 goto out; 4739 } 4740 4741 if (!(mode & FALLOC_FL_KEEP_SIZE) && 4742 (offset + len > inode->i_size || 4743 offset + len > EXT4_I(inode)->i_disksize)) { 4744 new_size = offset + len; 4745 ret = inode_newsize_ok(inode, new_size); 4746 if (ret) 4747 goto out; 4748 } 4749 4750 /* Wait for all existing dio workers; newcomers will block on i_rwsem */ 4751 inode_dio_wait(inode); 4752 4753 ret = file_modified(file); 4754 if (ret) 4755 goto out; 4756 4757 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags); 4758 if (ret) 4759 goto out; 4760 4761 if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) { 4762 ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal, 4763 EXT4_I(inode)->i_sync_tid); 4764 } 4765 out: 4766 inode_unlock(inode); 4767 trace_ext4_fallocate_exit(inode, offset, max_blocks, ret); 4768 exit: 4769 return ret; 4770 } 4771 4772 /* 4773 * This function converts a range of blocks to written extents. 4774 * The caller passes the start offset and the size; 4775 * all unwritten extents within this range will be converted to 4776 * written extents. 4777 * 4778 * This function is called from the direct IO end_io callback 4779 * function, to convert fallocated extents after IO is completed. 4780 * Returns 0 on success.
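 *
 * For example, once an async DIO write into a fallocated (unwritten)
 * region completes, the end_io path calls this function so that
 * subsequent reads see the written data instead of zeroes.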
4781 */ 4782 int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode, 4783 loff_t offset, ssize_t len) 4784 { 4785 unsigned int max_blocks; 4786 int ret = 0, ret2 = 0, ret3 = 0; 4787 struct ext4_map_blocks map; 4788 unsigned int blkbits = inode->i_blkbits; 4789 unsigned int credits = 0; 4790 4791 map.m_lblk = offset >> blkbits; 4792 max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits); 4793 4794 if (!handle) { 4795 /* 4796 * credits to insert 1 extent into extent tree 4797 */ 4798 credits = ext4_chunk_trans_blocks(inode, max_blocks); 4799 } 4800 while (ret >= 0 && ret < max_blocks) { 4801 map.m_lblk += ret; 4802 map.m_len = (max_blocks -= ret); 4803 if (credits) { 4804 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 4805 credits); 4806 if (IS_ERR(handle)) { 4807 ret = PTR_ERR(handle); 4808 break; 4809 } 4810 } 4811 ret = ext4_map_blocks(handle, inode, &map, 4812 EXT4_GET_BLOCKS_IO_CONVERT_EXT); 4813 if (ret <= 0) 4814 ext4_warning(inode->i_sb, 4815 "inode #%lu: block %u: len %u: " 4816 "ext4_ext_map_blocks returned %d", 4817 inode->i_ino, map.m_lblk, 4818 map.m_len, ret); 4819 ret2 = ext4_mark_inode_dirty(handle, inode); 4820 if (credits) { 4821 ret3 = ext4_journal_stop(handle); 4822 if (unlikely(ret3)) 4823 ret2 = ret3; 4824 } 4825 4826 if (ret <= 0 || ret2) 4827 break; 4828 } 4829 return ret > 0 ? ret2 : ret; 4830 } 4831 4832 int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end) 4833 { 4834 int ret = 0, err = 0; 4835 struct ext4_io_end_vec *io_end_vec; 4836 4837 /* 4838 * This is somewhat ugly but the idea is clear: When transaction is 4839 * reserved, everything goes into it. Otherwise we rather start several 4840 * smaller transactions for conversion of each extent separately. 4841 */ 4842 if (handle) { 4843 handle = ext4_journal_start_reserved(handle, 4844 EXT4_HT_EXT_CONVERT); 4845 if (IS_ERR(handle)) 4846 return PTR_ERR(handle); 4847 } 4848 4849 list_for_each_entry(io_end_vec, &io_end->list_vec, list) { 4850 ret = ext4_convert_unwritten_extents(handle, io_end->inode, 4851 io_end_vec->offset, 4852 io_end_vec->size); 4853 if (ret) 4854 break; 4855 } 4856 4857 if (handle) 4858 err = ext4_journal_stop(handle); 4859 4860 return ret < 0 ? ret : err; 4861 } 4862 4863 static int ext4_iomap_xattr_fiemap(struct inode *inode, struct iomap *iomap) 4864 { 4865 __u64 physical = 0; 4866 __u64 length = 0; 4867 int blockbits = inode->i_sb->s_blocksize_bits; 4868 int error = 0; 4869 u16 iomap_type; 4870 4871 /* in-inode? 
*/ 4872 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { 4873 struct ext4_iloc iloc; 4874 int offset; /* offset of xattr in inode */ 4875 4876 error = ext4_get_inode_loc(inode, &iloc); 4877 if (error) 4878 return error; 4879 physical = (__u64)iloc.bh->b_blocknr << blockbits; 4880 offset = EXT4_GOOD_OLD_INODE_SIZE + 4881 EXT4_I(inode)->i_extra_isize; 4882 physical += offset; 4883 length = EXT4_SB(inode->i_sb)->s_inode_size - offset; 4884 brelse(iloc.bh); 4885 iomap_type = IOMAP_INLINE; 4886 } else if (EXT4_I(inode)->i_file_acl) { /* external block */ 4887 physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits; 4888 length = inode->i_sb->s_blocksize; 4889 iomap_type = IOMAP_MAPPED; 4890 } else { 4891 /* no in-inode or external block for xattr, so return -ENOENT */ 4892 error = -ENOENT; 4893 goto out; 4894 } 4895 4896 iomap->addr = physical; 4897 iomap->offset = 0; 4898 iomap->length = length; 4899 iomap->type = iomap_type; 4900 iomap->flags = 0; 4901 out: 4902 return error; 4903 } 4904 4905 static int ext4_iomap_xattr_begin(struct inode *inode, loff_t offset, 4906 loff_t length, unsigned flags, 4907 struct iomap *iomap, struct iomap *srcmap) 4908 { 4909 int error; 4910 4911 error = ext4_iomap_xattr_fiemap(inode, iomap); 4912 if (error == 0 && (offset >= iomap->length)) 4913 error = -ENOENT; 4914 return error; 4915 } 4916 4917 static const struct iomap_ops ext4_iomap_xattr_ops = { 4918 .iomap_begin = ext4_iomap_xattr_begin, 4919 }; 4920 4921 static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len) 4922 { 4923 u64 maxbytes; 4924 4925 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4926 maxbytes = inode->i_sb->s_maxbytes; 4927 else 4928 maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes; 4929 4930 if (*len == 0) 4931 return -EINVAL; 4932 if (start > maxbytes) 4933 return -EFBIG; 4934 4935 /* 4936 * Shrink request scope to what the fs can actually handle. 4937 */ 4938 if (*len > maxbytes || (maxbytes - *len) < start) 4939 *len = maxbytes - start; 4940 return 0; 4941 } 4942 4943 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 4944 u64 start, u64 len) 4945 { 4946 int error = 0; 4947 4948 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { 4949 error = ext4_ext_precache(inode); 4950 if (error) 4951 return error; 4952 fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE; 4953 } 4954 4955 /* 4956 * For bitmap files the maximum size limit could be smaller than 4957 * s_maxbytes, so check len here manually instead of just relying on the 4958 * generic check. 
4959 */ 4960 error = ext4_fiemap_check_ranges(inode, start, &len); 4961 if (error) 4962 return error; 4963 4964 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { 4965 fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR; 4966 return iomap_fiemap(inode, fieinfo, start, len, 4967 &ext4_iomap_xattr_ops); 4968 } 4969 4970 return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops); 4971 } 4972 4973 int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo, 4974 __u64 start, __u64 len) 4975 { 4976 ext4_lblk_t start_blk, len_blks; 4977 __u64 last_blk; 4978 int error = 0; 4979 4980 if (ext4_has_inline_data(inode)) { 4981 int has_inline; 4982 4983 down_read(&EXT4_I(inode)->xattr_sem); 4984 has_inline = ext4_has_inline_data(inode); 4985 up_read(&EXT4_I(inode)->xattr_sem); 4986 if (has_inline) 4987 return 0; 4988 } 4989 4990 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { 4991 error = ext4_ext_precache(inode); 4992 if (error) 4993 return error; 4994 fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE; 4995 } 4996 4997 error = fiemap_prep(inode, fieinfo, start, &len, 0); 4998 if (error) 4999 return error; 5000 5001 error = ext4_fiemap_check_ranges(inode, start, &len); 5002 if (error) 5003 return error; 5004 5005 start_blk = start >> inode->i_sb->s_blocksize_bits; 5006 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; 5007 if (last_blk >= EXT_MAX_BLOCKS) 5008 last_blk = EXT_MAX_BLOCKS-1; 5009 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; 5010 5011 /* 5012 * Walk the extent tree gathering extent information 5013 * and pushing extents back to the user. 5014 */ 5015 return ext4_fill_es_cache_info(inode, start_blk, len_blks, fieinfo); 5016 } 5017 5018 /* 5019 * ext4_ext_shift_path_extents: 5020 * Shift the extents of a path structure lying between path[depth].p_ext 5021 * and EXT_LAST_EXTENT(path[depth].p_hdr), by @shift blocks. @SHIFT tells 5022 * if it is right shift or left shift operation. 5023 */ 5024 static int 5025 ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift, 5026 struct inode *inode, handle_t *handle, 5027 enum SHIFT_DIRECTION SHIFT) 5028 { 5029 int depth, err = 0; 5030 struct ext4_extent *ex_start, *ex_last; 5031 bool update = false; 5032 int credits, restart_credits; 5033 depth = path->p_depth; 5034 5035 while (depth >= 0) { 5036 if (depth == path->p_depth) { 5037 ex_start = path[depth].p_ext; 5038 if (!ex_start) 5039 return -EFSCORRUPTED; 5040 5041 ex_last = EXT_LAST_EXTENT(path[depth].p_hdr); 5042 /* leaf + sb + inode */ 5043 credits = 3; 5044 if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) { 5045 update = true; 5046 /* extent tree + sb + inode */ 5047 credits = depth + 2; 5048 } 5049 5050 restart_credits = ext4_writepage_trans_blocks(inode); 5051 err = ext4_datasem_ensure_credits(handle, inode, credits, 5052 restart_credits, 0); 5053 if (err) { 5054 if (err > 0) 5055 err = -EAGAIN; 5056 goto out; 5057 } 5058 5059 err = ext4_ext_get_access(handle, inode, path + depth); 5060 if (err) 5061 goto out; 5062 5063 while (ex_start <= ex_last) { 5064 if (SHIFT == SHIFT_LEFT) { 5065 le32_add_cpu(&ex_start->ee_block, 5066 -shift); 5067 /* Try to merge to the left. 
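 * If the merge succeeds, the following extents slide down one slot,
 * so ex_last is decremented and ex_start is left in place to process
 * the extent that moved into its slot.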
*/ 5068 if ((ex_start > 5069 EXT_FIRST_EXTENT(path[depth].p_hdr)) 5070 && 5071 ext4_ext_try_to_merge_right(inode, 5072 path, ex_start - 1)) 5073 ex_last--; 5074 else 5075 ex_start++; 5076 } else { 5077 le32_add_cpu(&ex_last->ee_block, shift); 5078 ext4_ext_try_to_merge_right(inode, path, 5079 ex_last); 5080 ex_last--; 5081 } 5082 } 5083 err = ext4_ext_dirty(handle, inode, path + depth); 5084 if (err) 5085 goto out; 5086 5087 if (--depth < 0 || !update) 5088 break; 5089 } 5090 5091 /* Update index too */ 5092 err = ext4_ext_get_access(handle, inode, path + depth); 5093 if (err) 5094 goto out; 5095 5096 if (SHIFT == SHIFT_LEFT) 5097 le32_add_cpu(&path[depth].p_idx->ei_block, -shift); 5098 else 5099 le32_add_cpu(&path[depth].p_idx->ei_block, shift); 5100 err = ext4_ext_dirty(handle, inode, path + depth); 5101 if (err) 5102 goto out; 5103 5104 /* we are done if current index is not a starting index */ 5105 if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr)) 5106 break; 5107 5108 depth--; 5109 } 5110 5111 out: 5112 return err; 5113 } 5114 5115 /* 5116 * ext4_ext_shift_extents: 5117 * All the extents which lie in the range from @start to the last allocated 5118 * block of the @inode are shifted either towards the left or the right 5119 * (depending upon @SHIFT) by @shift blocks. 5120 * On success, 0 is returned, error otherwise. 5121 */ 5122 static int 5123 ext4_ext_shift_extents(struct inode *inode, handle_t *handle, 5124 ext4_lblk_t start, ext4_lblk_t shift, 5125 enum SHIFT_DIRECTION SHIFT) 5126 { 5127 struct ext4_ext_path *path; 5128 int ret = 0, depth; 5129 struct ext4_extent *extent; 5130 ext4_lblk_t stop, *iterator, ex_start, ex_end; 5131 ext4_lblk_t tmp = EXT_MAX_BLOCKS; 5132 5133 /* Let path point to the last extent */ 5134 path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 5135 EXT4_EX_NOCACHE); 5136 if (IS_ERR(path)) 5137 return PTR_ERR(path); 5138 5139 depth = path->p_depth; 5140 extent = path[depth].p_ext; 5141 if (!extent) 5142 goto out; 5143 5144 stop = le32_to_cpu(extent->ee_block); 5145 5146 /* 5147 * For left shifts, make sure the hole on the left is big enough to 5148 * accommodate the shift. For right shifts, make sure the last extent 5149 * won't be shifted beyond EXT_MAX_BLOCKS. 5150 */ 5151 if (SHIFT == SHIFT_LEFT) { 5152 path = ext4_find_extent(inode, start - 1, &path, 5153 EXT4_EX_NOCACHE); 5154 if (IS_ERR(path)) 5155 return PTR_ERR(path); 5156 depth = path->p_depth; 5157 extent = path[depth].p_ext; 5158 if (extent) { 5159 ex_start = le32_to_cpu(extent->ee_block); 5160 ex_end = le32_to_cpu(extent->ee_block) + 5161 ext4_ext_get_actual_len(extent); 5162 } else { 5163 ex_start = 0; 5164 ex_end = 0; 5165 } 5166 5167 if ((start == ex_start && shift > ex_start) || 5168 (shift > start - ex_end)) { 5169 ret = -EINVAL; 5170 goto out; 5171 } 5172 } else { 5173 if (shift > EXT_MAX_BLOCKS - 5174 (stop + ext4_ext_get_actual_len(extent))) { 5175 ret = -EINVAL; 5176 goto out; 5177 } 5178 } 5179 5180 /* 5181 * In case of left shift, iterator points to start and it is increased 5182 * till we reach stop. In case of right shift, iterator points to stop 5183 * and it is decreased till we reach start. 5184 */ 5185 again: 5186 ret = 0; 5187 if (SHIFT == SHIFT_LEFT) 5188 iterator = &start; 5189 else 5190 iterator = &stop; 5191 5192 if (tmp != EXT_MAX_BLOCKS) 5193 *iterator = tmp; 5194 5195 /* 5196 * It's safe to start updating extents.
Start and stop are unsigned, so 5197 * in the case of a right shift, if the extent at block 0 is reached, iterator 5198 * becomes NULL to indicate the end of the loop. 5199 */ 5200 while (iterator && start <= stop) { 5201 path = ext4_find_extent(inode, *iterator, &path, 5202 EXT4_EX_NOCACHE); 5203 if (IS_ERR(path)) 5204 return PTR_ERR(path); 5205 depth = path->p_depth; 5206 extent = path[depth].p_ext; 5207 if (!extent) { 5208 EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 5209 (unsigned long) *iterator); 5210 return -EFSCORRUPTED; 5211 } 5212 if (SHIFT == SHIFT_LEFT && *iterator > 5213 le32_to_cpu(extent->ee_block)) { 5214 /* Hole, move to the next extent */ 5215 if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) { 5216 path[depth].p_ext++; 5217 } else { 5218 *iterator = ext4_ext_next_allocated_block(path); 5219 continue; 5220 } 5221 } 5222 5223 tmp = *iterator; 5224 if (SHIFT == SHIFT_LEFT) { 5225 extent = EXT_LAST_EXTENT(path[depth].p_hdr); 5226 *iterator = le32_to_cpu(extent->ee_block) + 5227 ext4_ext_get_actual_len(extent); 5228 } else { 5229 extent = EXT_FIRST_EXTENT(path[depth].p_hdr); 5230 if (le32_to_cpu(extent->ee_block) > start) 5231 *iterator = le32_to_cpu(extent->ee_block) - 1; 5232 else if (le32_to_cpu(extent->ee_block) == start) 5233 iterator = NULL; 5234 else { 5235 extent = EXT_LAST_EXTENT(path[depth].p_hdr); 5236 while (le32_to_cpu(extent->ee_block) >= start) 5237 extent--; 5238 5239 if (extent == EXT_LAST_EXTENT(path[depth].p_hdr)) 5240 break; 5241 5242 extent++; 5243 iterator = NULL; 5244 } 5245 path[depth].p_ext = extent; 5246 } 5247 ret = ext4_ext_shift_path_extents(path, shift, inode, 5248 handle, SHIFT); 5249 /* iterator can be NULL which means we should break */ 5250 if (ret == -EAGAIN) 5251 goto again; 5252 if (ret) 5253 break; 5254 } 5255 out: 5256 ext4_free_ext_path(path); 5257 return ret; 5258 } 5259 5260 /* 5261 * ext4_collapse_range: 5262 * This implements the collapse range functionality of fallocate for ext4. 5263 * Returns 0 on success, non-zero on error. 5264 */ 5265 static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len) 5266 { 5267 struct inode *inode = file_inode(file); 5268 struct super_block *sb = inode->i_sb; 5269 struct address_space *mapping = inode->i_mapping; 5270 ext4_lblk_t punch_start, punch_stop; 5271 handle_t *handle; 5272 unsigned int credits; 5273 loff_t new_size, ioffset; 5274 int ret; 5275 5276 /* 5277 * We need to test this early because xfstests assumes that a 5278 * collapse range of (0, 1) will return EOPNOTSUPP if the file 5279 * system does not support collapse range. 5280 */ 5281 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 5282 return -EOPNOTSUPP; 5283 5284 /* Collapse range works only on fs cluster size aligned regions. */ 5285 if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb))) 5286 return -EINVAL; 5287 5288 trace_ext4_collapse_range(inode, offset, len); 5289 5290 punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb); 5291 punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb); 5292 5293 /* Call ext4_force_commit to flush all data in case of data=journal.
*/ 5294 if (ext4_should_journal_data(inode)) { 5295 ret = ext4_force_commit(inode->i_sb); 5296 if (ret) 5297 return ret; 5298 } 5299 5300 inode_lock(inode); 5301 /* 5302 * There is no need to allow the collapse range to overlap EOF, since 5303 * in that case it is effectively a truncate operation 5304 */ 5305 if (offset + len >= inode->i_size) { 5306 ret = -EINVAL; 5307 goto out_mutex; 5308 } 5309 5310 /* Currently just for extent based files */ 5311 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 5312 ret = -EOPNOTSUPP; 5313 goto out_mutex; 5314 } 5315 5316 /* Wait for existing dio to complete */ 5317 inode_dio_wait(inode); 5318 5319 ret = file_modified(file); 5320 if (ret) 5321 goto out_mutex; 5322 5323 /* 5324 * Prevent page faults from reinstantiating pages we have released from 5325 * page cache. 5326 */ 5327 filemap_invalidate_lock(mapping); 5328 5329 ret = ext4_break_layouts(inode); 5330 if (ret) 5331 goto out_mmap; 5332 5333 /* 5334 * Need to round down offset to be aligned with page size boundary 5335 * for page size > block size. 5336 */ 5337 ioffset = round_down(offset, PAGE_SIZE); 5338 /* 5339 * Write tail of the last page before removed range since it will get 5340 * removed from the page cache below. 5341 */ 5342 ret = filemap_write_and_wait_range(mapping, ioffset, offset); 5343 if (ret) 5344 goto out_mmap; 5345 /* 5346 * Write out the data that will be shifted, to preserve it when 5347 * discarding the page cache below. We are also protected from pages 5348 * becoming dirty by i_rwsem and invalidate_lock. 5349 */ 5350 ret = filemap_write_and_wait_range(mapping, offset + len, 5351 LLONG_MAX); 5352 if (ret) 5353 goto out_mmap; 5354 truncate_pagecache(inode, ioffset); 5355 5356 credits = ext4_writepage_trans_blocks(inode); 5357 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 5358 if (IS_ERR(handle)) { 5359 ret = PTR_ERR(handle); 5360 goto out_mmap; 5361 } 5362 ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle); 5363 5364 down_write(&EXT4_I(inode)->i_data_sem); 5365 ext4_discard_preallocations(inode, 0); 5366 5367 ret = ext4_es_remove_extent(inode, punch_start, 5368 EXT_MAX_BLOCKS - punch_start); 5369 if (ret) { 5370 up_write(&EXT4_I(inode)->i_data_sem); 5371 goto out_stop; 5372 } 5373 5374 ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1); 5375 if (ret) { 5376 up_write(&EXT4_I(inode)->i_data_sem); 5377 goto out_stop; 5378 } 5379 ext4_discard_preallocations(inode, 0); 5380 5381 ret = ext4_ext_shift_extents(inode, handle, punch_stop, 5382 punch_stop - punch_start, SHIFT_LEFT); 5383 if (ret) { 5384 up_write(&EXT4_I(inode)->i_data_sem); 5385 goto out_stop; 5386 } 5387 5388 new_size = inode->i_size - len; 5389 i_size_write(inode, new_size); 5390 EXT4_I(inode)->i_disksize = new_size; 5391 5392 up_write(&EXT4_I(inode)->i_data_sem); 5393 if (IS_SYNC(inode)) 5394 ext4_handle_sync(handle); 5395 inode->i_mtime = inode->i_ctime = current_time(inode); 5396 ret = ext4_mark_inode_dirty(handle, inode); 5397 ext4_update_inode_fsync_trans(handle, inode, 1); 5398 5399 out_stop: 5400 ext4_journal_stop(handle); 5401 out_mmap: 5402 filemap_invalidate_unlock(mapping); 5403 out_mutex: 5404 inode_unlock(inode); 5405 return ret; 5406 } 5407 5408 /* 5409 * ext4_insert_range: 5410 * This function implements the FALLOC_FL_INSERT_RANGE flag of fallocate. 5411 * The data blocks starting from @offset to the EOF are shifted by @len 5412 * towards the right to create a hole in the @inode. The inode size is 5413 * increased by @len bytes. 5414 * Returns 0 on success, error otherwise.
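 *
 * For example, assuming 4KiB blocks and no bigalloc: inserting
 * len = 1 MiB at offset = 4 MiB of an 8 MiB file shifts every extent
 * at or beyond logical block 1024 right by 256 blocks and grows i_size
 * to 9 MiB; the inserted range reads back as a hole until written.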
5415 */ 5416 static int ext4_insert_range(struct file *file, loff_t offset, loff_t len) 5417 { 5418 struct inode *inode = file_inode(file); 5419 struct super_block *sb = inode->i_sb; 5420 struct address_space *mapping = inode->i_mapping; 5421 handle_t *handle; 5422 struct ext4_ext_path *path; 5423 struct ext4_extent *extent; 5424 ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0; 5425 unsigned int credits, ee_len; 5426 int ret = 0, depth, split_flag = 0; 5427 loff_t ioffset; 5428 5429 /* 5430 * We need to test this early because xfstests assumes that an 5431 * insert range of (0, 1) will return EOPNOTSUPP if the file 5432 * system does not support insert range. 5433 */ 5434 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 5435 return -EOPNOTSUPP; 5436 5437 /* Insert range works only on fs cluster size aligned regions. */ 5438 if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb))) 5439 return -EINVAL; 5440 5441 trace_ext4_insert_range(inode, offset, len); 5442 5443 offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb); 5444 len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb); 5445 5446 /* Call ext4_force_commit to flush all data in case of data=journal */ 5447 if (ext4_should_journal_data(inode)) { 5448 ret = ext4_force_commit(inode->i_sb); 5449 if (ret) 5450 return ret; 5451 } 5452 5453 inode_lock(inode); 5454 /* Currently just for extent based files */ 5455 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 5456 ret = -EOPNOTSUPP; 5457 goto out_mutex; 5458 } 5459 5460 /* Check whether the maximum file size would be exceeded */ 5461 if (len > inode->i_sb->s_maxbytes - inode->i_size) { 5462 ret = -EFBIG; 5463 goto out_mutex; 5464 } 5465 5466 /* Offset must be less than i_size */ 5467 if (offset >= inode->i_size) { 5468 ret = -EINVAL; 5469 goto out_mutex; 5470 } 5471 5472 /* Wait for existing dio to complete */ 5473 inode_dio_wait(inode); 5474 5475 ret = file_modified(file); 5476 if (ret) 5477 goto out_mutex; 5478 5479 /* 5480 * Prevent page faults from reinstantiating pages we have released from 5481 * page cache. 5482 */ 5483 filemap_invalidate_lock(mapping); 5484 5485 ret = ext4_break_layouts(inode); 5486 if (ret) 5487 goto out_mmap; 5488 5489 /* 5490 * Need to round down to align start offset to page size boundary 5491 * for page size > block size. 
*/ 5493 ioffset = round_down(offset, PAGE_SIZE); 5494 /* Write out all dirty pages */ 5495 ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, 5496 LLONG_MAX); 5497 if (ret) 5498 goto out_mmap; 5499 truncate_pagecache(inode, ioffset); 5500 5501 credits = ext4_writepage_trans_blocks(inode); 5502 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 5503 if (IS_ERR(handle)) { 5504 ret = PTR_ERR(handle); 5505 goto out_mmap; 5506 } 5507 ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle); 5508 5509 /* Expand file to avoid data loss if there is error while shifting */ 5510 inode->i_size += len; 5511 EXT4_I(inode)->i_disksize += len; 5512 inode->i_mtime = inode->i_ctime = current_time(inode); 5513 ret = ext4_mark_inode_dirty(handle, inode); 5514 if (ret) 5515 goto out_stop; 5516 5517 down_write(&EXT4_I(inode)->i_data_sem); 5518 ext4_discard_preallocations(inode, 0); 5519 5520 path = ext4_find_extent(inode, offset_lblk, NULL, 0); 5521 if (IS_ERR(path)) { 5522 up_write(&EXT4_I(inode)->i_data_sem); 5523 goto out_stop; 5524 } 5525 5526 depth = ext_depth(inode); 5527 extent = path[depth].p_ext; 5528 if (extent) { 5529 ee_start_lblk = le32_to_cpu(extent->ee_block); 5530 ee_len = ext4_ext_get_actual_len(extent); 5531 5532 /* 5533 * If offset_lblk is not the starting block of the extent, split 5534 * the extent at @offset_lblk 5535 */ 5536 if ((offset_lblk > ee_start_lblk) && 5537 (offset_lblk < (ee_start_lblk + ee_len))) { 5538 if (ext4_ext_is_unwritten(extent)) 5539 split_flag = EXT4_EXT_MARK_UNWRIT1 | 5540 EXT4_EXT_MARK_UNWRIT2; 5541 ret = ext4_split_extent_at(handle, inode, &path, 5542 offset_lblk, split_flag, 5543 EXT4_EX_NOCACHE | 5544 EXT4_GET_BLOCKS_PRE_IO | 5545 EXT4_GET_BLOCKS_METADATA_NOFAIL); 5546 } 5547 5548 ext4_free_ext_path(path); 5549 if (ret < 0) { 5550 up_write(&EXT4_I(inode)->i_data_sem); 5551 goto out_stop; 5552 } 5553 } else { 5554 ext4_free_ext_path(path); 5555 } 5556 5557 ret = ext4_es_remove_extent(inode, offset_lblk, 5558 EXT_MAX_BLOCKS - offset_lblk); 5559 if (ret) { 5560 up_write(&EXT4_I(inode)->i_data_sem); 5561 goto out_stop; 5562 } 5563 5564 /* 5565 * If offset_lblk lies in a hole at the start of the file, use 5566 * ee_start_lblk to shift extents 5567 */ 5568 ret = ext4_ext_shift_extents(inode, handle, 5569 max(ee_start_lblk, offset_lblk), len_lblk, SHIFT_RIGHT); 5570 5571 up_write(&EXT4_I(inode)->i_data_sem); 5572 if (IS_SYNC(inode)) 5573 ext4_handle_sync(handle); 5574 if (ret >= 0) 5575 ext4_update_inode_fsync_trans(handle, inode, 1); 5576 5577 out_stop: 5578 ext4_journal_stop(handle); 5579 out_mmap: 5580 filemap_invalidate_unlock(mapping); 5581 out_mutex: 5582 inode_unlock(inode); 5583 return ret; 5584 } 5585 5586 /** 5587 * ext4_swap_extents() - Swap extents between two inodes 5588 * @handle: handle for this transaction 5589 * @inode1: First inode 5590 * @inode2: Second inode 5591 * @lblk1: Start block for first inode 5592 * @lblk2: Start block for second inode 5593 * @count: Number of blocks to swap 5594 * @unwritten: Mark second inode's extents as unwritten after swap 5595 * @erp: Pointer to save error value 5596 * 5597 * This helper routine does exactly what its name promises: it swaps extents. 5598 * All other concerns, such as page-cache locking consistency, bh mapping 5599 * consistency or copying of extent data, must be performed by the caller.
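 *
 * (ext4_move_extents() in move_extent.c uses this helper to implement
 * the EXT4_IOC_MOVE_EXT ioctl, swapping ranges between the original
 * and the donor file.)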
5600 * Locking: 5601 * i_rwsem is held for both inodes 5602 * i_data_sem is locked for write for both inodes 5603 * Assumptions: 5604 * All pages from requested range are locked for both inodes 5605 */ 5606 int 5607 ext4_swap_extents(handle_t *handle, struct inode *inode1, 5608 struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2, 5609 ext4_lblk_t count, int unwritten, int *erp) 5610 { 5611 struct ext4_ext_path *path1 = NULL; 5612 struct ext4_ext_path *path2 = NULL; 5613 int replaced_count = 0; 5614 5615 BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem)); 5616 BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem)); 5617 BUG_ON(!inode_is_locked(inode1)); 5618 BUG_ON(!inode_is_locked(inode2)); 5619 5620 *erp = ext4_es_remove_extent(inode1, lblk1, count); 5621 if (unlikely(*erp)) 5622 return 0; 5623 *erp = ext4_es_remove_extent(inode2, lblk2, count); 5624 if (unlikely(*erp)) 5625 return 0; 5626 5627 while (count) { 5628 struct ext4_extent *ex1, *ex2, tmp_ex; 5629 ext4_lblk_t e1_blk, e2_blk; 5630 int e1_len, e2_len, len; 5631 int split = 0; 5632 5633 path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE); 5634 if (IS_ERR(path1)) { 5635 *erp = PTR_ERR(path1); 5636 path1 = NULL; 5637 finish: 5638 count = 0; 5639 goto repeat; 5640 } 5641 path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE); 5642 if (IS_ERR(path2)) { 5643 *erp = PTR_ERR(path2); 5644 path2 = NULL; 5645 goto finish; 5646 } 5647 ex1 = path1[path1->p_depth].p_ext; 5648 ex2 = path2[path2->p_depth].p_ext; 5649 /* Do we have something to swap? */ 5650 if (unlikely(!ex2 || !ex1)) 5651 goto finish; 5652 5653 e1_blk = le32_to_cpu(ex1->ee_block); 5654 e2_blk = le32_to_cpu(ex2->ee_block); 5655 e1_len = ext4_ext_get_actual_len(ex1); 5656 e2_len = ext4_ext_get_actual_len(ex2); 5657 5658 /* Hole handling */ 5659 if (!in_range(lblk1, e1_blk, e1_len) || 5660 !in_range(lblk2, e2_blk, e2_len)) { 5661 ext4_lblk_t next1, next2; 5662 5663 /* if hole after extent, then go to next extent */ 5664 next1 = ext4_ext_next_allocated_block(path1); 5665 next2 = ext4_ext_next_allocated_block(path2); 5666 /* If hole before extent, then shift to that extent */ 5667 if (e1_blk > lblk1) 5668 next1 = e1_blk; 5669 if (e2_blk > lblk2) 5670 next2 = e2_blk; 5671 /* Do we have something to swap? */ 5672 if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS) 5673 goto finish; 5674 /* Move to the rightmost boundary */ 5675 len = next1 - lblk1; 5676 if (len < next2 - lblk2) 5677 len = next2 - lblk2; 5678 if (len > count) 5679 len = count; 5680 lblk1 += len; 5681 lblk2 += len; 5682 count -= len; 5683 goto repeat; 5684 } 5685 5686 /* Prepare left boundary */ 5687 if (e1_blk < lblk1) { 5688 split = 1; 5689 *erp = ext4_force_split_extent_at(handle, inode1, 5690 &path1, lblk1, 0); 5691 if (unlikely(*erp)) 5692 goto finish; 5693 } 5694 if (e2_blk < lblk2) { 5695 split = 1; 5696 *erp = ext4_force_split_extent_at(handle, inode2, 5697 &path2, lblk2, 0); 5698 if (unlikely(*erp)) 5699 goto finish; 5700 } 5701 /* ext4_split_extent_at() may result in leaf extent split, 5702 * path must be revalidated.
*/ 5703 if (split) 5704 goto repeat; 5705 5706 /* Prepare right boundary */ 5707 len = count; 5708 if (len > e1_blk + e1_len - lblk1) 5709 len = e1_blk + e1_len - lblk1; 5710 if (len > e2_blk + e2_len - lblk2) 5711 len = e2_blk + e2_len - lblk2; 5712 5713 if (len != e1_len) { 5714 split = 1; 5715 *erp = ext4_force_split_extent_at(handle, inode1, 5716 &path1, lblk1 + len, 0); 5717 if (unlikely(*erp)) 5718 goto finish; 5719 } 5720 if (len != e2_len) { 5721 split = 1; 5722 *erp = ext4_force_split_extent_at(handle, inode2, 5723 &path2, lblk2 + len, 0); 5724 if (*erp) 5725 goto finish; 5726 } 5727 /* ext4_split_extent_at() may result in leaf extent split, 5728 * path must be revalidated. */ 5729 if (split) 5730 goto repeat; 5731 5732 BUG_ON(e2_len != e1_len); 5733 *erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth); 5734 if (unlikely(*erp)) 5735 goto finish; 5736 *erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth); 5737 if (unlikely(*erp)) 5738 goto finish; 5739 5740 /* Both extents are fully inside boundaries. Swap them now */ 5741 tmp_ex = *ex1; 5742 ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2)); 5743 ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex)); 5744 ex1->ee_len = cpu_to_le16(e2_len); 5745 ex2->ee_len = cpu_to_le16(e1_len); 5746 if (unwritten) 5747 ext4_ext_mark_unwritten(ex2); 5748 if (ext4_ext_is_unwritten(&tmp_ex)) 5749 ext4_ext_mark_unwritten(ex1); 5750 5751 ext4_ext_try_to_merge(handle, inode2, path2, ex2); 5752 ext4_ext_try_to_merge(handle, inode1, path1, ex1); 5753 *erp = ext4_ext_dirty(handle, inode2, path2 + 5754 path2->p_depth); 5755 if (unlikely(*erp)) 5756 goto finish; 5757 *erp = ext4_ext_dirty(handle, inode1, path1 + 5758 path1->p_depth); 5759 /* 5760 * Looks scary, huh? The second inode already points to the new 5761 * blocks, and it was successfully dirtied. But luckily an error 5762 * can happen only due to a journal error, so the full transaction 5763 * will be aborted anyway. 5764 */ 5765 if (unlikely(*erp)) 5766 goto finish; 5767 lblk1 += len; 5768 lblk2 += len; 5769 replaced_count += len; 5770 count -= len; 5771 5772 repeat: 5773 ext4_free_ext_path(path1); 5774 ext4_free_ext_path(path2); 5775 path1 = path2 = NULL; 5776 } 5777 return replaced_count; 5778 } 5779 5780 /* 5781 * ext4_clu_mapped - determine whether any block in a logical cluster has 5782 * been mapped to a physical cluster 5783 * 5784 * @inode - file containing the logical cluster 5785 * @lclu - logical cluster of interest 5786 * 5787 * Returns 1 if any block in the logical cluster is mapped, signifying 5788 * that a physical cluster has been allocated for it. Otherwise, 5789 * returns 0. Can also return negative error codes. Derived from 5790 * ext4_ext_map_blocks().
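 *
 * For example, with 16 blocks per cluster, lclu 5 is considered mapped
 * if any extent covers any logical block in the range 80..95.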
/*
 * ext4_clu_mapped - determine whether any block in a logical cluster has
 *                   been mapped to a physical cluster
 *
 * @inode - file containing the logical cluster
 * @lclu - logical cluster of interest
 *
 * Returns 1 if any block in the logical cluster is mapped, signifying
 * that a physical cluster has been allocated for it.  Otherwise,
 * returns 0.  Can also return negative error codes.  Derived from
 * ext4_ext_map_blocks().
 */
int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_ext_path *path;
	int depth, mapped = 0, err = 0;
	struct ext4_extent *extent;
	ext4_lblk_t first_lblk, first_lclu, last_lclu;

	/*
	 * if data can be stored inline, the logical cluster isn't
	 * mapped - no physical clusters have been allocated, and the
	 * file has no extents
	 */
	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
		return 0;

	/* search for the extent closest to the first block in the cluster */
	path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;
		goto out;
	}

	depth = ext_depth(inode);

	/*
	 * A consistent leaf must not be empty.  This situation is possible,
	 * though, _during_ tree modification, and it's why an assert can't
	 * be put in ext4_find_extent().
	 */
	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
		EXT4_ERROR_INODE(inode,
		    "bad extent address - lblock: %lu, depth: %d, pblock: %lld",
				 (unsigned long) EXT4_C2B(sbi, lclu),
				 depth, path[depth].p_block);
		err = -EFSCORRUPTED;
		goto out;
	}

	extent = path[depth].p_ext;

	/* can't be mapped if the extent tree is empty */
	if (extent == NULL)
		goto out;

	first_lblk = le32_to_cpu(extent->ee_block);
	first_lclu = EXT4_B2C(sbi, first_lblk);

	/*
	 * Three possible outcomes at this point - found extent spanning
	 * the target cluster, to the left of the target cluster, or to the
	 * right of the target cluster.  The first two cases are handled here.
	 * The last case indicates the target cluster is not mapped.
	 */
	if (lclu >= first_lclu) {
		last_lclu = EXT4_B2C(sbi, first_lblk +
				     ext4_ext_get_actual_len(extent) - 1);
		if (lclu <= last_lclu) {
			mapped = 1;
		} else {
			first_lblk = ext4_ext_next_allocated_block(path);
			first_lclu = EXT4_B2C(sbi, first_lblk);
			if (lclu == first_lclu)
				mapped = 1;
		}
	}

out:
	ext4_free_ext_path(path);

	return err ? err : mapped;
}
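/*
 * Illustrative only: a hypothetical wrapper around ext4_clu_mapped()
 * showing the block-to-cluster conversion a caller would typically do.
 * On a bigalloc file system a logical cluster covers 2^s_cluster_bits
 * blocks, so a caller holding a logical block number converts it with
 * EXT4_B2C() before asking whether the containing cluster is already
 * backed by a physical cluster. The helper name is an assumption for
 * this sketch.
 */
static inline int ext4_block_cluster_mapped(struct inode *inode,
					    ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	/* returns 1 if mapped, 0 if not, or a negative error code */
	return ext4_clu_mapped(inode, EXT4_B2C(sbi, lblk));
}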
/*
 * Updates physical block address and unwritten status of extent
 * starting at lblk start and of len. If such an extent doesn't exist,
 * this function splits the extent tree appropriately to create an
 * extent like this.  This function is called in the fast commit
 * replay path.  Returns 0 on success and error on failure.
 */
int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
			      int len, int unwritten, ext4_fsblk_t pblk)
{
	struct ext4_ext_path *path = NULL, *ppath;
	struct ext4_extent *ex;
	int ret;

	path = ext4_find_extent(inode, start, NULL, 0);
	if (IS_ERR(path))
		return PTR_ERR(path);
	ex = path[path->p_depth].p_ext;
	if (!ex) {
		ret = -EFSCORRUPTED;
		goto out;
	}

	if (le32_to_cpu(ex->ee_block) != start ||
	    ext4_ext_get_actual_len(ex) != len) {
		/* We need to split this extent to match our extent first */
		ppath = path;
		down_write(&EXT4_I(inode)->i_data_sem);
		ret = ext4_force_split_extent_at(NULL, inode, &ppath, start, 1);
		up_write(&EXT4_I(inode)->i_data_sem);
		if (ret)
			goto out;
		kfree(path);
		path = ext4_find_extent(inode, start, NULL, 0);
		if (IS_ERR(path))
			return PTR_ERR(path);
		ppath = path;
		ex = path[path->p_depth].p_ext;
		WARN_ON(le32_to_cpu(ex->ee_block) != start);
		if (ext4_ext_get_actual_len(ex) != len) {
			down_write(&EXT4_I(inode)->i_data_sem);
			ret = ext4_force_split_extent_at(NULL, inode, &ppath,
							 start + len, 1);
			up_write(&EXT4_I(inode)->i_data_sem);
			if (ret)
				goto out;
			kfree(path);
			path = ext4_find_extent(inode, start, NULL, 0);
			if (IS_ERR(path))
				return PTR_ERR(path);
			ex = path[path->p_depth].p_ext;
		}
	}
	if (unwritten)
		ext4_ext_mark_unwritten(ex);
	else
		ext4_ext_mark_initialized(ex);
	ext4_ext_store_pblock(ex, pblk);
	down_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
	up_write(&EXT4_I(inode)->i_data_sem);
out:
	ext4_free_ext_path(path);
	ext4_mark_inode_dirty(NULL, inode);
	return ret;
}

/* Try to shrink the extent tree */
void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t old_cur, cur = 0;

	while (cur < end) {
		path = ext4_find_extent(inode, cur, NULL, 0);
		if (IS_ERR(path))
			return;
		ex = path[path->p_depth].p_ext;
		if (!ex) {
			ext4_free_ext_path(path);
			ext4_mark_inode_dirty(NULL, inode);
			return;
		}
		old_cur = cur;
		cur = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
		if (cur <= old_cur)
			cur = old_cur + 1;
		ext4_ext_try_to_merge(NULL, inode, path, ex);
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
		up_write(&EXT4_I(inode)->i_data_sem);
		ext4_mark_inode_dirty(NULL, inode);
		ext4_free_ext_path(path);
	}
}

/* Check if *cur is a hole and if it is, skip it */
static int skip_hole(struct inode *inode, ext4_lblk_t *cur)
{
	int ret;
	struct ext4_map_blocks map;

	map.m_lblk = *cur;
	map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;

	ret = ext4_map_blocks(NULL, inode, &map, 0);
	if (ret < 0)
		return ret;
	if (ret != 0)
		return 0;
	*cur = *cur + map.m_len;
	return 0;
}
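/*
 * Illustrative only: a hypothetical helper built on the same
 * lookup-without-allocation pattern as skip_hole() above. With a NULL
 * handle and no flags, ext4_map_blocks() never allocates; it either
 * reports an existing mapping (return > 0) or, for a hole, returns 0
 * and sets map.m_len to the length of the hole, letting the caller
 * jump straight past it. The function name is an assumption for this
 * sketch.
 */
static inline ext4_lblk_t ext4_next_mapped_block(struct inode *inode,
						 ext4_lblk_t start,
						 ext4_lblk_t end)
{
	struct ext4_map_blocks map;
	int ret;

	while (start < end) {
		map.m_lblk = start;
		map.m_len = end - start;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret < 0)
			break;
		if (ret > 0)
			/* found a mapped region starting at @start */
			return map.m_lblk;
		/* hole: m_len holds the hole length, skip over it */
		start += map.m_len;
	}
	return EXT_MAX_BLOCKS;
}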
/* Count number of blocks used by this inode and update i_blocks */
int ext4_ext_replay_set_iblocks(struct inode *inode)
{
	struct ext4_ext_path *path = NULL, *path2 = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t cur = 0, end;
	int numblks = 0, i, ret = 0;
	ext4_fsblk_t cmp1, cmp2;
	struct ext4_map_blocks map;

	/* Determine the size of the file first */
	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
				EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return PTR_ERR(path);
	ex = path[path->p_depth].p_ext;
	if (!ex) {
		ext4_free_ext_path(path);
		goto out;
	}
	end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
	ext4_free_ext_path(path);

	/* Count the number of data blocks */
	cur = 0;
	while (cur < end) {
		map.m_lblk = cur;
		map.m_len = end - cur;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret < 0)
			break;
		if (ret > 0)
			numblks += ret;
		cur = cur + map.m_len;
	}

	/*
	 * Count the number of extent tree blocks. We do it by looking up
	 * two successive extents and determining the difference between
	 * their paths. When path is different for 2 successive extents
	 * we compare the blocks in the path at each level and increment
	 * iblocks by total number of differences found.
	 */
	cur = 0;
	ret = skip_hole(inode, &cur);
	if (ret < 0)
		goto out;
	path = ext4_find_extent(inode, cur, NULL, 0);
	if (IS_ERR(path))
		goto out;
	numblks += path->p_depth;
	ext4_free_ext_path(path);
	while (cur < end) {
		path = ext4_find_extent(inode, cur, NULL, 0);
		if (IS_ERR(path))
			break;
		ex = path[path->p_depth].p_ext;
		if (!ex) {
			ext4_free_ext_path(path);
			return 0;
		}
		cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
			  ext4_ext_get_actual_len(ex));
		ret = skip_hole(inode, &cur);
		if (ret < 0) {
			ext4_free_ext_path(path);
			break;
		}
		path2 = ext4_find_extent(inode, cur, NULL, 0);
		if (IS_ERR(path2)) {
			ext4_free_ext_path(path);
			break;
		}
		for (i = 0; i <= max(path->p_depth, path2->p_depth); i++) {
			cmp1 = cmp2 = 0;
			if (i <= path->p_depth)
				cmp1 = path[i].p_bh ?
					path[i].p_bh->b_blocknr : 0;
			if (i <= path2->p_depth)
				cmp2 = path2[i].p_bh ?
					path2[i].p_bh->b_blocknr : 0;
			if (cmp1 != cmp2 && cmp2 != 0)
				numblks++;
		}
		ext4_free_ext_path(path);
		ext4_free_ext_path(path2);
	}

out:
	inode->i_blocks = numblks << (inode->i_sb->s_blocksize_bits - 9);
	ext4_mark_inode_dirty(NULL, inode);
	return 0;
}
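/*
 * Illustrative only: the i_blocks update above converts a count of
 * file system blocks into 512-byte sectors, the unit inode->i_blocks
 * is kept in. A hypothetical helper making that conversion explicit
 * (the name is an assumption for this sketch):
 */
static inline blkcnt_t ext4_fsblks_to_iblocks(struct super_block *sb,
					      int numblks)
{
	/* one file system block is (blocksize / 512) sectors */
	return (blkcnt_t)numblks << (sb->s_blocksize_bits - 9);
}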
int ext4_ext_clear_bb(struct inode *inode)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t cur = 0, end;
	int j, ret = 0;
	struct ext4_map_blocks map;

	if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA))
		return 0;

	/* Determine the size of the file first */
	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
				EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return PTR_ERR(path);
	ex = path[path->p_depth].p_ext;
	if (!ex) {
		ext4_free_ext_path(path);
		return 0;
	}
	end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
	ext4_free_ext_path(path);

	cur = 0;
	while (cur < end) {
		map.m_lblk = cur;
		map.m_len = end - cur;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
			if (!IS_ERR_OR_NULL(path)) {
				for (j = 0; j < path->p_depth; j++) {
					ext4_mb_mark_bb(inode->i_sb,
							path[j].p_block, 1, 0);
					ext4_fc_record_regions(inode->i_sb,
							inode->i_ino, 0,
							path[j].p_block, 1, 1);
				}
				ext4_free_ext_path(path);
			}
			ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0);
			ext4_fc_record_regions(inode->i_sb, inode->i_ino,
					map.m_lblk, map.m_pblk, map.m_len, 1);
		}
		cur = cur + map.m_len;
	}

	return 0;
}
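/*
 * Illustrative only: a hypothetical fast commit replay sequence built
 * from the replay helpers above (the real call sites live in
 * fast_commit.c). When an inode's stale on-disk extent tree is dropped
 * during replay, its data and index blocks are first unmarked in the
 * in-memory bitmaps, and i_blocks is recomputed once the extent tree
 * has been replayed. The wrapper name is an assumption for this sketch.
 */
static inline int ext4_fc_replay_inode_blocks_sketch(struct inode *inode)
{
	int ret;

	/* unmark the stale data and extent tree blocks in the bitmaps */
	ret = ext4_ext_clear_bb(inode);
	if (ret < 0)
		return ret;

	/*
	 * ... the replay code would now re-add extents with
	 * ext4_ext_replay_update_ex() ...
	 */

	/* finally, recount the blocks referenced and update i_blocks */
	return ext4_ext_replay_set_iblocks(inode);
}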