// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/indirect.c
 *
 *  from
 *
 *  linux/fs/ext4/inode.c
 *
 *  Copyright (C) 1992, 1993, 1994, 1995
 *  Remy Card (card@masi.ibp.fr)
 *  Laboratoire MASI - Institut Blaise Pascal
 *  Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@redhat.com), 1993, 1998
 */

#include "ext4_jbd2.h"
#include "truncate.h"
#include <linux/dax.h>
#include <linux/uio.h>

#include <trace/events/ext4.h>

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

/**
 * ext4_block_to_path - parse the block number into array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set this non-zero if the referred-to block is likely to be
 *	followed (on disk) by an indirect block.
 *
 * To store the locations of a file's data blocks ext4 uses a data structure
 * common to UNIX filesystems - a tree of pointers anchored in the inode,
 * with data blocks at the leaves and indirect blocks in the intermediate
 * nodes. This function translates the block number into a path in that
 * tree - the return value is the path length and @offsets[n] is the offset
 * of the pointer to the (n+1)th node in the nth one. If @i_block is out of
 * range (negative or too large) a warning is printed and zero is returned.
 *
 * Note: the function doesn't find node addresses, so no IO is needed. All
 * we need to know is the capacity of indirect blocks (taken from
 * inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */
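
/*
 * A worked example of the mapping, assuming a 4KiB block size (so
 * EXT4_ADDR_PER_BLOCK == 1024 and EXT4_NDIR_BLOCKS == 12); the block
 * numbers are illustrative only:
 *
 *	i_block =    5  ->  depth 1, offsets = { 5 }
 *	i_block =   12  ->  depth 2, offsets = { EXT4_IND_BLOCK, 0 }
 *	i_block = 1036  ->  depth 3, offsets = { EXT4_DIND_BLOCK, 0, 0 }
 *	i_block = 1037  ->  depth 3, offsets = { EXT4_DIND_BLOCK, 0, 1 }
 */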

static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
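
/*
 * As an illustration of the chain that ext4_get_branch() below builds: for
 * a block in the double-indirect range, depth == 3 and a fully resolved
 * chain looks like
 *
 *	chain[0].bh == NULL			chain[0].p -> i_data[EXT4_DIND_BLOCK]
 *	chain[1].bh == double-indirect block	chain[1].p -> slot offsets[1] inside it
 *	chain[2].bh == indirect block		chain[2].p -> slot offsets[2] inside it
 *
 * with chain[2].key holding the (little-endian) number of the data block.
 */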

/**
 * ext4_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Function fills the array of triples <key, p, bh> and returns %NULL
 * if everything went OK or the pointer to the last filled triple
 * (incomplete one) otherwise. Upon the return chain[i].key contains
 * the number of (i+1)-th block in the chain (as it is stored in memory,
 * i.e. little-endian 32-bit), chain[i].p contains the address of that
 * number (it points into struct inode for i==0 and into the bh->b_data
 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 * block for i>0 and NULL for i==0. In other words, it holds the block
 * numbers of the chain, the addresses they were taken from (and where we
 * can verify that the chain did not change) and the buffer_heads hosting
 * these numbers.
 *
 * The function stops when it stumbles upon a zero pointer (absent block)
 * (pointer to last triple returned, *@err == 0)
 * or when it gets an IO error reading an indirect block
 * (ditto, *@err == -EIO)
 * or when it reads all @depth-1 indirect blocks successfully and finds
 * the whole chain, all the way to the data (returns %NULL, *err == 0).
 *
 * Must be called with
 * down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;
	unsigned int key;
	int ret = -EIO;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		key = le32_to_cpu(p->key);
		if (key > ext4_blocks_count(EXT4_SB(sb)->s_es)) {
			/* the block was out of range */
			ret = -EFSCORRUPTED;
			goto failure;
		}
		bh = sb_getblk(sb, key);
		if (unlikely(!bh)) {
			ret = -ENOMEM;
			goto failure;
		}

		if (!bh_uptodate_or_lock(bh)) {
			if (ext4_read_bh(bh, 0, NULL) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = ret;
no_block:
	return p;
}

/**
 * ext4_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when the heuristic for sequential allocation fails.
 * Rules are:
 *   + if there is a block to the left of our position - allocate near it.
 *   + if the pointer will live in an indirect block - allocate near that block.
 *   + if the pointer will live in the inode - allocate in the same
 *     cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group. The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 * Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put it
	 * into the same cylinder group then.
	 */
	return ext4_inode_to_goal_block(inode);
}

/**
 * ext4_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @partial: pointer to the last triple within a chain
 *
 * Normally this function finds the preferred place for block allocation
 * and returns it.
 * Because this is only used for non-extent files, we limit the block nr
 * to 32 bits.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	goal = ext4_find_near(inode, partial);
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}

/**
 * ext4_blks_to_allocate - Look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped.
 * @blocks_to_boundary: the offset in the indirect block
 *
 * Return the total number of blocks to be allocated, including the
 * direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) have not been allocated
	 * yet, so clearly the blocks on that path have not been allocated
	 * either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
	       le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}

/**
 * ext4_alloc_branch() - allocate and set up a chain of blocks
 * @handle: handle for this transaction
 * @ar: structure describing the allocation request
 * @indirect_blks: number of allocated indirect blocks
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * This function allocates blocks, zeroes out all but the last one,
 * links them into a chain and (if we are synchronous) writes them to disk.
 * In other words, it prepares a branch that can be spliced onto the
 * inode. It stores the information about that chain in the branch[], in
 * the same format as ext4_get_branch() would do. We are calling it after
 * we had read the existing part of the chain and partial points to the last
 * triple of that (one with zero ->key). Upon the exit we have the same
 * picture as after the successful ext4_get_block(), except that in one
 * place the chain is disconnected - *branch->p is still zero (we did not
 * set the last link), but branch->key contains the number that should
 * be placed into *branch->p to fill that gap.
 *
 * If allocation fails we free all blocks we've allocated (and forget
 * their buffer_heads) and return the error value from the failed
 * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 * as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle,
			     struct ext4_allocation_request *ar,
			     int indirect_blks, ext4_lblk_t *offsets,
			     Indirect *branch)
{
	struct buffer_head *bh;
	ext4_fsblk_t b, new_blocks[4];
	__le32 *p;
	int i, j, err, len = 1;

	for (i = 0; i <= indirect_blks; i++) {
		if (i == indirect_blks) {
			new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
		} else {
			ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
					ar->inode, ar->goal,
					ar->flags & EXT4_MB_DELALLOC_RESERVED,
					NULL, &err);
			/* Simplify error cleanup... */
			branch[i+1].bh = NULL;
		}
		if (err) {
			i--;
			goto failed;
		}
		branch[i].key = cpu_to_le32(new_blocks[i]);
		if (i == 0)
			continue;

		bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, ar->inode->i_sb,
						     bh, EXT4_JTR_NONE);
		if (err) {
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, bh->b_size);
		p = branch[i].p = (__le32 *) bh->b_data + offsets[i];
		b = new_blocks[i];

		if (i == indirect_blks)
			len = ar->len;
		for (j = 0; j < len; j++)
			*p++ = cpu_to_le32(b++);

		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, bh);
		if (err)
			goto failed;
	}
	return 0;
failed:
	if (i == indirect_blks) {
		/* Free data blocks */
		ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
				 ar->len, 0);
		i--;
	}
	for (; i >= 0; i--) {
		/*
		 * We want to ext4_forget() only freshly allocated indirect
		 * blocks. The buffer for new_blocks[i] is at branch[i+1].bh
		 * (the buffer at branch[0].bh is the indirect block / inode
		 * already existing before ext4_alloc_branch() was called).
		 * Also, because the blocks are freshly allocated, we don't
		 * need to revoke them, which is why we don't set
		 * EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, ar->inode, branch[i+1].bh,
				 new_blocks[i], 1,
				 branch[i+1].bh ? EXT4_FREE_BLOCKS_FORGET : 0);
	}
	return err;
}

/**
 * ext4_splice_branch() - splice the allocated branch onto inode.
 * @handle: handle for this transaction
 * @ar: structure describing the allocation request
 * @where: location of missing link
 * @num: number of indirect blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to the new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle,
			      struct ext4_allocation_request *ar,
			      Indirect *where, int num)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, ar->inode->i_sb,
						    where->bh, EXT4_JTR_NONE);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just-allocated
	 * direct blocks.
	 */
	if (num == 0 && ar->len > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < ar->len; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.
		 * Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size. But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		ext4_debug("splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		err = ext4_mark_inode_dirty(handle, ar->inode);
		if (unlikely(err))
			goto err_out;
		ext4_debug("splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key),
			 ar->len, 0);

	return err;
}

/*
 * The ext4_ind_map_blocks() function handles non-extents inodes
 * (i.e., using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_map_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to the leaf. So let's do it before attaching
 * anything to the tree, set linkage between the newborn blocks, write them
 * if sync is required, recheck the path, free and repeat if the check fails,
 * otherwise set the last missing link (that will protect us from any
 * truncate-generated removals - all blocks on the path are immune now) and
 * possibly force the write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from the inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * The ext4_ind_map_blocks() function should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks.
 */
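
/*
 * A sketch of the calling convention (illustrative only - callers normally
 * reach this through ext4_map_blocks()): a lookup, without allocation, of
 * up to 8 blocks starting at logical block 100 could be set up as
 *
 *	struct ext4_map_blocks map = { .m_lblk = 100, .m_len = 8 };
 *	int ret = ext4_ind_map_blocks(NULL, inode, &map, 0);
 *
 * with i_data_sem held for reading as described above. On return, ret > 0
 * is the number of contiguous blocks mapped starting at map.m_pblk, and
 * ret == 0 means the range starts in a hole (map.m_len is then trimmed to
 * the size of the hole).
 */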
int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			int flags)
{
	struct ext4_allocation_request ar;
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
	ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
	ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* map more blocks */
		while (count < map->m_len && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup failed */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		unsigned epb = inode->i_sb->s_blocksize / sizeof(u32);
		int i;

		/*
		 * Count the number of blocks in the subtree under 'partial'.
		 * At each level we count the number of completely empty
		 * subtrees beyond the current offset and then descend into
		 * the subtree that is only partially beyond the current
		 * offset.
		 */
		count = 0;
		for (i = partial - chain + 1; i < depth; i++)
			count = count * epb + (epb - offsets[i] - 1);
		count++;
		/* Fill in size of a hole we found */
		map->m_pblk = 0;
		map->m_len = min_t(unsigned int, map->m_len, count);
		goto cleanup;
	}

	/* Failed read of indirect block */
	if (err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	if (ext4_has_feature_bigalloc(inode->i_sb)) {
		EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
				 "non-extent mapped inodes with bigalloc");
		err = -EFSCORRUPTED;
		goto out;
	}

	/* Set up for the direct block allocation */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.logical = map->m_lblk;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ar.flags |= EXT4_MB_DELALLOC_RESERVED;
	if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
		ar.flags |= EXT4_MB_USE_RESERVED;

	ar.goal = ext4_find_goal(inode, map->m_lblk, partial);

	/* the number of blocks we need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	ar.len = ext4_blks_to_allocate(partial, indirect_blks,
				       map->m_len, blocks_to_boundary);

	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, &ar, indirect_blks,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned. Can we handle this somehow? We
	 * may need to return -EAGAIN upwards in the worst case. --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, &ar, partial, indirect_blks);
	if (err)
		goto cleanup;

	map->m_flags |= EXT4_MAP_NEW;

	ext4_update_inode_fsync_trans(handle, inode, 1);
	count = ar.len;
got_it:
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = le32_to_cpu(chain[depth-1].key);
	map->m_len = count;
	if (count > blocks_to_boundary)
		map->m_flags |= EXT4_MAP_BOUNDARY;
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
out:
	trace_ext4_ind_map_blocks_exit(inode, flags, map, err);
	return err;
}

/*
 * Calculate the number of indirect blocks touched by mapping @nrblocks
 * logically contiguous blocks.
 */
int ext4_ind_trans_blocks(struct inode *inode, int nrblocks)
{
	/*
	 * With N contiguous data blocks, we need at most
	 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
	 * 2 dindirect blocks, and 1 tindirect block
	 */
	return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
}
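
/*
 * A worked example of the bound above, assuming a 4KiB block size
 * (EXT4_ADDR_PER_BLOCK == 1024): mapping 3000 contiguous blocks touches
 * at most DIV_ROUND_UP(3000, 1024) + 4 = 3 + 4 = 7 index blocks.
 */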

static int ext4_ind_trunc_restart_fn(handle_t *handle, struct inode *inode,
				     struct buffer_head *bh, int *dropped)
{
	int err;

	if (bh) {
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			return err;
	}
	err = ext4_mark_inode_dirty(handle, inode);
	if (unlikely(err))
		return err;
	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_rwsem. So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	ext4_discard_preallocations(inode, 0);
	up_write(&EXT4_I(inode)->i_data_sem);
	*dropped = 1;
	return 0;
}

/*
 * Truncate transactions can be complex and absolutely huge. So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * Try to extend this transaction for the purposes of truncation. If the
 * extend fails, we restart the transaction.
 */
static int ext4_ind_truncate_ensure_credits(handle_t *handle,
					    struct inode *inode,
					    struct buffer_head *bh,
					    int revoke_creds)
{
	int ret;
	int dropped = 0;

	ret = ext4_journal_ensure_credits_fn(handle, EXT4_RESERVE_TRANS_BLOCKS,
			ext4_blocks_for_truncate(inode), revoke_creds,
			ext4_ind_trunc_restart_fn(handle, inode, bh, &dropped));
	if (dropped)
		down_write(&EXT4_I(inode)->i_data_sem);
	if (ret <= 0)
		return ret;
	if (bh) {
		BUFFER_TRACE(bh, "retaking write access");
		ret = ext4_journal_get_write_access(handle, inode->i_sb, bh,
						    EXT4_JTR_NONE);
		if (unlikely(ret))
			return ret;
	}
	return 0;
}

/*
 * Probably it should be a library function... search for first non-zero word
 * or memcmp with zero_page, whatever is better for particular architecture.
 * Linus?
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

/**
 * ext4_find_shared - find the indirect blocks for partial truncation.
 * @inode: inode in question
 * @depth: depth of the affected branch
 * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
 * @chain: place to store the pointers to partial indirect blocks
 * @top: place to store the (detached) top of the branch
 *
 * This is a helper function used by ext4_truncate().
 *
 * When we do truncate() we may have to clean the ends of several
 * indirect blocks but leave the blocks themselves alive. A block is
 * partially truncated if some data below the new i_size is referred
 * from it (and it is on the path to the first completely truncated
 * data block, indeed). We have to free the top of that path along
 * with everything to the right of the path. Since no allocation
 * past the truncation point is possible until ext4_truncate()
 * finishes, we may safely do the latter, but the top of the branch may
 * require special attention - pageout below the truncation point
 * might try to populate it.
 *
 * We atomically detach the top of the branch from the tree, store the
 * block number of its root in *@top, pointers to buffer_heads of
 * partially truncated blocks - in @chain[].bh and pointers to
 * their last elements that should not be removed - in
 * @chain[].p. The return value is the pointer to the last filled element
 * of @chain.
 *
 * The caller is left to do the actual freeing of the subtrees:
 *	a) free the subtree starting from *@top
 *	b) free the subtrees whose roots are stored in
 *		(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 *	c) free the subtrees growing from the inode past the @chain[0].
 *		(no partially truncated stuff there).
 */

static Indirect *ext4_find_shared(struct inode *inode, int depth,
				  ext4_lblk_t offsets[4], Indirect chain[4],
				  __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of the branch is all ours and does not grow immediately from the
	 * inode it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext4. Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}
	/* Writer: end */

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 *
 * Return 0 on success, 1 on invalid block range
 * and < 0 on fatal error.
 */
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
			     struct buffer_head *bh,
			     ext4_fsblk_t block_to_free,
			     unsigned long count, __le32 *first,
			     __le32 *last)
{
	__le32 *p;
	int flags = EXT4_FREE_BLOCKS_VALIDATED;
	int err;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
	    ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
		flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA;
	else if (ext4_should_journal_data(inode))
		flags |= EXT4_FREE_BLOCKS_FORGET;

	if (!ext4_inode_block_valid(inode, block_to_free, count)) {
		EXT4_ERROR_INODE(inode, "attempt to clear invalid "
				 "blocks %llu len %lu",
				 (unsigned long long) block_to_free, count);
		return 1;
	}

	err = ext4_ind_truncate_ensure_credits(handle, inode, bh,
				ext4_free_data_revoke_credits(inode, count));
	if (err < 0)
		goto out_err;

	for (p = first; p < last; p++)
		*p = 0;

	ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
	return 0;
out_err:
	ext4_std_error(inode->i_sb, err);
	return err;
}

/**
 * ext4_free_data - free a list of data blocks
 * @handle: handle for this transaction
 * @inode: inode we are dealing with
 * @this_bh: indirect buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: points immediately past the end of array
 *
 * We are freeing all blocks referred from that array (numbers are stored as
 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
 *
 * We accumulate contiguous runs of blocks to free. Conveniently, if these
 * blocks are contiguous then releasing them at one time will only affect one
 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
 * actually use a lot of journal space.
 *
 * @this_bh will be %NULL if @first and @last point into the inode's direct
 * block pointers.
 */
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;		/* Starting block # of a run */
	unsigned long count = 0;		/* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;		/* Pointer into inode/ind
						   corresponding to
						   block_to_free */
	ext4_fsblk_t nr;			/* Current block # */
	__le32 *p;				/* Pointer into inode/ind
						   for current block */
	int err = 0;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, inode->i_sb,
						    this_bh, EXT4_JTR_NONE);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them.
*/ 933 if (err) 934 return; 935 } 936 937 for (p = first; p < last; p++) { 938 nr = le32_to_cpu(*p); 939 if (nr) { 940 /* accumulate blocks to free if they're contiguous */ 941 if (count == 0) { 942 block_to_free = nr; 943 block_to_free_p = p; 944 count = 1; 945 } else if (nr == block_to_free + count) { 946 count++; 947 } else { 948 err = ext4_clear_blocks(handle, inode, this_bh, 949 block_to_free, count, 950 block_to_free_p, p); 951 if (err) 952 break; 953 block_to_free = nr; 954 block_to_free_p = p; 955 count = 1; 956 } 957 } 958 } 959 960 if (!err && count > 0) 961 err = ext4_clear_blocks(handle, inode, this_bh, block_to_free, 962 count, block_to_free_p, p); 963 if (err < 0) 964 /* fatal error */ 965 return; 966 967 if (this_bh) { 968 BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata"); 969 970 /* 971 * The buffer head should have an attached journal head at this 972 * point. However, if the data is corrupted and an indirect 973 * block pointed to itself, it would have been detached when 974 * the block was cleared. Check for this instead of OOPSing. 975 */ 976 if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh)) 977 ext4_handle_dirty_metadata(handle, inode, this_bh); 978 else 979 EXT4_ERROR_INODE(inode, 980 "circular indirect block detected at " 981 "block %llu", 982 (unsigned long long) this_bh->b_blocknr); 983 } 984 } 985 986 /** 987 * ext4_free_branches - free an array of branches 988 * @handle: JBD handle for this transaction 989 * @inode: inode we are dealing with 990 * @parent_bh: the buffer_head which contains *@first and *@last 991 * @first: array of block numbers 992 * @last: pointer immediately past the end of array 993 * @depth: depth of the branches to free 994 * 995 * We are freeing all blocks referred from these branches (numbers are 996 * stored as little-endian 32-bit) and updating @inode->i_blocks 997 * appropriately. 998 */ 999 static void ext4_free_branches(handle_t *handle, struct inode *inode, 1000 struct buffer_head *parent_bh, 1001 __le32 *first, __le32 *last, int depth) 1002 { 1003 ext4_fsblk_t nr; 1004 __le32 *p; 1005 1006 if (ext4_handle_is_aborted(handle)) 1007 return; 1008 1009 if (depth--) { 1010 struct buffer_head *bh; 1011 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); 1012 p = last; 1013 while (--p >= first) { 1014 nr = le32_to_cpu(*p); 1015 if (!nr) 1016 continue; /* A hole */ 1017 1018 if (!ext4_inode_block_valid(inode, nr, 1)) { 1019 EXT4_ERROR_INODE(inode, 1020 "invalid indirect mapped " 1021 "block %lu (level %d)", 1022 (unsigned long) nr, depth); 1023 break; 1024 } 1025 1026 /* Go read the buffer for the next level down */ 1027 bh = ext4_sb_bread(inode->i_sb, nr, 0); 1028 1029 /* 1030 * A read failure? Report error and clear slot 1031 * (should be rare). 1032 */ 1033 if (IS_ERR(bh)) { 1034 ext4_error_inode_block(inode, nr, -PTR_ERR(bh), 1035 "Read failure"); 1036 continue; 1037 } 1038 1039 /* This zaps the entire block. Bottom up. */ 1040 BUFFER_TRACE(bh, "free child branches"); 1041 ext4_free_branches(handle, inode, bh, 1042 (__le32 *) bh->b_data, 1043 (__le32 *) bh->b_data + addr_per_block, 1044 depth); 1045 brelse(bh); 1046 1047 /* 1048 * Everything below this pointer has been 1049 * released. Now let this top-of-subtree go. 1050 * 1051 * We want the freeing of this indirect block to be 1052 * atomic in the journal with the updating of the 1053 * bitmap block which owns it. So make some room in 1054 * the journal. 
			 *
			 * We zero the parent pointer *after* freeing its
			 * pointee in the bitmaps, so if extend_transaction()
			 * for some reason fails to put the bitmap changes and
			 * the release into the same transaction, recovery
			 * will merely complain about releasing a free block,
			 * rather than leaking blocks.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (ext4_ind_truncate_ensure_credits(handle, inode,
					NULL,
					ext4_free_metadata_revoke_credits(
							inode->i_sb, 1)) < 0)
				return;

			/*
			 * The forget flag here is critical because if
			 * we are journaling (and not doing data
			 * journaling), we have to make sure a revoke
			 * record is written to prevent the journal
			 * replay from overwriting the (former)
			 * indirect block if it gets reallocated as a
			 * data block. This must happen in the same
			 * transaction where the data blocks are
			 * actually freed.
			 */
			ext4_free_blocks(handle, inode, NULL, nr, 1,
					 EXT4_FREE_BLOCKS_METADATA|
					 EXT4_FREE_BLOCKS_FORGET);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
						inode->i_sb, parent_bh,
						EXT4_JTR_NONE)) {
					*p = 0;
					BUFFER_TRACE(parent_bh,
						"call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}

void ext4_ind_truncate(handle_t *handle, struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n = 0;
	ext4_lblk_t last_block, max_block;
	unsigned blocksize = inode->i_sb->s_blocksize;

	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (last_block != max_block) {
		n = ext4_block_to_path(inode, last_block, offsets, NULL);
		if (n == 0)
			return;
	}

	ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to propagate
	 * the new, shorter inode size (held for now in i_size) into the
	 * on-disk inode. We do this via i_disksize, which is the value which
	 * ext4 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	if (last_block == max_block) {
		/*
		 * It is unnecessary to free any data blocks if last_block is
		 * equal to the indirect block limit.
		 */
		return;
	} else if (n == 1) {		/* direct blocks */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop. No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p,
					   partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_TIND_BLOCK:
		;
	}
}
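
/*
 * Note that @end in ext4_ind_remove_space() below is exclusive: punching
 * out logical blocks 100 through 199 inclusive, for example, is requested
 * as start == 100, end == 200 (the numbers are illustrative only).
 */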

/**
 * ext4_ind_remove_space - remove space from the range
 * @handle: JBD handle for this transaction
 * @inode: inode we are dealing with
 * @start: First block to remove
 * @end: One block after the last block to remove (exclusive)
 *
 * Free the blocks in the defined range (end is exclusive endpoint of
 * range). This is used by ext4_punch_hole().
 */
int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
			  ext4_lblk_t start, ext4_lblk_t end)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4], offsets2[4];
	Indirect chain[4], chain2[4];
	Indirect *partial, *partial2;
	Indirect *p = NULL, *p2 = NULL;
	ext4_lblk_t max_block;
	__le32 nr = 0, nr2 = 0;
	int n = 0, n2 = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;

	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	if (end >= max_block)
		end = max_block;
	if ((start >= end) || (start > max_block))
		return 0;

	n = ext4_block_to_path(inode, start, offsets, NULL);
	n2 = ext4_block_to_path(inode, end, offsets2, NULL);

	BUG_ON(n > n2);

	if ((n == 1) && (n == n2)) {
		/* We're punching only within direct block range */
		ext4_free_data(handle, inode, NULL, i_data + offsets[0],
			       i_data + offsets2[0]);
		return 0;
	} else if (n2 > n) {
		/*
		 * Start and end are at different levels, so we're going to
		 * free the partial block at the start, and the partial block
		 * at the end of the range. If there are some levels in
		 * between then the do_indirects label will take care of that.
		 */

		if (n == 1) {
			/*
			 * Start is at the direct block level, free
			 * everything to the end of the level.
			 */
			ext4_free_data(handle, inode, NULL, i_data + offsets[0],
				       i_data + EXT4_NDIR_BLOCKS);
			goto end_range;
		}


		partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
		if (nr) {
			if (partial == chain) {
				/* Shared branch grows from the inode */
				ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
			}
		}

		/*
		 * Clear the ends of indirect blocks on the shared branch
		 * at the start of the range
		 */
		while (partial > chain) {
			ext4_free_branches(handle, inode, partial->bh,
				partial->p + 1,
				(__le32 *)partial->bh->b_data+addr_per_block,
				(chain+n-1) - partial);
			partial--;
		}

end_range:
		partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
		if (nr2) {
			if (partial2 == chain2) {
				/*
				 * Remember, end is exclusive so here we're at
				 * the start of the next level we're not going
				 * to free. Everything was covered by the start
				 * of the range.
				 */
				goto do_indirects;
			}
		} else {
			/*
			 * ext4_find_shared returns an Indirect structure which
			 * points to the last element which should not be
			 * removed by truncate.
			 * But this is the end of the range in punch_hole,
			 * so we need to point to the next element.
			 */
			partial2->p++;
		}

		/*
		 * Clear the ends of indirect blocks on the shared branch
		 * at the end of the range
		 */
		while (partial2 > chain2) {
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			partial2--;
		}
		goto do_indirects;
	}

	/* Punch happened within the same level (n == n2) */
	partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
	partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);

	/* Free top, but only if partial2 isn't its subtree. */
	if (nr) {
		int level = min(partial - chain, partial2 - chain2);
		int i;
		int subtree = 1;

		for (i = 0; i <= level; i++) {
			if (offsets[i] != offsets2[i]) {
				subtree = 0;
				break;
			}
		}

		if (!subtree) {
			if (partial == chain) {
				/* Shared branch grows from the inode */
				ext4_free_branches(handle, inode, NULL,
						   &nr, &nr+1,
						   (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
						   partial->p,
						   partial->p+1,
						   (chain+n-1) - partial);
			}
		}
	}

	if (!nr2) {
		/*
		 * ext4_find_shared returns an Indirect structure which
		 * points to the last element which should not be
		 * removed by truncate. But this is the end of the range
		 * in punch_hole, so we need to point to the next element.
		 */
		partial2->p++;
	}

	while (partial > chain || partial2 > chain2) {
		int depth = (chain+n-1) - partial;
		int depth2 = (chain2+n2-1) - partial2;

		if (partial > chain && partial2 > chain2 &&
		    partial->bh->b_blocknr == partial2->bh->b_blocknr) {
			/*
			 * We've converged on the same block. Clear the range,
			 * then we're done.
			 */
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p + 1,
					   partial2->p,
					   (chain+n-1) - partial);
			goto cleanup;
		}

		/*
		 * The start and end partial branches may not be at the same
		 * level even though the punch happened within one level. So,
		 * we give them a chance to arrive at the same level, then
		 * walk them in step with each other until we converge on the
		 * same block.
		 */
		if (partial > chain && depth <= depth2) {
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p + 1,
					   (__le32 *)partial->bh->b_data+addr_per_block,
					   (chain+n-1) - partial);
			partial--;
		}
		if (partial2 > chain2 && depth2 <= depth) {
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			partial2--;
		}
	}

cleanup:
	while (p && p > chain) {
		BUFFER_TRACE(p->bh, "call brelse");
		brelse(p->bh);
		p--;
	}
	while (p2 && p2 > chain2) {
		BUFFER_TRACE(p2->bh, "call brelse");
		brelse(p2->bh);
		p2--;
	}
	return 0;

do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_IND_BLOCK:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_DIND_BLOCK:
		if (++n >= n2)
			break;
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		fallthrough;
	case EXT4_TIND_BLOCK:
		;
	}
	goto cleanup;
}