/*
 *  linux/fs/ext4/indirect.c
 *
 *  from
 *
 *  linux/fs/ext4/inode.c
 *
 *  Copyright (C) 1992, 1993, 1994, 1995
 *  Remy Card (card@masi.ibp.fr)
 *  Laboratoire MASI - Institut Blaise Pascal
 *  Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@redhat.com), 1993, 1998
 */

#include <linux/aio.h>
#include "ext4_jbd2.h"
#include "truncate.h"
#include "ext4_extents.h"	/* Needed for EXT_MAX_BLOCKS */

#include <trace/events/ext4.h>

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

/**
 * ext4_block_to_path - parse the block number into array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set this non-zero if the referred-to block is likely to be
 *	followed (on disk) by an indirect block.
 *
 * To store the locations of a file's data ext4 uses a data structure common
 * for UNIX filesystems - a tree of pointers anchored in the inode, with
 * data blocks at leaves and indirect blocks in intermediate nodes.
 * This function translates the block number into path in that tree -
 * return value is the path length and @offsets[n] is the offset of
 * pointer to (n+1)th node in the nth one. If @i_block is out of range
 * (negative or too large) a warning is printed and zero returned.
 *
 * Note: function doesn't find node addresses, so no IO is needed. All
 * we need to know is the capacity of indirect blocks (taken from the
 * inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}

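/*
 * Illustrative example (added by the editor, not part of the original
 * source): assuming a 4 KiB block size, EXT4_ADDR_PER_BLOCK() is 1024 and
 * EXT4_NDIR_BLOCKS is 12, so ext4_block_to_path() decomposes logical block
 * 5000 as follows:
 *
 *	5000 - 12 - 1024 = 3964		(past the direct and single-indirect
 *					 ranges, within the double-indirect one)
 *	offsets[] = { EXT4_DIND_BLOCK, 3964 >> 10 = 3, 3964 & 1023 = 892 }
 *	return value (depth) = 3
 *
 * i.e. the block lives under the double-indirect tree, in the fourth
 * second-level indirect block, at slot 892.
 */
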
/**
 * ext4_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Function fills the array of triples <key, p, bh> and returns %NULL
 * if everything went OK or the pointer to the last filled triple
 * (incomplete one) otherwise. Upon the return chain[i].key contains
 * the number of (i+1)-th block in the chain (as it is stored in memory,
 * i.e. little-endian 32-bit), chain[i].p contains the address of that
 * number (it points into struct inode for i==0 and into the bh->b_data
 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 * block for i>0 and NULL for i==0. In other words, it holds the block
 * numbers of the chain, addresses they were taken from (and where we can
 * verify that chain did not change) and buffer_heads hosting these
 * numbers.
 *
 * Function stops when it stumbles upon zero pointer (absent block)
 *	(pointer to last triple returned, *@err == 0)
 * or when it gets an IO error reading an indirect block
 *	(ditto, *@err == -EIO)
 * or when it reads all @depth-1 indirect blocks successfully and finds
 * the whole chain, all the way to the data (returns %NULL, *err == 0).
 *
 * Needs to be called with
 * down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;
	int ret = -EIO;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh)) {
			ret = -ENOMEM;
			goto failure;
		}

		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = ret;
no_block:
	return p;
}

/**
 * ext4_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when the heuristic for sequential allocation fails.
 * Rules are:
 *   + if there is a block to the left of our position - allocate near it.
 *   + if pointer will live in indirect block - allocate near that block.
 *   + if pointer will live in inode - allocate in the same
 *     cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group. The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 * Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put it
	 * into the same cylinder group then.
	 */
	return ext4_inode_to_goal_block(inode);
}

/**
 * ext4_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @partial: pointer to the last triple within a chain
 *
 * Normally this function finds the preferred place for block allocation
 * and returns it.
 * Because this is only used for non-extent files, we limit the block nr
 * to 32 bits.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	goal = ext4_find_near(inode, partial);
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}

/**
 * ext4_blks_to_allocate - Look up the block map and count the number
 * of direct blocks that need to be allocated for the given branch.
 *
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped.
 * @blocks_to_boundary: the offset in the indirect block
 *
 * Return the total number of blocks to be allocated, including the
 * direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) have not been allocated
	 * yet, so it is clear that the blocks on that path have not been
	 * allocated either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
	       le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
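
/*
 * Illustrative example (added by the editor, not part of the original
 * source): suppose a write wants blks = 8 data blocks and the target slot
 * sits 3 entries before the end of its indirect block
 * (blocks_to_boundary = 3).
 *
 *   - If a [td]indirect block on the path is missing (k > 0), nothing
 *     beyond the boundary can be allocated in one go, so the function
 *     returns min(blks, blocks_to_boundary + 1) = 4.
 *   - If the full path already exists (k == 0), it returns 1 plus the
 *     number of following slots in branch[0] that are still zero, again
 *     capped by blks and by the boundary.
 */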

/**
 * ext4_alloc_branch - allocate and set up a chain of blocks.
 * @handle: handle for this transaction
 * @inode: owner
 * @indirect_blks: number of allocated indirect blocks
 * @blks: number of allocated direct blocks
 * @goal: preferred place for allocation
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * This function allocates blocks, zeroes out all but the last one,
 * links them into a chain and (if we are synchronous) writes them to disk.
 * In other words, it prepares a branch that can be spliced onto the
 * inode. It stores the information about that chain in the branch[], in
 * the same format as ext4_get_branch() would do. We are calling it after
 * we had read the existing part of chain and partial points to the last
 * triple of that (one with zero ->key). Upon the exit we have the same
 * picture as after the successful ext4_get_block(), except that in one
 * place chain is disconnected - *branch->p is still zero (we did not
 * set the last link), but branch->key contains the number that should
 * be placed into *branch->p to fill that gap.
 *
 * If allocation fails we free all blocks we've allocated (and forget
 * their buffer_heads) and return the error value from the failed
 * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 * as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, int indirect_blks,
			     int *blks, ext4_fsblk_t goal,
			     ext4_lblk_t *offsets, Indirect *branch)
{
	struct ext4_allocation_request ar;
	struct buffer_head *bh;
	ext4_fsblk_t b, new_blocks[4];
	__le32 *p;
	int i, j, err, len = 1;

	/*
	 * Set up for the direct block allocation
	 */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.len = *blks;
	ar.logical = iblock;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;

	for (i = 0; i <= indirect_blks; i++) {
		if (i == indirect_blks) {
			ar.goal = goal;
			new_blocks[i] = ext4_mb_new_blocks(handle, &ar, &err);
		} else
			goal = new_blocks[i] = ext4_new_meta_blocks(handle,
						inode, goal, 0, NULL, &err);
		if (err) {
			i--;
			goto failed;
		}
		branch[i].key = cpu_to_le32(new_blocks[i]);
		if (i == 0)
			continue;

		bh = branch[i].bh = sb_getblk(inode->i_sb, new_blocks[i-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, bh->b_size);
		p = branch[i].p = (__le32 *) bh->b_data + offsets[i];
		b = new_blocks[i];

		if (i == indirect_blks)
			len = ar.len;
		for (j = 0; j < len; j++)
			*p++ = cpu_to_le32(b++);

		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto failed;
	}
	*blks = ar.len;
	return 0;
failed:
	for (; i >= 0; i--) {
		if (i != indirect_blks && branch[i].bh)
			ext4_forget(handle, 1, inode, branch[i].bh,
				    branch[i].bh->b_blocknr);
		ext4_free_blocks(handle, inode, NULL, new_blocks[i],
				 (i == indirect_blks) ? ar.len : 1, 0);
	}
	return err;
}
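
/*
 * Layout note added by the editor for illustration (not in the original
 * source): after a successful pass of the loop above with
 * indirect_blks == 2, new_blocks[0] and new_blocks[1] hold freshly
 * allocated metadata (indirect-chain) blocks and new_blocks[2] is the
 * first of the ar.len data blocks returned by ext4_mb_new_blocks().
 * Only the lowest indirect block (new_blocks[1] here) has more than one
 * pointer written into it - ar.len of them, one per data block; every
 * higher block gets a single pointer to the level below.
 */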

/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @handle: handle for this transaction
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext4_alloc_branch)
 * @where: location of missing link
 * @num: number of indirect blocks we are adding
 * @blks: number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			      ext4_lblk_t block, Indirect *where, int num,
			      int blks)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just-allocated
	 * direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode. Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size. But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		ext4_mark_inode_dirty(handle, inode);
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
			 blks, 0);

	return err;
}

/*
 * The ext4_ind_map_blocks() function handles non-extent inodes
 * (i.e., using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_map_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * The ext4_ind_map_blocks() function should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks.
 */
int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			int flags)
{
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
	J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* map more blocks */
		while (count < map->m_len && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
		EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
				 "non-extent mapped inodes with bigalloc");
		return -ENOSPC;
	}

	goal = ext4_find_goal(inode, map->m_lblk, partial);

	/* the number of blocks that need to be allocated for the
	 * [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext4_blks_to_allocate(partial, indirect_blks,
				      map->m_len, blocks_to_boundary);
	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
				&count, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned. Can we handle this somehow? We
	 * may need to return -EAGAIN upwards in the worst case. --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, inode, map->m_lblk,
					 partial, indirect_blks, count);
	if (err)
		goto cleanup;

	map->m_flags |= EXT4_MAP_NEW;

	ext4_update_inode_fsync_trans(handle, inode, 1);
got_it:
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = le32_to_cpu(chain[depth-1].key);
	map->m_len = count;
	if (count > blocks_to_boundary)
		map->m_flags |= EXT4_MAP_BOUNDARY;
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
out:
	trace_ext4_ind_map_blocks_exit(inode, map, err);
	return err;
}
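
/*
 * Usage sketch added by the editor for illustration (the real call sites
 * live elsewhere in ext4, e.g. ext4_map_blocks()): a caller fills in
 * map->m_lblk and map->m_len and, on success, reads the result back out of
 * the same struct:
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = len };
 *	int ret = ext4_ind_map_blocks(handle, inode, &map,
 *				      EXT4_GET_BLOCKS_CREATE);
 *
 * A positive return value is the number of blocks mapped starting at
 * map.m_pblk, 0 means the lookup hit a hole, and a negative value is an
 * error; EXT4_MAP_NEW in map.m_flags marks freshly allocated blocks.
 */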

/*
 * O_DIRECT for ext3 (or indirect map) based files
 *
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list. So recovery will truncate it back to the original size
 * if the machine crashes during the write.
 *
 * If the O_DIRECT write is instantiating holes inside i_size and the machine
 * crashes then stale disk data _may_ be exposed inside the file. But current
 * VFS code falls back into buffered path in that case so we are safe.
 */
ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
			   const struct iovec *iov, loff_t offset,
			   unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ext4_inode_info *ei = EXT4_I(inode);
	handle_t *handle;
	ssize_t ret;
	int orphan = 0;
	size_t count = iov_length(iov, nr_segs);
	int retries = 0;

	if (rw == WRITE) {
		loff_t final_size = offset + count;

		if (final_size > inode->i_size) {
			/* Credits for sb + inode write */
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
			if (IS_ERR(handle)) {
				ret = PTR_ERR(handle);
				goto out;
			}
			ret = ext4_orphan_add(handle, inode);
			if (ret) {
				ext4_journal_stop(handle);
				goto out;
			}
			orphan = 1;
			ei->i_disksize = inode->i_size;
			ext4_journal_stop(handle);
		}
	}

retry:
	if (rw == READ && ext4_should_dioread_nolock(inode)) {
		if (unlikely(atomic_read(&EXT4_I(inode)->i_unwritten))) {
			mutex_lock(&inode->i_mutex);
			ext4_flush_unwritten_io(inode);
			mutex_unlock(&inode->i_mutex);
		}
		/*
		 * Nolock dioread optimization may be dynamically disabled
		 * via ext4_inode_block_unlocked_dio(). Check inode's state
		 * while holding extra i_dio_count ref.
		 */
		atomic_inc(&inode->i_dio_count);
		smp_mb();
		if (unlikely(ext4_test_inode_state(inode,
						   EXT4_STATE_DIOREAD_LOCK))) {
			inode_dio_done(inode);
			goto locked;
		}
		ret = __blockdev_direct_IO(rw, iocb, inode,
					   inode->i_sb->s_bdev, iov,
					   offset, nr_segs,
					   ext4_get_block, NULL, NULL, 0);
		inode_dio_done(inode);
	} else {
locked:
		ret = blockdev_direct_IO(rw, iocb, inode, iov,
					 offset, nr_segs, ext4_get_block);

		if (unlikely((rw & WRITE) && ret < 0)) {
			loff_t isize = i_size_read(inode);
			loff_t end = offset + iov_length(iov, nr_segs);

			if (end > isize)
				ext4_truncate_failed_write(inode);
		}
	}
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;

	if (orphan) {
		int err;

		/* Credits for sb + inode write */
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			/* This is really bad luck. We've written the data
			 * but cannot extend i_size. Bail out and pretend
			 * the write failed... */
			ret = PTR_ERR(handle);
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);

			goto out;
		}
		if (inode->i_nlink)
			ext4_orphan_del(handle, inode);
		if (ret > 0) {
			loff_t end = offset + ret;
			if (end > inode->i_size) {
				ei->i_disksize = end;
				i_size_write(inode, end);
				/*
				 * We're going to return a positive `ret'
				 * here due to non-zero-length I/O, so there's
				 * no way of reporting error returns from
				 * ext4_mark_inode_dirty() to userspace. So
				 * ignore it.
				 */
				ext4_mark_inode_dirty(handle, inode);
			}
		}
		err = ext4_journal_stop(handle);
		if (ret == 0)
			ret = err;
	}
out:
	return ret;
}

/*
 * Calculate the number of metadata blocks that need to be reserved in
 * order to allocate a new block at @lblock for a non-extent-based file.
 */
int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
	int blk_bits;

	if (lblock < EXT4_NDIR_BLOCKS)
		return 0;

	lblock -= EXT4_NDIR_BLOCKS;

	if (ei->i_da_metadata_calc_len &&
	    (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
		ei->i_da_metadata_calc_len++;
		return 0;
	}
	ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
	ei->i_da_metadata_calc_len = 1;
	blk_bits = order_base_2(lblock);
	return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
}

int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	int indirects;

	/* if nrblocks are contiguous */
	if (chunk) {
		/*
		 * With N contiguous data blocks, we need at most
		 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
		 * 2 dindirect blocks, and 1 tindirect block
		 */
		return DIV_ROUND_UP(nrblocks,
				    EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
	}
	/*
	 * If nrblocks are not contiguous then, in the worst case, each block
	 * touches an indirect block, each indirect block touches a double
	 * indirect block, plus a triple indirect block.
	 */
	indirects = nrblocks * 2 + 1;
	return indirects;
}
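
/*
 * Worked example added by the editor for illustration (not in the original
 * source): with 4 KiB blocks EXT4_ADDR_PER_BLOCK() is 1024, so mapping 2048
 * contiguous blocks in one chunk is charged at most
 * DIV_ROUND_UP(2048, 1024) + 4 = 6 metadata blocks, whereas 3 scattered,
 * non-contiguous blocks get the pessimistic estimate 3 * 2 + 1 = 7.
 */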

/*
 * Truncate transactions can be complex and absolutely huge. So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * Try to extend this transaction for the purposes of truncation. If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 *
 * Returns 0 if we managed to create more room. If we can't create more
 * room, and the transaction must be restarted we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle))
		return 0;
	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	if (!ext4_journal_extend(handle, ext4_blocks_for_truncate(inode)))
		return 0;
	return 1;
}

/*
 * Probably it should be a library function... search for first non-zero word
 * or memcmp with zero_page, whatever is better for particular architecture.
 * Linus?
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

/**
 * ext4_find_shared - find the indirect blocks for partial truncation.
 * @inode: inode in question
 * @depth: depth of the affected branch
 * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
 * @chain: place to store the pointers to partial indirect blocks
 * @top: place to the (detached) top of branch
 *
 * This is a helper function used by ext4_truncate().
 *
 * When we do truncate() we may have to clean the ends of several
 * indirect blocks but leave the blocks themselves alive. A block is
 * partially truncated if some data below the new i_size is referred to
 * from it (and it is on the path to the first completely truncated
 * data block, indeed). We have to free the top of that path along
 * with everything to the right of the path. Since no allocation
 * past the truncation point is possible until ext4_truncate()
 * finishes, we may safely do the latter, but the top of branch may
 * require special attention - pageout below the truncation point
 * might try to populate it.
 *
 * We atomically detach the top of branch from the tree, store the
 * block number of its root in *@top, pointers to buffer_heads of
 * partially truncated blocks - in @chain[].bh and pointers to
 * their last elements that should not be removed - in
 * @chain[].p. Return value is the pointer to last filled element
 * of @chain.
 *
 * The work left to the caller is the actual freeing of subtrees:
 *	a) free the subtree starting from *@top
 *	b) free the subtrees whose roots are stored in
 *		(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 *	c) free the subtrees growing from the inode past the @chain[0].
 *		(no partially truncated stuff there).
 */

static Indirect *ext4_find_shared(struct inode *inode, int depth,
				  ext4_lblk_t offsets[4], Indirect chain[4],
				  __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext4. Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}
	/* Writer: end */

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 *
 * Return 0 on success, 1 on invalid block range
 * and < 0 on fatal error.
 */
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
			     struct buffer_head *bh,
			     ext4_fsblk_t block_to_free,
			     unsigned long count, __le32 *first,
			     __le32 *last)
{
	__le32 *p;
	int flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
	int err;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_METADATA;

	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
				   count)) {
		EXT4_ERROR_INODE(inode, "attempt to clear invalid "
				 "blocks %llu len %lu",
				 (unsigned long long) block_to_free, count);
		return 1;
	}

	if (try_to_extend_transaction(handle, inode)) {
		if (bh) {
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			err = ext4_handle_dirty_metadata(handle, inode, bh);
			if (unlikely(err))
				goto out_err;
		}
		err = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(err))
			goto out_err;
		err = ext4_truncate_restart_trans(handle, inode,
					ext4_blocks_for_truncate(inode));
		if (unlikely(err))
			goto out_err;
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			err = ext4_journal_get_write_access(handle, bh);
			if (unlikely(err))
				goto out_err;
		}
	}

	for (p = first; p < last; p++)
		*p = 0;

	ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
	return 0;
out_err:
	ext4_std_error(inode->i_sb, err);
	return err;
}

/**
 * ext4_free_data - free a list of data blocks
 * @handle: handle for this transaction
 * @inode: inode we are dealing with
 * @this_bh: indirect buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: points immediately past the end of array
 *
 * We are freeing all blocks referred from that array (numbers are stored as
 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
 *
 * We accumulate contiguous runs of blocks to free. Conveniently, if these
 * blocks are contiguous then releasing them at one time will only affect one
 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
 * actually use a lot of journal space.
 *
 * @this_bh will be %NULL if @first and @last point into the inode's direct
 * block pointers.
 */
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;		/* Starting block # of a run */
	unsigned long count = 0;		/* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;		/* Pointer into inode/ind
						   corresponding to
						   block_to_free */
	ext4_fsblk_t nr;			/* Current block # */
	__le32 *p;				/* Pointer into inode/ind
						   for current block */
	int err = 0;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them.
		 */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				err = ext4_clear_blocks(handle, inode, this_bh,
							block_to_free, count,
							block_to_free_p, p);
				if (err)
					break;
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	if (!err && count > 0)
		err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
					count, block_to_free_p, p);
	if (err < 0)
		/* fatal error */
		return;

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at this
		 * point. However, if the data is corrupted and an indirect
		 * block pointed to itself, it would have been detached when
		 * the block was cleared. Check for this instead of OOPSing.
		 */
		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
			ext4_handle_dirty_metadata(handle, inode, this_bh);
		else
			EXT4_ERROR_INODE(inode,
					 "circular indirect block detected at "
					 "block %llu",
					 (unsigned long long) this_bh->b_blocknr);
	}
}
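
/*
 * Worked example added by the editor for illustration (not part of the
 * original source): if the pointer range handed to ext4_free_data()
 * contains the block numbers 100, 101, 102, 0, 200, the loop above
 * accumulates the contiguous run starting at 100 (count = 3), skips the
 * hole, and ends up handing two runs to ext4_clear_blocks(): (100, 3)
 * inside the loop and (200, 1) via the final call after the loop.
 */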

/**
 * ext4_free_branches - free an array of branches
 * @handle: JBD handle for this transaction
 * @inode: inode we are dealing with
 * @parent_bh: the buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: pointer immediately past the end of array
 * @depth: depth of the branches to free
 *
 * We are freeing all blocks referred from these branches (numbers are
 * stored as little-endian 32-bit) and updating @inode->i_blocks
 * appropriately.
 */
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						   nr, 1)) {
				EXT4_ERROR_INODE(inode,
						 "invalid indirect mapped "
						 "block %lu (level %d)",
						 (unsigned long) nr, depth);
				break;
			}

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				EXT4_ERROR_INODE_BLOCK(inode, nr,
						       "Read failure");
				continue;
			}

			/* This zaps the entire block. Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					(__le32 *) bh->b_data,
					(__le32 *) bh->b_data + addr_per_block,
					depth);
			brelse(bh);

			/*
			 * Everything below this pointer has been
			 * released. Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the updating of the
			 * bitmap block which owns it. So make some room in
			 * the journal.
			 *
			 * We zero the parent pointer *after* freeing its
			 * pointee in the bitmaps, so if extend_transaction()
			 * for some reason fails to put the bitmap changes and
			 * the release into the same transaction, recovery
			 * will merely complain about releasing a free block,
			 * rather than leaking blocks.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext4_mark_inode_dirty(handle, inode);
				ext4_truncate_restart_trans(handle, inode,
					    ext4_blocks_for_truncate(inode));
			}

			/*
			 * The forget flag here is critical because if
			 * we are journaling (and not doing data
			 * journaling), we have to make sure a revoke
			 * record is written to prevent the journal
			 * replay from overwriting the (former)
			 * indirect block if it gets reallocated as a
			 * data block. This must happen in the same
			 * transaction where the data blocks are
			 * actually freed.
			 */
			ext4_free_blocks(handle, inode, NULL, nr, 1,
					 EXT4_FREE_BLOCKS_METADATA|
					 EXT4_FREE_BLOCKS_FORGET);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
								   parent_bh)) {
					*p = 0;
					BUFFER_TRACE(parent_bh,
						"call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}

void ext4_ind_truncate(handle_t *handle, struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n = 0;
	ext4_lblk_t last_block, max_block;
	unsigned blocksize = inode->i_sb->s_blocksize;

	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (last_block != max_block) {
		n = ext4_block_to_path(inode, last_block, offsets, NULL);
		if (n == 0)
			return;
	}

	ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to propagate
	 * the new, shorter inode size (held for now in i_size) into the
	 * on-disk inode. We do this via i_disksize, which is the value which
	 * ext4 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	if (last_block == max_block) {
		/*
		 * It is unnecessary to free any data blocks if last_block is
		 * equal to the indirect block limit.
		 */
		return;
	} else if (n == 1) {		/* direct blocks */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop. No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p,
					   partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32 *)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
	case EXT4_TIND_BLOCK:
		;
	}
}
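
/*
 * Note added by the editor for illustration (not in the original source):
 * the switch above deliberately falls through. A truncate whose new end
 * falls in the direct-block range enters at "default:" and so frees the
 * whole single, double and triple indirect subtrees in turn; a truncate
 * that ends within the single-indirect range enters at
 * "case EXT4_IND_BLOCK:" and only frees the double and triple indirect
 * subtrees, and so on.
 */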

static int free_hole_blocks(handle_t *handle, struct inode *inode,
			    struct buffer_head *parent_bh, __le32 *i_data,
			    int level, ext4_lblk_t first,
			    ext4_lblk_t count, int max)
{
	struct buffer_head *bh = NULL;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ret = 0;
	int i, inc;
	ext4_lblk_t offset;
	__le32 blk;

	inc = 1 << ((EXT4_BLOCK_SIZE_BITS(inode->i_sb) - 2) * level);
	for (i = 0, offset = 0; i < max; i++, i_data++, offset += inc) {
		if (offset >= count + first)
			break;
		if (*i_data == 0 || (offset + inc) <= first)
			continue;
		blk = *i_data;
		if (level > 0) {
			ext4_lblk_t first2;
			bh = sb_bread(inode->i_sb, le32_to_cpu(blk));
			if (!bh) {
				EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),
						       "Read failure");
				return -EIO;
			}
			first2 = (first > offset) ? first - offset : 0;
			ret = free_hole_blocks(handle, inode, bh,
					       (__le32 *)bh->b_data, level - 1,
					       first2, count - offset,
					       inode->i_sb->s_blocksize >> 2);
			if (ret) {
				brelse(bh);
				goto err;
			}
		}
		if (level == 0 ||
		    (bh && all_zeroes((__le32 *)bh->b_data,
				      (__le32 *)bh->b_data + addr_per_block))) {
			ext4_free_data(handle, inode, parent_bh, &blk, &blk+1);
			*i_data = 0;
		}
		brelse(bh);
		bh = NULL;
	}

err:
	return ret;
}

int ext4_free_hole_blocks(handle_t *handle, struct inode *inode,
			  ext4_lblk_t first, ext4_lblk_t stop)
{
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int level, ret = 0;
	int num = EXT4_NDIR_BLOCKS;
	ext4_lblk_t count, max = EXT4_NDIR_BLOCKS;
	__le32 *i_data = EXT4_I(inode)->i_data;

	count = stop - first;
	for (level = 0; level < 4; level++, max *= addr_per_block) {
		if (first < max) {
			ret = free_hole_blocks(handle, inode, NULL, i_data,
					       level, first, count, num);
			if (ret)
				goto err;
			if (count > max - first)
				count -= max - first;
			else
				break;
			first = 0;
		} else {
			first -= max;
		}
		i_data += num;
		if (level == 0) {
			num = 1;
			max = 1;
		}
	}

err:
	return ret;
}