/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *	David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	return jbd2_journal_begin_ordered_truncate(
					EXT4_SB(inode->i_sb)->s_journal,
					&EXT4_I(inode)->jinode,
					new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
	ext4_lblk_t needed;

	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

	/* Give ourselves just enough room to cope with inodes in which
	 * i_blocks is corrupt: we've seen disk corruptions in the past
	 * which resulted in random data in an inode which looked enough
	 * like a regular file for ext4 to try to delete it.  Things
	 * will go a bit crazy if that happens, but at least we should
	 * try not to panic the whole kernel. */
	if (needed < 2)
		needed = 2;

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT4_MAX_TRANS_DATA)
		needed = EXT4_MAX_TRANS_DATA;

	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}
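
/*
 * Illustrative arithmetic (not part of the original source): on a
 * 4KB-block filesystem, s_blocksize_bits is 12, so i_blocks (counted
 * in 512-byte sectors) is shifted right by 12 - 9 = 3, i.e. divided
 * by 8, to estimate the number of fs blocks.  An inode with
 * i_blocks == 800 sectors therefore yields needed == 100, which is
 * then clamped to the range [2, EXT4_MAX_TRANS_DATA] before the fixed
 * EXT4_DATA_TRANS_BLOCKS() overhead is added.
 */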

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext4_journal_start(inode, blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext4_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted, we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle))
		return 0;
	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
		return 0;
	return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_get_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * the page cache has already been dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, blocks_for_truncate(inode));
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_delete_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb, __func__,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb, __func__,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	clear_inode(inode);	/* We must guarantee clearing of inode... */
}

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

/**
 * ext4_block_to_path - parse the block number into array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set this non-zero if the referred-to block is likely to be
 *	followed (on disk) by an indirect block.
 *
 * To store the locations of a file's data, ext4 uses a data structure
 * common for UNIX filesystems - a tree of pointers anchored in the inode,
 * with data blocks at the leaves and indirect blocks in intermediate
 * nodes.  This function translates the block number into a path in that
 * tree - the return value is the path length and @offsets[n] is the offset
 * of the pointer to the (n+1)th node in the nth one.  If @block is out of
 * range (negative or too large), a warning is printed and zero is
 * returned.
 *
 * Note: the function doesn't find node addresses, so no IO is needed.
 * All we need to know is the capacity of indirect blocks (taken from the
 * inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks.  We might use long long, but that would
 * kill us on x86.  Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "ext4_block_to_path",
			     "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
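
/*
 * Worked example (illustrative, assuming a 4KB block size, i.e. 1024
 * block pointers per indirect block and EXT4_NDIR_BLOCKS == 12):
 *
 *	i_block = 5    -> offsets = { 5 },                     depth 1 (direct)
 *	i_block = 300  -> offsets = { EXT4_IND_BLOCK, 288 },   depth 2
 *	                  (288 = 300 - 12)
 *	i_block = 5000 -> offsets = { EXT4_DIND_BLOCK, 3, 892 }, depth 3
 *	                  (5000 - 12 - 1024 = 3964 = 3 * 1024 + 892)
 */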

static int __ext4_check_blockref(const char *function, struct inode *inode,
				 __le32 *p, unsigned int max)
{
	__le32 *bref = p;
	unsigned int blk;

	while (bref < p+max) {
		blk = le32_to_cpu(*bref++);
		if (blk &&
		    unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						    blk, 1))) {
			ext4_error(inode->i_sb, function,
				   "invalid block reference %u "
				   "in inode #%lu", blk, inode->i_ino);
			return -EIO;
		}
	}
	return 0;
}


#define ext4_check_indirect_blockref(inode, bh)				\
	__ext4_check_blockref(__func__, inode, (__le32 *)(bh)->b_data,	\
			      EXT4_ADDR_PER_BLOCK((inode)->i_sb))

#define ext4_check_inode_blockref(inode)				\
	__ext4_check_blockref(__func__, inode, EXT4_I(inode)->i_data,	\
			      EXT4_NDIR_BLOCKS)

/**
 * ext4_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * The function fills the array of triples <key, p, bh> and returns %NULL
 * if everything went OK, or a pointer to the last filled triple
 * (the incomplete one) otherwise.  Upon return, chain[i].key contains
 * the number of the (i+1)-th block in the chain (as it is stored in memory,
 * i.e. little-endian 32-bit), chain[i].p contains the address of that
 * number (it points into struct inode for i==0 and into the bh->b_data
 * for i>0), and chain[i].bh points to the buffer_head of the i-th indirect
 * block for i>0 and NULL for i==0.  In other words, it holds the block
 * numbers of the chain, the addresses they were taken from (where we can
 * verify that the chain did not change), and the buffer_heads hosting these
 * numbers.
 *
 * The function stops when it stumbles upon a zero pointer (absent block)
 * (pointer to last triple returned, *@err == 0)
 * or when it gets an IO error reading an indirect block
 * (ditto, *@err == -EIO)
 * or when it reads all @depth-1 indirect blocks successfully and finds
 * the whole chain, all the way to the data (returns %NULL, *err == 0).
 *
 * Needs to be called with
 * down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh))
			goto failure;

		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = -EIO;
no_block:
	return p;
}
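
/*
 * Illustrative sketch (not from the original source; D, I, dind_bh and
 * ind_bh are made-up names, not variables in this file): for a depth-3
 * lookup that fully resolves, the chain looks like
 *
 *	chain[0] = { p -> inode->i_data[EXT4_DIND_BLOCK], key = D,   bh = NULL    }
 *	chain[1] = { p -> slot 3 in dind_bh->b_data,      key = I,   bh = dind_bh }
 *	chain[2] = { p -> slot 892 in ind_bh->b_data,     key = blk, bh = ind_bh  }
 *
 * where D and I are the on-disk block numbers of the double-indirect
 * and indirect blocks.  ext4_get_branch() returns NULL in this case;
 * a zero key part-way down instead yields a pointer to the first
 * incomplete triple.
 */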

/**
 * ext4_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when the heuristic for sequential allocation fails.
 * Rules are:
 *   + if there is a block to the left of our position - allocate near it.
 *   + if the pointer will live in an indirect block - allocate near that
 *     block.
 *   + if the pointer will live in the inode - allocate in the same
 *     cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.  The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 * Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put it
	 * into the same cylinder group then.
	 */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}

/**
 * ext4_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @partial: pointer to the last triple within a chain
 *
 * Normally this function finds the preferred place for block allocation
 * and returns it.
 * Because this is only used for non-extent files, we limit the block nr
 * to 32 bits.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	goal = ext4_find_near(inode, partial);
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}

/**
 * ext4_blks_to_allocate: Look up the block map and count the number
 * of direct blocks that need to be allocated for the given branch.
 *
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped.
 * @blocks_to_boundary: the offset in the indirect block
 *
 * Return the total number of blocks to be allocated, including the
 * direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: if the [t,d]indirect block(s) have not been allocated
	 * yet, then it's clear that blocks on that path have not been
	 * allocated either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
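
/*
 * Illustrative example (not from the original source): with
 * blocks_to_boundary == 7 and a request for blks == 10, a branch with a
 * missing indirect block (k > 0) returns 8 (blocks_to_boundary + 1),
 * because allocation never crosses an indirect-block boundary here.
 * With k == 0 the function instead counts forward from branch[0].p
 * until it hits an already-allocated entry or the boundary.
 */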

/**
 * ext4_alloc_blocks: allocate the multiple blocks needed for a branch
 * @indirect_blks: the number of blocks that need to be allocated for
 *	indirect blocks
 *
 * @new_blocks: on return it will store the new block numbers for
 *	the indirect blocks (if needed) and the first direct block,
 * @blks: on return it will store the total number of allocated
 *	direct blocks
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, ext4_fsblk_t goal,
			     int indirect_blks, int blks,
			     ext4_fsblk_t new_blocks[4], int *err)
{
	struct ext4_allocation_request ar;
	int target, i;
	unsigned long count = 0, blk_allocated = 0;
	int index = 0;
	ext4_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks (if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks that need to be allocated (required).
	 */
	/* first we try to allocate the indirect blocks */
	target = indirect_blks;
	while (target > 0) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext4_new_meta_blocks(handle, inode,
						     goal, &count, err);
		if (*err)
			goto failed_out;

		BUG_ON(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS);

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}
		if (count > 0) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
			printk(KERN_INFO "%s returned more blocks than "
			       "requested\n", __func__);
			WARN_ON(1);
			break;
		}
	}

	target = blks - count;
	blk_allocated = count;
	if (!target)
		goto allocated;
	/* Now allocate data blocks */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.goal = goal;
	ar.len = target;
	ar.logical = iblock;
	if (S_ISREG(inode->i_mode))
		/* enable in-core preallocation only for regular files */
		ar.flags = EXT4_MB_HINT_DATA;

	current_block = ext4_mb_new_blocks(handle, &ar, err);
	BUG_ON(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS);

	if (*err && (target == blks)) {
		/*
		 * if the allocation failed and we didn't allocate
		 * any blocks before
		 */
		goto failed_out;
	}
	if (!*err) {
		if (target == blks) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
		}
		blk_allocated += ar.len;
	}
allocated:
	/* total number of blocks allocated for direct blocks */
	ret = blk_allocated;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
	return ret;
}
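
/*
 * Illustrative layout (not from the original source): after a
 * successful call with indirect_blks == 2, new_blocks[0] and
 * new_blocks[1] hold the freshly allocated metadata (indirect) blocks
 * and new_blocks[2] holds the first of the returned data blocks; the
 * return value is the number of contiguous data blocks obtained, which
 * may be fewer than requested.
 */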

/**
 * ext4_alloc_branch - allocate and set up a chain of blocks.
 * @inode: owner
 * @indirect_blks: number of allocated indirect blocks
 * @blks: number of allocated direct blocks
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * This function allocates blocks, zeroes out all but the last one,
 * links them into a chain and (if we are synchronous) writes them to disk.
 * In other words, it prepares a branch that can be spliced onto the
 * inode.  It stores the information about that chain in the branch[], in
 * the same format as ext4_get_branch() would do.  We are calling it after
 * we had read the existing part of the chain and partial points to the last
 * triple of that (the one with zero ->key).  Upon exit we have the same
 * picture as after a successful ext4_get_block(), except that in one
 * place the chain is disconnected - *branch->p is still zero (we did not
 * set the last link), but branch->key contains the number that should
 * be placed into *branch->p to fill that gap.
 *
 * If allocation fails we free all blocks we've allocated (and forget
 * their buffer_heads) and return the error value from the failed
 * ext4_alloc_block() (normally -ENOSPC).  Otherwise we set the chain
 * as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, int indirect_blks,
			     int *blks, ext4_fsblk_t goal,
			     ext4_lblk_t *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext4_fsblk_t new_blocks[4];
	ext4_fsblk_t current_block;

	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			/* Don't brelse(bh) here; it's done in
			 * ext4_journal_forget() below */
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the newly allocated
			 * data block numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/* Allocation failed, free what we already allocated */
	ext4_free_blocks(handle, inode, 0, new_blocks[0], 1, 0);
	for (i = 1; i <= n ; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	for (i = n+1; i < indirect_blks; i++)
		ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);

	ext4_free_blocks(handle, inode, 0, new_blocks[i], num, 0);

	return err;
}
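
/*
 * Illustrative picture (not from the original source): after a
 * successful ext4_alloc_branch() the new subtree is fully wired up
 * internally - each branch[n].bh (n >= 1) is a freshly zeroed metadata
 * block in which the slot branch[n].p already holds branch[n].key, the
 * block number of the next level down, and the deepest one also points
 * at the remaining data blocks.  Only the topmost link is still
 * missing: *branch[0].p stays zero until ext4_splice_branch() stores
 * branch[0].key there, which publishes the whole branch at once.
 */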

/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext4_alloc_branch)
 * @where: location of missing link
 * @num:   number of indirect blocks we are adding
 * @blks:  number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * the inode (->i_blocks, etc.).  In case of success we end up with the full
 * chain to the new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			      ext4_lblk_t block, Indirect *where, int num,
			      int blks)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the
	 * just-allocated direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		ext4_mark_inode_dirty(handle, inode);
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, inode, 0, le32_to_cpu(where[num].key),
			 blks, 0);

	return err;
}

/*
 * The ext4_ind_get_blocks() function handles non-extents inodes
 * (i.e., using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_get_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf.  So let's do it before attaching anything
 * to the tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if the check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from the inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * The ext4_ind_get_blocks() function should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks.
 */
static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
			       ext4_lblk_t iblock, unsigned int maxblocks,
			       struct buffer_head *bh_result,
			       int flags)
{
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, iblock, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result);
		count++;
		/* map more blocks */
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	goal = ext4_find_goal(inode, iblock, partial);

	/* the number of blocks needed to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext4_blks_to_allocate(partial, indirect_blks,
				      maxblocks, blocks_to_boundary);
	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
				&count, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case. --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, inode, iblock,
					 partial, indirect_blks, count);
	if (err)
		goto cleanup;

	set_buffer_new(bh_result);

	ext4_update_inode_fsync_trans(handle, inode, 1);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
	BUFFER_TRACE(bh_result, "returned");
out:
	return err;
}
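
/*
 * Usage sketch (illustrative; it mirrors the dummy buffer_head trick
 * that ext4_getblk() uses further down in this file):
 *
 *	struct buffer_head dummy;
 *
 *	dummy.b_state = 0;
 *	dummy.b_blocknr = -1000;
 *	err = ext4_ind_get_blocks(NULL, inode, iblock, 1, &dummy, 0);
 *
 * err > 0 means err contiguous blocks are mapped and dummy.b_blocknr
 * holds the first physical block; err == 0 means a hole; err < 0 is an
 * error.  With flags == 0 and handle == NULL this is a pure lookup and
 * never allocates.
 */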

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate a new block at @lblock for a non-extent file.
 */
static int ext4_indirect_calc_metadata_amount(struct inode *inode,
					      sector_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int dind_mask = EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1;
	int blk_bits;

	if (lblock < EXT4_NDIR_BLOCKS)
		return 0;

	lblock -= EXT4_NDIR_BLOCKS;

	if (ei->i_da_metadata_calc_len &&
	    (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
		ei->i_da_metadata_calc_len++;
		return 0;
	}
	ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
	ei->i_da_metadata_calc_len = 1;
	blk_bits = roundup_pow_of_two(lblock + 1);
	return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
}

/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate a block located at @lblock.
 */
static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
		return ext4_ext_calc_metadata_amount(inode, lblock);

	return ext4_indirect_calc_metadata_amount(inode, lblock);
}

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
static void ext4_da_update_reserve_space(struct inode *inode, int used)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	int mdb_free = 0;

	spin_lock(&ei->i_block_reservation_lock);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks\n",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	used += ei->i_allocated_meta_blocks;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	ei->i_allocated_meta_blocks = 0;
	percpu_counter_sub(&sbi->s_dirtyblocks_counter, used);

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		mdb_free = ei->i_reserved_meta_blocks;
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
		percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem */
	vfs_dq_claim_block(inode, used);
	if (mdb_free)
		vfs_dq_release_reservation_block(inode, mdb_free);

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}

static int check_block_validity(struct inode *inode, const char *msg,
				sector_t logical, sector_t phys, int len)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) {
		ext4_error(inode->i_sb, msg,
			   "inode #%lu logical block %llu mapped to %llu "
			   "(size %d)", inode->i_ino,
			   (unsigned long long) logical,
			   (unsigned long long) phys, len);
		return -EIO;
	}
	return 0;
}

/*
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
				    unsigned int max_pages)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t	index;
	struct pagevec pvec;
	pgoff_t num = 0;
	int i, nr_pages, done = 0;

	if (max_pages == 0)
		return 0;
	pagevec_init(&pvec, 0);
	while (!done) {
		index = idx;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      (pgoff_t)PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page) ||
			    PageWriteback(page) ||
			    page->index != idx) {
				done = 1;
				unlock_page(page);
				break;
			}
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					if (!buffer_delay(bh) &&
					    !buffer_unwritten(bh))
						done = 1;
					bh = bh->b_this_page;
				} while (!done && (bh != head));
			}
			unlock_page(page);
			if (done)
				break;
			idx++;
			num++;
			if (num >= max_pages)
				break;
		}
		pagevec_release(&pvec);
	}
	return num;
}

/*
 * The ext4_get_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extent-based, it will call ext4_ext_get_blocks();
 * otherwise ext4_ind_get_blocks() is called to handle indirect-mapped
 * files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and uninitialized,
 * the resulting buffer head is unmapped.  If create == 1, it will make
 * sure the buffer head is mapped.
 *
 * It returns 0 if a plain look-up failed (blocks have not been allocated);
 * in that case the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
		    unsigned int max_blocks, struct buffer_head *bh,
		    int flags)
{
	int retval;

	clear_buffer_mapped(bh);
	clear_buffer_unwritten(bh);

	ext_debug("ext4_get_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, max_blocks,
		  (unsigned long)block);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
		retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
					     bh, 0);
	} else {
		retval = ext4_ind_get_blocks(handle, inode, block, max_blocks,
					     bh, 0);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	if (retval > 0 && buffer_mapped(bh)) {
		int ret = check_block_validity(inode, "file system corruption",
					       block, bh->b_blocknr, retval);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks are already allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create == 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && buffer_mapped(bh))
		return retval;

	/*
	 * When we call get_blocks without the create flag, the
	 * BH_Unwritten flag could have gotten set if the blocks
	 * requested were part of an uninitialized extent.  We need to
	 * clear this flag now that we are committed to convert all or
	 * part of the uninitialized extent to be an initialized
	 * extent.  This is because we need to avoid the combination
	 * of BH_Unwritten and BH_Mapped flags being simultaneously
	 * set on the buffer_head.
	 */
	clear_buffer_unwritten(bh);

	/*
	 * New block allocation and/or writing to an uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * if the caller is from the delayed allocation writeout path,
	 * we have already reserved fs blocks for allocation;
	 * let the underlying get_block() function know to
	 * avoid double accounting
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		EXT4_I(inode)->i_delalloc_reserved_flag = 1;
	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
		retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
					     bh, flags);
	} else {
		retval = ext4_ind_get_blocks(handle, inode, block,
					     max_blocks, bh, flags);

		if (retval > 0 && buffer_new(bh)) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE;
		}
	}

	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		EXT4_I(inode)->i_delalloc_reserved_flag = 0;

	/*
	 * Update reserved blocks/metadata blocks after successful
	 * block allocation which had been deferred till now.
	 */
	if ((retval > 0) && (flags & EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE))
		ext4_da_update_reserve_space(inode, retval);

	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && buffer_mapped(bh)) {
		int ret = check_block_validity(inode, "file system "
					       "corruption after allocation",
					       block, bh->b_blocknr, retval);
		if (ret != 0)
			return ret;
	}
	return retval;
}
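
/*
 * Usage sketch (illustrative; compare ext4_getblk() below, which does
 * exactly this with a dummy buffer_head): mapping a single block with
 * allocation allowed, under a running handle:
 *
 *	err = ext4_get_blocks(handle, inode, block, 1, &dummy,
 *			      EXT4_GET_BLOCKS_CREATE);
 *
 * With flags == 0 this degrades to a pure lookup that only ever takes
 * the read side of i_data_sem.
 */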

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create)
{
	handle_t *handle = ext4_journal_current_handle();
	int ret = 0, started = 0;
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
	int dio_credits;

	if (create && !handle) {
		/* Direct IO write... */
		if (max_blocks > DIO_MAX_BLOCKS)
			max_blocks = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}
		started = 1;
	}

	ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);
	if (ret > 0) {
		bh_result->b_size = (ret << inode->i_blkbits);
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
out:
	return ret;
}
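
/*
 * Illustrative arithmetic (not from the original source): for a 512KB
 * direct-IO request on a 4KB-block filesystem, b_size >> i_blkbits
 * gives max_blocks == 128, well under DIO_MAX_BLOCKS, so one handle
 * sized by ext4_chunk_trans_blocks() covers the whole mapping.  Larger
 * requests are simply capped at DIO_MAX_BLOCKS and mapped over several
 * calls.
 */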

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct buffer_head dummy;
	int fatal = 0, err;
	int flags = 0;

	J_ASSERT(handle != NULL || create == 0);

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	buffer_trace_init(&dummy.b_history);
	if (create)
		flags |= EXT4_GET_BLOCKS_CREATE;
	err = ext4_get_blocks(handle, inode, block, 1, &dummy, flags);
	/*
	 * ext4_get_blocks() returns the number of blocks mapped, 0 in
	 * the case of a hole.
	 */
	if (err > 0) {
		if (err > 1)
			WARN_ON(1);
		err = 0;
	}
	*errp = err;
	if (!err && buffer_mapped(&dummy)) {
		struct buffer_head *bh;
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (!bh) {
			*errp = -EIO;
			goto err;
		}
		if (buffer_new(&dummy)) {
			J_ASSERT(create != 0);
			J_ASSERT(handle != NULL);

			/*
			 * Now that we do not always journal data, we should
			 * keep in mind whether this should always journal the
			 * new buffer as metadata.  For now, regular file
			 * writes use ext4_get_block instead, so it's not a
			 * problem.
			 */
			lock_buffer(bh);
			BUFFER_TRACE(bh, "call get_create_access");
			fatal = ext4_journal_get_create_access(handle, bh);
			if (!fatal && !buffer_uptodate(bh)) {
				memset(bh->b_data, 0, inode->i_sb->s_blocksize);
				set_buffer_uptodate(bh);
			}
			unlock_buffer(bh);
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			err = ext4_handle_dirty_metadata(handle, inode, bh);
			if (!fatal)
				fatal = err;
		} else {
			BUFFER_TRACE(bh, "not a new buffer");
		}
		if (fatal) {
			*errp = fatal;
			brelse(bh);
			bh = NULL;
		}
		return bh;
	}
err:
	return NULL;
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ_META, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}

static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
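
/*
 * Worked example (illustrative): on a 4KB page with 1KB buffers,
 * walk_page_buffers(handle, head, 1500, 2500, &partial, fn) applies
 * fn() only to the buffers covering bytes 1024-2047 and 2048-3071; the
 * first and last buffers fall entirely outside [from, to) and are
 * skipped, setting *partial if either of them is not uptodate.
 */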

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page().  In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
				       struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	return ext4_journal_get_write_access(handle, bh);
}

/*
 * Truncate blocks that were not used by write.  We have to truncate the
 * pagecache as well so that corresponding buffers get properly unmapped.
 */
static void ext4_truncate_failed_write(struct inode *inode)
{
	truncate_inode_pages(inode->i_mapping, inode->i_size);
	ext4_truncate(inode);
}

static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
					from, to, NULL, do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		page_cache_release(page);
		/*
		 * block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again.  Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied > EXT4_I(inode)->i_disksize) {
		/* We need to mark the inode dirty even if
		 * new_i_size is less than inode->i_size
		 * but greater than i_disksize. (hint: delalloc)
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock.  First, it unnecessarily
	 * makes the holding time of page lock longer.  Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	return copied;
}
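
/*
 * Illustrative scenario (not from the original source): with delayed
 * allocation, i_disksize can lag behind i_size.  If i_size is 10MB,
 * i_disksize is 4MB, and a write ends at offset 6MB, pos + copied is
 * below i_size yet above i_disksize, so the inode must still be marked
 * dirty here to push the on-disk size forward.
 */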

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_ordered_write_end(inode, pos, len, copied);
	ret = ext4_jbd2_file_inode(handle, inode);

	if (ret == 0) {
		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
					      page, fsdata);
		copied = ret2;
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			/* if we have allocated more blocks than copied,
			 * we will have blocks allocated outside
			 * inode->i_size, so truncate them
			 */
			ext4_orphan_add(handle, inode);
		if (ret2 < 0)
			ret = ret2;
	}
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_writeback_write_end(inode, pos, len, copied);
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
				      page, fsdata);
	copied = ret2;
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* if we have allocated more blocks than copied,
		 * we will have blocks allocated outside
		 * inode->i_size, so truncate them
		 */
		ext4_orphan_add(handle, inode);

	if (ret2 < 0)
		ret = ret2;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	loff_t new_i_size;

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (copied < len) {
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from+copied, to);
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	if (!partial)
		SetPageUptodate(page);
	new_i_size = pos + copied;
	if (new_i_size > inode->i_size)
		i_size_write(inode, pos+copied);
	EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		ext4_update_i_disksize(inode, new_i_size);
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	unlock_page(page);
	page_cache_release(page);
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* if we have allocated more blocks than copied,
		 * we will have blocks allocated outside
		 * inode->i_size, so truncate them
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * Reserve a single block located at lblock
 */
static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
{
	int retries = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned long md_needed, md_reserved;

	/*
	 * recalculate the amount of metadata blocks to reserve
	 * in order to allocate nrblocks;
	 * the worst case is one extent per block
	 */
repeat:
	spin_lock(&ei->i_block_reservation_lock);
	md_reserved = ei->i_reserved_meta_blocks;
	md_needed = ext4_calc_metadata_amount(inode, lblock);
	spin_unlock(&ei->i_block_reservation_lock);

	/*
	 * Make quota reservation here to prevent quota overflow
	 * later.  Real quota accounting is done at pages writeout
	 * time.
	 */
	if (vfs_dq_reserve_block(inode, md_needed + 1)) {
		/*
		 * We tend to badly over-estimate the amount of
		 * metadata blocks which are needed, so if we have
		 * reserved any metadata blocks, try to force out the
		 * inode and see if we have any better luck.
		 */
		if (md_reserved && retries++ <= 3)
			goto retry;
		return -EDQUOT;
	}

	if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
		vfs_dq_release_reservation_block(inode, md_needed + 1);
		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
		retry:
			if (md_reserved)
				write_inode_now(inode, (retries == 3));
			yield();
			goto repeat;
		}
		return -ENOSPC;
	}
	spin_lock(&ei->i_block_reservation_lock);
	ei->i_reserved_data_blocks++;
	ei->i_reserved_meta_blocks += md_needed;
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;	/* success */
}
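
/*
 * Illustrative accounting (not from the original source): reserving a
 * data block whose worst-case metadata need is 2 blocks charges the
 * quota and the free-space pool for 3 blocks (md_needed + 1) up front;
 * i_reserved_data_blocks grows by 1 and i_reserved_meta_blocks by 2.
 * Any surplus is given back later, in ext4_da_update_reserve_space()
 * or ext4_da_release_space() below.
 */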
1837 */ 1838 if (vfs_dq_reserve_block(inode, md_needed + 1)) { 1839 /* 1840 * We tend to badly over-estimate the amount of 1841 * metadata blocks which are needed, so if we have 1842 * reserved any metadata blocks, try to force out the 1843 * inode and see if we have any better luck. 1844 */ 1845 if (md_reserved && retries++ <= 3) 1846 goto retry; 1847 return -EDQUOT; 1848 } 1849 1850 if (ext4_claim_free_blocks(sbi, md_needed + 1)) { 1851 vfs_dq_release_reservation_block(inode, md_needed + 1); 1852 if (ext4_should_retry_alloc(inode->i_sb, &retries)) { 1853 retry: 1854 if (md_reserved) 1855 write_inode_now(inode, (retries == 3)); 1856 yield(); 1857 goto repeat; 1858 } 1859 return -ENOSPC; 1860 } 1861 spin_lock(&ei->i_block_reservation_lock); 1862 ei->i_reserved_data_blocks++; 1863 ei->i_reserved_meta_blocks += md_needed; 1864 spin_unlock(&ei->i_block_reservation_lock); 1865 1866 return 0; /* success */ 1867 } 1868 1869 static void ext4_da_release_space(struct inode *inode, int to_free) 1870 { 1871 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1872 struct ext4_inode_info *ei = EXT4_I(inode); 1873 1874 if (!to_free) 1875 return; /* Nothing to release, exit */ 1876 1877 spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 1878 1879 if (unlikely(to_free > ei->i_reserved_data_blocks)) { 1880 /* 1881 * if there aren't enough reserved blocks, then the 1882 * counter is messed up somewhere. Since this 1883 * function is called from invalidate page, it's 1884 * harmless to return without any action. 1885 */ 1886 ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: " 1887 "ino %lu, to_free %d with only %d reserved " 1888 "data blocks\n", inode->i_ino, to_free, 1889 ei->i_reserved_data_blocks); 1890 WARN_ON(1); 1891 to_free = ei->i_reserved_data_blocks; 1892 } 1893 ei->i_reserved_data_blocks -= to_free; 1894 1895 if (ei->i_reserved_data_blocks == 0) { 1896 /* 1897 * We can release all of the reserved metadata blocks 1898 * only when we have written all of the delayed 1899 * allocation blocks. 1900 */ 1901 to_free += ei->i_reserved_meta_blocks; 1902 ei->i_reserved_meta_blocks = 0; 1903 ei->i_da_metadata_calc_len = 0; 1904 } 1905 1906 /* update fs dirty blocks counter */ 1907 percpu_counter_sub(&sbi->s_dirtyblocks_counter, to_free); 1908 1909 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1910 1911 vfs_dq_release_reservation_block(inode, to_free); 1912 } 1913 1914 static void ext4_da_page_release_reservation(struct page *page, 1915 unsigned long offset) 1916 { 1917 int to_release = 0; 1918 struct buffer_head *head, *bh; 1919 unsigned int curr_off = 0; 1920 1921 head = page_buffers(page); 1922 bh = head; 1923 do { 1924 unsigned int next_off = curr_off + bh->b_size; 1925 1926 if ((offset <= curr_off) && (buffer_delay(bh))) { 1927 to_release++; 1928 clear_buffer_delay(bh); 1929 } 1930 curr_off = next_off; 1931 } while ((bh = bh->b_this_page) != head); 1932 ext4_da_release_space(page->mapping->host, to_release); 1933 } 1934 1935 /* 1936 * Delayed allocation stuff 1937 */ 1938 1939 /* 1940 * mpage_da_submit_io - walks through extent of pages and try to write 1941 * them with writepage() call back 1942 * 1943 * @mpd->inode: inode 1944 * @mpd->first_page: first page of the extent 1945 * @mpd->next_page: page after the last page of the extent 1946 * 1947 * By the time mpage_da_submit_io() is called we expect all blocks 1948 * to be allocated. this may be wrong if allocation failed. 
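 *
 * For example, if the extent spans pages 8-11 (hypothetical indices),
 * mpd->first_page == 8 and mpd->next_page == 12, so the loop below
 * walks index 8 through end == 11 via pagevec_lookup().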
1949 * 1950 * As pages are already locked by write_cache_pages(), we can't use it 1951 */ 1952 static int mpage_da_submit_io(struct mpage_da_data *mpd) 1953 { 1954 long pages_skipped; 1955 struct pagevec pvec; 1956 unsigned long index, end; 1957 int ret = 0, err, nr_pages, i; 1958 struct inode *inode = mpd->inode; 1959 struct address_space *mapping = inode->i_mapping; 1960 1961 BUG_ON(mpd->next_page <= mpd->first_page); 1962 /* 1963 * We need to start from the first_page to the next_page - 1 1964 * to make sure we also write the mapped dirty buffer_heads. 1965 * If we look at mpd->b_blocknr we would only be looking 1966 * at the currently mapped buffer_heads. 1967 */ 1968 index = mpd->first_page; 1969 end = mpd->next_page - 1; 1970 1971 pagevec_init(&pvec, 0); 1972 while (index <= end) { 1973 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 1974 if (nr_pages == 0) 1975 break; 1976 for (i = 0; i < nr_pages; i++) { 1977 struct page *page = pvec.pages[i]; 1978 1979 index = page->index; 1980 if (index > end) 1981 break; 1982 index++; 1983 1984 BUG_ON(!PageLocked(page)); 1985 BUG_ON(PageWriteback(page)); 1986 1987 pages_skipped = mpd->wbc->pages_skipped; 1988 err = mapping->a_ops->writepage(page, mpd->wbc); 1989 if (!err && (pages_skipped == mpd->wbc->pages_skipped)) 1990 /* 1991 * have successfully written the page 1992 * without skipping the same 1993 */ 1994 mpd->pages_written++; 1995 /* 1996 * In error case, we have to continue because 1997 * remaining pages are still locked 1998 * XXX: unlock and re-dirty them? 1999 */ 2000 if (ret == 0) 2001 ret = err; 2002 } 2003 pagevec_release(&pvec); 2004 } 2005 return ret; 2006 } 2007 2008 /* 2009 * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers 2010 * 2011 * @mpd->inode - inode to walk through 2012 * @exbh->b_blocknr - first block on a disk 2013 * @exbh->b_size - amount of space in bytes 2014 * @logical - first logical block to start assignment with 2015 * 2016 * the function goes through all passed space and put actual disk 2017 * block numbers into buffer heads, dropping BH_Delay and BH_Unwritten 2018 */ 2019 static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical, 2020 struct buffer_head *exbh) 2021 { 2022 struct inode *inode = mpd->inode; 2023 struct address_space *mapping = inode->i_mapping; 2024 int blocks = exbh->b_size >> inode->i_blkbits; 2025 sector_t pblock = exbh->b_blocknr, cur_logical; 2026 struct buffer_head *head, *bh; 2027 pgoff_t index, end; 2028 struct pagevec pvec; 2029 int nr_pages, i; 2030 2031 index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 2032 end = (logical + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 2033 cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 2034 2035 pagevec_init(&pvec, 0); 2036 2037 while (index <= end) { 2038 /* XXX: optimize tail */ 2039 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 2040 if (nr_pages == 0) 2041 break; 2042 for (i = 0; i < nr_pages; i++) { 2043 struct page *page = pvec.pages[i]; 2044 2045 index = page->index; 2046 if (index > end) 2047 break; 2048 index++; 2049 2050 BUG_ON(!PageLocked(page)); 2051 BUG_ON(PageWriteback(page)); 2052 BUG_ON(!page_has_buffers(page)); 2053 2054 bh = page_buffers(page); 2055 head = bh; 2056 2057 /* skip blocks out of the range */ 2058 do { 2059 if (cur_logical >= logical) 2060 break; 2061 cur_logical++; 2062 } while ((bh = bh->b_this_page) != head); 2063 2064 do { 2065 if (cur_logical >= logical + blocks) 2066 break; 2067 2068 if (buffer_delay(bh) || 2069 
buffer_unwritten(bh)) { 2070 2071 BUG_ON(bh->b_bdev != inode->i_sb->s_bdev); 2072 2073 if (buffer_delay(bh)) { 2074 clear_buffer_delay(bh); 2075 bh->b_blocknr = pblock; 2076 } else { 2077 /* 2078 * unwritten already should have 2079 * blocknr assigned. Verify that 2080 */ 2081 clear_buffer_unwritten(bh); 2082 BUG_ON(bh->b_blocknr != pblock); 2083 } 2084 2085 } else if (buffer_mapped(bh)) 2086 BUG_ON(bh->b_blocknr != pblock); 2087 2088 cur_logical++; 2089 pblock++; 2090 } while ((bh = bh->b_this_page) != head); 2091 } 2092 pagevec_release(&pvec); 2093 } 2094 } 2095 2096 2097 /* 2098 * __unmap_underlying_blocks - just a helper function to unmap 2099 * set of blocks described by @bh 2100 */ 2101 static inline void __unmap_underlying_blocks(struct inode *inode, 2102 struct buffer_head *bh) 2103 { 2104 struct block_device *bdev = inode->i_sb->s_bdev; 2105 int blocks, i; 2106 2107 blocks = bh->b_size >> inode->i_blkbits; 2108 for (i = 0; i < blocks; i++) 2109 unmap_underlying_metadata(bdev, bh->b_blocknr + i); 2110 } 2111 2112 static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd, 2113 sector_t logical, long blk_cnt) 2114 { 2115 int nr_pages, i; 2116 pgoff_t index, end; 2117 struct pagevec pvec; 2118 struct inode *inode = mpd->inode; 2119 struct address_space *mapping = inode->i_mapping; 2120 2121 index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 2122 end = (logical + blk_cnt - 1) >> 2123 (PAGE_CACHE_SHIFT - inode->i_blkbits); 2124 while (index <= end) { 2125 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 2126 if (nr_pages == 0) 2127 break; 2128 for (i = 0; i < nr_pages; i++) { 2129 struct page *page = pvec.pages[i]; 2130 index = page->index; 2131 if (index > end) 2132 break; 2133 index++; 2134 2135 BUG_ON(!PageLocked(page)); 2136 BUG_ON(PageWriteback(page)); 2137 block_invalidatepage(page, 0); 2138 ClearPageUptodate(page); 2139 unlock_page(page); 2140 } 2141 } 2142 return; 2143 } 2144 2145 static void ext4_print_free_blocks(struct inode *inode) 2146 { 2147 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2148 printk(KERN_CRIT "Total free blocks count %lld\n", 2149 ext4_count_free_blocks(inode->i_sb)); 2150 printk(KERN_CRIT "Free/Dirty block details\n"); 2151 printk(KERN_CRIT "free_blocks=%lld\n", 2152 (long long) percpu_counter_sum(&sbi->s_freeblocks_counter)); 2153 printk(KERN_CRIT "dirty_blocks=%lld\n", 2154 (long long) percpu_counter_sum(&sbi->s_dirtyblocks_counter)); 2155 printk(KERN_CRIT "Block reservation details\n"); 2156 printk(KERN_CRIT "i_reserved_data_blocks=%u\n", 2157 EXT4_I(inode)->i_reserved_data_blocks); 2158 printk(KERN_CRIT "i_reserved_meta_blocks=%u\n", 2159 EXT4_I(inode)->i_reserved_meta_blocks); 2160 return; 2161 } 2162 2163 /* 2164 * mpage_da_map_blocks - go through given space 2165 * 2166 * @mpd - bh describing space 2167 * 2168 * The function skips space we know is already mapped to disk blocks. 
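 *
 * For example, a b_state with only (1 << BH_Mapped) set is already on
 * disk and makes the function return 0 immediately, while
 * (1 << BH_Mapped) | (1 << BH_Delay) still needs allocation and falls
 * through to the allocation path below.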
2169 * 2170 */ 2171 static int mpage_da_map_blocks(struct mpage_da_data *mpd) 2172 { 2173 int err, blks, get_blocks_flags; 2174 struct buffer_head new; 2175 sector_t next = mpd->b_blocknr; 2176 unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits; 2177 loff_t disksize = EXT4_I(mpd->inode)->i_disksize; 2178 handle_t *handle = NULL; 2179 2180 /* 2181 * We consider only non-mapped and non-allocated blocks 2182 */ 2183 if ((mpd->b_state & (1 << BH_Mapped)) && 2184 !(mpd->b_state & (1 << BH_Delay)) && 2185 !(mpd->b_state & (1 << BH_Unwritten))) 2186 return 0; 2187 2188 /* 2189 * If we didn't accumulate anything to write simply return 2190 */ 2191 if (!mpd->b_size) 2192 return 0; 2193 2194 handle = ext4_journal_current_handle(); 2195 BUG_ON(!handle); 2196 2197 /* 2198 * Call ext4_get_blocks() to allocate any delayed allocation 2199 * blocks, or to convert an uninitialized extent to be 2200 * initialized (in the case where we have written into 2201 * one or more preallocated blocks). 2202 * 2203 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to 2204 * indicate that we are on the delayed allocation path. This 2205 * affects functions in many different parts of the allocation 2206 * call path. This flag exists primarily because we don't 2207 * want to change *many* call functions, so ext4_get_blocks() 2208 * will set the magic i_delalloc_reserved_flag once the 2209 * inode's allocation semaphore is taken. 2210 * 2211 * If the blocks in questions were delalloc blocks, set 2212 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting 2213 * variables are updated after the blocks have been allocated. 2214 */ 2215 new.b_state = 0; 2216 get_blocks_flags = (EXT4_GET_BLOCKS_CREATE | 2217 EXT4_GET_BLOCKS_DELALLOC_RESERVE); 2218 if (mpd->b_state & (1 << BH_Delay)) 2219 get_blocks_flags |= EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE; 2220 blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks, 2221 &new, get_blocks_flags); 2222 if (blks < 0) { 2223 err = blks; 2224 /* 2225 * If get block returns with error we simply 2226 * return. Later writepage will redirty the page and 2227 * writepages will find the dirty page again 2228 */ 2229 if (err == -EAGAIN) 2230 return 0; 2231 2232 if (err == -ENOSPC && 2233 ext4_count_free_blocks(mpd->inode->i_sb)) { 2234 mpd->retval = err; 2235 return 0; 2236 } 2237 2238 /* 2239 * get block failure will cause us to loop in 2240 * writepages, because a_ops->writepage won't be able 2241 * to make progress. The page will be redirtied by 2242 * writepage and writepages will again try to write 2243 * the same. 2244 */ 2245 ext4_msg(mpd->inode->i_sb, KERN_CRIT, 2246 "delayed block allocation failed for inode %lu at " 2247 "logical offset %llu with max blocks %zd with " 2248 "error %d\n", mpd->inode->i_ino, 2249 (unsigned long long) next, 2250 mpd->b_size >> mpd->inode->i_blkbits, err); 2251 printk(KERN_CRIT "This should not happen!! 
" 2252 "Data will be lost\n"); 2253 if (err == -ENOSPC) { 2254 ext4_print_free_blocks(mpd->inode); 2255 } 2256 /* invalidate all the pages */ 2257 ext4_da_block_invalidatepages(mpd, next, 2258 mpd->b_size >> mpd->inode->i_blkbits); 2259 return err; 2260 } 2261 BUG_ON(blks == 0); 2262 2263 new.b_size = (blks << mpd->inode->i_blkbits); 2264 2265 if (buffer_new(&new)) 2266 __unmap_underlying_blocks(mpd->inode, &new); 2267 2268 /* 2269 * If blocks are delayed marked, we need to 2270 * put actual blocknr and drop delayed bit 2271 */ 2272 if ((mpd->b_state & (1 << BH_Delay)) || 2273 (mpd->b_state & (1 << BH_Unwritten))) 2274 mpage_put_bnr_to_bhs(mpd, next, &new); 2275 2276 if (ext4_should_order_data(mpd->inode)) { 2277 err = ext4_jbd2_file_inode(handle, mpd->inode); 2278 if (err) 2279 return err; 2280 } 2281 2282 /* 2283 * Update on-disk size along with block allocation. 2284 */ 2285 disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits; 2286 if (disksize > i_size_read(mpd->inode)) 2287 disksize = i_size_read(mpd->inode); 2288 if (disksize > EXT4_I(mpd->inode)->i_disksize) { 2289 ext4_update_i_disksize(mpd->inode, disksize); 2290 return ext4_mark_inode_dirty(handle, mpd->inode); 2291 } 2292 2293 return 0; 2294 } 2295 2296 #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \ 2297 (1 << BH_Delay) | (1 << BH_Unwritten)) 2298 2299 /* 2300 * mpage_add_bh_to_extent - try to add one more block to extent of blocks 2301 * 2302 * @mpd->lbh - extent of blocks 2303 * @logical - logical number of the block in the file 2304 * @bh - bh of the block (used to access block's state) 2305 * 2306 * the function is used to collect contig. blocks in same state 2307 */ 2308 static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, 2309 sector_t logical, size_t b_size, 2310 unsigned long b_state) 2311 { 2312 sector_t next; 2313 int nrblocks = mpd->b_size >> mpd->inode->i_blkbits; 2314 2315 /* check if thereserved journal credits might overflow */ 2316 if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) { 2317 if (nrblocks >= EXT4_MAX_TRANS_DATA) { 2318 /* 2319 * With non-extent format we are limited by the journal 2320 * credit available. Total credit needed to insert 2321 * nrblocks contiguous blocks is dependent on the 2322 * nrblocks. So limit nrblocks. 2323 */ 2324 goto flush_it; 2325 } else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) > 2326 EXT4_MAX_TRANS_DATA) { 2327 /* 2328 * Adding the new buffer_head would make it cross the 2329 * allowed limit for which we have journal credit 2330 * reserved. So limit the new bh->b_size 2331 */ 2332 b_size = (EXT4_MAX_TRANS_DATA - nrblocks) << 2333 mpd->inode->i_blkbits; 2334 /* we will do mpage_da_submit_io in the next loop */ 2335 } 2336 } 2337 /* 2338 * First block in the extent 2339 */ 2340 if (mpd->b_size == 0) { 2341 mpd->b_blocknr = logical; 2342 mpd->b_size = b_size; 2343 mpd->b_state = b_state & BH_FLAGS; 2344 return; 2345 } 2346 2347 next = mpd->b_blocknr + nrblocks; 2348 /* 2349 * Can we merge the block to our big extent? 
2350 */
2351 if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
2352 mpd->b_size += b_size;
2353 return;
2354 }
2355
2356 flush_it:
2357 /*
2358 * We couldn't merge the block to our extent, so we
2359 * need to flush the current extent and start a new one.
2360 */
2361 if (mpage_da_map_blocks(mpd) == 0)
2362 mpage_da_submit_io(mpd);
2363 mpd->io_done = 1;
2364 return;
2365 }
2366
2367 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
2368 {
2369 return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
2370 }
2371
2372 /*
2373 * __mpage_da_writepage - finds extent of pages and blocks
2374 *
2375 * @page: page to consider
2376 * @wbc: not used, we just follow rules
2377 * @data: context
2378 *
2379 * The function finds extents of pages and scans them for all blocks.
2380 */
2381 static int __mpage_da_writepage(struct page *page,
2382 struct writeback_control *wbc, void *data)
2383 {
2384 struct mpage_da_data *mpd = data;
2385 struct inode *inode = mpd->inode;
2386 struct buffer_head *bh, *head;
2387 sector_t logical;
2388
2389 if (mpd->io_done) {
2390 /*
2391 * For the rest of the pages in the page_vec,
2392 * redirty them and skip them. We will
2393 * try to write them again after
2394 * starting a new transaction.
2395 */
2396 redirty_page_for_writepage(wbc, page);
2397 unlock_page(page);
2398 return MPAGE_DA_EXTENT_TAIL;
2399 }
2400 /*
2401 * Can we merge this page to the current extent?
2402 */
2403 if (mpd->next_page != page->index) {
2404 /*
2405 * Nope, we can't. So, we map non-allocated blocks
2406 * and start IO on them using writepage()
2407 */
2408 if (mpd->next_page != mpd->first_page) {
2409 if (mpage_da_map_blocks(mpd) == 0)
2410 mpage_da_submit_io(mpd);
2411 /*
2412 * skip rest of the pages in the page_vec
2413 */
2414 mpd->io_done = 1;
2415 redirty_page_for_writepage(wbc, page);
2416 unlock_page(page);
2417 return MPAGE_DA_EXTENT_TAIL;
2418 }
2419
2420 /*
2421 * Start next extent of pages ...
2422 */
2423 mpd->first_page = page->index;
2424
2425 /*
2426 * ... and blocks
2427 */
2428 mpd->b_size = 0;
2429 mpd->b_state = 0;
2430 mpd->b_blocknr = 0;
2431 }
2432
2433 mpd->next_page = page->index + 1;
2434 logical = (sector_t) page->index <<
2435 (PAGE_CACHE_SHIFT - inode->i_blkbits);
2436
2437 if (!page_has_buffers(page)) {
2438 mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE,
2439 (1 << BH_Dirty) | (1 << BH_Uptodate));
2440 if (mpd->io_done)
2441 return MPAGE_DA_EXTENT_TAIL;
2442 } else {
2443 /*
2444 * Page with regular buffer heads, just add all dirty ones
2445 */
2446 head = page_buffers(page);
2447 bh = head;
2448 do {
2449 BUG_ON(buffer_locked(bh));
2450 /*
2451 * We need to try to allocate
2452 * unmapped blocks in the same page.
2453 * Otherwise we won't make progress
2454 * with the page in ext4_writepage
2455 */
2456 if (ext4_bh_delay_or_unwritten(NULL, bh)) {
2457 mpage_add_bh_to_extent(mpd, logical,
2458 bh->b_size,
2459 bh->b_state);
2460 if (mpd->io_done)
2461 return MPAGE_DA_EXTENT_TAIL;
2462 } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
2463 /*
2464 * mapped dirty buffer. We need to update
2465 * the b_state because we look at
2466 * b_state in mpage_da_map_blocks. We don't
2467 * update b_size because if we find an
2468 * unmapped buffer_head later we need to
2469 * use the b_state flag of that buffer_head.
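 *
 * For example (hypothetical page layout): a first bh that is
 * mapped-dirty leaves mpd->b_size == 0 but makes us adopt its
 * b_state here, and a later unmapped-dirty bh then opens the
 * extent with that state via mpage_add_bh_to_extent().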
2470 */
2471 if (mpd->b_size == 0)
2472 mpd->b_state = bh->b_state & BH_FLAGS;
2473 }
2474 logical++;
2475 } while ((bh = bh->b_this_page) != head);
2476 }
2477
2478 return 0;
2479 }
2480
2481 /*
2482 * This is a special get_blocks_t callback which is used by
2483 * ext4_da_write_begin(). It will either return a mapped block or
2484 * reserve space for a single block.
2485 *
2486 * For a delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
2487 * We also have b_blocknr = -1 and b_bdev initialized properly.
2488 *
2489 * For an unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
2490 * We also have b_blocknr = the physical block mapping the unwritten extent
2491 * and b_bdev initialized properly.
2492 */
2493 static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
2494 struct buffer_head *bh_result, int create)
2495 {
2496 int ret = 0;
2497 sector_t invalid_block = ~((sector_t) 0xffff);
2498
2499 if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
2500 invalid_block = ~0;
2501
2502 BUG_ON(create == 0);
2503 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
2504
2505 /*
2506 * First, we need to know whether the block is already allocated;
2507 * preallocated blocks are unmapped but should be treated
2508 * the same as allocated blocks.
2509 */
2510 ret = ext4_get_blocks(NULL, inode, iblock, 1, bh_result, 0);
2511 if ((ret == 0) && !buffer_delay(bh_result)) {
2512 /* the block isn't (pre)allocated yet, let's reserve space */
2513 /*
2514 * XXX: __block_prepare_write() unmaps passed block,
2515 * is it OK?
2516 */
2517 ret = ext4_da_reserve_space(inode, iblock);
2518 if (ret)
2519 /* not enough space to reserve */
2520 return ret;
2521
2522 map_bh(bh_result, inode->i_sb, invalid_block);
2523 set_buffer_new(bh_result);
2524 set_buffer_delay(bh_result);
2525 } else if (ret > 0) {
2526 bh_result->b_size = (ret << inode->i_blkbits);
2527 if (buffer_unwritten(bh_result)) {
2528 /* A delayed write to an unwritten bh should
2529 * be marked new and mapped. Mapped ensures
2530 * that we don't do get_block multiple times
2531 * when we write to the same offset, and new
2532 * ensures that we do proper zero out for
2533 * partial writes.
2534 */
2535 set_buffer_new(bh_result);
2536 set_buffer_mapped(bh_result);
2537 }
2538 ret = 0;
2539 }
2540
2541 return ret;
2542 }
2543
2544 /*
2545 * This function is used as a standard get_block_t callback function
2546 * when there is no desire to allocate any blocks. It is used as a
2547 * callback function for block_prepare_write(), nobh_writepage(), and
2548 * block_write_full_page(). These functions should only try to map a
2549 * single block at a time.
2550 *
2551 * Since this function doesn't do block allocations even if the caller
2552 * requests it by passing in create=1, it is critically important that
2553 * any caller checks to make sure that any buffer heads returned by
2554 * this function are either all already mapped or marked for
2555 * delayed allocation before calling nobh_writepage() or
2556 * block_write_full_page(). Otherwise, b_blocknr could be left
2557 * uninitialized, and the page write functions will be taken by
2558 * surprise.
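 *
 * A minimal sketch of the check a caller is expected to have made,
 * illustrative only (ext4_writepage() below does the equivalent); a
 * nonzero return here means the page is not safe for these paths and
 * must be redirtied instead:
 *
 *	walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
 *			  ext4_bh_delay_or_unwritten);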
2559 */ 2560 static int noalloc_get_block_write(struct inode *inode, sector_t iblock, 2561 struct buffer_head *bh_result, int create) 2562 { 2563 int ret = 0; 2564 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; 2565 2566 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize); 2567 2568 /* 2569 * we don't want to do block allocation in writepage 2570 * so call get_block_wrap with create = 0 2571 */ 2572 ret = ext4_get_blocks(NULL, inode, iblock, max_blocks, bh_result, 0); 2573 if (ret > 0) { 2574 bh_result->b_size = (ret << inode->i_blkbits); 2575 ret = 0; 2576 } 2577 return ret; 2578 } 2579 2580 static int bget_one(handle_t *handle, struct buffer_head *bh) 2581 { 2582 get_bh(bh); 2583 return 0; 2584 } 2585 2586 static int bput_one(handle_t *handle, struct buffer_head *bh) 2587 { 2588 put_bh(bh); 2589 return 0; 2590 } 2591 2592 static int __ext4_journalled_writepage(struct page *page, 2593 unsigned int len) 2594 { 2595 struct address_space *mapping = page->mapping; 2596 struct inode *inode = mapping->host; 2597 struct buffer_head *page_bufs; 2598 handle_t *handle = NULL; 2599 int ret = 0; 2600 int err; 2601 2602 page_bufs = page_buffers(page); 2603 BUG_ON(!page_bufs); 2604 walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one); 2605 /* As soon as we unlock the page, it can go away, but we have 2606 * references to buffers so we are safe */ 2607 unlock_page(page); 2608 2609 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); 2610 if (IS_ERR(handle)) { 2611 ret = PTR_ERR(handle); 2612 goto out; 2613 } 2614 2615 ret = walk_page_buffers(handle, page_bufs, 0, len, NULL, 2616 do_journal_get_write_access); 2617 2618 err = walk_page_buffers(handle, page_bufs, 0, len, NULL, 2619 write_end_fn); 2620 if (ret == 0) 2621 ret = err; 2622 err = ext4_journal_stop(handle); 2623 if (!ret) 2624 ret = err; 2625 2626 walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one); 2627 EXT4_I(inode)->i_state |= EXT4_STATE_JDATA; 2628 out: 2629 return ret; 2630 } 2631 2632 /* 2633 * Note that we don't need to start a transaction unless we're journaling data 2634 * because we should have holes filled from ext4_page_mkwrite(). We even don't 2635 * need to file the inode to the transaction's list in ordered mode because if 2636 * we are writing back data added by write(), the inode is already there and if 2637 * we are writing back data modified via mmap(), noone guarantees in which 2638 * transaction the data will hit the disk. In case we are journaling data, we 2639 * cannot start transaction directly because transaction start ranks above page 2640 * lock so we have to do some magic. 2641 * 2642 * This function can get called via... 2643 * - ext4_da_writepages after taking page lock (have journal handle) 2644 * - journal_submit_inode_data_buffers (no journal handle) 2645 * - shrink_page_list via pdflush (no journal handle) 2646 * - grab_page_cache when doing write_begin (have journal handle) 2647 * 2648 * We don't do any block allocation in this function. If we have page with 2649 * multiple blocks we need to write those buffer_heads that are mapped. This 2650 * is important for mmaped based write. So if we do with blocksize 1K 2651 * truncate(f, 1024); 2652 * a = mmap(f, 0, 4096); 2653 * a[0] = 'a'; 2654 * truncate(f, 4096); 2655 * we have in the page first buffer_head mapped via page_mkwrite call back 2656 * but other bufer_heads would be unmapped but dirty(dirty done via the 2657 * do_wp_page). So writepage should write the first block. 
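 * (With 4K pages and the 1K blocksize above, the page carries four
 * buffer_heads, so only the first of the four is mapped.)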
If we modify
2658 * the mmap area beyond 1024 we will again get a page_fault and the
2659 * page_mkwrite callback will do the block allocation and mark the
2660 * buffer_heads mapped.
2661 *
2662 * We redirty the page if we have any buffer_heads that are either delayed
2663 * or unwritten in the page.
2664 *
2665 * We can get recursively called as shown below.
2666 *
2667 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
2668 * ext4_writepage()
2669 *
2670 * But since we don't do any block allocation we should not deadlock.
2671 * The page also has the dirty flag cleared, so we don't get a recursive
2672 * page lock.
2673 */
2674 static int ext4_writepage(struct page *page,
2675 struct writeback_control *wbc)
2676 {
2677 int ret = 0;
2678 loff_t size;
2679 unsigned int len;
2680 struct buffer_head *page_bufs;
2681 struct inode *inode = page->mapping->host;
2682
2683 trace_ext4_writepage(inode, page);
2684 size = i_size_read(inode);
2685 if (page->index == size >> PAGE_CACHE_SHIFT)
2686 len = size & ~PAGE_CACHE_MASK;
2687 else
2688 len = PAGE_CACHE_SIZE;
2689
2690 if (page_has_buffers(page)) {
2691 page_bufs = page_buffers(page);
2692 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2693 ext4_bh_delay_or_unwritten)) {
2694 /*
2695 * We don't want to do block allocation, so redirty
2696 * the page and return. We may reach here when we do
2697 * a journal commit via
2698 * journal_submit_inode_data_buffers; if we don't have
2699 * a mapped block we just ignore it. We can also
2700 * reach here via shrink_page_list.
2701 */
2702 redirty_page_for_writepage(wbc, page);
2703 unlock_page(page);
2704 return 0;
2705 }
2706 } else {
2707 /*
2708 * The test for page_has_buffers() is subtle:
2709 * We know the page is dirty but it lost buffers. That means
2710 * that at some moment in time after write_begin()/write_end()
2711 * has been called all buffers have been clean and thus they
2712 * must have been written at least once. So they are all
2713 * mapped and we can happily proceed with mapping them
2714 * and writing the page.
2715 *
2716 * Try to initialize the buffer_heads and check whether
2717 * all are mapped and non-delay. We don't want to
2718 * do block allocation here.
2719 */
2720 ret = block_prepare_write(page, 0, len,
2721 noalloc_get_block_write);
2722 if (!ret) {
2723 page_bufs = page_buffers(page);
2724 /* check whether all are mapped and non-delay */
2725 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2726 ext4_bh_delay_or_unwritten)) {
2727 redirty_page_for_writepage(wbc, page);
2728 unlock_page(page);
2729 return 0;
2730 }
2731 } else {
2732 /*
2733 * We can't do block allocation here,
2734 * so just redirty the page, unlock
2735 * and return.
2736 */
2737 redirty_page_for_writepage(wbc, page);
2738 unlock_page(page);
2739 return 0;
2740 }
2741 /* now mark the buffer_heads as dirty and uptodate */
2742 block_commit_write(page, 0, len);
2743 }
2744
2745 if (PageChecked(page) && ext4_should_journal_data(inode)) {
2746 /*
2747 * It's mmapped pagecache. Add buffers and journal it. There
2748 * doesn't seem much point in redirtying the page here.
2748 */
2749 ClearPageChecked(page);
2750 return __ext4_journalled_writepage(page, len);
2751 }
2752
2753 if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
2754 ret = nobh_writepage(page, noalloc_get_block_write, wbc);
2755 else
2756 ret = block_write_full_page(page, noalloc_get_block_write,
2757 wbc);
2758
2759 return ret;
2760 }
2761
2762 /*
2763 * This is called via ext4_da_writepages() to
2764 * calculate the total number of credits to reserve to fit
2765 * a single extent allocation into a single transaction.
2766 * ext4_da_writepages() will loop calling this before
2767 * the block allocation.
2768 */
2769
2770 static int ext4_da_writepages_trans_blocks(struct inode *inode)
2771 {
2772 int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
2773
2774 /*
2775 * With the non-extent format, the journal credit needed to
2776 * insert nrblocks contiguous blocks depends on the number of
2777 * contiguous blocks, so we limit the number of contiguous
2778 * blocks to a sane value.
2779 */
2780 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) &&
2781 (max_blocks > EXT4_MAX_TRANS_DATA))
2782 max_blocks = EXT4_MAX_TRANS_DATA;
2783
2784 return ext4_chunk_trans_blocks(inode, max_blocks);
2785 }
2786
2787 static int ext4_da_writepages(struct address_space *mapping,
2788 struct writeback_control *wbc)
2789 {
2790 pgoff_t index;
2791 int range_whole = 0;
2792 handle_t *handle = NULL;
2793 struct mpage_da_data mpd;
2794 struct inode *inode = mapping->host;
2795 int no_nrwrite_index_update;
2796 int pages_written = 0;
2797 long pages_skipped;
2798 unsigned int max_pages;
2799 int range_cyclic, cycled = 1, io_done = 0;
2800 int needed_blocks, ret = 0;
2801 long desired_nr_to_write, nr_to_writebump = 0;
2802 loff_t range_start = wbc->range_start;
2803 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2804
2805 trace_ext4_da_writepages(inode, wbc);
2806
2807 /*
2808 * No pages to write? This is mainly a kludge to avoid starting
2809 * a transaction for special inodes like the journal inode on last
2810 * iput(), because that could violate lock ordering on umount.
2811 */
2812 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2813 return 0;
2814
2815 /*
2816 * If the filesystem has aborted, it is read-only, so return
2817 * right away instead of dumping stack traces later on that
2818 * will obscure the real source of the problem. We test
2819 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
2820 * the latter could be true if the filesystem is mounted
2821 * read-only, and in that case, ext4_da_writepages should
2822 * *never* be called, so if that ever happens, we would want
2823 * the stack trace.
2824 */
2825 if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
2826 return -EROFS;
2827
2828 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2829 range_whole = 1;
2830
2831 range_cyclic = wbc->range_cyclic;
2832 if (wbc->range_cyclic) {
2833 index = mapping->writeback_index;
2834 if (index)
2835 cycled = 0;
2836 wbc->range_start = index << PAGE_CACHE_SHIFT;
2837 wbc->range_end = LLONG_MAX;
2838 wbc->range_cyclic = 0;
2839 } else
2840 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2841
2842 /*
2843 * This works around two forms of stupidity. The first is in
2844 * the writeback code, which caps the maximum number of pages
2845 * written to be 1024 pages. This is wrong on multiple
2846 * levels; different architectures have a different page size,
2847 * which changes the maximum amount of data which gets
2848 * written.
Secondly, 4 megabytes is way too small. XFS 2849 * forces this value to be 16 megabytes by multiplying 2850 * nr_to_write parameter by four, and then relies on its 2851 * allocator to allocate larger extents to make them 2852 * contiguous. Unfortunately this brings us to the second 2853 * stupidity, which is that ext4's mballoc code only allocates 2854 * at most 2048 blocks. So we force contiguous writes up to 2855 * the number of dirty blocks in the inode, or 2856 * sbi->max_writeback_mb_bump whichever is smaller. 2857 */ 2858 max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT); 2859 if (!range_cyclic && range_whole) 2860 desired_nr_to_write = wbc->nr_to_write * 8; 2861 else 2862 desired_nr_to_write = ext4_num_dirty_pages(inode, index, 2863 max_pages); 2864 if (desired_nr_to_write > max_pages) 2865 desired_nr_to_write = max_pages; 2866 2867 if (wbc->nr_to_write < desired_nr_to_write) { 2868 nr_to_writebump = desired_nr_to_write - wbc->nr_to_write; 2869 wbc->nr_to_write = desired_nr_to_write; 2870 } 2871 2872 mpd.wbc = wbc; 2873 mpd.inode = mapping->host; 2874 2875 /* 2876 * we don't want write_cache_pages to update 2877 * nr_to_write and writeback_index 2878 */ 2879 no_nrwrite_index_update = wbc->no_nrwrite_index_update; 2880 wbc->no_nrwrite_index_update = 1; 2881 pages_skipped = wbc->pages_skipped; 2882 2883 retry: 2884 while (!ret && wbc->nr_to_write > 0) { 2885 2886 /* 2887 * we insert one extent at a time. So we need 2888 * credit needed for single extent allocation. 2889 * journalled mode is currently not supported 2890 * by delalloc 2891 */ 2892 BUG_ON(ext4_should_journal_data(inode)); 2893 needed_blocks = ext4_da_writepages_trans_blocks(inode); 2894 2895 /* start a new transaction*/ 2896 handle = ext4_journal_start(inode, needed_blocks); 2897 if (IS_ERR(handle)) { 2898 ret = PTR_ERR(handle); 2899 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " 2900 "%ld pages, ino %lu; err %d\n", __func__, 2901 wbc->nr_to_write, inode->i_ino, ret); 2902 goto out_writepages; 2903 } 2904 2905 /* 2906 * Now call __mpage_da_writepage to find the next 2907 * contiguous region of logical blocks that need 2908 * blocks to be allocated by ext4. We don't actually 2909 * submit the blocks for I/O here, even though 2910 * write_cache_pages thinks it will, and will set the 2911 * pages as clean for write before calling 2912 * __mpage_da_writepage(). 2913 */ 2914 mpd.b_size = 0; 2915 mpd.b_state = 0; 2916 mpd.b_blocknr = 0; 2917 mpd.first_page = 0; 2918 mpd.next_page = 0; 2919 mpd.io_done = 0; 2920 mpd.pages_written = 0; 2921 mpd.retval = 0; 2922 ret = write_cache_pages(mapping, wbc, __mpage_da_writepage, 2923 &mpd); 2924 /* 2925 * If we have a contiguous extent of pages and we 2926 * haven't done the I/O yet, map the blocks and submit 2927 * them for I/O. 
2928 */ 2929 if (!mpd.io_done && mpd.next_page != mpd.first_page) { 2930 if (mpage_da_map_blocks(&mpd) == 0) 2931 mpage_da_submit_io(&mpd); 2932 mpd.io_done = 1; 2933 ret = MPAGE_DA_EXTENT_TAIL; 2934 } 2935 trace_ext4_da_write_pages(inode, &mpd); 2936 wbc->nr_to_write -= mpd.pages_written; 2937 2938 ext4_journal_stop(handle); 2939 2940 if ((mpd.retval == -ENOSPC) && sbi->s_journal) { 2941 /* commit the transaction which would 2942 * free blocks released in the transaction 2943 * and try again 2944 */ 2945 jbd2_journal_force_commit_nested(sbi->s_journal); 2946 wbc->pages_skipped = pages_skipped; 2947 ret = 0; 2948 } else if (ret == MPAGE_DA_EXTENT_TAIL) { 2949 /* 2950 * got one extent now try with 2951 * rest of the pages 2952 */ 2953 pages_written += mpd.pages_written; 2954 wbc->pages_skipped = pages_skipped; 2955 ret = 0; 2956 io_done = 1; 2957 } else if (wbc->nr_to_write) 2958 /* 2959 * There is no more writeout needed 2960 * or we requested for a noblocking writeout 2961 * and we found the device congested 2962 */ 2963 break; 2964 } 2965 if (!io_done && !cycled) { 2966 cycled = 1; 2967 index = 0; 2968 wbc->range_start = index << PAGE_CACHE_SHIFT; 2969 wbc->range_end = mapping->writeback_index - 1; 2970 goto retry; 2971 } 2972 if (pages_skipped != wbc->pages_skipped) 2973 ext4_msg(inode->i_sb, KERN_CRIT, 2974 "This should not happen leaving %s " 2975 "with nr_to_write = %ld ret = %d\n", 2976 __func__, wbc->nr_to_write, ret); 2977 2978 /* Update index */ 2979 index += pages_written; 2980 wbc->range_cyclic = range_cyclic; 2981 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 2982 /* 2983 * set the writeback_index so that range_cyclic 2984 * mode will write it back later 2985 */ 2986 mapping->writeback_index = index; 2987 2988 out_writepages: 2989 if (!no_nrwrite_index_update) 2990 wbc->no_nrwrite_index_update = 0; 2991 wbc->nr_to_write -= nr_to_writebump; 2992 wbc->range_start = range_start; 2993 trace_ext4_da_writepages_result(inode, wbc, ret, pages_written); 2994 return ret; 2995 } 2996 2997 #define FALL_BACK_TO_NONDELALLOC 1 2998 static int ext4_nonda_switch(struct super_block *sb) 2999 { 3000 s64 free_blocks, dirty_blocks; 3001 struct ext4_sb_info *sbi = EXT4_SB(sb); 3002 3003 /* 3004 * switch to non delalloc mode if we are running low 3005 * on free block. The free block accounting via percpu 3006 * counters can get slightly wrong with percpu_counter_batch getting 3007 * accumulated on each CPU without updating global counters 3008 * Delalloc need an accurate free block accounting. So switch 3009 * to non delalloc when we are near to error range. 3010 */ 3011 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); 3012 dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter); 3013 if (2 * free_blocks < 3 * dirty_blocks || 3014 free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) { 3015 /* 3016 * free block count is less than 150% of dirty blocks 3017 * or free blocks is less than watermark 3018 */ 3019 return 1; 3020 } 3021 /* 3022 * Even if we don't switch but are nearing capacity, 3023 * start pushing delalloc when 1/2 of free blocks are dirty. 
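 *
 * Worked example with hypothetical counters, ignoring the watermark
 * term: free_blocks == 1000 and dirty_blocks == 700 gives
 * 2 * 1000 < 3 * 700, so we switch to nondelalloc; with
 * dirty_blocks == 600 we stay in delalloc, but 1000 < 2 * 600 still
 * triggers writeback_inodes_sb_if_idle() below.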
3024 */
3025 if (free_blocks < 2 * dirty_blocks)
3026 writeback_inodes_sb_if_idle(sb);
3027
3028 return 0;
3029 }
3030
3031 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
3032 loff_t pos, unsigned len, unsigned flags,
3033 struct page **pagep, void **fsdata)
3034 {
3035 int ret, retries = 0;
3036 struct page *page;
3037 pgoff_t index;
3038 unsigned from, to;
3039 struct inode *inode = mapping->host;
3040 handle_t *handle;
3041
3042 index = pos >> PAGE_CACHE_SHIFT;
3043 from = pos & (PAGE_CACHE_SIZE - 1);
3044 to = from + len;
3045
3046 if (ext4_nonda_switch(inode->i_sb)) {
3047 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
3048 return ext4_write_begin(file, mapping, pos,
3049 len, flags, pagep, fsdata);
3050 }
3051 *fsdata = (void *)0;
3052 trace_ext4_da_write_begin(inode, pos, len, flags);
3053 retry:
3054 /*
3055 * With delayed allocation, we don't log the i_disksize update
3056 * if there is delayed block allocation. But we still need
3057 * to journal the i_disksize update if the write goes to the
3058 * end of the file and the buffer is already mapped.
3059 */
3060 handle = ext4_journal_start(inode, 1);
3061 if (IS_ERR(handle)) {
3062 ret = PTR_ERR(handle);
3063 goto out;
3064 }
3065 /* We cannot recurse into the filesystem as the transaction is already
3066 * started */
3067 flags |= AOP_FLAG_NOFS;
3068
3069 page = grab_cache_page_write_begin(mapping, index, flags);
3070 if (!page) {
3071 ext4_journal_stop(handle);
3072 ret = -ENOMEM;
3073 goto out;
3074 }
3075 *pagep = page;
3076
3077 ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
3078 ext4_da_get_block_prep);
3079 if (ret < 0) {
3080 unlock_page(page);
3081 ext4_journal_stop(handle);
3082 page_cache_release(page);
3083 /*
3084 * block_write_begin may have instantiated a few blocks
3085 * outside i_size. Trim these off again. Don't need
3086 * i_size_read because we hold i_mutex.
3087 */ 3088 if (pos + len > inode->i_size) 3089 ext4_truncate_failed_write(inode); 3090 } 3091 3092 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 3093 goto retry; 3094 out: 3095 return ret; 3096 } 3097 3098 /* 3099 * Check if we should update i_disksize 3100 * when write to the end of file but not require block allocation 3101 */ 3102 static int ext4_da_should_update_i_disksize(struct page *page, 3103 unsigned long offset) 3104 { 3105 struct buffer_head *bh; 3106 struct inode *inode = page->mapping->host; 3107 unsigned int idx; 3108 int i; 3109 3110 bh = page_buffers(page); 3111 idx = offset >> inode->i_blkbits; 3112 3113 for (i = 0; i < idx; i++) 3114 bh = bh->b_this_page; 3115 3116 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh)) 3117 return 0; 3118 return 1; 3119 } 3120 3121 static int ext4_da_write_end(struct file *file, 3122 struct address_space *mapping, 3123 loff_t pos, unsigned len, unsigned copied, 3124 struct page *page, void *fsdata) 3125 { 3126 struct inode *inode = mapping->host; 3127 int ret = 0, ret2; 3128 handle_t *handle = ext4_journal_current_handle(); 3129 loff_t new_i_size; 3130 unsigned long start, end; 3131 int write_mode = (int)(unsigned long)fsdata; 3132 3133 if (write_mode == FALL_BACK_TO_NONDELALLOC) { 3134 if (ext4_should_order_data(inode)) { 3135 return ext4_ordered_write_end(file, mapping, pos, 3136 len, copied, page, fsdata); 3137 } else if (ext4_should_writeback_data(inode)) { 3138 return ext4_writeback_write_end(file, mapping, pos, 3139 len, copied, page, fsdata); 3140 } else { 3141 BUG(); 3142 } 3143 } 3144 3145 trace_ext4_da_write_end(inode, pos, len, copied); 3146 start = pos & (PAGE_CACHE_SIZE - 1); 3147 end = start + copied - 1; 3148 3149 /* 3150 * generic_write_end() will run mark_inode_dirty() if i_size 3151 * changes. So let's piggyback the i_disksize mark_inode_dirty 3152 * into that. 3153 */ 3154 3155 new_i_size = pos + copied; 3156 if (new_i_size > EXT4_I(inode)->i_disksize) { 3157 if (ext4_da_should_update_i_disksize(page, end)) { 3158 down_write(&EXT4_I(inode)->i_data_sem); 3159 if (new_i_size > EXT4_I(inode)->i_disksize) { 3160 /* 3161 * Updating i_disksize when extending file 3162 * without needing block allocation 3163 */ 3164 if (ext4_should_order_data(inode)) 3165 ret = ext4_jbd2_file_inode(handle, 3166 inode); 3167 3168 EXT4_I(inode)->i_disksize = new_i_size; 3169 } 3170 up_write(&EXT4_I(inode)->i_data_sem); 3171 /* We need to mark inode dirty even if 3172 * new_i_size is less that inode->i_size 3173 * bu greater than i_disksize.(hint delalloc) 3174 */ 3175 ext4_mark_inode_dirty(handle, inode); 3176 } 3177 } 3178 ret2 = generic_write_end(file, mapping, pos, len, copied, 3179 page, fsdata); 3180 copied = ret2; 3181 if (ret2 < 0) 3182 ret = ret2; 3183 ret2 = ext4_journal_stop(handle); 3184 if (!ret) 3185 ret = ret2; 3186 3187 return ret ? ret : copied; 3188 } 3189 3190 static void ext4_da_invalidatepage(struct page *page, unsigned long offset) 3191 { 3192 /* 3193 * Drop reserved blocks 3194 */ 3195 BUG_ON(!PageLocked(page)); 3196 if (!page_has_buffers(page)) 3197 goto out; 3198 3199 ext4_da_page_release_reservation(page, offset); 3200 3201 out: 3202 ext4_invalidatepage(page, offset); 3203 3204 return; 3205 } 3206 3207 /* 3208 * Force all delayed allocation blocks to be allocated for a given inode. 
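 *
 * A minimal usage sketch, hedged: callers elsewhere in the tree (for
 * example the file-release path) do the equivalent of
 *
 *	int err = ext4_alloc_da_blocks(inode);
 *	if (err)
 *		return err;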
3209 */ 3210 int ext4_alloc_da_blocks(struct inode *inode) 3211 { 3212 trace_ext4_alloc_da_blocks(inode); 3213 3214 if (!EXT4_I(inode)->i_reserved_data_blocks && 3215 !EXT4_I(inode)->i_reserved_meta_blocks) 3216 return 0; 3217 3218 /* 3219 * We do something simple for now. The filemap_flush() will 3220 * also start triggering a write of the data blocks, which is 3221 * not strictly speaking necessary (and for users of 3222 * laptop_mode, not even desirable). However, to do otherwise 3223 * would require replicating code paths in: 3224 * 3225 * ext4_da_writepages() -> 3226 * write_cache_pages() ---> (via passed in callback function) 3227 * __mpage_da_writepage() --> 3228 * mpage_add_bh_to_extent() 3229 * mpage_da_map_blocks() 3230 * 3231 * The problem is that write_cache_pages(), located in 3232 * mm/page-writeback.c, marks pages clean in preparation for 3233 * doing I/O, which is not desirable if we're not planning on 3234 * doing I/O at all. 3235 * 3236 * We could call write_cache_pages(), and then redirty all of 3237 * the pages by calling redirty_page_for_writeback() but that 3238 * would be ugly in the extreme. So instead we would need to 3239 * replicate parts of the code in the above functions, 3240 * simplifying them becuase we wouldn't actually intend to 3241 * write out the pages, but rather only collect contiguous 3242 * logical block extents, call the multi-block allocator, and 3243 * then update the buffer heads with the block allocations. 3244 * 3245 * For now, though, we'll cheat by calling filemap_flush(), 3246 * which will map the blocks, and start the I/O, but not 3247 * actually wait for the I/O to complete. 3248 */ 3249 return filemap_flush(inode->i_mapping); 3250 } 3251 3252 /* 3253 * bmap() is special. It gets used by applications such as lilo and by 3254 * the swapper to find the on-disk block of a specific piece of data. 3255 * 3256 * Naturally, this is dangerous if the block concerned is still in the 3257 * journal. If somebody makes a swapfile on an ext4 data-journaling 3258 * filesystem and enables swap, then they may get a nasty shock when the 3259 * data getting swapped to that swapfile suddenly gets overwritten by 3260 * the original zero's written out previously to the journal and 3261 * awaiting writeback in the kernel's buffer cache. 3262 * 3263 * So, if we see any bmap calls here on a modified, data-journaled file, 3264 * take extra steps to flush any blocks which might be in the cache. 3265 */ 3266 static sector_t ext4_bmap(struct address_space *mapping, sector_t block) 3267 { 3268 struct inode *inode = mapping->host; 3269 journal_t *journal; 3270 int err; 3271 3272 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && 3273 test_opt(inode->i_sb, DELALLOC)) { 3274 /* 3275 * With delalloc we want to sync the file 3276 * so that we can make sure we allocate 3277 * blocks for file 3278 */ 3279 filemap_write_and_wait(mapping); 3280 } 3281 3282 if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) { 3283 /* 3284 * This is a REALLY heavyweight approach, but the use of 3285 * bmap on dirty files is expected to be extremely rare: 3286 * only if we run lilo or swapon on a freshly made file 3287 * do we expect this to happen. 3288 * 3289 * (bmap requires CAP_SYS_RAWIO so this does not 3290 * represent an unprivileged user DOS attack --- we'd be 3291 * in trouble if mortal users could trigger this path at 3292 * will.) 3293 * 3294 * NB. EXT4_STATE_JDATA is not set on files other than 3295 * regular files. 
If somebody wants to bmap a directory 3296 * or symlink and gets confused because the buffer 3297 * hasn't yet been flushed to disk, they deserve 3298 * everything they get. 3299 */ 3300 3301 EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA; 3302 journal = EXT4_JOURNAL(inode); 3303 jbd2_journal_lock_updates(journal); 3304 err = jbd2_journal_flush(journal); 3305 jbd2_journal_unlock_updates(journal); 3306 3307 if (err) 3308 return 0; 3309 } 3310 3311 return generic_block_bmap(mapping, block, ext4_get_block); 3312 } 3313 3314 static int ext4_readpage(struct file *file, struct page *page) 3315 { 3316 return mpage_readpage(page, ext4_get_block); 3317 } 3318 3319 static int 3320 ext4_readpages(struct file *file, struct address_space *mapping, 3321 struct list_head *pages, unsigned nr_pages) 3322 { 3323 return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); 3324 } 3325 3326 static void ext4_invalidatepage(struct page *page, unsigned long offset) 3327 { 3328 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3329 3330 /* 3331 * If it's a full truncate we just forget about the pending dirtying 3332 */ 3333 if (offset == 0) 3334 ClearPageChecked(page); 3335 3336 if (journal) 3337 jbd2_journal_invalidatepage(journal, page, offset); 3338 else 3339 block_invalidatepage(page, offset); 3340 } 3341 3342 static int ext4_releasepage(struct page *page, gfp_t wait) 3343 { 3344 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3345 3346 WARN_ON(PageChecked(page)); 3347 if (!page_has_buffers(page)) 3348 return 0; 3349 if (journal) 3350 return jbd2_journal_try_to_free_buffers(journal, page, wait); 3351 else 3352 return try_to_free_buffers(page); 3353 } 3354 3355 /* 3356 * O_DIRECT for ext3 (or indirect map) based files 3357 * 3358 * If the O_DIRECT write will extend the file then add this inode to the 3359 * orphan list. So recovery will truncate it back to the original size 3360 * if the machine crashes during the write. 3361 * 3362 * If the O_DIRECT write is intantiating holes inside i_size and the machine 3363 * crashes then stale disk data _may_ be exposed inside the file. But current 3364 * VFS code falls back into buffered path in that case so we are safe. 3365 */ 3366 static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, 3367 const struct iovec *iov, loff_t offset, 3368 unsigned long nr_segs) 3369 { 3370 struct file *file = iocb->ki_filp; 3371 struct inode *inode = file->f_mapping->host; 3372 struct ext4_inode_info *ei = EXT4_I(inode); 3373 handle_t *handle; 3374 ssize_t ret; 3375 int orphan = 0; 3376 size_t count = iov_length(iov, nr_segs); 3377 int retries = 0; 3378 3379 if (rw == WRITE) { 3380 loff_t final_size = offset + count; 3381 3382 if (final_size > inode->i_size) { 3383 /* Credits for sb + inode write */ 3384 handle = ext4_journal_start(inode, 2); 3385 if (IS_ERR(handle)) { 3386 ret = PTR_ERR(handle); 3387 goto out; 3388 } 3389 ret = ext4_orphan_add(handle, inode); 3390 if (ret) { 3391 ext4_journal_stop(handle); 3392 goto out; 3393 } 3394 orphan = 1; 3395 ei->i_disksize = inode->i_size; 3396 ext4_journal_stop(handle); 3397 } 3398 } 3399 3400 retry: 3401 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 3402 offset, nr_segs, 3403 ext4_get_block, NULL); 3404 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 3405 goto retry; 3406 3407 if (orphan) { 3408 int err; 3409 3410 /* Credits for sb + inode write */ 3411 handle = ext4_journal_start(inode, 2); 3412 if (IS_ERR(handle)) { 3413 /* This is really bad luck. 
We've written the data
3414 * but cannot extend i_size. Bail out and pretend
3415 * the write failed... */
3416 ret = PTR_ERR(handle);
3417 goto out;
3418 }
3419 if (inode->i_nlink)
3420 ext4_orphan_del(handle, inode);
3421 if (ret > 0) {
3422 loff_t end = offset + ret;
3423 if (end > inode->i_size) {
3424 ei->i_disksize = end;
3425 i_size_write(inode, end);
3426 /*
3427 * We're going to return a positive `ret'
3428 * here due to non-zero-length I/O, so there's
3429 * no way of reporting error returns from
3430 * ext4_mark_inode_dirty() to userspace. So
3431 * ignore it.
3432 */
3433 ext4_mark_inode_dirty(handle, inode);
3434 }
3435 }
3436 err = ext4_journal_stop(handle);
3437 if (ret == 0)
3438 ret = err;
3439 }
3440 out:
3441 return ret;
3442 }
3443
3444 static int ext4_get_block_dio_write(struct inode *inode, sector_t iblock,
3445 struct buffer_head *bh_result, int create)
3446 {
3447 handle_t *handle = NULL;
3448 int ret = 0;
3449 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
3450 int dio_credits;
3451
3452 ext4_debug("ext4_get_block_dio_write: inode %lu, create flag %d\n",
3453 inode->i_ino, create);
3454 /*
3455 * The DIO VFS code passes the create = 0 flag for writes to
3456 * the middle of the file. It does this to avoid block
3457 * allocation for holes, so that stale data is not exposed
3458 * when there is a parallel buffered read (which does
3459 * not hold the i_mutex lock) while the direct IO write has
3460 * not completed. A DIO request on holes therefore falls back
3461 * to buffered IO.
3462 *
3463 * For an ext4 extent-based file, since we support fallocate,
3464 * newly allocated extents are marked uninitialized; we can
3465 * therefore fallocate blocks for holes, and a parallel
3466 * buffered read will zero out the page when it reads a hole
3467 * while the parallel DIO write to the hole has not completed.
3468 *
3469 * When we come here, we know it's a direct IO write to
3470 * the middle of the file (< i_size),
3471 * so it's safe to override the create flag from the VFS.
3472 */
3473 create = EXT4_GET_BLOCKS_DIO_CREATE_EXT;
3474
3475 if (max_blocks > DIO_MAX_BLOCKS)
3476 max_blocks = DIO_MAX_BLOCKS;
3477 dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
3478 handle = ext4_journal_start(inode, dio_credits);
3479 if (IS_ERR(handle)) {
3480 ret = PTR_ERR(handle);
3481 goto out;
3482 }
3483 ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
3484 create);
3485 if (ret > 0) {
3486 bh_result->b_size = (ret << inode->i_blkbits);
3487 ret = 0;
3488 }
3489 ext4_journal_stop(handle);
3490 out:
3491 return ret;
3492 }
3493
3494 static void ext4_free_io_end(ext4_io_end_t *io)
3495 {
3496 BUG_ON(!io);
3497 iput(io->inode);
3498 kfree(io);
3499 }
3500 static void dump_aio_dio_list(struct inode *inode)
3501 {
3502 #ifdef EXT4_DEBUG
3503 struct list_head *cur, *before, *after;
3504 ext4_io_end_t *io, *io0, *io1;
3505
3506 if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)) {
3507 ext4_debug("inode %lu aio dio list is empty\n", inode->i_ino);
3508 return;
3509 }
3510
3511 ext4_debug("Dump inode %lu aio_dio_completed_IO list\n", inode->i_ino);
3512 list_for_each_entry(io, &EXT4_I(inode)->i_aio_dio_complete_list, list) {
3513 cur = &io->list;
3514 before = cur->prev;
3515 io0 = container_of(before, ext4_io_end_t, list);
3516 after = cur->next;
3517 io1 = container_of(after, ext4_io_end_t, list);
3518
3519 ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
3520 io, inode->i_ino, io0, io1);
3521 }
3522 #endif
3523 }
3524
3525 /*
3526 * Check a range of space and convert unwritten extents to written.
3527 */
3528 static int ext4_end_aio_dio_nolock(ext4_io_end_t *io)
3529 {
3530 struct inode *inode = io->inode;
3531 loff_t offset = io->offset;
3532 size_t size = io->size;
3533 int ret = 0;
3534
3535 ext4_debug("ext4_end_aio_dio_nolock: io 0x%p from inode %lu,list->next 0x%p,"
3536 "list->prev 0x%p\n",
3537 io, inode->i_ino, io->list.next, io->list.prev);
3538
3539 if (list_empty(&io->list))
3540 return ret;
3541
3542 if (io->flag != DIO_AIO_UNWRITTEN)
3543 return ret;
3544
3545 if (offset + size <= i_size_read(inode))
3546 ret = ext4_convert_unwritten_extents(inode, offset, size);
3547
3548 if (ret < 0) {
3549 printk(KERN_EMERG "%s: failed to convert unwritten "
3550 "extents to written extents, error is %d,"
3551 " io is still on inode %lu aio dio list\n",
3552 __func__, ret, inode->i_ino);
3553 return ret;
3554 }
3555
3556 /* clear the DIO AIO unwritten flag */
3557 io->flag = 0;
3558 return ret;
3559 }
3560 /*
3561 * Work on completed aio dio, converting unwritten extents to written extents.
3562 */
3563 static void ext4_end_aio_dio_work(struct work_struct *work)
3564 {
3565 ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
3566 struct inode *inode = io->inode;
3567 int ret = 0;
3568
3569 mutex_lock(&inode->i_mutex);
3570 ret = ext4_end_aio_dio_nolock(io);
3571 if (ret >= 0) {
3572 if (!list_empty(&io->list))
3573 list_del_init(&io->list);
3574 ext4_free_io_end(io);
3575 }
3576 mutex_unlock(&inode->i_mutex);
3577 }
3578 /*
3579 * This function is called from ext4_sync_file().
3580 *
3581 * When an AIO DIO is completed, the work to convert unwritten
3582 * extents to written is queued on a workqueue but may not get
3583 * scheduled immediately. When fsync is called, we need to ensure the
3584 * conversion is complete before fsync returns.
3585 * The inode keeps track of a list of completed AIO from the DIO path
3586 * that might need the conversion.
This function walks through
3587 * the list and converts the related unwritten extents to written.
3588 */
3589 int flush_aio_dio_completed_IO(struct inode *inode)
3590 {
3591 ext4_io_end_t *io;
3592 int ret = 0;
3593 int ret2 = 0;
3594
3595 if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list))
3596 return ret;
3597
3598 dump_aio_dio_list(inode);
3599 while (!list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)) {
3600 io = list_entry(EXT4_I(inode)->i_aio_dio_complete_list.next,
3601 ext4_io_end_t, list);
3602 /*
3603 * Call ext4_end_aio_dio_nolock() to convert the completed
3604 * IO to written.
3605 *
3606 * When ext4_sync_file() is called, run_queue() may already
3607 * be about to flush the work corresponding to this io
3608 * structure. It will be upset if it finds that the io
3609 * structure related to the work to be scheduled has been freed.
3610 *
3611 * Thus we need to keep the io structure valid here after the
3612 * conversion has finished. The io structure has a flag to
3613 * avoid double conversion from both fsync and the background
3614 * work queue.
3615 */
3616 ret = ext4_end_aio_dio_nolock(io);
3617 if (ret < 0)
3618 ret2 = ret;
3619 else
3620 list_del_init(&io->list);
3621 }
3622 return (ret2 < 0) ? ret2 : 0;
3623 }
3624
3625 static ext4_io_end_t *ext4_init_io_end(struct inode *inode)
3626 {
3627 ext4_io_end_t *io = NULL;
3628
3629 io = kmalloc(sizeof(*io), GFP_NOFS);
3630
3631 if (io) {
3632 igrab(inode);
3633 io->inode = inode;
3634 io->flag = 0;
3635 io->offset = 0;
3636 io->size = 0;
3637 io->error = 0;
3638 INIT_WORK(&io->work, ext4_end_aio_dio_work);
3639 INIT_LIST_HEAD(&io->list);
3640 }
3641
3642 return io;
3643 }
3644
3645 static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
3646 ssize_t size, void *private)
3647 {
3648 ext4_io_end_t *io_end = iocb->private;
3649 struct workqueue_struct *wq;
3650
3651 /* if not async direct IO or a dio with a 0-byte write, just return */
3652 if (!io_end || !size)
3653 return;
3654
3655 ext_debug("ext4_end_io_dio(): io_end 0x%p "
3656 "for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
3657 iocb->private, io_end->inode->i_ino, iocb, offset,
3658 size);
3659
3660 /* if not an aio dio with unwritten extents, just free the io and return */
3661 if (io_end->flag != DIO_AIO_UNWRITTEN) {
3662 ext4_free_io_end(io_end);
3663 iocb->private = NULL;
3664 return;
3665 }
3666
3667 io_end->offset = offset;
3668 io_end->size = size;
3669 wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
3670
3671 /* queue the work to convert unwritten extents to written */
3672 queue_work(wq, &io_end->work);
3673
3674 /* Add the io_end to the per-inode completed aio dio list */
3675 list_add_tail(&io_end->list,
3676 &EXT4_I(io_end->inode)->i_aio_dio_complete_list);
3677 iocb->private = NULL;
3678 }
3679 /*
3680 * For ext4 extent files, ext4 will do direct-io writes to holes,
3681 * preallocated extents, and writes that extend the file, with no need
3682 * to fall back to buffered IO.
3683 *
3684 * For holes, we fallocate those blocks and mark them as uninitialized.
3685 * If those blocks were preallocated, we make sure they are split, but
3686 * still keep the range to write as uninitialized.
3687 *
3688 * The unwritten extents will be converted to written when the DIO is
3689 * completed. For async direct IO, since the IO may still be pending
3690 * when we return, we set up an end_io callback function, which will do
3691 * the conversion when the async direct IO has completed.
3692 *
3693 * If the O_DIRECT write will extend the file then add this inode to the
3694 * orphan list.
/*
 * For ext4 extent files, ext4 will do direct-io writes to holes,
 * preallocated extents, and writes that extend the file; there is no
 * need to fall back to buffered IO.
 *
 * For holes, we fallocate those blocks and mark them as uninitialized.
 * If those blocks were preallocated, we make sure they are split, but
 * still keep the range to write as uninitialized.
 *
 * The unwritten extents will be converted to written when the DIO is
 * completed. For async direct IO, since the IO may still be pending
 * when we return, we set up an end_io callback function, which will do
 * the conversion when the async direct IO is completed.
 *
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list. So recovery will truncate it back to the original size
 * if the machine crashes during the write.
 *
 */
static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
				  const struct iovec *iov, loff_t offset,
				  unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;
	size_t count = iov_length(iov, nr_segs);

	loff_t final_size = offset + count;
	if (rw == WRITE && final_size <= inode->i_size) {
		/*
		 * We could direct write to holes and fallocate.
		 *
		 * Allocated blocks to fill the hole are marked as
		 * uninitialized to prevent parallel buffered reads from
		 * exposing stale data before the DIO completes the
		 * data IO.
		 *
		 * As to previously fallocated extents, ext4 get_block
		 * will just simply mark the buffer mapped but still
		 * keep the extents uninitialized.
		 *
		 * For the non-AIO case, we will convert those unwritten
		 * extents to written after returning from
		 * blockdev_direct_IO.
		 *
		 * For async DIO, the conversion must be deferred until
		 * the IO is completed. The ext4 end_io callback function
		 * will be called to take care of the conversion work.
		 * Here, for the async case, we allocate an io_end
		 * structure to hook to the iocb.
		 */
		iocb->private = NULL;
		EXT4_I(inode)->cur_aio_dio = NULL;
		if (!is_sync_kiocb(iocb)) {
			iocb->private = ext4_init_io_end(inode);
			if (!iocb->private)
				return -ENOMEM;
			/*
			 * We save the io structure for the current async
			 * direct IO, so that later ext4_get_blocks()
			 * can flag the io structure if there are
			 * unwritten extents that need to be converted
			 * when the IO is completed.
			 */
			EXT4_I(inode)->cur_aio_dio = iocb->private;
		}

		ret = blockdev_direct_IO(rw, iocb, inode,
					 inode->i_sb->s_bdev, iov,
					 offset, nr_segs,
					 ext4_get_block_dio_write,
					 ext4_end_io_dio);
		if (iocb->private)
			EXT4_I(inode)->cur_aio_dio = NULL;
		/*
		 * The io_end structure takes a reference to the inode;
		 * that structure needs to be destroyed and the reference
		 * to the inode needs to be dropped when the IO is
		 * complete, even for a zero-byte write, or on failure.
		 *
		 * In the successful AIO DIO case, the io_end structure
		 * will be destroyed and the reference to the inode will
		 * be dropped after the end_io callback function is
		 * called.
		 *
		 * In the zero-byte write or error case, since the VFS
		 * direct IO won't invoke the end_io callback function,
		 * we need to free the end_io structure here.
		 */
		if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
			ext4_free_io_end(iocb->private);
			iocb->private = NULL;
		} else if (ret > 0 && (EXT4_I(inode)->i_state &
				       EXT4_STATE_DIO_UNWRITTEN)) {
			int err;
			/*
			 * For the non-AIO case, since the IO is already
			 * completed, we can do the conversion right here.
			 */
			err = ext4_convert_unwritten_extents(inode,
							     offset, ret);
			if (err < 0)
				ret = err;
			EXT4_I(inode)->i_state &= ~EXT4_STATE_DIO_UNWRITTEN;
		}
		return ret;
	}

	/* For writes at or past the end of the file, we fall back to the
	 * old way. */
	return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
}

static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
		return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);

	return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
}
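/*
 * Not part of the kernel code path: a hedged userspace-side sketch of
 * what ends up driving ->direct_IO above. O_DIRECT transfers must
 * normally use buffers, offsets and lengths aligned to the logical
 * block size; the 4096 below is an illustrative assumption, not a
 * requirement of this file:
 *
 *	int fd = open("/mnt/ext4/file", O_WRONLY | O_DIRECT);
 *	void *buf;
 *
 *	posix_memalign(&buf, 4096, 4096);	// aligned buffer
 *	memset(buf, 0, 4096);
 *	pwrite(fd, buf, 4096, 0);	// reaches ext4_direct_IO() above
 */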
/*
 * Pages can be marked dirty completely asynchronously from ext4's
 * journalling activity. By filemap_sync_pte(), try_to_unmap_one(), etc.
 * We cannot do much here because ->set_page_dirty is called under VFS
 * locks. The page is not necessarily locked.
 *
 * We cannot just dirty the page and leave attached buffers clean,
 * because the buffers' dirty state is "definitive". We cannot just set
 * the buffers dirty or jbddirty because all the journalling code will
 * explode.
 *
 * So what we do is to mark the page "pending dirty" and next time
 * writepage is called, propagate that into the buffers appropriately.
 */
static int ext4_journalled_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations ext4_ordered_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_ordered_write_end,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= ext4_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

static const struct address_space_operations ext4_writeback_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_writeback_write_end,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= ext4_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

static const struct address_space_operations ext4_journalled_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_journalled_write_end,
	.set_page_dirty		= ext4_journalled_set_page_dirty,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_invalidatepage,
	.releasepage		= ext4_releasepage,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

static const struct address_space_operations ext4_da_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_writepage,
	.writepages		= ext4_da_writepages,
	.sync_page		= block_sync_page,
	.write_begin		= ext4_da_write_begin,
	.write_end		= ext4_da_write_end,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_da_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= ext4_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

void ext4_set_aops(struct inode *inode)
{
	if (ext4_should_order_data(inode) &&
	    test_opt(inode->i_sb, DELALLOC))
		inode->i_mapping->a_ops = &ext4_da_aops;
	else if (ext4_should_order_data(inode))
		inode->i_mapping->a_ops = &ext4_ordered_aops;
	else if (ext4_should_writeback_data(inode) &&
		 test_opt(inode->i_sb, DELALLOC))
		inode->i_mapping->a_ops = &ext4_da_aops;
	else if (ext4_should_writeback_data(inode))
		inode->i_mapping->a_ops = &ext4_writeback_aops;
	else
		inode->i_mapping->a_ops = &ext4_journalled_aops;
}

/*
 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
 * up to the end of the block which corresponds to `from'.
 * This is required during truncate. We need to physically zero the tail end
3906 */ 3907 int ext4_block_truncate_page(handle_t *handle, 3908 struct address_space *mapping, loff_t from) 3909 { 3910 ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; 3911 unsigned offset = from & (PAGE_CACHE_SIZE-1); 3912 unsigned blocksize, length, pos; 3913 ext4_lblk_t iblock; 3914 struct inode *inode = mapping->host; 3915 struct buffer_head *bh; 3916 struct page *page; 3917 int err = 0; 3918 3919 page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, 3920 mapping_gfp_mask(mapping) & ~__GFP_FS); 3921 if (!page) 3922 return -EINVAL; 3923 3924 blocksize = inode->i_sb->s_blocksize; 3925 length = blocksize - (offset & (blocksize - 1)); 3926 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 3927 3928 /* 3929 * For "nobh" option, we can only work if we don't need to 3930 * read-in the page - otherwise we create buffers to do the IO. 3931 */ 3932 if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) && 3933 ext4_should_writeback_data(inode) && PageUptodate(page)) { 3934 zero_user(page, offset, length); 3935 set_page_dirty(page); 3936 goto unlock; 3937 } 3938 3939 if (!page_has_buffers(page)) 3940 create_empty_buffers(page, blocksize, 0); 3941 3942 /* Find the buffer that contains "offset" */ 3943 bh = page_buffers(page); 3944 pos = blocksize; 3945 while (offset >= pos) { 3946 bh = bh->b_this_page; 3947 iblock++; 3948 pos += blocksize; 3949 } 3950 3951 err = 0; 3952 if (buffer_freed(bh)) { 3953 BUFFER_TRACE(bh, "freed: skip"); 3954 goto unlock; 3955 } 3956 3957 if (!buffer_mapped(bh)) { 3958 BUFFER_TRACE(bh, "unmapped"); 3959 ext4_get_block(inode, iblock, bh, 0); 3960 /* unmapped? It's a hole - nothing to do */ 3961 if (!buffer_mapped(bh)) { 3962 BUFFER_TRACE(bh, "still unmapped"); 3963 goto unlock; 3964 } 3965 } 3966 3967 /* Ok, it's mapped. Make sure it's up-to-date */ 3968 if (PageUptodate(page)) 3969 set_buffer_uptodate(bh); 3970 3971 if (!buffer_uptodate(bh)) { 3972 err = -EIO; 3973 ll_rw_block(READ, 1, &bh); 3974 wait_on_buffer(bh); 3975 /* Uhhuh. Read error. Complain and punt. */ 3976 if (!buffer_uptodate(bh)) 3977 goto unlock; 3978 } 3979 3980 if (ext4_should_journal_data(inode)) { 3981 BUFFER_TRACE(bh, "get write access"); 3982 err = ext4_journal_get_write_access(handle, bh); 3983 if (err) 3984 goto unlock; 3985 } 3986 3987 zero_user(page, offset, length); 3988 3989 BUFFER_TRACE(bh, "zeroed end of block"); 3990 3991 err = 0; 3992 if (ext4_should_journal_data(inode)) { 3993 err = ext4_handle_dirty_metadata(handle, inode, bh); 3994 } else { 3995 if (ext4_should_order_data(inode)) 3996 err = ext4_jbd2_file_inode(handle, inode); 3997 mark_buffer_dirty(bh); 3998 } 3999 4000 unlock: 4001 unlock_page(page); 4002 page_cache_release(page); 4003 return err; 4004 } 4005 4006 /* 4007 * Probably it should be a library function... search for first non-zero word 4008 * or memcmp with zero_page, whatever is better for particular architecture. 4009 * Linus? 4010 */ 4011 static inline int all_zeroes(__le32 *p, __le32 *q) 4012 { 4013 while (p < q) 4014 if (*p++) 4015 return 0; 4016 return 1; 4017 } 4018 4019 /** 4020 * ext4_find_shared - find the indirect blocks for partial truncation. 4021 * @inode: inode in question 4022 * @depth: depth of the affected branch 4023 * @offsets: offsets of pointers in that branch (see ext4_block_to_path) 4024 * @chain: place to store the pointers to partial indirect blocks 4025 * @top: place to the (detached) top of branch 4026 * 4027 * This is a helper function used by ext4_truncate(). 
 *
 * When we do truncate() we may have to clean the ends of several
 * indirect blocks but leave the blocks themselves alive. A block is
 * partially truncated if some data below the new i_size is referred to
 * from it (and it is on the path to the first completely truncated
 * data block, indeed). We have to free the top of that path along
 * with everything to the right of the path. Since no allocation
 * past the truncation point is possible until ext4_truncate()
 * finishes, we may safely do the latter, but the top of the branch may
 * require special attention - pageout below the truncation point
 * might try to populate it.
 *
 * We atomically detach the top of the branch from the tree, store the
 * block number of its root in *@top, pointers to buffer_heads of
 * partially truncated blocks - in @chain[].bh and pointers to
 * their last elements that should not be removed - in
 * @chain[].p. The return value is the pointer to the last filled
 * element of @chain.
 *
 * The caller is left to do the actual freeing of subtrees:
 * a) free the subtree starting from *@top
 * b) free the subtrees whose roots are stored in
 *	(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 * c) free the subtrees growing from the inode past the @chain[0].
 *	(no partially truncated stuff there).
 */

static Indirect *ext4_find_shared(struct inode *inode, int depth,
				  ext4_lblk_t offsets[4], Indirect chain[4],
				  __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext4. Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}
	/* Writer: end */

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 */
static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
			      struct buffer_head *bh,
			      ext4_fsblk_t block_to_free,
			      unsigned long count, __le32 *first,
			      __le32 *last)
{
	__le32 *p;
	int	flags = EXT4_FREE_BLOCKS_FORGET;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_METADATA;

	if (try_to_extend_transaction(handle, inode)) {
		if (bh) {
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			ext4_handle_dirty_metadata(handle, inode, bh);
		}
		ext4_mark_inode_dirty(handle, inode);
		ext4_truncate_restart_trans(handle, inode,
					    blocks_for_truncate(inode));
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			ext4_journal_get_write_access(handle, bh);
		}
	}

	for (p = first; p < last; p++)
		*p = 0;

	ext4_free_blocks(handle, inode, 0, block_to_free, count, flags);
}

/**
 * ext4_free_data - free a list of data blocks
 * @handle:	handle for this transaction
 * @inode:	inode we are dealing with
 * @this_bh:	indirect buffer_head which contains *@first and *@last
 * @first:	array of block numbers
 * @last:	points immediately past the end of array
 *
 * We are freeing all blocks referred to from that array (numbers are
 * stored as little-endian 32-bit) and updating @inode->i_blocks
 * appropriately.
 *
 * We accumulate contiguous runs of blocks to free. Conveniently, if these
 * blocks are contiguous then releasing them at one time will only affect one
 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
 * actually use a lot of journal space.
 *
 * @this_bh will be %NULL if @first and @last point into the inode's direct
 * block pointers.
 */
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
	unsigned long count = 0;	   /* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;	   /* Pointer into inode/ind
					      corresponding to
					      block_to_free */
	ext4_fsblk_t nr;		   /* Current block # */
	__le32 *p;			   /* Pointer into inode/ind
					      for current block */
	int err;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				ext4_clear_blocks(handle, inode, this_bh,
						  block_to_free,
						  count, block_to_free_p, p);
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	if (count > 0)
		ext4_clear_blocks(handle, inode, this_bh, block_to_free,
				  count, block_to_free_p, p);

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at this
		 * point. However, if the data is corrupted and an indirect
		 * block pointed to itself, it would have been detached when
		 * the block was cleared. Check for this instead of OOPSing.
		 */
		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
			ext4_handle_dirty_metadata(handle, inode, this_bh);
		else
			ext4_error(inode->i_sb, __func__,
				   "circular indirect block detected, "
				   "inode=%lu, block=%llu",
				   inode->i_ino,
				   (unsigned long long) this_bh->b_blocknr);
	}
}

/**
 * ext4_free_branches - free an array of branches
 * @handle:	JBD handle for this transaction
 * @inode:	inode we are dealing with
 * @parent_bh:	the buffer_head which contains *@first and *@last
 * @first:	array of block numbers
 * @last:	pointer immediately past the end of array
 * @depth:	depth of the branches to free
 *
 * We are freeing all blocks referred to from these branches (numbers are
 * stored as little-endian 32-bit) and updating @inode->i_blocks
 * appropriately.
 */
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				ext4_error(inode->i_sb, "ext4_free_branches",
					   "Read failure, inode=%lu, block=%llu",
					   inode->i_ino, nr);
				continue;
			}

			/* This zaps the entire block. Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					   (__le32 *) bh->b_data,
					   (__le32 *) bh->b_data + addr_per_block,
					   depth);

			/*
			 * We've probably journalled the indirect block several
			 * times during the truncate. But it's no longer
			 * needed and we now drop it from the transaction via
			 * jbd2_journal_revoke().
			 *
			 * That's easy if it's exclusively part of this
			 * transaction. But if it's part of the committing
			 * transaction then jbd2_journal_forget() will simply
			 * brelse() it. That means that if the underlying
			 * block is reallocated in ext4_get_block(),
			 * unmap_underlying_metadata() will find this block
			 * and will try to get rid of it. damn, damn.
			 *
			 * If this block has already been committed to the
			 * journal, a revoke record will be written. And
			 * revoke records must be emitted *before* clearing
			 * this block's bit in the bitmaps.
			 */
			ext4_forget(handle, 1, inode, bh, bh->b_blocknr);

			/*
			 * Everything below this pointer has been
			 * released. Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the updating of the
			 * bitmap block which owns it. So make some room in
			 * the journal.
			 *
			 * We zero the parent pointer *after* freeing its
			 * pointee in the bitmaps, so if extend_transaction()
			 * for some reason fails to put the bitmap changes and
			 * the release into the same transaction, recovery
			 * will merely complain about releasing a free block,
			 * rather than leaking blocks.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext4_mark_inode_dirty(handle, inode);
				ext4_truncate_restart_trans(handle, inode,
					    blocks_for_truncate(inode));
			}

			ext4_free_blocks(handle, inode, 0, nr, 1,
					 EXT4_FREE_BLOCKS_METADATA);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
								   parent_bh)) {
					*p = 0;
					BUFFER_TRACE(parent_bh,
						"call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}

int ext4_can_truncate(struct inode *inode)
{
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return 0;
	if (S_ISREG(inode->i_mode))
		return 1;
	if (S_ISDIR(inode->i_mode))
		return 1;
	if (S_ISLNK(inode->i_mode))
		return !ext4_inode_is_fast_symlink(inode);
	return 0;
}

/*
 * ext4_truncate()
 *
 * We block out ext4_get_block() block instantiations across the entire
 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
 * simultaneously on behalf of the same inode.
 *
 * As we work through the truncate and commit bits of it to the journal
 * there is one core, guiding principle: the file's tree must always be
 * consistent on disk. We must be able to restart the truncate after a
 * crash.
 *
 * The file's tree may be transiently inconsistent in memory (although it
 * probably isn't), but whenever we close off and commit a journal
 * transaction, the contents of (the filesystem + the journal) must be
 * consistent and restartable. It's pretty simple, really: bottom up,
 * right to left (although left-to-right works OK too).
 *
 * Note that at recovery time, journal replay occurs *before* the restart of
 * truncate against the orphan inode list.
 *
 * The committed inode has the new, desired i_size (which is the same as
 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
 * that this inode's truncate did not complete and it will again call
 * ext4_truncate() to have another go. So there will be instantiated blocks
 * to the right of the truncation point in a crashed ext4 filesystem. But
 * that's fine - as long as they are linked from the inode, the post-crash
 * ext4_truncate() run will find them and release them.
4395 */ 4396 void ext4_truncate(struct inode *inode) 4397 { 4398 handle_t *handle; 4399 struct ext4_inode_info *ei = EXT4_I(inode); 4400 __le32 *i_data = ei->i_data; 4401 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); 4402 struct address_space *mapping = inode->i_mapping; 4403 ext4_lblk_t offsets[4]; 4404 Indirect chain[4]; 4405 Indirect *partial; 4406 __le32 nr = 0; 4407 int n; 4408 ext4_lblk_t last_block; 4409 unsigned blocksize = inode->i_sb->s_blocksize; 4410 4411 if (!ext4_can_truncate(inode)) 4412 return; 4413 4414 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) 4415 ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE; 4416 4417 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { 4418 ext4_ext_truncate(inode); 4419 return; 4420 } 4421 4422 handle = start_transaction(inode); 4423 if (IS_ERR(handle)) 4424 return; /* AKPM: return what? */ 4425 4426 last_block = (inode->i_size + blocksize-1) 4427 >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); 4428 4429 if (inode->i_size & (blocksize - 1)) 4430 if (ext4_block_truncate_page(handle, mapping, inode->i_size)) 4431 goto out_stop; 4432 4433 n = ext4_block_to_path(inode, last_block, offsets, NULL); 4434 if (n == 0) 4435 goto out_stop; /* error */ 4436 4437 /* 4438 * OK. This truncate is going to happen. We add the inode to the 4439 * orphan list, so that if this truncate spans multiple transactions, 4440 * and we crash, we will resume the truncate when the filesystem 4441 * recovers. It also marks the inode dirty, to catch the new size. 4442 * 4443 * Implication: the file must always be in a sane, consistent 4444 * truncatable state while each transaction commits. 4445 */ 4446 if (ext4_orphan_add(handle, inode)) 4447 goto out_stop; 4448 4449 /* 4450 * From here we block out all ext4_get_block() callers who want to 4451 * modify the block allocation tree. 4452 */ 4453 down_write(&ei->i_data_sem); 4454 4455 ext4_discard_preallocations(inode); 4456 4457 /* 4458 * The orphan list entry will now protect us from any crash which 4459 * occurs before the truncate completes, so it is now safe to propagate 4460 * the new, shorter inode size (held for now in i_size) into the 4461 * on-disk inode. We do this via i_disksize, which is the value which 4462 * ext4 *really* writes onto the disk inode. 4463 */ 4464 ei->i_disksize = inode->i_size; 4465 4466 if (n == 1) { /* direct blocks */ 4467 ext4_free_data(handle, inode, NULL, i_data+offsets[0], 4468 i_data + EXT4_NDIR_BLOCKS); 4469 goto do_indirects; 4470 } 4471 4472 partial = ext4_find_shared(inode, n, offsets, chain, &nr); 4473 /* Kill the top of shared branch (not detached) */ 4474 if (nr) { 4475 if (partial == chain) { 4476 /* Shared branch grows from the inode */ 4477 ext4_free_branches(handle, inode, NULL, 4478 &nr, &nr+1, (chain+n-1) - partial); 4479 *partial->p = 0; 4480 /* 4481 * We mark the inode dirty prior to restart, 4482 * and prior to stop. No need for it here. 
4483 */ 4484 } else { 4485 /* Shared branch grows from an indirect block */ 4486 BUFFER_TRACE(partial->bh, "get_write_access"); 4487 ext4_free_branches(handle, inode, partial->bh, 4488 partial->p, 4489 partial->p+1, (chain+n-1) - partial); 4490 } 4491 } 4492 /* Clear the ends of indirect blocks on the shared branch */ 4493 while (partial > chain) { 4494 ext4_free_branches(handle, inode, partial->bh, partial->p + 1, 4495 (__le32*)partial->bh->b_data+addr_per_block, 4496 (chain+n-1) - partial); 4497 BUFFER_TRACE(partial->bh, "call brelse"); 4498 brelse(partial->bh); 4499 partial--; 4500 } 4501 do_indirects: 4502 /* Kill the remaining (whole) subtrees */ 4503 switch (offsets[0]) { 4504 default: 4505 nr = i_data[EXT4_IND_BLOCK]; 4506 if (nr) { 4507 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1); 4508 i_data[EXT4_IND_BLOCK] = 0; 4509 } 4510 case EXT4_IND_BLOCK: 4511 nr = i_data[EXT4_DIND_BLOCK]; 4512 if (nr) { 4513 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2); 4514 i_data[EXT4_DIND_BLOCK] = 0; 4515 } 4516 case EXT4_DIND_BLOCK: 4517 nr = i_data[EXT4_TIND_BLOCK]; 4518 if (nr) { 4519 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3); 4520 i_data[EXT4_TIND_BLOCK] = 0; 4521 } 4522 case EXT4_TIND_BLOCK: 4523 ; 4524 } 4525 4526 up_write(&ei->i_data_sem); 4527 inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 4528 ext4_mark_inode_dirty(handle, inode); 4529 4530 /* 4531 * In a multi-transaction truncate, we only make the final transaction 4532 * synchronous 4533 */ 4534 if (IS_SYNC(inode)) 4535 ext4_handle_sync(handle); 4536 out_stop: 4537 /* 4538 * If this was a simple ftruncate(), and the file will remain alive 4539 * then we need to clear up the orphan record which we created above. 4540 * However, if this was a real unlink then we were called by 4541 * ext4_delete_inode(), and we allow that function to clean up the 4542 * orphan info for us. 4543 */ 4544 if (inode->i_nlink) 4545 ext4_orphan_del(handle, inode); 4546 4547 ext4_journal_stop(handle); 4548 } 4549 4550 /* 4551 * ext4_get_inode_loc returns with an extra refcount against the inode's 4552 * underlying buffer_head on success. If 'in_mem' is true, we have all 4553 * data in memory that is needed to recreate the on-disk version of this 4554 * inode. 
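 *
 * As a worked example of the lookup arithmetic below (the numbers are
 * illustrative assumptions, not requirements): with 4096-byte blocks,
 * 256-byte inodes and 8192 inodes per group, inode number 8200 gives
 * block_group = (8200 - 1) / 8192 = 1, inode_offset = (8200 - 1) % 8192
 * = 7 and inodes_per_block = 4096 / 256 = 16, so the inode lives in the
 * first block of group 1's inode table, at byte offset 7 * 256 within
 * that block.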
4555 */ 4556 static int __ext4_get_inode_loc(struct inode *inode, 4557 struct ext4_iloc *iloc, int in_mem) 4558 { 4559 struct ext4_group_desc *gdp; 4560 struct buffer_head *bh; 4561 struct super_block *sb = inode->i_sb; 4562 ext4_fsblk_t block; 4563 int inodes_per_block, inode_offset; 4564 4565 iloc->bh = NULL; 4566 if (!ext4_valid_inum(sb, inode->i_ino)) 4567 return -EIO; 4568 4569 iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); 4570 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); 4571 if (!gdp) 4572 return -EIO; 4573 4574 /* 4575 * Figure out the offset within the block group inode table 4576 */ 4577 inodes_per_block = (EXT4_BLOCK_SIZE(sb) / EXT4_INODE_SIZE(sb)); 4578 inode_offset = ((inode->i_ino - 1) % 4579 EXT4_INODES_PER_GROUP(sb)); 4580 block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); 4581 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); 4582 4583 bh = sb_getblk(sb, block); 4584 if (!bh) { 4585 ext4_error(sb, "ext4_get_inode_loc", "unable to read " 4586 "inode block - inode=%lu, block=%llu", 4587 inode->i_ino, block); 4588 return -EIO; 4589 } 4590 if (!buffer_uptodate(bh)) { 4591 lock_buffer(bh); 4592 4593 /* 4594 * If the buffer has the write error flag, we have failed 4595 * to write out another inode in the same block. In this 4596 * case, we don't have to read the block because we may 4597 * read the old inode data successfully. 4598 */ 4599 if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) 4600 set_buffer_uptodate(bh); 4601 4602 if (buffer_uptodate(bh)) { 4603 /* someone brought it uptodate while we waited */ 4604 unlock_buffer(bh); 4605 goto has_buffer; 4606 } 4607 4608 /* 4609 * If we have all information of the inode in memory and this 4610 * is the only valid inode in the block, we need not read the 4611 * block. 4612 */ 4613 if (in_mem) { 4614 struct buffer_head *bitmap_bh; 4615 int i, start; 4616 4617 start = inode_offset & ~(inodes_per_block - 1); 4618 4619 /* Is the inode bitmap in cache? */ 4620 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); 4621 if (!bitmap_bh) 4622 goto make_io; 4623 4624 /* 4625 * If the inode bitmap isn't in cache then the 4626 * optimisation may end up performing two reads instead 4627 * of one, so skip it. 4628 */ 4629 if (!buffer_uptodate(bitmap_bh)) { 4630 brelse(bitmap_bh); 4631 goto make_io; 4632 } 4633 for (i = start; i < start + inodes_per_block; i++) { 4634 if (i == inode_offset) 4635 continue; 4636 if (ext4_test_bit(i, bitmap_bh->b_data)) 4637 break; 4638 } 4639 brelse(bitmap_bh); 4640 if (i == start + inodes_per_block) { 4641 /* all other inodes are free, so skip I/O */ 4642 memset(bh->b_data, 0, bh->b_size); 4643 set_buffer_uptodate(bh); 4644 unlock_buffer(bh); 4645 goto has_buffer; 4646 } 4647 } 4648 4649 make_io: 4650 /* 4651 * If we need to do any I/O, try to pre-readahead extra 4652 * blocks from the inode table. 
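 *
 * The readahead window below is aligned down to a power-of-2 boundary.
 * For example (illustrative numbers only): with s_inode_readahead_blks
 * = 32 and block = 1000, b = 1000 & ~31 = 992 and end = 992 + 32 =
 * 1024; the window is then clamped so it neither starts before the
 * inode table nor runs past the blocks that actually contain used
 * inodes.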
4653 */ 4654 if (EXT4_SB(sb)->s_inode_readahead_blks) { 4655 ext4_fsblk_t b, end, table; 4656 unsigned num; 4657 4658 table = ext4_inode_table(sb, gdp); 4659 /* s_inode_readahead_blks is always a power of 2 */ 4660 b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1); 4661 if (table > b) 4662 b = table; 4663 end = b + EXT4_SB(sb)->s_inode_readahead_blks; 4664 num = EXT4_INODES_PER_GROUP(sb); 4665 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 4666 EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) 4667 num -= ext4_itable_unused_count(sb, gdp); 4668 table += num / inodes_per_block; 4669 if (end > table) 4670 end = table; 4671 while (b <= end) 4672 sb_breadahead(sb, b++); 4673 } 4674 4675 /* 4676 * There are other valid inodes in the buffer, this inode 4677 * has in-inode xattrs, or we don't have this inode in memory. 4678 * Read the block from disk. 4679 */ 4680 get_bh(bh); 4681 bh->b_end_io = end_buffer_read_sync; 4682 submit_bh(READ_META, bh); 4683 wait_on_buffer(bh); 4684 if (!buffer_uptodate(bh)) { 4685 ext4_error(sb, __func__, 4686 "unable to read inode block - inode=%lu, " 4687 "block=%llu", inode->i_ino, block); 4688 brelse(bh); 4689 return -EIO; 4690 } 4691 } 4692 has_buffer: 4693 iloc->bh = bh; 4694 return 0; 4695 } 4696 4697 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) 4698 { 4699 /* We have all inode data except xattrs in memory here. */ 4700 return __ext4_get_inode_loc(inode, iloc, 4701 !(EXT4_I(inode)->i_state & EXT4_STATE_XATTR)); 4702 } 4703 4704 void ext4_set_inode_flags(struct inode *inode) 4705 { 4706 unsigned int flags = EXT4_I(inode)->i_flags; 4707 4708 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); 4709 if (flags & EXT4_SYNC_FL) 4710 inode->i_flags |= S_SYNC; 4711 if (flags & EXT4_APPEND_FL) 4712 inode->i_flags |= S_APPEND; 4713 if (flags & EXT4_IMMUTABLE_FL) 4714 inode->i_flags |= S_IMMUTABLE; 4715 if (flags & EXT4_NOATIME_FL) 4716 inode->i_flags |= S_NOATIME; 4717 if (flags & EXT4_DIRSYNC_FL) 4718 inode->i_flags |= S_DIRSYNC; 4719 } 4720 4721 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ 4722 void ext4_get_inode_flags(struct ext4_inode_info *ei) 4723 { 4724 unsigned int flags = ei->vfs_inode.i_flags; 4725 4726 ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL| 4727 EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL); 4728 if (flags & S_SYNC) 4729 ei->i_flags |= EXT4_SYNC_FL; 4730 if (flags & S_APPEND) 4731 ei->i_flags |= EXT4_APPEND_FL; 4732 if (flags & S_IMMUTABLE) 4733 ei->i_flags |= EXT4_IMMUTABLE_FL; 4734 if (flags & S_NOATIME) 4735 ei->i_flags |= EXT4_NOATIME_FL; 4736 if (flags & S_DIRSYNC) 4737 ei->i_flags |= EXT4_DIRSYNC_FL; 4738 } 4739 4740 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, 4741 struct ext4_inode_info *ei) 4742 { 4743 blkcnt_t i_blocks ; 4744 struct inode *inode = &(ei->vfs_inode); 4745 struct super_block *sb = inode->i_sb; 4746 4747 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 4748 EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) { 4749 /* we are using combined 48 bit field */ 4750 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | 4751 le32_to_cpu(raw_inode->i_blocks_lo); 4752 if (ei->i_flags & EXT4_HUGE_FILE_FL) { 4753 /* i_blocks represent file system block size */ 4754 return i_blocks << (inode->i_blkbits - 9); 4755 } else { 4756 return i_blocks; 4757 } 4758 } else { 4759 return le32_to_cpu(raw_inode->i_blocks_lo); 4760 } 4761 } 4762 4763 struct inode *ext4_iget(struct super_block *sb, unsigned long ino) 4764 { 4765 struct ext4_iloc iloc; 4766 struct ext4_inode *raw_inode; 4767 struct ext4_inode_info *ei; 4768 
	struct inode *inode;
	journal_t *journal = EXT4_SB(sb)->s_journal;
	long ret;
	int block;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ei = EXT4_I(inode);
	iloc.bh = NULL;

	ret = __ext4_get_inode_loc(inode, &iloc, 0);
	if (ret < 0)
		goto bad_inode;
	raw_inode = ext4_raw_inode(&iloc);
	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);

	ei->i_state = 0;
	ei->i_dir_start_lookup = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes;
	 * the test is the same one that e2fsck uses.
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0) {
		if (inode->i_mode == 0 ||
		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
			/* this inode is deleted */
			ret = -ESTALE;
			goto bad_inode;
		}
		/* The only unlinked inodes we let through here have
		 * valid i_mode and are being read by the orphan
		 * recovery code: that's fine, we're about to complete
		 * the process of deleting those. */
	}
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
		ei->i_file_acl |=
			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
	inode->i_size = ext4_isize(raw_inode);
	ei->i_disksize = inode->i_size;
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
#endif
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_block_group = iloc.block_group;
	ei->i_last_alloc_group = ~0;
	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (block = 0; block < EXT4_N_BLOCKS; block++)
		ei->i_data[block] = raw_inode->i_block[block];
	INIT_LIST_HEAD(&ei->i_orphan);

	/*
	 * Set transaction id's of transactions that have to be committed
	 * to finish f[data]sync. We set them to currently running transaction
	 * as we cannot be sure that the inode or some of its metadata isn't
	 * part of the transaction - the inode could have been reclaimed and
	 * now it is reread from disk.
4843 */ 4844 if (journal) { 4845 transaction_t *transaction; 4846 tid_t tid; 4847 4848 spin_lock(&journal->j_state_lock); 4849 if (journal->j_running_transaction) 4850 transaction = journal->j_running_transaction; 4851 else 4852 transaction = journal->j_committing_transaction; 4853 if (transaction) 4854 tid = transaction->t_tid; 4855 else 4856 tid = journal->j_commit_sequence; 4857 spin_unlock(&journal->j_state_lock); 4858 ei->i_sync_tid = tid; 4859 ei->i_datasync_tid = tid; 4860 } 4861 4862 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4863 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 4864 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 4865 EXT4_INODE_SIZE(inode->i_sb)) { 4866 ret = -EIO; 4867 goto bad_inode; 4868 } 4869 if (ei->i_extra_isize == 0) { 4870 /* The extra space is currently unused. Use it. */ 4871 ei->i_extra_isize = sizeof(struct ext4_inode) - 4872 EXT4_GOOD_OLD_INODE_SIZE; 4873 } else { 4874 __le32 *magic = (void *)raw_inode + 4875 EXT4_GOOD_OLD_INODE_SIZE + 4876 ei->i_extra_isize; 4877 if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) 4878 ei->i_state |= EXT4_STATE_XATTR; 4879 } 4880 } else 4881 ei->i_extra_isize = 0; 4882 4883 EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); 4884 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); 4885 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); 4886 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); 4887 4888 inode->i_version = le32_to_cpu(raw_inode->i_disk_version); 4889 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4890 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 4891 inode->i_version |= 4892 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; 4893 } 4894 4895 ret = 0; 4896 if (ei->i_file_acl && 4897 !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { 4898 ext4_error(sb, __func__, 4899 "bad extended attribute block %llu in inode #%lu", 4900 ei->i_file_acl, inode->i_ino); 4901 ret = -EIO; 4902 goto bad_inode; 4903 } else if (ei->i_flags & EXT4_EXTENTS_FL) { 4904 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 4905 (S_ISLNK(inode->i_mode) && 4906 !ext4_inode_is_fast_symlink(inode))) 4907 /* Validate extent which is part of inode */ 4908 ret = ext4_ext_check_inode(inode); 4909 } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 4910 (S_ISLNK(inode->i_mode) && 4911 !ext4_inode_is_fast_symlink(inode))) { 4912 /* Validate block references which are part of inode */ 4913 ret = ext4_check_inode_blockref(inode); 4914 } 4915 if (ret) 4916 goto bad_inode; 4917 4918 if (S_ISREG(inode->i_mode)) { 4919 inode->i_op = &ext4_file_inode_operations; 4920 inode->i_fop = &ext4_file_operations; 4921 ext4_set_aops(inode); 4922 } else if (S_ISDIR(inode->i_mode)) { 4923 inode->i_op = &ext4_dir_inode_operations; 4924 inode->i_fop = &ext4_dir_operations; 4925 } else if (S_ISLNK(inode->i_mode)) { 4926 if (ext4_inode_is_fast_symlink(inode)) { 4927 inode->i_op = &ext4_fast_symlink_inode_operations; 4928 nd_terminate_link(ei->i_data, inode->i_size, 4929 sizeof(ei->i_data) - 1); 4930 } else { 4931 inode->i_op = &ext4_symlink_inode_operations; 4932 ext4_set_aops(inode); 4933 } 4934 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || 4935 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { 4936 inode->i_op = &ext4_special_inode_operations; 4937 if (raw_inode->i_block[0]) 4938 init_special_inode(inode, inode->i_mode, 4939 old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 4940 else 4941 init_special_inode(inode, inode->i_mode, 4942 
new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	} else {
		ret = -EIO;
		ext4_error(inode->i_sb, __func__,
			   "bogus i_mode (%o) for inode=%lu",
			   inode->i_mode, inode->i_ino);
		goto bad_inode;
	}
	brelse(iloc.bh);
	ext4_set_inode_flags(inode);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	brelse(iloc.bh);
	iget_failed(inode);
	return ERR_PTR(ret);
}

static int ext4_inode_blocks_set(handle_t *handle,
				 struct ext4_inode *raw_inode,
				 struct ext4_inode_info *ei)
{
	struct inode *inode = &(ei->vfs_inode);
	u64 i_blocks = inode->i_blocks;
	struct super_block *sb = inode->i_sb;

	if (i_blocks <= ~0U) {
		/*
		 * i_blocks can be represented in a 32 bit variable
		 * as a multiple of 512 bytes
		 */
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = 0;
		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
		return 0;
	}
	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
		return -EFBIG;

	if (i_blocks <= 0xffffffffffffULL) {
		/*
		 * i_blocks can be represented in a 48 bit variable
		 * as a multiple of 512 bytes
		 */
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
	} else {
		ei->i_flags |= EXT4_HUGE_FILE_FL;
		/* i_blocks is stored in units of the file system block size */
		i_blocks = i_blocks >> (inode->i_blkbits - 9);
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
	}
	return 0;
}

/*
 * Post the struct inode info into an on-disk inode location in the
 * buffer-cache. This gobbles the caller's reference to the
 * buffer_head in the inode location struct.
 *
 * The caller must have write access to iloc->bh.
 */
static int ext4_do_update_inode(handle_t *handle,
				struct inode *inode,
				struct ext4_iloc *iloc)
{
	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct buffer_head *bh = iloc->bh;
	int err = 0, rc, block;

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ei->i_state & EXT4_STATE_NEW)
		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);

	ext4_get_inode_flags(ei);
	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
		/*
		 * Fix up interoperability with old kernels.
Otherwise, old inodes get
		 * re-used with the upper 16 bits of the uid/gid intact.
		 */
		if (!ei->i_dtime) {
			raw_inode->i_uid_high =
				cpu_to_le16(high_16_bits(inode->i_uid));
			raw_inode->i_gid_high =
				cpu_to_le16(high_16_bits(inode->i_gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low =
			cpu_to_le16(fs_high2lowuid(inode->i_uid));
		raw_inode->i_gid_low =
			cpu_to_le16(fs_high2lowgid(inode->i_gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);

	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);

	if (ext4_inode_blocks_set(handle, raw_inode, ei))
		goto out_brelse;
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_HURD))
		raw_inode->i_file_acl_high =
			cpu_to_le16(ei->i_file_acl >> 32);
	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
	ext4_isize_set(raw_inode, ei->i_disksize);
	if (ei->i_disksize > 0x7fffffffULL) {
		struct super_block *sb = inode->i_sb;
		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
		    EXT4_SB(sb)->s_es->s_rev_level ==
			cpu_to_le32(EXT4_GOOD_OLD_REV)) {
			/* If this is the first large file
			 * created, add a flag to the superblock.
			 */
			err = ext4_journal_get_write_access(handle,
					EXT4_SB(sb)->s_sbh);
			if (err)
				goto out_brelse;
			ext4_update_dynamic_rev(sb);
			EXT4_SET_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
			sb->s_dirt = 1;
			ext4_handle_sync(handle);
			err = ext4_handle_dirty_metadata(handle, inode,
					EXT4_SB(sb)->s_sbh);
		}
	}
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else
		for (block = 0; block < EXT4_N_BLOCKS; block++)
			raw_inode->i_block[block] = ei->i_data[block];

	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
	if (ei->i_extra_isize) {
		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
			raw_inode->i_version_hi =
				cpu_to_le32(inode->i_version >> 32);
		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
	}

	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
	rc = ext4_handle_dirty_metadata(handle, inode, bh);
	if (!err)
		err = rc;
	ei->i_state &= ~EXT4_STATE_NEW;

	ext4_update_inode_fsync_trans(handle, inode, 0);
out_brelse:
	brelse(bh);
	ext4_std_error(inode->i_sb, err);
	return err;
}

/*
 * ext4_write_inode()
 *
 * We are called from a few places:
 *
 * - Within generic_file_write() for O_SYNC files.
 *   Here, there will be no transaction running. We wait for any running
 *   transaction to commit.
 *
 * - Within sys_sync(), kupdate and such.
 *   We wait on commit, if told to.
 *
 * - Within prune_icache() (PF_MEMALLOC == true)
 *   Here we simply return. We can't afford to block kswapd on the
 *   journal commit.
 *
 * In all cases it is actually safe for us to return without doing anything,
 * because the inode has been copied into a raw inode buffer in
 * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
 * knfsd.
 *
 * Note that we are absolutely dependent upon all inode dirtiers doing the
 * right thing: they *must* call mark_inode_dirty() after dirtying info in
 * which we are interested.
 *
 * It would be a bug for them to not do this. The code:
 *
 *	mark_inode_dirty(inode)
 *	stuff();
 *	inode->i_size = expr;
 *
 * is in error because a kswapd-driven write_inode() could occur while
 * `stuff()' is running, and the new i_size will be lost. Plus the inode
 * will no longer be on the superblock's dirty inode list.
 */
int ext4_write_inode(struct inode *inode, int wait)
{
	int err;

	if (current->flags & PF_MEMALLOC)
		return 0;

	if (EXT4_SB(inode->i_sb)->s_journal) {
		if (ext4_journal_current_handle()) {
			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
			dump_stack();
			return -EIO;
		}

		if (!wait)
			return 0;

		err = ext4_force_commit(inode->i_sb);
	} else {
		struct ext4_iloc iloc;

		err = ext4_get_inode_loc(inode, &iloc);
		if (err)
			return err;
		if (wait)
			sync_dirty_buffer(iloc.bh);
		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
			ext4_error(inode->i_sb, __func__,
				   "IO error syncing inode, "
				   "inode=%lu, block=%llu",
				   inode->i_ino,
				   (unsigned long long)iloc.bh->b_blocknr);
			err = -EIO;
		}
	}
	return err;
}

/*
 * ext4_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible. In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk. (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Another thing we have to ensure is that if we are in ordered mode
 * and the inode is still attached to the committing transaction, we
 * must start writeout of all the dirty pages which are being truncated.
 * This way we are sure that all the data written in the previous
 * transaction are already on disk (truncate waits for pages under
 * writeback).
 *
 * Called with inode->i_mutex down.
 */
int ext4_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error, rc = 0;
	const unsigned int ia_valid = attr->ia_valid;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
	    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
		handle_t *handle;

		/* (user+group)*(old+new) structure, inode write (sb,
		 * inode block, ?
- but truncate inode update has it) */ 5236 handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+ 5237 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3); 5238 if (IS_ERR(handle)) { 5239 error = PTR_ERR(handle); 5240 goto err_out; 5241 } 5242 error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0; 5243 if (error) { 5244 ext4_journal_stop(handle); 5245 return error; 5246 } 5247 /* Update corresponding info in inode so that everything is in 5248 * one transaction */ 5249 if (attr->ia_valid & ATTR_UID) 5250 inode->i_uid = attr->ia_uid; 5251 if (attr->ia_valid & ATTR_GID) 5252 inode->i_gid = attr->ia_gid; 5253 error = ext4_mark_inode_dirty(handle, inode); 5254 ext4_journal_stop(handle); 5255 } 5256 5257 if (attr->ia_valid & ATTR_SIZE) { 5258 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) { 5259 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 5260 5261 if (attr->ia_size > sbi->s_bitmap_maxbytes) { 5262 error = -EFBIG; 5263 goto err_out; 5264 } 5265 } 5266 } 5267 5268 if (S_ISREG(inode->i_mode) && 5269 attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) { 5270 handle_t *handle; 5271 5272 handle = ext4_journal_start(inode, 3); 5273 if (IS_ERR(handle)) { 5274 error = PTR_ERR(handle); 5275 goto err_out; 5276 } 5277 5278 error = ext4_orphan_add(handle, inode); 5279 EXT4_I(inode)->i_disksize = attr->ia_size; 5280 rc = ext4_mark_inode_dirty(handle, inode); 5281 if (!error) 5282 error = rc; 5283 ext4_journal_stop(handle); 5284 5285 if (ext4_should_order_data(inode)) { 5286 error = ext4_begin_ordered_truncate(inode, 5287 attr->ia_size); 5288 if (error) { 5289 /* Do as much error cleanup as possible */ 5290 handle = ext4_journal_start(inode, 3); 5291 if (IS_ERR(handle)) { 5292 ext4_orphan_del(NULL, inode); 5293 goto err_out; 5294 } 5295 ext4_orphan_del(handle, inode); 5296 ext4_journal_stop(handle); 5297 goto err_out; 5298 } 5299 } 5300 } 5301 5302 rc = inode_setattr(inode, attr); 5303 5304 /* If inode_setattr's call to ext4_truncate failed to get a 5305 * transaction handle at all, we need to clean up the in-core 5306 * orphan list manually. */ 5307 if (inode->i_nlink) 5308 ext4_orphan_del(NULL, inode); 5309 5310 if (!rc && (ia_valid & ATTR_MODE)) 5311 rc = ext4_acl_chmod(inode); 5312 5313 err_out: 5314 ext4_std_error(inode->i_sb, error); 5315 if (!error) 5316 error = rc; 5317 return error; 5318 } 5319 5320 int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, 5321 struct kstat *stat) 5322 { 5323 struct inode *inode; 5324 unsigned long delalloc_blocks; 5325 5326 inode = dentry->d_inode; 5327 generic_fillattr(inode, stat); 5328 5329 /* 5330 * We can't update i_blocks if the block allocation is delayed 5331 * otherwise in the case of system crash before the real block 5332 * allocation is done, we will have i_blocks inconsistent with 5333 * on-disk file blocks. 5334 * We always keep i_blocks updated together with real 5335 * allocation. But to not confuse with user, stat 5336 * will return the blocks that include the delayed allocation 5337 * blocks for this file. 
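 *
 * As a small worked example (block size assumed to be 4096 for
 * illustration): if generic_fillattr() reported 80 512-byte sectors
 * and the inode has 3 delayed-allocation blocks reserved, the
 * adjustment below adds (3 << 12) >> 9 = 24 sectors, so stat->blocks
 * becomes 104.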
 */
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
	return 0;
}

static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
				      int chunk)
{
	int indirects;

	/* if nrblocks are contiguous */
	if (chunk) {
		/*
		 * With N contiguous data blocks, we need at most
		 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks,
		 * 2 dindirect blocks and 1 tindirect block
		 */
		indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb);
		return indirects + 3;
	}
	/*
	 * If nrblocks are not contiguous, then, in the worst case, each
	 * block touches an indirect block, and each indirect block touches
	 * a double indirect block, plus a triple indirect block
	 */
	indirects = nrblocks * 2 + 1;
	return indirects;
}

static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
		return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
}

/*
 * Account for index blocks, block group bitmaps and block group
 * descriptor blocks if we modify data blocks and index blocks.
 * In the worst case, the index blocks spread over different block
 * groups.
 *
 * If the data blocks are discontiguous, they may spread over different
 * block groups too. If they are contiguous, with flexbg, they could
 * still cross a block group boundary.
 *
 * Also account for the superblock, inode, quota and xattr blocks.
 */
int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
	int gdpblocks;
	int idxblocks;
	int ret = 0;

	/*
	 * How many index blocks do we need to touch to modify nrblocks?
	 * The "chunk" flag indicates whether nrblocks is physically
	 * contiguous on disk.
	 *
	 * Direct IO and fallocate call get_block to allocate one single
	 * extent at a time, so they can set the "chunk" flag.
	 */
	idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);

	ret = idxblocks;

	/*
	 * Now let's see how many group bitmaps and group descriptors
	 * need to be accounted for.
	 */
	groups = idxblocks;
	if (chunk)
		groups += 1;
	else
		groups += nrblocks;

	gdpblocks = groups;
	if (groups > ngroups)
		groups = ngroups;
	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;

	/* bitmaps and block group descriptor blocks */
	ret += groups + gdpblocks;

	/* Blocks for super block, inode, quota and xattr blocks */
	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);

	return ret;
}
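/*
 * A worked example of the accounting above, under illustrative
 * assumptions only (one contiguous chunk, so chunk == 1, and suppose
 * ext4_index_trans_blocks() returned idxblocks = 5): then
 * groups = 5 + 1 = 6, so up to 6 bitmap blocks and up to 6 (capped at
 * s_gdb_count) descriptor blocks are charged, giving
 * ret = 5 + 6 + 6 + EXT4_META_TRANS_BLOCKS(sb). The real numbers
 * depend on the filesystem geometry; this is only meant to show how
 * the terms combine.
 */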
/*
 * Calculate the total number of credits to reserve so that the
 * modification of a single page fits into a single transaction,
 * which may include multiple chunks of block allocations.
 *
 * This could be called via ext4_write_begin().
 *
 * We need to consider the worst case, with one new block per extent.
 */
int ext4_writepage_trans_blocks(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);
	int ret;

	ret = ext4_meta_trans_blocks(inode, bpp, 0);

	/* Account for data blocks for journalled mode */
	if (ext4_should_journal_data(inode))
		ret += bpp;
	return ret;
}

/*
 * Calculate the journal credits for a chunk of data modification.
 *
 * This is called from DIO, fallocate, or whoever else calls
 * ext4_get_blocks() to map/allocate a chunk of contiguous disk blocks.
 *
 * Journal buffers for data blocks are not included here, as DIO and
 * fallocate do not need to journal data buffers.
 */
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
	return ext4_meta_trans_blocks(inode, nrblocks, 1);
}

/*
 * The caller must have previously called ext4_reserve_inode_write().
 * Given this, we know that the caller already has write access to
 * iloc->bh.
 */
int ext4_mark_iloc_dirty(handle_t *handle,
			 struct inode *inode, struct ext4_iloc *iloc)
{
	int err = 0;

	if (test_opt(inode->i_sb, I_VERSION))
		inode_inc_iversion(inode);

	/* ext4_do_update_inode() consumes one bh reference (b_count) */
	get_bh(iloc->bh);

	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
	err = ext4_do_update_inode(handle, inode, iloc);
	put_bh(iloc->bh);
	return err;
}

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh.  This _must_ be cleaned up later.
 */
int
ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			 struct ext4_iloc *iloc)
{
	int err;

	err = ext4_get_inode_loc(inode, iloc);
	if (!err) {
		BUFFER_TRACE(iloc->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, iloc->bh);
		if (err) {
			brelse(iloc->bh);
			iloc->bh = NULL;
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
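/*
 * Typical pairing of the two helpers above (a minimal sketch; the
 * function name is hypothetical and error paths are trimmed): reserve
 * write access to the inode's buffer, update the in-core inode, then
 * mark the iloc dirty, which drops the bh reference that
 * ext4_reserve_inode_write() left behind.
 */
#if 0
static int example_update_inode(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	int err;

	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		return err;
	/* ... modify the in-core inode here ... */
	return ext4_mark_iloc_dirty(handle, inode, &iloc);
}
#endif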
5558 * 5559 * Is this cheating? Not really. Sure, we haven't written the 5560 * inode out, but prune_icache isn't a user-visible syncing function. 5561 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) 5562 * we start and wait on commits. 5563 * 5564 * Is this efficient/effective? Well, we're being nice to the system 5565 * by cleaning up our inodes proactively so they can be reaped 5566 * without I/O. But we are potentially leaving up to five seconds' 5567 * worth of inodes floating about which prune_icache wants us to 5568 * write out. One way to fix that would be to get prune_icache() 5569 * to do a write_super() to free up some memory. It has the desired 5570 * effect. 5571 */ 5572 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) 5573 { 5574 struct ext4_iloc iloc; 5575 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 5576 static unsigned int mnt_count; 5577 int err, ret; 5578 5579 might_sleep(); 5580 err = ext4_reserve_inode_write(handle, inode, &iloc); 5581 if (ext4_handle_valid(handle) && 5582 EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && 5583 !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) { 5584 /* 5585 * We need extra buffer credits since we may write into EA block 5586 * with this same handle. If journal_extend fails, then it will 5587 * only result in a minor loss of functionality for that inode. 5588 * If this is felt to be critical, then e2fsck should be run to 5589 * force a large enough s_min_extra_isize. 5590 */ 5591 if ((jbd2_journal_extend(handle, 5592 EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) { 5593 ret = ext4_expand_extra_isize(inode, 5594 sbi->s_want_extra_isize, 5595 iloc, handle); 5596 if (ret) { 5597 EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND; 5598 if (mnt_count != 5599 le16_to_cpu(sbi->s_es->s_mnt_count)) { 5600 ext4_warning(inode->i_sb, __func__, 5601 "Unable to expand inode %lu. Delete" 5602 " some EAs or run e2fsck.", 5603 inode->i_ino); 5604 mnt_count = 5605 le16_to_cpu(sbi->s_es->s_mnt_count); 5606 } 5607 } 5608 } 5609 } 5610 if (!err) 5611 err = ext4_mark_iloc_dirty(handle, inode, &iloc); 5612 return err; 5613 } 5614 5615 /* 5616 * ext4_dirty_inode() is called from __mark_inode_dirty() 5617 * 5618 * We're really interested in the case where a file is being extended. 5619 * i_size has been changed by generic_commit_write() and we thus need 5620 * to include the updated inode in the current transaction. 5621 * 5622 * Also, vfs_dq_alloc_block() will always dirty the inode when blocks 5623 * are allocated to the file. 5624 * 5625 * If the inode is marked synchronous, we don't honour that here - doing 5626 * so would cause a commit on atime updates, which we don't bother doing. 5627 * We handle synchronous inodes at the highest possible level. 5628 */ 5629 void ext4_dirty_inode(struct inode *inode) 5630 { 5631 handle_t *handle; 5632 5633 handle = ext4_journal_start(inode, 2); 5634 if (IS_ERR(handle)) 5635 goto out; 5636 5637 ext4_mark_inode_dirty(handle, inode); 5638 5639 ext4_journal_stop(handle); 5640 out: 5641 return; 5642 } 5643 5644 #if 0 5645 /* 5646 * Bind an inode's backing buffer_head into this transaction, to prevent 5647 * it from being flushed to disk early. Unlike 5648 * ext4_reserve_inode_write, this leaves behind no bh reference and 5649 * returns no iloc structure, so the caller needs to repeat the iloc 5650 * lookup to mark the inode dirty later. 
5651 */ 5652 static int ext4_pin_inode(handle_t *handle, struct inode *inode) 5653 { 5654 struct ext4_iloc iloc; 5655 5656 int err = 0; 5657 if (handle) { 5658 err = ext4_get_inode_loc(inode, &iloc); 5659 if (!err) { 5660 BUFFER_TRACE(iloc.bh, "get_write_access"); 5661 err = jbd2_journal_get_write_access(handle, iloc.bh); 5662 if (!err) 5663 err = ext4_handle_dirty_metadata(handle, 5664 inode, 5665 iloc.bh); 5666 brelse(iloc.bh); 5667 } 5668 } 5669 ext4_std_error(inode->i_sb, err); 5670 return err; 5671 } 5672 #endif 5673 5674 int ext4_change_inode_journal_flag(struct inode *inode, int val) 5675 { 5676 journal_t *journal; 5677 handle_t *handle; 5678 int err; 5679 5680 /* 5681 * We have to be very careful here: changing a data block's 5682 * journaling status dynamically is dangerous. If we write a 5683 * data block to the journal, change the status and then delete 5684 * that block, we risk forgetting to revoke the old log record 5685 * from the journal and so a subsequent replay can corrupt data. 5686 * So, first we make sure that the journal is empty and that 5687 * nobody is changing anything. 5688 */ 5689 5690 journal = EXT4_JOURNAL(inode); 5691 if (!journal) 5692 return 0; 5693 if (is_journal_aborted(journal)) 5694 return -EROFS; 5695 5696 jbd2_journal_lock_updates(journal); 5697 jbd2_journal_flush(journal); 5698 5699 /* 5700 * OK, there are no updates running now, and all cached data is 5701 * synced to disk. We are now in a completely consistent state 5702 * which doesn't have anything in the journal, and we know that 5703 * no filesystem updates are running, so it is safe to modify 5704 * the inode's in-core data-journaling state flag now. 5705 */ 5706 5707 if (val) 5708 EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL; 5709 else 5710 EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL; 5711 ext4_set_aops(inode); 5712 5713 jbd2_journal_unlock_updates(journal); 5714 5715 /* Finally we can mark the inode as dirty. */ 5716 5717 handle = ext4_journal_start(inode, 1); 5718 if (IS_ERR(handle)) 5719 return PTR_ERR(handle); 5720 5721 err = ext4_mark_inode_dirty(handle, inode); 5722 ext4_handle_sync(handle); 5723 ext4_journal_stop(handle); 5724 ext4_std_error(inode->i_sb, err); 5725 5726 return err; 5727 } 5728 5729 static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh) 5730 { 5731 return !buffer_mapped(bh); 5732 } 5733 5734 int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 5735 { 5736 struct page *page = vmf->page; 5737 loff_t size; 5738 unsigned long len; 5739 int ret = -EINVAL; 5740 void *fsdata; 5741 struct file *file = vma->vm_file; 5742 struct inode *inode = file->f_path.dentry->d_inode; 5743 struct address_space *mapping = inode->i_mapping; 5744 5745 /* 5746 * Get i_alloc_sem to stop truncates messing with the inode. We cannot 5747 * get i_mutex because we are already holding mmap_sem. 5748 */ 5749 down_read(&inode->i_alloc_sem); 5750 size = i_size_read(inode); 5751 if (page->mapping != mapping || size <= page_offset(page) 5752 || !PageUptodate(page)) { 5753 /* page got truncated from under us? */ 5754 goto out_unlock; 5755 } 5756 ret = 0; 5757 if (PageMappedToDisk(page)) 5758 goto out_unlock; 5759 5760 if (page->index == size >> PAGE_CACHE_SHIFT) 5761 len = size & ~PAGE_CACHE_MASK; 5762 else 5763 len = PAGE_CACHE_SIZE; 5764 5765 lock_page(page); 5766 /* 5767 * return if we have all the buffers mapped. 
static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
{
	return !buffer_mapped(bh);
}

int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	loff_t size;
	unsigned long len;
	int ret = -EINVAL;
	void *fsdata;
	struct file *file = vma->vm_file;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;

	/*
	 * Get i_alloc_sem to stop truncates messing with the inode.  We
	 * cannot get i_mutex because we are already holding mmap_sem.
	 */
	down_read(&inode->i_alloc_sem);
	size = i_size_read(inode);
	if (page->mapping != mapping || size <= page_offset(page)
	    || !PageUptodate(page)) {
		/* page got truncated from under us? */
		goto out_unlock;
	}
	ret = 0;
	if (PageMappedToDisk(page))
		goto out_unlock;

	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

	lock_page(page);
	/*
	 * Return if we have all the buffers mapped.  This avoids the
	 * need to call write_begin/write_end, which does a
	 * journal_start/journal_stop and can block for a long time.
	 */
	if (page_has_buffers(page)) {
		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
				       ext4_bh_unmapped)) {
			unlock_page(page);
			goto out_unlock;
		}
	}
	unlock_page(page);
	/*
	 * OK, we need to fill the hole...  Do a write_begin/write_end
	 * to do the block allocation/reservation.  We are not holding
	 * inode->i_mutex here, which allows parallel write_begin and
	 * write_end calls; lock_page prevents this from happening on
	 * the same page, though.
	 */
	ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
					  len, AOP_FLAG_UNINTERRUPTIBLE,
					  &page, &fsdata);
	if (ret < 0)
		goto out_unlock;
	ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
					len, len, page, fsdata);
	if (ret < 0)
		goto out_unlock;
	ret = 0;
out_unlock:
	if (ret)
		ret = VM_FAULT_SIGBUS;
	up_read(&inode->i_alloc_sem);
	return ret;
}
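/*
 * How a handler like ext4_page_mkwrite() gets wired up (an assumed
 * sketch mirroring the usual pattern; the struct name is hypothetical):
 * it is installed in a vm_operations_struct so that the VM calls it the
 * first time a shared, file-backed page is written through a mapping.
 */
#if 0
static const struct vm_operations_struct example_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= ext4_page_mkwrite,
};
#endif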