/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *	David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

#define MPAGE_DA_EXTENT_TAIL 0x01

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	return jbd2_journal_begin_ordered_truncate(
					EXT4_SB(inode->i_sb)->s_journal,
					&EXT4_I(inode)->jinode,
					new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}
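
/*
 * Note on the test above (descriptive comment, not from the original
 * source): i_blocks is counted in 512-byte sectors, so an external
 * xattr block accounts for s_blocksize >> 9 of them.  A fast symlink
 * keeps its target inside i_data and therefore owns no data blocks at
 * all; once any external attribute block is discounted, i_blocks must
 * be zero.
 */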

/*
 * The ext4 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (e.g. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 *
 * If the handle isn't valid we're not journaling, so there's nothing to do.
 */
int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
		struct buffer_head *bh, ext4_fsblk_t blocknr)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;

	might_sleep();

	BUFFER_TRACE(bh, "enter");

	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
		  "data mode %lx\n",
		  bh, is_metadata, inode->i_mode,
		  test_opt(inode->i_sb, DATA_FLAGS));

	/* Never use the revoke function if we are doing full data
	 * journaling: there is no need to, and a V1 superblock won't
	 * support it.  Otherwise, only skip the revoke on un-journaled
	 * data blocks. */

	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
	    (!is_metadata && !ext4_should_journal_data(inode))) {
		if (bh) {
			BUFFER_TRACE(bh, "call jbd2_journal_forget");
			return ext4_journal_forget(handle, bh);
		}
		return 0;
	}

	/*
	 * data!=journal && (is_metadata || should_journal_data(inode))
	 */
	BUFFER_TRACE(bh, "call ext4_journal_revoke");
	err = ext4_journal_revoke(handle, blocknr, bh);
	if (err)
		ext4_abort(inode->i_sb, __func__,
			   "error %d when attempting revoke", err);
	BUFFER_TRACE(bh, "exit");
	return err;
}

/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
	ext4_lblk_t needed;

	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

	/* Give ourselves just enough room to cope with inodes in which
	 * i_blocks is corrupt: we've seen disk corruptions in the past
	 * which resulted in random data in an inode which looked enough
	 * like a regular file for ext4 to try to delete it.  Things
	 * will go a bit crazy if that happens, but at least we should
	 * try not to panic the whole kernel. */
	if (needed < 2)
		needed = 2;

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT4_MAX_TRANS_DATA)
		needed = EXT4_MAX_TRANS_DATA;

	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext4_journal_start(inode, blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext4_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle))
		return 0;
	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
		return 0;
	return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
{
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	return ext4_journal_restart(handle, blocks_for_truncate(inode));
}
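
/*
 * Illustrative sketch (not verbatim from the truncate path) of how the
 * three helpers above are meant to compose:
 *
 *	handle = start_transaction(inode);
 *	while (<more blocks to free>) {
 *		if (try_to_extend_transaction(handle, inode))
 *			ext4_journal_test_restart(handle, inode);
 *		<free the next chunk, dirtying everything consistently>;
 *	}
 *
 * Restarting commits the work done so far, so every structure touched
 * must be in a consistent state before each restart point.
 */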

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_delete_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb, __func__,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb, __func__,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	clear_inode(inode);	/* We must guarantee clearing of inode... */
}

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}
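
/*
 * Descriptive note (not from the original source): an Indirect triple
 * remembers where a block pointer lives (->p), a cached copy of its
 * value at the time we read it (->key), and the buffer that hosts it
 * (->bh, NULL when the pointer sits in the inode's i_data).  Keeping
 * the cached ->key next to ->p is what later lets callers detect that
 * a chain changed underneath them by re-checking *p == key.
 */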

/**
 * ext4_block_to_path - parse the block number into array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set this non-zero if the referred-to block is likely to be
 *	followed (on disk) by an indirect block.
 *
 * To store the locations of file's data ext4 uses a data structure common
 * for UNIX filesystems - tree of pointers anchored in the inode, with
 * data blocks at leaves and indirect blocks in intermediate nodes.
 * This function translates the block number into path in that tree -
 * return value is the path length and @offsets[n] is the offset of
 * pointer to (n+1)th node in the nth one.  If @block is out of range
 * (negative or too large) a warning is printed and zero returned.
 *
 * Note: function doesn't find node addresses, so no IO is needed.  All
 * we need to know is the capacity of indirect blocks (taken from the
 * inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks.  We might use long long, but that would
 * kill us on x86.  Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext4_warning(inode->i_sb, "ext4_block_to_path", "block < 0");
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "ext4_block_to_path",
			     "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
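
/*
 * Worked example for ext4_block_to_path() (illustrative, assuming a
 * 4KiB block size, i.e. ptrs = 1024 and EXT4_NDIR_BLOCKS = 12):
 *
 *	i_block = 5    -> offsets[] = { 5 },                     depth 1
 *	i_block = 20   -> offsets[] = { EXT4_IND_BLOCK, 8 },     depth 2
 *	i_block = 1036 -> offsets[] = { EXT4_DIND_BLOCK, 0, 0 }, depth 3
 *
 * For i_block = 1036: subtracting the 12 direct blocks leaves 1024,
 * which is past the single-indirect range, and subtracting those 1024
 * leaves 0, i.e. the very first slot of the double-indirect subtree.
 */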

static int __ext4_check_blockref(const char *function, struct inode *inode,
				 __le32 *p, unsigned int max)
{
	__le32 *bref = p;
	unsigned int blk;

	while (bref < p+max) {
		blk = le32_to_cpu(*bref++);
		if (blk &&
		    unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						    blk, 1))) {
			ext4_error(inode->i_sb, function,
				   "invalid block reference %u "
				   "in inode #%lu", blk, inode->i_ino);
			return -EIO;
		}
	}
	return 0;
}


#define ext4_check_indirect_blockref(inode, bh)				\
	__ext4_check_blockref(__func__, inode, (__le32 *)(bh)->b_data,	\
			      EXT4_ADDR_PER_BLOCK((inode)->i_sb))

#define ext4_check_inode_blockref(inode)				\
	__ext4_check_blockref(__func__, inode, EXT4_I(inode)->i_data,	\
			      EXT4_NDIR_BLOCKS)

/**
 * ext4_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Function fills the array of triples <key, p, bh> and returns %NULL
 * if everything went OK or the pointer to the last filled triple
 * (incomplete one) otherwise.  Upon the return chain[i].key contains
 * the number of (i+1)-th block in the chain (as it is stored in memory,
 * i.e. little-endian 32-bit), chain[i].p contains the address of that
 * number (it points into struct inode for i==0 and into the bh->b_data
 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 * block for i>0 and NULL for i==0.  In other words, it holds the block
 * numbers of the chain, addresses they were taken from (and where we can
 * verify that chain did not change) and buffer_heads hosting these
 * numbers.
 *
 * Function stops when it stumbles upon zero pointer (absent block)
 *	(pointer to last triple returned, *@err == 0)
 * or when it gets an IO error reading an indirect block
 *	(ditto, *@err == -EIO)
 * or when it reads all @depth-1 indirect blocks successfully and finds
 * the whole chain, all way to the data (returns %NULL, *err == 0).
 *
 * Need to be called with
 * down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh))
			goto failure;

		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = -EIO;
no_block:
	return p;
}
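
/*
 * Illustrative caller pattern for the two helpers above (a sketch, not
 * code from this file):
 *
 *	depth = ext4_block_to_path(inode, iblock, offsets, &boundary);
 *	partial = ext4_get_branch(inode, depth, offsets, chain, &err);
 *	if (!partial)
 *		blk = le32_to_cpu(chain[depth - 1].key);   (fully mapped)
 *	else
 *		<allocate from *partial onwards, or report a hole>
 */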

/**
 * ext4_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when heuristic for sequential allocation fails.
 * Rules are:
 *   + if there is a block to the left of our position - allocate near it.
 *   + if pointer will live in indirect block - allocate near that block.
 *   + if pointer will live in inode - allocate in the same
 *     cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.  The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 * Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put it
	 * into the same cylinder group then.
	 */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}

/**
 * ext4_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @partial: pointer to the last triple within a chain
 *
 * Normally this function finds the preferred place for block allocation
 * and returns it.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	return ext4_find_near(inode, partial);
}
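
/*
 * Worked example for the PID colouring in ext4_find_near() above
 * (illustrative; assumes a 4KiB block size, so 32768 blocks per group):
 * the group is split into 16 slices of 2048 blocks, and a process with
 * pid % 16 == 3 gets a goal 3 * 2048 = 6144 blocks into the group.
 * Concurrent writers in different processes therefore start their
 * allocations in different slices instead of racing for the same spot.
 */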

/**
 * ext4_blks_to_allocate - look up the block map and count the number
 * of direct blocks that need to be allocated for the given branch.
 *
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped.
 * @blocks_to_boundary: the offset in the indirect block
 *
 * return the total number of blocks to be allocated, including the
 * direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) on the path have not been
	 * allocated yet, so it's clear that no blocks on that path have
	 * been allocated either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
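
/*
 * Worked example (illustrative): with blks = 8 and
 * blocks_to_boundary = 5, a branch that still needs indirect blocks
 * (k > 0) yields blocks_to_boundary + 1 = 6, since we never allocate
 * across an indirect-block boundary.  With k == 0 the loop instead
 * scans forward from branch[0].p and stops early at the first slot
 * that is already mapped, so the result can be smaller.
 */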

/**
 * ext4_alloc_blocks - multiple allocate blocks needed for a branch
 * @indirect_blks: the number of blocks we need to allocate for
 *	indirect blocks
 *
 * @new_blocks: on return it will store the new block numbers for
 *	the indirect blocks(if needed) and the first direct block,
 * @blks: on return it will store the total number of allocated
 *	direct blocks
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, ext4_fsblk_t goal,
			     int indirect_blks, int blks,
			     ext4_fsblk_t new_blocks[4], int *err)
{
	struct ext4_allocation_request ar;
	int target, i;
	unsigned long count = 0, blk_allocated = 0;
	int index = 0;
	ext4_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks(if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks we need to allocate (required).
	 */
	/* first we try to allocate the indirect blocks */
	target = indirect_blks;
	while (target > 0) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext4_new_meta_blocks(handle, inode,
						     goal, &count, err);
		if (*err)
			goto failed_out;

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}
		if (count > 0) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
			printk(KERN_INFO "%s returned more blocks than "
						"requested\n", __func__);
			WARN_ON(1);
			break;
		}
	}

	target = blks - count;
	blk_allocated = count;
	if (!target)
		goto allocated;
	/* Now allocate data blocks */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.goal = goal;
	ar.len = target;
	ar.logical = iblock;
	if (S_ISREG(inode->i_mode))
		/* enable in-core preallocation only for regular files */
		ar.flags = EXT4_MB_HINT_DATA;

	current_block = ext4_mb_new_blocks(handle, &ar, err);

	if (*err && (target == blks)) {
		/*
		 * if the allocation failed and we didn't allocate
		 * any blocks before
		 */
		goto failed_out;
	}
	if (!*err) {
		if (target == blks) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
		}
		blk_allocated += ar.len;
	}
allocated:
	/* total number of blocks allocated for direct blocks */
	ret = blk_allocated;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
	return ret;
}
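
/*
 * Descriptive note (not from the original source): ext4_alloc_blocks()
 * is best-effort for the data blocks - the return value is the number
 * of direct blocks actually allocated, which may be smaller than the
 * @blks the caller asked for.  Only the indirect blocks plus the first
 * direct block are mandatory; ext4_alloc_branch() below shrinks *blks
 * to whatever was obtained.
 */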

/**
 * ext4_alloc_branch - allocate and set up a chain of blocks.
 * @inode: owner
 * @indirect_blks: number of allocated indirect blocks
 * @blks: number of allocated direct blocks
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * This function allocates blocks, zeroes out all but the last one,
 * links them into chain and (if we are synchronous) writes them to disk.
 * In other words, it prepares a branch that can be spliced onto the
 * inode.  It stores the information about that chain in the branch[], in
 * the same format as ext4_get_branch() would do.  We are calling it after
 * we had read the existing part of chain and partial points to the last
 * triple of that (one with zero ->key).  Upon the exit we have the same
 * picture as after the successful ext4_get_block(), except that in one
 * place chain is disconnected - *branch->p is still zero (we did not
 * set the last link), but branch->key contains the number that should
 * be placed into *branch->p to fill that gap.
 *
 * If allocation fails we free all blocks we've allocated (and forget
 * their buffer_heads) and return the error value from the failed
 * ext4_alloc_block() (normally -ENOSPC).  Otherwise we set the chain
 * as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, int indirect_blks,
			     int *blks, ext4_fsblk_t goal,
			     ext4_lblk_t *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext4_fsblk_t new_blocks[4];
	ext4_fsblk_t current_block;

	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			brelse(bh);
			goto failed;
		}

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the newly allocated
			 * data block numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/* Allocation failed, free what we already allocated */
	for (i = 1; i <= n; i++) {
		BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
		ext4_journal_forget(handle, branch[i].bh);
	}
	for (i = 0; i < indirect_blks; i++)
		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);

	ext4_free_blocks(handle, inode, new_blocks[i], num, 0);

	return err;
}

/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext4_alloc_branch)
 * @where: location of missing link
 * @num: number of indirect blocks we are adding
 * @blks: number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.).  In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			      ext4_lblk_t block, Indirect *where, int num,
			      int blks)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the
	 * just-allocated direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);

	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 * Inode was dirtied above.
		 */
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
		ext4_journal_forget(handle, where[i].bh);
		ext4_free_blocks(handle, inode,
				 le32_to_cpu(where[i-1].key), 1, 0);
	}
	ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 0);

	return err;
}

/*
 * The ext4_ind_get_blocks() function handles non-extents inodes
 * (i.e., using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_get_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf.  So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * The ext4_ind_get_blocks() function should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks.
 */
static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
			       ext4_lblk_t iblock, unsigned int maxblocks,
			       struct buffer_head *bh_result,
			       int flags)
{
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, iblock, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result);
		count++;
		/*map more blocks*/
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	goal = ext4_find_goal(inode, iblock, partial);

	/* the number of blocks we need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext4_blks_to_allocate(partial, indirect_blks,
				      maxblocks, blocks_to_boundary);
	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
				&count, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case. --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, inode, iblock,
					 partial, indirect_blks, count);
	else
		goto cleanup;

	set_buffer_new(bh_result);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
	BUFFER_TRACE(bh_result, "returned");
out:
	return err;
}

qsize_t ext4_get_reserved_space(struct inode *inode)
{
	unsigned long long total;

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	total = EXT4_I(inode)->i_reserved_data_blocks +
		EXT4_I(inode)->i_reserved_meta_blocks;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	return total;
}

/*
 * Calculate the number of metadata blocks we need to reserve
 * to allocate @blocks for a non-extent file
 */
static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
{
	int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ind_blks, dind_blks, tind_blks;

	/* number of new indirect blocks needed */
	ind_blks = (blocks + icap - 1) / icap;

	dind_blks = (ind_blks + icap - 1) / icap;

	tind_blks = 1;

	return ind_blks + dind_blks + tind_blks;
}

/*
 * Calculate the number of metadata blocks we need to reserve
 * to allocate the given number of blocks
 */
static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
{
	if (!blocks)
		return 0;

	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
		return ext4_ext_calc_metadata_amount(inode, blocks);

	return ext4_indirect_calc_metadata_amount(inode, blocks);
}
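
/*
 * Worked example for the indirect estimate above (illustrative,
 * assuming a 4KiB block size, so icap = 1024): reserving for
 * blocks = 100 gives ind_blks = 1, dind_blks = 1 and tind_blks = 1,
 * i.e. 3 metadata blocks.  This deliberately overestimates - it
 * assumes the worst case where the data blocks share no already
 * allocated indirect blocks - because an unused reservation can be
 * returned later, while running short mid-writeout cannot be fixed.
 */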

static void ext4_da_update_reserve_space(struct inode *inode, int used)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int total, mdb, mdb_free;

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	/* recalculate the number of metablocks still needing to be reserved */
	total = EXT4_I(inode)->i_reserved_data_blocks - used;
	mdb = ext4_calc_metadata_amount(inode, total);

	/* figure out how many metablocks to release */
	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
	mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;

	if (mdb_free) {
		/* Account for allocated meta_blocks */
		mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;

		/* update fs dirty blocks counter */
		percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
		EXT4_I(inode)->i_allocated_meta_blocks = 0;
		EXT4_I(inode)->i_reserved_meta_blocks = mdb;
	}

	/* update per-inode reservations */
	BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
	EXT4_I(inode)->i_reserved_data_blocks -= used;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/*
	 * release the over-booked quota reserved for metadata blocks
	 */
	if (mdb_free)
		vfs_dq_release_reservation_block(inode, mdb_free);

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if (!total && (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}

static int check_block_validity(struct inode *inode, sector_t logical,
				sector_t phys, int len)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) {
		ext4_error(inode->i_sb, "check_block_validity",
			   "inode #%lu logical block %llu mapped to %llu "
			   "(size %d)", inode->i_ino,
			   (unsigned long long) logical,
			   (unsigned long long) phys, len);
		WARN_ON(1);
		return -EIO;
	}
	return 0;
}

/*
 * The ext4_get_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extents based, it will call ext4_ext_get_blocks();
 * otherwise it calls ext4_ind_get_blocks() to handle indirect-mapping
 * based files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and uninitialized,
 * the result buffer head is unmapped.  If create == 1, it will make sure
 * the buffer head is mapped.
 *
 * It returns 0 if plain look up failed (blocks have not been allocated);
 * in that case, the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
		    unsigned int max_blocks, struct buffer_head *bh,
		    int flags)
{
	int retval;

	clear_buffer_mapped(bh);
	clear_buffer_unwritten(bh);

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
		retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
					     bh, 0);
	} else {
		retval = ext4_ind_get_blocks(handle, inode, block, max_blocks,
					     bh, 0);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	if (retval > 0 && buffer_mapped(bh)) {
		int ret = check_block_validity(inode, block,
					       bh->b_blocknr, retval);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Returns if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_blocks() returns with create == 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && buffer_mapped(bh))
		return retval;

	/*
	 * When we call get_blocks without the create flag, the
	 * BH_Unwritten flag could have gotten set if the blocks
	 * requested were part of an uninitialized extent.  We need to
	 * clear this flag now that we are committed to convert all or
	 * part of the uninitialized extent to be an initialized
	 * extent.  This is because we need to avoid the combination
	 * of BH_Unwritten and BH_Mapped flags being simultaneously
	 * set on the buffer_head.
	 */
	clear_buffer_unwritten(bh);

	/*
	 * Allocating new blocks and/or writing to an uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * if the caller is from delayed allocation writeout path
	 * we have already reserved fs blocks for allocation
	 * let the underlying get_block() function know to
	 * avoid double accounting
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		EXT4_I(inode)->i_delalloc_reserved_flag = 1;
	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
		retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
					     bh, flags);
	} else {
		retval = ext4_ind_get_blocks(handle, inode, block,
					     max_blocks, bh, flags);

		if (retval > 0 && buffer_new(bh)) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags &
							~EXT4_EXT_MIGRATE;
		}
	}

	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		EXT4_I(inode)->i_delalloc_reserved_flag = 0;

	/*
	 * Update reserved blocks/metadata blocks after successful
	 * block allocation which had been deferred till now.
	 */
	if ((retval > 0) && (flags & EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE))
		ext4_da_update_reserve_space(inode, retval);

	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && buffer_mapped(bh)) {
		int ret = check_block_validity(inode, block,
					       bh->b_blocknr, retval);
		if (ret != 0)
			return ret;
	}
	return retval;
}
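
/*
 * Descriptive note (not from the original source): ext4_get_blocks()
 * deliberately does the lookup twice - a cheap attempt under
 * down_read() first, and only if the caller wants allocation and the
 * blocks turn out to be absent does it retry under down_write().
 * Readers of a fully mapped range therefore never serialize against
 * allocators.
 */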

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create)
{
	handle_t *handle = ext4_journal_current_handle();
	int ret = 0, started = 0;
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
	int dio_credits;

	if (create && !handle) {
		/* Direct IO write... */
		if (max_blocks > DIO_MAX_BLOCKS)
			max_blocks = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}
		started = 1;
	}

	ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);
	if (ret > 0) {
		bh_result->b_size = (ret << inode->i_blkbits);
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
out:
	return ret;
}
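
/*
 * Descriptive note (not from the original source): in the get_block_t
 * convention used above, the caller passes the maximum mapping size in
 * bh_result->b_size and ext4_get_block() writes back the number of
 * bytes actually mapped.  ext4_getblk() below exploits the same
 * interface with an on-stack dummy buffer_head to resolve a single
 * block without touching the page cache's buffers.
 */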

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct buffer_head dummy;
	int fatal = 0, err;
	int flags = 0;

	J_ASSERT(handle != NULL || create == 0);

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	buffer_trace_init(&dummy.b_history);
	if (create)
		flags |= EXT4_GET_BLOCKS_CREATE;
	err = ext4_get_blocks(handle, inode, block, 1, &dummy, flags);
	/*
	 * ext4_get_blocks() returns number of blocks mapped. 0 in
	 * case of a HOLE.
	 */
	if (err > 0) {
		if (err > 1)
			WARN_ON(1);
		err = 0;
	}
	*errp = err;
	if (!err && buffer_mapped(&dummy)) {
		struct buffer_head *bh;
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (!bh) {
			*errp = -EIO;
			goto err;
		}
		if (buffer_new(&dummy)) {
			J_ASSERT(create != 0);
			J_ASSERT(handle != NULL);

			/*
			 * Now that we do not always journal data, we should
			 * keep in mind whether this should always journal the
			 * new buffer as metadata.  For now, regular file
			 * writes use ext4_get_block instead, so it's not a
			 * problem.
			 */
			lock_buffer(bh);
			BUFFER_TRACE(bh, "call get_create_access");
			fatal = ext4_journal_get_create_access(handle, bh);
			if (!fatal && !buffer_uptodate(bh)) {
				memset(bh->b_data, 0, inode->i_sb->s_blocksize);
				set_buffer_uptodate(bh);
			}
			unlock_buffer(bh);
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			err = ext4_handle_dirty_metadata(handle, inode, bh);
			if (!fatal)
				fatal = err;
		} else {
			BUFFER_TRACE(bh, "not a new buffer");
		}
		if (fatal) {
			*errp = fatal;
			brelse(bh);
			bh = NULL;
		}
		return bh;
	}
err:
	return NULL;
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ_META, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}

static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next)
	{
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
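
/*
 * Descriptive note (not from the original source): walk_page_buffers()
 * applies @fn to every buffer of the page that overlaps the byte range
 * [@from, @to) and returns the first error.  Buffers outside the range
 * are skipped, but if any of them is not uptodate, *@partial is set so
 * callers know the page cannot simply be marked uptodate afterwards.
 */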

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page().  In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
				       struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	return ext4_journal_get_write_access(handle, bh);
}

static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_mark(ext4_write_begin,
		   "dev %s ino %lu pos %llu len %u flags %u",
		   inode->i_sb->s_id, inode->i_ino,
		   (unsigned long long) pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		page_cache_release(page);
		/*
		 * block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again.  Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (pos + len > inode->i_size)
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			vmtruncate(inode, inode->i_size);
			/*
			 * If vmtruncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied > EXT4_I(inode)->i_disksize) {
		/* We need to mark inode dirty even if
		 * new_i_size is less than inode->i_size
		 * but greater than i_disksize. (hint: delalloc)
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock.  First, it unnecessarily
	 * makes the holding time of page lock longer.  Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	return copied;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_mark(ext4_ordered_write_end,
		   "dev %s ino %lu pos %llu len %u copied %u",
		   inode->i_sb->s_id, inode->i_ino,
		   (unsigned long long) pos, len, copied);
	ret = ext4_jbd2_file_inode(handle, inode);

	if (ret == 0) {
		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
					      page, fsdata);
		copied = ret2;
		if (pos + len > inode->i_size)
			/* if we have allocated more blocks than we copied,
			 * we will have blocks allocated outside
			 * inode->i_size, so truncate them
			 */
			ext4_orphan_add(handle, inode);
		if (ret2 < 0)
			ret = ret2;
	}
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		vmtruncate(inode, inode->i_size);
		/*
		 * If vmtruncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
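
/*
 * Descriptive note (not from the original source): the write_end
 * variants in this file correspond to the three journaling modes -
 * ext4_ordered_write_end() for data=ordered (file data is attached to
 * the transaction's ordered list before metadata commits),
 * ext4_writeback_write_end() for data=writeback (no data ordering),
 * and ext4_journalled_write_end() for data=journal (the data buffers
 * themselves go through the journal).
 */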

static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_mark(ext4_writeback_write_end,
		   "dev %s ino %lu pos %llu len %u copied %u",
		   inode->i_sb->s_id, inode->i_ino,
		   (unsigned long long) pos, len, copied);
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
				      page, fsdata);
	copied = ret2;
	if (pos + len > inode->i_size)
		/* if we have allocated more blocks than we copied,
		 * we will have blocks allocated outside
		 * inode->i_size, so truncate them
		 */
		ext4_orphan_add(handle, inode);

	if (ret2 < 0)
		ret = ret2;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		vmtruncate(inode, inode->i_size);
		/*
		 * If vmtruncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	loff_t new_i_size;

	trace_mark(ext4_journalled_write_end,
		   "dev %s ino %lu pos %llu len %u copied %u",
		   inode->i_sb->s_id, inode->i_ino,
		   (unsigned long long) pos, len, copied);
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (copied < len) {
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from+copied, to);
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	if (!partial)
		SetPageUptodate(page);
	new_i_size = pos + copied;
	if (new_i_size > inode->i_size)
		i_size_write(inode, pos+copied);
	EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		ext4_update_i_disksize(inode, new_i_size);
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	unlock_page(page);
	page_cache_release(page);
	if (pos + len > inode->i_size)
		/* if we have allocated more blocks than we copied,
		 * we will have blocks allocated outside
		 * inode->i_size, so truncate them
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size) {
		vmtruncate(inode, inode->i_size);
		/*
		 * If vmtruncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
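
/*
 * Worked example for the reservation below (illustrative; assumes a
 * non-extent file and a 4KiB block size, so icap = 1024): with no
 * blocks reserved yet, reserving nrblocks = 1 computes total = 1 and
 * mdblocks = 3 (one indirect, one double-indirect, one triple-indirect
 * in the worst case), so md_needed = 3 and quota is reserved for
 * 4 blocks in all.  Later reservations reuse the already reserved
 * metadata, so the marginal cost quickly drops to the data block alone.
 */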
1786 */ 1787 if (vfs_dq_reserve_block(inode, total)) { 1788 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1789 return -EDQUOT; 1790 } 1791 1792 if (ext4_claim_free_blocks(sbi, total)) { 1793 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1794 if (ext4_should_retry_alloc(inode->i_sb, &retries)) { 1795 yield(); 1796 goto repeat; 1797 } 1798 vfs_dq_release_reservation_block(inode, total); 1799 return -ENOSPC; 1800 } 1801 EXT4_I(inode)->i_reserved_data_blocks += nrblocks; 1802 EXT4_I(inode)->i_reserved_meta_blocks = mdblocks; 1803 1804 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1805 return 0; /* success */ 1806 } 1807 1808 static void ext4_da_release_space(struct inode *inode, int to_free) 1809 { 1810 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1811 int total, mdb, mdb_free, release; 1812 1813 if (!to_free) 1814 return; /* Nothing to release, exit */ 1815 1816 spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 1817 1818 if (!EXT4_I(inode)->i_reserved_data_blocks) { 1819 /* 1820 * if there is no reserved blocks, but we try to free some 1821 * then the counter is messed up somewhere. 1822 * but since this function is called from invalidate 1823 * page, it's harmless to return without any action 1824 */ 1825 printk(KERN_INFO "ext4 delalloc try to release %d reserved " 1826 "blocks for inode %lu, but there is no reserved " 1827 "data blocks\n", to_free, inode->i_ino); 1828 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1829 return; 1830 } 1831 1832 /* recalculate the number of metablocks still need to be reserved */ 1833 total = EXT4_I(inode)->i_reserved_data_blocks - to_free; 1834 mdb = ext4_calc_metadata_amount(inode, total); 1835 1836 /* figure out how many metablocks to release */ 1837 BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks); 1838 mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb; 1839 1840 release = to_free + mdb_free; 1841 1842 /* update fs dirty blocks counter for truncate case */ 1843 percpu_counter_sub(&sbi->s_dirtyblocks_counter, release); 1844 1845 /* update per-inode reservations */ 1846 BUG_ON(to_free > EXT4_I(inode)->i_reserved_data_blocks); 1847 EXT4_I(inode)->i_reserved_data_blocks -= to_free; 1848 1849 BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks); 1850 EXT4_I(inode)->i_reserved_meta_blocks = mdb; 1851 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1852 1853 vfs_dq_release_reservation_block(inode, release); 1854 } 1855 1856 static void ext4_da_page_release_reservation(struct page *page, 1857 unsigned long offset) 1858 { 1859 int to_release = 0; 1860 struct buffer_head *head, *bh; 1861 unsigned int curr_off = 0; 1862 1863 head = page_buffers(page); 1864 bh = head; 1865 do { 1866 unsigned int next_off = curr_off + bh->b_size; 1867 1868 if ((offset <= curr_off) && (buffer_delay(bh))) { 1869 to_release++; 1870 clear_buffer_delay(bh); 1871 } 1872 curr_off = next_off; 1873 } while ((bh = bh->b_this_page) != head); 1874 ext4_da_release_space(page->mapping->host, to_release); 1875 } 1876 1877 /* 1878 * Delayed allocation stuff 1879 */ 1880 1881 struct mpage_da_data { 1882 struct inode *inode; 1883 sector_t b_blocknr; /* start block number of extent */ 1884 size_t b_size; /* size of extent */ 1885 unsigned long b_state; /* state of the extent */ 1886 unsigned long first_page, next_page; /* extent of pages */ 1887 struct writeback_control *wbc; 1888 int io_done; 1889 int pages_written; 1890 int retval; 1891 }; 1892 1893 /* 1894 * mpage_da_submit_io - walks through extent of pages and try to write 1895 * them with 
the writepage() callback
1896 *
1897 * @mpd->inode: inode
1898 * @mpd->first_page: first page of the extent
1899 * @mpd->next_page: page after the last page of the extent
1900 *
1901 * By the time mpage_da_submit_io() is called we expect all blocks
1902 * to be allocated. This may be wrong if allocation failed.
1903 *
1904 * As the pages are already locked by write_cache_pages(), we can't use it here.
1905 */
1906 static int mpage_da_submit_io(struct mpage_da_data *mpd)
1907 {
1908 long pages_skipped;
1909 struct pagevec pvec;
1910 unsigned long index, end;
1911 int ret = 0, err, nr_pages, i;
1912 struct inode *inode = mpd->inode;
1913 struct address_space *mapping = inode->i_mapping;
1914
1915 BUG_ON(mpd->next_page <= mpd->first_page);
1916 /*
1917 * We need to start from the first_page to the next_page - 1
1918 * to make sure we also write the mapped dirty buffer_heads.
1919 * If we look at mpd->b_blocknr we would only be looking
1920 * at the currently mapped buffer_heads.
1921 */
1922 index = mpd->first_page;
1923 end = mpd->next_page - 1;
1924
1925 pagevec_init(&pvec, 0);
1926 while (index <= end) {
1927 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
1928 if (nr_pages == 0)
1929 break;
1930 for (i = 0; i < nr_pages; i++) {
1931 struct page *page = pvec.pages[i];
1932
1933 index = page->index;
1934 if (index > end)
1935 break;
1936 index++;
1937
1938 BUG_ON(!PageLocked(page));
1939 BUG_ON(PageWriteback(page));
1940
1941 pages_skipped = mpd->wbc->pages_skipped;
1942 err = mapping->a_ops->writepage(page, mpd->wbc);
1943 if (!err && (pages_skipped == mpd->wbc->pages_skipped))
1944 /*
1945 * We have successfully written the page
1946 * without it being skipped.
1947 */
1948 mpd->pages_written++;
1949 /*
1950 * In the error case we have to continue because the
1951 * remaining pages are still locked.
1952 * XXX: unlock and re-dirty them?
1953 */ 1954 if (ret == 0) 1955 ret = err; 1956 } 1957 pagevec_release(&pvec); 1958 } 1959 return ret; 1960 } 1961 1962 /* 1963 * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers 1964 * 1965 * @mpd->inode - inode to walk through 1966 * @exbh->b_blocknr - first block on a disk 1967 * @exbh->b_size - amount of space in bytes 1968 * @logical - first logical block to start assignment with 1969 * 1970 * the function goes through all passed space and put actual disk 1971 * block numbers into buffer heads, dropping BH_Delay and BH_Unwritten 1972 */ 1973 static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical, 1974 struct buffer_head *exbh) 1975 { 1976 struct inode *inode = mpd->inode; 1977 struct address_space *mapping = inode->i_mapping; 1978 int blocks = exbh->b_size >> inode->i_blkbits; 1979 sector_t pblock = exbh->b_blocknr, cur_logical; 1980 struct buffer_head *head, *bh; 1981 pgoff_t index, end; 1982 struct pagevec pvec; 1983 int nr_pages, i; 1984 1985 index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 1986 end = (logical + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 1987 cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 1988 1989 pagevec_init(&pvec, 0); 1990 1991 while (index <= end) { 1992 /* XXX: optimize tail */ 1993 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 1994 if (nr_pages == 0) 1995 break; 1996 for (i = 0; i < nr_pages; i++) { 1997 struct page *page = pvec.pages[i]; 1998 1999 index = page->index; 2000 if (index > end) 2001 break; 2002 index++; 2003 2004 BUG_ON(!PageLocked(page)); 2005 BUG_ON(PageWriteback(page)); 2006 BUG_ON(!page_has_buffers(page)); 2007 2008 bh = page_buffers(page); 2009 head = bh; 2010 2011 /* skip blocks out of the range */ 2012 do { 2013 if (cur_logical >= logical) 2014 break; 2015 cur_logical++; 2016 } while ((bh = bh->b_this_page) != head); 2017 2018 do { 2019 if (cur_logical >= logical + blocks) 2020 break; 2021 2022 if (buffer_delay(bh) || 2023 buffer_unwritten(bh)) { 2024 2025 BUG_ON(bh->b_bdev != inode->i_sb->s_bdev); 2026 2027 if (buffer_delay(bh)) { 2028 clear_buffer_delay(bh); 2029 bh->b_blocknr = pblock; 2030 } else { 2031 /* 2032 * unwritten already should have 2033 * blocknr assigned. 
Verify that 2034 */ 2035 clear_buffer_unwritten(bh); 2036 BUG_ON(bh->b_blocknr != pblock); 2037 } 2038 2039 } else if (buffer_mapped(bh)) 2040 BUG_ON(bh->b_blocknr != pblock); 2041 2042 cur_logical++; 2043 pblock++; 2044 } while ((bh = bh->b_this_page) != head); 2045 } 2046 pagevec_release(&pvec); 2047 } 2048 } 2049 2050 2051 /* 2052 * __unmap_underlying_blocks - just a helper function to unmap 2053 * set of blocks described by @bh 2054 */ 2055 static inline void __unmap_underlying_blocks(struct inode *inode, 2056 struct buffer_head *bh) 2057 { 2058 struct block_device *bdev = inode->i_sb->s_bdev; 2059 int blocks, i; 2060 2061 blocks = bh->b_size >> inode->i_blkbits; 2062 for (i = 0; i < blocks; i++) 2063 unmap_underlying_metadata(bdev, bh->b_blocknr + i); 2064 } 2065 2066 static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd, 2067 sector_t logical, long blk_cnt) 2068 { 2069 int nr_pages, i; 2070 pgoff_t index, end; 2071 struct pagevec pvec; 2072 struct inode *inode = mpd->inode; 2073 struct address_space *mapping = inode->i_mapping; 2074 2075 index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 2076 end = (logical + blk_cnt - 1) >> 2077 (PAGE_CACHE_SHIFT - inode->i_blkbits); 2078 while (index <= end) { 2079 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 2080 if (nr_pages == 0) 2081 break; 2082 for (i = 0; i < nr_pages; i++) { 2083 struct page *page = pvec.pages[i]; 2084 index = page->index; 2085 if (index > end) 2086 break; 2087 index++; 2088 2089 BUG_ON(!PageLocked(page)); 2090 BUG_ON(PageWriteback(page)); 2091 block_invalidatepage(page, 0); 2092 ClearPageUptodate(page); 2093 unlock_page(page); 2094 } 2095 } 2096 return; 2097 } 2098 2099 static void ext4_print_free_blocks(struct inode *inode) 2100 { 2101 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2102 printk(KERN_EMERG "Total free blocks count %lld\n", 2103 ext4_count_free_blocks(inode->i_sb)); 2104 printk(KERN_EMERG "Free/Dirty block details\n"); 2105 printk(KERN_EMERG "free_blocks=%lld\n", 2106 (long long)percpu_counter_sum(&sbi->s_freeblocks_counter)); 2107 printk(KERN_EMERG "dirty_blocks=%lld\n", 2108 (long long)percpu_counter_sum(&sbi->s_dirtyblocks_counter)); 2109 printk(KERN_EMERG "Block reservation details\n"); 2110 printk(KERN_EMERG "i_reserved_data_blocks=%u\n", 2111 EXT4_I(inode)->i_reserved_data_blocks); 2112 printk(KERN_EMERG "i_reserved_meta_blocks=%u\n", 2113 EXT4_I(inode)->i_reserved_meta_blocks); 2114 return; 2115 } 2116 2117 /* 2118 * mpage_da_map_blocks - go through given space 2119 * 2120 * @mpd - bh describing space 2121 * 2122 * The function skips space we know is already mapped to disk blocks. 
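*
* In sketch form (state bits as used throughout this file), space is
* "already mapped" and therefore skipped when
*
*	(mpd->b_state & (1 << BH_Mapped)) &&
*	!(mpd->b_state & ((1 << BH_Delay) | (1 << BH_Unwritten)))
*
* holds, in which case the function below returns 0 without calling
* the block allocator at all.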
2123 * 2124 */ 2125 static int mpage_da_map_blocks(struct mpage_da_data *mpd) 2126 { 2127 int err, blks, get_blocks_flags; 2128 struct buffer_head new; 2129 sector_t next = mpd->b_blocknr; 2130 unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits; 2131 loff_t disksize = EXT4_I(mpd->inode)->i_disksize; 2132 handle_t *handle = NULL; 2133 2134 /* 2135 * We consider only non-mapped and non-allocated blocks 2136 */ 2137 if ((mpd->b_state & (1 << BH_Mapped)) && 2138 !(mpd->b_state & (1 << BH_Delay)) && 2139 !(mpd->b_state & (1 << BH_Unwritten))) 2140 return 0; 2141 2142 /* 2143 * If we didn't accumulate anything to write simply return 2144 */ 2145 if (!mpd->b_size) 2146 return 0; 2147 2148 handle = ext4_journal_current_handle(); 2149 BUG_ON(!handle); 2150 2151 /* 2152 * Call ext4_get_blocks() to allocate any delayed allocation 2153 * blocks, or to convert an uninitialized extent to be 2154 * initialized (in the case where we have written into 2155 * one or more preallocated blocks). 2156 * 2157 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to 2158 * indicate that we are on the delayed allocation path. This 2159 * affects functions in many different parts of the allocation 2160 * call path. This flag exists primarily because we don't 2161 * want to change *many* call functions, so ext4_get_blocks() 2162 * will set the magic i_delalloc_reserved_flag once the 2163 * inode's allocation semaphore is taken. 2164 * 2165 * If the blocks in questions were delalloc blocks, set 2166 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting 2167 * variables are updated after the blocks have been allocated. 2168 */ 2169 new.b_state = 0; 2170 get_blocks_flags = (EXT4_GET_BLOCKS_CREATE | 2171 EXT4_GET_BLOCKS_DELALLOC_RESERVE); 2172 if (mpd->b_state & (1 << BH_Delay)) 2173 get_blocks_flags |= EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE; 2174 blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks, 2175 &new, get_blocks_flags); 2176 if (blks < 0) { 2177 err = blks; 2178 /* 2179 * If get block returns with error we simply 2180 * return. Later writepage will redirty the page and 2181 * writepages will find the dirty page again 2182 */ 2183 if (err == -EAGAIN) 2184 return 0; 2185 2186 if (err == -ENOSPC && 2187 ext4_count_free_blocks(mpd->inode->i_sb)) { 2188 mpd->retval = err; 2189 return 0; 2190 } 2191 2192 /* 2193 * get block failure will cause us to loop in 2194 * writepages, because a_ops->writepage won't be able 2195 * to make progress. The page will be redirtied by 2196 * writepage and writepages will again try to write 2197 * the same. 2198 */ 2199 printk(KERN_EMERG "%s block allocation failed for inode %lu " 2200 "at logical offset %llu with max blocks " 2201 "%zd with error %d\n", 2202 __func__, mpd->inode->i_ino, 2203 (unsigned long long)next, 2204 mpd->b_size >> mpd->inode->i_blkbits, err); 2205 printk(KERN_EMERG "This should not happen.!! 
" 2206 "Data will be lost\n"); 2207 if (err == -ENOSPC) { 2208 ext4_print_free_blocks(mpd->inode); 2209 } 2210 /* invalidate all the pages */ 2211 ext4_da_block_invalidatepages(mpd, next, 2212 mpd->b_size >> mpd->inode->i_blkbits); 2213 return err; 2214 } 2215 BUG_ON(blks == 0); 2216 2217 new.b_size = (blks << mpd->inode->i_blkbits); 2218 2219 if (buffer_new(&new)) 2220 __unmap_underlying_blocks(mpd->inode, &new); 2221 2222 /* 2223 * If blocks are delayed marked, we need to 2224 * put actual blocknr and drop delayed bit 2225 */ 2226 if ((mpd->b_state & (1 << BH_Delay)) || 2227 (mpd->b_state & (1 << BH_Unwritten))) 2228 mpage_put_bnr_to_bhs(mpd, next, &new); 2229 2230 if (ext4_should_order_data(mpd->inode)) { 2231 err = ext4_jbd2_file_inode(handle, mpd->inode); 2232 if (err) 2233 return err; 2234 } 2235 2236 /* 2237 * Update on-disk size along with block allocation. 2238 */ 2239 disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits; 2240 if (disksize > i_size_read(mpd->inode)) 2241 disksize = i_size_read(mpd->inode); 2242 if (disksize > EXT4_I(mpd->inode)->i_disksize) { 2243 ext4_update_i_disksize(mpd->inode, disksize); 2244 return ext4_mark_inode_dirty(handle, mpd->inode); 2245 } 2246 2247 return 0; 2248 } 2249 2250 #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \ 2251 (1 << BH_Delay) | (1 << BH_Unwritten)) 2252 2253 /* 2254 * mpage_add_bh_to_extent - try to add one more block to extent of blocks 2255 * 2256 * @mpd->lbh - extent of blocks 2257 * @logical - logical number of the block in the file 2258 * @bh - bh of the block (used to access block's state) 2259 * 2260 * the function is used to collect contig. blocks in same state 2261 */ 2262 static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, 2263 sector_t logical, size_t b_size, 2264 unsigned long b_state) 2265 { 2266 sector_t next; 2267 int nrblocks = mpd->b_size >> mpd->inode->i_blkbits; 2268 2269 /* check if thereserved journal credits might overflow */ 2270 if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) { 2271 if (nrblocks >= EXT4_MAX_TRANS_DATA) { 2272 /* 2273 * With non-extent format we are limited by the journal 2274 * credit available. Total credit needed to insert 2275 * nrblocks contiguous blocks is dependent on the 2276 * nrblocks. So limit nrblocks. 2277 */ 2278 goto flush_it; 2279 } else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) > 2280 EXT4_MAX_TRANS_DATA) { 2281 /* 2282 * Adding the new buffer_head would make it cross the 2283 * allowed limit for which we have journal credit 2284 * reserved. So limit the new bh->b_size 2285 */ 2286 b_size = (EXT4_MAX_TRANS_DATA - nrblocks) << 2287 mpd->inode->i_blkbits; 2288 /* we will do mpage_da_submit_io in the next loop */ 2289 } 2290 } 2291 /* 2292 * First block in the extent 2293 */ 2294 if (mpd->b_size == 0) { 2295 mpd->b_blocknr = logical; 2296 mpd->b_size = b_size; 2297 mpd->b_state = b_state & BH_FLAGS; 2298 return; 2299 } 2300 2301 next = mpd->b_blocknr + nrblocks; 2302 /* 2303 * Can we merge the block to our big extent? 
2304 */
2305 if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
2306 mpd->b_size += b_size;
2307 return;
2308 }
2309
2310 flush_it:
2311 /*
2312 * We couldn't merge the block to our extent, so we
2313 * need to flush the current extent and start a new one
2314 */
2315 if (mpage_da_map_blocks(mpd) == 0)
2316 mpage_da_submit_io(mpd);
2317 mpd->io_done = 1;
2318 return;
2319 }
2320
2321 static int ext4_bh_unmapped_or_delay(handle_t *handle, struct buffer_head *bh)
2322 {
2323 /*
2324 * An unmapped buffer is possible for holes.
2325 * A delay buffer is possible with delayed allocation.
2326 * We also need to consider an unwritten buffer as unmapped.
2327 */
2328 return (!buffer_mapped(bh) || buffer_delay(bh) ||
2329 buffer_unwritten(bh)) && buffer_dirty(bh);
2330 }
2331
2332 /*
2333 * __mpage_da_writepage - finds extent of pages and blocks
2334 *
2335 * @page: page to consider
2336 * @wbc: not used, we just follow rules
2337 * @data: context
2338 *
2339 * The function finds extents of pages and scans them for all blocks.
2340 */
2341 static int __mpage_da_writepage(struct page *page,
2342 struct writeback_control *wbc, void *data)
2343 {
2344 struct mpage_da_data *mpd = data;
2345 struct inode *inode = mpd->inode;
2346 struct buffer_head *bh, *head;
2347 sector_t logical;
2348
2349 if (mpd->io_done) {
2350 /*
2351 * The rest of the pages in the page_vec:
2352 * redirty them and skip them. We will try
2353 * to write them again after starting a
2354 * new transaction.
2355 */
2356 redirty_page_for_writepage(wbc, page);
2357 unlock_page(page);
2358 return MPAGE_DA_EXTENT_TAIL;
2359 }
2360 /*
2361 * Can we merge this page into the current extent?
2362 */
2363 if (mpd->next_page != page->index) {
2364 /*
2365 * Nope, we can't. So, we map non-allocated blocks
2366 * and start IO on them using writepage()
2367 */
2368 if (mpd->next_page != mpd->first_page) {
2369 if (mpage_da_map_blocks(mpd) == 0)
2370 mpage_da_submit_io(mpd);
2371 /*
2372 * skip the rest of the pages in the page_vec
2373 */
2374 mpd->io_done = 1;
2375 redirty_page_for_writepage(wbc, page);
2376 unlock_page(page);
2377 return MPAGE_DA_EXTENT_TAIL;
2378 }
2379
2380 /*
2381 * Start next extent of pages ...
2382 */
2383 mpd->first_page = page->index;
2384
2385 /*
2386 * ... and blocks
2387 */
2388 mpd->b_size = 0;
2389 mpd->b_state = 0;
2390 mpd->b_blocknr = 0;
2391 }
2392
2393 mpd->next_page = page->index + 1;
2394 logical = (sector_t) page->index <<
2395 (PAGE_CACHE_SHIFT - inode->i_blkbits);
2396
2397 if (!page_has_buffers(page)) {
2398 mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE,
2399 (1 << BH_Dirty) | (1 << BH_Uptodate));
2400 if (mpd->io_done)
2401 return MPAGE_DA_EXTENT_TAIL;
2402 } else {
2403 /*
2404 * Page with regular buffer heads, just add all dirty ones
2405 */
2406 head = page_buffers(page);
2407 bh = head;
2408 do {
2409 BUG_ON(buffer_locked(bh));
2410 /*
2411 * We need to try to allocate
2412 * unmapped blocks in the same page.
2413 * Otherwise we won't make progress
2414 * with the page in ext4_da_writepage
2415 */
2416 if (ext4_bh_unmapped_or_delay(NULL, bh)) {
2417 mpage_add_bh_to_extent(mpd, logical,
2418 bh->b_size,
2419 bh->b_state);
2420 if (mpd->io_done)
2421 return MPAGE_DA_EXTENT_TAIL;
2422 } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
2423 /*
2424 * A mapped dirty buffer. We need to update
2425 * the b_state because we look at
2426 * b_state in mpage_da_map_blocks. We don't
2427 * update b_size because if we find an
2428 * unmapped buffer_head later we need to
2429 * use the b_state flag of that buffer_head.
2430 */
2431 if (mpd->b_size == 0)
2432 mpd->b_state = bh->b_state & BH_FLAGS;
2433 }
2434 logical++;
2435 } while ((bh = bh->b_this_page) != head);
2436 }
2437
2438 return 0;
2439 }
2440
2441 /*
2442 * This is a special get_blocks_t callback which is used by
2443 * ext4_da_write_begin(). It will either return a mapped block or
2444 * reserve space for a single block.
2445 *
2446 * For a delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
2447 * We also have b_blocknr = -1 and b_bdev initialized properly.
2448 *
2449 * For an unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
2450 * We also have b_blocknr = the physical block mapping the unwritten extent
2451 * and b_bdev initialized properly.
2452 */
2453 static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
2454 struct buffer_head *bh_result, int create)
2455 {
2456 int ret = 0;
2457 sector_t invalid_block = ~((sector_t) 0xffff);
2458
2459 if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
2460 invalid_block = ~0;
2461
2462 BUG_ON(create == 0);
2463 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
2464
2465 /*
2466 * First, we need to know whether the block is already allocated;
2467 * preallocated blocks are unmapped but should be treated
2468 * the same as allocated blocks.
2469 */
2470 ret = ext4_get_blocks(NULL, inode, iblock, 1, bh_result, 0);
2471 if ((ret == 0) && !buffer_delay(bh_result)) {
2472 /* the block isn't (pre)allocated yet, let's reserve space */
2473 /*
2474 * XXX: __block_prepare_write() unmaps passed block,
2475 * is it OK?
2476 */
2477 ret = ext4_da_reserve_space(inode, 1);
2478 if (ret)
2479 /* not enough space to reserve */
2480 return ret;
2481
2482 map_bh(bh_result, inode->i_sb, invalid_block);
2483 set_buffer_new(bh_result);
2484 set_buffer_delay(bh_result);
2485 } else if (ret > 0) {
2486 bh_result->b_size = (ret << inode->i_blkbits);
2487 if (buffer_unwritten(bh_result)) {
2488 /* A delayed write to an unwritten bh should
2489 * be marked new and mapped. Mapped ensures
2490 * that we don't do get_block multiple times
2491 * when we write to the same offset, and new
2492 * ensures that we do the proper zero-out for
2493 * a partial write.
2494 */
2495 set_buffer_new(bh_result);
2496 set_buffer_mapped(bh_result);
2497 }
2498 ret = 0;
2499 }
2500
2501 return ret;
2502 }
2503
2504 /*
2505 * This function is used as a standard get_block_t callback function
2506 * when there is no desire to allocate any blocks. It is used as a
2507 * callback function for block_prepare_write(), nobh_writepage(), and
2508 * block_write_full_page(). These functions should only try to map a
2509 * single block at a time.
2510 *
2511 * Since this function doesn't do block allocations even if the caller
2512 * requests it by passing in create=1, it is critically important that
2513 * any caller checks to make sure that any buffer heads returned
2514 * by this function are either all already mapped or marked for
2515 * delayed allocation before calling nobh_writepage() or
2516 * block_write_full_page(). Otherwise, b_blocknr could be left
2517 * uninitialized, and the page write functions will be taken by
2518 * surprise.
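*
* A minimal usage sketch (illustrative only - the buffer_head must
* describe exactly one filesystem block, as the BUG_ON below
* enforces):
*
*	struct buffer_head bh = { .b_size = inode->i_sb->s_blocksize };
*	int err = noalloc_get_block_write(inode, iblock, &bh, 0);
*
* On a successful return, buffer_mapped(&bh) tells us whether the
* block already has a disk location; no allocation ever happens here,
* whatever value of create was passed in.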
2519 */ 2520 static int noalloc_get_block_write(struct inode *inode, sector_t iblock, 2521 struct buffer_head *bh_result, int create) 2522 { 2523 int ret = 0; 2524 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; 2525 2526 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize); 2527 2528 /* 2529 * we don't want to do block allocation in writepage 2530 * so call get_block_wrap with create = 0 2531 */ 2532 ret = ext4_get_blocks(NULL, inode, iblock, max_blocks, bh_result, 0); 2533 BUG_ON(create && ret == 0); 2534 if (ret > 0) { 2535 bh_result->b_size = (ret << inode->i_blkbits); 2536 ret = 0; 2537 } 2538 return ret; 2539 } 2540 2541 /* 2542 * This function can get called via... 2543 * - ext4_da_writepages after taking page lock (have journal handle) 2544 * - journal_submit_inode_data_buffers (no journal handle) 2545 * - shrink_page_list via pdflush (no journal handle) 2546 * - grab_page_cache when doing write_begin (have journal handle) 2547 */ 2548 static int ext4_da_writepage(struct page *page, 2549 struct writeback_control *wbc) 2550 { 2551 int ret = 0; 2552 loff_t size; 2553 unsigned int len; 2554 struct buffer_head *page_bufs; 2555 struct inode *inode = page->mapping->host; 2556 2557 trace_mark(ext4_da_writepage, 2558 "dev %s ino %lu page_index %lu", 2559 inode->i_sb->s_id, inode->i_ino, page->index); 2560 size = i_size_read(inode); 2561 if (page->index == size >> PAGE_CACHE_SHIFT) 2562 len = size & ~PAGE_CACHE_MASK; 2563 else 2564 len = PAGE_CACHE_SIZE; 2565 2566 if (page_has_buffers(page)) { 2567 page_bufs = page_buffers(page); 2568 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL, 2569 ext4_bh_unmapped_or_delay)) { 2570 /* 2571 * We don't want to do block allocation 2572 * So redirty the page and return 2573 * We may reach here when we do a journal commit 2574 * via journal_submit_inode_data_buffers. 2575 * If we don't have mapping block we just ignore 2576 * them. We can also reach here via shrink_page_list 2577 */ 2578 redirty_page_for_writepage(wbc, page); 2579 unlock_page(page); 2580 return 0; 2581 } 2582 } else { 2583 /* 2584 * The test for page_has_buffers() is subtle: 2585 * We know the page is dirty but it lost buffers. That means 2586 * that at some moment in time after write_begin()/write_end() 2587 * has been called all buffers have been clean and thus they 2588 * must have been written at least once. So they are all 2589 * mapped and we can happily proceed with mapping them 2590 * and writing the page. 2591 * 2592 * Try to initialize the buffer_heads and check whether 2593 * all are mapped and non delay. We don't want to 2594 * do block allocation here. 
2595 */ 2596 ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE, 2597 noalloc_get_block_write); 2598 if (!ret) { 2599 page_bufs = page_buffers(page); 2600 /* check whether all are mapped and non delay */ 2601 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL, 2602 ext4_bh_unmapped_or_delay)) { 2603 redirty_page_for_writepage(wbc, page); 2604 unlock_page(page); 2605 return 0; 2606 } 2607 } else { 2608 /* 2609 * We can't do block allocation here 2610 * so just redity the page and unlock 2611 * and return 2612 */ 2613 redirty_page_for_writepage(wbc, page); 2614 unlock_page(page); 2615 return 0; 2616 } 2617 /* now mark the buffer_heads as dirty and uptodate */ 2618 block_commit_write(page, 0, PAGE_CACHE_SIZE); 2619 } 2620 2621 if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode)) 2622 ret = nobh_writepage(page, noalloc_get_block_write, wbc); 2623 else 2624 ret = block_write_full_page(page, noalloc_get_block_write, 2625 wbc); 2626 2627 return ret; 2628 } 2629 2630 /* 2631 * This is called via ext4_da_writepages() to 2632 * calulate the total number of credits to reserve to fit 2633 * a single extent allocation into a single transaction, 2634 * ext4_da_writpeages() will loop calling this before 2635 * the block allocation. 2636 */ 2637 2638 static int ext4_da_writepages_trans_blocks(struct inode *inode) 2639 { 2640 int max_blocks = EXT4_I(inode)->i_reserved_data_blocks; 2641 2642 /* 2643 * With non-extent format the journal credit needed to 2644 * insert nrblocks contiguous block is dependent on 2645 * number of contiguous block. So we will limit 2646 * number of contiguous block to a sane value 2647 */ 2648 if (!(inode->i_flags & EXT4_EXTENTS_FL) && 2649 (max_blocks > EXT4_MAX_TRANS_DATA)) 2650 max_blocks = EXT4_MAX_TRANS_DATA; 2651 2652 return ext4_chunk_trans_blocks(inode, max_blocks); 2653 } 2654 2655 static int ext4_da_writepages(struct address_space *mapping, 2656 struct writeback_control *wbc) 2657 { 2658 pgoff_t index; 2659 int range_whole = 0; 2660 handle_t *handle = NULL; 2661 struct mpage_da_data mpd; 2662 struct inode *inode = mapping->host; 2663 int no_nrwrite_index_update; 2664 int pages_written = 0; 2665 long pages_skipped; 2666 int range_cyclic, cycled = 1, io_done = 0; 2667 int needed_blocks, ret = 0, nr_to_writebump = 0; 2668 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); 2669 2670 trace_mark(ext4_da_writepages, 2671 "dev %s ino %lu nr_t_write %ld " 2672 "pages_skipped %ld range_start %llu " 2673 "range_end %llu nonblocking %d " 2674 "for_kupdate %d for_reclaim %d " 2675 "for_writepages %d range_cyclic %d", 2676 inode->i_sb->s_id, inode->i_ino, 2677 wbc->nr_to_write, wbc->pages_skipped, 2678 (unsigned long long) wbc->range_start, 2679 (unsigned long long) wbc->range_end, 2680 wbc->nonblocking, wbc->for_kupdate, 2681 wbc->for_reclaim, wbc->for_writepages, 2682 wbc->range_cyclic); 2683 2684 /* 2685 * No pages to write? This is mainly a kludge to avoid starting 2686 * a transaction for special inodes like journal inode on last iput() 2687 * because that could violate lock ordering on umount 2688 */ 2689 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 2690 return 0; 2691 2692 /* 2693 * If the filesystem has aborted, it is read-only, so return 2694 * right away instead of dumping stack traces later on that 2695 * will obscure the real source of the problem. 
We test 2696 * EXT4_MOUNT_ABORT instead of sb->s_flag's MS_RDONLY because 2697 * the latter could be true if the filesystem is mounted 2698 * read-only, and in that case, ext4_da_writepages should 2699 * *never* be called, so if that ever happens, we would want 2700 * the stack trace. 2701 */ 2702 if (unlikely(sbi->s_mount_opt & EXT4_MOUNT_ABORT)) 2703 return -EROFS; 2704 2705 /* 2706 * Make sure nr_to_write is >= sbi->s_mb_stream_request 2707 * This make sure small files blocks are allocated in 2708 * single attempt. This ensure that small files 2709 * get less fragmented. 2710 */ 2711 if (wbc->nr_to_write < sbi->s_mb_stream_request) { 2712 nr_to_writebump = sbi->s_mb_stream_request - wbc->nr_to_write; 2713 wbc->nr_to_write = sbi->s_mb_stream_request; 2714 } 2715 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 2716 range_whole = 1; 2717 2718 range_cyclic = wbc->range_cyclic; 2719 if (wbc->range_cyclic) { 2720 index = mapping->writeback_index; 2721 if (index) 2722 cycled = 0; 2723 wbc->range_start = index << PAGE_CACHE_SHIFT; 2724 wbc->range_end = LLONG_MAX; 2725 wbc->range_cyclic = 0; 2726 } else 2727 index = wbc->range_start >> PAGE_CACHE_SHIFT; 2728 2729 mpd.wbc = wbc; 2730 mpd.inode = mapping->host; 2731 2732 /* 2733 * we don't want write_cache_pages to update 2734 * nr_to_write and writeback_index 2735 */ 2736 no_nrwrite_index_update = wbc->no_nrwrite_index_update; 2737 wbc->no_nrwrite_index_update = 1; 2738 pages_skipped = wbc->pages_skipped; 2739 2740 retry: 2741 while (!ret && wbc->nr_to_write > 0) { 2742 2743 /* 2744 * we insert one extent at a time. So we need 2745 * credit needed for single extent allocation. 2746 * journalled mode is currently not supported 2747 * by delalloc 2748 */ 2749 BUG_ON(ext4_should_journal_data(inode)); 2750 needed_blocks = ext4_da_writepages_trans_blocks(inode); 2751 2752 /* start a new transaction*/ 2753 handle = ext4_journal_start(inode, needed_blocks); 2754 if (IS_ERR(handle)) { 2755 ret = PTR_ERR(handle); 2756 printk(KERN_CRIT "%s: jbd2_start: " 2757 "%ld pages, ino %lu; err %d\n", __func__, 2758 wbc->nr_to_write, inode->i_ino, ret); 2759 dump_stack(); 2760 goto out_writepages; 2761 } 2762 2763 /* 2764 * Now call __mpage_da_writepage to find the next 2765 * contiguous region of logical blocks that need 2766 * blocks to be allocated by ext4. We don't actually 2767 * submit the blocks for I/O here, even though 2768 * write_cache_pages thinks it will, and will set the 2769 * pages as clean for write before calling 2770 * __mpage_da_writepage(). 2771 */ 2772 mpd.b_size = 0; 2773 mpd.b_state = 0; 2774 mpd.b_blocknr = 0; 2775 mpd.first_page = 0; 2776 mpd.next_page = 0; 2777 mpd.io_done = 0; 2778 mpd.pages_written = 0; 2779 mpd.retval = 0; 2780 ret = write_cache_pages(mapping, wbc, __mpage_da_writepage, 2781 &mpd); 2782 /* 2783 * If we have a contigous extent of pages and we 2784 * haven't done the I/O yet, map the blocks and submit 2785 * them for I/O. 
2786 */ 2787 if (!mpd.io_done && mpd.next_page != mpd.first_page) { 2788 if (mpage_da_map_blocks(&mpd) == 0) 2789 mpage_da_submit_io(&mpd); 2790 mpd.io_done = 1; 2791 ret = MPAGE_DA_EXTENT_TAIL; 2792 } 2793 wbc->nr_to_write -= mpd.pages_written; 2794 2795 ext4_journal_stop(handle); 2796 2797 if ((mpd.retval == -ENOSPC) && sbi->s_journal) { 2798 /* commit the transaction which would 2799 * free blocks released in the transaction 2800 * and try again 2801 */ 2802 jbd2_journal_force_commit_nested(sbi->s_journal); 2803 wbc->pages_skipped = pages_skipped; 2804 ret = 0; 2805 } else if (ret == MPAGE_DA_EXTENT_TAIL) { 2806 /* 2807 * got one extent now try with 2808 * rest of the pages 2809 */ 2810 pages_written += mpd.pages_written; 2811 wbc->pages_skipped = pages_skipped; 2812 ret = 0; 2813 io_done = 1; 2814 } else if (wbc->nr_to_write) 2815 /* 2816 * There is no more writeout needed 2817 * or we requested for a noblocking writeout 2818 * and we found the device congested 2819 */ 2820 break; 2821 } 2822 if (!io_done && !cycled) { 2823 cycled = 1; 2824 index = 0; 2825 wbc->range_start = index << PAGE_CACHE_SHIFT; 2826 wbc->range_end = mapping->writeback_index - 1; 2827 goto retry; 2828 } 2829 if (pages_skipped != wbc->pages_skipped) 2830 printk(KERN_EMERG "This should not happen leaving %s " 2831 "with nr_to_write = %ld ret = %d\n", 2832 __func__, wbc->nr_to_write, ret); 2833 2834 /* Update index */ 2835 index += pages_written; 2836 wbc->range_cyclic = range_cyclic; 2837 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 2838 /* 2839 * set the writeback_index so that range_cyclic 2840 * mode will write it back later 2841 */ 2842 mapping->writeback_index = index; 2843 2844 out_writepages: 2845 if (!no_nrwrite_index_update) 2846 wbc->no_nrwrite_index_update = 0; 2847 wbc->nr_to_write -= nr_to_writebump; 2848 trace_mark(ext4_da_writepage_result, 2849 "dev %s ino %lu ret %d pages_written %d " 2850 "pages_skipped %ld congestion %d " 2851 "more_io %d no_nrwrite_index_update %d", 2852 inode->i_sb->s_id, inode->i_ino, ret, 2853 pages_written, wbc->pages_skipped, 2854 wbc->encountered_congestion, wbc->more_io, 2855 wbc->no_nrwrite_index_update); 2856 return ret; 2857 } 2858 2859 #define FALL_BACK_TO_NONDELALLOC 1 2860 static int ext4_nonda_switch(struct super_block *sb) 2861 { 2862 s64 free_blocks, dirty_blocks; 2863 struct ext4_sb_info *sbi = EXT4_SB(sb); 2864 2865 /* 2866 * switch to non delalloc mode if we are running low 2867 * on free block. The free block accounting via percpu 2868 * counters can get slightly wrong with percpu_counter_batch getting 2869 * accumulated on each CPU without updating global counters 2870 * Delalloc need an accurate free block accounting. So switch 2871 * to non delalloc when we are near to error range. 
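*
* Worked example with made-up numbers: with 300 delalloc-reserved
* dirty blocks outstanding, we stay on the delalloc path only while
*
*	2 * free_blocks >= 3 * 300	(i.e. free_blocks >= 450)
*
* and while free_blocks also exceeds 300 + EXT4_FREEBLOCKS_WATERMARK;
* otherwise the check below makes the caller fall back to the
* non-delalloc write path.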
2872 */
2873 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
2874 dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter);
2875 if (2 * free_blocks < 3 * dirty_blocks ||
2876 free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) {
2877 /*
2878 * The free block count is less than 150% of the dirty blocks,
2879 * or the free block count is below the watermark.
2880 */
2881 return 1;
2882 }
2883 return 0;
2884 }
2885
2886 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2887 loff_t pos, unsigned len, unsigned flags,
2888 struct page **pagep, void **fsdata)
2889 {
2890 int ret, retries = 0;
2891 struct page *page;
2892 pgoff_t index;
2893 unsigned from, to;
2894 struct inode *inode = mapping->host;
2895 handle_t *handle;
2896
2897 index = pos >> PAGE_CACHE_SHIFT;
2898 from = pos & (PAGE_CACHE_SIZE - 1);
2899 to = from + len;
2900
2901 if (ext4_nonda_switch(inode->i_sb)) {
2902 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2903 return ext4_write_begin(file, mapping, pos,
2904 len, flags, pagep, fsdata);
2905 }
2906 *fsdata = (void *)0;
2907
2908 trace_mark(ext4_da_write_begin,
2909 "dev %s ino %lu pos %llu len %u flags %u",
2910 inode->i_sb->s_id, inode->i_ino,
2911 (unsigned long long) pos, len, flags);
2912 retry:
2913 /*
2914 * With delayed allocation, we don't log the i_disksize update
2915 * if there is delayed block allocation. But we still need
2916 * to journal the i_disksize update for a write to the end of
2917 * the file that hits an already mapped buffer.
2918 */
2919 handle = ext4_journal_start(inode, 1);
2920 if (IS_ERR(handle)) {
2921 ret = PTR_ERR(handle);
2922 goto out;
2923 }
2924 /* We cannot recurse into the filesystem as the transaction is already
2925 * started */
2926 flags |= AOP_FLAG_NOFS;
2927
2928 page = grab_cache_page_write_begin(mapping, index, flags);
2929 if (!page) {
2930 ext4_journal_stop(handle);
2931 ret = -ENOMEM;
2932 goto out;
2933 }
2934 *pagep = page;
2935
2936 ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
2937 ext4_da_get_block_prep);
2938 if (ret < 0) {
2939 unlock_page(page);
2940 ext4_journal_stop(handle);
2941 page_cache_release(page);
2942 /*
2943 * block_write_begin may have instantiated a few blocks
2944 * outside i_size. Trim these off again. We don't need
2945 * i_size_read because we hold i_mutex.
2946 */ 2947 if (pos + len > inode->i_size) 2948 vmtruncate(inode, inode->i_size); 2949 } 2950 2951 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 2952 goto retry; 2953 out: 2954 return ret; 2955 } 2956 2957 /* 2958 * Check if we should update i_disksize 2959 * when write to the end of file but not require block allocation 2960 */ 2961 static int ext4_da_should_update_i_disksize(struct page *page, 2962 unsigned long offset) 2963 { 2964 struct buffer_head *bh; 2965 struct inode *inode = page->mapping->host; 2966 unsigned int idx; 2967 int i; 2968 2969 bh = page_buffers(page); 2970 idx = offset >> inode->i_blkbits; 2971 2972 for (i = 0; i < idx; i++) 2973 bh = bh->b_this_page; 2974 2975 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh)) 2976 return 0; 2977 return 1; 2978 } 2979 2980 static int ext4_da_write_end(struct file *file, 2981 struct address_space *mapping, 2982 loff_t pos, unsigned len, unsigned copied, 2983 struct page *page, void *fsdata) 2984 { 2985 struct inode *inode = mapping->host; 2986 int ret = 0, ret2; 2987 handle_t *handle = ext4_journal_current_handle(); 2988 loff_t new_i_size; 2989 unsigned long start, end; 2990 int write_mode = (int)(unsigned long)fsdata; 2991 2992 if (write_mode == FALL_BACK_TO_NONDELALLOC) { 2993 if (ext4_should_order_data(inode)) { 2994 return ext4_ordered_write_end(file, mapping, pos, 2995 len, copied, page, fsdata); 2996 } else if (ext4_should_writeback_data(inode)) { 2997 return ext4_writeback_write_end(file, mapping, pos, 2998 len, copied, page, fsdata); 2999 } else { 3000 BUG(); 3001 } 3002 } 3003 3004 trace_mark(ext4_da_write_end, 3005 "dev %s ino %lu pos %llu len %u copied %u", 3006 inode->i_sb->s_id, inode->i_ino, 3007 (unsigned long long) pos, len, copied); 3008 start = pos & (PAGE_CACHE_SIZE - 1); 3009 end = start + copied - 1; 3010 3011 /* 3012 * generic_write_end() will run mark_inode_dirty() if i_size 3013 * changes. So let's piggyback the i_disksize mark_inode_dirty 3014 * into that. 3015 */ 3016 3017 new_i_size = pos + copied; 3018 if (new_i_size > EXT4_I(inode)->i_disksize) { 3019 if (ext4_da_should_update_i_disksize(page, end)) { 3020 down_write(&EXT4_I(inode)->i_data_sem); 3021 if (new_i_size > EXT4_I(inode)->i_disksize) { 3022 /* 3023 * Updating i_disksize when extending file 3024 * without needing block allocation 3025 */ 3026 if (ext4_should_order_data(inode)) 3027 ret = ext4_jbd2_file_inode(handle, 3028 inode); 3029 3030 EXT4_I(inode)->i_disksize = new_i_size; 3031 } 3032 up_write(&EXT4_I(inode)->i_data_sem); 3033 /* We need to mark inode dirty even if 3034 * new_i_size is less that inode->i_size 3035 * bu greater than i_disksize.(hint delalloc) 3036 */ 3037 ext4_mark_inode_dirty(handle, inode); 3038 } 3039 } 3040 ret2 = generic_write_end(file, mapping, pos, len, copied, 3041 page, fsdata); 3042 copied = ret2; 3043 if (ret2 < 0) 3044 ret = ret2; 3045 ret2 = ext4_journal_stop(handle); 3046 if (!ret) 3047 ret = ret2; 3048 3049 return ret ? ret : copied; 3050 } 3051 3052 static void ext4_da_invalidatepage(struct page *page, unsigned long offset) 3053 { 3054 /* 3055 * Drop reserved blocks 3056 */ 3057 BUG_ON(!PageLocked(page)); 3058 if (!page_has_buffers(page)) 3059 goto out; 3060 3061 ext4_da_page_release_reservation(page, offset); 3062 3063 out: 3064 ext4_invalidatepage(page, offset); 3065 3066 return; 3067 } 3068 3069 /* 3070 * Force all delayed allocation blocks to be allocated for a given inode. 
3071 */ 3072 int ext4_alloc_da_blocks(struct inode *inode) 3073 { 3074 if (!EXT4_I(inode)->i_reserved_data_blocks && 3075 !EXT4_I(inode)->i_reserved_meta_blocks) 3076 return 0; 3077 3078 /* 3079 * We do something simple for now. The filemap_flush() will 3080 * also start triggering a write of the data blocks, which is 3081 * not strictly speaking necessary (and for users of 3082 * laptop_mode, not even desirable). However, to do otherwise 3083 * would require replicating code paths in: 3084 * 3085 * ext4_da_writepages() -> 3086 * write_cache_pages() ---> (via passed in callback function) 3087 * __mpage_da_writepage() --> 3088 * mpage_add_bh_to_extent() 3089 * mpage_da_map_blocks() 3090 * 3091 * The problem is that write_cache_pages(), located in 3092 * mm/page-writeback.c, marks pages clean in preparation for 3093 * doing I/O, which is not desirable if we're not planning on 3094 * doing I/O at all. 3095 * 3096 * We could call write_cache_pages(), and then redirty all of 3097 * the pages by calling redirty_page_for_writeback() but that 3098 * would be ugly in the extreme. So instead we would need to 3099 * replicate parts of the code in the above functions, 3100 * simplifying them becuase we wouldn't actually intend to 3101 * write out the pages, but rather only collect contiguous 3102 * logical block extents, call the multi-block allocator, and 3103 * then update the buffer heads with the block allocations. 3104 * 3105 * For now, though, we'll cheat by calling filemap_flush(), 3106 * which will map the blocks, and start the I/O, but not 3107 * actually wait for the I/O to complete. 3108 */ 3109 return filemap_flush(inode->i_mapping); 3110 } 3111 3112 /* 3113 * bmap() is special. It gets used by applications such as lilo and by 3114 * the swapper to find the on-disk block of a specific piece of data. 3115 * 3116 * Naturally, this is dangerous if the block concerned is still in the 3117 * journal. If somebody makes a swapfile on an ext4 data-journaling 3118 * filesystem and enables swap, then they may get a nasty shock when the 3119 * data getting swapped to that swapfile suddenly gets overwritten by 3120 * the original zero's written out previously to the journal and 3121 * awaiting writeback in the kernel's buffer cache. 3122 * 3123 * So, if we see any bmap calls here on a modified, data-journaled file, 3124 * take extra steps to flush any blocks which might be in the cache. 3125 */ 3126 static sector_t ext4_bmap(struct address_space *mapping, sector_t block) 3127 { 3128 struct inode *inode = mapping->host; 3129 journal_t *journal; 3130 int err; 3131 3132 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && 3133 test_opt(inode->i_sb, DELALLOC)) { 3134 /* 3135 * With delalloc we want to sync the file 3136 * so that we can make sure we allocate 3137 * blocks for file 3138 */ 3139 filemap_write_and_wait(mapping); 3140 } 3141 3142 if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) { 3143 /* 3144 * This is a REALLY heavyweight approach, but the use of 3145 * bmap on dirty files is expected to be extremely rare: 3146 * only if we run lilo or swapon on a freshly made file 3147 * do we expect this to happen. 3148 * 3149 * (bmap requires CAP_SYS_RAWIO so this does not 3150 * represent an unprivileged user DOS attack --- we'd be 3151 * in trouble if mortal users could trigger this path at 3152 * will.) 3153 * 3154 * NB. EXT4_STATE_JDATA is not set on files other than 3155 * regular files. 
If somebody wants to bmap a directory
3156 * or symlink and gets confused because the buffer
3157 * hasn't yet been flushed to disk, they deserve
3158 * everything they get.
3159 */
3160
3161 EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
3162 journal = EXT4_JOURNAL(inode);
3163 jbd2_journal_lock_updates(journal);
3164 err = jbd2_journal_flush(journal);
3165 jbd2_journal_unlock_updates(journal);
3166
3167 if (err)
3168 return 0;
3169 }
3170
3171 return generic_block_bmap(mapping, block, ext4_get_block);
3172 }
3173
3174 static int bget_one(handle_t *handle, struct buffer_head *bh)
3175 {
3176 get_bh(bh);
3177 return 0;
3178 }
3179
3180 static int bput_one(handle_t *handle, struct buffer_head *bh)
3181 {
3182 put_bh(bh);
3183 return 0;
3184 }
3185
3186 /*
3187 * Note that we don't need to start a transaction unless we're journaling data
3188 * because we should have holes filled from ext4_page_mkwrite(). We don't even
3189 * need to file the inode on the transaction's list in ordered mode because, if
3190 * we are writing back data added by write(), the inode is already there, and if
3191 * we are writing back data modified via mmap(), no one guarantees in which
3192 * transaction the data will hit the disk. In case we are journaling data, we
3193 * cannot start a transaction directly because transaction start ranks above the
3194 * page lock, so we have to do some magic.
3195 *
3196 * In all journaling modes block_write_full_page() will start the I/O.
3197 *
3198 * Problem:
3199 *
3200 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
3201 * ext4_writepage()
3202 *
3203 * Similar for:
3204 *
3205 * ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
3206 *
3207 * The same applies to ext4_get_block(). We will deadlock on various things
3208 * like lock_journal and i_data_sem.
3209 *
3210 * Setting PF_MEMALLOC here doesn't work - too many internal memory
3211 * allocations fail.
3212 *
3213 * 16May01: If we're reentered then journal_current_handle() will be
3214 * non-zero. We simply *return*.
3215 *
3216 * 1 July 2001: @@@ FIXME:
3217 * In journalled data mode, a data buffer may be metadata against the
3218 * current transaction. But the same file is part of a shared mapping
3219 * and someone does a writepage() on it.
3220 *
3221 * We will move the buffer onto the async_data list, but *after* it has
3222 * been dirtied. So there's a small window where we have dirty data on
3223 * BJ_Metadata.
3224 *
3225 * Note that this only applies to the last partial page in the file - the
3226 * bit which block_write_full_page() uses prepare/commit for. (That's
3227 * broken code anyway: it's wrong for msync()).
3228 *
3229 * It's a rare case: it affects the final partial page, for journalled data,
3230 * where the file is subject to both write() and writepage() in the same
3231 * transaction. To fix it we'll need a custom block_write_full_page().
3232 * We'll probably need that anyway for journalling writepage() output.
3233 *
3234 * We don't honour synchronous mounts for writepage(). That would be
3235 * disastrous. Any write() or metadata operation will sync the fs for
3236 * us.
3237 * 3238 */ 3239 static int __ext4_normal_writepage(struct page *page, 3240 struct writeback_control *wbc) 3241 { 3242 struct inode *inode = page->mapping->host; 3243 3244 if (test_opt(inode->i_sb, NOBH)) 3245 return nobh_writepage(page, noalloc_get_block_write, wbc); 3246 else 3247 return block_write_full_page(page, noalloc_get_block_write, 3248 wbc); 3249 } 3250 3251 static int ext4_normal_writepage(struct page *page, 3252 struct writeback_control *wbc) 3253 { 3254 struct inode *inode = page->mapping->host; 3255 loff_t size = i_size_read(inode); 3256 loff_t len; 3257 3258 trace_mark(ext4_normal_writepage, 3259 "dev %s ino %lu page_index %lu", 3260 inode->i_sb->s_id, inode->i_ino, page->index); 3261 J_ASSERT(PageLocked(page)); 3262 if (page->index == size >> PAGE_CACHE_SHIFT) 3263 len = size & ~PAGE_CACHE_MASK; 3264 else 3265 len = PAGE_CACHE_SIZE; 3266 3267 if (page_has_buffers(page)) { 3268 /* if page has buffers it should all be mapped 3269 * and allocated. If there are not buffers attached 3270 * to the page we know the page is dirty but it lost 3271 * buffers. That means that at some moment in time 3272 * after write_begin() / write_end() has been called 3273 * all buffers have been clean and thus they must have been 3274 * written at least once. So they are all mapped and we can 3275 * happily proceed with mapping them and writing the page. 3276 */ 3277 BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, 3278 ext4_bh_unmapped_or_delay)); 3279 } 3280 3281 if (!ext4_journal_current_handle()) 3282 return __ext4_normal_writepage(page, wbc); 3283 3284 redirty_page_for_writepage(wbc, page); 3285 unlock_page(page); 3286 return 0; 3287 } 3288 3289 static int __ext4_journalled_writepage(struct page *page, 3290 struct writeback_control *wbc) 3291 { 3292 struct address_space *mapping = page->mapping; 3293 struct inode *inode = mapping->host; 3294 struct buffer_head *page_bufs; 3295 handle_t *handle = NULL; 3296 int ret = 0; 3297 int err; 3298 3299 ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE, 3300 noalloc_get_block_write); 3301 if (ret != 0) 3302 goto out_unlock; 3303 3304 page_bufs = page_buffers(page); 3305 walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE, NULL, 3306 bget_one); 3307 /* As soon as we unlock the page, it can go away, but we have 3308 * references to buffers so we are safe */ 3309 unlock_page(page); 3310 3311 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); 3312 if (IS_ERR(handle)) { 3313 ret = PTR_ERR(handle); 3314 goto out; 3315 } 3316 3317 ret = walk_page_buffers(handle, page_bufs, 0, 3318 PAGE_CACHE_SIZE, NULL, do_journal_get_write_access); 3319 3320 err = walk_page_buffers(handle, page_bufs, 0, 3321 PAGE_CACHE_SIZE, NULL, write_end_fn); 3322 if (ret == 0) 3323 ret = err; 3324 err = ext4_journal_stop(handle); 3325 if (!ret) 3326 ret = err; 3327 3328 walk_page_buffers(handle, page_bufs, 0, 3329 PAGE_CACHE_SIZE, NULL, bput_one); 3330 EXT4_I(inode)->i_state |= EXT4_STATE_JDATA; 3331 goto out; 3332 3333 out_unlock: 3334 unlock_page(page); 3335 out: 3336 return ret; 3337 } 3338 3339 static int ext4_journalled_writepage(struct page *page, 3340 struct writeback_control *wbc) 3341 { 3342 struct inode *inode = page->mapping->host; 3343 loff_t size = i_size_read(inode); 3344 loff_t len; 3345 3346 trace_mark(ext4_journalled_writepage, 3347 "dev %s ino %lu page_index %lu", 3348 inode->i_sb->s_id, inode->i_ino, page->index); 3349 J_ASSERT(PageLocked(page)); 3350 if (page->index == size >> PAGE_CACHE_SHIFT) 3351 len = size & 
~PAGE_CACHE_MASK; 3352 else 3353 len = PAGE_CACHE_SIZE; 3354 3355 if (page_has_buffers(page)) { 3356 /* if page has buffers it should all be mapped 3357 * and allocated. If there are not buffers attached 3358 * to the page we know the page is dirty but it lost 3359 * buffers. That means that at some moment in time 3360 * after write_begin() / write_end() has been called 3361 * all buffers have been clean and thus they must have been 3362 * written at least once. So they are all mapped and we can 3363 * happily proceed with mapping them and writing the page. 3364 */ 3365 BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, 3366 ext4_bh_unmapped_or_delay)); 3367 } 3368 3369 if (ext4_journal_current_handle()) 3370 goto no_write; 3371 3372 if (PageChecked(page)) { 3373 /* 3374 * It's mmapped pagecache. Add buffers and journal it. There 3375 * doesn't seem much point in redirtying the page here. 3376 */ 3377 ClearPageChecked(page); 3378 return __ext4_journalled_writepage(page, wbc); 3379 } else { 3380 /* 3381 * It may be a page full of checkpoint-mode buffers. We don't 3382 * really know unless we go poke around in the buffer_heads. 3383 * But block_write_full_page will do the right thing. 3384 */ 3385 return block_write_full_page(page, noalloc_get_block_write, 3386 wbc); 3387 } 3388 no_write: 3389 redirty_page_for_writepage(wbc, page); 3390 unlock_page(page); 3391 return 0; 3392 } 3393 3394 static int ext4_readpage(struct file *file, struct page *page) 3395 { 3396 return mpage_readpage(page, ext4_get_block); 3397 } 3398 3399 static int 3400 ext4_readpages(struct file *file, struct address_space *mapping, 3401 struct list_head *pages, unsigned nr_pages) 3402 { 3403 return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); 3404 } 3405 3406 static void ext4_invalidatepage(struct page *page, unsigned long offset) 3407 { 3408 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3409 3410 /* 3411 * If it's a full truncate we just forget about the pending dirtying 3412 */ 3413 if (offset == 0) 3414 ClearPageChecked(page); 3415 3416 if (journal) 3417 jbd2_journal_invalidatepage(journal, page, offset); 3418 else 3419 block_invalidatepage(page, offset); 3420 } 3421 3422 static int ext4_releasepage(struct page *page, gfp_t wait) 3423 { 3424 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3425 3426 WARN_ON(PageChecked(page)); 3427 if (!page_has_buffers(page)) 3428 return 0; 3429 if (journal) 3430 return jbd2_journal_try_to_free_buffers(journal, page, wait); 3431 else 3432 return try_to_free_buffers(page); 3433 } 3434 3435 /* 3436 * If the O_DIRECT write will extend the file then add this inode to the 3437 * orphan list. So recovery will truncate it back to the original size 3438 * if the machine crashes during the write. 3439 * 3440 * If the O_DIRECT write is intantiating holes inside i_size and the machine 3441 * crashes then stale disk data _may_ be exposed inside the file. But current 3442 * VFS code falls back into buffered path in that case so we are safe. 
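*
* In outline, the crash-safety protocol implemented below for a
* size-extending write is:
*
*	1) start a small transaction, put the inode on the orphan
*	   list and note i_disksize = i_size;
*	2) run blockdev_direct_IO();
*	3) drop the orphan record and, if the write succeeded, push
*	   i_size and i_disksize out to the new end of file.
*
* If we crash between 1) and 3), recovery truncates the file back to
* its original size, so no partially written tail is ever exposed.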
3443 */ 3444 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, 3445 const struct iovec *iov, loff_t offset, 3446 unsigned long nr_segs) 3447 { 3448 struct file *file = iocb->ki_filp; 3449 struct inode *inode = file->f_mapping->host; 3450 struct ext4_inode_info *ei = EXT4_I(inode); 3451 handle_t *handle; 3452 ssize_t ret; 3453 int orphan = 0; 3454 size_t count = iov_length(iov, nr_segs); 3455 3456 if (rw == WRITE) { 3457 loff_t final_size = offset + count; 3458 3459 if (final_size > inode->i_size) { 3460 /* Credits for sb + inode write */ 3461 handle = ext4_journal_start(inode, 2); 3462 if (IS_ERR(handle)) { 3463 ret = PTR_ERR(handle); 3464 goto out; 3465 } 3466 ret = ext4_orphan_add(handle, inode); 3467 if (ret) { 3468 ext4_journal_stop(handle); 3469 goto out; 3470 } 3471 orphan = 1; 3472 ei->i_disksize = inode->i_size; 3473 ext4_journal_stop(handle); 3474 } 3475 } 3476 3477 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 3478 offset, nr_segs, 3479 ext4_get_block, NULL); 3480 3481 if (orphan) { 3482 int err; 3483 3484 /* Credits for sb + inode write */ 3485 handle = ext4_journal_start(inode, 2); 3486 if (IS_ERR(handle)) { 3487 /* This is really bad luck. We've written the data 3488 * but cannot extend i_size. Bail out and pretend 3489 * the write failed... */ 3490 ret = PTR_ERR(handle); 3491 goto out; 3492 } 3493 if (inode->i_nlink) 3494 ext4_orphan_del(handle, inode); 3495 if (ret > 0) { 3496 loff_t end = offset + ret; 3497 if (end > inode->i_size) { 3498 ei->i_disksize = end; 3499 i_size_write(inode, end); 3500 /* 3501 * We're going to return a positive `ret' 3502 * here due to non-zero-length I/O, so there's 3503 * no way of reporting error returns from 3504 * ext4_mark_inode_dirty() to userspace. So 3505 * ignore it. 3506 */ 3507 ext4_mark_inode_dirty(handle, inode); 3508 } 3509 } 3510 err = ext4_journal_stop(handle); 3511 if (ret == 0) 3512 ret = err; 3513 } 3514 out: 3515 return ret; 3516 } 3517 3518 /* 3519 * Pages can be marked dirty completely asynchronously from ext4's journalling 3520 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do 3521 * much here because ->set_page_dirty is called under VFS locks. The page is 3522 * not necessarily locked. 3523 * 3524 * We cannot just dirty the page and leave attached buffers clean, because the 3525 * buffers' dirty state is "definitive". We cannot just set the buffers dirty 3526 * or jbddirty because all the journalling code will explode. 3527 * 3528 * So what we do is to mark the page "pending dirty" and next time writepage 3529 * is called, propagate that into the buffers appropriately. 
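*
* "Pending dirty" is recorded with the PageChecked flag: the hook
* below sets it, and ext4_journalled_writepage() above tests and
* clears it (ClearPageChecked()) to decide whether buffers must be
* attached and journalled before the page is written out.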
3530 */ 3531 static int ext4_journalled_set_page_dirty(struct page *page) 3532 { 3533 SetPageChecked(page); 3534 return __set_page_dirty_nobuffers(page); 3535 } 3536 3537 static const struct address_space_operations ext4_ordered_aops = { 3538 .readpage = ext4_readpage, 3539 .readpages = ext4_readpages, 3540 .writepage = ext4_normal_writepage, 3541 .sync_page = block_sync_page, 3542 .write_begin = ext4_write_begin, 3543 .write_end = ext4_ordered_write_end, 3544 .bmap = ext4_bmap, 3545 .invalidatepage = ext4_invalidatepage, 3546 .releasepage = ext4_releasepage, 3547 .direct_IO = ext4_direct_IO, 3548 .migratepage = buffer_migrate_page, 3549 .is_partially_uptodate = block_is_partially_uptodate, 3550 }; 3551 3552 static const struct address_space_operations ext4_writeback_aops = { 3553 .readpage = ext4_readpage, 3554 .readpages = ext4_readpages, 3555 .writepage = ext4_normal_writepage, 3556 .sync_page = block_sync_page, 3557 .write_begin = ext4_write_begin, 3558 .write_end = ext4_writeback_write_end, 3559 .bmap = ext4_bmap, 3560 .invalidatepage = ext4_invalidatepage, 3561 .releasepage = ext4_releasepage, 3562 .direct_IO = ext4_direct_IO, 3563 .migratepage = buffer_migrate_page, 3564 .is_partially_uptodate = block_is_partially_uptodate, 3565 }; 3566 3567 static const struct address_space_operations ext4_journalled_aops = { 3568 .readpage = ext4_readpage, 3569 .readpages = ext4_readpages, 3570 .writepage = ext4_journalled_writepage, 3571 .sync_page = block_sync_page, 3572 .write_begin = ext4_write_begin, 3573 .write_end = ext4_journalled_write_end, 3574 .set_page_dirty = ext4_journalled_set_page_dirty, 3575 .bmap = ext4_bmap, 3576 .invalidatepage = ext4_invalidatepage, 3577 .releasepage = ext4_releasepage, 3578 .is_partially_uptodate = block_is_partially_uptodate, 3579 }; 3580 3581 static const struct address_space_operations ext4_da_aops = { 3582 .readpage = ext4_readpage, 3583 .readpages = ext4_readpages, 3584 .writepage = ext4_da_writepage, 3585 .writepages = ext4_da_writepages, 3586 .sync_page = block_sync_page, 3587 .write_begin = ext4_da_write_begin, 3588 .write_end = ext4_da_write_end, 3589 .bmap = ext4_bmap, 3590 .invalidatepage = ext4_da_invalidatepage, 3591 .releasepage = ext4_releasepage, 3592 .direct_IO = ext4_direct_IO, 3593 .migratepage = buffer_migrate_page, 3594 .is_partially_uptodate = block_is_partially_uptodate, 3595 }; 3596 3597 void ext4_set_aops(struct inode *inode) 3598 { 3599 if (ext4_should_order_data(inode) && 3600 test_opt(inode->i_sb, DELALLOC)) 3601 inode->i_mapping->a_ops = &ext4_da_aops; 3602 else if (ext4_should_order_data(inode)) 3603 inode->i_mapping->a_ops = &ext4_ordered_aops; 3604 else if (ext4_should_writeback_data(inode) && 3605 test_opt(inode->i_sb, DELALLOC)) 3606 inode->i_mapping->a_ops = &ext4_da_aops; 3607 else if (ext4_should_writeback_data(inode)) 3608 inode->i_mapping->a_ops = &ext4_writeback_aops; 3609 else 3610 inode->i_mapping->a_ops = &ext4_journalled_aops; 3611 } 3612 3613 /* 3614 * ext4_block_truncate_page() zeroes out a mapping from file offset `from' 3615 * up to the end of the block which corresponds to `from'. 3616 * This required during truncate. We need to physically zero the tail end 3617 * of that block so it doesn't yield old data if the file is later grown. 
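*
* Worked example with made-up numbers (4K pages, 1K blocks): for
* from = 5121 we get offset = 5121 & 4095 = 1025 within the page, so
*
*	length = blocksize - (offset & (blocksize - 1))
*	       = 1024 - 1 = 1023
*
* bytes are zeroed: everything from `from' to the end of the 1K block
* that contains it, while the rest of the page is left untouched.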
3618 */ 3619 int ext4_block_truncate_page(handle_t *handle, 3620 struct address_space *mapping, loff_t from) 3621 { 3622 ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; 3623 unsigned offset = from & (PAGE_CACHE_SIZE-1); 3624 unsigned blocksize, length, pos; 3625 ext4_lblk_t iblock; 3626 struct inode *inode = mapping->host; 3627 struct buffer_head *bh; 3628 struct page *page; 3629 int err = 0; 3630 3631 page = grab_cache_page(mapping, from >> PAGE_CACHE_SHIFT); 3632 if (!page) 3633 return -EINVAL; 3634 3635 blocksize = inode->i_sb->s_blocksize; 3636 length = blocksize - (offset & (blocksize - 1)); 3637 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 3638 3639 /* 3640 * For "nobh" option, we can only work if we don't need to 3641 * read-in the page - otherwise we create buffers to do the IO. 3642 */ 3643 if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) && 3644 ext4_should_writeback_data(inode) && PageUptodate(page)) { 3645 zero_user(page, offset, length); 3646 set_page_dirty(page); 3647 goto unlock; 3648 } 3649 3650 if (!page_has_buffers(page)) 3651 create_empty_buffers(page, blocksize, 0); 3652 3653 /* Find the buffer that contains "offset" */ 3654 bh = page_buffers(page); 3655 pos = blocksize; 3656 while (offset >= pos) { 3657 bh = bh->b_this_page; 3658 iblock++; 3659 pos += blocksize; 3660 } 3661 3662 err = 0; 3663 if (buffer_freed(bh)) { 3664 BUFFER_TRACE(bh, "freed: skip"); 3665 goto unlock; 3666 } 3667 3668 if (!buffer_mapped(bh)) { 3669 BUFFER_TRACE(bh, "unmapped"); 3670 ext4_get_block(inode, iblock, bh, 0); 3671 /* unmapped? It's a hole - nothing to do */ 3672 if (!buffer_mapped(bh)) { 3673 BUFFER_TRACE(bh, "still unmapped"); 3674 goto unlock; 3675 } 3676 } 3677 3678 /* Ok, it's mapped. Make sure it's up-to-date */ 3679 if (PageUptodate(page)) 3680 set_buffer_uptodate(bh); 3681 3682 if (!buffer_uptodate(bh)) { 3683 err = -EIO; 3684 ll_rw_block(READ, 1, &bh); 3685 wait_on_buffer(bh); 3686 /* Uhhuh. Read error. Complain and punt. */ 3687 if (!buffer_uptodate(bh)) 3688 goto unlock; 3689 } 3690 3691 if (ext4_should_journal_data(inode)) { 3692 BUFFER_TRACE(bh, "get write access"); 3693 err = ext4_journal_get_write_access(handle, bh); 3694 if (err) 3695 goto unlock; 3696 } 3697 3698 zero_user(page, offset, length); 3699 3700 BUFFER_TRACE(bh, "zeroed end of block"); 3701 3702 err = 0; 3703 if (ext4_should_journal_data(inode)) { 3704 err = ext4_handle_dirty_metadata(handle, inode, bh); 3705 } else { 3706 if (ext4_should_order_data(inode)) 3707 err = ext4_jbd2_file_inode(handle, inode); 3708 mark_buffer_dirty(bh); 3709 } 3710 3711 unlock: 3712 unlock_page(page); 3713 page_cache_release(page); 3714 return err; 3715 } 3716 3717 /* 3718 * Probably it should be a library function... search for first non-zero word 3719 * or memcmp with zero_page, whatever is better for particular architecture. 3720 * Linus? 3721 */ 3722 static inline int all_zeroes(__le32 *p, __le32 *q) 3723 { 3724 while (p < q) 3725 if (*p++) 3726 return 0; 3727 return 1; 3728 } 3729 3730 /** 3731 * ext4_find_shared - find the indirect blocks for partial truncation. 3732 * @inode: inode in question 3733 * @depth: depth of the affected branch 3734 * @offsets: offsets of pointers in that branch (see ext4_block_to_path) 3735 * @chain: place to store the pointers to partial indirect blocks 3736 * @top: place to the (detached) top of branch 3737 * 3738 * This is a helper function used by ext4_truncate(). 
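 *
 * As an illustration (assuming a doubly-indirect path, @offsets ==
 * {EXT4_DIND_BLOCK, d, i}): chain[0].p points at the DIND slot in the
 * inode's i_data, chain[1].bh is the double-indirect block with
 * chain[1].p at its d-th slot, and chain[2].bh/chain[2].p name the
 * indirect block and its i-th slot.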
3739 * 3740 * When we do truncate() we may have to clean the ends of several 3741 * indirect blocks but leave the blocks themselves alive. Block is 3742 * partially truncated if some data below the new i_size is refered 3743 * from it (and it is on the path to the first completely truncated 3744 * data block, indeed). We have to free the top of that path along 3745 * with everything to the right of the path. Since no allocation 3746 * past the truncation point is possible until ext4_truncate() 3747 * finishes, we may safely do the latter, but top of branch may 3748 * require special attention - pageout below the truncation point 3749 * might try to populate it. 3750 * 3751 * We atomically detach the top of branch from the tree, store the 3752 * block number of its root in *@top, pointers to buffer_heads of 3753 * partially truncated blocks - in @chain[].bh and pointers to 3754 * their last elements that should not be removed - in 3755 * @chain[].p. Return value is the pointer to last filled element 3756 * of @chain. 3757 * 3758 * The work left to caller to do the actual freeing of subtrees: 3759 * a) free the subtree starting from *@top 3760 * b) free the subtrees whose roots are stored in 3761 * (@chain[i].p+1 .. end of @chain[i].bh->b_data) 3762 * c) free the subtrees growing from the inode past the @chain[0]. 3763 * (no partially truncated stuff there). */ 3764 3765 static Indirect *ext4_find_shared(struct inode *inode, int depth, 3766 ext4_lblk_t offsets[4], Indirect chain[4], __le32 *top) 3767 { 3768 Indirect *partial, *p; 3769 int k, err; 3770 3771 *top = 0; 3772 /* Make k index the deepest non-null offest + 1 */ 3773 for (k = depth; k > 1 && !offsets[k-1]; k--) 3774 ; 3775 partial = ext4_get_branch(inode, k, offsets, chain, &err); 3776 /* Writer: pointers */ 3777 if (!partial) 3778 partial = chain + k-1; 3779 /* 3780 * If the branch acquired continuation since we've looked at it - 3781 * fine, it should all survive and (new) top doesn't belong to us. 3782 */ 3783 if (!partial->key && *partial->p) 3784 /* Writer: end */ 3785 goto no_top; 3786 for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--) 3787 ; 3788 /* 3789 * OK, we've found the last block that must survive. The rest of our 3790 * branch should be detached before unlocking. However, if that rest 3791 * of branch is all ours and does not grow immediately from the inode 3792 * it's easier to cheat and just decrement partial->p. 3793 */ 3794 if (p == chain + k - 1 && p > chain) { 3795 p->p--; 3796 } else { 3797 *top = *p->p; 3798 /* Nope, don't do this in ext4. Must leave the tree intact */ 3799 #if 0 3800 *p->p = 0; 3801 #endif 3802 } 3803 /* Writer: end */ 3804 3805 while (partial > p) { 3806 brelse(partial->bh); 3807 partial--; 3808 } 3809 no_top: 3810 return partial; 3811 } 3812 3813 /* 3814 * Zero a number of block pointers in either an inode or an indirect block. 3815 * If we restart the transaction we must again get write access to the 3816 * indirect block for further modification. 3817 * 3818 * We release `count' blocks on disk, but (last - first) may be greater 3819 * than `count' because there can be holes in there. 
3820 */ 3821 static void ext4_clear_blocks(handle_t *handle, struct inode *inode, 3822 struct buffer_head *bh, ext4_fsblk_t block_to_free, 3823 unsigned long count, __le32 *first, __le32 *last) 3824 { 3825 __le32 *p; 3826 if (try_to_extend_transaction(handle, inode)) { 3827 if (bh) { 3828 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 3829 ext4_handle_dirty_metadata(handle, inode, bh); 3830 } 3831 ext4_mark_inode_dirty(handle, inode); 3832 ext4_journal_test_restart(handle, inode); 3833 if (bh) { 3834 BUFFER_TRACE(bh, "retaking write access"); 3835 ext4_journal_get_write_access(handle, bh); 3836 } 3837 } 3838 3839 /* 3840 * Any buffers which are on the journal will be in memory. We find 3841 * them on the hash table so jbd2_journal_revoke() will run jbd2_journal_forget() 3842 * on them. We've already detached each block from the file, so 3843 * bforget() in jbd2_journal_forget() should be safe. 3844 * 3845 * AKPM: turn on bforget in jbd2_journal_forget()!!! 3846 */ 3847 for (p = first; p < last; p++) { 3848 u32 nr = le32_to_cpu(*p); 3849 if (nr) { 3850 struct buffer_head *tbh; 3851 3852 *p = 0; 3853 tbh = sb_find_get_block(inode->i_sb, nr); 3854 ext4_forget(handle, 0, inode, tbh, nr); 3855 } 3856 } 3857 3858 ext4_free_blocks(handle, inode, block_to_free, count, 0); 3859 } 3860 3861 /** 3862 * ext4_free_data - free a list of data blocks 3863 * @handle: handle for this transaction 3864 * @inode: inode we are dealing with 3865 * @this_bh: indirect buffer_head which contains *@first and *@last 3866 * @first: array of block numbers 3867 * @last: points immediately past the end of array 3868 * 3869 * We are freeing all blocks referred from that array (numbers are stored as 3870 * little-endian 32-bit) and updating @inode->i_blocks appropriately. 3871 * 3872 * We accumulate contiguous runs of blocks to free. Conveniently, if these 3873 * blocks are contiguous then releasing them at one time will only affect one 3874 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't 3875 * actually use a lot of journal space. 3876 * 3877 * @this_bh will be %NULL if @first and @last point into the inode's direct 3878 * block pointers. 3879 */ 3880 static void ext4_free_data(handle_t *handle, struct inode *inode, 3881 struct buffer_head *this_bh, 3882 __le32 *first, __le32 *last) 3883 { 3884 ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */ 3885 unsigned long count = 0; /* Number of blocks in the run */ 3886 __le32 *block_to_free_p = NULL; /* Pointer into inode/ind 3887 corresponding to 3888 block_to_free */ 3889 ext4_fsblk_t nr; /* Current block # */ 3890 __le32 *p; /* Pointer into inode/ind 3891 for current block */ 3892 int err; 3893 3894 if (this_bh) { /* For indirect block */ 3895 BUFFER_TRACE(this_bh, "get_write_access"); 3896 err = ext4_journal_get_write_access(handle, this_bh); 3897 /* Important: if we can't update the indirect pointers 3898 * to the blocks, we can't free them.
*/ 3899 if (err) 3900 return; 3901 } 3902 3903 for (p = first; p < last; p++) { 3904 nr = le32_to_cpu(*p); 3905 if (nr) { 3906 /* accumulate blocks to free if they're contiguous */ 3907 if (count == 0) { 3908 block_to_free = nr; 3909 block_to_free_p = p; 3910 count = 1; 3911 } else if (nr == block_to_free + count) { 3912 count++; 3913 } else { 3914 ext4_clear_blocks(handle, inode, this_bh, 3915 block_to_free, 3916 count, block_to_free_p, p); 3917 block_to_free = nr; 3918 block_to_free_p = p; 3919 count = 1; 3920 } 3921 } 3922 } 3923 3924 if (count > 0) 3925 ext4_clear_blocks(handle, inode, this_bh, block_to_free, 3926 count, block_to_free_p, p); 3927 3928 if (this_bh) { 3929 BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata"); 3930 3931 /* 3932 * The buffer head should have an attached journal head at this 3933 * point. However, if the data is corrupted and an indirect 3934 * block pointed to itself, it would have been detached when 3935 * the block was cleared. Check for this instead of OOPSing. 3936 */ 3937 if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh)) 3938 ext4_handle_dirty_metadata(handle, inode, this_bh); 3939 else 3940 ext4_error(inode->i_sb, __func__, 3941 "circular indirect block detected, " 3942 "inode=%lu, block=%llu", 3943 inode->i_ino, 3944 (unsigned long long) this_bh->b_blocknr); 3945 } 3946 } 3947 3948 /** 3949 * ext4_free_branches - free an array of branches 3950 * @handle: JBD handle for this transaction 3951 * @inode: inode we are dealing with 3952 * @parent_bh: the buffer_head which contains *@first and *@last 3953 * @first: array of block numbers 3954 * @last: pointer immediately past the end of array 3955 * @depth: depth of the branches to free 3956 * 3957 * We are freeing all blocks referred from these branches (numbers are 3958 * stored as little-endian 32-bit) and updating @inode->i_blocks 3959 * appropriately. 3960 */ 3961 static void ext4_free_branches(handle_t *handle, struct inode *inode, 3962 struct buffer_head *parent_bh, 3963 __le32 *first, __le32 *last, int depth) 3964 { 3965 ext4_fsblk_t nr; 3966 __le32 *p; 3967 3968 if (ext4_handle_is_aborted(handle)) 3969 return; 3970 3971 if (depth--) { 3972 struct buffer_head *bh; 3973 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); 3974 p = last; 3975 while (--p >= first) { 3976 nr = le32_to_cpu(*p); 3977 if (!nr) 3978 continue; /* A hole */ 3979 3980 /* Go read the buffer for the next level down */ 3981 bh = sb_bread(inode->i_sb, nr); 3982 3983 /* 3984 * A read failure? Report error and clear slot 3985 * (should be rare). 3986 */ 3987 if (!bh) { 3988 ext4_error(inode->i_sb, "ext4_free_branches", 3989 "Read failure, inode=%lu, block=%llu", 3990 inode->i_ino, nr); 3991 continue; 3992 } 3993 3994 /* This zaps the entire block. Bottom up. */ 3995 BUFFER_TRACE(bh, "free child branches"); 3996 ext4_free_branches(handle, inode, bh, 3997 (__le32 *) bh->b_data, 3998 (__le32 *) bh->b_data + addr_per_block, 3999 depth); 4000 4001 /* 4002 * We've probably journalled the indirect block several 4003 * times during the truncate. But it's no longer 4004 * needed and we now drop it from the transaction via 4005 * jbd2_journal_revoke(). 4006 * 4007 * That's easy if it's exclusively part of this 4008 * transaction. But if it's part of the committing 4009 * transaction then jbd2_journal_forget() will simply 4010 * brelse() it. That means that if the underlying 4011 * block is reallocated in ext4_get_block(), 4012 * unmap_underlying_metadata() will find this block 4013 * and will try to get rid of it. damn, damn.
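 *
 * Illustrative failure sequence this guards against: transaction
 * T1 journals indirect block N; we free N here; N is reallocated
 * as file data before T1 commits; crash; replay of T1 scribbles
 * stale metadata over the new file data. The revoke record emitted
 * below is what breaks that chain.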
4014 * 4015 * If this block has already been committed to the 4016 * journal, a revoke record will be written. And 4017 * revoke records must be emitted *before* clearing 4018 * this block's bit in the bitmaps. 4019 */ 4020 ext4_forget(handle, 1, inode, bh, bh->b_blocknr); 4021 4022 /* 4023 * Everything below this this pointer has been 4024 * released. Now let this top-of-subtree go. 4025 * 4026 * We want the freeing of this indirect block to be 4027 * atomic in the journal with the updating of the 4028 * bitmap block which owns it. So make some room in 4029 * the journal. 4030 * 4031 * We zero the parent pointer *after* freeing its 4032 * pointee in the bitmaps, so if extend_transaction() 4033 * for some reason fails to put the bitmap changes and 4034 * the release into the same transaction, recovery 4035 * will merely complain about releasing a free block, 4036 * rather than leaking blocks. 4037 */ 4038 if (ext4_handle_is_aborted(handle)) 4039 return; 4040 if (try_to_extend_transaction(handle, inode)) { 4041 ext4_mark_inode_dirty(handle, inode); 4042 ext4_journal_test_restart(handle, inode); 4043 } 4044 4045 ext4_free_blocks(handle, inode, nr, 1, 1); 4046 4047 if (parent_bh) { 4048 /* 4049 * The block which we have just freed is 4050 * pointed to by an indirect block: journal it 4051 */ 4052 BUFFER_TRACE(parent_bh, "get_write_access"); 4053 if (!ext4_journal_get_write_access(handle, 4054 parent_bh)){ 4055 *p = 0; 4056 BUFFER_TRACE(parent_bh, 4057 "call ext4_handle_dirty_metadata"); 4058 ext4_handle_dirty_metadata(handle, 4059 inode, 4060 parent_bh); 4061 } 4062 } 4063 } 4064 } else { 4065 /* We have reached the bottom of the tree. */ 4066 BUFFER_TRACE(parent_bh, "free data blocks"); 4067 ext4_free_data(handle, inode, parent_bh, first, last); 4068 } 4069 } 4070 4071 int ext4_can_truncate(struct inode *inode) 4072 { 4073 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 4074 return 0; 4075 if (S_ISREG(inode->i_mode)) 4076 return 1; 4077 if (S_ISDIR(inode->i_mode)) 4078 return 1; 4079 if (S_ISLNK(inode->i_mode)) 4080 return !ext4_inode_is_fast_symlink(inode); 4081 return 0; 4082 } 4083 4084 /* 4085 * ext4_truncate() 4086 * 4087 * We block out ext4_get_block() block instantiations across the entire 4088 * transaction, and VFS/VM ensures that ext4_truncate() cannot run 4089 * simultaneously on behalf of the same inode. 4090 * 4091 * As we work through the truncate and commmit bits of it to the journal there 4092 * is one core, guiding principle: the file's tree must always be consistent on 4093 * disk. We must be able to restart the truncate after a crash. 4094 * 4095 * The file's tree may be transiently inconsistent in memory (although it 4096 * probably isn't), but whenever we close off and commit a journal transaction, 4097 * the contents of (the filesystem + the journal) must be consistent and 4098 * restartable. It's pretty simple, really: bottom up, right to left (although 4099 * left-to-right works OK too). 4100 * 4101 * Note that at recovery time, journal replay occurs *before* the restart of 4102 * truncate against the orphan inode list. 4103 * 4104 * The committed inode has the new, desired i_size (which is the same as 4105 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see 4106 * that this inode's truncate did not complete and it will again call 4107 * ext4_truncate() to have another go. So there will be instantiated blocks 4108 * to the right of the truncation point in a crashed ext4 filesystem. 
But 4109 * that's fine - as long as they are linked from the inode, the post-crash 4110 * ext4_truncate() run will find them and release them. 4111 */ 4112 void ext4_truncate(struct inode *inode) 4113 { 4114 handle_t *handle; 4115 struct ext4_inode_info *ei = EXT4_I(inode); 4116 __le32 *i_data = ei->i_data; 4117 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); 4118 struct address_space *mapping = inode->i_mapping; 4119 ext4_lblk_t offsets[4]; 4120 Indirect chain[4]; 4121 Indirect *partial; 4122 __le32 nr = 0; 4123 int n; 4124 ext4_lblk_t last_block; 4125 unsigned blocksize = inode->i_sb->s_blocksize; 4126 4127 if (!ext4_can_truncate(inode)) 4128 return; 4129 4130 if (ei->i_disksize && inode->i_size == 0 && 4131 !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) 4132 ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE; 4133 4134 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { 4135 ext4_ext_truncate(inode); 4136 return; 4137 } 4138 4139 handle = start_transaction(inode); 4140 if (IS_ERR(handle)) 4141 return; /* AKPM: return what? */ 4142 4143 last_block = (inode->i_size + blocksize-1) 4144 >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); 4145 4146 if (inode->i_size & (blocksize - 1)) 4147 if (ext4_block_truncate_page(handle, mapping, inode->i_size)) 4148 goto out_stop; 4149 4150 n = ext4_block_to_path(inode, last_block, offsets, NULL); 4151 if (n == 0) 4152 goto out_stop; /* error */ 4153 4154 /* 4155 * OK. This truncate is going to happen. We add the inode to the 4156 * orphan list, so that if this truncate spans multiple transactions, 4157 * and we crash, we will resume the truncate when the filesystem 4158 * recovers. It also marks the inode dirty, to catch the new size. 4159 * 4160 * Implication: the file must always be in a sane, consistent 4161 * truncatable state while each transaction commits. 4162 */ 4163 if (ext4_orphan_add(handle, inode)) 4164 goto out_stop; 4165 4166 /* 4167 * From here we block out all ext4_get_block() callers who want to 4168 * modify the block allocation tree. 4169 */ 4170 down_write(&ei->i_data_sem); 4171 4172 ext4_discard_preallocations(inode); 4173 4174 /* 4175 * The orphan list entry will now protect us from any crash which 4176 * occurs before the truncate completes, so it is now safe to propagate 4177 * the new, shorter inode size (held for now in i_size) into the 4178 * on-disk inode. We do this via i_disksize, which is the value which 4179 * ext4 *really* writes onto the disk inode. 4180 */ 4181 ei->i_disksize = inode->i_size; 4182 4183 if (n == 1) { /* direct blocks */ 4184 ext4_free_data(handle, inode, NULL, i_data+offsets[0], 4185 i_data + EXT4_NDIR_BLOCKS); 4186 goto do_indirects; 4187 } 4188 4189 partial = ext4_find_shared(inode, n, offsets, chain, &nr); 4190 /* Kill the top of shared branch (not detached) */ 4191 if (nr) { 4192 if (partial == chain) { 4193 /* Shared branch grows from the inode */ 4194 ext4_free_branches(handle, inode, NULL, 4195 &nr, &nr+1, (chain+n-1) - partial); 4196 *partial->p = 0; 4197 /* 4198 * We mark the inode dirty prior to restart, 4199 * and prior to stop. No need for it here. 
4200 */ 4201 } else { 4202 /* Shared branch grows from an indirect block */ 4203 BUFFER_TRACE(partial->bh, "get_write_access"); 4204 ext4_free_branches(handle, inode, partial->bh, 4205 partial->p, 4206 partial->p+1, (chain+n-1) - partial); 4207 } 4208 } 4209 /* Clear the ends of indirect blocks on the shared branch */ 4210 while (partial > chain) { 4211 ext4_free_branches(handle, inode, partial->bh, partial->p + 1, 4212 (__le32*)partial->bh->b_data+addr_per_block, 4213 (chain+n-1) - partial); 4214 BUFFER_TRACE(partial->bh, "call brelse"); 4215 brelse (partial->bh); 4216 partial--; 4217 } 4218 do_indirects: 4219 /* Kill the remaining (whole) subtrees */ 4220 switch (offsets[0]) { 4221 default: 4222 nr = i_data[EXT4_IND_BLOCK]; 4223 if (nr) { 4224 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1); 4225 i_data[EXT4_IND_BLOCK] = 0; 4226 } 4227 case EXT4_IND_BLOCK: 4228 nr = i_data[EXT4_DIND_BLOCK]; 4229 if (nr) { 4230 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2); 4231 i_data[EXT4_DIND_BLOCK] = 0; 4232 } 4233 case EXT4_DIND_BLOCK: 4234 nr = i_data[EXT4_TIND_BLOCK]; 4235 if (nr) { 4236 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3); 4237 i_data[EXT4_TIND_BLOCK] = 0; 4238 } 4239 case EXT4_TIND_BLOCK: 4240 ; 4241 } 4242 4243 up_write(&ei->i_data_sem); 4244 inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 4245 ext4_mark_inode_dirty(handle, inode); 4246 4247 /* 4248 * In a multi-transaction truncate, we only make the final transaction 4249 * synchronous 4250 */ 4251 if (IS_SYNC(inode)) 4252 ext4_handle_sync(handle); 4253 out_stop: 4254 /* 4255 * If this was a simple ftruncate(), and the file will remain alive 4256 * then we need to clear up the orphan record which we created above. 4257 * However, if this was a real unlink then we were called by 4258 * ext4_delete_inode(), and we allow that function to clean up the 4259 * orphan info for us. 4260 */ 4261 if (inode->i_nlink) 4262 ext4_orphan_del(handle, inode); 4263 4264 ext4_journal_stop(handle); 4265 } 4266 4267 /* 4268 * ext4_get_inode_loc returns with an extra refcount against the inode's 4269 * underlying buffer_head on success. If 'in_mem' is true, we have all 4270 * data in memory that is needed to recreate the on-disk version of this 4271 * inode. 
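 *
 * Typical caller pattern (a sketch; error handling elided):
 *
 *	struct ext4_iloc iloc;
 *	if (!ext4_get_inode_loc(inode, &iloc)) {
 *		struct ext4_inode *raw = ext4_raw_inode(&iloc);
 *		... read or update *raw ...
 *		brelse(iloc.bh);	(drop the extra refcount)
 *	}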
4272 */ 4273 static int __ext4_get_inode_loc(struct inode *inode, 4274 struct ext4_iloc *iloc, int in_mem) 4275 { 4276 struct ext4_group_desc *gdp; 4277 struct buffer_head *bh; 4278 struct super_block *sb = inode->i_sb; 4279 ext4_fsblk_t block; 4280 int inodes_per_block, inode_offset; 4281 4282 iloc->bh = NULL; 4283 if (!ext4_valid_inum(sb, inode->i_ino)) 4284 return -EIO; 4285 4286 iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); 4287 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); 4288 if (!gdp) 4289 return -EIO; 4290 4291 /* 4292 * Figure out the offset within the block group inode table 4293 */ 4294 inodes_per_block = (EXT4_BLOCK_SIZE(sb) / EXT4_INODE_SIZE(sb)); 4295 inode_offset = ((inode->i_ino - 1) % 4296 EXT4_INODES_PER_GROUP(sb)); 4297 block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); 4298 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); 4299 4300 bh = sb_getblk(sb, block); 4301 if (!bh) { 4302 ext4_error(sb, "ext4_get_inode_loc", "unable to read " 4303 "inode block - inode=%lu, block=%llu", 4304 inode->i_ino, block); 4305 return -EIO; 4306 } 4307 if (!buffer_uptodate(bh)) { 4308 lock_buffer(bh); 4309 4310 /* 4311 * If the buffer has the write error flag, we have failed 4312 * to write out another inode in the same block. In this 4313 * case, we don't have to read the block because we may 4314 * read the old inode data successfully. 4315 */ 4316 if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) 4317 set_buffer_uptodate(bh); 4318 4319 if (buffer_uptodate(bh)) { 4320 /* someone brought it uptodate while we waited */ 4321 unlock_buffer(bh); 4322 goto has_buffer; 4323 } 4324 4325 /* 4326 * If we have all information of the inode in memory and this 4327 * is the only valid inode in the block, we need not read the 4328 * block. 4329 */ 4330 if (in_mem) { 4331 struct buffer_head *bitmap_bh; 4332 int i, start; 4333 4334 start = inode_offset & ~(inodes_per_block - 1); 4335 4336 /* Is the inode bitmap in cache? */ 4337 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); 4338 if (!bitmap_bh) 4339 goto make_io; 4340 4341 /* 4342 * If the inode bitmap isn't in cache then the 4343 * optimisation may end up performing two reads instead 4344 * of one, so skip it. 4345 */ 4346 if (!buffer_uptodate(bitmap_bh)) { 4347 brelse(bitmap_bh); 4348 goto make_io; 4349 } 4350 for (i = start; i < start + inodes_per_block; i++) { 4351 if (i == inode_offset) 4352 continue; 4353 if (ext4_test_bit(i, bitmap_bh->b_data)) 4354 break; 4355 } 4356 brelse(bitmap_bh); 4357 if (i == start + inodes_per_block) { 4358 /* all other inodes are free, so skip I/O */ 4359 memset(bh->b_data, 0, bh->b_size); 4360 set_buffer_uptodate(bh); 4361 unlock_buffer(bh); 4362 goto has_buffer; 4363 } 4364 } 4365 4366 make_io: 4367 /* 4368 * If we need to do any I/O, try to pre-readahead extra 4369 * blocks from the inode table. 
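 *
 * Worked example (illustrative numbers): with
 * s_inode_readahead_blks == 32 and block == 1000, the window starts
 * at b = 1000 & ~31 == 992 and extends for 32 blocks, clamped to the
 * used portion of this group's inode table.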
4370 */ 4371 if (EXT4_SB(sb)->s_inode_readahead_blks) { 4372 ext4_fsblk_t b, end, table; 4373 unsigned num; 4374 4375 table = ext4_inode_table(sb, gdp); 4376 /* s_inode_readahead_blks is always a power of 2 */ 4377 b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1); 4378 if (table > b) 4379 b = table; 4380 end = b + EXT4_SB(sb)->s_inode_readahead_blks; 4381 num = EXT4_INODES_PER_GROUP(sb); 4382 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 4383 EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) 4384 num -= ext4_itable_unused_count(sb, gdp); 4385 table += num / inodes_per_block; 4386 if (end > table) 4387 end = table; 4388 while (b <= end) 4389 sb_breadahead(sb, b++); 4390 } 4391 4392 /* 4393 * There are other valid inodes in the buffer, this inode 4394 * has in-inode xattrs, or we don't have this inode in memory. 4395 * Read the block from disk. 4396 */ 4397 get_bh(bh); 4398 bh->b_end_io = end_buffer_read_sync; 4399 submit_bh(READ_META, bh); 4400 wait_on_buffer(bh); 4401 if (!buffer_uptodate(bh)) { 4402 ext4_error(sb, __func__, 4403 "unable to read inode block - inode=%lu, " 4404 "block=%llu", inode->i_ino, block); 4405 brelse(bh); 4406 return -EIO; 4407 } 4408 } 4409 has_buffer: 4410 iloc->bh = bh; 4411 return 0; 4412 } 4413 4414 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) 4415 { 4416 /* We have all inode data except xattrs in memory here. */ 4417 return __ext4_get_inode_loc(inode, iloc, 4418 !(EXT4_I(inode)->i_state & EXT4_STATE_XATTR)); 4419 } 4420 4421 void ext4_set_inode_flags(struct inode *inode) 4422 { 4423 unsigned int flags = EXT4_I(inode)->i_flags; 4424 4425 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); 4426 if (flags & EXT4_SYNC_FL) 4427 inode->i_flags |= S_SYNC; 4428 if (flags & EXT4_APPEND_FL) 4429 inode->i_flags |= S_APPEND; 4430 if (flags & EXT4_IMMUTABLE_FL) 4431 inode->i_flags |= S_IMMUTABLE; 4432 if (flags & EXT4_NOATIME_FL) 4433 inode->i_flags |= S_NOATIME; 4434 if (flags & EXT4_DIRSYNC_FL) 4435 inode->i_flags |= S_DIRSYNC; 4436 } 4437 4438 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ 4439 void ext4_get_inode_flags(struct ext4_inode_info *ei) 4440 { 4441 unsigned int flags = ei->vfs_inode.i_flags; 4442 4443 ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL| 4444 EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL); 4445 if (flags & S_SYNC) 4446 ei->i_flags |= EXT4_SYNC_FL; 4447 if (flags & S_APPEND) 4448 ei->i_flags |= EXT4_APPEND_FL; 4449 if (flags & S_IMMUTABLE) 4450 ei->i_flags |= EXT4_IMMUTABLE_FL; 4451 if (flags & S_NOATIME) 4452 ei->i_flags |= EXT4_NOATIME_FL; 4453 if (flags & S_DIRSYNC) 4454 ei->i_flags |= EXT4_DIRSYNC_FL; 4455 } 4456 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, 4457 struct ext4_inode_info *ei) 4458 { 4459 blkcnt_t i_blocks ; 4460 struct inode *inode = &(ei->vfs_inode); 4461 struct super_block *sb = inode->i_sb; 4462 4463 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 4464 EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) { 4465 /* we are using combined 48 bit field */ 4466 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | 4467 le32_to_cpu(raw_inode->i_blocks_lo); 4468 if (ei->i_flags & EXT4_HUGE_FILE_FL) { 4469 /* i_blocks represent file system block size */ 4470 return i_blocks << (inode->i_blkbits - 9); 4471 } else { 4472 return i_blocks; 4473 } 4474 } else { 4475 return le32_to_cpu(raw_inode->i_blocks_lo); 4476 } 4477 } 4478 4479 struct inode *ext4_iget(struct super_block *sb, unsigned long ino) 4480 { 4481 struct ext4_iloc iloc; 4482 struct ext4_inode *raw_inode; 4483 struct ext4_inode_info *ei; 4484 
struct buffer_head *bh; 4485 struct inode *inode; 4486 long ret; 4487 int block; 4488 4489 inode = iget_locked(sb, ino); 4490 if (!inode) 4491 return ERR_PTR(-ENOMEM); 4492 if (!(inode->i_state & I_NEW)) 4493 return inode; 4494 4495 ei = EXT4_I(inode); 4496 #ifdef CONFIG_EXT4_FS_POSIX_ACL 4497 ei->i_acl = EXT4_ACL_NOT_CACHED; 4498 ei->i_default_acl = EXT4_ACL_NOT_CACHED; 4499 #endif 4500 4501 ret = __ext4_get_inode_loc(inode, &iloc, 0); 4502 if (ret < 0) 4503 goto bad_inode; 4504 bh = iloc.bh; 4505 raw_inode = ext4_raw_inode(&iloc); 4506 inode->i_mode = le16_to_cpu(raw_inode->i_mode); 4507 inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 4508 inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 4509 if (!(test_opt(inode->i_sb, NO_UID32))) { 4510 inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; 4511 inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; 4512 } 4513 inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); 4514 4515 ei->i_state = 0; 4516 ei->i_dir_start_lookup = 0; 4517 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); 4518 /* We now have enough fields to check if the inode was active or not. 4519 * This is needed because nfsd might try to access dead inodes 4520 * the test is that same one that e2fsck uses 4521 * NeilBrown 1999oct15 4522 */ 4523 if (inode->i_nlink == 0) { 4524 if (inode->i_mode == 0 || 4525 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) { 4526 /* this inode is deleted */ 4527 brelse(bh); 4528 ret = -ESTALE; 4529 goto bad_inode; 4530 } 4531 /* The only unlinked inodes we let through here have 4532 * valid i_mode and are being read by the orphan 4533 * recovery code: that's fine, we're about to complete 4534 * the process of deleting those. */ 4535 } 4536 ei->i_flags = le32_to_cpu(raw_inode->i_flags); 4537 inode->i_blocks = ext4_inode_blocks(raw_inode, ei); 4538 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); 4539 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT)) 4540 ei->i_file_acl |= 4541 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; 4542 inode->i_size = ext4_isize(raw_inode); 4543 ei->i_disksize = inode->i_size; 4544 inode->i_generation = le32_to_cpu(raw_inode->i_generation); 4545 ei->i_block_group = iloc.block_group; 4546 ei->i_last_alloc_group = ~0; 4547 /* 4548 * NOTE! The in-memory inode i_data array is in little-endian order 4549 * even on big-endian machines: we do NOT byteswap the block numbers! 4550 */ 4551 for (block = 0; block < EXT4_N_BLOCKS; block++) 4552 ei->i_data[block] = raw_inode->i_block[block]; 4553 INIT_LIST_HEAD(&ei->i_orphan); 4554 4555 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4556 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 4557 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 4558 EXT4_INODE_SIZE(inode->i_sb)) { 4559 brelse(bh); 4560 ret = -EIO; 4561 goto bad_inode; 4562 } 4563 if (ei->i_extra_isize == 0) { 4564 /* The extra space is currently unused. Use it. 
*/ 4565 ei->i_extra_isize = sizeof(struct ext4_inode) - 4566 EXT4_GOOD_OLD_INODE_SIZE; 4567 } else { 4568 __le32 *magic = (void *)raw_inode + 4569 EXT4_GOOD_OLD_INODE_SIZE + 4570 ei->i_extra_isize; 4571 if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) 4572 ei->i_state |= EXT4_STATE_XATTR; 4573 } 4574 } else 4575 ei->i_extra_isize = 0; 4576 4577 EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); 4578 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); 4579 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); 4580 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); 4581 4582 inode->i_version = le32_to_cpu(raw_inode->i_disk_version); 4583 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4584 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 4585 inode->i_version |= 4586 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; 4587 } 4588 4589 ret = 0; 4590 if (ei->i_file_acl && 4591 ((ei->i_file_acl < 4592 (le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) + 4593 EXT4_SB(sb)->s_gdb_count)) || 4594 (ei->i_file_acl >= ext4_blocks_count(EXT4_SB(sb)->s_es)))) { 4595 ext4_error(sb, __func__, 4596 "bad extended attribute block %llu in inode #%lu", 4597 ei->i_file_acl, inode->i_ino); 4598 ret = -EIO; 4599 goto bad_inode; 4600 } else if (ei->i_flags & EXT4_EXTENTS_FL) { 4601 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 4602 (S_ISLNK(inode->i_mode) && 4603 !ext4_inode_is_fast_symlink(inode))) 4604 /* Validate extent which is part of inode */ 4605 ret = ext4_ext_check_inode(inode); 4606 } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 4607 (S_ISLNK(inode->i_mode) && 4608 !ext4_inode_is_fast_symlink(inode))) { 4609 /* Validate block references which are part of inode */ 4610 ret = ext4_check_inode_blockref(inode); 4611 } 4612 if (ret) { 4613 brelse(bh); 4614 goto bad_inode; 4615 } 4616 4617 if (S_ISREG(inode->i_mode)) { 4618 inode->i_op = &ext4_file_inode_operations; 4619 inode->i_fop = &ext4_file_operations; 4620 ext4_set_aops(inode); 4621 } else if (S_ISDIR(inode->i_mode)) { 4622 inode->i_op = &ext4_dir_inode_operations; 4623 inode->i_fop = &ext4_dir_operations; 4624 } else if (S_ISLNK(inode->i_mode)) { 4625 if (ext4_inode_is_fast_symlink(inode)) { 4626 inode->i_op = &ext4_fast_symlink_inode_operations; 4627 nd_terminate_link(ei->i_data, inode->i_size, 4628 sizeof(ei->i_data) - 1); 4629 } else { 4630 inode->i_op = &ext4_symlink_inode_operations; 4631 ext4_set_aops(inode); 4632 } 4633 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || 4634 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { 4635 inode->i_op = &ext4_special_inode_operations; 4636 if (raw_inode->i_block[0]) 4637 init_special_inode(inode, inode->i_mode, 4638 old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 4639 else 4640 init_special_inode(inode, inode->i_mode, 4641 new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 4642 } else { 4643 brelse(bh); 4644 ret = -EIO; 4645 ext4_error(inode->i_sb, __func__, 4646 "bogus i_mode (%o) for inode=%lu", 4647 inode->i_mode, inode->i_ino); 4648 goto bad_inode; 4649 } 4650 brelse(iloc.bh); 4651 ext4_set_inode_flags(inode); 4652 unlock_new_inode(inode); 4653 return inode; 4654 4655 bad_inode: 4656 iget_failed(inode); 4657 return ERR_PTR(ret); 4658 } 4659 4660 static int ext4_inode_blocks_set(handle_t *handle, 4661 struct ext4_inode *raw_inode, 4662 struct ext4_inode_info *ei) 4663 { 4664 struct inode *inode = &(ei->vfs_inode); 4665 u64 i_blocks = inode->i_blocks; 4666 struct super_block *sb = inode->i_sb; 4667 4668 if (i_blocks <= ~0U) { 4669 /* 4670 * i_blocks 
can be represented in a 32 bit variable 4671 * as a multiple of 512 bytes 4672 */ 4673 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 4674 raw_inode->i_blocks_high = 0; 4675 ei->i_flags &= ~EXT4_HUGE_FILE_FL; 4676 return 0; 4677 } 4678 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) 4679 return -EFBIG; 4680 4681 if (i_blocks <= 0xffffffffffffULL) { 4682 /* 4683 * i_blocks can be represented in a 48 bit variable 4684 * as a multiple of 512 bytes 4685 */ 4686 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 4687 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 4688 ei->i_flags &= ~EXT4_HUGE_FILE_FL; 4689 } else { 4690 ei->i_flags |= EXT4_HUGE_FILE_FL; 4691 /* i_blocks is stored in units of the file system block size */ 4692 i_blocks = i_blocks >> (inode->i_blkbits - 9); 4693 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 4694 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 4695 } 4696 return 0; 4697 } 4698 4699 /* 4700 * Post the struct inode info into an on-disk inode location in the 4701 * buffer-cache. This gobbles the caller's reference to the 4702 * buffer_head in the inode location struct. 4703 * 4704 * The caller must have write access to iloc->bh. 4705 */ 4706 static int ext4_do_update_inode(handle_t *handle, 4707 struct inode *inode, 4708 struct ext4_iloc *iloc) 4709 { 4710 struct ext4_inode *raw_inode = ext4_raw_inode(iloc); 4711 struct ext4_inode_info *ei = EXT4_I(inode); 4712 struct buffer_head *bh = iloc->bh; 4713 int err = 0, rc, block; 4714 4715 /* For fields not tracked in the in-memory inode, 4716 * initialise them to zero for new inodes. */ 4717 if (ei->i_state & EXT4_STATE_NEW) 4718 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); 4719 4720 ext4_get_inode_flags(ei); 4721 raw_inode->i_mode = cpu_to_le16(inode->i_mode); 4722 if (!(test_opt(inode->i_sb, NO_UID32))) { 4723 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid)); 4724 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid)); 4725 /* 4726 * Fix up interoperability with old kernels.
Otherwise, old inodes get 4727 * re-used with the upper 16 bits of the uid/gid intact 4728 */ 4729 if (!ei->i_dtime) { 4730 raw_inode->i_uid_high = 4731 cpu_to_le16(high_16_bits(inode->i_uid)); 4732 raw_inode->i_gid_high = 4733 cpu_to_le16(high_16_bits(inode->i_gid)); 4734 } else { 4735 raw_inode->i_uid_high = 0; 4736 raw_inode->i_gid_high = 0; 4737 } 4738 } else { 4739 raw_inode->i_uid_low = 4740 cpu_to_le16(fs_high2lowuid(inode->i_uid)); 4741 raw_inode->i_gid_low = 4742 cpu_to_le16(fs_high2lowgid(inode->i_gid)); 4743 raw_inode->i_uid_high = 0; 4744 raw_inode->i_gid_high = 0; 4745 } 4746 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 4747 4748 EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); 4749 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); 4750 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); 4751 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); 4752 4753 if (ext4_inode_blocks_set(handle, raw_inode, ei)) 4754 goto out_brelse; 4755 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); 4756 /* clear the migrate flag in the raw_inode */ 4757 raw_inode->i_flags = cpu_to_le32(ei->i_flags & ~EXT4_EXT_MIGRATE); 4758 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != 4759 cpu_to_le32(EXT4_OS_HURD)) 4760 raw_inode->i_file_acl_high = 4761 cpu_to_le16(ei->i_file_acl >> 32); 4762 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); 4763 ext4_isize_set(raw_inode, ei->i_disksize); 4764 if (ei->i_disksize > 0x7fffffffULL) { 4765 struct super_block *sb = inode->i_sb; 4766 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 4767 EXT4_FEATURE_RO_COMPAT_LARGE_FILE) || 4768 EXT4_SB(sb)->s_es->s_rev_level == 4769 cpu_to_le32(EXT4_GOOD_OLD_REV)) { 4770 /* If this is the first large file 4771 * created, add a flag to the superblock. 4772 */ 4773 err = ext4_journal_get_write_access(handle, 4774 EXT4_SB(sb)->s_sbh); 4775 if (err) 4776 goto out_brelse; 4777 ext4_update_dynamic_rev(sb); 4778 EXT4_SET_RO_COMPAT_FEATURE(sb, 4779 EXT4_FEATURE_RO_COMPAT_LARGE_FILE); 4780 sb->s_dirt = 1; 4781 ext4_handle_sync(handle); 4782 err = ext4_handle_dirty_metadata(handle, inode, 4783 EXT4_SB(sb)->s_sbh); 4784 } 4785 } 4786 raw_inode->i_generation = cpu_to_le32(inode->i_generation); 4787 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 4788 if (old_valid_dev(inode->i_rdev)) { 4789 raw_inode->i_block[0] = 4790 cpu_to_le32(old_encode_dev(inode->i_rdev)); 4791 raw_inode->i_block[1] = 0; 4792 } else { 4793 raw_inode->i_block[0] = 0; 4794 raw_inode->i_block[1] = 4795 cpu_to_le32(new_encode_dev(inode->i_rdev)); 4796 raw_inode->i_block[2] = 0; 4797 } 4798 } else for (block = 0; block < EXT4_N_BLOCKS; block++) 4799 raw_inode->i_block[block] = ei->i_data[block]; 4800 4801 raw_inode->i_disk_version = cpu_to_le32(inode->i_version); 4802 if (ei->i_extra_isize) { 4803 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 4804 raw_inode->i_version_hi = 4805 cpu_to_le32(inode->i_version >> 32); 4806 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); 4807 } 4808 4809 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 4810 rc = ext4_handle_dirty_metadata(handle, inode, bh); 4811 if (!err) 4812 err = rc; 4813 ei->i_state &= ~EXT4_STATE_NEW; 4814 4815 out_brelse: 4816 brelse(bh); 4817 ext4_std_error(inode->i_sb, err); 4818 return err; 4819 } 4820 4821 /* 4822 * ext4_write_inode() 4823 * 4824 * We are called from a few places: 4825 * 4826 * - Within generic_file_write() for O_SYNC files. 4827 * Here, there will be no transaction running. We wait for any running 4828 * transaction to commit. 4829 * 4830 * - Within sys_sync(), kupdate and such.
4831 * We wait on commit, if told to. 4832 * 4833 * - Within prune_icache() (PF_MEMALLOC == true) 4834 * Here we simply return. We can't afford to block kswapd on the 4835 * journal commit. 4836 * 4837 * In all cases it is actually safe for us to return without doing anything, 4838 * because the inode has been copied into a raw inode buffer in 4839 * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for 4840 * knfsd. 4841 * 4842 * Note that we are absolutely dependent upon all inode dirtiers doing the 4843 * right thing: they *must* call mark_inode_dirty() after dirtying info in 4844 * which we are interested. 4845 * 4846 * It would be a bug for them to not do this. The code: 4847 * 4848 * mark_inode_dirty(inode) 4849 * stuff(); 4850 * inode->i_size = expr; 4851 * 4852 * is in error because a kswapd-driven write_inode() could occur while 4853 * `stuff()' is running, and the new i_size will be lost. Plus the inode 4854 * will no longer be on the superblock's dirty inode list. 4855 */ 4856 int ext4_write_inode(struct inode *inode, int wait) 4857 { 4858 if (current->flags & PF_MEMALLOC) 4859 return 0; 4860 4861 if (ext4_journal_current_handle()) { 4862 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); 4863 dump_stack(); 4864 return -EIO; 4865 } 4866 4867 if (!wait) 4868 return 0; 4869 4870 return ext4_force_commit(inode->i_sb); 4871 } 4872 4873 /* 4874 * ext4_setattr() 4875 * 4876 * Called from notify_change. 4877 * 4878 * We want to trap VFS attempts to truncate the file as soon as 4879 * possible. In particular, we want to make sure that when the VFS 4880 * shrinks i_size, we put the inode on the orphan list and modify 4881 * i_disksize immediately, so that during the subsequent flushing of 4882 * dirty pages and freeing of disk blocks, we can guarantee that any 4883 * commit will leave the blocks being flushed in an unused state on 4884 * disk. (On recovery, the inode will get truncated and the blocks will 4885 * be freed, so we have a strong guarantee that no future commit will 4886 * leave these blocks visible to the user.) 4887 * 4888 * Another thing we have to assure is that if we are in ordered mode 4889 * and the inode is still attached to the committing transaction, we must 4890 * start writeout of all the dirty pages which are being truncated. 4891 * This way we are sure that all the data written in the previous 4892 * transaction are already on disk (truncate waits for pages under 4893 * writeback). 4894 * 4895 * Called with inode->i_mutex down. 4896 */ 4897 int ext4_setattr(struct dentry *dentry, struct iattr *attr) 4898 { 4899 struct inode *inode = dentry->d_inode; 4900 int error, rc = 0; 4901 const unsigned int ia_valid = attr->ia_valid; 4902 4903 error = inode_change_ok(inode, attr); 4904 if (error) 4905 return error; 4906 4907 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || 4908 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { 4909 handle_t *handle; 4910 4911 /* (user+group)*(old+new) structure, inode write (sb, 4912 * inode block, ? - but truncate inode update has it) */ 4913 handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+ 4914 EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3); 4915 if (IS_ERR(handle)) { 4916 error = PTR_ERR(handle); 4917 goto err_out; 4918 } 4919 error = vfs_dq_transfer(inode, attr) ?
-EDQUOT : 0; 4920 if (error) { 4921 ext4_journal_stop(handle); 4922 return error; 4923 } 4924 /* Update corresponding info in inode so that everything is in 4925 * one transaction */ 4926 if (attr->ia_valid & ATTR_UID) 4927 inode->i_uid = attr->ia_uid; 4928 if (attr->ia_valid & ATTR_GID) 4929 inode->i_gid = attr->ia_gid; 4930 error = ext4_mark_inode_dirty(handle, inode); 4931 ext4_journal_stop(handle); 4932 } 4933 4934 if (attr->ia_valid & ATTR_SIZE) { 4935 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) { 4936 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4937 4938 if (attr->ia_size > sbi->s_bitmap_maxbytes) { 4939 error = -EFBIG; 4940 goto err_out; 4941 } 4942 } 4943 } 4944 4945 if (S_ISREG(inode->i_mode) && 4946 attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) { 4947 handle_t *handle; 4948 4949 handle = ext4_journal_start(inode, 3); 4950 if (IS_ERR(handle)) { 4951 error = PTR_ERR(handle); 4952 goto err_out; 4953 } 4954 4955 error = ext4_orphan_add(handle, inode); 4956 EXT4_I(inode)->i_disksize = attr->ia_size; 4957 rc = ext4_mark_inode_dirty(handle, inode); 4958 if (!error) 4959 error = rc; 4960 ext4_journal_stop(handle); 4961 4962 if (ext4_should_order_data(inode)) { 4963 error = ext4_begin_ordered_truncate(inode, 4964 attr->ia_size); 4965 if (error) { 4966 /* Do as much error cleanup as possible */ 4967 handle = ext4_journal_start(inode, 3); 4968 if (IS_ERR(handle)) { 4969 ext4_orphan_del(NULL, inode); 4970 goto err_out; 4971 } 4972 ext4_orphan_del(handle, inode); 4973 ext4_journal_stop(handle); 4974 goto err_out; 4975 } 4976 } 4977 } 4978 4979 rc = inode_setattr(inode, attr); 4980 4981 /* If inode_setattr's call to ext4_truncate failed to get a 4982 * transaction handle at all, we need to clean up the in-core 4983 * orphan list manually. */ 4984 if (inode->i_nlink) 4985 ext4_orphan_del(NULL, inode); 4986 4987 if (!rc && (ia_valid & ATTR_MODE)) 4988 rc = ext4_acl_chmod(inode); 4989 4990 err_out: 4991 ext4_std_error(inode->i_sb, error); 4992 if (!error) 4993 error = rc; 4994 return error; 4995 } 4996 4997 int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, 4998 struct kstat *stat) 4999 { 5000 struct inode *inode; 5001 unsigned long delalloc_blocks; 5002 5003 inode = dentry->d_inode; 5004 generic_fillattr(inode, stat); 5005 5006 /* 5007 * We can't update i_blocks if the block allocation is delayed; 5008 * otherwise, in the case of a system crash before the real block 5009 * allocation is done, we would have i_blocks inconsistent with the 5010 * on-disk file blocks. 5011 * We always keep i_blocks updated together with the real 5012 * allocation. But so as not to confuse userspace, stat 5013 * will return the blocks that include the delayed allocation 5014 * blocks for this file.
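 *
 * Illustrative arithmetic: with a 4K block size and 3 delalloc
 * blocks reserved, stat->blocks below grows by (3 << 12) >> 9 == 24
 * sectors of 512 bytes, which is what i_blocks will eventually show
 * once the blocks are really allocated.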
5015 */ 5016 spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 5017 delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks; 5018 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 5019 5020 stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9; 5021 return 0; 5022 } 5023 5024 static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks, 5025 int chunk) 5026 { 5027 int indirects; 5028 5029 /* if nrblocks are contiguous */ 5030 if (chunk) { 5031 /* 5032 * With N contiguous data blocks, we need at most 5033 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks 5034 * 2 dindirect blocks 5035 * 1 tindirect block 5036 */ 5037 indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb); 5038 return indirects + 3; 5039 } 5040 /* 5041 * If nrblocks are not contiguous, then in the worst case each block 5042 * touches an indirect block, and each indirect block touches a double 5043 * indirect block, plus a triple indirect block 5044 */ 5045 indirects = nrblocks * 2 + 1; 5046 return indirects; 5047 } 5048 5049 static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) 5050 { 5051 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) 5052 return ext4_indirect_trans_blocks(inode, nrblocks, chunk); 5053 return ext4_ext_index_trans_blocks(inode, nrblocks, chunk); 5054 } 5055 5056 /* 5057 * Account for index blocks, block group bitmaps and block group 5058 * descriptor blocks when both data blocks and index blocks are modified; 5059 * in the worst case, the index blocks are spread over different block groups 5060 * 5061 * If the data blocks are discontiguous, they may spread over 5062 * different block groups too. Even if they are contiguous, with flexbg, 5063 * they could still cross a block group boundary. 5064 * 5065 * Also account for superblock, inode, quota and xattr blocks 5066 */ 5067 int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk) 5068 { 5069 ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb); 5070 int gdpblocks; 5071 int idxblocks; 5072 int ret = 0; 5073 5074 /* 5075 * How many index blocks do we need to touch to modify nrblocks? 5076 * The "chunk" flag indicates whether the nrblocks are 5077 * physically contiguous on disk 5078 * 5079 * Direct IO and fallocate call get_block to allocate 5080 * a single extent at a time, so they can set the "chunk" flag 5081 */ 5082 idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk); 5083 5084 ret = idxblocks; 5085 5086 /* 5087 * Now let's see how many group bitmaps and group descriptors need 5088 * to be accounted for 5089 */ 5090 groups = idxblocks; 5091 if (chunk) 5092 groups += 1; 5093 else 5094 groups += nrblocks; 5095 5096 gdpblocks = groups; 5097 if (groups > ngroups) 5098 groups = ngroups; 5099 if (groups > EXT4_SB(inode->i_sb)->s_gdb_count) 5100 gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count; 5101 5102 /* bitmaps and block group descriptor blocks */ 5103 ret += groups + gdpblocks; 5104 5105 /* Blocks for super block, inode, quota and xattr blocks */ 5106 ret += EXT4_META_TRANS_BLOCKS(inode->i_sb); 5107 5108 return ret; 5109 } 5110 5111 /* 5112 * Calculate the total number of credits to reserve to fit 5113 * the modification of a single page into a single transaction, 5114 * which may include multiple chunks of block allocations. 5115 * 5116 * This could be called via ext4_write_begin() 5117 * 5118 * We need to consider the worst case, when we need 5119 * one new block per extent.
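 *
 * Illustrative arithmetic (4K blocks and 4K pages, indirect-mapped
 * file): bpp == 1, so the non-chunk path above charges
 * 1 * 2 + 1 == 3 index blocks, plus group bitmap and descriptor
 * credits and EXT4_META_TRANS_BLOCKS(); data=journal mode then adds
 * bpp more for the data block itself.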
5120 */ 5121 int ext4_writepage_trans_blocks(struct inode *inode) 5122 { 5123 int bpp = ext4_journal_blocks_per_page(inode); 5124 int ret; 5125 5126 ret = ext4_meta_trans_blocks(inode, bpp, 0); 5127 5128 /* Account for data blocks for journalled mode */ 5129 if (ext4_should_journal_data(inode)) 5130 ret += bpp; 5131 return ret; 5132 } 5133 5134 /* 5135 * Calculate the journal credits for a chunk of data modification. 5136 * 5137 * This is called from DIO, fallocate, or whoever calls 5138 * ext4_get_blocks() to map/allocate a chunk of contiguous disk blocks. 5139 * 5140 * Journal buffers for data blocks are not included here, as DIO 5141 * and fallocate do not need to journal data buffers. 5142 */ 5143 int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks) 5144 { 5145 return ext4_meta_trans_blocks(inode, nrblocks, 1); 5146 } 5147 5148 /* 5149 * The caller must have previously called ext4_reserve_inode_write(). 5150 * Given this, we know that the caller already has write access to iloc->bh. 5151 */ 5152 int ext4_mark_iloc_dirty(handle_t *handle, 5153 struct inode *inode, struct ext4_iloc *iloc) 5154 { 5155 int err = 0; 5156 5157 if (test_opt(inode->i_sb, I_VERSION)) 5158 inode_inc_iversion(inode); 5159 5160 /* the do_update_inode consumes one bh->b_count */ 5161 get_bh(iloc->bh); 5162 5163 /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */ 5164 err = ext4_do_update_inode(handle, inode, iloc); 5165 put_bh(iloc->bh); 5166 return err; 5167 } 5168 5169 /* 5170 * On success, we end up with an outstanding reference count against 5171 * iloc->bh. This _must_ be cleaned up later. 5172 */ 5173 5174 int 5175 ext4_reserve_inode_write(handle_t *handle, struct inode *inode, 5176 struct ext4_iloc *iloc) 5177 { 5178 int err; 5179 5180 err = ext4_get_inode_loc(inode, iloc); 5181 if (!err) { 5182 BUFFER_TRACE(iloc->bh, "get_write_access"); 5183 err = ext4_journal_get_write_access(handle, iloc->bh); 5184 if (err) { 5185 brelse(iloc->bh); 5186 iloc->bh = NULL; 5187 } 5188 } 5189 ext4_std_error(inode->i_sb, err); 5190 return err; 5191 } 5192 5193 /* 5194 * Expand an inode by new_extra_isize bytes. 5195 * Returns 0 on success or negative error number on failure. 5196 */ 5197 static int ext4_expand_extra_isize(struct inode *inode, 5198 unsigned int new_extra_isize, 5199 struct ext4_iloc iloc, 5200 handle_t *handle) 5201 { 5202 struct ext4_inode *raw_inode; 5203 struct ext4_xattr_ibody_header *header; 5204 struct ext4_xattr_entry *entry; 5205 5206 if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) 5207 return 0; 5208 5209 raw_inode = ext4_raw_inode(&iloc); 5210 5211 header = IHDR(inode, raw_inode); 5212 entry = IFIRST(header); 5213 5214 /* No extended attributes present */ 5215 if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) || 5216 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { 5217 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0, 5218 new_extra_isize); 5219 EXT4_I(inode)->i_extra_isize = new_extra_isize; 5220 return 0; 5221 } 5222 5223 /* try to expand with EAs present */ 5224 return ext4_expand_extra_isize_ea(inode, new_extra_isize, 5225 raw_inode, handle); 5226 } 5227 5228 /* 5229 * What we do here is to mark the in-core inode as clean with respect to inode 5230 * dirtiness (it may still be data-dirty). 5231 * This means that the in-core inode may be reaped by prune_icache 5232 * without having to perform any I/O. This is a very good thing, 5233 * because *any* task may call prune_icache - even ones which 5234 * have a transaction open against a different journal.
5235 * 5236 * Is this cheating? Not really. Sure, we haven't written the 5237 * inode out, but prune_icache isn't a user-visible syncing function. 5238 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) 5239 * we start and wait on commits. 5240 * 5241 * Is this efficient/effective? Well, we're being nice to the system 5242 * by cleaning up our inodes proactively so they can be reaped 5243 * without I/O. But we are potentially leaving up to five seconds' 5244 * worth of inodes floating about which prune_icache wants us to 5245 * write out. One way to fix that would be to get prune_icache() 5246 * to do a write_super() to free up some memory. It has the desired 5247 * effect. 5248 */ 5249 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) 5250 { 5251 struct ext4_iloc iloc; 5252 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 5253 static unsigned int mnt_count; 5254 int err, ret; 5255 5256 might_sleep(); 5257 err = ext4_reserve_inode_write(handle, inode, &iloc); 5258 if (ext4_handle_valid(handle) && 5259 EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && 5260 !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) { 5261 /* 5262 * We need extra buffer credits since we may write into EA block 5263 * with this same handle. If journal_extend fails, then it will 5264 * only result in a minor loss of functionality for that inode. 5265 * If this is felt to be critical, then e2fsck should be run to 5266 * force a large enough s_min_extra_isize. 5267 */ 5268 if ((jbd2_journal_extend(handle, 5269 EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) { 5270 ret = ext4_expand_extra_isize(inode, 5271 sbi->s_want_extra_isize, 5272 iloc, handle); 5273 if (ret) { 5274 EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND; 5275 if (mnt_count != 5276 le16_to_cpu(sbi->s_es->s_mnt_count)) { 5277 ext4_warning(inode->i_sb, __func__, 5278 "Unable to expand inode %lu. Delete" 5279 " some EAs or run e2fsck.", 5280 inode->i_ino); 5281 mnt_count = 5282 le16_to_cpu(sbi->s_es->s_mnt_count); 5283 } 5284 } 5285 } 5286 } 5287 if (!err) 5288 err = ext4_mark_iloc_dirty(handle, inode, &iloc); 5289 return err; 5290 } 5291 5292 /* 5293 * ext4_dirty_inode() is called from __mark_inode_dirty() 5294 * 5295 * We're really interested in the case where a file is being extended. 5296 * i_size has been changed by generic_commit_write() and we thus need 5297 * to include the updated inode in the current transaction. 5298 * 5299 * Also, vfs_dq_alloc_block() will always dirty the inode when blocks 5300 * are allocated to the file. 5301 * 5302 * If the inode is marked synchronous, we don't honour that here - doing 5303 * so would cause a commit on atime updates, which we don't bother doing. 5304 * We handle synchronous inodes at the highest possible level. 5305 */ 5306 void ext4_dirty_inode(struct inode *inode) 5307 { 5308 handle_t *current_handle = ext4_journal_current_handle(); 5309 handle_t *handle; 5310 5311 if (!ext4_handle_valid(current_handle)) { 5312 ext4_mark_inode_dirty(current_handle, inode); 5313 return; 5314 } 5315 5316 handle = ext4_journal_start(inode, 2); 5317 if (IS_ERR(handle)) 5318 goto out; 5319 if (current_handle && 5320 current_handle->h_transaction != handle->h_transaction) { 5321 /* This task has a transaction open against a different fs */ 5322 printk(KERN_EMERG "%s: transactions do not match!\n", 5323 __func__); 5324 } else { 5325 jbd_debug(5, "marking dirty. 
/*
 * ext4_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Also, vfs_dq_alloc_block() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
 * so would cause a commit on atime updates, which we don't bother doing.
 * We handle synchronous inodes at the highest possible level.
 */
void ext4_dirty_inode(struct inode *inode)
{
        handle_t *current_handle = ext4_journal_current_handle();
        handle_t *handle;

        if (!ext4_handle_valid(current_handle)) {
                /* No journal: the fake handle is enough to update the inode */
                ext4_mark_inode_dirty(current_handle, inode);
                return;
        }

        handle = ext4_journal_start(inode, 2);
        if (IS_ERR(handle))
                goto out;
        if (current_handle &&
            current_handle->h_transaction != handle->h_transaction) {
                /* This task has a transaction open against a different fs */
                printk(KERN_EMERG "%s: transactions do not match!\n",
                       __func__);
        } else {
                jbd_debug(5, "marking dirty.  outer handle=%p\n",
                          current_handle);
                ext4_mark_inode_dirty(handle, inode);
        }
        ext4_journal_stop(handle);
out:
        return;
}

#if 0
/*
 * Bind an inode's backing buffer_head into this transaction, to prevent
 * it from being flushed to disk early.  Unlike
 * ext4_reserve_inode_write, this leaves behind no bh reference and
 * returns no iloc structure, so the caller needs to repeat the iloc
 * lookup to mark the inode dirty later.
 */
static int ext4_pin_inode(handle_t *handle, struct inode *inode)
{
        struct ext4_iloc iloc;

        int err = 0;
        if (handle) {
                err = ext4_get_inode_loc(inode, &iloc);
                if (!err) {
                        BUFFER_TRACE(iloc.bh, "get_write_access");
                        err = jbd2_journal_get_write_access(handle, iloc.bh);
                        if (!err)
                                err = ext4_handle_dirty_metadata(handle,
                                                                 inode,
                                                                 iloc.bh);
                        brelse(iloc.bh);
                }
        }
        ext4_std_error(inode->i_sb, err);
        return err;
}
#endif

int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
        journal_t *journal;
        handle_t *handle;
        int err;

        /*
         * We have to be very careful here: changing a data block's
         * journaling status dynamically is dangerous.  If we write a
         * data block to the journal, change the status and then delete
         * that block, we risk forgetting to revoke the old log record
         * from the journal and so a subsequent replay can corrupt data.
         * So, first we make sure that the journal is empty and that
         * nobody is changing anything.
         */

        journal = EXT4_JOURNAL(inode);
        if (!journal)
                return 0;
        if (is_journal_aborted(journal))
                return -EROFS;

        jbd2_journal_lock_updates(journal);
        jbd2_journal_flush(journal);

        /*
         * OK, there are no updates running now, and all cached data is
         * synced to disk.  We are now in a completely consistent state
         * which doesn't have anything in the journal, and we know that
         * no filesystem updates are running, so it is safe to modify
         * the inode's in-core data-journaling state flag now.
         */

        if (val)
                EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
        else
                EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
        ext4_set_aops(inode);

        jbd2_journal_unlock_updates(journal);

        /* Finally we can mark the inode as dirty. */

        handle = ext4_journal_start(inode, 1);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        err = ext4_mark_inode_dirty(handle, inode);
        ext4_handle_sync(handle);
        ext4_journal_stop(handle);
        ext4_std_error(inode->i_sb, err);

        return err;
}
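/*
 * Illustrative sketch (not built): ext4_change_inode_journal_flag() is
 * normally reached via the EXT4_IOC_SETFLAGS ioctl when userspace
 * toggles the journal-data attribute (chattr +j / -j).  A userspace
 * caller looks roughly like the snippet below; FS_IOC_GETFLAGS,
 * FS_IOC_SETFLAGS and FS_JOURNAL_DATA_FL come from <linux/fs.h>, and
 * fd is assumed to be an open file on an ext4 filesystem.
 */
#if 0
        int flags;

        if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
                flags |= FS_JOURNAL_DATA_FL;    /* same effect as chattr +j */
                ioctl(fd, FS_IOC_SETFLAGS, &flags);
        }
#endif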
static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
{
        return !buffer_mapped(bh);
}

int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        loff_t size;
        unsigned long len;
        int ret = -EINVAL;
        void *fsdata;
        struct file *file = vma->vm_file;
        struct inode *inode = file->f_path.dentry->d_inode;
        struct address_space *mapping = inode->i_mapping;

        /*
         * Get i_alloc_sem to stop truncates messing with the inode.  We
         * cannot get i_mutex because we are already holding mmap_sem.
         */
        down_read(&inode->i_alloc_sem);
        size = i_size_read(inode);
        if (page->mapping != mapping || size <= page_offset(page) ||
            !PageUptodate(page)) {
                /* page got truncated from under us? */
                goto out_unlock;
        }
        ret = 0;
        if (PageMappedToDisk(page))
                goto out_unlock;

        if (page->index == size >> PAGE_CACHE_SHIFT)
                len = size & ~PAGE_CACHE_MASK;
        else
                len = PAGE_CACHE_SIZE;

        if (page_has_buffers(page)) {
                /* return if we have all the buffers mapped */
                if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
                                       ext4_bh_unmapped))
                        goto out_unlock;
        }
        /*
         * OK, we need to fill the hole...  Do write_begin/write_end to do
         * the block allocation/reservation.  We are not holding the inode's
         * i_mutex here, which allows parallel write_begin/write_end calls;
         * lock_page prevents that from happening on the same page, though.
         */
        ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
                                          len, AOP_FLAG_UNINTERRUPTIBLE,
                                          &page, &fsdata);
        if (ret < 0)
                goto out_unlock;
        ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
                                        len, len, page, fsdata);
        if (ret < 0)
                goto out_unlock;
        ret = 0;
out_unlock:
        if (ret)
                ret = VM_FAULT_SIGBUS;
        up_read(&inode->i_alloc_sem);
        return ret;
}
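/*
 * Illustrative sketch (not built): ext4_page_mkwrite() is installed as
 * the ->page_mkwrite callback from ext4's mmap path; in this era the
 * wiring lives in fs/ext4/file.c and looks roughly like this.
 */
#if 0
static struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        file_accessed(file);
        vma->vm_ops = &ext4_file_vm_ops;
        vma->vm_flags |= VM_CAN_NONLINEAR;
        return 0;
}
#endif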