/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *	David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	return jbd2_journal_begin_ordered_truncate(
					EXT4_SB(inode->i_sb)->s_journal,
					&EXT4_I(inode)->jinode,
					new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * The ext4 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (eg. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 *
 * If the handle isn't valid we're not journaling, but we still need to
 * call into ext4_journal_revoke() to put the buffer head.
 */
int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
		struct buffer_head *bh, ext4_fsblk_t blocknr)
{
	int err;

	might_sleep();

	BUFFER_TRACE(bh, "enter");

	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
		  "data mode %x\n",
		  bh, is_metadata, inode->i_mode,
		  test_opt(inode->i_sb, DATA_FLAGS));

	/* Never use the revoke function if we are doing full data
	 * journaling: there is no need to, and a V1 superblock won't
	 * support it.  Otherwise, only skip the revoke on un-journaled
	 * data blocks. */
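	/*
	 * Summary of the cases above (a restatement of the comment, not
	 * new policy): forget is enough when the whole filesystem is in
	 * data=journal mode, or when this is a data block of an inode
	 * that is not journaling its data; every other combination
	 * (metadata, or journaled data) must go through revoke.
	 */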
	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
	    (!is_metadata && !ext4_should_journal_data(inode))) {
		if (bh) {
			BUFFER_TRACE(bh, "call jbd2_journal_forget");
			return ext4_journal_forget(handle, bh);
		}
		return 0;
	}

	/*
	 * data!=journal && (is_metadata || should_journal_data(inode))
	 */
	BUFFER_TRACE(bh, "call ext4_journal_revoke");
	err = ext4_journal_revoke(handle, blocknr, bh);
	if (err)
		ext4_abort(inode->i_sb, __func__,
			   "error %d when attempting revoke", err);
	BUFFER_TRACE(bh, "exit");
	return err;
}

/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
	ext4_lblk_t needed;

	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

	/* Give ourselves just enough room to cope with inodes in which
	 * i_blocks is corrupt: we've seen disk corruptions in the past
	 * which resulted in random data in an inode which looked enough
	 * like a regular file for ext4 to try to delete it.  Things
	 * will go a bit crazy if that happens, but at least we should
	 * try not to panic the whole kernel. */
	if (needed < 2)
		needed = 2;

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT4_MAX_TRANS_DATA)
		needed = EXT4_MAX_TRANS_DATA;

	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext4_journal_start(inode, blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext4_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle))
		return 0;
	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
		return 0;
	return 1;
}
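/*
 * A sketch of how the helpers above are meant to be combined by a
 * truncate loop (illustrative only, not a verbatim copy of
 * ext4_truncate()):
 *
 *	handle = start_transaction(inode);
 *	for each chunk of blocks to free:
 *		if (try_to_extend_transaction(handle, inode))
 *			ext4_truncate_restart_trans(handle, inode,
 *					blocks_for_truncate(inode));
 *		... free the chunk under the refreshed handle ...
 */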
/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_get_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * the page cache has already been dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, blocks_for_truncate(inode));
	down_write(&EXT4_I(inode)->i_data_sem);

	return ret;
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_delete_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb, __func__,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb, __func__,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	clear_inode(inode);	/* We must guarantee clearing of inode... */
}
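/*
 * The lookup results below are kept as <p, key, bh> triples.  As an
 * illustration (not extra semantics): after a successful depth-3 walk,
 * chain[0].p points at a slot in the inode's i_data, chain[1].p at a
 * slot inside the doubly-indirect block's data, and chain[2].key holds
 * the little-endian block number of the data block itself.
 */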
typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

/**
 *	ext4_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: set this non-zero if the referred-to block is likely to be
 *		followed (on disk) by an indirect block.
 *
 *	To store the locations of file's data ext4 uses a data structure
 *	common for UNIX filesystems - a tree of pointers anchored in the
 *	inode, with data blocks at leaves and indirect blocks in intermediate
 *	nodes.  This function translates the block number into path in that
 *	tree - the return value is the path length and @offsets[n] is the
 *	offset of the pointer to the (n+1)th node in the nth one.  If @block
 *	is out of range (negative or too large) a warning is printed and zero
 *	returned.
 *
 *	Note: function doesn't find node addresses, so no IO is needed.  All
 *	we need to know is the capacity of indirect blocks (taken from the
 *	inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks.  We might use long long, but that would
 * kill us on x86.  Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */
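/*
 * Worked example (assuming a 4KB block size, so ptrs = 1024 and
 * ptrs_bits = 10): logical block 5000 is past the 12 direct slots and
 * the 1024 indirect slots (12 + 1024 = 1036 <= 5000), so it lives under
 * the doubly-indirect block.  With i_block reduced to 5000 - 1036 = 3964,
 * the offsets become {EXT4_DIND_BLOCK, 3964 >> 10, 3964 & 1023} =
 * {EXT4_DIND_BLOCK, 3, 892} and the returned depth is 3.
 */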
static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "ext4_block_to_path",
			     "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}

static int __ext4_check_blockref(const char *function, struct inode *inode,
				 __le32 *p, unsigned int max)
{
	__le32 *bref = p;
	unsigned int blk;

	while (bref < p+max) {
		blk = le32_to_cpu(*bref++);
		if (blk &&
		    unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						    blk, 1))) {
			ext4_error(inode->i_sb, function,
				   "invalid block reference %u "
				   "in inode #%lu", blk, inode->i_ino);
			return -EIO;
		}
	}
	return 0;
}


#define ext4_check_indirect_blockref(inode, bh)                         \
	__ext4_check_blockref(__func__, inode, (__le32 *)(bh)->b_data,  \
			      EXT4_ADDR_PER_BLOCK((inode)->i_sb))

#define ext4_check_inode_blockref(inode)                                \
	__ext4_check_blockref(__func__, inode, EXT4_I(inode)->i_data,   \
			      EXT4_NDIR_BLOCKS)
/**
 *	ext4_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Function fills the array of triples <key, p, bh> and returns %NULL
 *	if everything went OK or the pointer to the last filled triple
 *	(incomplete one) otherwise.  Upon the return chain[i].key contains
 *	the number of (i+1)-th block in the chain (as it is stored in memory,
 *	i.e. little-endian 32-bit), chain[i].p contains the address of that
 *	number (it points into struct inode for i==0 and into the bh->b_data
 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *	block for i>0 and NULL for i==0.  In other words, it holds the block
 *	numbers of the chain, the addresses they were taken from (and where
 *	we can verify that the chain did not change) and the buffer_heads
 *	hosting these numbers.
 *
 *	Function stops when it stumbles upon a zero pointer (absent block)
 *		(pointer to last triple returned, *@err == 0)
 *	or when it gets an IO error reading an indirect block
 *		(ditto, *@err == -EIO)
 *	or when it reads all @depth-1 indirect blocks successfully and finds
 *	the whole chain, all the way to the data (returns %NULL, *err == 0).
 *
 *	Needs to be called with
 *	down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh))
			goto failure;

		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = -EIO;
no_block:
	return p;
}

/**
 *	ext4_find_near - find a place for allocation with sufficient locality
 *	@inode: owner
 *	@ind: descriptor of indirect block.
 *
 *	This function returns the preferred place for block allocation.
 *	It is used when the heuristic for sequential allocation fails.
 *	Rules are:
 *	  + if there is a block to the left of our position - allocate near it.
 *	  + if pointer will live in indirect block - allocate near that block.
 *	  + if pointer will live in inode - allocate in the same
 *	    cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.  The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 *	Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put it
	 * into the same cylinder group then.
	 */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
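	/*
	 * Example of the colouring below (illustrative numbers): with
	 * 32768 blocks per group, a process whose pid % 16 == 5 gets
	 * colour = 5 * (32768 / 16) = 10240 blocks past bg_start, so
	 * concurrent allocators tend to start in disjoint sixteenths
	 * of the group.
	 */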
	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}

/**
 *	ext4_find_goal - find a preferred place for allocation.
 *	@inode: owner
 *	@block: block we want
 *	@partial: pointer to the last triple within a chain
 *
 *	Normally this function finds the preferred place for block allocation
 *	and returns it.
 *	Because this is only used for non-extent files, we limit the block nr
 *	to 32 bits.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	goal = ext4_find_near(inode, partial);
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}

/**
 *	ext4_blks_to_allocate: Look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 *	@branch: chain of indirect blocks
 *	@k: number of blocks needed for indirect blocks
 *	@blks: number of data blocks to be mapped.
 *	@blocks_to_boundary: the offset in the indirect block
 *
 *	return the total number of blocks to be allocated, including the
 *	direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) have not been allocated
	 * yet, so clearly the blocks on that path have not been allocated
	 * either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
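/*
 * Example for ext4_blks_to_allocate() (illustrative): if k > 0 the
 * branch is missing indirect blocks, so the count is simply the lesser
 * of @blks and blocks_to_boundary + 1.  If k == 0, asking for blks = 8
 * with blocks_to_boundary = 20 walks branch[0].p and stops early at the
 * first slot that is already non-zero, so only the contiguous free run
 * directly after the target block is counted.
 */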
/**
 *	ext4_alloc_blocks: allocate the blocks needed for a branch
 *	@indirect_blks: the number of blocks that need to be allocated for
 *			indirect blocks
 *
 *	@new_blocks: on return it will store the new block numbers for
 *	the indirect blocks (if needed) and the first direct block,
 *	@blks:	on return it will store the total number of allocated
 *		direct blocks
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, ext4_fsblk_t goal,
			     int indirect_blks, int blks,
			     ext4_fsblk_t new_blocks[4], int *err)
{
	struct ext4_allocation_request ar;
	int target, i;
	unsigned long count = 0, blk_allocated = 0;
	int index = 0;
	ext4_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks (if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks we need to allocate (required).
	 */
	/* first we try to allocate the indirect blocks */
	target = indirect_blks;
	while (target > 0) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext4_new_meta_blocks(handle, inode,
						     goal, &count, err);
		if (*err)
			goto failed_out;

		BUG_ON(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS);

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}
		if (count > 0) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
			printk(KERN_INFO "%s returned more blocks than "
						"requested\n", __func__);
			WARN_ON(1);
			break;
		}
	}

	target = blks - count;
	blk_allocated = count;
	if (!target)
		goto allocated;
	/* Now allocate data blocks */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.goal = goal;
	ar.len = target;
	ar.logical = iblock;
	if (S_ISREG(inode->i_mode))
		/* enable in-core preallocation only for regular files */
		ar.flags = EXT4_MB_HINT_DATA;

	current_block = ext4_mb_new_blocks(handle, &ar, err);
	BUG_ON(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS);

	if (*err && (target == blks)) {
		/*
		 * if the allocation failed and we didn't allocate
		 * any blocks before
		 */
		goto failed_out;
	}
	if (!*err) {
		if (target == blks) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
		}
		blk_allocated += ar.len;
	}
allocated:
	/* total number of blocks allocated for direct blocks */
	ret = blk_allocated;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
	return ret;
}
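/*
 * Illustration of ext4_alloc_blocks() (no additional semantics): with
 * indirect_blks = 2 and blks = 5, the first loop calls
 * ext4_new_meta_blocks() until the two missing metadata blocks sit in
 * new_blocks[0..1]; the five data blocks are then requested from mballoc
 * in a single ext4_mb_new_blocks() call, whose first block number is
 * saved in new_blocks[2].  The return value is the number of direct
 * blocks actually allocated, which may be smaller than @blks.
 */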
/**
 *	ext4_alloc_branch - allocate and set up a chain of blocks.
 *	@inode: owner
 *	@indirect_blks: number of allocated indirect blocks
 *	@blks: number of allocated direct blocks
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
 *	This function allocates blocks, zeroes out all but the last one,
 *	links them into chain and (if we are synchronous) writes them to disk.
 *	In other words, it prepares a branch that can be spliced onto the
 *	inode.  It stores the information about that chain in the branch[], in
 *	the same format as ext4_get_branch() would do.  We are calling it after
 *	we had read the existing part of chain and partial points to the last
 *	triple of that (one with zero ->key).  Upon the exit we have the same
 *	picture as after the successful ext4_get_block(), except that in one
 *	place chain is disconnected - *branch->p is still zero (we did not
 *	set the last link), but branch->key contains the number that should
 *	be placed into *branch->p to fill that gap.
 *
 *	If allocation fails we free all blocks we've allocated (and forget
 *	their buffer_heads) and return the error value from the failed
 *	ext4_alloc_blocks() (normally -ENOSPC).  Otherwise we set the chain
 *	as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, int indirect_blks,
			     int *blks, ext4_fsblk_t goal,
			     ext4_lblk_t *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext4_fsblk_t new_blocks[4];
	ext4_fsblk_t current_block;

	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			/* Don't brelse(bh) here; it's done in
			 * ext4_journal_forget() below */
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the newly allocated
			 * data block numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/* Allocation failed, free what we already allocated */
	for (i = 1; i <= n ; i++) {
		BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
		ext4_journal_forget(handle, branch[i].bh);
	}
	for (i = 0; i < indirect_blks; i++)
		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);

	ext4_free_blocks(handle, inode, new_blocks[i], num, 0);

	return err;
}
/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext4_alloc_branch)
 * @where: location of missing link
 * @num:   number of indirect blocks we are adding
 * @blks:  number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.).  In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			      ext4_lblk_t block, Indirect *where, int num,
			      int blks)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just
	 * allocated direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		ext4_mark_inode_dirty(handle, inode);
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
		ext4_journal_forget(handle, where[i].bh);
		ext4_free_blocks(handle, inode,
				 le32_to_cpu(where[i-1].key), 1, 0);
	}
	ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 0);

	return err;
}
/*
 * The ext4_ind_get_blocks() function handles non-extents inodes
 * (i.e., using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_get_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf.  So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * The ext4_ind_get_blocks() function should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks.
 */
static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
			       ext4_lblk_t iblock, unsigned int maxblocks,
			       struct buffer_head *bh_result,
			       int flags)
{
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, iblock, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result);
		count++;
		/*map more blocks*/
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	goal = ext4_find_goal(inode, iblock, partial);

	/* the number of blocks that need to be allocated for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext4_blks_to_allocate(partial, indirect_blks,
				      maxblocks, blocks_to_boundary);
	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
				&count, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case. --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, inode, iblock,
					 partial, indirect_blks, count);
	else
		goto cleanup;

	set_buffer_new(bh_result);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
	BUFFER_TRACE(bh_result, "returned");
out:
	return err;
}

qsize_t ext4_get_reserved_space(struct inode *inode)
{
	unsigned long long total;

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	total = EXT4_I(inode)->i_reserved_data_blocks +
		EXT4_I(inode)->i_reserved_meta_blocks;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	return total;
}
/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate @blocks for a non-extent file
 */
static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
{
	int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ind_blks, dind_blks, tind_blks;

	/* number of new indirect blocks needed */
	ind_blks = (blocks + icap - 1) / icap;

	dind_blks = (ind_blks + icap - 1) / icap;

	tind_blks = 1;

	return ind_blks + dind_blks + tind_blks;
}

/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate the given number of blocks
 */
static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
{
	if (!blocks)
		return 0;

	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
		return ext4_ext_calc_metadata_amount(inode, blocks);

	return ext4_indirect_calc_metadata_amount(inode, blocks);
}
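/*
 * Worked example for the indirect case above (assuming a 4KB block
 * size, so icap = 1024): reserving 1000 data blocks needs at most
 * (1000 + 1023) / 1024 = 1 indirect block, 1 doubly-indirect block and
 * 1 triply-indirect block, i.e. 3 metadata blocks.  The estimate is
 * deliberately pessimistic; unused reservations are returned later by
 * ext4_da_update_reserve_space().
 */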
static void ext4_da_update_reserve_space(struct inode *inode, int used)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int total, mdb, mdb_free;

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	/* recalculate the number of metablocks still needing to be reserved */
	total = EXT4_I(inode)->i_reserved_data_blocks - used;
	mdb = ext4_calc_metadata_amount(inode, total);

	/* figure out how many metablocks to release */
	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
	mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;

	if (mdb_free) {
		/* Account for allocated meta_blocks */
		mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;

		/* update fs dirty blocks counter */
		percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
		EXT4_I(inode)->i_allocated_meta_blocks = 0;
		EXT4_I(inode)->i_reserved_meta_blocks = mdb;
	}

	/* update per-inode reservations */
	BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
	EXT4_I(inode)->i_reserved_data_blocks -= used;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/*
	 * release the over-booked quota for metadata blocks
	 */
	if (mdb_free)
		vfs_dq_release_reservation_block(inode, mdb_free);

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if (!total && (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}

static int check_block_validity(struct inode *inode, const char *msg,
				sector_t logical, sector_t phys, int len)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) {
		ext4_error(inode->i_sb, msg,
			   "inode #%lu logical block %llu mapped to %llu "
			   "(size %d)", inode->i_ino,
			   (unsigned long long) logical,
			   (unsigned long long) phys, len);
		return -EIO;
	}
	return 0;
}

/*
 * The ext4_get_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extent based, it calls ext4_ext_get_blocks();
 * otherwise it calls ext4_ind_get_blocks() to handle indirect-mapped
 * files.
 *
 * On success, it returns the number of blocks mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and uninitialized,
 * the result buffer head is unmapped.  If create == 1, it will make sure
 * the buffer head is mapped.
 *
 * It returns 0 if a plain look up failed (blocks have not been
 * allocated); in that case, the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
		    unsigned int max_blocks, struct buffer_head *bh,
		    int flags)
{
	int retval;

	clear_buffer_mapped(bh);
	clear_buffer_unwritten(bh);

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
		retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
					     bh, 0);
	} else {
		retval = ext4_ind_get_blocks(handle, inode, block, max_blocks,
					     bh, 0);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	if (retval > 0 && buffer_mapped(bh)) {
		int ret = check_block_validity(inode, "file system corruption",
					       block, bh->b_blocknr, retval);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Returns if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_blocks() returns with the buffer head unmapped
	 * when create == 0.
	 */
	if (retval > 0 && buffer_mapped(bh))
		return retval;

	/*
	 * When we call get_blocks without the create flag, the
	 * BH_Unwritten flag could have gotten set if the blocks
	 * requested were part of an uninitialized extent.  We need to
	 * clear this flag now that we are committed to convert all or
	 * part of the uninitialized extent to be an initialized
	 * extent.  This is because we need to avoid the combination
	 * of BH_Unwritten and BH_Mapped flags being simultaneously
	 * set on the buffer_head.
	 */
	clear_buffer_unwritten(bh);

	/*
	 * New block allocation and/or writing to an uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with the create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * if the caller is from the delayed allocation writeout path
	 * we have already reserved fs blocks for allocation;
	 * let the underlying get_block() function know to
	 * avoid double accounting
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		EXT4_I(inode)->i_delalloc_reserved_flag = 1;
	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
		retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
					     bh, flags);
	} else {
		retval = ext4_ind_get_blocks(handle, inode, block,
					     max_blocks, bh, flags);

		if (retval > 0 && buffer_new(bh)) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE;
		}
	}

	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		EXT4_I(inode)->i_delalloc_reserved_flag = 0;

	/*
	 * Update reserved blocks/metadata blocks after successful
	 * block allocation which had been deferred till now.
	 */
	if ((retval > 0) && (flags & EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE))
		ext4_da_update_reserve_space(inode, retval);

	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && buffer_mapped(bh)) {
		int ret = check_block_validity(inode, "file system "
					       "corruption after allocation",
					       block, bh->b_blocknr, retval);
		if (ret != 0)
			return ret;
	}
	return retval;
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096
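/*
 * Example of the mapping below (illustrative): get_block callers encode
 * the request size in bh_result->b_size, so for a 1MB direct IO on a
 * 4KB-block filesystem max_blocks starts at 256; a successful return of
 * ret blocks shrinks b_size back to ret << i_blkbits.
 */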
int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create)
{
	handle_t *handle = ext4_journal_current_handle();
	int ret = 0, started = 0;
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
	int dio_credits;

	if (create && !handle) {
		/* Direct IO write... */
		if (max_blocks > DIO_MAX_BLOCKS)
			max_blocks = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}
		started = 1;
	}

	ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);
	if (ret > 0) {
		bh_result->b_size = (ret << inode->i_blkbits);
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
out:
	return ret;
}

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct buffer_head dummy;
	int fatal = 0, err;
	int flags = 0;

	J_ASSERT(handle != NULL || create == 0);

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	buffer_trace_init(&dummy.b_history);
	if (create)
		flags |= EXT4_GET_BLOCKS_CREATE;
	err = ext4_get_blocks(handle, inode, block, 1, &dummy, flags);
	/*
	 * ext4_get_blocks() returns the number of blocks mapped, or 0 in
	 * the case of a HOLE.
	 */
	if (err > 0) {
		if (err > 1)
			WARN_ON(1);
		err = 0;
	}
	*errp = err;
	if (!err && buffer_mapped(&dummy)) {
		struct buffer_head *bh;
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (!bh) {
			*errp = -EIO;
			goto err;
		}
		if (buffer_new(&dummy)) {
			J_ASSERT(create != 0);
			J_ASSERT(handle != NULL);

			/*
			 * Now that we do not always journal data, we should
			 * keep in mind whether this should always journal the
			 * new buffer as metadata.  For now, regular file
			 * writes use ext4_get_block instead, so it's not a
			 * problem.
			 */
			lock_buffer(bh);
			BUFFER_TRACE(bh, "call get_create_access");
			fatal = ext4_journal_get_create_access(handle, bh);
			if (!fatal && !buffer_uptodate(bh)) {
				memset(bh->b_data, 0, inode->i_sb->s_blocksize);
				set_buffer_uptodate(bh);
			}
			unlock_buffer(bh);
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			err = ext4_handle_dirty_metadata(handle, inode, bh);
			if (!fatal)
				fatal = err;
		} else {
			BUFFER_TRACE(bh, "not a new buffer");
		}
		if (fatal) {
			*errp = fatal;
			brelse(bh);
			bh = NULL;
		}
		return bh;
	}
err:
	return NULL;
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ_META, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}

static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
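/*
 * Example for walk_page_buffers() (illustrative): on a 4KB page with a
 * 1KB block size there are four buffer_heads; a call with from = 1024
 * and to = 3072 applies @fn to the middle two only, and any untouched
 * buffer that is not uptodate sets *@partial so the caller knows the
 * page cannot be marked uptodate as a whole.
 */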
/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page().  In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
				       struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	return ext4_journal_get_write_access(handle, bh);
}

static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		page_cache_release(page);
		/*
		 * block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again.  Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied > EXT4_I(inode)->i_disksize) {
		/* We need to mark the inode dirty even if
		 * new_i_size is less than inode->i_size
		 * but greater than i_disksize. (hint: delalloc)
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock.  First, it unnecessarily
	 * makes the holding time of page lock longer.  Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	return copied;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_ordered_write_end(inode, pos, len, copied);
	ret = ext4_jbd2_file_inode(handle, inode);

	if (ret == 0) {
		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
					      page, fsdata);
		copied = ret2;
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			/* if we have allocated more blocks than we copied,
			 * we will have blocks allocated outside
			 * inode->i_size, so truncate them
			 */
			ext4_orphan_add(handle, inode);
		if (ret2 < 0)
			ret = ret2;
	}
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}


	return ret ? ret : copied;
}

static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_writeback_write_end(inode, pos, len, copied);
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
				      page, fsdata);
	copied = ret2;
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* if we have allocated more blocks than we copied,
		 * we will have blocks allocated outside
		 * inode->i_size, so truncate them
		 */
		ext4_orphan_add(handle, inode);

	if (ret2 < 0)
		ret = ret2;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	loff_t new_i_size;

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (copied < len) {
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from+copied, to);
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	if (!partial)
		SetPageUptodate(page);
	new_i_size = pos + copied;
	if (new_i_size > inode->i_size)
		i_size_write(inode, pos+copied);
	EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		ext4_update_i_disksize(inode, new_i_size);
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	unlock_page(page);
	page_cache_release(page);
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* if we have allocated more blocks than we copied,
		 * we will have blocks allocated outside
		 * inode->i_size, so truncate them
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size) {
		ext4_truncate(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
{
	int retries = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	unsigned long md_needed, mdblocks, total = 0;

	/*
	 * recalculate the amount of metadata blocks to reserve
	 * in order to allocate nrblocks;
	 * the worst case is one extent per block
	 */
repeat:
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	total = EXT4_I(inode)->i_reserved_data_blocks + nrblocks;
	mdblocks = ext4_calc_metadata_amount(inode, total);
	BUG_ON(mdblocks < EXT4_I(inode)->i_reserved_meta_blocks);

	md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
	total = md_needed + nrblocks;

	/*
	 * Make a quota reservation here to prevent quota overflow
	 * later.  Real quota accounting is done at pages writeout
	 * time.
	 */
	if (vfs_dq_reserve_block(inode, total)) {
		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
		return -EDQUOT;
	}

	if (ext4_claim_free_blocks(sbi, total)) {
		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
			yield();
			goto repeat;
		}
		vfs_dq_release_reservation_block(inode, total);
		return -ENOSPC;
	}
	EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
	EXT4_I(inode)->i_reserved_meta_blocks = mdblocks;

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
	return 0;	/* success */
}
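/*
 * Note on the arithmetic above (a restatement, not new behaviour):
 * reservations are incremental.  If 10 data blocks are already reserved
 * and a write asks for 1 more, mdblocks is recomputed for all 11 and
 * only the difference over the metadata already reserved is added, so
 * repeated small writes do not over-reserve metadata.
 */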
1827 * but since this function is called from invalidate 1828 * page, it's harmless to return without any action 1829 */ 1830 printk(KERN_INFO "ext4 delalloc try to release %d reserved " 1831 "blocks for inode %lu, but there is no reserved " 1832 "data blocks\n", to_free, inode->i_ino); 1833 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1834 return; 1835 } 1836 1837 /* recalculate the number of metablocks still need to be reserved */ 1838 total = EXT4_I(inode)->i_reserved_data_blocks - to_free; 1839 mdb = ext4_calc_metadata_amount(inode, total); 1840 1841 /* figure out how many metablocks to release */ 1842 BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks); 1843 mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb; 1844 1845 release = to_free + mdb_free; 1846 1847 /* update fs dirty blocks counter for truncate case */ 1848 percpu_counter_sub(&sbi->s_dirtyblocks_counter, release); 1849 1850 /* update per-inode reservations */ 1851 BUG_ON(to_free > EXT4_I(inode)->i_reserved_data_blocks); 1852 EXT4_I(inode)->i_reserved_data_blocks -= to_free; 1853 1854 BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks); 1855 EXT4_I(inode)->i_reserved_meta_blocks = mdb; 1856 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1857 1858 vfs_dq_release_reservation_block(inode, release); 1859 } 1860 1861 static void ext4_da_page_release_reservation(struct page *page, 1862 unsigned long offset) 1863 { 1864 int to_release = 0; 1865 struct buffer_head *head, *bh; 1866 unsigned int curr_off = 0; 1867 1868 head = page_buffers(page); 1869 bh = head; 1870 do { 1871 unsigned int next_off = curr_off + bh->b_size; 1872 1873 if ((offset <= curr_off) && (buffer_delay(bh))) { 1874 to_release++; 1875 clear_buffer_delay(bh); 1876 } 1877 curr_off = next_off; 1878 } while ((bh = bh->b_this_page) != head); 1879 ext4_da_release_space(page->mapping->host, to_release); 1880 } 1881 1882 /* 1883 * Delayed allocation stuff 1884 */ 1885 1886 /* 1887 * mpage_da_submit_io - walks through extent of pages and try to write 1888 * them with writepage() call back 1889 * 1890 * @mpd->inode: inode 1891 * @mpd->first_page: first page of the extent 1892 * @mpd->next_page: page after the last page of the extent 1893 * 1894 * By the time mpage_da_submit_io() is called we expect all blocks 1895 * to be allocated. this may be wrong if allocation failed. 1896 * 1897 * As pages are already locked by write_cache_pages(), we can't use it 1898 */ 1899 static int mpage_da_submit_io(struct mpage_da_data *mpd) 1900 { 1901 long pages_skipped; 1902 struct pagevec pvec; 1903 unsigned long index, end; 1904 int ret = 0, err, nr_pages, i; 1905 struct inode *inode = mpd->inode; 1906 struct address_space *mapping = inode->i_mapping; 1907 1908 BUG_ON(mpd->next_page <= mpd->first_page); 1909 /* 1910 * We need to start from the first_page to the next_page - 1 1911 * to make sure we also write the mapped dirty buffer_heads. 1912 * If we look at mpd->b_blocknr we would only be looking 1913 * at the currently mapped buffer_heads. 
1914 */ 1915 index = mpd->first_page; 1916 end = mpd->next_page - 1; 1917 1918 pagevec_init(&pvec, 0); 1919 while (index <= end) { 1920 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 1921 if (nr_pages == 0) 1922 break; 1923 for (i = 0; i < nr_pages; i++) { 1924 struct page *page = pvec.pages[i]; 1925 1926 index = page->index; 1927 if (index > end) 1928 break; 1929 index++; 1930 1931 BUG_ON(!PageLocked(page)); 1932 BUG_ON(PageWriteback(page)); 1933 1934 pages_skipped = mpd->wbc->pages_skipped; 1935 err = mapping->a_ops->writepage(page, mpd->wbc); 1936 if (!err && (pages_skipped == mpd->wbc->pages_skipped)) 1937 /* 1938 * have successfully written the page 1939 * without skipping the same 1940 */ 1941 mpd->pages_written++; 1942 /* 1943 * In error case, we have to continue because 1944 * remaining pages are still locked 1945 * XXX: unlock and re-dirty them? 1946 */ 1947 if (ret == 0) 1948 ret = err; 1949 } 1950 pagevec_release(&pvec); 1951 } 1952 return ret; 1953 } 1954 1955 /* 1956 * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers 1957 * 1958 * @mpd->inode - inode to walk through 1959 * @exbh->b_blocknr - first block on a disk 1960 * @exbh->b_size - amount of space in bytes 1961 * @logical - first logical block to start assignment with 1962 * 1963 * the function goes through all passed space and put actual disk 1964 * block numbers into buffer heads, dropping BH_Delay and BH_Unwritten 1965 */ 1966 static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical, 1967 struct buffer_head *exbh) 1968 { 1969 struct inode *inode = mpd->inode; 1970 struct address_space *mapping = inode->i_mapping; 1971 int blocks = exbh->b_size >> inode->i_blkbits; 1972 sector_t pblock = exbh->b_blocknr, cur_logical; 1973 struct buffer_head *head, *bh; 1974 pgoff_t index, end; 1975 struct pagevec pvec; 1976 int nr_pages, i; 1977 1978 index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 1979 end = (logical + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 1980 cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 1981 1982 pagevec_init(&pvec, 0); 1983 1984 while (index <= end) { 1985 /* XXX: optimize tail */ 1986 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 1987 if (nr_pages == 0) 1988 break; 1989 for (i = 0; i < nr_pages; i++) { 1990 struct page *page = pvec.pages[i]; 1991 1992 index = page->index; 1993 if (index > end) 1994 break; 1995 index++; 1996 1997 BUG_ON(!PageLocked(page)); 1998 BUG_ON(PageWriteback(page)); 1999 BUG_ON(!page_has_buffers(page)); 2000 2001 bh = page_buffers(page); 2002 head = bh; 2003 2004 /* skip blocks out of the range */ 2005 do { 2006 if (cur_logical >= logical) 2007 break; 2008 cur_logical++; 2009 } while ((bh = bh->b_this_page) != head); 2010 2011 do { 2012 if (cur_logical >= logical + blocks) 2013 break; 2014 2015 if (buffer_delay(bh) || 2016 buffer_unwritten(bh)) { 2017 2018 BUG_ON(bh->b_bdev != inode->i_sb->s_bdev); 2019 2020 if (buffer_delay(bh)) { 2021 clear_buffer_delay(bh); 2022 bh->b_blocknr = pblock; 2023 } else { 2024 /* 2025 * unwritten already should have 2026 * blocknr assigned. 
Verify that 2027 */ 2028 clear_buffer_unwritten(bh); 2029 BUG_ON(bh->b_blocknr != pblock); 2030 } 2031 2032 } else if (buffer_mapped(bh)) 2033 BUG_ON(bh->b_blocknr != pblock); 2034 2035 cur_logical++; 2036 pblock++; 2037 } while ((bh = bh->b_this_page) != head); 2038 } 2039 pagevec_release(&pvec); 2040 } 2041 } 2042 2043 2044 /* 2045 * __unmap_underlying_blocks - just a helper function to unmap 2046 * a set of blocks described by @bh 2047 */ 2048 static inline void __unmap_underlying_blocks(struct inode *inode, 2049 struct buffer_head *bh) 2050 { 2051 struct block_device *bdev = inode->i_sb->s_bdev; 2052 int blocks, i; 2053 2054 blocks = bh->b_size >> inode->i_blkbits; 2055 for (i = 0; i < blocks; i++) 2056 unmap_underlying_metadata(bdev, bh->b_blocknr + i); 2057 } 2058 2059 static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd, 2060 sector_t logical, long blk_cnt) 2061 { 2062 int nr_pages, i; 2063 pgoff_t index, end; 2064 struct pagevec pvec; 2065 struct inode *inode = mpd->inode; 2066 struct address_space *mapping = inode->i_mapping; 2067 2068 index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 2069 end = (logical + blk_cnt - 1) >> 2070 (PAGE_CACHE_SHIFT - inode->i_blkbits); 2071 while (index <= end) { 2072 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 2073 if (nr_pages == 0) 2074 break; 2075 for (i = 0; i < nr_pages; i++) { 2076 struct page *page = pvec.pages[i]; 2077 index = page->index; 2078 if (index > end) 2079 break; 2080 index++; 2081 2082 BUG_ON(!PageLocked(page)); 2083 BUG_ON(PageWriteback(page)); 2084 block_invalidatepage(page, 0); 2085 ClearPageUptodate(page); 2086 unlock_page(page); 2087 } 2088 } 2089 return; 2090 } 2091 2092 static void ext4_print_free_blocks(struct inode *inode) 2093 { 2094 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2095 printk(KERN_EMERG "Total free blocks count %lld\n", 2096 ext4_count_free_blocks(inode->i_sb)); 2097 printk(KERN_EMERG "Free/Dirty block details\n"); 2098 printk(KERN_EMERG "free_blocks=%lld\n", 2099 (long long)percpu_counter_sum(&sbi->s_freeblocks_counter)); 2100 printk(KERN_EMERG "dirty_blocks=%lld\n", 2101 (long long)percpu_counter_sum(&sbi->s_dirtyblocks_counter)); 2102 printk(KERN_EMERG "Block reservation details\n"); 2103 printk(KERN_EMERG "i_reserved_data_blocks=%u\n", 2104 EXT4_I(inode)->i_reserved_data_blocks); 2105 printk(KERN_EMERG "i_reserved_meta_blocks=%u\n", 2106 EXT4_I(inode)->i_reserved_meta_blocks); 2107 return; 2108 } 2109 2110 /* 2111 * mpage_da_map_blocks - go through the given space 2112 * 2113 * @mpd - bh describing the space 2114 * 2115 * The function skips space we know is already mapped to disk blocks. 2116 * 2117 */ 2118 static int mpage_da_map_blocks(struct mpage_da_data *mpd) 2119 { 2120 int err, blks, get_blocks_flags; 2121 struct buffer_head new; 2122 sector_t next = mpd->b_blocknr; 2123 unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits; 2124 loff_t disksize = EXT4_I(mpd->inode)->i_disksize; 2125 handle_t *handle = NULL; 2126 2127 /* 2128 * We consider only non-mapped and non-allocated blocks 2129 */ 2130 if ((mpd->b_state & (1 << BH_Mapped)) && 2131 !(mpd->b_state & (1 << BH_Delay)) && 2132 !(mpd->b_state & (1 << BH_Unwritten))) 2133 return 0; 2134 2135 /* 2136 * If we didn't accumulate anything to write, simply return 2137 */ 2138 if (!mpd->b_size) 2139 return 0; 2140 2141 handle = ext4_journal_current_handle(); 2142 BUG_ON(!handle); 2143 2144 /* 2145 * Call ext4_get_blocks() to allocate any delayed allocation 2146 * blocks, or to convert an uninitialized extent to be 2147 * initialized (in the case where we have written into 2148 * one or more preallocated blocks). 2149 * 2150 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to 2151 * indicate that we are on the delayed allocation path. This 2152 * affects functions in many different parts of the allocation 2153 * call path. This flag exists primarily because we don't 2154 * want to change *many* call functions, so ext4_get_blocks() 2155 * will set the magic i_delalloc_reserved_flag once the 2156 * inode's allocation semaphore is taken. 2157 * 2158 * If the blocks in question were delalloc blocks, set 2159 * EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE so the delalloc accounting 2160 * variables are updated after the blocks have been allocated. 2161 */ 2162 new.b_state = 0; 2163 get_blocks_flags = (EXT4_GET_BLOCKS_CREATE | 2164 EXT4_GET_BLOCKS_DELALLOC_RESERVE); 2165 if (mpd->b_state & (1 << BH_Delay)) 2166 get_blocks_flags |= EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE; 2167 blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks, 2168 &new, get_blocks_flags); 2169 if (blks < 0) { 2170 err = blks; 2171 /* 2172 * If get_blocks returns with an error, we simply 2173 * return. Later writepage will redirty the page and 2174 * writepages will find the dirty page again 2175 */ 2176 if (err == -EAGAIN) 2177 return 0; 2178 2179 if (err == -ENOSPC && 2180 ext4_count_free_blocks(mpd->inode->i_sb)) { 2181 mpd->retval = err; 2182 return 0; 2183 } 2184 2185 /* 2186 * A get_blocks failure will cause us to loop in 2187 * writepages, because a_ops->writepage won't be able 2188 * to make progress. The page will be redirtied by 2189 * writepage and writepages will again try to write 2190 * it. 2191 */ 2192 printk(KERN_EMERG "%s block allocation failed for inode %lu " 2193 "at logical offset %llu with max blocks " 2194 "%zd with error %d\n", 2195 __func__, mpd->inode->i_ino, 2196 (unsigned long long)next, 2197 mpd->b_size >> mpd->inode->i_blkbits, err); 2198 printk(KERN_EMERG "This should not happen! " 2199 "Data will be lost\n"); 2200 if (err == -ENOSPC) { 2201 ext4_print_free_blocks(mpd->inode); 2202 } 2203 /* invalidate all the pages */ 2204 ext4_da_block_invalidatepages(mpd, next, 2205 mpd->b_size >> mpd->inode->i_blkbits); 2206 return err; 2207 } 2208 BUG_ON(blks == 0); 2209 2210 new.b_size = (blks << mpd->inode->i_blkbits); 2211 2212 if (buffer_new(&new)) 2213 __unmap_underlying_blocks(mpd->inode, &new); 2214 2215 /* 2216 * If the blocks are marked delayed, we need to 2217 * put the actual blocknr in and drop the delayed bit 2218 */ 2219 if ((mpd->b_state & (1 << BH_Delay)) || 2220 (mpd->b_state & (1 << BH_Unwritten))) 2221 mpage_put_bnr_to_bhs(mpd, next, &new); 2222 2223 if (ext4_should_order_data(mpd->inode)) { 2224 err = ext4_jbd2_file_inode(handle, mpd->inode); 2225 if (err) 2226 return err; 2227 } 2228 2229 /* 2230 * Update the on-disk size along with the block allocation. 2231 */ 2232 disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits; 2233 if (disksize > i_size_read(mpd->inode)) 2234 disksize = i_size_read(mpd->inode); 2235 if (disksize > EXT4_I(mpd->inode)->i_disksize) { 2236 ext4_update_i_disksize(mpd->inode, disksize); 2237 return ext4_mark_inode_dirty(handle, mpd->inode); 2238 } 2239 2240 return 0; 2241 } 2242 2243 #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \ 2244 (1 << BH_Delay) | (1 << BH_Unwritten)) 2245 2246 /* 2247 * mpage_add_bh_to_extent - try to add one more block to the extent of blocks 2248 * 2249 * @mpd->lbh - extent of blocks 2250 * @logical - logical number of the block in the file 2251 * @bh - bh of the block (used to access the block's state) 2252 * 2253 * The function is used to collect contiguous blocks in the same state 2254 */ 2255 static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, 2256 sector_t logical, size_t b_size, 2257 unsigned long b_state) 2258 { 2259 sector_t next; 2260 int nrblocks = mpd->b_size >> mpd->inode->i_blkbits; 2261 2262 /* check if the reserved journal credits might overflow */ 2263 if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) { 2264 if (nrblocks >= EXT4_MAX_TRANS_DATA) { 2265 /* 2266 * With the non-extent format we are limited by the 2267 * journal credits available. The total credit needed to 2268 * insert nrblocks contiguous blocks depends on 2269 * nrblocks. So limit nrblocks. 2270 */ 2271 goto flush_it; 2272 } else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) > 2273 EXT4_MAX_TRANS_DATA) { 2274 /* 2275 * Adding the new buffer_head would make it cross the 2276 * allowed limit for which we have journal credits 2277 * reserved. So limit the new bh->b_size 2278 */ 2279 b_size = (EXT4_MAX_TRANS_DATA - nrblocks) << 2280 mpd->inode->i_blkbits; 2281 /* we will do mpage_da_submit_io in the next loop */ 2282 } 2283 } 2284 /* 2285 * First block in the extent 2286 */ 2287 if (mpd->b_size == 0) { 2288 mpd->b_blocknr = logical; 2289 mpd->b_size = b_size; 2290 mpd->b_state = b_state & BH_FLAGS; 2291 return; 2292 } 2293 2294 next = mpd->b_blocknr + nrblocks; 2295 /* 2296 * Can we merge the block into our big extent?
2297 */ 2298 if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) { 2299 mpd->b_size += b_size; 2300 return; 2301 } 2302 2303 flush_it: 2304 /* 2305 * We couldn't merge the block into our extent, so we 2306 * need to flush the current extent and start a new one 2307 */ 2308 if (mpage_da_map_blocks(mpd) == 0) 2309 mpage_da_submit_io(mpd); 2310 mpd->io_done = 1; 2311 return; 2312 } 2313 2314 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh) 2315 { 2316 return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh); 2317 } 2318 2319 /* 2320 * __mpage_da_writepage - finds the extent of pages and blocks 2321 * 2322 * @page: page to consider 2323 * @wbc: not used, we just follow the rules 2324 * @data: context 2325 * 2326 * The function finds extents of pages and scans them for all blocks. 2327 */ 2328 static int __mpage_da_writepage(struct page *page, 2329 struct writeback_control *wbc, void *data) 2330 { 2331 struct mpage_da_data *mpd = data; 2332 struct inode *inode = mpd->inode; 2333 struct buffer_head *bh, *head; 2334 sector_t logical; 2335 2336 if (mpd->io_done) { 2337 /* 2338 * The rest of the pages in the page_vec: 2339 * redirty them and skip them. We will 2340 * try to write them again after 2341 * starting a new transaction 2342 */ 2343 redirty_page_for_writepage(wbc, page); 2344 unlock_page(page); 2345 return MPAGE_DA_EXTENT_TAIL; 2346 } 2347 /* 2348 * Can we merge this page into the current extent? 2349 */ 2350 if (mpd->next_page != page->index) { 2351 /* 2352 * Nope, we can't. So, we map non-allocated blocks 2353 * and start IO on them using writepage() 2354 */ 2355 if (mpd->next_page != mpd->first_page) { 2356 if (mpage_da_map_blocks(mpd) == 0) 2357 mpage_da_submit_io(mpd); 2358 /* 2359 * skip the rest of the pages in the page_vec 2360 */ 2361 mpd->io_done = 1; 2362 redirty_page_for_writepage(wbc, page); 2363 unlock_page(page); 2364 return MPAGE_DA_EXTENT_TAIL; 2365 } 2366 2367 /* 2368 * Start the next extent of pages ... 2369 */ 2370 mpd->first_page = page->index; 2371 2372 /* 2373 * ... and blocks 2374 */ 2375 mpd->b_size = 0; 2376 mpd->b_state = 0; 2377 mpd->b_blocknr = 0; 2378 } 2379 2380 mpd->next_page = page->index + 1; 2381 logical = (sector_t) page->index << 2382 (PAGE_CACHE_SHIFT - inode->i_blkbits); 2383 2384 if (!page_has_buffers(page)) { 2385 mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE, 2386 (1 << BH_Dirty) | (1 << BH_Uptodate)); 2387 if (mpd->io_done) 2388 return MPAGE_DA_EXTENT_TAIL; 2389 } else { 2390 /* 2391 * Page with regular buffer heads, just add all dirty ones 2392 */ 2393 head = page_buffers(page); 2394 bh = head; 2395 do { 2396 BUG_ON(buffer_locked(bh)); 2397 /* 2398 * We need to try to allocate 2399 * unmapped blocks in the same page. 2400 * Otherwise we won't make progress 2401 * with the page in ext4_writepage 2402 */ 2403 if (ext4_bh_delay_or_unwritten(NULL, bh)) { 2404 mpage_add_bh_to_extent(mpd, logical, 2405 bh->b_size, 2406 bh->b_state); 2407 if (mpd->io_done) 2408 return MPAGE_DA_EXTENT_TAIL; 2409 } else if (buffer_dirty(bh) && (buffer_mapped(bh))) { 2410 /* 2411 * mapped dirty buffer. We need to update 2412 * the b_state because we look at 2413 * b_state in mpage_da_map_blocks. We don't 2414 * update b_size because if we find an 2415 * unmapped buffer_head later we need to 2416 * use the b_state flag of that buffer_head. 2417 */ 2418 if (mpd->b_size == 0) 2419 mpd->b_state = bh->b_state & BH_FLAGS; 2420 } 2421 logical++; 2422 } while ((bh = bh->b_this_page) != head); 2423 } 2424 2425 return 0; 2426 } 2427 2428 /* 2429 * This is a special get_blocks_t callback which is used by 2430 * ext4_da_write_begin(). It will either return a mapped block or 2431 * reserve space for a single block. 2432 * 2433 * For a delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set. 2434 * We also have b_blocknr = -1 and b_bdev initialized properly 2435 * 2436 * For an unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set. 2437 * We also have b_blocknr = the physical block mapping the unwritten extent 2438 * and b_bdev initialized properly. 2439 */ 2440 static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, 2441 struct buffer_head *bh_result, int create) 2442 { 2443 int ret = 0; 2444 sector_t invalid_block = ~((sector_t) 0xffff); 2445 2446 if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es)) 2447 invalid_block = ~0; 2448 2449 BUG_ON(create == 0); 2450 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize); 2451 2452 /* 2453 * first, we need to know whether the block is already allocated; 2454 * preallocated blocks are unmapped but should be treated 2455 * the same as allocated blocks. 2456 */ 2457 ret = ext4_get_blocks(NULL, inode, iblock, 1, bh_result, 0); 2458 if ((ret == 0) && !buffer_delay(bh_result)) { 2459 /* the block isn't (pre)allocated yet, let's reserve space */ 2460 /* 2461 * XXX: __block_prepare_write() unmaps the passed block, 2462 * is it OK? 2463 */ 2464 ret = ext4_da_reserve_space(inode, 1); 2465 if (ret) 2466 /* not enough space to reserve */ 2467 return ret; 2468 2469 map_bh(bh_result, inode->i_sb, invalid_block); 2470 set_buffer_new(bh_result); 2471 set_buffer_delay(bh_result); 2472 } else if (ret > 0) { 2473 bh_result->b_size = (ret << inode->i_blkbits); 2474 if (buffer_unwritten(bh_result)) { 2475 /* A delayed write to an unwritten bh should 2476 * be marked new and mapped. Mapped ensures 2477 * that we don't do get_block multiple times 2478 * when we write to the same offset, and new 2479 * ensures that we do the proper zero out for 2480 * a partial write. 2481 */ 2482 set_buffer_new(bh_result); 2483 set_buffer_mapped(bh_result); 2484 } 2485 ret = 0; 2486 } 2487 2488 return ret; 2489 } 2490 2491 /* 2492 * This function is used as a standard get_block_t callback function 2493 * when there is no desire to allocate any blocks. It is used as a 2494 * callback function for block_prepare_write(), nobh_writepage(), and 2495 * block_write_full_page(). These functions should only try to map a 2496 * single block at a time. 2497 * 2498 * Since this function doesn't do block allocations even if the caller 2499 * requests it by passing in create=1, it is critically important that 2500 * any caller checks to make sure that any buffer heads returned 2501 * by this function are either all already mapped or marked for 2502 * delayed allocation before calling nobh_writepage() or 2503 * block_write_full_page(). Otherwise, b_blocknr could be left 2504 * uninitialized, and the page write functions will be taken by 2505 * surprise.
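 * (ext4_writepage() below honours this contract: it only hands a page
 * to nobh_writepage() or block_write_full_page() after checking, via
 * ext4_bh_delay_or_unwritten(), that no buffer is delayed or unwritten,
 * redirtying the page otherwise.)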
*/ 2507 static int noalloc_get_block_write(struct inode *inode, sector_t iblock, 2508 struct buffer_head *bh_result, int create) 2509 { 2510 int ret = 0; 2511 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; 2512 2513 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize); 2514 2515 /* 2516 * we don't want to do block allocation in writepage, 2517 * so call ext4_get_blocks() with create = 0 2518 */ 2519 ret = ext4_get_blocks(NULL, inode, iblock, max_blocks, bh_result, 0); 2520 if (ret > 0) { 2521 bh_result->b_size = (ret << inode->i_blkbits); 2522 ret = 0; 2523 } 2524 return ret; 2525 } 2526 2527 static int bget_one(handle_t *handle, struct buffer_head *bh) 2528 { 2529 get_bh(bh); 2530 return 0; 2531 } 2532 2533 static int bput_one(handle_t *handle, struct buffer_head *bh) 2534 { 2535 put_bh(bh); 2536 return 0; 2537 } 2538 2539 static int __ext4_journalled_writepage(struct page *page, 2540 struct writeback_control *wbc, 2541 unsigned int len) 2542 { 2543 struct address_space *mapping = page->mapping; 2544 struct inode *inode = mapping->host; 2545 struct buffer_head *page_bufs; 2546 handle_t *handle = NULL; 2547 int ret = 0; 2548 int err; 2549 2550 page_bufs = page_buffers(page); 2551 BUG_ON(!page_bufs); 2552 walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one); 2553 /* As soon as we unlock the page, it can go away, but we have 2554 * references to the buffers so we are safe */ 2555 unlock_page(page); 2556 2557 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); 2558 if (IS_ERR(handle)) { 2559 ret = PTR_ERR(handle); 2560 goto out; 2561 } 2562 2563 ret = walk_page_buffers(handle, page_bufs, 0, len, NULL, 2564 do_journal_get_write_access); 2565 2566 err = walk_page_buffers(handle, page_bufs, 0, len, NULL, 2567 write_end_fn); 2568 if (ret == 0) 2569 ret = err; 2570 err = ext4_journal_stop(handle); 2571 if (!ret) 2572 ret = err; 2573 2574 walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one); 2575 EXT4_I(inode)->i_state |= EXT4_STATE_JDATA; 2576 out: 2577 return ret; 2578 } 2579 2580 /* 2581 * Note that we don't need to start a transaction unless we're journaling data 2582 * because we should have holes filled from ext4_page_mkwrite(). We even don't 2583 * need to file the inode to the transaction's list in ordered mode because if 2584 * we are writing back data added by write(), the inode is already there and if 2585 * we are writing back data modified via mmap(), no one guarantees in which 2586 * transaction the data will hit the disk. In case we are journaling data, we 2587 * cannot start a transaction directly because transaction start ranks above 2588 * page lock so we have to do some magic. 2589 * 2590 * This function can get called via... 2591 * - ext4_da_writepages after taking page lock (have journal handle) 2592 * - journal_submit_inode_data_buffers (no journal handle) 2593 * - shrink_page_list via pdflush (no journal handle) 2594 * - grab_page_cache when doing write_begin (have journal handle) 2595 * 2596 * We don't do any block allocation in this function. If we have a page with 2597 * multiple blocks, we need to write those buffer_heads that are mapped. This 2598 * is important for mmap-based writes. So if we do, with blocksize 1K, 2599 * truncate(f, 1024); 2600 * a = mmap(f, 0, 4096); 2601 * a[0] = 'a'; 2602 * truncate(f, 4096); 2603 * then the first buffer_head in the page is mapped via the page_mkwrite 2604 * callback, but the other buffer_heads are unmapped yet dirty (dirtied 2605 * via do_wp_page). So writepage should write the first block.
If we modify 2606 * the mmap area beyond 1024 we will again get a page_fault and the 2607 * page_mkwrite callback will do the block allocation and mark the 2608 * buffer_heads mapped. 2609 * 2610 * We redirty the page if we have any buffer_heads that are either delayed 2611 * or unwritten in the page. 2612 * 2613 * We can get recursively called as shown below. 2614 * 2615 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() -> 2616 * ext4_writepage() 2617 * 2618 * But since we don't do any block allocation we should not deadlock. 2619 * The page also has the dirty flag cleared, so we don't take the page lock recursively. 2620 */ 2621 static int ext4_writepage(struct page *page, 2622 struct writeback_control *wbc) 2623 { 2624 int ret = 0; 2625 loff_t size; 2626 unsigned int len; 2627 struct buffer_head *page_bufs; 2628 struct inode *inode = page->mapping->host; 2629 2630 trace_ext4_writepage(inode, page); 2631 size = i_size_read(inode); 2632 if (page->index == size >> PAGE_CACHE_SHIFT) 2633 len = size & ~PAGE_CACHE_MASK; 2634 else 2635 len = PAGE_CACHE_SIZE; 2636 2637 if (page_has_buffers(page)) { 2638 page_bufs = page_buffers(page); 2639 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL, 2640 ext4_bh_delay_or_unwritten)) { 2641 /* 2642 * We don't want to do block allocation, 2643 * so redirty the page and return. 2644 * We may reach here when we do a journal commit 2645 * via journal_submit_inode_data_buffers. 2646 * If the blocks are not mapped yet we simply ignore 2647 * them. We can also reach here via shrink_page_list 2648 */ 2649 redirty_page_for_writepage(wbc, page); 2650 unlock_page(page); 2651 return 0; 2652 } 2653 } else { 2654 /* 2655 * The test for page_has_buffers() is subtle: 2656 * We know the page is dirty but it lost its buffers. That means 2657 * that at some moment in time after write_begin()/write_end() 2658 * has been called all buffers have been clean and thus they 2659 * must have been written at least once. So they are all 2660 * mapped and we can happily proceed with mapping them 2661 * and writing the page. 2662 * 2663 * Try to initialize the buffer_heads and check whether 2664 * all are mapped and non-delay. We don't want to 2665 * do block allocation here. 2666 */ 2667 ret = block_prepare_write(page, 0, len, 2668 noalloc_get_block_write); 2669 if (!ret) { 2670 page_bufs = page_buffers(page); 2671 /* check whether all are mapped and non-delay */ 2672 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL, 2673 ext4_bh_delay_or_unwritten)) { 2674 redirty_page_for_writepage(wbc, page); 2675 unlock_page(page); 2676 return 0; 2677 } 2678 } else { 2679 /* 2680 * We can't do block allocation here, 2681 * so just redirty the page, unlock it 2682 * and return 2683 */ 2684 redirty_page_for_writepage(wbc, page); 2685 unlock_page(page); 2686 return 0; 2687 } 2688 /* now mark the buffer_heads as dirty and uptodate */ 2689 block_commit_write(page, 0, len); 2690 } 2691 2692 if (PageChecked(page) && ext4_should_journal_data(inode)) { 2693 /* 2694 * It's mmapped pagecache. Add buffers and journal it. There 2695 * doesn't seem much point in redirtying the page here. 2696 */ 2697 ClearPageChecked(page); 2698 return __ext4_journalled_writepage(page, wbc, len); 2699 } 2700 2701 if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode)) 2702 ret = nobh_writepage(page, noalloc_get_block_write, wbc); 2703 else 2704 ret = block_write_full_page(page, noalloc_get_block_write, 2705 wbc); 2706 2707 return ret; 2708 } 2709 2710 /* 2711 * This is called via ext4_da_writepages() to 2712 * calculate the total number of credits to reserve to fit 2713 * a single extent allocation into a single transaction; 2714 * ext4_da_writepages() will loop calling this before 2715 * the block allocation. 2716 */ 2717 2718 static int ext4_da_writepages_trans_blocks(struct inode *inode) 2719 { 2720 int max_blocks = EXT4_I(inode)->i_reserved_data_blocks; 2721 2722 /* 2723 * With the non-extent format the journal credit needed to 2724 * insert nrblocks contiguous blocks depends on the 2725 * number of contiguous blocks. So we will limit 2726 * the number of contiguous blocks to a sane value 2727 */ 2728 if (!(inode->i_flags & EXT4_EXTENTS_FL) && 2729 (max_blocks > EXT4_MAX_TRANS_DATA)) 2730 max_blocks = EXT4_MAX_TRANS_DATA; 2731 2732 return ext4_chunk_trans_blocks(inode, max_blocks); 2733 } 2734 2735 static int ext4_da_writepages(struct address_space *mapping, 2736 struct writeback_control *wbc) 2737 { 2738 pgoff_t index; 2739 int range_whole = 0; 2740 handle_t *handle = NULL; 2741 struct mpage_da_data mpd; 2742 struct inode *inode = mapping->host; 2743 int no_nrwrite_index_update; 2744 int pages_written = 0; 2745 long pages_skipped; 2746 int range_cyclic, cycled = 1, io_done = 0; 2747 int needed_blocks, ret = 0, nr_to_writebump = 0; 2748 loff_t range_start = wbc->range_start; 2749 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); 2750 2751 trace_ext4_da_writepages(inode, wbc); 2752 2753 /* 2754 * No pages to write? This is mainly a kludge to avoid starting 2755 * a transaction for special inodes like the journal inode on last iput() 2756 * because that could violate lock ordering on umount 2757 */ 2758 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 2759 return 0; 2760 2761 /* 2762 * If the filesystem has aborted, it is read-only, so return 2763 * right away instead of dumping stack traces later on that 2764 * will obscure the real source of the problem. We test 2765 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because 2766 * the latter could be true if the filesystem is mounted 2767 * read-only, and in that case, ext4_da_writepages should 2768 * *never* be called, so if that ever happens, we would want 2769 * the stack trace. 2770 */ 2771 if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) 2772 return -EROFS; 2773 2774 /* 2775 * Make sure nr_to_write is >= sbi->s_mb_stream_request. 2776 * This makes sure a small file's blocks are allocated in a 2777 * single attempt, so that small files 2778 * get less fragmented.
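 * For example (illustrative values): with s_mb_stream_request = 16 and
 * an incoming nr_to_write = 4, nr_to_write is bumped to 16 and
 * nr_to_writebump remembers the extra 12, which is subtracted again
 * before returning.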
*/ 2780 if (wbc->nr_to_write < sbi->s_mb_stream_request) { 2781 nr_to_writebump = sbi->s_mb_stream_request - wbc->nr_to_write; 2782 wbc->nr_to_write = sbi->s_mb_stream_request; 2783 } 2784 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 2785 range_whole = 1; 2786 2787 range_cyclic = wbc->range_cyclic; 2788 if (wbc->range_cyclic) { 2789 index = mapping->writeback_index; 2790 if (index) 2791 cycled = 0; 2792 wbc->range_start = index << PAGE_CACHE_SHIFT; 2793 wbc->range_end = LLONG_MAX; 2794 wbc->range_cyclic = 0; 2795 } else 2796 index = wbc->range_start >> PAGE_CACHE_SHIFT; 2797 2798 mpd.wbc = wbc; 2799 mpd.inode = mapping->host; 2800 2801 /* 2802 * we don't want write_cache_pages to update 2803 * nr_to_write and writeback_index 2804 */ 2805 no_nrwrite_index_update = wbc->no_nrwrite_index_update; 2806 wbc->no_nrwrite_index_update = 1; 2807 pages_skipped = wbc->pages_skipped; 2808 2809 retry: 2810 while (!ret && wbc->nr_to_write > 0) { 2811 2812 /* 2813 * We insert one extent at a time, so we need 2814 * the credits for a single extent allocation. 2815 * Journalled mode is currently not supported 2816 * by delalloc. 2817 */ 2818 BUG_ON(ext4_should_journal_data(inode)); 2819 needed_blocks = ext4_da_writepages_trans_blocks(inode); 2820 2821 /* start a new transaction */ 2822 handle = ext4_journal_start(inode, needed_blocks); 2823 if (IS_ERR(handle)) { 2824 ret = PTR_ERR(handle); 2825 printk(KERN_CRIT "%s: jbd2_start: " 2826 "%ld pages, ino %lu; err %d\n", __func__, 2827 wbc->nr_to_write, inode->i_ino, ret); 2828 dump_stack(); 2829 goto out_writepages; 2830 } 2831 2832 /* 2833 * Now call __mpage_da_writepage to find the next 2834 * contiguous region of logical blocks that need 2835 * blocks to be allocated by ext4. We don't actually 2836 * submit the blocks for I/O here, even though 2837 * write_cache_pages thinks it will, and will set the 2838 * pages as clean for write before calling 2839 * __mpage_da_writepage(). 2840 */ 2841 mpd.b_size = 0; 2842 mpd.b_state = 0; 2843 mpd.b_blocknr = 0; 2844 mpd.first_page = 0; 2845 mpd.next_page = 0; 2846 mpd.io_done = 0; 2847 mpd.pages_written = 0; 2848 mpd.retval = 0; 2849 ret = write_cache_pages(mapping, wbc, __mpage_da_writepage, 2850 &mpd); 2851 /* 2852 * If we have a contiguous extent of pages and we 2853 * haven't done the I/O yet, map the blocks and submit 2854 * them for I/O. 2855 */ 2856 if (!mpd.io_done && mpd.next_page != mpd.first_page) { 2857 if (mpage_da_map_blocks(&mpd) == 0) 2858 mpage_da_submit_io(&mpd); 2859 mpd.io_done = 1; 2860 ret = MPAGE_DA_EXTENT_TAIL; 2861 } 2862 trace_ext4_da_write_pages(inode, &mpd); 2863 wbc->nr_to_write -= mpd.pages_written; 2864 2865 ext4_journal_stop(handle); 2866 2867 if ((mpd.retval == -ENOSPC) && sbi->s_journal) { 2868 /* commit the transaction, which would 2869 * free the blocks released in the transaction, 2870 * and try again 2871 */ 2872 jbd2_journal_force_commit_nested(sbi->s_journal); 2873 wbc->pages_skipped = pages_skipped; 2874 ret = 0; 2875 } else if (ret == MPAGE_DA_EXTENT_TAIL) { 2876 /* 2877 * Got one extent; now try with the 2878 * rest of the pages 2879 */ 2880 pages_written += mpd.pages_written; 2881 wbc->pages_skipped = pages_skipped; 2882 ret = 0; 2883 io_done = 1; 2884 } else if (wbc->nr_to_write) 2885 /* 2886 * There is no more writeout needed, 2887 * or we requested a nonblocking writeout 2888 * and we found the device congested 2889 */ 2890 break; 2891 } 2892 if (!io_done && !cycled) { 2893 cycled = 1; 2894 index = 0; 2895 wbc->range_start = index << PAGE_CACHE_SHIFT; 2896 wbc->range_end = mapping->writeback_index - 1; 2897 goto retry; 2898 } 2899 if (pages_skipped != wbc->pages_skipped) 2900 printk(KERN_EMERG "This should not happen leaving %s " 2901 "with nr_to_write = %ld ret = %d\n", 2902 __func__, wbc->nr_to_write, ret); 2903 2904 /* Update index */ 2905 index += pages_written; 2906 wbc->range_cyclic = range_cyclic; 2907 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 2908 /* 2909 * set the writeback_index so that range_cyclic 2910 * mode will write it back later 2911 */ 2912 mapping->writeback_index = index; 2913 2914 out_writepages: 2915 if (!no_nrwrite_index_update) 2916 wbc->no_nrwrite_index_update = 0; 2917 wbc->nr_to_write -= nr_to_writebump; 2918 wbc->range_start = range_start; 2919 trace_ext4_da_writepages_result(inode, wbc, ret, pages_written); 2920 return ret; 2921 } 2922 2923 #define FALL_BACK_TO_NONDELALLOC 1 2924 static int ext4_nonda_switch(struct super_block *sb) 2925 { 2926 s64 free_blocks, dirty_blocks; 2927 struct ext4_sb_info *sbi = EXT4_SB(sb); 2928 2929 /* 2930 * Switch to non-delalloc mode if we are running low 2931 * on free blocks. The free block accounting via percpu 2932 * counters can get slightly wrong, with up to percpu_counter_batch 2933 * accumulated on each CPU without updating the global counters. 2934 * Delalloc needs accurate free block accounting, so switch 2935 * to non-delalloc when we are near the error range.
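 * A worked example of the check below: with free_blocks = 140 and
 * dirty_blocks = 100, 2 * 140 < 3 * 100 holds (the free count is below
 * 150% of the dirty count), so the write falls back to the
 * non-delalloc path.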
*/ 2937 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); 2938 dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter); 2939 if (2 * free_blocks < 3 * dirty_blocks || 2940 free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) { 2941 /* 2942 * the free block count is less than 150% of the dirty blocks, 2943 * or the free blocks are below the watermark 2944 */ 2945 return 1; 2946 } 2947 return 0; 2948 } 2949 2950 static int ext4_da_write_begin(struct file *file, struct address_space *mapping, 2951 loff_t pos, unsigned len, unsigned flags, 2952 struct page **pagep, void **fsdata) 2953 { 2954 int ret, retries = 0; 2955 struct page *page; 2956 pgoff_t index; 2957 unsigned from, to; 2958 struct inode *inode = mapping->host; 2959 handle_t *handle; 2960 2961 index = pos >> PAGE_CACHE_SHIFT; 2962 from = pos & (PAGE_CACHE_SIZE - 1); 2963 to = from + len; 2964 2965 if (ext4_nonda_switch(inode->i_sb)) { 2966 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; 2967 return ext4_write_begin(file, mapping, pos, 2968 len, flags, pagep, fsdata); 2969 } 2970 *fsdata = (void *)0; 2971 trace_ext4_da_write_begin(inode, pos, len, flags); 2972 retry: 2973 /* 2974 * With delayed allocation, we don't log the i_disksize update 2975 * if there is delayed block allocation. But we still need 2976 * to journal the i_disksize update if the write extends the 2977 * end of file and the tail buffer is already mapped. 2978 */ 2979 handle = ext4_journal_start(inode, 1); 2980 if (IS_ERR(handle)) { 2981 ret = PTR_ERR(handle); 2982 goto out; 2983 } 2984 /* We cannot recurse into the filesystem as the transaction is already 2985 * started */ 2986 flags |= AOP_FLAG_NOFS; 2987 2988 page = grab_cache_page_write_begin(mapping, index, flags); 2989 if (!page) { 2990 ext4_journal_stop(handle); 2991 ret = -ENOMEM; 2992 goto out; 2993 } 2994 *pagep = page; 2995 2996 ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, 2997 ext4_da_get_block_prep); 2998 if (ret < 0) { 2999 unlock_page(page); 3000 ext4_journal_stop(handle); 3001 page_cache_release(page); 3002 /* 3003 * block_write_begin may have instantiated a few blocks 3004 * outside i_size. Trim these off again. We don't need 3005 * i_size_read because we hold i_mutex. 3006 */ 3007 if (pos + len > inode->i_size) 3008 ext4_truncate(inode); 3009 } 3010 3011 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 3012 goto retry; 3013 out: 3014 return ret; 3015 } 3016 3017 /* 3018 * Check whether we should update i_disksize 3019 * when writing to the end of file without requiring block allocation 3020 */ 3021 static int ext4_da_should_update_i_disksize(struct page *page, 3022 unsigned long offset) 3023 { 3024 struct buffer_head *bh; 3025 struct inode *inode = page->mapping->host; 3026 unsigned int idx; 3027 int i; 3028 3029 bh = page_buffers(page); 3030 idx = offset >> inode->i_blkbits; 3031 3032 for (i = 0; i < idx; i++) 3033 bh = bh->b_this_page; 3034 3035 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh)) 3036 return 0; 3037 return 1; 3038 } 3039 3040 static int ext4_da_write_end(struct file *file, 3041 struct address_space *mapping, 3042 loff_t pos, unsigned len, unsigned copied, 3043 struct page *page, void *fsdata) 3044 { 3045 struct inode *inode = mapping->host; 3046 int ret = 0, ret2; 3047 handle_t *handle = ext4_journal_current_handle(); 3048 loff_t new_i_size; 3049 unsigned long start, end; 3050 int write_mode = (int)(unsigned long)fsdata; 3051 3052 if (write_mode == FALL_BACK_TO_NONDELALLOC) { 3053 if (ext4_should_order_data(inode)) { 3054 return ext4_ordered_write_end(file, mapping, pos, 3055 len, copied, page, fsdata); 3056 } else if (ext4_should_writeback_data(inode)) { 3057 return ext4_writeback_write_end(file, mapping, pos, 3058 len, copied, page, fsdata); 3059 } else { 3060 BUG(); 3061 } 3062 } 3063 3064 trace_ext4_da_write_end(inode, pos, len, copied); 3065 start = pos & (PAGE_CACHE_SIZE - 1); 3066 end = start + copied - 1; 3067 3068 /* 3069 * generic_write_end() will run mark_inode_dirty() if i_size 3070 * changes. So let's piggyback the i_disksize mark_inode_dirty 3071 * into that. 3072 */ 3073 3074 new_i_size = pos + copied; 3075 if (new_i_size > EXT4_I(inode)->i_disksize) { 3076 if (ext4_da_should_update_i_disksize(page, end)) { 3077 down_write(&EXT4_I(inode)->i_data_sem); 3078 if (new_i_size > EXT4_I(inode)->i_disksize) { 3079 /* 3080 * Updating i_disksize when extending the file 3081 * without needing block allocation 3082 */ 3083 if (ext4_should_order_data(inode)) 3084 ret = ext4_jbd2_file_inode(handle, 3085 inode); 3086 3087 EXT4_I(inode)->i_disksize = new_i_size; 3088 } 3089 up_write(&EXT4_I(inode)->i_data_sem); 3090 /* We need to mark the inode dirty even if 3091 * new_i_size is less than inode->i_size 3092 * but greater than i_disksize (hint: delalloc) 3093 */ 3094 ext4_mark_inode_dirty(handle, inode); 3095 } 3096 } 3097 ret2 = generic_write_end(file, mapping, pos, len, copied, 3098 page, fsdata); 3099 copied = ret2; 3100 if (ret2 < 0) 3101 ret = ret2; 3102 ret2 = ext4_journal_stop(handle); 3103 if (!ret) 3104 ret = ret2; 3105 3106 return ret ? ret : copied; 3107 } 3108 3109 static void ext4_da_invalidatepage(struct page *page, unsigned long offset) 3110 { 3111 /* 3112 * Drop the reserved blocks 3113 */ 3114 BUG_ON(!PageLocked(page)); 3115 if (!page_has_buffers(page)) 3116 goto out; 3117 3118 ext4_da_page_release_reservation(page, offset); 3119 3120 out: 3121 ext4_invalidatepage(page, offset); 3122 3123 return; 3124 } 3125 3126 /* 3127 * Force all delayed allocation blocks to be allocated for a given inode.
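 * (Cheap fast path first: if the inode has no reserved data or
 * metadata blocks there is nothing delayed to allocate, and the
 * function returns immediately.)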
*/ 3129 int ext4_alloc_da_blocks(struct inode *inode) 3130 { 3131 trace_ext4_alloc_da_blocks(inode); 3132 3133 if (!EXT4_I(inode)->i_reserved_data_blocks && 3134 !EXT4_I(inode)->i_reserved_meta_blocks) 3135 return 0; 3136 3137 /* 3138 * We do something simple for now. The filemap_flush() will 3139 * also start triggering a write of the data blocks, which is 3140 * not strictly speaking necessary (and for users of 3141 * laptop_mode, not even desirable). However, to do otherwise 3142 * would require replicating code paths in: 3143 * 3144 * ext4_da_writepages() -> 3145 * write_cache_pages() ---> (via passed in callback function) 3146 * __mpage_da_writepage() --> 3147 * mpage_add_bh_to_extent() 3148 * mpage_da_map_blocks() 3149 * 3150 * The problem is that write_cache_pages(), located in 3151 * mm/page-writeback.c, marks pages clean in preparation for 3152 * doing I/O, which is not desirable if we're not planning on 3153 * doing I/O at all. 3154 * 3155 * We could call write_cache_pages(), and then redirty all of 3156 * the pages by calling redirty_page_for_writepage() but that 3157 * would be ugly in the extreme. So instead we would need to 3158 * replicate parts of the code in the above functions, 3159 * simplifying them because we wouldn't actually intend to 3160 * write out the pages, but rather only collect contiguous 3161 * logical block extents, call the multi-block allocator, and 3162 * then update the buffer heads with the block allocations. 3163 * 3164 * For now, though, we'll cheat by calling filemap_flush(), 3165 * which will map the blocks, and start the I/O, but not 3166 * actually wait for the I/O to complete. 3167 */ 3168 return filemap_flush(inode->i_mapping); 3169 } 3170 3171 /* 3172 * bmap() is special. It gets used by applications such as lilo and by 3173 * the swapper to find the on-disk block of a specific piece of data. 3174 * 3175 * Naturally, this is dangerous if the block concerned is still in the 3176 * journal. If somebody makes a swapfile on an ext4 data-journaling 3177 * filesystem and enables swap, then they may get a nasty shock when the 3178 * data getting swapped to that swapfile suddenly gets overwritten by 3179 * the original zeros written out previously to the journal and 3180 * awaiting writeback in the kernel's buffer cache. 3181 * 3182 * So, if we see any bmap calls here on a modified, data-journaled file, 3183 * take extra steps to flush any blocks which might be in the cache. 3184 */ 3185 static sector_t ext4_bmap(struct address_space *mapping, sector_t block) 3186 { 3187 struct inode *inode = mapping->host; 3188 journal_t *journal; 3189 int err; 3190 3191 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && 3192 test_opt(inode->i_sb, DELALLOC)) { 3193 /* 3194 * With delalloc we want to sync the file 3195 * so that we can make sure we allocate 3196 * blocks for the file 3197 */ 3198 filemap_write_and_wait(mapping); 3199 } 3200 3201 if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) { 3202 /* 3203 * This is a REALLY heavyweight approach, but the use of 3204 * bmap on dirty files is expected to be extremely rare: 3205 * only if we run lilo or swapon on a freshly made file 3206 * do we expect this to happen. 3207 * 3208 * (bmap requires CAP_SYS_RAWIO so this does not 3209 * represent an unprivileged user DOS attack --- we'd be 3210 * in trouble if mortal users could trigger this path at 3211 * will.) 3212 * 3213 * NB. EXT4_STATE_JDATA is not set on files other than 3214 * regular files.
If somebody wants to bmap a directory 3215 * or symlink and gets confused because the buffer 3216 * hasn't yet been flushed to disk, they deserve 3217 * everything they get. 3218 */ 3219 3220 EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA; 3221 journal = EXT4_JOURNAL(inode); 3222 jbd2_journal_lock_updates(journal); 3223 err = jbd2_journal_flush(journal); 3224 jbd2_journal_unlock_updates(journal); 3225 3226 if (err) 3227 return 0; 3228 } 3229 3230 return generic_block_bmap(mapping, block, ext4_get_block); 3231 } 3232 3233 static int ext4_readpage(struct file *file, struct page *page) 3234 { 3235 return mpage_readpage(page, ext4_get_block); 3236 } 3237 3238 static int 3239 ext4_readpages(struct file *file, struct address_space *mapping, 3240 struct list_head *pages, unsigned nr_pages) 3241 { 3242 return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); 3243 } 3244 3245 static void ext4_invalidatepage(struct page *page, unsigned long offset) 3246 { 3247 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3248 3249 /* 3250 * If it's a full truncate we just forget about the pending dirtying 3251 */ 3252 if (offset == 0) 3253 ClearPageChecked(page); 3254 3255 if (journal) 3256 jbd2_journal_invalidatepage(journal, page, offset); 3257 else 3258 block_invalidatepage(page, offset); 3259 } 3260 3261 static int ext4_releasepage(struct page *page, gfp_t wait) 3262 { 3263 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3264 3265 WARN_ON(PageChecked(page)); 3266 if (!page_has_buffers(page)) 3267 return 0; 3268 if (journal) 3269 return jbd2_journal_try_to_free_buffers(journal, page, wait); 3270 else 3271 return try_to_free_buffers(page); 3272 } 3273 3274 /* 3275 * If the O_DIRECT write will extend the file then add this inode to the 3276 * orphan list. So recovery will truncate it back to the original size 3277 * if the machine crashes during the write. 3278 * 3279 * If the O_DIRECT write is instantiating holes inside i_size and the machine 3280 * crashes then stale disk data _may_ be exposed inside the file. But current 3281 * VFS code falls back into the buffered path in that case so we are safe. 3282 */ 3283 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, 3284 const struct iovec *iov, loff_t offset, 3285 unsigned long nr_segs) 3286 { 3287 struct file *file = iocb->ki_filp; 3288 struct inode *inode = file->f_mapping->host; 3289 struct ext4_inode_info *ei = EXT4_I(inode); 3290 handle_t *handle; 3291 ssize_t ret; 3292 int orphan = 0; 3293 size_t count = iov_length(iov, nr_segs); 3294 3295 if (rw == WRITE) { 3296 loff_t final_size = offset + count; 3297 3298 if (final_size > inode->i_size) { 3299 /* Credits for sb + inode write */ 3300 handle = ext4_journal_start(inode, 2); 3301 if (IS_ERR(handle)) { 3302 ret = PTR_ERR(handle); 3303 goto out; 3304 } 3305 ret = ext4_orphan_add(handle, inode); 3306 if (ret) { 3307 ext4_journal_stop(handle); 3308 goto out; 3309 } 3310 orphan = 1; 3311 ei->i_disksize = inode->i_size; 3312 ext4_journal_stop(handle); 3313 } 3314 } 3315 3316 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 3317 offset, nr_segs, 3318 ext4_get_block, NULL); 3319 3320 if (orphan) { 3321 int err; 3322 3323 /* Credits for sb + inode write */ 3324 handle = ext4_journal_start(inode, 2); 3325 if (IS_ERR(handle)) { 3326 /* This is really bad luck. We've written the data 3327 * but cannot extend i_size. Bail out and pretend 3328 * the write failed...
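 * (The inode is still on the orphan list at this point, so after a
 * crash, recovery will truncate the file back to the old size, which
 * is consistent with the error we return here.)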
*/ 3329 ret = PTR_ERR(handle); 3330 goto out; 3331 } 3332 if (inode->i_nlink) 3333 ext4_orphan_del(handle, inode); 3334 if (ret > 0) { 3335 loff_t end = offset + ret; 3336 if (end > inode->i_size) { 3337 ei->i_disksize = end; 3338 i_size_write(inode, end); 3339 /* 3340 * We're going to return a positive `ret' 3341 * here due to non-zero-length I/O, so there's 3342 * no way of reporting error returns from 3343 * ext4_mark_inode_dirty() to userspace. So 3344 * ignore it. 3345 */ 3346 ext4_mark_inode_dirty(handle, inode); 3347 } 3348 } 3349 err = ext4_journal_stop(handle); 3350 if (ret == 0) 3351 ret = err; 3352 } 3353 out: 3354 return ret; 3355 } 3356 3357 /* 3358 * Pages can be marked dirty completely asynchronously from ext4's journalling 3359 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do 3360 * much here because ->set_page_dirty is called under VFS locks. The page is 3361 * not necessarily locked. 3362 * 3363 * We cannot just dirty the page and leave attached buffers clean, because the 3364 * buffers' dirty state is "definitive". We cannot just set the buffers dirty 3365 * or jbddirty because all the journalling code will explode. 3366 * 3367 * So what we do is to mark the page "pending dirty" and next time writepage 3368 * is called, propagate that into the buffers appropriately. 3369 */ 3370 static int ext4_journalled_set_page_dirty(struct page *page) 3371 { 3372 SetPageChecked(page); 3373 return __set_page_dirty_nobuffers(page); 3374 } 3375 3376 static const struct address_space_operations ext4_ordered_aops = { 3377 .readpage = ext4_readpage, 3378 .readpages = ext4_readpages, 3379 .writepage = ext4_writepage, 3380 .sync_page = block_sync_page, 3381 .write_begin = ext4_write_begin, 3382 .write_end = ext4_ordered_write_end, 3383 .bmap = ext4_bmap, 3384 .invalidatepage = ext4_invalidatepage, 3385 .releasepage = ext4_releasepage, 3386 .direct_IO = ext4_direct_IO, 3387 .migratepage = buffer_migrate_page, 3388 .is_partially_uptodate = block_is_partially_uptodate, 3389 }; 3390 3391 static const struct address_space_operations ext4_writeback_aops = { 3392 .readpage = ext4_readpage, 3393 .readpages = ext4_readpages, 3394 .writepage = ext4_writepage, 3395 .sync_page = block_sync_page, 3396 .write_begin = ext4_write_begin, 3397 .write_end = ext4_writeback_write_end, 3398 .bmap = ext4_bmap, 3399 .invalidatepage = ext4_invalidatepage, 3400 .releasepage = ext4_releasepage, 3401 .direct_IO = ext4_direct_IO, 3402 .migratepage = buffer_migrate_page, 3403 .is_partially_uptodate = block_is_partially_uptodate, 3404 }; 3405 3406 static const struct address_space_operations ext4_journalled_aops = { 3407 .readpage = ext4_readpage, 3408 .readpages = ext4_readpages, 3409 .writepage = ext4_writepage, 3410 .sync_page = block_sync_page, 3411 .write_begin = ext4_write_begin, 3412 .write_end = ext4_journalled_write_end, 3413 .set_page_dirty = ext4_journalled_set_page_dirty, 3414 .bmap = ext4_bmap, 3415 .invalidatepage = ext4_invalidatepage, 3416 .releasepage = ext4_releasepage, 3417 .is_partially_uptodate = block_is_partially_uptodate, 3418 }; 3419 3420 static const struct address_space_operations ext4_da_aops = { 3421 .readpage = ext4_readpage, 3422 .readpages = ext4_readpages, 3423 .writepage = ext4_writepage, 3424 .writepages = ext4_da_writepages, 3425 .sync_page = block_sync_page, 3426 .write_begin = ext4_da_write_begin, 3427 .write_end = ext4_da_write_end, 3428 .bmap = ext4_bmap, 3429 .invalidatepage = ext4_da_invalidatepage, 3430 .releasepage = ext4_releasepage, 3431 .direct_IO = 
ext4_direct_IO, 3432 .migratepage = buffer_migrate_page, 3433 .is_partially_uptodate = block_is_partially_uptodate, 3434 }; 3435 3436 void ext4_set_aops(struct inode *inode) 3437 { 3438 if (ext4_should_order_data(inode) && 3439 test_opt(inode->i_sb, DELALLOC)) 3440 inode->i_mapping->a_ops = &ext4_da_aops; 3441 else if (ext4_should_order_data(inode)) 3442 inode->i_mapping->a_ops = &ext4_ordered_aops; 3443 else if (ext4_should_writeback_data(inode) && 3444 test_opt(inode->i_sb, DELALLOC)) 3445 inode->i_mapping->a_ops = &ext4_da_aops; 3446 else if (ext4_should_writeback_data(inode)) 3447 inode->i_mapping->a_ops = &ext4_writeback_aops; 3448 else 3449 inode->i_mapping->a_ops = &ext4_journalled_aops; 3450 } 3451 3452 /* 3453 * ext4_block_truncate_page() zeroes out a mapping from file offset `from' 3454 * up to the end of the block which corresponds to `from'. 3455 * This is required during truncate. We need to physically zero the tail end 3456 * of that block so it doesn't yield old data if the file is later grown. 3457 */ 3458 int ext4_block_truncate_page(handle_t *handle, 3459 struct address_space *mapping, loff_t from) 3460 { 3461 ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; 3462 unsigned offset = from & (PAGE_CACHE_SIZE-1); 3463 unsigned blocksize, length, pos; 3464 ext4_lblk_t iblock; 3465 struct inode *inode = mapping->host; 3466 struct buffer_head *bh; 3467 struct page *page; 3468 int err = 0; 3469 3470 page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, 3471 mapping_gfp_mask(mapping) & ~__GFP_FS); 3472 if (!page) 3473 return -EINVAL; 3474 3475 blocksize = inode->i_sb->s_blocksize; 3476 length = blocksize - (offset & (blocksize - 1)); 3477 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 3478 3479 /* 3480 * For the "nobh" option, we can only work if we don't need to 3481 * read in the page - otherwise we create buffers to do the IO. 3482 */ 3483 if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) && 3484 ext4_should_writeback_data(inode) && PageUptodate(page)) { 3485 zero_user(page, offset, length); 3486 set_page_dirty(page); 3487 goto unlock; 3488 } 3489 3490 if (!page_has_buffers(page)) 3491 create_empty_buffers(page, blocksize, 0); 3492 3493 /* Find the buffer that contains "offset" */ 3494 bh = page_buffers(page); 3495 pos = blocksize; 3496 while (offset >= pos) { 3497 bh = bh->b_this_page; 3498 iblock++; 3499 pos += blocksize; 3500 } 3501 3502 err = 0; 3503 if (buffer_freed(bh)) { 3504 BUFFER_TRACE(bh, "freed: skip"); 3505 goto unlock; 3506 } 3507 3508 if (!buffer_mapped(bh)) { 3509 BUFFER_TRACE(bh, "unmapped"); 3510 ext4_get_block(inode, iblock, bh, 0); 3511 /* unmapped? It's a hole - nothing to do */ 3512 if (!buffer_mapped(bh)) { 3513 BUFFER_TRACE(bh, "still unmapped"); 3514 goto unlock; 3515 } 3516 } 3517 3518 /* Ok, it's mapped. Make sure it's up-to-date */ 3519 if (PageUptodate(page)) 3520 set_buffer_uptodate(bh); 3521 3522 if (!buffer_uptodate(bh)) { 3523 err = -EIO; 3524 ll_rw_block(READ, 1, &bh); 3525 wait_on_buffer(bh); 3526 /* Uhhuh. Read error. Complain and punt.
*/ 3527 if (!buffer_uptodate(bh)) 3528 goto unlock; 3529 } 3530 3531 if (ext4_should_journal_data(inode)) { 3532 BUFFER_TRACE(bh, "get write access"); 3533 err = ext4_journal_get_write_access(handle, bh); 3534 if (err) 3535 goto unlock; 3536 } 3537 3538 zero_user(page, offset, length); 3539 3540 BUFFER_TRACE(bh, "zeroed end of block"); 3541 3542 err = 0; 3543 if (ext4_should_journal_data(inode)) { 3544 err = ext4_handle_dirty_metadata(handle, inode, bh); 3545 } else { 3546 if (ext4_should_order_data(inode)) 3547 err = ext4_jbd2_file_inode(handle, inode); 3548 mark_buffer_dirty(bh); 3549 } 3550 3551 unlock: 3552 unlock_page(page); 3553 page_cache_release(page); 3554 return err; 3555 } 3556 3557 /* 3558 * Probably it should be a library function... search for the first non-zero 3559 * word or memcmp with zero_page, whatever is better for a particular architecture. 3560 * Linus? 3561 */ 3562 static inline int all_zeroes(__le32 *p, __le32 *q) 3563 { 3564 while (p < q) 3565 if (*p++) 3566 return 0; 3567 return 1; 3568 } 3569 3570 /** 3571 * ext4_find_shared - find the indirect blocks for partial truncation. 3572 * @inode: inode in question 3573 * @depth: depth of the affected branch 3574 * @offsets: offsets of pointers in that branch (see ext4_block_to_path) 3575 * @chain: place to store the pointers to partial indirect blocks 3576 * @top: place to the (detached) top of branch 3577 * 3578 * This is a helper function used by ext4_truncate(). 3579 * 3580 * When we do truncate() we may have to clean the ends of several 3581 * indirect blocks but leave the blocks themselves alive. A block is 3582 * partially truncated if some data below the new i_size is referred 3583 * to from it (and it is on the path to the first completely truncated 3584 * data block, indeed). We have to free the top of that path along 3585 * with everything to the right of the path. Since no allocation 3586 * past the truncation point is possible until ext4_truncate() 3587 * finishes, we may safely do the latter, but the top of the branch may 3588 * require special attention - pageout below the truncation point 3589 * might try to populate it. 3590 * 3591 * We atomically detach the top of the branch from the tree, store the 3592 * block number of its root in *@top, pointers to buffer_heads of 3593 * partially truncated blocks - in @chain[].bh and pointers to 3594 * their last elements that should not be removed - in 3595 * @chain[].p. The return value is the pointer to the last filled element 3596 * of @chain. 3597 * 3598 * The work left to the caller is the actual freeing of the subtrees: 3599 * a) free the subtree starting from *@top 3600 * b) free the subtrees whose roots are stored in 3601 * (@chain[i].p+1 .. end of @chain[i].bh->b_data) 3602 * c) free the subtrees growing from the inode past the @chain[0]. 3603 * (no partially truncated stuff there). */ 3604 3605 static Indirect *ext4_find_shared(struct inode *inode, int depth, 3606 ext4_lblk_t offsets[4], Indirect chain[4], 3607 __le32 *top) 3608 { 3609 Indirect *partial, *p; 3610 int k, err; 3611 3612 *top = 0; 3613 /* Make k index the deepest non-null offset + 1 */ 3614 for (k = depth; k > 1 && !offsets[k-1]; k--) 3615 ; 3616 partial = ext4_get_branch(inode, k, offsets, chain, &err); 3617 /* Writer: pointers */ 3618 if (!partial) 3619 partial = chain + k-1; 3620 /* 3621 * If the branch acquired continuation since we've looked at it - 3622 * fine, it should all survive and the (new) top doesn't belong to us.
/*
 * Probably it should be a library function... search for first non-zero word
 * or memcmp with zero_page, whatever is better for particular architecture.
 * Linus?
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

/**
 *	ext4_find_shared - find the indirect blocks for partial truncation.
 *	@inode:	  inode in question
 *	@depth:	  depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext4_block_to_path)
 *	@chain:	  place to store the pointers to partial indirect blocks
 *	@top:	  place to the (detached) top of branch
 *
 *	This is a helper function used by ext4_truncate().
 *
 *	When we do truncate() we may have to clean the ends of several
 *	indirect blocks but leave the blocks themselves alive. A block is
 *	partially truncated if some data below the new i_size is referred
 *	from it (and it is on the path to the first completely truncated
 *	data block, indeed).  We have to free the top of that path along
 *	with everything to the right of the path. Since no allocation
 *	past the truncation point is possible until ext4_truncate()
 *	finishes, we may safely do the latter, but top of branch may
 *	require special attention - pageout below the truncation point
 *	might try to populate it.
 *
 *	We atomically detach the top of branch from the tree, store the
 *	block number of its root in *@top, pointers to buffer_heads of
 *	partially truncated blocks - in @chain[].bh and pointers to
 *	their last elements that should not be removed - in
 *	@chain[].p. Return value is the pointer to last filled element
 *	of @chain.
 *
 *	The work left to caller to do the actual freeing of subtrees:
 *		a) free the subtree starting from *@top
 *		b) free the subtrees whose roots are stored in
 *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 *		c) free the subtrees growing from the inode past the @chain[0].
 *			(no partially truncated stuff there).  */

static Indirect *ext4_find_shared(struct inode *inode, int depth,
				  ext4_lblk_t offsets[4], Indirect chain[4],
				  __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext4.  Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}
	/* Writer: end */

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 */
static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
			      struct buffer_head *bh,
			      ext4_fsblk_t block_to_free,
			      unsigned long count, __le32 *first,
			      __le32 *last)
{
	__le32 *p;
	if (try_to_extend_transaction(handle, inode)) {
		if (bh) {
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			ext4_handle_dirty_metadata(handle, inode, bh);
		}
		ext4_mark_inode_dirty(handle, inode);
		ext4_truncate_restart_trans(handle, inode,
					    blocks_for_truncate(inode));
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			ext4_journal_get_write_access(handle, bh);
		}
	}

	/*
	 * Any buffers which are on the journal will be in memory. We
	 * find them on the hash table so jbd2_journal_revoke() will
	 * run jbd2_journal_forget() on them.  We've already detached
	 * each block from the file, so bforget() in
	 * jbd2_journal_forget() should be safe.
	 *
	 * AKPM: turn on bforget in jbd2_journal_forget()!!!
	 */
	for (p = first; p < last; p++) {
		u32 nr = le32_to_cpu(*p);
		if (nr) {
			struct buffer_head *tbh;

			*p = 0;
			tbh = sb_find_get_block(inode->i_sb, nr);
			ext4_forget(handle, 0, inode, tbh, nr);
		}
	}

	ext4_free_blocks(handle, inode, block_to_free, count, 0);
}

/**
 * ext4_free_data - free a list of data blocks
 * @handle:	handle for this transaction
 * @inode:	inode we are dealing with
 * @this_bh:	indirect buffer_head which contains *@first and *@last
 * @first:	array of block numbers
 * @last:	points immediately past the end of array
 *
 * We are freeing all blocks referred from that array (numbers are stored as
 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
 *
 * We accumulate contiguous runs of blocks to free.  Conveniently, if these
 * blocks are contiguous then releasing them at one time will only affect one
 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
 * actually use a lot of journal space.
 *
 * @this_bh will be %NULL if @first and @last point into the inode's direct
 * block pointers.
 */
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
	unsigned long count = 0;	    /* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
					       corresponding to
					       block_to_free */
	ext4_fsblk_t nr;		    /* Current block # */
	__le32 *p;			    /* Pointer into inode/ind
					       for current block */
	int err;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				ext4_clear_blocks(handle, inode, this_bh,
						  block_to_free,
						  count, block_to_free_p, p);
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	if (count > 0)
		ext4_clear_blocks(handle, inode, this_bh, block_to_free,
				  count, block_to_free_p, p);

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at this
		 * point. However, if the data is corrupted and an indirect
		 * block pointed to itself, it would have been detached when
		 * the block was cleared. Check for this instead of OOPSing.
		 */
		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
			ext4_handle_dirty_metadata(handle, inode, this_bh);
		else
			ext4_error(inode->i_sb, __func__,
				   "circular indirect block detected, "
				   "inode=%lu, block=%llu",
				   inode->i_ino,
				   (unsigned long long) this_bh->b_blocknr);
	}
}

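/*
 * Worked example with hypothetical block numbers: if the pointer array
 * holds 100, 101, 102, 0, 200 then ext4_free_data() accumulates the
 * run {100, count == 3}, skips the hole, and starts a new run at 200.
 * Only two calls into ext4_clear_blocks() result, so only a handful of
 * bitmap and group descriptor buffers join the transaction.
 */
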
/**
 *	ext4_free_branches - free an array of branches
 *	@handle: JBD handle for this transaction
 *	@inode:	inode we are dealing with
 *	@parent_bh: the buffer_head which contains *@first and *@last
 *	@first:	array of block numbers
 *	@last:	pointer immediately past the end of array
 *	@depth:	depth of the branches to free
 *
 *	We are freeing all blocks referred from these branches (numbers are
 *	stored as little-endian 32-bit) and updating @inode->i_blocks
 *	appropriately.
 */
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				ext4_error(inode->i_sb, "ext4_free_branches",
					   "Read failure, inode=%lu, block=%llu",
					   inode->i_ino, nr);
				continue;
			}

			/* This zaps the entire block.  Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					(__le32 *) bh->b_data,
					(__le32 *) bh->b_data + addr_per_block,
					depth);

			/*
			 * We've probably journalled the indirect block several
			 * times during the truncate.  But it's no longer
			 * needed and we now drop it from the transaction via
			 * jbd2_journal_revoke().
			 *
			 * That's easy if it's exclusively part of this
			 * transaction.  But if it's part of the committing
			 * transaction then jbd2_journal_forget() will simply
			 * brelse() it.  That means that if the underlying
			 * block is reallocated in ext4_get_block(),
			 * unmap_underlying_metadata() will find this block
			 * and will try to get rid of it.  damn, damn.
			 *
			 * If this block has already been committed to the
			 * journal, a revoke record will be written.  And
			 * revoke records must be emitted *before* clearing
			 * this block's bit in the bitmaps.
			 */
			ext4_forget(handle, 1, inode, bh, bh->b_blocknr);

			/*
			 * Everything below this pointer has been
			 * released.  Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the updating of the
			 * bitmap block which owns it.  So make some room in
			 * the journal.
			 *
			 * We zero the parent pointer *after* freeing its
			 * pointee in the bitmaps, so if extend_transaction()
			 * for some reason fails to put the bitmap changes and
			 * the release into the same transaction, recovery
			 * will merely complain about releasing a free block,
			 * rather than leaking blocks.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext4_mark_inode_dirty(handle, inode);
				ext4_truncate_restart_trans(handle, inode,
					    blocks_for_truncate(inode));
			}

			ext4_free_blocks(handle, inode, nr, 1, 1);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
								   parent_bh)){
					*p = 0;
					BUFFER_TRACE(parent_bh,
					"call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}

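/*
 * Sketch of the recursion: ext4_truncate() below passes depth 1, 2 and
 * 3 for the whole indirect, double indirect and triple indirect trees.
 * Each level reads the child block and recurses with depth - 1; at
 * depth 0 ext4_free_data() releases the actual data blocks, so every
 * tree is dismantled bottom up, keeping the on-disk tree consistent at
 * each commit.
 */
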
int ext4_can_truncate(struct inode *inode)
{
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return 0;
	if (S_ISREG(inode->i_mode))
		return 1;
	if (S_ISDIR(inode->i_mode))
		return 1;
	if (S_ISLNK(inode->i_mode))
		return !ext4_inode_is_fast_symlink(inode);
	return 0;
}

/*
 * ext4_truncate()
 *
 * We block out ext4_get_block() block instantiations across the entire
 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
 * simultaneously on behalf of the same inode.
 *
 * As we work through the truncate and commit bits of it to the journal there
 * is one core, guiding principle: the file's tree must always be consistent on
 * disk.  We must be able to restart the truncate after a crash.
 *
 * The file's tree may be transiently inconsistent in memory (although it
 * probably isn't), but whenever we close off and commit a journal transaction,
 * the contents of (the filesystem + the journal) must be consistent and
 * restartable.  It's pretty simple, really: bottom up, right to left (although
 * left-to-right works OK too).
 *
 * Note that at recovery time, journal replay occurs *before* the restart of
 * truncate against the orphan inode list.
 *
 * The committed inode has the new, desired i_size (which is the same as
 * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
 * that this inode's truncate did not complete and it will again call
 * ext4_truncate() to have another go.  So there will be instantiated blocks
 * to the right of the truncation point in a crashed ext4 filesystem.  But
 * that's fine - as long as they are linked from the inode, the post-crash
 * ext4_truncate() run will find them and release them.
 */
void ext4_truncate(struct inode *inode)
{
	handle_t *handle;
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n;
	ext4_lblk_t last_block;
	unsigned blocksize = inode->i_sb->s_blocksize;

	if (!ext4_can_truncate(inode))
		return;

	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
		ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE;

	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
		ext4_ext_truncate(inode);
		return;
	}

	handle = start_transaction(inode);
	if (IS_ERR(handle))
		return;		/* AKPM: return what? */

	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (inode->i_size & (blocksize - 1))
		if (ext4_block_truncate_page(handle, mapping, inode->i_size))
			goto out_stop;

	n = ext4_block_to_path(inode, last_block, offsets, NULL);
	if (n == 0)
		goto out_stop;	/* error */

	/*
	 * OK.  This truncate is going to happen.  We add the inode to the
	 * orphan list, so that if this truncate spans multiple transactions,
	 * and we crash, we will resume the truncate when the filesystem
	 * recovers.  It also marks the inode dirty, to catch the new size.
	 *
	 * Implication: the file must always be in a sane, consistent
	 * truncatable state while each transaction commits.
	 */
	if (ext4_orphan_add(handle, inode))
		goto out_stop;

	/*
	 * From here we block out all ext4_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	down_write(&ei->i_data_sem);

	ext4_discard_preallocations(inode);

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to propagate
	 * the new, shorter inode size (held for now in i_size) into the
	 * on-disk inode.  We do this via i_disksize, which is the value which
	 * ext4 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	if (n == 1) {		/* direct blocks */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop.  No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/*
	 * Kill the remaining (whole) subtrees.  Each case deliberately
	 * falls through to the next: truncating within the direct blocks
	 * must also free the indirect, double indirect and triple
	 * indirect trees.
	 */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
	case EXT4_TIND_BLOCK:
		;
	}

	up_write(&ei->i_data_sem);
	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);

	/*
	 * In a multi-transaction truncate, we only make the final transaction
	 * synchronous
	 */
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
out_stop:
	/*
	 * If this was a simple ftruncate(), and the file will remain alive
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	ext4_journal_stop(handle);
}

/*
 * ext4_get_inode_loc returns with an extra refcount against the inode's
 * underlying buffer_head on success.  If 'in_mem' is true, we have all
 * data in memory that is needed to recreate the on-disk version of this
 * inode.
 */
static int __ext4_get_inode_loc(struct inode *inode,
				struct ext4_iloc *iloc, int in_mem)
{
	struct ext4_group_desc *gdp;
	struct buffer_head *bh;
	struct super_block *sb = inode->i_sb;
	ext4_fsblk_t block;
	int inodes_per_block, inode_offset;

	iloc->bh = NULL;
	if (!ext4_valid_inum(sb, inode->i_ino))
		return -EIO;

	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
	if (!gdp)
		return -EIO;

	/*
	 * Figure out the offset within the block group inode table
	 */
	inodes_per_block = (EXT4_BLOCK_SIZE(sb) / EXT4_INODE_SIZE(sb));
	inode_offset = ((inode->i_ino - 1) %
			EXT4_INODES_PER_GROUP(sb));
	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);

	bh = sb_getblk(sb, block);
	if (!bh) {
		ext4_error(sb, "ext4_get_inode_loc", "unable to read "
			   "inode block - inode=%lu, block=%llu",
			   inode->i_ino, block);
		return -EIO;
	}
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);

		/*
		 * If the buffer has the write error flag, we have failed
		 * to write out another inode in the same block.  In this
		 * case, we don't have to read the block because we may
		 * read the old inode data successfully.
		 */
		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
			set_buffer_uptodate(bh);

		if (buffer_uptodate(bh)) {
			/* someone brought it uptodate while we waited */
			unlock_buffer(bh);
			goto has_buffer;
		}

		/*
		 * If we have all information of the inode in memory and this
		 * is the only valid inode in the block, we need not read the
		 * block.
		 */
		if (in_mem) {
			struct buffer_head *bitmap_bh;
			int i, start;

			start = inode_offset & ~(inodes_per_block - 1);

			/* Is the inode bitmap in cache? */
			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
			if (!bitmap_bh)
				goto make_io;

			/*
			 * If the inode bitmap isn't in cache then the
			 * optimisation may end up performing two reads instead
			 * of one, so skip it.
			 */
			if (!buffer_uptodate(bitmap_bh)) {
				brelse(bitmap_bh);
				goto make_io;
			}
			for (i = start; i < start + inodes_per_block; i++) {
				if (i == inode_offset)
					continue;
				if (ext4_test_bit(i, bitmap_bh->b_data))
					break;
			}
			brelse(bitmap_bh);
			if (i == start + inodes_per_block) {
				/* all other inodes are free, so skip I/O */
				memset(bh->b_data, 0, bh->b_size);
				set_buffer_uptodate(bh);
				unlock_buffer(bh);
				goto has_buffer;
			}
		}

make_io:
		/*
		 * If we need to do any I/O, try to pre-readahead extra
		 * blocks from the inode table.
		 */
		if (EXT4_SB(sb)->s_inode_readahead_blks) {
			ext4_fsblk_t b, end, table;
			unsigned num;

			table = ext4_inode_table(sb, gdp);
			/* s_inode_readahead_blks is always a power of 2 */
			b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
			if (table > b)
				b = table;
			end = b + EXT4_SB(sb)->s_inode_readahead_blks;
			num = EXT4_INODES_PER_GROUP(sb);
			if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				       EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
				num -= ext4_itable_unused_count(sb, gdp);
			table += num / inodes_per_block;
			if (end > table)
				end = table;
			while (b <= end)
				sb_breadahead(sb, b++);
		}

		/*
		 * There are other valid inodes in the buffer, this inode
		 * has in-inode xattrs, or we don't have this inode in memory.
		 * Read the block from disk.
		 */
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ_META, bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			ext4_error(sb, __func__,
				   "unable to read inode block - inode=%lu, "
				   "block=%llu", inode->i_ino, block);
			brelse(bh);
			return -EIO;
		}
	}
has_buffer:
	iloc->bh = bh;
	return 0;
}

int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
{
	/* We have all inode data except xattrs in memory here. */
	return __ext4_get_inode_loc(inode, iloc,
		!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
}

void ext4_set_inode_flags(struct inode *inode)
{
	unsigned int flags = EXT4_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
	if (flags & EXT4_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & EXT4_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & EXT4_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & EXT4_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & EXT4_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
}

/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
void ext4_get_inode_flags(struct ext4_inode_info *ei)
{
	unsigned int flags = ei->vfs_inode.i_flags;

	ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
			EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
	if (flags & S_SYNC)
		ei->i_flags |= EXT4_SYNC_FL;
	if (flags & S_APPEND)
		ei->i_flags |= EXT4_APPEND_FL;
	if (flags & S_IMMUTABLE)
		ei->i_flags |= EXT4_IMMUTABLE_FL;
	if (flags & S_NOATIME)
		ei->i_flags |= EXT4_NOATIME_FL;
	if (flags & S_DIRSYNC)
		ei->i_flags |= EXT4_DIRSYNC_FL;
}

static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
				  struct ext4_inode_info *ei)
{
	blkcnt_t i_blocks;
	struct inode *inode = &(ei->vfs_inode);
	struct super_block *sb = inode->i_sb;

	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
		/* we are using combined 48 bit field */
		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
					le32_to_cpu(raw_inode->i_blocks_lo);
		if (ei->i_flags & EXT4_HUGE_FILE_FL) {
			/* i_blocks represent file system block size */
			return i_blocks << (inode->i_blkbits - 9);
		} else {
			return i_blocks;
		}
	} else {
		return le32_to_cpu(raw_inode->i_blocks_lo);
	}
}

struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
{
	struct ext4_iloc iloc;
	struct ext4_inode *raw_inode;
	struct ext4_inode_info *ei;
	struct buffer_head *bh;
	struct inode *inode;
	long ret;
	int block;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ei = EXT4_I(inode);

	ret = __ext4_get_inode_loc(inode, &iloc, 0);
	if (ret < 0)
		goto bad_inode;
	bh = iloc.bh;
	raw_inode = ext4_raw_inode(&iloc);
	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);

	ei->i_state = 0;
	ei->i_dir_start_lookup = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes.
	 * The test is the same one that e2fsck uses.
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0) {
		if (inode->i_mode == 0 ||
		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
			/* this inode is deleted */
			brelse(bh);
			ret = -ESTALE;
			goto bad_inode;
		}
		/* The only unlinked inodes we let through here have
		 * valid i_mode and are being read by the orphan
		 * recovery code: that's fine, we're about to complete
		 * the process of deleting those. */
	}
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
		ei->i_file_acl |=
			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
	inode->i_size = ext4_isize(raw_inode);
	ei->i_disksize = inode->i_size;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_block_group = iloc.block_group;
	ei->i_last_alloc_group = ~0;
	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (block = 0; block < EXT4_N_BLOCKS; block++)
		ei->i_data[block] = raw_inode->i_block[block];
	INIT_LIST_HEAD(&ei->i_orphan);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
		    EXT4_INODE_SIZE(inode->i_sb)) {
			brelse(bh);
			ret = -EIO;
			goto bad_inode;
		}
		if (ei->i_extra_isize == 0) {
			/* The extra space is currently unused. Use it. */
			ei->i_extra_isize = sizeof(struct ext4_inode) -
					    EXT4_GOOD_OLD_INODE_SIZE;
		} else {
			__le32 *magic = (void *)raw_inode +
					EXT4_GOOD_OLD_INODE_SIZE +
					ei->i_extra_isize;
			if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
				ei->i_state |= EXT4_STATE_XATTR;
		}
	} else
		ei->i_extra_isize = 0;

	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);

	inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
			inode->i_version |=
			(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
	}

	ret = 0;
	if (ei->i_file_acl &&
	    ((ei->i_file_acl <
	      (le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) +
	       EXT4_SB(sb)->s_gdb_count)) ||
	     (ei->i_file_acl >= ext4_blocks_count(EXT4_SB(sb)->s_es)))) {
		ext4_error(sb, __func__,
			   "bad extended attribute block %llu in inode #%lu",
			   ei->i_file_acl, inode->i_ino);
		ret = -EIO;
		goto bad_inode;
	} else if (ei->i_flags & EXT4_EXTENTS_FL) {
		if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		    (S_ISLNK(inode->i_mode) &&
		     !ext4_inode_is_fast_symlink(inode)))
			/* Validate extent which is part of inode */
			ret = ext4_ext_check_inode(inode);
	} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		   (S_ISLNK(inode->i_mode) &&
		    !ext4_inode_is_fast_symlink(inode))) {
		/* Validate block references which are part of inode */
		ret = ext4_check_inode_blockref(inode);
	}
	if (ret) {
		brelse(bh);
		goto bad_inode;
	}

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext4_file_inode_operations;
		inode->i_fop = &ext4_file_operations;
		ext4_set_aops(inode);
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext4_dir_inode_operations;
		inode->i_fop = &ext4_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext4_inode_is_fast_symlink(inode)) {
			inode->i_op = &ext4_fast_symlink_inode_operations;
			nd_terminate_link(ei->i_data, inode->i_size,
				sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext4_symlink_inode_operations;
			ext4_set_aops(inode);
		}
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &ext4_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	} else {
		brelse(bh);
		ret = -EIO;
		ext4_error(inode->i_sb, __func__,
			   "bogus i_mode (%o) for inode=%lu",
			   inode->i_mode, inode->i_ino);
		goto bad_inode;
	}
	brelse(iloc.bh);
	ext4_set_inode_flags(inode);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}

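/*
 * Typical caller pattern (a sketch; ext4_lookup() is a real user):
 * because of the ERR_PTR() convention, callers must test the result
 * with IS_ERR() rather than checking for NULL:
 *
 *	inode = ext4_iget(dir->i_sb, ino);
 *	if (IS_ERR(inode))
 *		return ERR_CAST(inode);
 */
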
static int ext4_inode_blocks_set(handle_t *handle,
				struct ext4_inode *raw_inode,
				struct ext4_inode_info *ei)
{
	struct inode *inode = &(ei->vfs_inode);
	u64 i_blocks = inode->i_blocks;
	struct super_block *sb = inode->i_sb;

	if (i_blocks <= ~0U) {
		/*
		 * i_blocks can be represented in a 32 bit variable
		 * as multiple of 512 bytes
		 */
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = 0;
		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
		return 0;
	}
	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
		return -EFBIG;

	if (i_blocks <= 0xffffffffffffULL) {
		/*
		 * i_blocks can be represented in a 48 bit variable
		 * as multiple of 512 bytes
		 */
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
	} else {
		ei->i_flags |= EXT4_HUGE_FILE_FL;
		/* i_block is stored in file system block size */
		i_blocks = i_blocks >> (inode->i_blkbits - 9);
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
	}
	return 0;
}

/*
 * Post the struct inode info into an on-disk inode location in the
 * buffer-cache.  This gobbles the caller's reference to the
 * buffer_head in the inode location struct.
 *
 * The caller must have write access to iloc->bh.
 */
static int ext4_do_update_inode(handle_t *handle,
				struct inode *inode,
				struct ext4_iloc *iloc,
				int do_sync)
{
	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct buffer_head *bh = iloc->bh;
	int err = 0, rc, block;

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ei->i_state & EXT4_STATE_NEW)
		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);

	ext4_get_inode_flags(ei);
	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
		/*
		 * Fix up interoperability with old kernels.  Otherwise,
		 * old inodes get re-used with the upper 16 bits of the
		 * uid/gid intact.
		 */
		if (!ei->i_dtime) {
			raw_inode->i_uid_high =
				cpu_to_le16(high_16_bits(inode->i_uid));
			raw_inode->i_gid_high =
				cpu_to_le16(high_16_bits(inode->i_gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low =
			cpu_to_le16(fs_high2lowuid(inode->i_uid));
		raw_inode->i_gid_low =
			cpu_to_le16(fs_high2lowgid(inode->i_gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);

	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);

	if (ext4_inode_blocks_set(handle, raw_inode, ei))
		goto out_brelse;
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_HURD))
		raw_inode->i_file_acl_high =
			cpu_to_le16(ei->i_file_acl >> 32);
	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
	ext4_isize_set(raw_inode, ei->i_disksize);
	if (ei->i_disksize > 0x7fffffffULL) {
		struct super_block *sb = inode->i_sb;
		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
				EXT4_SB(sb)->s_es->s_rev_level ==
				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
			/* If this is the first large file
			 * created, add a flag to the superblock.
			 */
			err = ext4_journal_get_write_access(handle,
					EXT4_SB(sb)->s_sbh);
			if (err)
				goto out_brelse;
			ext4_update_dynamic_rev(sb);
			EXT4_SET_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
			sb->s_dirt = 1;
			ext4_handle_sync(handle);
			err = ext4_handle_dirty_metadata(handle, inode,
					EXT4_SB(sb)->s_sbh);
		}
	}
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else
		for (block = 0; block < EXT4_N_BLOCKS; block++)
			raw_inode->i_block[block] = ei->i_data[block];

	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
	if (ei->i_extra_isize) {
		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
			raw_inode->i_version_hi =
			cpu_to_le32(inode->i_version >> 32);
		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
	}

	/*
	 * If we're not using a journal and we were called from
	 * ext4_write_inode() to sync the inode (making do_sync true),
	 * we can just use sync_dirty_buffer() directly to do our dirty
	 * work.  Testing s_journal here is a bit redundant but it's
	 * worth it to avoid potential future trouble.
	 */
	if (EXT4_SB(inode->i_sb)->s_journal == NULL && do_sync) {
		BUFFER_TRACE(bh, "call sync_dirty_buffer");
		sync_dirty_buffer(bh);
	} else {
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		rc = ext4_handle_dirty_metadata(handle, inode, bh);
		if (!err)
			err = rc;
	}
	ei->i_state &= ~EXT4_STATE_NEW;

out_brelse:
	brelse(bh);
	ext4_std_error(inode->i_sb, err);
	return err;
}

/*
 * ext4_write_inode()
 *
 * We are called from a few places:
 *
 * - Within generic_file_write() for O_SYNC files.
 *   Here, there will be no transaction running. We wait for any running
 *   transaction to commit.
 *
 * - Within sys_sync(), kupdate and such.
 *   We wait on commit, if told to.
 *
 * - Within prune_icache() (PF_MEMALLOC == true)
 *   Here we simply return.  We can't afford to block kswapd on the
 *   journal commit.
 *
 * In all cases it is actually safe for us to return without doing anything,
 * because the inode has been copied into a raw inode buffer in
 * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
 * knfsd.
 *
 * Note that we are absolutely dependent upon all inode dirtiers doing the
 * right thing: they *must* call mark_inode_dirty() after dirtying info in
 * which we are interested.
 *
 * It would be a bug for them to not do this.  The code:
 *
 *	mark_inode_dirty(inode)
 *	stuff();
 *	inode->i_size = expr;
 *
 * is in error because a kswapd-driven write_inode() could occur while
 * `stuff()' is running, and the new i_size will be lost.  Plus the inode
 * will no longer be on the superblock's dirty inode list.
 */
int ext4_write_inode(struct inode *inode, int wait)
{
	int err;

	if (current->flags & PF_MEMALLOC)
		return 0;

	if (EXT4_SB(inode->i_sb)->s_journal) {
		if (ext4_journal_current_handle()) {
			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
			dump_stack();
			return -EIO;
		}

		if (!wait)
			return 0;

		err = ext4_force_commit(inode->i_sb);
	} else {
		struct ext4_iloc iloc;

		err = ext4_get_inode_loc(inode, &iloc);
		if (err)
			return err;
		err = ext4_do_update_inode(EXT4_NOJOURNAL_HANDLE,
					   inode, &iloc, wait);
	}
	return err;
}

/*
 * ext4_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible.  In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk.  (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Another thing we have to assure is that if we are in ordered mode
 * and inode is still attached to the committing transaction, we must
 * start writeout of all the dirty pages which are being truncated.
 * This way we are sure that all the data written in the previous
 * transaction are already on disk (truncate waits for pages under
 * writeback).
 *
 * Called with inode->i_mutex down.
 */
int ext4_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error, rc = 0;
	const unsigned int ia_valid = attr->ia_valid;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
		handle_t *handle;

		/* (user+group)*(old+new) structure, inode write (sb,
		 * inode block, ? - but truncate inode update has it) */
		handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+
					EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}
		error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
		if (error) {
			ext4_journal_stop(handle);
			return error;
		}
		/* Update corresponding info in inode so that everything is in
		 * one transaction */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		error = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	}

	if (attr->ia_valid & ATTR_SIZE) {
		if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

			if (attr->ia_size > sbi->s_bitmap_maxbytes) {
				error = -EFBIG;
				goto err_out;
			}
		}
	}

	if (S_ISREG(inode->i_mode) &&
	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
		handle_t *handle;

		handle = ext4_journal_start(inode, 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}

		error = ext4_orphan_add(handle, inode);
		EXT4_I(inode)->i_disksize = attr->ia_size;
		rc = ext4_mark_inode_dirty(handle, inode);
		if (!error)
			error = rc;
		ext4_journal_stop(handle);

		if (ext4_should_order_data(inode)) {
			error = ext4_begin_ordered_truncate(inode,
							    attr->ia_size);
			if (error) {
				/* Do as much error cleanup as possible */
				handle = ext4_journal_start(inode, 3);
				if (IS_ERR(handle)) {
					ext4_orphan_del(NULL, inode);
					goto err_out;
				}
				ext4_orphan_del(handle, inode);
				ext4_journal_stop(handle);
				goto err_out;
			}
		}
	}

	rc = inode_setattr(inode, attr);

	/* If inode_setattr's call to ext4_truncate failed to get a
	 * transaction handle at all, we need to clean up the in-core
	 * orphan list manually. */
	if (inode->i_nlink)
		ext4_orphan_del(NULL, inode);

	if (!rc && (ia_valid & ATTR_MODE))
		rc = ext4_acl_chmod(inode);

err_out:
	ext4_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	return error;
}

int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode;
	unsigned long delalloc_blocks;

	inode = dentry->d_inode;
	generic_fillattr(inode, stat);

	/*
	 * We can't update i_blocks if the block allocation is delayed;
	 * otherwise, in the case of a system crash before the real block
	 * allocation is done, we would have i_blocks inconsistent with
	 * the on-disk file blocks.
	 * We always keep i_blocks updated together with the real
	 * allocation.  But so as not to confuse userspace, stat
	 * returns blocks that include the delayed allocation
	 * blocks for this file.
	 */
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
	return 0;
}

static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
				      int chunk)
{
	int indirects;

	/* if nrblocks are contiguous */
	if (chunk) {
		/*
		 * With N contiguous data blocks, we need at most
		 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks,
		 * 2 dindirect blocks and 1 tindirect block
		 */
		indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb);
		return indirects + 3;
	}
	/*
	 * if nrblocks are not contiguous, worst case, each block touches
	 * an indirect block, and each indirect block touches a double
	 * indirect block, plus a triple indirect block
	 */
	indirects = nrblocks * 2 + 1;
	return indirects;
}

static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
		return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
}

/*
 * Account for index blocks, block group bitmaps and block group
 * descriptor blocks if we modify data blocks and index blocks;
 * in the worst case the index blocks are spread over different
 * block groups.
 *
 * If the data blocks are discontiguous, they may spread over different
 * block groups too.  If they are contiguous, with flexbg, they could
 * still cross a block group boundary.
 *
 * Also account for superblock, inode, quota and xattr blocks.
 */
int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
	int gdpblocks;
	int idxblocks;
	int ret = 0;

	/*
	 * How many index blocks do we need to touch to modify nrblocks?
	 * The "Chunk" flag indicates whether the nrblocks are
	 * physically contiguous on disk.
	 *
	 * Direct IO and fallocate call get_block to allocate
	 * one single extent at a time, so they can set the "Chunk" flag.
	 */
	idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);

	ret = idxblocks;

	/*
	 * Now let's see how many group bitmaps and group descriptors
	 * we need to account for.
	 */
	groups = idxblocks;
	if (chunk)
		groups += 1;
	else
		groups += nrblocks;

	gdpblocks = groups;
	if (groups > ngroups)
		groups = ngroups;
	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;

	/* bitmaps and block group descriptor blocks */
	ret += groups + gdpblocks;

	/* Blocks for super block, inode, quota and xattr blocks */
	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);

	return ret;
}

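/*
 * Worked example, assuming an indirect-mapped file, 4K blocks and a
 * filesystem with plenty of block groups: mapping one contiguous chunk
 * of 4 blocks gives idxblocks == 4/1024 + 3 == 3 and
 * groups == idxblocks + 1 == 4, so the reservation is 3 index blocks
 * + 4 bitmap blocks + 4 descriptor blocks, plus
 * EXT4_META_TRANS_BLOCKS() for the superblock, inode, quota and xattr
 * blocks.
 */
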
/*
 * Calculate the total number of credits to reserve to fit
 * the modification of a single page into a single transaction,
 * which may include multiple chunks of block allocations.
 *
 * This could be called via ext4_write_begin()
 *
 * We need to consider the worst case, with
 * one new block per extent.
 */
int ext4_writepage_trans_blocks(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);
	int ret;

	ret = ext4_meta_trans_blocks(inode, bpp, 0);

	/* Account for data blocks for journalled mode */
	if (ext4_should_journal_data(inode))
		ret += bpp;
	return ret;
}

/*
 * Calculate the journal credits for a chunk of data modification.
 *
 * This is called from DIO, fallocate or whoever calls
 * ext4_get_blocks() to map/allocate a chunk of contiguous disk blocks.
 *
 * journal buffers for data blocks are not included here, as DIO
 * and fallocate do not need to journal data buffers.
 */
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
	return ext4_meta_trans_blocks(inode, nrblocks, 1);
}

/*
 * The caller must have previously called ext4_reserve_inode_write().
 * Given this, we know that the caller already has write access to iloc->bh.
 */
int ext4_mark_iloc_dirty(handle_t *handle,
			 struct inode *inode, struct ext4_iloc *iloc)
{
	int err = 0;

	if (test_opt(inode->i_sb, I_VERSION))
		inode_inc_iversion(inode);

	/* the do_update_inode consumes one bh->b_count */
	get_bh(iloc->bh);

	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
	err = ext4_do_update_inode(handle, inode, iloc, 0);
	put_bh(iloc->bh);
	return err;
}

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh.  This _must_ be cleaned up later.
 */
int
ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			 struct ext4_iloc *iloc)
{
	int err;

	err = ext4_get_inode_loc(inode, iloc);
	if (!err) {
		BUFFER_TRACE(iloc->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, iloc->bh);
		if (err) {
			brelse(iloc->bh);
			iloc->bh = NULL;
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}

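/*
 * The usual pairing of the two helpers above (a sketch of the pattern;
 * ext4_mark_inode_dirty() below is the canonical user):
 *
 *	struct ext4_iloc iloc;
 *
 *	err = ext4_reserve_inode_write(handle, inode, &iloc);
 *	if (!err) {
 *		... modify the in-core inode ...
 *		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
 *	}
 */
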
/*
 * Expand an inode by new_extra_isize bytes.
 * Returns 0 on success or negative error number on failure.
 */
static int ext4_expand_extra_isize(struct inode *inode,
				   unsigned int new_extra_isize,
				   struct ext4_iloc iloc,
				   handle_t *handle)
{
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry;

	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
		return 0;

	raw_inode = ext4_raw_inode(&iloc);

	header = IHDR(inode, raw_inode);
	entry = IFIRST(header);

	/* No extended attributes present */
	if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
			new_extra_isize);
		EXT4_I(inode)->i_extra_isize = new_extra_isize;
		return 0;
	}

	/* try to expand with EAs present */
	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
					  raw_inode, handle);
}

/*
 * What we do here is to mark the in-core inode as clean with respect to inode
 * dirtiness (it may still be data-dirty).
 * This means that the in-core inode may be reaped by prune_icache
 * without having to perform any I/O.  This is a very good thing,
 * because *any* task may call prune_icache - even ones which
 * have a transaction open against a different journal.
 *
 * Is this cheating?  Not really.  Sure, we haven't written the
 * inode out, but prune_icache isn't a user-visible syncing function.
 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
 * we start and wait on commits.
 *
 * Is this efficient/effective?  Well, we're being nice to the system
 * by cleaning up our inodes proactively so they can be reaped
 * without I/O.  But we are potentially leaving up to five seconds'
 * worth of inodes floating about which prune_icache wants us to
 * write out.  One way to fix that would be to get prune_icache()
 * to do a write_super() to free up some memory.  It has the desired
 * effect.
 */
int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	static unsigned int mnt_count;
	int err, ret;

	might_sleep();
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (ext4_handle_valid(handle) &&
	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
	    !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
		/*
		 * We need extra buffer credits since we may write into EA block
		 * with this same handle.  If journal_extend fails, then it will
		 * only result in a minor loss of functionality for that inode.
		 * If this is felt to be critical, then e2fsck should be run to
		 * force a large enough s_min_extra_isize.
		 */
		if ((jbd2_journal_extend(handle,
			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
			ret = ext4_expand_extra_isize(inode,
						      sbi->s_want_extra_isize,
						      iloc, handle);
			if (ret) {
				EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
				if (mnt_count !=
					le16_to_cpu(sbi->s_es->s_mnt_count)) {
					ext4_warning(inode->i_sb, __func__,
					"Unable to expand inode %lu. Delete"
					" some EAs or run e2fsck.",
						inode->i_ino);
					mnt_count =
					  le16_to_cpu(sbi->s_es->s_mnt_count);
				}
			}
		}
	}
	if (!err)
		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
	return err;
}

/*
 * ext4_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Also, vfs_dq_alloc_block() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
 * so would cause a commit on atime updates, which we don't bother doing.
 * We handle synchronous inodes at the highest possible level.
 */
void ext4_dirty_inode(struct inode *inode)
{
	handle_t *current_handle = ext4_journal_current_handle();
	handle_t *handle;

	if (!ext4_handle_valid(current_handle)) {
		ext4_mark_inode_dirty(current_handle, inode);
		return;
	}

	handle = ext4_journal_start(inode, 2);
	if (IS_ERR(handle))
		goto out;
	if (current_handle &&
		current_handle->h_transaction != handle->h_transaction) {
		/* This task has a transaction open against a different fs */
		printk(KERN_EMERG "%s: transactions do not match!\n",
		       __func__);
	} else {
		jbd_debug(5, "marking dirty.  outer handle=%p\n",
				current_handle);
		ext4_mark_inode_dirty(handle, inode);
	}
	ext4_journal_stop(handle);
out:
	return;
}

#if 0
/*
 * Bind an inode's backing buffer_head into this transaction, to prevent
 * it from being flushed to disk early.  Unlike
 * ext4_reserve_inode_write, this leaves behind no bh reference and
 * returns no iloc structure, so the caller needs to repeat the iloc
 * lookup to mark the inode dirty later.
 */
static int ext4_pin_inode(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;

	int err = 0;
	if (handle) {
		err = ext4_get_inode_loc(inode, &iloc);
		if (!err) {
			BUFFER_TRACE(iloc.bh, "get_write_access");
			err = jbd2_journal_get_write_access(handle, iloc.bh);
			if (!err)
				err = ext4_handle_dirty_metadata(handle,
								 inode,
								 iloc.bh);
			brelse(iloc.bh);
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
#endif

int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;

	/*
	 * We have to be very careful here: changing a data block's
	 * journaling status dynamically is dangerous.  If we write a
	 * data block to the journal, change the status and then delete
	 * that block, we risk forgetting to revoke the old log record
	 * from the journal and so a subsequent replay can corrupt data.
	 * So, first we make sure that the journal is empty and that
	 * nobody is changing anything.
	 */

	journal = EXT4_JOURNAL(inode);
	if (!journal)
		return 0;
	if (is_journal_aborted(journal))
		return -EROFS;

	jbd2_journal_lock_updates(journal);
	jbd2_journal_flush(journal);

	/*
	 * OK, there are no updates running now, and all cached data is
	 * synced to disk.  We are now in a completely consistent state
	 * which doesn't have anything in the journal, and we know that
	 * no filesystem updates are running, so it is safe to modify
	 * the inode's in-core data-journaling state flag now.
	 */

	if (val)
		EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
	else
		EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
	ext4_set_aops(inode);

	jbd2_journal_unlock_updates(journal);

	/* Finally we can mark the inode as dirty. */

	handle = ext4_journal_start(inode, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_mark_inode_dirty(handle, inode);
	ext4_handle_sync(handle);
	ext4_journal_stop(handle);
	ext4_std_error(inode->i_sb, err);

	return err;
}

static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
{
	return !buffer_mapped(bh);
}

int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	loff_t size;
	unsigned long len;
	int ret = -EINVAL;
	void *fsdata;
	struct file *file = vma->vm_file;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;

	/*
	 * Get i_alloc_sem to stop truncates messing with the inode. We cannot
	 * get i_mutex because we are already holding mmap_sem.
	 */
	down_read(&inode->i_alloc_sem);
	size = i_size_read(inode);
	if (page->mapping != mapping || size <= page_offset(page)
	    || !PageUptodate(page)) {
		/* page got truncated from under us? */
		goto out_unlock;
	}
	ret = 0;
	if (PageMappedToDisk(page))
		goto out_unlock;

	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

	lock_page(page);
	/*
	 * Return if we have all the buffers mapped.  This avoids the
	 * need to call write_begin/write_end, which does a
	 * journal_start/journal_stop and so can block for a long time.
	 */
	if (page_has_buffers(page)) {
		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
					ext4_bh_unmapped)) {
			unlock_page(page);
			goto out_unlock;
		}
	}
	unlock_page(page);
	/*
	 * OK, we need to fill the hole... Do write_begin write_end
	 * to do block allocation/reservation.  We are not holding
	 * inode->i_mutex here.  That allows parallel write_begin,
	 * write_end calls.  lock_page prevents this from happening
	 * on the same page, though.
	 */
	ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
			len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
	if (ret < 0)
		goto out_unlock;
	ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
			len, len, page, fsdata);
	if (ret < 0)
		goto out_unlock;
	ret = 0;
out_unlock:
	if (ret)
		ret = VM_FAULT_SIGBUS;
	up_read(&inode->i_alloc_sem);
	return ret;
}