/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *      (sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *      David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

#define MPAGE_DA_EXTENT_TAIL 0x01

static inline int ext4_begin_ordered_truncate(struct inode *inode,
                                              loff_t new_size)
{
        return jbd2_journal_begin_ordered_truncate(
                        EXT4_SB(inode->i_sb)->s_journal,
                        &EXT4_I(inode)->jinode,
                        new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
        int ea_blocks = EXT4_I(inode)->i_file_acl ?
                (inode->i_sb->s_blocksize >> 9) : 0;

        return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * The ext4 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (eg. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 *
 * If the handle isn't valid we're not journaling so there's nothing to do.
 */
int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
                struct buffer_head *bh, ext4_fsblk_t blocknr)
{
        int err;

        if (!ext4_handle_valid(handle))
                return 0;

        might_sleep();

        BUFFER_TRACE(bh, "enter");

        jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
                  "data mode %lx\n",
                  bh, is_metadata, inode->i_mode,
                  test_opt(inode->i_sb, DATA_FLAGS));

        /* Never use the revoke function if we are doing full data
         * journaling: there is no need to, and a V1 superblock won't
         * support it.  Otherwise, only skip the revoke on un-journaled
         * data blocks.
         */

        if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
            (!is_metadata && !ext4_should_journal_data(inode))) {
                if (bh) {
                        BUFFER_TRACE(bh, "call jbd2_journal_forget");
                        return ext4_journal_forget(handle, bh);
                }
                return 0;
        }

        /*
         * data!=journal && (is_metadata || should_journal_data(inode))
         */
        BUFFER_TRACE(bh, "call ext4_journal_revoke");
        err = ext4_journal_revoke(handle, blocknr, bh);
        if (err)
                ext4_abort(inode->i_sb, __func__,
                           "error %d when attempting revoke", err);
        BUFFER_TRACE(bh, "exit");
        return err;
}

/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
        ext4_lblk_t needed;

        needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

        /* Give ourselves just enough room to cope with inodes in which
         * i_blocks is corrupt: we've seen disk corruptions in the past
         * which resulted in random data in an inode which looked enough
         * like a regular file for ext4 to try to delete it.  Things
         * will go a bit crazy if that happens, but at least we should
         * try not to panic the whole kernel. */
        if (needed < 2)
                needed = 2;

        /* But we need to bound the transaction so we don't overflow the
         * journal. */
        if (needed > EXT4_MAX_TRANS_DATA)
                needed = EXT4_MAX_TRANS_DATA;

        return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
        handle_t *result;

        result = ext4_journal_start(inode, blocks_for_truncate(inode));
        if (!IS_ERR(result))
                return result;

        ext4_std_error(inode->i_sb, PTR_ERR(result));
        return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
        if (!ext4_handle_valid(handle))
                return 0;
        if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
                return 0;
        if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
                return 0;
        return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
{
        BUG_ON(EXT4_JOURNAL(inode) == NULL);
        jbd_debug(2, "restarting handle %p\n", handle);
        return ext4_journal_restart(handle, blocks_for_truncate(inode));
}
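
/*
 * Worked example (illustrative, not from the original source): with 4KB
 * blocks, s_blocksize_bits == 12 and i_blocks counts 512-byte sectors,
 * so blocks_for_truncate() computes needed = i_blocks >> 3 - roughly one
 * credit per file system block - clamps it to [2, EXT4_MAX_TRANS_DATA],
 * and adds the EXT4_DATA_TRANS_BLOCKS() baseline on top.
 */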
/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_delete_inode(struct inode *inode)
{
        handle_t *handle;
        int err;

        if (ext4_should_order_data(inode))
                ext4_begin_ordered_truncate(inode, 0);
        truncate_inode_pages(&inode->i_data, 0);

        if (is_bad_inode(inode))
                goto no_delete;

        handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
        if (IS_ERR(handle)) {
                ext4_std_error(inode->i_sb, PTR_ERR(handle));
                /*
                 * If we're going to skip the normal cleanup, we still need to
                 * make sure that the in-core orphan linked list is properly
                 * cleaned up.
                 */
                ext4_orphan_del(NULL, inode);
                goto no_delete;
        }

        if (IS_SYNC(inode))
                ext4_handle_sync(handle);
        inode->i_size = 0;
        err = ext4_mark_inode_dirty(handle, inode);
        if (err) {
                ext4_warning(inode->i_sb, __func__,
                             "couldn't mark inode dirty (err %d)", err);
                goto stop_handle;
        }
        if (inode->i_blocks)
                ext4_truncate(inode);

        /*
         * ext4_ext_truncate() doesn't reserve any slop when it
         * restarts journal transactions; therefore there may not be
         * enough credits left in the handle to remove the inode from
         * the orphan list and set the dtime field.
         */
        if (!ext4_handle_has_enough_credits(handle, 3)) {
                err = ext4_journal_extend(handle, 3);
                if (err > 0)
                        err = ext4_journal_restart(handle, 3);
                if (err != 0) {
                        ext4_warning(inode->i_sb, __func__,
                                     "couldn't extend journal (err %d)", err);
                stop_handle:
                        ext4_journal_stop(handle);
                        goto no_delete;
                }
        }

        /*
         * Kill off the orphan record which ext4_truncate created.
         * AKPM: I think this can be inside the above `if'.
         * Note that ext4_orphan_del() has to be able to cope with the
         * deletion of a non-existent orphan - this is because we don't
         * know if ext4_truncate() actually created an orphan record.
         * (Well, we could do this if we need to, but heck - it works)
         */
        ext4_orphan_del(handle, inode);
        EXT4_I(inode)->i_dtime = get_seconds();

        /*
         * One subtle ordering requirement: if anything has gone wrong
         * (transaction abort, IO errors, whatever), then we can still
         * do these next steps (the fs will already have been marked as
         * having errors), but we can't free the inode if the mark_dirty
         * fails.
         */
        if (ext4_mark_inode_dirty(handle, inode))
                /* If that failed, just do the required in-core inode clear. */
                clear_inode(inode);
        else
                ext4_free_inode(handle, inode);
        ext4_journal_stop(handle);
        return;
no_delete:
        clear_inode(inode);     /* We must guarantee clearing of inode... */
}

typedef struct {
        __le32  *p;
        __le32  key;
        struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
        p->key = *(p->p = v);
        p->bh = bh;
}
/**
 * ext4_block_to_path - parse the block number into array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set this non-zero if the referred-to block is likely to be
 *      followed (on disk) by an indirect block.
 *
 * To store the locations of a file's data ext4 uses a data structure
 * common for UNIX filesystems - a tree of pointers anchored in the inode,
 * with data blocks at leaves and indirect blocks in intermediate nodes.
 * This function translates the block number into path in that tree -
 * return value is the path length and @offsets[n] is the offset of
 * pointer to (n+1)th node in the nth one.  If @block is out of range
 * (negative or too large) a warning is printed and zero returned.
 *
 * Note: function doesn't find node addresses, so no IO is needed.  All
 * we need to know is the capacity of indirect blocks (taken from the
 * inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks.  We might use long long, but that would
 * kill us on x86.  Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext4_block_to_path(struct inode *inode,
                              ext4_lblk_t i_block,
                              ext4_lblk_t offsets[4], int *boundary)
{
        int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
        int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
        const long direct_blocks = EXT4_NDIR_BLOCKS,
                indirect_blocks = ptrs,
                double_blocks = (1 << (ptrs_bits * 2));
        int n = 0;
        int final = 0;

        if (i_block < 0) {
                ext4_warning(inode->i_sb, "ext4_block_to_path", "block < 0");
        } else if (i_block < direct_blocks) {
                offsets[n++] = i_block;
                final = direct_blocks;
        } else if ((i_block -= direct_blocks) < indirect_blocks) {
                offsets[n++] = EXT4_IND_BLOCK;
                offsets[n++] = i_block;
                final = ptrs;
        } else if ((i_block -= indirect_blocks) < double_blocks) {
                offsets[n++] = EXT4_DIND_BLOCK;
                offsets[n++] = i_block >> ptrs_bits;
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
                offsets[n++] = EXT4_TIND_BLOCK;
                offsets[n++] = i_block >> (ptrs_bits * 2);
                offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else {
                ext4_warning(inode->i_sb, "ext4_block_to_path",
                             "block %lu > max in inode %lu",
                             i_block + direct_blocks +
                             indirect_blocks + double_blocks, inode->i_ino);
        }
        if (boundary)
                *boundary = final - 1 - (i_block & (ptrs - 1));
        return n;
}
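
/*
 * Worked example (illustrative, assuming a 4KB block size, i.e.
 * ptrs == 1024 and EXT4_NDIR_BLOCKS == 12):
 *
 *      i_block 11   -> offsets = { 11 },                    n = 1 (direct)
 *      i_block 12   -> offsets = { EXT4_IND_BLOCK, 0 },     n = 2
 *      i_block 1036 -> offsets = { EXT4_DIND_BLOCK, 0, 0 }, n = 3
 *
 * The last case holds because 1036 - 12 - 1024 == 0 lands at the very
 * start of the double-indirect range.
 */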
static int __ext4_check_blockref(const char *function, struct inode *inode,
                                 __le32 *p, unsigned int max)
{
        unsigned int maxblocks = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es);
        __le32 *bref = p;

        while (bref < p+max) {
                if (unlikely(le32_to_cpu(*bref) >= maxblocks)) {
                        ext4_error(inode->i_sb, function,
                                   "block reference %u >= max (%u) "
                                   "in inode #%lu, offset=%d",
                                   le32_to_cpu(*bref), maxblocks,
                                   inode->i_ino, (int)(bref-p));
                        return -EIO;
                }
                bref++;
        }
        return 0;
}


#define ext4_check_indirect_blockref(inode, bh)                         \
        __ext4_check_blockref(__func__, inode, (__le32 *)(bh)->b_data,  \
                              EXT4_ADDR_PER_BLOCK((inode)->i_sb))

#define ext4_check_inode_blockref(inode)                                \
        __ext4_check_blockref(__func__, inode, EXT4_I(inode)->i_data,   \
                              EXT4_NDIR_BLOCKS)

/**
 * ext4_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Function fills the array of triples <key, p, bh> and returns %NULL
 * if everything went OK or the pointer to the last filled triple
 * (incomplete one) otherwise.  Upon the return chain[i].key contains
 * the number of (i+1)-th block in the chain (as it is stored in memory,
 * i.e. little-endian 32-bit), chain[i].p contains the address of that
 * number (it points into struct inode for i==0 and into the bh->b_data
 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 * block for i>0 and NULL for i==0.  In other words, it holds the block
 * numbers of the chain, addresses they were taken from (and where we can
 * verify that chain did not change) and buffer_heads hosting these
 * numbers.
 *
 * Function stops when it stumbles upon zero pointer (absent block)
 *      (pointer to last triple returned, *@err == 0)
 * or when it gets an IO error reading an indirect block
 *      (ditto, *@err == -EIO)
 * or when it reads all @depth-1 indirect blocks successfully and finds
 * the whole chain, all the way to the data (returns %NULL, *err == 0).
 *
 * Needs to be called with
 * down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
                                 ext4_lblk_t *offsets,
                                 Indirect chain[4], int *err)
{
        struct super_block *sb = inode->i_sb;
        Indirect *p = chain;
        struct buffer_head *bh;

        *err = 0;
        /* i_data is not going away, no lock needed */
        add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
        if (!p->key)
                goto no_block;
        while (--depth) {
                bh = sb_getblk(sb, le32_to_cpu(p->key));
                if (unlikely(!bh))
                        goto failure;

                if (!bh_uptodate_or_lock(bh)) {
                        if (bh_submit_read(bh) < 0) {
                                put_bh(bh);
                                goto failure;
                        }
                        /* validate block references */
                        if (ext4_check_indirect_blockref(inode, bh)) {
                                put_bh(bh);
                                goto failure;
                        }
                }

                add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
                /* Reader: end */
                if (!p->key)
                        goto no_block;
        }
        return NULL;

failure:
        *err = -EIO;
no_block:
        return p;
}
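
/*
 * Example of the return convention (illustrative): for a depth-2 lookup
 * where the indirect block exists but the slot inside it is zero,
 * ext4_get_branch() returns &chain[1] with chain[1].key == 0 and
 * *err == 0 - the caller sees a hole, not an error.
 */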
/**
 * ext4_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when heuristic for sequential allocation fails.
 * Rules are:
 *   + if there is a block to the left of our position - allocate near it.
 *   + if pointer will live in indirect block - allocate near that block.
 *   + if pointer will live in inode - allocate in the same
 *     cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.  The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 * Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
        __le32 *p;
        ext4_fsblk_t bg_start;
        ext4_fsblk_t last_block;
        ext4_grpblk_t colour;
        ext4_group_t block_group;
        int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));

        /* Try to find previous block */
        for (p = ind->p - 1; p >= start; p--) {
                if (*p)
                        return le32_to_cpu(*p);
        }

        /* No such thing, so let's try location of indirect block */
        if (ind->bh)
                return ind->bh->b_blocknr;

        /*
         * It is going to be referred to from the inode itself? OK, just put it
         * into the same cylinder group then.
         */
        block_group = ei->i_block_group;
        if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
                block_group &= ~(flex_size-1);
                if (S_ISREG(inode->i_mode))
                        block_group++;
        }
        bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
        last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

        /*
         * If we are doing delayed allocation, we don't need to take
         * colour into account.
         */
        if (test_opt(inode->i_sb, DELALLOC))
                return bg_start;

        if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
                colour = (current->pid % 16) *
                        (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        else
                colour = (current->pid % 16) * ((last_block - bg_start) / 16);
        return bg_start + colour;
}

/**
 * ext4_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @partial: pointer to the last triple within a chain
 *
 * Normally this function finds the preferred place for block allocation
 * and returns it.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
                                   Indirect *partial)
{
        /*
         * XXX need to get goal block from mballoc's data structures
         */

        return ext4_find_near(inode, partial);
}
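
/*
 * Illustrative numbers for the colour computation in ext4_find_near():
 * with 32768 blocks per group, a task with PID 42 gets
 * colour = (42 % 16) * (32768 / 16) = 10 * 2048, i.e. an offset of
 * 20480 blocks into the group.
 */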
/**
 * ext4_blks_to_allocate - look up the block map and count the number
 * of direct blocks that need to be allocated for the given branch.
 *
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped.
 * @blocks_to_boundary: the offset in the indirect block
 *
 * return the total number of blocks to be allocated, including the
 * direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
                                 int blocks_to_boundary)
{
        unsigned int count = 0;

        /*
         * Simple case: the [t,d]indirect block(s) have not been allocated
         * yet, so clearly no blocks on that path have been allocated either.
         */
        if (k > 0) {
                /* right now we don't handle cross boundary allocation */
                if (blks < blocks_to_boundary + 1)
                        count += blks;
                else
                        count += blocks_to_boundary + 1;
                return count;
        }

        count++;
        while (count < blks && count <= blocks_to_boundary &&
               le32_to_cpu(*(branch[0].p + count)) == 0) {
                count++;
        }
        return count;
}
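
/*
 * Example (illustrative): with k == 0, blks == 8 and blocks_to_boundary
 * == 5, the scan above returns at most blocks_to_boundary + 1 == 6
 * blocks, and stops earlier at the first slot of branch[0] that is
 * already non-zero.
 */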
/**
 * ext4_alloc_blocks: multiple allocate blocks needed for a branch
 * @indirect_blks: the number of blocks that need to be allocated for the
 *                 indirect blocks
 * @new_blocks: on return it will store the new block numbers for
 *              the indirect blocks (if needed) and the first direct block,
 * @blks: on return it will store the total number of allocated
 *        direct blocks
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
                             ext4_lblk_t iblock, ext4_fsblk_t goal,
                             int indirect_blks, int blks,
                             ext4_fsblk_t new_blocks[4], int *err)
{
        struct ext4_allocation_request ar;
        int target, i;
        unsigned long count = 0, blk_allocated = 0;
        int index = 0;
        ext4_fsblk_t current_block = 0;
        int ret = 0;

        /*
         * Here we try to allocate the requested multiple blocks at once,
         * on a best-effort basis.
         * To build a branch, we should allocate blocks for
         * the indirect blocks (if not allocated yet), and at least
         * the first direct block of this branch.  That's the
         * minimum number of blocks we need to allocate (required).
         */
        /* first we try to allocate the indirect blocks */
        target = indirect_blks;
        while (target > 0) {
                count = target;
                /* allocating blocks for indirect blocks and direct blocks */
                current_block = ext4_new_meta_blocks(handle, inode,
                                                     goal, &count, err);
                if (*err)
                        goto failed_out;

                target -= count;
                /* allocate blocks for indirect blocks */
                while (index < indirect_blks && count) {
                        new_blocks[index++] = current_block++;
                        count--;
                }
                if (count > 0) {
                        /*
                         * save the new block number
                         * for the first direct block
                         */
                        new_blocks[index] = current_block;
                        printk(KERN_INFO "%s returned more blocks than "
                               "requested\n", __func__);
                        WARN_ON(1);
                        break;
                }
        }

        target = blks - count;
        blk_allocated = count;
        if (!target)
                goto allocated;
        /* Now allocate data blocks */
        memset(&ar, 0, sizeof(ar));
        ar.inode = inode;
        ar.goal = goal;
        ar.len = target;
        ar.logical = iblock;
        if (S_ISREG(inode->i_mode))
                /* enable in-core preallocation only for regular files */
                ar.flags = EXT4_MB_HINT_DATA;

        current_block = ext4_mb_new_blocks(handle, &ar, err);

        if (*err && (target == blks)) {
                /*
                 * if the allocation failed and we didn't allocate
                 * any blocks before
                 */
                goto failed_out;
        }
        if (!*err) {
                if (target == blks) {
                        /*
                         * save the new block number
                         * for the first direct block
                         */
                        new_blocks[index] = current_block;
                }
                blk_allocated += ar.len;
        }
allocated:
        /* total number of blocks allocated for direct blocks */
        ret = blk_allocated;
        *err = 0;
        return ret;
failed_out:
        for (i = 0; i < index; i++)
                ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
        return ret;
}
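
/*
 * Example (illustrative): for indirect_blks == 2 and blks == 4, the loop
 * above first obtains the two metadata blocks (any surplus from that
 * request becomes the first direct block), and the remaining direct
 * blocks are then requested from mballoc in a single
 * ext4_allocation_request.  The return value is the number of direct
 * blocks allocated; new_blocks[] carries the metadata block numbers plus
 * the first direct block number.
 */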
/**
 * ext4_alloc_branch - allocate and set up a chain of blocks.
 * @inode: owner
 * @indirect_blks: number of allocated indirect blocks
 * @blks: number of allocated direct blocks
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * This function allocates blocks, zeroes out all but the last one,
 * links them into chain and (if we are synchronous) writes them to disk.
 * In other words, it prepares a branch that can be spliced onto the
 * inode.  It stores the information about that chain in the branch[], in
 * the same format as ext4_get_branch() would do.  We are calling it after
 * we had read the existing part of chain and partial points to the last
 * triple of that (one with zero ->key).  Upon the exit we have the same
 * picture as after the successful ext4_get_block(), except that in one
 * place chain is disconnected - *branch->p is still zero (we did not
 * set the last link), but branch->key contains the number that should
 * be placed into *branch->p to fill that gap.
 *
 * If allocation fails we free all blocks we've allocated (and forget
 * their buffer_heads) and return the error value from the failed
 * ext4_alloc_block() (normally -ENOSPC).  Otherwise we set the chain
 * as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
                             ext4_lblk_t iblock, int indirect_blks,
                             int *blks, ext4_fsblk_t goal,
                             ext4_lblk_t *offsets, Indirect *branch)
{
        int blocksize = inode->i_sb->s_blocksize;
        int i, n = 0;
        int err = 0;
        struct buffer_head *bh;
        int num;
        ext4_fsblk_t new_blocks[4];
        ext4_fsblk_t current_block;

        num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
                                *blks, new_blocks, &err);
        if (err)
                return err;

        branch[0].key = cpu_to_le32(new_blocks[0]);
        /*
         * metadata blocks and data blocks are allocated.
         */
        for (n = 1; n <= indirect_blks; n++) {
                /*
                 * Get buffer_head for parent block, zero it out
                 * and set the pointer to new one, then send
                 * parent to disk.
                 */
                bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
                branch[n].bh = bh;
                lock_buffer(bh);
                BUFFER_TRACE(bh, "call get_create_access");
                err = ext4_journal_get_create_access(handle, bh);
                if (err) {
                        unlock_buffer(bh);
                        brelse(bh);
                        goto failed;
                }

                memset(bh->b_data, 0, blocksize);
                branch[n].p = (__le32 *) bh->b_data + offsets[n];
                branch[n].key = cpu_to_le32(new_blocks[n]);
                *branch[n].p = branch[n].key;
                if (n == indirect_blks) {
                        current_block = new_blocks[n];
                        /*
                         * End of chain, update the last new metablock of
                         * the chain to point to the newly allocated
                         * data block numbers
                         */
                        for (i = 1; i < num; i++)
                                *(branch[n].p + i) = cpu_to_le32(++current_block);
                }
                BUFFER_TRACE(bh, "marking uptodate");
                set_buffer_uptodate(bh);
                unlock_buffer(bh);

                BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
                err = ext4_handle_dirty_metadata(handle, inode, bh);
                if (err)
                        goto failed;
        }
        *blks = num;
        return err;
failed:
        /* Allocation failed, free what we already allocated */
        for (i = 1; i <= n; i++) {
                BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
                ext4_journal_forget(handle, branch[i].bh);
        }
        for (i = 0; i < indirect_blks; i++)
                ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);

        ext4_free_blocks(handle, inode, new_blocks[i], num, 0);

        return err;
}
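
/*
 * On success (illustrative summary): branch[0].key holds the block number
 * that still has to be stored into the parent slot, branch[1..n].bh are
 * the freshly zeroed indirect buffers already linked to each other, and
 * only the final link (*branch[0].p) is left for ext4_splice_branch().
 */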
/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *      ext4_alloc_branch)
 * @where: location of missing link
 * @num:   number of indirect blocks we are adding
 * @blks:  number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.).  In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
                              ext4_lblk_t block, Indirect *where, int num,
                              int blks)
{
        int i;
        int err = 0;
        ext4_fsblk_t current_block;

        /*
         * If we're splicing into a [td]indirect block (as opposed to the
         * inode) then we need to get write access to the [td]indirect block
         * before the splice.
         */
        if (where->bh) {
                BUFFER_TRACE(where->bh, "get_write_access");
                err = ext4_journal_get_write_access(handle, where->bh);
                if (err)
                        goto err_out;
        }
        /* That's it */

        *where->p = where->key;

        /*
         * Update the host buffer_head or inode to point to the
         * just-allocated direct blocks.
         */
        if (num == 0 && blks > 1) {
                current_block = le32_to_cpu(where->key) + 1;
                for (i = 1; i < blks; i++)
                        *(where->p + i) = cpu_to_le32(current_block++);
        }

        /* We are done with atomic stuff, now do the rest of housekeeping */

        inode->i_ctime = ext4_current_time(inode);
        ext4_mark_inode_dirty(handle, inode);

        /* had we spliced it onto indirect block? */
        if (where->bh) {
                /*
                 * If we spliced it onto an indirect block, we haven't
                 * altered the inode.  Note however that if it is being spliced
                 * onto an indirect block at the very end of the file (the
                 * file is growing) then we *will* alter the inode to reflect
                 * the new i_size.  But that is not done here - it is done in
                 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
                 */
                jbd_debug(5, "splicing indirect only\n");
                BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
                err = ext4_handle_dirty_metadata(handle, inode, where->bh);
                if (err)
                        goto err_out;
        } else {
                /*
                 * OK, we spliced it into the inode itself on a direct block.
                 * Inode was dirtied above.
                 */
                jbd_debug(5, "splicing direct\n");
        }
        return err;

err_out:
        for (i = 1; i <= num; i++) {
                BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
                ext4_journal_forget(handle, where[i].bh);
                ext4_free_blocks(handle, inode,
                                 le32_to_cpu(where[i-1].key), 1, 0);
        }
        ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 0);

        return err;
}

/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf.  So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * Needs to be called with
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
 * (ie, create is zero).  Otherwise down_write(&EXT4_I(inode)->i_data_sem)
 */
static int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
                                  ext4_lblk_t iblock, unsigned int maxblocks,
                                  struct buffer_head *bh_result,
                                  int create, int extend_disksize)
{
        int err = -EIO;
        ext4_lblk_t offsets[4];
        Indirect chain[4];
        Indirect *partial;
        ext4_fsblk_t goal;
        int indirect_blks;
        int blocks_to_boundary = 0;
        int depth;
        struct ext4_inode_info *ei = EXT4_I(inode);
        int count = 0;
        ext4_fsblk_t first_block = 0;
        loff_t disksize;


        J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
        J_ASSERT(handle != NULL || create == 0);
        depth = ext4_block_to_path(inode, iblock, offsets,
                                   &blocks_to_boundary);

        if (depth == 0)
                goto out;

        partial = ext4_get_branch(inode, depth, offsets, chain, &err);

        /* Simplest case - block found, no allocation needed */
        if (!partial) {
                first_block = le32_to_cpu(chain[depth - 1].key);
                clear_buffer_new(bh_result);
                count++;
                /* map more blocks */
                while (count < maxblocks && count <= blocks_to_boundary) {
                        ext4_fsblk_t blk;

                        blk = le32_to_cpu(*(chain[depth-1].p + count));

                        if (blk == first_block + count)
                                count++;
                        else
                                break;
                }
                goto got_it;
        }

        /* Next simple case - plain lookup or failed read of indirect block */
        if (!create || err == -EIO)
                goto cleanup;

        /*
         * Okay, we need to do block allocation.
         */
        goal = ext4_find_goal(inode, iblock, partial);

        /* the number of blocks needed for the [d,t]indirect blocks */
        indirect_blks = (chain + depth) - partial - 1;

        /*
         * Next look up the indirect map to count the total number of
         * direct blocks to allocate for this branch.
         */
        count = ext4_blks_to_allocate(partial, indirect_blks,
                                      maxblocks, blocks_to_boundary);
        /*
         * Block out ext4_truncate while we alter the tree
         */
        err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
                                &count, goal,
                                offsets + (partial - chain), partial);

        /*
         * The ext4_splice_branch call will free and forget any buffers
         * on the new chain if there is a failure, but that risks using
         * up transaction credits, especially for bitmaps where the
         * credits cannot be returned.  Can we handle this somehow?  We
         * may need to return -EAGAIN upwards in the worst case. --sct
         */
        if (!err)
                err = ext4_splice_branch(handle, inode, iblock,
                                         partial, indirect_blks, count);
        /*
         * i_disksize growing is protected by i_data_sem.  Don't forget to
         * protect it if you're about to implement concurrent
         * ext4_get_block() -bzzz
         */
        if (!err && extend_disksize) {
                disksize = ((loff_t) iblock + count) << inode->i_blkbits;
                if (disksize > i_size_read(inode))
                        disksize = i_size_read(inode);
                if (disksize > ei->i_disksize)
                        ei->i_disksize = disksize;
        }
        if (err)
                goto cleanup;

        set_buffer_new(bh_result);
got_it:
        map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
        if (count > blocks_to_boundary)
                set_buffer_boundary(bh_result);
        err = count;
        /* Clean up and exit */
        partial = chain + depth - 1;    /* the whole chain */
cleanup:
        while (partial > chain) {
                BUFFER_TRACE(partial->bh, "call brelse");
                brelse(partial->bh);
                partial--;
        }
        BUFFER_TRACE(bh_result, "returned");
out:
        return err;
}

qsize_t ext4_get_reserved_space(struct inode *inode)
{
        unsigned long long total;

        spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
        total = EXT4_I(inode)->i_reserved_data_blocks +
                EXT4_I(inode)->i_reserved_meta_blocks;
        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

        return total;
}

/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate @blocks for a non-extent-based file.
 */
static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
{
        int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
        int ind_blks, dind_blks, tind_blks;

        /* number of new indirect blocks needed */
        ind_blks = (blocks + icap - 1) / icap;

        dind_blks = (ind_blks + icap - 1) / icap;

        tind_blks = 1;

        return ind_blks + dind_blks + tind_blks;
}

/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate the given number of blocks.
 */
static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
{
        if (!blocks)
                return 0;

        if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
                return ext4_ext_calc_metadata_amount(inode, blocks);

        return ext4_indirect_calc_metadata_amount(inode, blocks);
}
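
/*
 * Worked example (illustrative): with 4KB blocks icap == 1024, so for
 * blocks == 1 the indirect case reserves ind_blks == 1, dind_blks == 1
 * and tind_blks == 1, i.e. 3 metadata blocks for a single data block.
 * This is deliberately pessimistic; unused reservations are given back
 * in ext4_da_update_reserve_space() below.
 */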
static void ext4_da_update_reserve_space(struct inode *inode, int used)
{
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        int total, mdb, mdb_free;

        spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
        /* recalculate the number of metablocks that still need reserving */
        total = EXT4_I(inode)->i_reserved_data_blocks - used;
        mdb = ext4_calc_metadata_amount(inode, total);

        /* figure out how many metablocks to release */
        BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
        mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;

        if (mdb_free) {
                /* Account for allocated meta_blocks */
                mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;

                /* update fs dirty blocks counter */
                percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
                EXT4_I(inode)->i_allocated_meta_blocks = 0;
                EXT4_I(inode)->i_reserved_meta_blocks = mdb;
        }

        /* update per-inode reservations */
        BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
        EXT4_I(inode)->i_reserved_data_blocks -= used;
        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

        /*
         * Free any over-booked quota reservation for metadata blocks.
         */
        if (mdb_free)
                vfs_dq_release_reservation_block(inode, mdb_free);

        /*
         * If we have done all the pending block allocations and if
         * there aren't any writers on the inode, we can discard the
         * inode's preallocations.
         */
        if (!total && (atomic_read(&inode->i_writecount) == 0))
                ext4_discard_preallocations(inode);
}
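
/*
 * Example (illustrative): if all 10 of i_reserved_data_blocks are used,
 * total becomes 0 and mdb == 0; with i_reserved_meta_blocks == 3 and one
 * metadata block actually allocated, mdb_free == 3 is reduced to 2, and
 * those two never-used metadata reservations are returned to the dirty
 * blocks counter and to quota.
 */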
/*
 * The ext4_get_blocks_wrap() function tries to look up the requested
 * blocks, and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem and allocates blocks
 * and stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extent based, it will call ext4_ext_get_blocks();
 * otherwise, it calls ext4_get_blocks_handle() to handle indirect-mapping
 * based files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and uninitialized,
 * the result buffer head is unmapped.  If create == 1, it will make sure
 * the buffer head is mapped.
 *
 * It returns 0 if plain look up failed (blocks have not been allocated);
 * in that case, the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
                         unsigned int max_blocks, struct buffer_head *bh,
                         int create, int extend_disksize, int flag)
{
        int retval;

        clear_buffer_mapped(bh);

        /*
         * Try to see if we can get the block without requesting
         * a new file system block.
         */
        down_read((&EXT4_I(inode)->i_data_sem));
        if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
                retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
                                             bh, 0, 0);
        } else {
                retval = ext4_get_blocks_handle(handle,
                                inode, block, max_blocks, bh, 0, 0);
        }
        up_read((&EXT4_I(inode)->i_data_sem));

        /* If it is only a block(s) look up */
        if (!create)
                return retval;

        /*
         * Return if the blocks have already been allocated.
         *
         * Note that if blocks have been preallocated,
         * ext4_ext_get_blocks() returns with create = 0
         * and the buffer head unmapped.
         */
        if (retval > 0 && buffer_mapped(bh))
                return retval;

        /*
         * Allocating new blocks and/or writing to an uninitialized extent
         * will possibly result in updating i_data, so we take
         * the write lock of i_data_sem, and call get_blocks()
         * with create == 1 flag.
         */
        down_write((&EXT4_I(inode)->i_data_sem));

        /*
         * if the caller is from delayed allocation writeout path
         * we have already reserved fs blocks for allocation;
         * let the underlying get_block() function know to
         * avoid double accounting
         */
        if (flag)
                EXT4_I(inode)->i_delalloc_reserved_flag = 1;
        /*
         * We need to check for EXT4 here because migrate
         * could have changed the inode type in between
         */
        if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
                retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
                                             bh, create, extend_disksize);
        } else {
                retval = ext4_get_blocks_handle(handle, inode, block,
                                max_blocks, bh, create, extend_disksize);

                if (retval > 0 && buffer_new(bh)) {
                        /*
                         * We allocated new blocks which will result in
                         * i_data's format changing.  Force the migrate
                         * to fail by clearing migrate flags
                         */
                        EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags &
                                                        ~EXT4_EXT_MIGRATE;
                }
        }

        if (flag) {
                EXT4_I(inode)->i_delalloc_reserved_flag = 0;
                /*
                 * Update reserved blocks/metadata blocks after successful
                 * block allocation which had been deferred till now.
                 */
                if ((retval > 0) && buffer_delay(bh))
                        ext4_da_update_reserve_space(inode, retval);
        }

        up_write((&EXT4_I(inode)->i_data_sem));
        return retval;
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

int ext4_get_block(struct inode *inode, sector_t iblock,
                   struct buffer_head *bh_result, int create)
{
        handle_t *handle = ext4_journal_current_handle();
        int ret = 0, started = 0;
        unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
        int dio_credits;

        if (create && !handle) {
                /* Direct IO write... */
                if (max_blocks > DIO_MAX_BLOCKS)
                        max_blocks = DIO_MAX_BLOCKS;
                dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
                handle = ext4_journal_start(inode, dio_credits);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        goto out;
                }
                started = 1;
        }

        ret = ext4_get_blocks_wrap(handle, inode, iblock,
                                   max_blocks, bh_result, create, 0, 0);
        if (ret > 0) {
                bh_result->b_size = (ret << inode->i_blkbits);
                ret = 0;
        }
        if (started)
                ext4_journal_stop(handle);
out:
        return ret;
}
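
/*
 * Example (illustrative): a 1MB direct IO write on a 4KB-block file
 * reaches ext4_get_block() with bh_result->b_size == 1048576, so
 * max_blocks == 256; that is below DIO_MAX_BLOCKS, and the handle is
 * started with ext4_chunk_trans_blocks(inode, 256) credits before
 * mapping.
 */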
/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
                                ext4_lblk_t block, int create, int *errp)
{
        struct buffer_head dummy;
        int fatal = 0, err;

        J_ASSERT(handle != NULL || create == 0);

        dummy.b_state = 0;
        dummy.b_blocknr = -1000;
        buffer_trace_init(&dummy.b_history);
        err = ext4_get_blocks_wrap(handle, inode, block, 1,
                                   &dummy, create, 1, 0);
        /*
         * ext4_get_blocks_handle() returns the number of blocks
         * mapped.  0 in case of a HOLE.
         */
        if (err > 0) {
                if (err > 1)
                        WARN_ON(1);
                err = 0;
        }
        *errp = err;
        if (!err && buffer_mapped(&dummy)) {
                struct buffer_head *bh;
                bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
                if (!bh) {
                        *errp = -EIO;
                        goto err;
                }
                if (buffer_new(&dummy)) {
                        J_ASSERT(create != 0);
                        J_ASSERT(handle != NULL);

                        /*
                         * Now that we do not always journal data, we should
                         * keep in mind whether this should always journal the
                         * new buffer as metadata.  For now, regular file
                         * writes use ext4_get_block instead, so it's not a
                         * problem.
                         */
                        lock_buffer(bh);
                        BUFFER_TRACE(bh, "call get_create_access");
                        fatal = ext4_journal_get_create_access(handle, bh);
                        if (!fatal && !buffer_uptodate(bh)) {
                                memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                                set_buffer_uptodate(bh);
                        }
                        unlock_buffer(bh);
                        BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
                        err = ext4_handle_dirty_metadata(handle, inode, bh);
                        if (!fatal)
                                fatal = err;
                } else {
                        BUFFER_TRACE(bh, "not a new buffer");
                }
                if (fatal) {
                        *errp = fatal;
                        brelse(bh);
                        bh = NULL;
                }
                return bh;
        }
err:
        return NULL;
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
                               ext4_lblk_t block, int create, int *err)
{
        struct buffer_head *bh;

        bh = ext4_getblk(handle, inode, block, create, err);
        if (!bh)
                return bh;
        if (buffer_uptodate(bh))
                return bh;
        ll_rw_block(READ_META, 1, &bh);
        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                return bh;
        put_bh(bh);
        *err = -EIO;
        return NULL;
}

static int walk_page_buffers(handle_t *handle,
                             struct buffer_head *head,
                             unsigned from,
                             unsigned to,
                             int *partial,
                             int (*fn)(handle_t *handle,
                                       struct buffer_head *bh))
{
        struct buffer_head *bh;
        unsigned block_start, block_end;
        unsigned blocksize = head->b_size;
        int err, ret = 0;
        struct buffer_head *next;

        for (bh = head, block_start = 0;
             ret == 0 && (bh != head || !block_start);
             block_start = block_end, bh = next) {
                next = bh->b_this_page;
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (partial && !buffer_uptodate(bh))
                                *partial = 1;
                        continue;
                }
                err = (*fn)(handle, bh);
                if (!ret)
                        ret = err;
        }
        return ret;
}
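
/*
 * walk_page_buffers() is the common iterator for the write paths below:
 * ext4_write_begin() runs it with do_journal_get_write_access(), and the
 * journalled write_end runs it with write_end_fn(), in both cases applied
 * only to buffers overlapping [from, to).
 */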
/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page().  In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
                                       struct buffer_head *bh)
{
        if (!buffer_mapped(bh) || buffer_freed(bh))
                return 0;
        return ext4_journal_get_write_access(handle, bh);
}

static int ext4_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
        handle_t *handle;
        int retries = 0;
        struct page *page;
        pgoff_t index;
        unsigned from, to;

        trace_mark(ext4_write_begin,
                   "dev %s ino %lu pos %llu len %u flags %u",
                   inode->i_sb->s_id, inode->i_ino,
                   (unsigned long long) pos, len, flags);
        index = pos >> PAGE_CACHE_SHIFT;
        from = pos & (PAGE_CACHE_SIZE - 1);
        to = from + len;

retry:
        handle = ext4_journal_start(inode, needed_blocks);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out;
        }

        /* We cannot recurse into the filesystem as the transaction is already
         * started */
        flags |= AOP_FLAG_NOFS;

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page) {
                ext4_journal_stop(handle);
                ret = -ENOMEM;
                goto out;
        }
        *pagep = page;

        ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
                                ext4_get_block);

        if (!ret && ext4_should_journal_data(inode)) {
                ret = walk_page_buffers(handle, page_buffers(page),
                                from, to, NULL, do_journal_get_write_access);
        }

        if (ret) {
                unlock_page(page);
                ext4_journal_stop(handle);
                page_cache_release(page);
                /*
                 * block_write_begin may have instantiated a few blocks
                 * outside i_size.  Trim these off again.  Don't need
                 * i_size_read because we hold i_mutex.
                 */
                if (pos + len > inode->i_size)
                        vmtruncate(inode, inode->i_size);
        }

        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
                goto retry;
out:
        return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
        if (!buffer_mapped(bh) || buffer_freed(bh))
                return 0;
        set_buffer_uptodate(bh);
        return ext4_handle_dirty_metadata(handle, NULL, bh);
}
/*
 * We need to pick up the new inode size which generic_commit_write gave us;
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
                                  struct address_space *mapping,
                                  loff_t pos, unsigned len, unsigned copied,
                                  struct page *page, void *fsdata)
{
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
        int ret = 0, ret2;

        trace_mark(ext4_ordered_write_end,
                   "dev %s ino %lu pos %llu len %u copied %u",
                   inode->i_sb->s_id, inode->i_ino,
                   (unsigned long long) pos, len, copied);
        ret = ext4_jbd2_file_inode(handle, inode);

        if (ret == 0) {
                loff_t new_i_size;

                new_i_size = pos + copied;
                if (new_i_size > EXT4_I(inode)->i_disksize) {
                        ext4_update_i_disksize(inode, new_i_size);
                        /* We need to mark the inode dirty even if
                         * new_i_size is less than inode->i_size
                         * but greater than i_disksize. (hint: delalloc)
                         */
                        ext4_mark_inode_dirty(handle, inode);
                }

                ret2 = generic_write_end(file, mapping, pos, len, copied,
                                         page, fsdata);
                copied = ret2;
                if (ret2 < 0)
                        ret = ret2;
        }
        ret2 = ext4_journal_stop(handle);
        if (!ret)
                ret = ret2;

        return ret ? ret : copied;
}

static int ext4_writeback_write_end(struct file *file,
                                    struct address_space *mapping,
                                    loff_t pos, unsigned len, unsigned copied,
                                    struct page *page, void *fsdata)
{
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
        int ret = 0, ret2;
        loff_t new_i_size;

        trace_mark(ext4_writeback_write_end,
                   "dev %s ino %lu pos %llu len %u copied %u",
                   inode->i_sb->s_id, inode->i_ino,
                   (unsigned long long) pos, len, copied);
        new_i_size = pos + copied;
        if (new_i_size > EXT4_I(inode)->i_disksize) {
                ext4_update_i_disksize(inode, new_i_size);
                /* We need to mark the inode dirty even if
                 * new_i_size is less than inode->i_size
                 * but greater than i_disksize. (hint: delalloc)
                 */
                ext4_mark_inode_dirty(handle, inode);
        }

        ret2 = generic_write_end(file, mapping, pos, len, copied,
                                 page, fsdata);
        copied = ret2;
        if (ret2 < 0)
                ret = ret2;

        ret2 = ext4_journal_stop(handle);
        if (!ret)
                ret = ret2;

        return ret ? ret : copied;
}
static int ext4_journalled_write_end(struct file *file,
                                     struct address_space *mapping,
                                     loff_t pos, unsigned len, unsigned copied,
                                     struct page *page, void *fsdata)
{
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
        int ret = 0, ret2;
        int partial = 0;
        unsigned from, to;
        loff_t new_i_size;

        trace_mark(ext4_journalled_write_end,
                   "dev %s ino %lu pos %llu len %u copied %u",
                   inode->i_sb->s_id, inode->i_ino,
                   (unsigned long long) pos, len, copied);
        from = pos & (PAGE_CACHE_SIZE - 1);
        to = from + len;

        if (copied < len) {
                if (!PageUptodate(page))
                        copied = 0;
                page_zero_new_buffers(page, from+copied, to);
        }

        ret = walk_page_buffers(handle, page_buffers(page), from,
                                to, &partial, write_end_fn);
        if (!partial)
                SetPageUptodate(page);
        new_i_size = pos + copied;
        if (new_i_size > inode->i_size)
                i_size_write(inode, pos+copied);
        EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
        if (new_i_size > EXT4_I(inode)->i_disksize) {
                ext4_update_i_disksize(inode, new_i_size);
                ret2 = ext4_mark_inode_dirty(handle, inode);
                if (!ret)
                        ret = ret2;
        }

        unlock_page(page);
        ret2 = ext4_journal_stop(handle);
        if (!ret)
                ret = ret2;
        page_cache_release(page);

        return ret ? ret : copied;
}

static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
{
        int retries = 0;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        unsigned long md_needed, mdblocks, total = 0;

        /*
         * recalculate the amount of metadata blocks to reserve
         * in order to allocate nrblocks;
         * worst case is one extent per block
         */
repeat:
        spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
        total = EXT4_I(inode)->i_reserved_data_blocks + nrblocks;
        mdblocks = ext4_calc_metadata_amount(inode, total);
        BUG_ON(mdblocks < EXT4_I(inode)->i_reserved_meta_blocks);

        md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
        total = md_needed + nrblocks;

        /*
         * Make quota reservation here to prevent quota overflow
         * later.  Real quota accounting is done at pages writeout
         * time.
         */
        if (vfs_dq_reserve_block(inode, total)) {
                spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
                return -EDQUOT;
        }

        if (ext4_claim_free_blocks(sbi, total)) {
                spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
                if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
                        yield();
                        goto repeat;
                }
                vfs_dq_release_reservation_block(inode, total);
                return -ENOSPC;
        }
        EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
        EXT4_I(inode)->i_reserved_meta_blocks = mdblocks;

        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
        return 0;       /* success */
}
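
/*
 * Example (illustrative): reserving a single data block on an
 * indirect-mapped file with no prior reservations gives mdblocks == 3
 * (see ext4_indirect_calc_metadata_amount() above), so quota and the
 * free-space check are charged for total == 4 blocks up front.
 */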
static void ext4_da_release_space(struct inode *inode, int to_free)
{
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        int total, mdb, mdb_free, release;

        if (!to_free)
                return;         /* Nothing to release, exit */

        spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

        if (!EXT4_I(inode)->i_reserved_data_blocks) {
                /*
                 * If there are no reserved blocks, but we try to free some,
                 * then the counter is messed up somewhere.  But since this
                 * function is called from the invalidate page path, it is
                 * harmless to return without any action.
                 */
                printk(KERN_INFO "ext4 delalloc try to release %d reserved "
                       "blocks for inode %lu, but there is no reserved "
                       "data blocks\n", to_free, inode->i_ino);
                spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
                return;
        }

        /* recalculate the number of metablocks that still need reserving */
        total = EXT4_I(inode)->i_reserved_data_blocks - to_free;
        mdb = ext4_calc_metadata_amount(inode, total);

        /* figure out how many metablocks to release */
        BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
        mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;

        release = to_free + mdb_free;

        /* update fs dirty blocks counter for truncate case */
        percpu_counter_sub(&sbi->s_dirtyblocks_counter, release);

        /* update per-inode reservations */
        BUG_ON(to_free > EXT4_I(inode)->i_reserved_data_blocks);
        EXT4_I(inode)->i_reserved_data_blocks -= to_free;

        BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
        EXT4_I(inode)->i_reserved_meta_blocks = mdb;
        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

        vfs_dq_release_reservation_block(inode, release);
}

static void ext4_da_page_release_reservation(struct page *page,
                                             unsigned long offset)
{
        int to_release = 0;
        struct buffer_head *head, *bh;
        unsigned int curr_off = 0;

        head = page_buffers(page);
        bh = head;
        do {
                unsigned int next_off = curr_off + bh->b_size;

                if ((offset <= curr_off) && (buffer_delay(bh))) {
                        to_release++;
                        clear_buffer_delay(bh);
                }
                curr_off = next_off;
        } while ((bh = bh->b_this_page) != head);
        ext4_da_release_space(page->mapping->host, to_release);
}

/*
 * Delayed allocation stuff
 */

struct mpage_da_data {
        struct inode *inode;
        sector_t b_blocknr;             /* start block number of extent */
        size_t b_size;                  /* size of extent */
        unsigned long b_state;          /* state of the extent */
        unsigned long first_page, next_page;    /* extent of pages */
        struct writeback_control *wbc;
        int io_done;
        int pages_written;
        int retval;
};
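
/*
 * One mpage_da_data tracks a single contiguous extent under writeback:
 * [first_page, next_page) delimits the page range, while b_blocknr,
 * b_size and b_state describe the accumulated block extent in the style
 * of a buffer_head.
 */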
	 */
	index = mpd->first_page;
	end = mpd->next_page - 1;

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			index = page->index;
			if (index > end)
				break;
			index++;

			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));

			pages_skipped = mpd->wbc->pages_skipped;
			err = mapping->a_ops->writepage(page, mpd->wbc);
			if (!err && (pages_skipped == mpd->wbc->pages_skipped))
				/*
				 * we have successfully written the page
				 * without skipping it
				 */
				mpd->pages_written++;
			/*
			 * In error case, we have to continue because
			 * remaining pages are still locked
			 * XXX: unlock and re-dirty them?
			 */
			if (ret == 0)
				ret = err;
		}
		pagevec_release(&pvec);
	}
	return ret;
}

/*
 * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers
 *
 * @mpd->inode - inode to walk through
 * @exbh->b_blocknr - first block on a disk
 * @exbh->b_size - amount of space in bytes
 * @logical - first logical block to start assignment with
 *
 * the function goes through all passed space and puts actual disk
 * block numbers into the buffer heads, dropping BH_Delay
 */
static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical,
				 struct buffer_head *exbh)
{
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;
	int blocks = exbh->b_size >> inode->i_blkbits;
	sector_t pblock = exbh->b_blocknr, cur_logical;
	struct buffer_head *head, *bh;
	pgoff_t index, end;
	struct pagevec pvec;
	int nr_pages, i;

	index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	end = (logical + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	pagevec_init(&pvec, 0);

	while (index <= end) {
		/* XXX: optimize tail */
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			index = page->index;
			if (index > end)
				break;
			index++;

			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			BUG_ON(!page_has_buffers(page));

			bh = page_buffers(page);
			head = bh;

			/* skip blocks out of the range */
			do {
				if (cur_logical >= logical)
					break;
				cur_logical++;
			} while ((bh = bh->b_this_page) != head);

			do {
				if (cur_logical >= logical + blocks)
					break;
				if (buffer_delay(bh)) {
					bh->b_blocknr = pblock;
					clear_buffer_delay(bh);
					bh->b_bdev = inode->i_sb->s_bdev;
				} else if (buffer_unwritten(bh)) {
					bh->b_blocknr = pblock;
					clear_buffer_unwritten(bh);
					set_buffer_mapped(bh);
					set_buffer_new(bh);
					bh->b_bdev = inode->i_sb->s_bdev;
				} else if (buffer_mapped(bh))
					BUG_ON(bh->b_blocknr != pblock);

				cur_logical++;
				pblock++;
			} while ((bh = bh->b_this_page) != head);
		}
		pagevec_release(&pvec);
	}
}


/*
 * __unmap_underlying_blocks - just a helper function to unmap
 * a set of blocks described by @bh
 */
static inline void __unmap_underlying_blocks(struct inode *inode,
					     struct buffer_head *bh)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	int blocks, i;

	blocks = bh->b_size >> inode->i_blkbits;
	for (i = 0; i < blocks; i++)
		unmap_underlying_metadata(bdev, bh->b_blocknr + i);
}

static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
					sector_t logical, long blk_cnt)
{
	int nr_pages, i;
	pgoff_t index, end;
	struct pagevec pvec;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	end = (logical + blk_cnt - 1) >>
				(PAGE_CACHE_SHIFT - inode->i_blkbits);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			index = page->index;
			if (index > end)
				break;
			index++;

			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			block_invalidatepage(page, 0);
			ClearPageUptodate(page);
			unlock_page(page);
		}
	}
	return;
}

static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	printk(KERN_EMERG "Total free blocks count %lld\n",
			ext4_count_free_blocks(inode->i_sb));
	printk(KERN_EMERG "Free/Dirty block details\n");
	printk(KERN_EMERG "free_blocks=%lld\n",
		(long long)percpu_counter_sum(&sbi->s_freeblocks_counter));
	printk(KERN_EMERG "dirty_blocks=%lld\n",
		(long long)percpu_counter_sum(&sbi->s_dirtyblocks_counter));
	printk(KERN_EMERG "Block reservation details\n");
	printk(KERN_EMERG "i_reserved_data_blocks=%u\n",
			EXT4_I(inode)->i_reserved_data_blocks);
	printk(KERN_EMERG "i_reserved_meta_blocks=%u\n",
			EXT4_I(inode)->i_reserved_meta_blocks);
	return;
}

#define EXT4_DELALLOC_RSVED	1
static int ext4_da_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	int ret;
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
	loff_t disksize = EXT4_I(inode)->i_disksize;
	handle_t *handle = NULL;

	handle = ext4_journal_current_handle();
	BUG_ON(!handle);
	ret = ext4_get_blocks_wrap(handle, inode, iblock, max_blocks,
				   bh_result, create, 0, EXT4_DELALLOC_RSVED);
	if (ret <= 0)
		return ret;

	bh_result->b_size = (ret << inode->i_blkbits);

	if (ext4_should_order_data(inode)) {
		int retval;
		retval = ext4_jbd2_file_inode(handle, inode);
		if (retval)
			/*
			 * Failed to add inode for ordered mode. Don't
			 * update file size
			 */
			return retval;
	}

	/*
	 * Update on-disk size along with block allocation. We don't
	 * use 'extend_disksize' as size may change within an already
	 * allocated block -bzzz
	 */
	disksize = ((loff_t) iblock + ret) << inode->i_blkbits;
	if (disksize > i_size_read(inode))
		disksize = i_size_read(inode);
	if (disksize > EXT4_I(inode)->i_disksize) {
		ext4_update_i_disksize(inode, disksize);
		ret = ext4_mark_inode_dirty(handle, inode);
		return ret;
	}
	return 0;
}

/*
 * mpage_da_map_blocks - go through given space
 *
 * @mpd - bh describing space
 *
 * The function skips space we know is already mapped to disk blocks.
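 * (an extent that is already mapped and not delayed is returned as-is;
 * the rest is handed to the block allocator and, on success, the new
 * block numbers are copied back into the buffer_heads)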
 *
 */
static int mpage_da_map_blocks(struct mpage_da_data *mpd)
{
	int err = 0;
	struct buffer_head new;
	sector_t next;

	/*
	 * We consider only non-mapped and non-allocated blocks
	 */
	if ((mpd->b_state & (1 << BH_Mapped)) &&
	    !(mpd->b_state & (1 << BH_Delay)))
		return 0;
	new.b_state = mpd->b_state;
	new.b_blocknr = 0;
	new.b_size = mpd->b_size;
	next = mpd->b_blocknr;
	/*
	 * If we didn't accumulate anything
	 * to write, simply return
	 */
	if (!new.b_size)
		return 0;

	err = ext4_da_get_block_write(mpd->inode, next, &new, 1);
	if (err) {
		/*
		 * If get_block returns an error we simply
		 * return. Later writepage will redirty the page and
		 * writepages will find the dirty page again
		 */
		if (err == -EAGAIN)
			return 0;

		if (err == -ENOSPC &&
		    ext4_count_free_blocks(mpd->inode->i_sb)) {
			mpd->retval = err;
			return 0;
		}

		/*
		 * get_block failure will cause us to loop in
		 * writepages, because a_ops->writepage won't be able
		 * to make progress. The page will be redirtied by
		 * writepage and writepages will again try to write
		 * the same page.
		 */
		printk(KERN_EMERG "%s block allocation failed for inode %lu "
				  "at logical offset %llu with max blocks "
				  "%zd with error %d\n",
				  __func__, mpd->inode->i_ino,
				  (unsigned long long)next,
				  mpd->b_size >> mpd->inode->i_blkbits, err);
		printk(KERN_EMERG "This should not happen!! "
				  "Data will be lost\n");
		if (err == -ENOSPC) {
			ext4_print_free_blocks(mpd->inode);
		}
		/* invalidate all the pages */
		ext4_da_block_invalidatepages(mpd, next,
				mpd->b_size >> mpd->inode->i_blkbits);
		return err;
	}
	BUG_ON(new.b_size == 0);

	if (buffer_new(&new))
		__unmap_underlying_blocks(mpd->inode, &new);

	/*
	 * If blocks are delayed marked, we need to
	 * put actual blocknr and drop delayed bit
	 */
	if ((mpd->b_state & (1 << BH_Delay)) ||
	    (mpd->b_state & (1 << BH_Unwritten)))
		mpage_put_bnr_to_bhs(mpd, next, &new);

	return 0;
}

#define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
		(1 << BH_Delay) | (1 << BH_Unwritten))

/*
 * mpage_add_bh_to_extent - try to add one more block to extent of blocks
 *
 * @mpd->lbh - extent of blocks
 * @logical - logical number of the block in the file
 * @bh - bh of the block (used to access block's state)
 *
 * the function is used to collect contiguous blocks in the same state
 */
static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
				   sector_t logical, size_t b_size,
				   unsigned long b_state)
{
	sector_t next;
	int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;

	/* check if the reserved journal credits might overflow */
	if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) {
		if (nrblocks >= EXT4_MAX_TRANS_DATA) {
			/*
			 * With non-extent format we are limited by the journal
			 * credits available. Total credit needed to insert
			 * nrblocks contiguous blocks is dependent on the
			 * nrblocks. So limit nrblocks.
			 */
			goto flush_it;
		} else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
				EXT4_MAX_TRANS_DATA) {
			/*
			 * Adding the new buffer_head would make it cross the
			 * allowed limit for which we have journal credits
			 * reserved.
			 * So limit the new bh->b_size
			 */
			b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
						mpd->inode->i_blkbits;
			/* we will do mpage_da_submit_io in the next loop */
		}
	}
	/*
	 * First block in the extent
	 */
	if (mpd->b_size == 0) {
		mpd->b_blocknr = logical;
		mpd->b_size = b_size;
		mpd->b_state = b_state & BH_FLAGS;
		return;
	}

	next = mpd->b_blocknr + nrblocks;
	/*
	 * Can we merge the block to our big extent?
	 */
	if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
		mpd->b_size += b_size;
		return;
	}

flush_it:
	/*
	 * We couldn't merge the block to our extent, so we
	 * need to flush the current extent and start a new one
	 */
	if (mpage_da_map_blocks(mpd) == 0)
		mpage_da_submit_io(mpd);
	mpd->io_done = 1;
	return;
}

/*
 * __mpage_da_writepage - finds extent of pages and blocks
 *
 * @page: page to consider
 * @wbc: not used, we just follow rules
 * @data: context
 *
 * The function finds extents of pages and scans them for all blocks.
 */
static int __mpage_da_writepage(struct page *page,
				struct writeback_control *wbc, void *data)
{
	struct mpage_da_data *mpd = data;
	struct inode *inode = mpd->inode;
	struct buffer_head *bh, *head;
	sector_t logical;

	if (mpd->io_done) {
		/*
		 * Rest of the pages in the page_vec:
		 * redirty them and skip them. We will
		 * try to write them again after
		 * starting a new transaction
		 */
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return MPAGE_DA_EXTENT_TAIL;
	}
	/*
	 * Can we merge this page to the current extent?
	 */
	if (mpd->next_page != page->index) {
		/*
		 * Nope, we can't. So, we map non-allocated blocks
		 * and start IO on them using writepage()
		 */
		if (mpd->next_page != mpd->first_page) {
			if (mpage_da_map_blocks(mpd) == 0)
				mpage_da_submit_io(mpd);
			/*
			 * skip rest of the pages in the page_vec
			 */
			mpd->io_done = 1;
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return MPAGE_DA_EXTENT_TAIL;
		}

		/*
		 * Start next extent of pages ...
		 */
		mpd->first_page = page->index;

		/*
		 * ... and blocks
		 */
		mpd->b_size = 0;
		mpd->b_state = 0;
		mpd->b_blocknr = 0;
	}

	mpd->next_page = page->index + 1;
	logical = (sector_t) page->index <<
		  (PAGE_CACHE_SHIFT - inode->i_blkbits);

	if (!page_has_buffers(page)) {
		mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE,
				       (1 << BH_Dirty) | (1 << BH_Uptodate));
		if (mpd->io_done)
			return MPAGE_DA_EXTENT_TAIL;
	} else {
		/*
		 * Page with regular buffer heads, just add all dirty ones
		 */
		head = page_buffers(page);
		bh = head;
		do {
			BUG_ON(buffer_locked(bh));
			/*
			 * We need to try to allocate
			 * unmapped blocks in the same page.
			 * Otherwise we won't make progress
			 * with the page in ext4_da_writepage
			 */
			if (buffer_dirty(bh) &&
			    (!buffer_mapped(bh) || buffer_delay(bh))) {
				mpage_add_bh_to_extent(mpd, logical,
						       bh->b_size,
						       bh->b_state);
				if (mpd->io_done)
					return MPAGE_DA_EXTENT_TAIL;
			} else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
				/*
				 * mapped dirty buffer. We need to update
				 * the b_state because we look at
				 * b_state in mpage_da_map_blocks.
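				 * (BH_FLAGS above masks b_state down to
				 * the Uptodate/Mapped/Delay/Unwritten
				 * bits that extent merging cares about.)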
				 * We don't update b_size because if we find
				 * an unmapped buffer_head later we need to
				 * use the b_state flag of that buffer_head.
				 */
				if (mpd->b_size == 0)
					mpd->b_state = bh->b_state & BH_FLAGS;
			}
			logical++;
		} while ((bh = bh->b_this_page) != head);
	}

	return 0;
}

/*
 * this is a special callback for ->write_begin() only;
 * its intention is to return a mapped block or reserve space
 */
static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
				  struct buffer_head *bh_result, int create)
{
	int ret = 0;

	BUG_ON(create == 0);
	BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);

	/*
	 * first, we need to know whether the block is already allocated;
	 * preallocated blocks are unmapped but should be treated
	 * the same as allocated blocks.
	 */
	ret = ext4_get_blocks_wrap(NULL, inode, iblock, 1, bh_result, 0, 0, 0);
	if ((ret == 0) && !buffer_delay(bh_result)) {
		/* the block isn't (pre)allocated yet, let's reserve space */
		/*
		 * XXX: __block_prepare_write() unmaps passed block,
		 * is it OK?
		 */
		ret = ext4_da_reserve_space(inode, 1);
		if (ret)
			/* not enough space to reserve */
			return ret;

		map_bh(bh_result, inode->i_sb, 0);
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
	} else if (ret > 0) {
		bh_result->b_size = (ret << inode->i_blkbits);
		ret = 0;
	}

	return ret;
}

static int ext4_bh_unmapped_or_delay(handle_t *handle, struct buffer_head *bh)
{
	/*
	 * An unmapped buffer is possible for holes.
	 * A delayed buffer is possible with delayed allocation.
	 */
	return ((!buffer_mapped(bh) || buffer_delay(bh)) && buffer_dirty(bh));
}

static int ext4_normal_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	int ret = 0;
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;

	/*
	 * we don't want to do block allocation in writepage,
	 * so call get_block_wrap with create = 0
	 */
	ret = ext4_get_blocks_wrap(NULL, inode, iblock, max_blocks,
				   bh_result, 0, 0, 0);
	if (ret > 0) {
		bh_result->b_size = (ret << inode->i_blkbits);
		ret = 0;
	}
	return ret;
}

/*
 * gets called via ext4_da_writepages after taking page lock (have journal handle)
 * gets called via journal_submit_inode_data_buffers (no journal handle)
 * gets called via shrink_page_list via pdflush (no journal handle)
 * or grab_page_cache when doing write_begin (have journal handle)
 */
static int ext4_da_writepage(struct page *page,
				struct writeback_control *wbc)
{
	int ret = 0;
	loff_t size;
	unsigned int len;
	struct buffer_head *page_bufs;
	struct inode *inode = page->mapping->host;

	trace_mark(ext4_da_writepage,
		   "dev %s ino %lu page_index %lu",
		   inode->i_sb->s_id, inode->i_ino, page->index);
	size = i_size_read(inode);
	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

	if (page_has_buffers(page)) {
		page_bufs = page_buffers(page);
		if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
					ext4_bh_unmapped_or_delay)) {
			/*
			 * We don't want to do block allocation,
			 * so redirty the page and return.
			 * We may reach here when we do a journal
			 * commit via journal_submit_inode_data_buffers.
			 * If the blocks are not yet mapped we just ignore
			 * them. We can also reach here via shrink_page_list
			 */
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return 0;
		}
	} else {
		/*
		 * The test for page_has_buffers() is subtle:
		 * We know the page is dirty but it lost buffers. That means
		 * that at some moment in time after write_begin()/write_end()
		 * has been called all buffers have been clean and thus they
		 * must have been written at least once. So they are all
		 * mapped and we can happily proceed with mapping them
		 * and writing the page.
		 *
		 * Try to initialize the buffer_heads and check whether
		 * all are mapped and non-delay. We don't want to
		 * do block allocation here.
		 */
		ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
						ext4_normal_get_block_write);
		if (!ret) {
			page_bufs = page_buffers(page);
			/* check whether all are mapped and non-delay */
			if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
						ext4_bh_unmapped_or_delay)) {
				redirty_page_for_writepage(wbc, page);
				unlock_page(page);
				return 0;
			}
		} else {
			/*
			 * We can't do block allocation here,
			 * so just redirty the page, unlock
			 * and return
			 */
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return 0;
		}
		/* now mark the buffer_heads as dirty and uptodate */
		block_commit_write(page, 0, PAGE_CACHE_SIZE);
	}

	if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
		ret = nobh_writepage(page, ext4_normal_get_block_write, wbc);
	else
		ret = block_write_full_page(page,
						ext4_normal_get_block_write,
						wbc);

	return ret;
}

/*
 * This is called via ext4_da_writepages() to
 * calculate the total number of credits to reserve to fit
 * a single extent allocation into a single transaction;
 * ext4_da_writepages() will loop calling this before
 * the block allocation.
 */

static int ext4_da_writepages_trans_blocks(struct inode *inode)
{
	int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;

	/*
	 * With non-extent format the journal credit needed to
	 * insert nrblocks contiguous blocks is dependent on the
	 * number of contiguous blocks.
	 * So we will limit the number of contiguous blocks
	 * to a sane value.
	 */
	if (!(inode->i_flags & EXT4_EXTENTS_FL) &&
	    (max_blocks > EXT4_MAX_TRANS_DATA))
		max_blocks = EXT4_MAX_TRANS_DATA;

	return ext4_chunk_trans_blocks(inode, max_blocks);
}

static int ext4_da_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	pgoff_t index;
	int range_whole = 0;
	handle_t *handle = NULL;
	struct mpage_da_data mpd;
	struct inode *inode = mapping->host;
	int no_nrwrite_index_update;
	int pages_written = 0;
	long pages_skipped;
	int range_cyclic, cycled = 1, io_done = 0;
	int needed_blocks, ret = 0, nr_to_writebump = 0;
	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);

	trace_mark(ext4_da_writepages,
		   "dev %s ino %lu nr_t_write %ld "
		   "pages_skipped %ld range_start %llu "
		   "range_end %llu nonblocking %d "
		   "for_kupdate %d for_reclaim %d "
		   "for_writepages %d range_cyclic %d",
		   inode->i_sb->s_id, inode->i_ino,
		   wbc->nr_to_write, wbc->pages_skipped,
		   (unsigned long long) wbc->range_start,
		   (unsigned long long) wbc->range_end,
		   wbc->nonblocking, wbc->for_kupdate,
		   wbc->for_reclaim, wbc->for_writepages,
		   wbc->range_cyclic);

	/*
	 * No pages to write? This is mainly a kludge to avoid starting
	 * a transaction for special inodes like the journal inode on
	 * last iput() because that could violate lock ordering on umount
	 */
	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	/*
	 * If the filesystem has aborted, it is read-only, so return
	 * right away instead of dumping stack traces later on that
	 * will obscure the real source of the problem.  We test
	 * EXT4_MOUNT_ABORT instead of sb->s_flag's MS_RDONLY because
	 * the latter could be true if the filesystem is mounted
	 * read-only, and in that case, ext4_da_writepages should
	 * *never* be called, so if that ever happens, we would want
	 * the stack trace.
	 */
	if (unlikely(sbi->s_mount_opt & EXT4_MOUNT_ABORT))
		return -EROFS;

	/*
	 * Make sure nr_to_write is >= sbi->s_mb_stream_request.
	 * This makes sure a small file's blocks are allocated in a
	 * single attempt, which ensures that small files
	 * get less fragmented.
	 */
	if (wbc->nr_to_write < sbi->s_mb_stream_request) {
		nr_to_writebump = sbi->s_mb_stream_request - wbc->nr_to_write;
		wbc->nr_to_write = sbi->s_mb_stream_request;
	}
	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
		range_whole = 1;

	range_cyclic = wbc->range_cyclic;
	if (wbc->range_cyclic) {
		index = mapping->writeback_index;
		if (index)
			cycled = 0;
		wbc->range_start = index << PAGE_CACHE_SHIFT;
		wbc->range_end = LLONG_MAX;
		wbc->range_cyclic = 0;
	} else
		index = wbc->range_start >> PAGE_CACHE_SHIFT;

	mpd.wbc = wbc;
	mpd.inode = mapping->host;

	/*
	 * we don't want write_cache_pages to update
	 * nr_to_write and writeback_index
	 */
	no_nrwrite_index_update = wbc->no_nrwrite_index_update;
	wbc->no_nrwrite_index_update = 1;
	pages_skipped = wbc->pages_skipped;

retry:
	while (!ret && wbc->nr_to_write > 0) {

		/*
		 * we insert one extent at a time. So we need
		 * the credits needed for a single extent allocation.
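		 * (ext4_da_writepages_trans_blocks() sizes the handle
		 * for allocating up to i_reserved_data_blocks as one
		 * contiguous chunk.)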
		 * journalled mode is currently not supported
		 * by delalloc
		 */
		BUG_ON(ext4_should_journal_data(inode));
		needed_blocks = ext4_da_writepages_trans_blocks(inode);

		/* start a new transaction */
		handle = ext4_journal_start(inode, needed_blocks);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			printk(KERN_CRIT "%s: jbd2_start: "
			       "%ld pages, ino %lu; err %d\n", __func__,
				wbc->nr_to_write, inode->i_ino, ret);
			dump_stack();
			goto out_writepages;
		}

		/*
		 * Now call __mpage_da_writepage to find the next
		 * contiguous region of logical blocks that need
		 * blocks to be allocated by ext4.  We don't actually
		 * submit the blocks for I/O here, even though
		 * write_cache_pages thinks it will, and will set the
		 * pages as clean for write before calling
		 * __mpage_da_writepage().
		 */
		mpd.b_size = 0;
		mpd.b_state = 0;
		mpd.b_blocknr = 0;
		mpd.first_page = 0;
		mpd.next_page = 0;
		mpd.io_done = 0;
		mpd.pages_written = 0;
		mpd.retval = 0;
		ret = write_cache_pages(mapping, wbc, __mpage_da_writepage,
					&mpd);
		/*
		 * If we have a contiguous extent of pages and we
		 * haven't done the I/O yet, map the blocks and submit
		 * them for I/O.
		 */
		if (!mpd.io_done && mpd.next_page != mpd.first_page) {
			if (mpage_da_map_blocks(&mpd) == 0)
				mpage_da_submit_io(&mpd);
			mpd.io_done = 1;
			ret = MPAGE_DA_EXTENT_TAIL;
		}
		wbc->nr_to_write -= mpd.pages_written;

		ext4_journal_stop(handle);

		if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
			/* commit the transaction which would
			 * free blocks released in the transaction
			 * and try again
			 */
			jbd2_journal_force_commit_nested(sbi->s_journal);
			wbc->pages_skipped = pages_skipped;
			ret = 0;
		} else if (ret == MPAGE_DA_EXTENT_TAIL) {
			/*
			 * got one extent; now try with the
			 * rest of the pages
			 */
			pages_written += mpd.pages_written;
			wbc->pages_skipped = pages_skipped;
			ret = 0;
			io_done = 1;
		} else if (wbc->nr_to_write)
			/*
			 * There is no more writeout needed,
			 * or we requested a nonblocking writeout
			 * and we found the device congested
			 */
			break;
	}
	if (!io_done && !cycled) {
		cycled = 1;
		index = 0;
		wbc->range_start = index << PAGE_CACHE_SHIFT;
		wbc->range_end = mapping->writeback_index - 1;
		goto retry;
	}
	if (pages_skipped != wbc->pages_skipped)
		printk(KERN_EMERG "This should not happen leaving %s "
				"with nr_to_write = %ld ret = %d\n",
				__func__, wbc->nr_to_write, ret);

	/* Update index */
	index += pages_written;
	wbc->range_cyclic = range_cyclic;
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		/*
		 * set the writeback_index so that range_cyclic
		 * mode will write it back later
		 */
		mapping->writeback_index = index;

out_writepages:
	if (!no_nrwrite_index_update)
		wbc->no_nrwrite_index_update = 0;
	wbc->nr_to_write -= nr_to_writebump;
	trace_mark(ext4_da_writepage_result,
		   "dev %s ino %lu ret %d pages_written %d "
		   "pages_skipped %ld congestion %d "
		   "more_io %d no_nrwrite_index_update %d",
		   inode->i_sb->s_id, inode->i_ino, ret,
		   pages_written, wbc->pages_skipped,
		   wbc->encountered_congestion, wbc->more_io,
		   wbc->no_nrwrite_index_update);
	return ret;
}

#define FALL_BACK_TO_NONDELALLOC 1
static
int ext4_nonda_switch(struct super_block *sb)
{
	s64 free_blocks, dirty_blocks;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/*
	 * switch to non-delalloc mode if we are running low
	 * on free blocks. The free block accounting via percpu
	 * counters can get slightly wrong with percpu_counter_batch getting
	 * accumulated on each CPU without updating global counters.
	 * Delalloc needs accurate free block accounting, so switch
	 * to non-delalloc when we are near the error range.
	 */
	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter);
	if (2 * free_blocks < 3 * dirty_blocks ||
		free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) {
		/*
		 * free block count is less than 150% of dirty blocks,
		 * or free blocks are less than the watermark
		 */
		return 1;
	}
	return 0;
}

static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	int ret, retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;
	struct inode *inode = mapping->host;
	handle_t *handle;

	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (ext4_nonda_switch(inode->i_sb)) {
		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
		return ext4_write_begin(file, mapping, pos,
					len, flags, pagep, fsdata);
	}
	*fsdata = (void *)0;

	trace_mark(ext4_da_write_begin,
		   "dev %s ino %lu pos %llu len %u flags %u",
		   inode->i_sb->s_id, inode->i_ino,
		   (unsigned long long) pos, len, flags);
retry:
	/*
	 * With delayed allocation, we don't log the i_disksize update
	 * if there is delayed block allocation. But we still need
	 * to journal the i_disksize update if we write to the end
	 * of a file which has an already mapped buffer.
	 */
	handle = ext4_journal_start(inode, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}
	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				ext4_da_get_block_prep);
	if (ret < 0) {
		unlock_page(page);
		ext4_journal_stop(handle);
		page_cache_release(page);
		/*
		 * block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 */
		if (pos + len > inode->i_size)
			vmtruncate(inode, inode->i_size);
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}

/*
 * Check if we should update i_disksize
 * when writing to the end of the file, but no block allocation is required
 */
static int ext4_da_should_update_i_disksize(struct page *page,
					 unsigned long offset)
{
	struct buffer_head *bh;
	struct inode *inode = page->mapping->host;
	unsigned int idx;
	int i;

	bh = page_buffers(page);
	idx = offset >> inode->i_blkbits;

	for (i = 0; i < idx; i++)
		bh = bh->b_this_page;

	if (!buffer_mapped(bh) || (buffer_delay(bh)))
		return 0;
	return 1;
}

static int ext4_da_write_end(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	handle_t *handle = ext4_journal_current_handle();
	loff_t new_i_size;
	unsigned long start, end;
	int write_mode = (int)(unsigned long)fsdata;

	if (write_mode == FALL_BACK_TO_NONDELALLOC) {
		if (ext4_should_order_data(inode)) {
			return ext4_ordered_write_end(file, mapping, pos,
					len, copied, page, fsdata);
		} else if (ext4_should_writeback_data(inode)) {
			return ext4_writeback_write_end(file, mapping, pos,
					len, copied, page, fsdata);
		} else {
			BUG();
		}
	}

	trace_mark(ext4_da_write_end,
		   "dev %s ino %lu pos %llu len %u copied %u",
		   inode->i_sb->s_id, inode->i_ino,
		   (unsigned long long) pos, len, copied);
	start = pos & (PAGE_CACHE_SIZE - 1);
	end = start + copied - 1;

	/*
	 * generic_write_end() will run mark_inode_dirty() if i_size
	 * changes.  So let's piggyback the i_disksize mark_inode_dirty
	 * into that.
	 */

	new_i_size = pos + copied;
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		if (ext4_da_should_update_i_disksize(page, end)) {
			down_write(&EXT4_I(inode)->i_data_sem);
			if (new_i_size > EXT4_I(inode)->i_disksize) {
				/*
				 * Updating i_disksize when extending file
				 * without needing block allocation
				 */
				if (ext4_should_order_data(inode))
					ret = ext4_jbd2_file_inode(handle,
								   inode);

				EXT4_I(inode)->i_disksize = new_i_size;
			}
			up_write(&EXT4_I(inode)->i_data_sem);
			/* We need to mark the inode dirty even if
			 * new_i_size is less than inode->i_size
			 * but greater than i_disksize. (hint: delalloc)
			 */
			ext4_mark_inode_dirty(handle, inode);
		}
	}
	ret2 = generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);
	copied = ret2;
	if (ret2 < 0)
		ret = ret2;
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	return ret ? ret : copied;
}

static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
{
	/*
	 * Drop reserved blocks
	 */
	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		goto out;

	ext4_da_page_release_reservation(page, offset);

out:
	ext4_invalidatepage(page, offset);

	return;
}

/*
 * Force all delayed allocation blocks to be allocated for a given inode.
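 * If the inode has no outstanding data or metadata reservations there is
 * nothing left to allocate, so we can return without doing any I/O.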
 */
int ext4_alloc_da_blocks(struct inode *inode)
{
	if (!EXT4_I(inode)->i_reserved_data_blocks &&
	    !EXT4_I(inode)->i_reserved_meta_blocks)
		return 0;

	/*
	 * We do something simple for now.  The filemap_flush() will
	 * also start triggering a write of the data blocks, which is
	 * not strictly speaking necessary (and for users of
	 * laptop_mode, not even desirable).  However, to do otherwise
	 * would require replicating code paths in:
	 *
	 * ext4_da_writepages() ->
	 *    write_cache_pages() ---> (via passed in callback function)
	 *        __mpage_da_writepage() -->
	 *           mpage_add_bh_to_extent()
	 *           mpage_da_map_blocks()
	 *
	 * The problem is that write_cache_pages(), located in
	 * mm/page-writeback.c, marks pages clean in preparation for
	 * doing I/O, which is not desirable if we're not planning on
	 * doing I/O at all.
	 *
	 * We could call write_cache_pages(), and then redirty all of
	 * the pages by calling redirty_page_for_writepage() but that
	 * would be ugly in the extreme.  So instead we would need to
	 * replicate parts of the code in the above functions,
	 * simplifying them because we wouldn't actually intend to
	 * write out the pages, but rather only collect contiguous
	 * logical block extents, call the multi-block allocator, and
	 * then update the buffer heads with the block allocations.
	 *
	 * For now, though, we'll cheat by calling filemap_flush(),
	 * which will map the blocks, and start the I/O, but not
	 * actually wait for the I/O to complete.
	 */
	return filemap_flush(inode->i_mapping);
}

/*
 * bmap() is special.  It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
 * Naturally, this is dangerous if the block concerned is still in the
 * journal.  If somebody makes a swapfile on an ext4 data-journaling
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zeros written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
 */
static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	journal_t *journal;
	int err;

	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
			test_opt(inode->i_sb, DELALLOC)) {
		/*
		 * With delalloc we want to sync the file
		 * so that we can make sure we allocate
		 * blocks for the file
		 */
		filemap_write_and_wait(mapping);
	}

	if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
		/*
		 * This is a REALLY heavyweight approach, but the use of
		 * bmap on dirty files is expected to be extremely rare:
		 * only if we run lilo or swapon on a freshly made file
		 * do we expect this to happen.
		 *
		 * (bmap requires CAP_SYS_RAWIO so this does not
		 * represent an unprivileged user DOS attack --- we'd be
		 * in trouble if mortal users could trigger this path at
		 * will.)
		 *
		 * NB. EXT4_STATE_JDATA is not set on files other than
		 * regular files.
		 * If somebody wants to bmap a directory
		 * or symlink and gets confused because the buffer
		 * hasn't yet been flushed to disk, they deserve
		 * everything they get.
		 */

		EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
		journal = EXT4_JOURNAL(inode);
		jbd2_journal_lock_updates(journal);
		err = jbd2_journal_flush(journal);
		jbd2_journal_unlock_updates(journal);

		if (err)
			return 0;
	}

	return generic_block_bmap(mapping, block, ext4_get_block);
}

static int bget_one(handle_t *handle, struct buffer_head *bh)
{
	get_bh(bh);
	return 0;
}

static int bput_one(handle_t *handle, struct buffer_head *bh)
{
	put_bh(bh);
	return 0;
}

/*
 * Note that we don't need to start a transaction unless we're journaling data
 * because we should have holes filled from ext4_page_mkwrite(). We even don't
 * need to file the inode to the transaction's list in ordered mode because if
 * we are writing back data added by write(), the inode is already there and if
 * we are writing back data modified via mmap(), no one guarantees in which
 * transaction the data will hit the disk. In case we are journaling data, we
 * cannot start a transaction directly because transaction start ranks above
 * page lock so we have to do some magic.
 *
 * In all journaling modes block_write_full_page() will start the I/O.
 *
 * Problem:
 *
 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *		ext4_writepage()
 *
 * Similar for:
 *
 * ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
 *
 * Same applies to ext4_get_block().  We will deadlock on various things like
 * lock_journal and i_data_sem
 *
 * Setting PF_MEMALLOC here doesn't work - too many internal memory
 * allocations fail.
 *
 * 16May01: If we're reentered then journal_current_handle() will be
 *	    non-zero. We simply *return*.
 *
 * 1 July 2001: @@@ FIXME:
 *   In journalled data mode, a data buffer may be metadata against the
 *   current transaction.  But the same file is part of a shared mapping
 *   and someone does a writepage() on it.
 *
 *   We will move the buffer onto the async_data list, but *after* it has
 *   been dirtied. So there's a small window where we have dirty data on
 *   BJ_Metadata.
 *
 *   Note that this only applies to the last partial page in the file.  The
 *   bit which block_write_full_page() uses prepare/commit for.  (That's
 *   broken code anyway: it's wrong for msync()).
 *
 *   It's a rare case: affects the final partial page, for journalled data
 *   where the file is subject to both write() and writepage() in the same
 *   transaction.  To fix it we'll need a custom block_write_full_page().
 *   We'll probably need that anyway for journalling writepage() output.
 *
 * We don't honour synchronous mounts for writepage().  That would be
 * disastrous.  Any write() or metadata operation will sync the fs for
 * us.
 *
 */
static int __ext4_normal_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;

	if (test_opt(inode->i_sb, NOBH))
		return nobh_writepage(page,
					ext4_normal_get_block_write, wbc);
	else
		return block_write_full_page(page,
						ext4_normal_get_block_write,
						wbc);
}

static int ext4_normal_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	loff_t size = i_size_read(inode);
	loff_t len;

	trace_mark(ext4_normal_writepage,
		   "dev %s ino %lu page_index %lu",
		   inode->i_sb->s_id, inode->i_ino, page->index);
	J_ASSERT(PageLocked(page));
	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

	if (page_has_buffers(page)) {
		/* if page has buffers they should all be mapped
		 * and allocated. If there are no buffers attached
		 * to the page we know the page is dirty but it lost
		 * buffers. That means that at some moment in time
		 * after write_begin() / write_end() has been called
		 * all buffers have been clean and thus they must have been
		 * written at least once. So they are all mapped and we can
		 * happily proceed with mapping them and writing the page.
		 */
		BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
					ext4_bh_unmapped_or_delay));
	}

	if (!ext4_journal_current_handle())
		return __ext4_normal_writepage(page, wbc);

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

static int __ext4_journalled_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct buffer_head *page_bufs;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
					ext4_normal_get_block_write);
	if (ret != 0)
		goto out_unlock;

	page_bufs = page_buffers(page);
	walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE, NULL,
								bget_one);
	/* As soon as we unlock the page, it can go away, but we have
	 * references to buffers so we are safe */
	unlock_page(page);

	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	ret = walk_page_buffers(handle, page_bufs, 0,
			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);

	err = walk_page_buffers(handle, page_bufs, 0,
				PAGE_CACHE_SIZE, NULL, write_end_fn);
	if (ret == 0)
		ret = err;
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;

	walk_page_buffers(handle, page_bufs, 0,
				PAGE_CACHE_SIZE, NULL, bput_one);
	EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
	goto out;

out_unlock:
	unlock_page(page);
out:
	return ret;
}

static int ext4_journalled_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	loff_t size = i_size_read(inode);
	loff_t len;

	trace_mark(ext4_journalled_writepage,
		   "dev %s ino %lu page_index %lu",
		   inode->i_sb->s_id, inode->i_ino, page->index);
	J_ASSERT(PageLocked(page));
	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

	if (page_has_buffers(page)) {
		/* if page has buffers they should all be mapped
		 * and allocated. If there are no buffers attached
		 * to the page we know the page is dirty but it lost
		 * buffers. That means that at some moment in time
		 * after write_begin() / write_end() has been called
		 * all buffers have been clean and thus they must have been
		 * written at least once. So they are all mapped and we can
		 * happily proceed with mapping them and writing the page.
		 */
		BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
					ext4_bh_unmapped_or_delay));
	}

	if (ext4_journal_current_handle())
		goto no_write;

	if (PageChecked(page)) {
		/*
		 * It's mmapped pagecache.  Add buffers and journal it.  There
		 * doesn't seem much point in redirtying the page here.
		 */
		ClearPageChecked(page);
		return __ext4_journalled_writepage(page, wbc);
	} else {
		/*
		 * It may be a page full of checkpoint-mode buffers.  We don't
		 * really know unless we go poke around in the buffer_heads.
		 * But block_write_full_page will do the right thing.
		 */
		return block_write_full_page(page,
						ext4_normal_get_block_write,
						wbc);
	}
no_write:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

static int ext4_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, ext4_get_block);
}

static int
ext4_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset)
{
	journal_t *journal = EXT4_JOURNAL(page->mapping->host);

	/*
	 * If it's a full truncate we just forget about the pending dirtying
	 */
	if (offset == 0)
		ClearPageChecked(page);

	if (journal)
		jbd2_journal_invalidatepage(journal, page, offset);
	else
		block_invalidatepage(page, offset);
}

static int ext4_releasepage(struct page *page, gfp_t wait)
{
	journal_t *journal = EXT4_JOURNAL(page->mapping->host);

	WARN_ON(PageChecked(page));
	if (!page_has_buffers(page))
		return 0;
	if (journal)
		return jbd2_journal_try_to_free_buffers(journal, page, wait);
	else
		return try_to_free_buffers(page);
}

/*
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list.  So recovery will truncate it back to the original size
 * if the machine crashes during the write.
 *
 * If the O_DIRECT write is instantiating holes inside i_size and the machine
 * crashes then stale disk data _may_ be exposed inside the file. But current
 * VFS code falls back into buffered path in that case so we are safe.
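 *
 * For the size-extending case, the function below adds the inode to the
 * orphan list before the write and removes it again afterwards, updating
 * i_size/i_disksize only once the direct I/O has returned a byte count.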
 */
static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ext4_inode_info *ei = EXT4_I(inode);
	handle_t *handle;
	ssize_t ret;
	int orphan = 0;
	size_t count = iov_length(iov, nr_segs);

	if (rw == WRITE) {
		loff_t final_size = offset + count;

		if (final_size > inode->i_size) {
			/* Credits for sb + inode write */
			handle = ext4_journal_start(inode, 2);
			if (IS_ERR(handle)) {
				ret = PTR_ERR(handle);
				goto out;
			}
			ret = ext4_orphan_add(handle, inode);
			if (ret) {
				ext4_journal_stop(handle);
				goto out;
			}
			orphan = 1;
			ei->i_disksize = inode->i_size;
			ext4_journal_stop(handle);
		}
	}

	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				 offset, nr_segs,
				 ext4_get_block, NULL);

	if (orphan) {
		int err;

		/* Credits for sb + inode write */
		handle = ext4_journal_start(inode, 2);
		if (IS_ERR(handle)) {
			/* This is really bad luck. We've written the data
			 * but cannot extend i_size. Bail out and pretend
			 * the write failed... */
			ret = PTR_ERR(handle);
			goto out;
		}
		if (inode->i_nlink)
			ext4_orphan_del(handle, inode);
		if (ret > 0) {
			loff_t end = offset + ret;
			if (end > inode->i_size) {
				ei->i_disksize = end;
				i_size_write(inode, end);
				/*
				 * We're going to return a positive `ret'
				 * here due to non-zero-length I/O, so there's
				 * no way of reporting error returns from
				 * ext4_mark_inode_dirty() to userspace.  So
				 * ignore it.
				 */
				ext4_mark_inode_dirty(handle, inode);
			}
		}
		err = ext4_journal_stop(handle);
		if (ret == 0)
			ret = err;
	}
out:
	return ret;
}

/*
 * Pages can be marked dirty completely asynchronously from ext4's journalling
 * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
 * much here because ->set_page_dirty is called under VFS locks.  The page is
 * not necessarily locked.
 *
 * We cannot just dirty the page and leave attached buffers clean, because the
 * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
 * or jbddirty because all the journalling code will explode.
 *
 * So what we do is to mark the page "pending dirty" and next time writepage
 * is called, propagate that into the buffers appropriately.
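 *
 * PageChecked serves as that "pending dirty" flag here; see
 * ext4_journalled_writepage(), which tests and clears it.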
 */
static int ext4_journalled_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations ext4_ordered_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_normal_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_ordered_write_end,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= ext4_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
};

static const struct address_space_operations ext4_writeback_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_normal_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_writeback_write_end,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= ext4_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
};

static const struct address_space_operations ext4_journalled_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_journalled_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_journalled_write_end,
	.set_page_dirty		= ext4_journalled_set_page_dirty,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_invalidatepage,
	.releasepage		= ext4_releasepage,
	.is_partially_uptodate	= block_is_partially_uptodate,
};

static const struct address_space_operations ext4_da_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_da_writepage,
	.writepages		= ext4_da_writepages,
	.sync_page		= block_sync_page,
	.write_begin		= ext4_da_write_begin,
	.write_end		= ext4_da_write_end,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_da_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= ext4_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
};

void ext4_set_aops(struct inode *inode)
{
	if (ext4_should_order_data(inode) &&
		test_opt(inode->i_sb, DELALLOC))
		inode->i_mapping->a_ops = &ext4_da_aops;
	else if (ext4_should_order_data(inode))
		inode->i_mapping->a_ops = &ext4_ordered_aops;
	else if (ext4_should_writeback_data(inode) &&
		 test_opt(inode->i_sb, DELALLOC))
		inode->i_mapping->a_ops = &ext4_da_aops;
	else if (ext4_should_writeback_data(inode))
		inode->i_mapping->a_ops = &ext4_writeback_aops;
	else
		inode->i_mapping->a_ops = &ext4_journalled_aops;
}

/*
 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
 * up to the end of the block which corresponds to `from'.
 * This is required during truncate.  We need to physically zero the tail end
 * of that block so it doesn't yield old data if the file is later grown.
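 *
 * For example (illustrative numbers only): with 4KiB blocks, truncating a
 * file to 5000 bytes means zeroing bytes 904..4095 of the block containing
 * offset 5000, i.e. length = blocksize - (5000 mod 4096) = 3192 bytes.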
 */
int ext4_block_truncate_page(handle_t *handle,
		struct address_space *mapping, loff_t from)
{
	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize, length, pos;
	ext4_lblk_t iblock;
	struct inode *inode = mapping->host;
	struct buffer_head *bh;
	struct page *page;
	int err = 0;

	page = grab_cache_page(mapping, from >> PAGE_CACHE_SHIFT);
	if (!page)
		return -EINVAL;

	blocksize = inode->i_sb->s_blocksize;
	length = blocksize - (offset & (blocksize - 1));
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	/*
	 * For the "nobh" option, we can only work if we don't need to
	 * read-in the page - otherwise we create buffers to do the IO.
	 */
	if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
	     ext4_should_writeback_data(inode) && PageUptodate(page)) {
		zero_user(page, offset, length);
		set_page_dirty(page);
		goto unlock;
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (buffer_freed(bh)) {
		BUFFER_TRACE(bh, "freed: skip");
		goto unlock;
	}

	if (!buffer_mapped(bh)) {
		BUFFER_TRACE(bh, "unmapped");
		ext4_get_block(inode, iblock, bh, 0);
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh)) {
			BUFFER_TRACE(bh, "still unmapped");
			goto unlock;
		}
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	if (ext4_should_journal_data(inode)) {
		BUFFER_TRACE(bh, "get write access");
		err = ext4_journal_get_write_access(handle, bh);
		if (err)
			goto unlock;
	}

	zero_user(page, offset, length);

	BUFFER_TRACE(bh, "zeroed end of block");

	err = 0;
	if (ext4_should_journal_data(inode)) {
		err = ext4_handle_dirty_metadata(handle, inode, bh);
	} else {
		if (ext4_should_order_data(inode))
			err = ext4_jbd2_file_inode(handle, inode);
		mark_buffer_dirty(bh);
	}

unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}

/*
 * Probably it should be a library function... search for first non-zero word
 * or memcmp with zero_page, whatever is better for a particular architecture.
 * Linus?
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

/**
 *	ext4_find_shared - find the indirect blocks for partial truncation.
 *	@inode:	  inode in question
 *	@depth:	  depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext4_block_to_path)
 *	@chain:	  place to store the pointers to partial indirect blocks
 *	@top:	  place to store the (detached) top of branch
 *
 *	This is a helper function used by ext4_truncate().
 *
 *	When we do truncate() we may have to clean the ends of several
 *	indirect blocks but leave the blocks themselves alive. A block is
 *	partially truncated if some data below the new i_size is referred
 *	to from it (and it is on the path to the first completely truncated
 *	data block, indeed).  We have to free the top of that path along
 *	with everything to the right of the path. Since no allocation
 *	past the truncation point is possible until ext4_truncate()
 *	finishes, we may safely do the latter, but top of branch may
 *	require special attention - pageout below the truncation point
 *	might try to populate it.
 *
 *	We atomically detach the top of branch from the tree, store the
 *	block number of its root in *@top, pointers to buffer_heads of
 *	partially truncated blocks - in @chain[].bh and pointers to
 *	their last elements that should not be removed - in
 *	@chain[].p. Return value is the pointer to last filled element
 *	of @chain.
 *
 *	The work left to the caller is the actual freeing of subtrees:
 *		a) free the subtree starting from *@top
 *		b) free the subtrees whose roots are stored in
 *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 *		c) free the subtrees growing from the inode past the @chain[0].
 *			(no partially truncated stuff there).  */

static Indirect *ext4_find_shared(struct inode *inode, int depth,
			ext4_lblk_t offsets[4], Indirect chain[4], __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext4.  Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}
	/* Writer: end */

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
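 * (A zero pointer inside [first, last) is just a hole and is skipped, so
 * only `count' of the (last - first) slots actually name disk blocks.)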
 */
static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
		struct buffer_head *bh, ext4_fsblk_t block_to_free,
		unsigned long count, __le32 *first, __le32 *last)
{
	__le32 *p;

	if (try_to_extend_transaction(handle, inode)) {
		if (bh) {
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			ext4_handle_dirty_metadata(handle, inode, bh);
		}
		ext4_mark_inode_dirty(handle, inode);
		ext4_journal_test_restart(handle, inode);
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			ext4_journal_get_write_access(handle, bh);
		}
	}

	/*
	 * Any buffers which are on the journal will be in memory. We
	 * find them on the hash table so jbd2_journal_revoke() will
	 * run jbd2_journal_forget() on them. We've already detached
	 * each block from the file, so bforget() in
	 * jbd2_journal_forget() should be safe.
	 *
	 * AKPM: turn on bforget in jbd2_journal_forget()!!!
	 */
	for (p = first; p < last; p++) {
		u32 nr = le32_to_cpu(*p);
		if (nr) {
			struct buffer_head *tbh;

			*p = 0;
			tbh = sb_find_get_block(inode->i_sb, nr);
			ext4_forget(handle, 0, inode, tbh, nr);
		}
	}

	ext4_free_blocks(handle, inode, block_to_free, count, 0);
}

/**
 * ext4_free_data - free a list of data blocks
 * @handle: handle for this transaction
 * @inode: inode we are dealing with
 * @this_bh: indirect buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: points immediately past the end of array
 *
 * We are freeing all blocks referred to from that array (numbers are stored
 * as little-endian 32-bit) and updating @inode->i_blocks appropriately.
 *
 * We accumulate contiguous runs of blocks to free. Conveniently, if these
 * blocks are contiguous then releasing them at one time will only affect one
 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
 * actually use a lot of journal space.
 *
 * @this_bh will be %NULL if @first and @last point into the inode's direct
 * block pointers.
 */
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
	unsigned long count = 0;	   /* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;	   /* Pointer into inode/ind
					      corresponding to
					      block_to_free */
	ext4_fsblk_t nr;		   /* Current block # */
	__le32 *p;			   /* Pointer into inode/ind
					      for current block */
	int err;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them.
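		 * Freeing the blocks while their pointers remained on
		 * disk could, after a crash, leave the inode pointing
		 * at blocks that have since been reallocated.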
		 */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				ext4_clear_blocks(handle, inode, this_bh,
						  block_to_free,
						  count, block_to_free_p, p);
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	if (count > 0)
		ext4_clear_blocks(handle, inode, this_bh, block_to_free,
				  count, block_to_free_p, p);

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at this
		 * point. However, if the data is corrupted and an indirect
		 * block pointed to itself, it would have been detached when
		 * the block was cleared. Check for this instead of OOPSing.
		 */
		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
			ext4_handle_dirty_metadata(handle, inode, this_bh);
		else
			ext4_error(inode->i_sb, __func__,
				   "circular indirect block detected, "
				   "inode=%lu, block=%llu",
				   inode->i_ino,
				   (unsigned long long) this_bh->b_blocknr);
	}
}

/**
 * ext4_free_branches - free an array of branches
 * @handle: JBD handle for this transaction
 * @inode: inode we are dealing with
 * @parent_bh: the buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: pointer immediately past the end of array
 * @depth: depth of the branches to free
 *
 * We are freeing all blocks referred to from these branches (numbers are
 * stored as little-endian 32-bit) and updating @inode->i_blocks
 * appropriately.
 */
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				ext4_error(inode->i_sb, "ext4_free_branches",
					   "Read failure, inode=%lu, block=%llu",
					   inode->i_ino, nr);
				continue;
			}

			/* This zaps the entire block. Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					(__le32 *) bh->b_data,
					(__le32 *) bh->b_data + addr_per_block,
					depth);

			/*
			 * We've probably journalled the indirect block several
			 * times during the truncate. But it's no longer
			 * needed and we now drop it from the transaction via
			 * jbd2_journal_revoke().
			 *
			 * That's easy if it's exclusively part of this
			 * transaction. But if it's part of the committing
			 * transaction then jbd2_journal_forget() will simply
			 * brelse() it. That means that if the underlying
			 * block is reallocated in ext4_get_block(),
			 * unmap_underlying_metadata() will find this block
			 * and will try to get rid of it. damn, damn.
			 *
			 * If this block has already been committed to the
			 * journal, a revoke record will be written. And
			 * revoke records must be emitted *before* clearing
			 * this block's bit in the bitmaps.
			 */
			ext4_forget(handle, 1, inode, bh, bh->b_blocknr);

			/*
			 * Everything below this pointer has been
			 * released. Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the updating of the
			 * bitmap block which owns it. So make some room in
			 * the journal.
			 *
			 * We zero the parent pointer *after* freeing its
			 * pointee in the bitmaps, so if extend_transaction()
			 * for some reason fails to put the bitmap changes and
			 * the release into the same transaction, recovery
			 * will merely complain about releasing a free block,
			 * rather than leaking blocks.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext4_mark_inode_dirty(handle, inode);
				ext4_journal_test_restart(handle, inode);
			}

			ext4_free_blocks(handle, inode, nr, 1, 1);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
								   parent_bh)){
					*p = 0;
					BUFFER_TRACE(parent_bh,
						"call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}

int ext4_can_truncate(struct inode *inode)
{
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return 0;
	if (S_ISREG(inode->i_mode))
		return 1;
	if (S_ISDIR(inode->i_mode))
		return 1;
	if (S_ISLNK(inode->i_mode))
		return !ext4_inode_is_fast_symlink(inode);
	return 0;
}

/*
 * ext4_truncate()
 *
 * We block out ext4_get_block() block instantiations across the entire
 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
 * simultaneously on behalf of the same inode.
 *
 * As we work through the truncate and commit bits of it to the journal there
 * is one core, guiding principle: the file's tree must always be consistent on
 * disk. We must be able to restart the truncate after a crash.
 *
 * The file's tree may be transiently inconsistent in memory (although it
 * probably isn't), but whenever we close off and commit a journal transaction,
 * the contents of (the filesystem + the journal) must be consistent and
 * restartable. It's pretty simple, really: bottom up, right to left (although
 * left-to-right works OK too).
 *
 * Note that at recovery time, journal replay occurs *before* the restart of
 * truncate against the orphan inode list.
 *
 * The committed inode has the new, desired i_size (which is the same as
 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
 * that this inode's truncate did not complete and it will again call
 * ext4_truncate() to have another go. So there will be instantiated blocks
 * to the right of the truncation point in a crashed ext4 filesystem.
But 3931 * that's fine - as long as they are linked from the inode, the post-crash 3932 * ext4_truncate() run will find them and release them. 3933 */ 3934 void ext4_truncate(struct inode *inode) 3935 { 3936 handle_t *handle; 3937 struct ext4_inode_info *ei = EXT4_I(inode); 3938 __le32 *i_data = ei->i_data; 3939 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); 3940 struct address_space *mapping = inode->i_mapping; 3941 ext4_lblk_t offsets[4]; 3942 Indirect chain[4]; 3943 Indirect *partial; 3944 __le32 nr = 0; 3945 int n; 3946 ext4_lblk_t last_block; 3947 unsigned blocksize = inode->i_sb->s_blocksize; 3948 3949 if (!ext4_can_truncate(inode)) 3950 return; 3951 3952 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) 3953 ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE; 3954 3955 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { 3956 ext4_ext_truncate(inode); 3957 return; 3958 } 3959 3960 handle = start_transaction(inode); 3961 if (IS_ERR(handle)) 3962 return; /* AKPM: return what? */ 3963 3964 last_block = (inode->i_size + blocksize-1) 3965 >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); 3966 3967 if (inode->i_size & (blocksize - 1)) 3968 if (ext4_block_truncate_page(handle, mapping, inode->i_size)) 3969 goto out_stop; 3970 3971 n = ext4_block_to_path(inode, last_block, offsets, NULL); 3972 if (n == 0) 3973 goto out_stop; /* error */ 3974 3975 /* 3976 * OK. This truncate is going to happen. We add the inode to the 3977 * orphan list, so that if this truncate spans multiple transactions, 3978 * and we crash, we will resume the truncate when the filesystem 3979 * recovers. It also marks the inode dirty, to catch the new size. 3980 * 3981 * Implication: the file must always be in a sane, consistent 3982 * truncatable state while each transaction commits. 3983 */ 3984 if (ext4_orphan_add(handle, inode)) 3985 goto out_stop; 3986 3987 /* 3988 * From here we block out all ext4_get_block() callers who want to 3989 * modify the block allocation tree. 3990 */ 3991 down_write(&ei->i_data_sem); 3992 3993 ext4_discard_preallocations(inode); 3994 3995 /* 3996 * The orphan list entry will now protect us from any crash which 3997 * occurs before the truncate completes, so it is now safe to propagate 3998 * the new, shorter inode size (held for now in i_size) into the 3999 * on-disk inode. We do this via i_disksize, which is the value which 4000 * ext4 *really* writes onto the disk inode. 4001 */ 4002 ei->i_disksize = inode->i_size; 4003 4004 if (n == 1) { /* direct blocks */ 4005 ext4_free_data(handle, inode, NULL, i_data+offsets[0], 4006 i_data + EXT4_NDIR_BLOCKS); 4007 goto do_indirects; 4008 } 4009 4010 partial = ext4_find_shared(inode, n, offsets, chain, &nr); 4011 /* Kill the top of shared branch (not detached) */ 4012 if (nr) { 4013 if (partial == chain) { 4014 /* Shared branch grows from the inode */ 4015 ext4_free_branches(handle, inode, NULL, 4016 &nr, &nr+1, (chain+n-1) - partial); 4017 *partial->p = 0; 4018 /* 4019 * We mark the inode dirty prior to restart, 4020 * and prior to stop. No need for it here. 
4021 */ 4022 } else { 4023 /* Shared branch grows from an indirect block */ 4024 BUFFER_TRACE(partial->bh, "get_write_access"); 4025 ext4_free_branches(handle, inode, partial->bh, 4026 partial->p, 4027 partial->p+1, (chain+n-1) - partial); 4028 } 4029 } 4030 /* Clear the ends of indirect blocks on the shared branch */ 4031 while (partial > chain) { 4032 ext4_free_branches(handle, inode, partial->bh, partial->p + 1, 4033 (__le32*)partial->bh->b_data+addr_per_block, 4034 (chain+n-1) - partial); 4035 BUFFER_TRACE(partial->bh, "call brelse"); 4036 brelse (partial->bh); 4037 partial--; 4038 } 4039 do_indirects: 4040 /* Kill the remaining (whole) subtrees */ 4041 switch (offsets[0]) { 4042 default: 4043 nr = i_data[EXT4_IND_BLOCK]; 4044 if (nr) { 4045 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1); 4046 i_data[EXT4_IND_BLOCK] = 0; 4047 } 4048 case EXT4_IND_BLOCK: 4049 nr = i_data[EXT4_DIND_BLOCK]; 4050 if (nr) { 4051 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2); 4052 i_data[EXT4_DIND_BLOCK] = 0; 4053 } 4054 case EXT4_DIND_BLOCK: 4055 nr = i_data[EXT4_TIND_BLOCK]; 4056 if (nr) { 4057 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3); 4058 i_data[EXT4_TIND_BLOCK] = 0; 4059 } 4060 case EXT4_TIND_BLOCK: 4061 ; 4062 } 4063 4064 up_write(&ei->i_data_sem); 4065 inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 4066 ext4_mark_inode_dirty(handle, inode); 4067 4068 /* 4069 * In a multi-transaction truncate, we only make the final transaction 4070 * synchronous 4071 */ 4072 if (IS_SYNC(inode)) 4073 ext4_handle_sync(handle); 4074 out_stop: 4075 /* 4076 * If this was a simple ftruncate(), and the file will remain alive 4077 * then we need to clear up the orphan record which we created above. 4078 * However, if this was a real unlink then we were called by 4079 * ext4_delete_inode(), and we allow that function to clean up the 4080 * orphan info for us. 4081 */ 4082 if (inode->i_nlink) 4083 ext4_orphan_del(handle, inode); 4084 4085 ext4_journal_stop(handle); 4086 } 4087 4088 /* 4089 * ext4_get_inode_loc returns with an extra refcount against the inode's 4090 * underlying buffer_head on success. If 'in_mem' is true, we have all 4091 * data in memory that is needed to recreate the on-disk version of this 4092 * inode. 
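 *
 * (A worked example with hypothetical numbers: with 4KB blocks and
 * 256-byte inodes there are 16 inodes per block, so the inode at
 * inode_offset 100 within its group lives at block inode_table + 100/16
 * = inode_table + 6, at byte offset (100 % 16) * 256 = 1024 within that
 * block.)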
 */
static int __ext4_get_inode_loc(struct inode *inode,
				struct ext4_iloc *iloc, int in_mem)
{
	struct ext4_group_desc *gdp;
	struct buffer_head *bh;
	struct super_block *sb = inode->i_sb;
	ext4_fsblk_t block;
	int inodes_per_block, inode_offset;

	iloc->bh = NULL;
	if (!ext4_valid_inum(sb, inode->i_ino))
		return -EIO;

	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
	if (!gdp)
		return -EIO;

	/*
	 * Figure out the offset within the block group inode table
	 */
	inodes_per_block = (EXT4_BLOCK_SIZE(sb) / EXT4_INODE_SIZE(sb));
	inode_offset = ((inode->i_ino - 1) %
			EXT4_INODES_PER_GROUP(sb));
	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);

	bh = sb_getblk(sb, block);
	if (!bh) {
		ext4_error(sb, "ext4_get_inode_loc", "unable to read "
			   "inode block - inode=%lu, block=%llu",
			   inode->i_ino, block);
		return -EIO;
	}
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);

		/*
		 * If the buffer has the write error flag, we have failed
		 * to write out another inode in the same block. In this
		 * case, we don't re-read the block: a read might succeed
		 * but would only bring back stale inode data from disk.
		 */
		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
			set_buffer_uptodate(bh);

		if (buffer_uptodate(bh)) {
			/* someone brought it uptodate while we waited */
			unlock_buffer(bh);
			goto has_buffer;
		}

		/*
		 * If we have all information of the inode in memory and this
		 * is the only valid inode in the block, we need not read the
		 * block.
		 */
		if (in_mem) {
			struct buffer_head *bitmap_bh;
			int i, start;

			start = inode_offset & ~(inodes_per_block - 1);

			/* Is the inode bitmap in cache? */
			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
			if (!bitmap_bh)
				goto make_io;

			/*
			 * If the inode bitmap isn't in cache then the
			 * optimisation may end up performing two reads instead
			 * of one, so skip it.
			 */
			if (!buffer_uptodate(bitmap_bh)) {
				brelse(bitmap_bh);
				goto make_io;
			}
			for (i = start; i < start + inodes_per_block; i++) {
				if (i == inode_offset)
					continue;
				if (ext4_test_bit(i, bitmap_bh->b_data))
					break;
			}
			brelse(bitmap_bh);
			if (i == start + inodes_per_block) {
				/* all other inodes are free, so skip I/O */
				memset(bh->b_data, 0, bh->b_size);
				set_buffer_uptodate(bh);
				unlock_buffer(bh);
				goto has_buffer;
			}
		}

make_io:
		/*
		 * If we need to do any I/O, try to readahead some extra
		 * blocks from the inode table.
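		 * The window below is block rounded down to a multiple
		 * of s_inode_readahead_blks (a power of two), clamped so
		 * that it neither starts before the inode table nor
		 * extends past its in-use portion. For example
		 * (hypothetical numbers), with s_inode_readahead_blks ==
		 * 32 and block == 1037, b starts at 1024 and we read
		 * ahead up to 32 table blocks from there.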
		 */
		if (EXT4_SB(sb)->s_inode_readahead_blks) {
			ext4_fsblk_t b, end, table;
			unsigned num;

			table = ext4_inode_table(sb, gdp);
			/* s_inode_readahead_blks is always a power of 2 */
			b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
			if (table > b)
				b = table;
			end = b + EXT4_SB(sb)->s_inode_readahead_blks;
			num = EXT4_INODES_PER_GROUP(sb);
			if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				       EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
				num -= ext4_itable_unused_count(sb, gdp);
			table += num / inodes_per_block;
			if (end > table)
				end = table;
			while (b <= end)
				sb_breadahead(sb, b++);
		}

		/*
		 * There are other valid inodes in the buffer, this inode
		 * has in-inode xattrs, or we don't have this inode in memory.
		 * Read the block from disk.
		 */
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ_META, bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			ext4_error(sb, __func__,
				   "unable to read inode block - inode=%lu, "
				   "block=%llu", inode->i_ino, block);
			brelse(bh);
			return -EIO;
		}
	}
has_buffer:
	iloc->bh = bh;
	return 0;
}

int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
{
	/* We have all inode data except xattrs in memory here. */
	return __ext4_get_inode_loc(inode, iloc,
		!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
}

void ext4_set_inode_flags(struct inode *inode)
{
	unsigned int flags = EXT4_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
	if (flags & EXT4_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & EXT4_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & EXT4_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & EXT4_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & EXT4_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
}

/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
void ext4_get_inode_flags(struct ext4_inode_info *ei)
{
	unsigned int flags = ei->vfs_inode.i_flags;

	ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
			EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
	if (flags & S_SYNC)
		ei->i_flags |= EXT4_SYNC_FL;
	if (flags & S_APPEND)
		ei->i_flags |= EXT4_APPEND_FL;
	if (flags & S_IMMUTABLE)
		ei->i_flags |= EXT4_IMMUTABLE_FL;
	if (flags & S_NOATIME)
		ei->i_flags |= EXT4_NOATIME_FL;
	if (flags & S_DIRSYNC)
		ei->i_flags |= EXT4_DIRSYNC_FL;
}

static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
				  struct ext4_inode_info *ei)
{
	blkcnt_t i_blocks;
	struct inode *inode = &(ei->vfs_inode);
	struct super_block *sb = inode->i_sb;

	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
		/* we are using the combined 48 bit field */
		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
					le32_to_cpu(raw_inode->i_blocks_lo);
		if (ei->i_flags & EXT4_HUGE_FILE_FL) {
			/* i_blocks is counted in units of file system blocks */
			return i_blocks << (inode->i_blkbits - 9);
		} else {
			return i_blocks;
		}
	} else {
		return le32_to_cpu(raw_inode->i_blocks_lo);
	}
}

struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
{
	struct ext4_iloc iloc;
	struct ext4_inode *raw_inode;
	struct ext4_inode_info *ei;
	struct buffer_head *bh;
	struct inode *inode;
	long ret;
	int block;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ei = EXT4_I(inode);
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	ei->i_acl = EXT4_ACL_NOT_CACHED;
	ei->i_default_acl = EXT4_ACL_NOT_CACHED;
#endif

	ret = __ext4_get_inode_loc(inode, &iloc, 0);
	if (ret < 0)
		goto bad_inode;
	bh = iloc.bh;
	raw_inode = ext4_raw_inode(&iloc);
	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);

	ei->i_state = 0;
	ei->i_dir_start_lookup = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes;
	 * the test is the same one that e2fsck uses.
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0) {
		if (inode->i_mode == 0 ||
		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
			/* this inode is deleted */
			brelse(bh);
			ret = -ESTALE;
			goto bad_inode;
		}
		/* The only unlinked inodes we let through here have
		 * valid i_mode and are being read by the orphan
		 * recovery code: that's fine, we're about to complete
		 * the process of deleting those. */
	}
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
		ei->i_file_acl |=
			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
	inode->i_size = ext4_isize(raw_inode);
	ei->i_disksize = inode->i_size;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_block_group = iloc.block_group;
	ei->i_last_alloc_group = ~0;
	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (block = 0; block < EXT4_N_BLOCKS; block++)
		ei->i_data[block] = raw_inode->i_block[block];
	INIT_LIST_HEAD(&ei->i_orphan);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
		    EXT4_INODE_SIZE(inode->i_sb)) {
			brelse(bh);
			ret = -EIO;
			goto bad_inode;
		}
		if (ei->i_extra_isize == 0) {
			/* The extra space is currently unused. Use it.
*/ 4386 ei->i_extra_isize = sizeof(struct ext4_inode) - 4387 EXT4_GOOD_OLD_INODE_SIZE; 4388 } else { 4389 __le32 *magic = (void *)raw_inode + 4390 EXT4_GOOD_OLD_INODE_SIZE + 4391 ei->i_extra_isize; 4392 if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) 4393 ei->i_state |= EXT4_STATE_XATTR; 4394 } 4395 } else 4396 ei->i_extra_isize = 0; 4397 4398 EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); 4399 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); 4400 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); 4401 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); 4402 4403 inode->i_version = le32_to_cpu(raw_inode->i_disk_version); 4404 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4405 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 4406 inode->i_version |= 4407 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; 4408 } 4409 4410 ret = 0; 4411 if (ei->i_file_acl && 4412 ((ei->i_file_acl < 4413 (le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) + 4414 EXT4_SB(sb)->s_gdb_count)) || 4415 (ei->i_file_acl >= ext4_blocks_count(EXT4_SB(sb)->s_es)))) { 4416 ext4_error(sb, __func__, 4417 "bad extended attribute block %llu in inode #%lu", 4418 ei->i_file_acl, inode->i_ino); 4419 ret = -EIO; 4420 goto bad_inode; 4421 } else if (ei->i_flags & EXT4_EXTENTS_FL) { 4422 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 4423 (S_ISLNK(inode->i_mode) && 4424 !ext4_inode_is_fast_symlink(inode))) 4425 /* Validate extent which is part of inode */ 4426 ret = ext4_ext_check_inode(inode); 4427 } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 4428 (S_ISLNK(inode->i_mode) && 4429 !ext4_inode_is_fast_symlink(inode))) { 4430 /* Validate block references which are part of inode */ 4431 ret = ext4_check_inode_blockref(inode); 4432 } 4433 if (ret) { 4434 brelse(bh); 4435 goto bad_inode; 4436 } 4437 4438 if (S_ISREG(inode->i_mode)) { 4439 inode->i_op = &ext4_file_inode_operations; 4440 inode->i_fop = &ext4_file_operations; 4441 ext4_set_aops(inode); 4442 } else if (S_ISDIR(inode->i_mode)) { 4443 inode->i_op = &ext4_dir_inode_operations; 4444 inode->i_fop = &ext4_dir_operations; 4445 } else if (S_ISLNK(inode->i_mode)) { 4446 if (ext4_inode_is_fast_symlink(inode)) { 4447 inode->i_op = &ext4_fast_symlink_inode_operations; 4448 nd_terminate_link(ei->i_data, inode->i_size, 4449 sizeof(ei->i_data) - 1); 4450 } else { 4451 inode->i_op = &ext4_symlink_inode_operations; 4452 ext4_set_aops(inode); 4453 } 4454 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || 4455 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { 4456 inode->i_op = &ext4_special_inode_operations; 4457 if (raw_inode->i_block[0]) 4458 init_special_inode(inode, inode->i_mode, 4459 old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 4460 else 4461 init_special_inode(inode, inode->i_mode, 4462 new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 4463 } else { 4464 brelse(bh); 4465 ret = -EIO; 4466 ext4_error(inode->i_sb, __func__, 4467 "bogus i_mode (%o) for inode=%lu", 4468 inode->i_mode, inode->i_ino); 4469 goto bad_inode; 4470 } 4471 brelse(iloc.bh); 4472 ext4_set_inode_flags(inode); 4473 unlock_new_inode(inode); 4474 return inode; 4475 4476 bad_inode: 4477 iget_failed(inode); 4478 return ERR_PTR(ret); 4479 } 4480 4481 static int ext4_inode_blocks_set(handle_t *handle, 4482 struct ext4_inode *raw_inode, 4483 struct ext4_inode_info *ei) 4484 { 4485 struct inode *inode = &(ei->vfs_inode); 4486 u64 i_blocks = inode->i_blocks; 4487 struct super_block *sb = inode->i_sb; 4488 4489 if (i_blocks <= ~0U) { 4490 /* 4491 * i_blocks 
can be represented in a 32 bit variable
		 * as a multiple of 512 bytes
		 */
		raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = 0;
		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
		return 0;
	}
	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
		return -EFBIG;

	if (i_blocks <= 0xffffffffffffULL) {
		/*
		 * i_blocks can be represented in a 48 bit variable
		 * as a multiple of 512 bytes
		 */
		raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
	} else {
		ei->i_flags |= EXT4_HUGE_FILE_FL;
		/* i_blocks is stored in units of file system blocks */
		i_blocks = i_blocks >> (inode->i_blkbits - 9);
		raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
	}
	return 0;
}

/*
 * Post the struct inode info into an on-disk inode location in the
 * buffer-cache. This gobbles the caller's reference to the
 * buffer_head in the inode location struct.
 *
 * The caller must have write access to iloc->bh.
 */
static int ext4_do_update_inode(handle_t *handle,
				struct inode *inode,
				struct ext4_iloc *iloc)
{
	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct buffer_head *bh = iloc->bh;
	int err = 0, rc, block;

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ei->i_state & EXT4_STATE_NEW)
		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);

	ext4_get_inode_flags(ei);
	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
		/*
		 * Fix up interoperability with old kernels: otherwise,
		 * old inodes get re-used with the upper 16 bits of the
		 * uid/gid intact.
		 */
		if (!ei->i_dtime) {
			raw_inode->i_uid_high =
				cpu_to_le16(high_16_bits(inode->i_uid));
			raw_inode->i_gid_high =
				cpu_to_le16(high_16_bits(inode->i_gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low =
			cpu_to_le16(fs_high2lowuid(inode->i_uid));
		raw_inode->i_gid_low =
			cpu_to_le16(fs_high2lowgid(inode->i_gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);

	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);

	if (ext4_inode_blocks_set(handle, raw_inode, ei))
		goto out_brelse;
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	/* clear the migrate flag in the raw_inode */
	raw_inode->i_flags = cpu_to_le32(ei->i_flags & ~EXT4_EXT_MIGRATE);
	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_HURD))
		raw_inode->i_file_acl_high =
			cpu_to_le16(ei->i_file_acl >> 32);
	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
	ext4_isize_set(raw_inode, ei->i_disksize);
	if (ei->i_disksize > 0x7fffffffULL) {
		struct super_block *sb = inode->i_sb;
		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
				EXT4_SB(sb)->s_es->s_rev_level ==
				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
			/* If this is the first large file
			 * created, add a flag to the superblock.
			 */
			err = ext4_journal_get_write_access(handle,
					EXT4_SB(sb)->s_sbh);
			if (err)
				goto out_brelse;
			ext4_update_dynamic_rev(sb);
			EXT4_SET_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
			sb->s_dirt = 1;
			ext4_handle_sync(handle);
			err = ext4_handle_dirty_metadata(handle, inode,
					EXT4_SB(sb)->s_sbh);
		}
	}
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else for (block = 0; block < EXT4_N_BLOCKS; block++)
		raw_inode->i_block[block] = ei->i_data[block];

	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
	if (ei->i_extra_isize) {
		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
			raw_inode->i_version_hi =
				cpu_to_le32(inode->i_version >> 32);
		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
	}

	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
	rc = ext4_handle_dirty_metadata(handle, inode, bh);
	if (!err)
		err = rc;
	ei->i_state &= ~EXT4_STATE_NEW;

out_brelse:
	brelse(bh);
	ext4_std_error(inode->i_sb, err);
	return err;
}

/*
 * ext4_write_inode()
 *
 * We are called from a few places:
 *
 * - Within generic_file_write() for O_SYNC files.
 *   Here, there will be no transaction running. We wait for any running
 *   transaction to commit.
 *
 * - Within sys_sync(), kupdate and such.
 *   We wait on commit, if told to.
 *
 * - Within prune_icache() (PF_MEMALLOC == true)
 *   Here we simply return. We can't afford to block kswapd on the
 *   journal commit.
 *
 * In all cases it is actually safe for us to return without doing anything,
 * because the inode has been copied into a raw inode buffer in
 * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
 * knfsd.
 *
 * Note that we are absolutely dependent upon all inode dirtiers doing the
 * right thing: they *must* call mark_inode_dirty() after dirtying info in
 * which we are interested.
 *
 * It would be a bug for them not to do this. The code:
 *
 *	mark_inode_dirty(inode)
 *	stuff();
 *	inode->i_size = expr;
 *
 * is in error because a kswapd-driven write_inode() could occur while
 * `stuff()' is running, and the new i_size will be lost. Plus the inode
 * will no longer be on the superblock's dirty inode list.
 */
int ext4_write_inode(struct inode *inode, int wait)
{
	if (current->flags & PF_MEMALLOC)
		return 0;

	if (ext4_journal_current_handle()) {
		jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
		dump_stack();
		return -EIO;
	}

	if (!wait)
		return 0;

	return ext4_force_commit(inode->i_sb);
}

int __ext4_write_dirty_metadata(struct inode *inode, struct buffer_head *bh)
{
	int err = 0;

	mark_buffer_dirty(bh);
	if (inode && inode_needs_sync(inode)) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			ext4_error(inode->i_sb, __func__,
				   "IO error syncing inode, "
				   "inode=%lu, block=%llu",
				   inode->i_ino,
				   (unsigned long long)bh->b_blocknr);
			err = -EIO;
		}
	}
	return err;
}

/*
 * ext4_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible. In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk. (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Another thing we have to assure is that if we are in ordered mode
 * and the inode is still attached to the committing transaction, we must
 * start writeout of all the dirty pages which are being truncated.
 * This way we are sure that all the data written in the previous
 * transaction are already on disk (truncate waits for pages under
 * writeback).
 *
 * Called with inode->i_mutex down.
 */
int ext4_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error, rc = 0;
	const unsigned int ia_valid = attr->ia_valid;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
	    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
		handle_t *handle;

		/* (user+group)*(old+new) structure, inode write (sb,
		 * inode block, ? - but truncate inode update has it) */
		handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+
					EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}
		error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
		if (error) {
			ext4_journal_stop(handle);
			return error;
		}
		/* Update corresponding info in inode so that everything is in
		 * one transaction */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		error = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	}

	if (attr->ia_valid & ATTR_SIZE) {
		if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

			if (attr->ia_size > sbi->s_bitmap_maxbytes) {
				error = -EFBIG;
				goto err_out;
			}
		}
	}

	if (S_ISREG(inode->i_mode) &&
	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
		handle_t *handle;

		handle = ext4_journal_start(inode, 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}

		error = ext4_orphan_add(handle, inode);
		EXT4_I(inode)->i_disksize = attr->ia_size;
		rc = ext4_mark_inode_dirty(handle, inode);
		if (!error)
			error = rc;
		ext4_journal_stop(handle);

		if (ext4_should_order_data(inode)) {
			error = ext4_begin_ordered_truncate(inode,
							    attr->ia_size);
			if (error) {
				/* Do as much error cleanup as possible */
				handle = ext4_journal_start(inode, 3);
				if (IS_ERR(handle)) {
					ext4_orphan_del(NULL, inode);
					goto err_out;
				}
				ext4_orphan_del(handle, inode);
				ext4_journal_stop(handle);
				goto err_out;
			}
		}
	}

	rc = inode_setattr(inode, attr);

	/* If inode_setattr's call to ext4_truncate failed to get a
	 * transaction handle at all, we need to clean up the in-core
	 * orphan list manually. */
	if (inode->i_nlink)
		ext4_orphan_del(NULL, inode);

	if (!rc && (ia_valid & ATTR_MODE))
		rc = ext4_acl_chmod(inode);

err_out:
	ext4_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	return error;
}

int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode;
	unsigned long delalloc_blocks;

	inode = dentry->d_inode;
	generic_fillattr(inode, stat);

	/*
	 * We can't update i_blocks if the block allocation is delayed;
	 * otherwise, in the case of a system crash before the real block
	 * allocation is done, we would have i_blocks inconsistent with
	 * the on-disk file blocks.
	 * We always keep i_blocks updated together with the real
	 * allocation. But to avoid confusing users, stat
	 * returns a block count that includes the delayed allocation
	 * blocks for this file.
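	 *
	 * (The shift below converts the reserved blocks to the 512-byte
	 * sectors that stat reports; e.g. with 4KB blocks, each delayed
	 * allocation block adds 8 to stat->blocks.)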
	 */
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
	return 0;
}

static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
				      int chunk)
{
	int indirects;

	/* if nrblocks are contiguous */
	if (chunk) {
		/*
		 * With N contiguous data blocks, we need at most
		 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks,
		 * 2 dindirect blocks and
		 * 1 tindirect block
		 */
		indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb);
		return indirects + 3;
	}
	/*
	 * If nrblocks are not contiguous, then in the worst case each
	 * block touches an indirect block, and each indirect block touches
	 * a double indirect block, plus a triple indirect block.
	 */
	indirects = nrblocks * 2 + 1;
	return indirects;
}

static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
		return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
}

/*
 * Account for index blocks, block group bitmaps and block group
 * descriptor blocks if we modify data blocks and index blocks.
 * In the worst case, the index blocks spread over different block groups.
 *
 * If the data blocks are discontiguous, they may spread over
 * different block groups too. Even if they are contiguous, with flexbg
 * they could still cross a block group boundary.
 *
 * Also account for superblock, inode, quota and xattr blocks
 */
int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	int groups, gdpblocks;
	int idxblocks;
	int ret = 0;

	/*
	 * How many index blocks do we need to touch to modify nrblocks?
	 * The "chunk" flag indicates whether nrblocks is
	 * physically contiguous on disk.
	 *
	 * Direct IO and fallocate, which call get_block to allocate
	 * one single extent at a time, can set the "chunk" flag.
	 */
	idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);

	ret = idxblocks;

	/*
	 * Now let's see how many group bitmaps and group descriptors
	 * need to be accounted.
	 */
	groups = idxblocks;
	if (chunk)
		groups += 1;
	else
		groups += nrblocks;

	gdpblocks = groups;
	if (groups > EXT4_SB(inode->i_sb)->s_groups_count)
		groups = EXT4_SB(inode->i_sb)->s_groups_count;
	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;

	/* bitmaps and block group descriptor blocks */
	ret += groups + gdpblocks;

	/* Blocks for super block, inode, quota and xattr blocks */
	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);

	return ret;
}

/*
 * Calculate the total number of credits to reserve to fit
 * the modification of a single page into a single transaction,
 * which may include multiple chunks of block allocations.
 *
 * This could be called via ext4_write_begin()
 *
 * We need to consider the worst case, when
 * one new block is allocated per extent.
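 *
 * (A worked example with hypothetical numbers: with 1KB blocks and 4KB
 * pages, bpp == 4. In indirect-block mode with chunk == 0 the index
 * cost computed above is 4 * 2 + 1 == 9 credits, to which
 * ext4_meta_trans_blocks() adds the bitmap, group descriptor,
 * superblock, inode, quota and xattr overhead - and journalled data
 * mode adds bpp more.)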
 */
int ext4_writepage_trans_blocks(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);
	int ret;

	ret = ext4_meta_trans_blocks(inode, bpp, 0);

	/* Account for data blocks for journalled mode */
	if (ext4_should_journal_data(inode))
		ret += bpp;
	return ret;
}

/*
 * Calculate the journal credits for a chunk of data modification.
 *
 * This is called from DIO, fallocate or whatever calls
 * ext4_get_blocks_wrap() to map/allocate a chunk of contiguous disk blocks.
 *
 * journal buffers for data blocks are not included here, as DIO
 * and fallocate do not need to journal data buffers.
 */
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
	return ext4_meta_trans_blocks(inode, nrblocks, 1);
}

/*
 * The caller must have previously called ext4_reserve_inode_write().
 * Given this, we know that the caller already has write access to iloc->bh.
 */
int ext4_mark_iloc_dirty(handle_t *handle,
			 struct inode *inode, struct ext4_iloc *iloc)
{
	int err = 0;

	if (test_opt(inode->i_sb, I_VERSION))
		inode_inc_iversion(inode);

	/* the do_update_inode consumes one bh->b_count */
	get_bh(iloc->bh);

	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
	err = ext4_do_update_inode(handle, inode, iloc);
	put_bh(iloc->bh);
	return err;
}

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh. This _must_ be cleaned up later.
 */

int
ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			 struct ext4_iloc *iloc)
{
	int err;

	err = ext4_get_inode_loc(inode, iloc);
	if (!err) {
		BUFFER_TRACE(iloc->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, iloc->bh);
		if (err) {
			brelse(iloc->bh);
			iloc->bh = NULL;
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}

/*
 * Expand an inode by new_extra_isize bytes.
 * Returns 0 on success or negative error number on failure.
 */
static int ext4_expand_extra_isize(struct inode *inode,
				   unsigned int new_extra_isize,
				   struct ext4_iloc iloc,
				   handle_t *handle)
{
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry;

	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
		return 0;

	raw_inode = ext4_raw_inode(&iloc);

	header = IHDR(inode, raw_inode);
	entry = IFIRST(header);

	/* No extended attributes present */
	if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
			new_extra_isize);
		EXT4_I(inode)->i_extra_isize = new_extra_isize;
		return 0;
	}

	/* try to expand with EAs present */
	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
					  raw_inode, handle);
}

/*
 * What we do here is to mark the in-core inode as clean with respect to inode
 * dirtiness (it may still be data-dirty).
 * This means that the in-core inode may be reaped by prune_icache
 * without having to perform any I/O. This is a very good thing,
 * because *any* task may call prune_icache - even ones which
 * have a transaction open against a different journal.
5074 * 5075 * Is this cheating? Not really. Sure, we haven't written the 5076 * inode out, but prune_icache isn't a user-visible syncing function. 5077 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) 5078 * we start and wait on commits. 5079 * 5080 * Is this efficient/effective? Well, we're being nice to the system 5081 * by cleaning up our inodes proactively so they can be reaped 5082 * without I/O. But we are potentially leaving up to five seconds' 5083 * worth of inodes floating about which prune_icache wants us to 5084 * write out. One way to fix that would be to get prune_icache() 5085 * to do a write_super() to free up some memory. It has the desired 5086 * effect. 5087 */ 5088 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) 5089 { 5090 struct ext4_iloc iloc; 5091 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 5092 static unsigned int mnt_count; 5093 int err, ret; 5094 5095 might_sleep(); 5096 err = ext4_reserve_inode_write(handle, inode, &iloc); 5097 if (ext4_handle_valid(handle) && 5098 EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && 5099 !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) { 5100 /* 5101 * We need extra buffer credits since we may write into EA block 5102 * with this same handle. If journal_extend fails, then it will 5103 * only result in a minor loss of functionality for that inode. 5104 * If this is felt to be critical, then e2fsck should be run to 5105 * force a large enough s_min_extra_isize. 5106 */ 5107 if ((jbd2_journal_extend(handle, 5108 EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) { 5109 ret = ext4_expand_extra_isize(inode, 5110 sbi->s_want_extra_isize, 5111 iloc, handle); 5112 if (ret) { 5113 EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND; 5114 if (mnt_count != 5115 le16_to_cpu(sbi->s_es->s_mnt_count)) { 5116 ext4_warning(inode->i_sb, __func__, 5117 "Unable to expand inode %lu. Delete" 5118 " some EAs or run e2fsck.", 5119 inode->i_ino); 5120 mnt_count = 5121 le16_to_cpu(sbi->s_es->s_mnt_count); 5122 } 5123 } 5124 } 5125 } 5126 if (!err) 5127 err = ext4_mark_iloc_dirty(handle, inode, &iloc); 5128 return err; 5129 } 5130 5131 /* 5132 * ext4_dirty_inode() is called from __mark_inode_dirty() 5133 * 5134 * We're really interested in the case where a file is being extended. 5135 * i_size has been changed by generic_commit_write() and we thus need 5136 * to include the updated inode in the current transaction. 5137 * 5138 * Also, vfs_dq_alloc_block() will always dirty the inode when blocks 5139 * are allocated to the file. 5140 * 5141 * If the inode is marked synchronous, we don't honour that here - doing 5142 * so would cause a commit on atime updates, which we don't bother doing. 5143 * We handle synchronous inodes at the highest possible level. 5144 */ 5145 void ext4_dirty_inode(struct inode *inode) 5146 { 5147 handle_t *current_handle = ext4_journal_current_handle(); 5148 handle_t *handle; 5149 5150 if (!ext4_handle_valid(current_handle)) { 5151 ext4_mark_inode_dirty(current_handle, inode); 5152 return; 5153 } 5154 5155 handle = ext4_journal_start(inode, 2); 5156 if (IS_ERR(handle)) 5157 goto out; 5158 if (current_handle && 5159 current_handle->h_transaction != handle->h_transaction) { 5160 /* This task has a transaction open against a different fs */ 5161 printk(KERN_EMERG "%s: transactions do not match!\n", 5162 __func__); 5163 } else { 5164 jbd_debug(5, "marking dirty. 
outer handle=%p\n", 5165 current_handle); 5166 ext4_mark_inode_dirty(handle, inode); 5167 } 5168 ext4_journal_stop(handle); 5169 out: 5170 return; 5171 } 5172 5173 #if 0 5174 /* 5175 * Bind an inode's backing buffer_head into this transaction, to prevent 5176 * it from being flushed to disk early. Unlike 5177 * ext4_reserve_inode_write, this leaves behind no bh reference and 5178 * returns no iloc structure, so the caller needs to repeat the iloc 5179 * lookup to mark the inode dirty later. 5180 */ 5181 static int ext4_pin_inode(handle_t *handle, struct inode *inode) 5182 { 5183 struct ext4_iloc iloc; 5184 5185 int err = 0; 5186 if (handle) { 5187 err = ext4_get_inode_loc(inode, &iloc); 5188 if (!err) { 5189 BUFFER_TRACE(iloc.bh, "get_write_access"); 5190 err = jbd2_journal_get_write_access(handle, iloc.bh); 5191 if (!err) 5192 err = ext4_handle_dirty_metadata(handle, 5193 inode, 5194 iloc.bh); 5195 brelse(iloc.bh); 5196 } 5197 } 5198 ext4_std_error(inode->i_sb, err); 5199 return err; 5200 } 5201 #endif 5202 5203 int ext4_change_inode_journal_flag(struct inode *inode, int val) 5204 { 5205 journal_t *journal; 5206 handle_t *handle; 5207 int err; 5208 5209 /* 5210 * We have to be very careful here: changing a data block's 5211 * journaling status dynamically is dangerous. If we write a 5212 * data block to the journal, change the status and then delete 5213 * that block, we risk forgetting to revoke the old log record 5214 * from the journal and so a subsequent replay can corrupt data. 5215 * So, first we make sure that the journal is empty and that 5216 * nobody is changing anything. 5217 */ 5218 5219 journal = EXT4_JOURNAL(inode); 5220 if (!journal) 5221 return 0; 5222 if (is_journal_aborted(journal)) 5223 return -EROFS; 5224 5225 jbd2_journal_lock_updates(journal); 5226 jbd2_journal_flush(journal); 5227 5228 /* 5229 * OK, there are no updates running now, and all cached data is 5230 * synced to disk. We are now in a completely consistent state 5231 * which doesn't have anything in the journal, and we know that 5232 * no filesystem updates are running, so it is safe to modify 5233 * the inode's in-core data-journaling state flag now. 5234 */ 5235 5236 if (val) 5237 EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL; 5238 else 5239 EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL; 5240 ext4_set_aops(inode); 5241 5242 jbd2_journal_unlock_updates(journal); 5243 5244 /* Finally we can mark the inode as dirty. */ 5245 5246 handle = ext4_journal_start(inode, 1); 5247 if (IS_ERR(handle)) 5248 return PTR_ERR(handle); 5249 5250 err = ext4_mark_inode_dirty(handle, inode); 5251 ext4_handle_sync(handle); 5252 ext4_journal_stop(handle); 5253 ext4_std_error(inode->i_sb, err); 5254 5255 return err; 5256 } 5257 5258 static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh) 5259 { 5260 return !buffer_mapped(bh); 5261 } 5262 5263 int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 5264 { 5265 struct page *page = vmf->page; 5266 loff_t size; 5267 unsigned long len; 5268 int ret = -EINVAL; 5269 void *fsdata; 5270 struct file *file = vma->vm_file; 5271 struct inode *inode = file->f_path.dentry->d_inode; 5272 struct address_space *mapping = inode->i_mapping; 5273 5274 /* 5275 * Get i_alloc_sem to stop truncates messing with the inode. We cannot 5276 * get i_mutex because we are already holding mmap_sem. 
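	 * (Taking i_mutex here would also invert the usual
	 * i_mutex -> mmap_sem ordering used by the write paths, which
	 * fault in user pages while holding i_mutex.)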
	 */
	down_read(&inode->i_alloc_sem);
	size = i_size_read(inode);
	if (page->mapping != mapping || size <= page_offset(page)
	    || !PageUptodate(page)) {
		/* page got truncated from under us? */
		goto out_unlock;
	}
	ret = 0;
	if (PageMappedToDisk(page))
		goto out_unlock;

	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

	if (page_has_buffers(page)) {
		/* return if we have all the buffers mapped */
		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
				       ext4_bh_unmapped))
			goto out_unlock;
	}
	/*
	 * OK, we need to fill the hole... Do write_begin/write_end
	 * to do the block allocation/reservation. We are not holding
	 * inode->i_mutex here. That allows parallel write_begin,
	 * write_end calls. lock_page prevents this from happening
	 * on the same page though.
	 */
	ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
			len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
	if (ret < 0)
		goto out_unlock;
	ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
			len, len, page, fsdata);
	if (ret < 0)
		goto out_unlock;
	ret = 0;
out_unlock:
	if (ret)
		ret = VM_FAULT_SIGBUS;
	up_read(&inode->i_alloc_sem);
	return ret;
}
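
/*
 * Illustrative sketch only, kept out of the build like ext4_pin_inode
 * above: how the 48-bit i_blocks encoding handled by
 * ext4_inode_blocks() and ext4_inode_blocks_set() round-trips. The
 * helper name and its standalone parameters are hypothetical; on disk
 * the low 32 bits live in i_blocks_lo, the next 16 bits in
 * i_blocks_high, and with EXT4_HUGE_FILE_FL the count is in filesystem
 * blocks rather than 512-byte sectors.
 */
#if 0
static u64 example_decode_i_blocks(u32 lo, u16 hi, int huge_file,
				   unsigned int blkbits)
{
	u64 blocks = ((u64)hi << 32) | lo;

	/* Huge files count fs blocks; convert to 512-byte sectors */
	if (huge_file)
		blocks <<= (blkbits - 9);
	return blocks;
}
#endif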