/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *	David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create);
static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
	ext4_lblk_t needed;

	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

	/* Give ourselves just enough room to cope with inodes in which
	 * i_blocks is corrupt: we've seen disk corruptions in the past
	 * which resulted in random data in an inode which looked enough
	 * like a regular file for ext4 to try to delete it.  Things
	 * will go a bit crazy if that happens, but at least we should
	 * try not to panic the whole kernel. */
	if (needed < 2)
		needed = 2;

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT4_MAX_TRANS_DATA)
		needed = EXT4_MAX_TRANS_DATA;

	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}
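/*
 * Illustrative numbers (a sketch, not from the code above): i_blocks counts
 * 512-byte sectors, so on a 4KB-block filesystem s_blocksize_bits is 12 and
 * the shift by (12 - 9) = 3 converts sectors to filesystem blocks.  An inode
 * with i_blocks == 80 thus yields needed = 10, and the function returns
 * EXT4_DATA_TRANS_BLOCKS(sb) + 10 journal credits.
 */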
/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext4_journal_start(inode, blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext4_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle))
		return 0;
	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
		return 0;
	return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	trace_ext4_evict_inode(inode);
	if (inode->i_nlink) {
		truncate_inode_pages(&inode->i_data, 0);
		goto no_delete;
	}

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			ext4_orphan_del(NULL, inode);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}
typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

/**
 *	ext4_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: set this non-zero if the referred-to block is likely to be
 *		followed (on disk) by an indirect block.
 *
 *	To store the locations of a file's data ext4 uses a data structure
 *	common for UNIX filesystems - a tree of pointers anchored in the
 *	inode, with data blocks at leaves and indirect blocks in intermediate
 *	nodes.  This function translates the block number into a path in that
 *	tree - the return value is the path length and @offsets[n] is the
 *	offset of the pointer to the (n+1)th node in the nth one.  If @i_block
 *	is out of range (negative or too large), a warning is printed and zero
 *	is returned.
 *
 *	Note: function doesn't find node addresses, so no IO is needed.  All
 *	we need to know is the capacity of indirect blocks (taken from the
 *	inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks.  We might use long long, but that would
 * kill us on x86.  Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
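/*
 * A worked example, not part of the original source: with 4KB blocks,
 * EXT4_ADDR_PER_BLOCK is 1024 (ptrs_bits == 10) and EXT4_NDIR_BLOCKS is 12.
 * For i_block == 5000: 5000 - 12 = 4988 >= 1024, and 4988 - 1024 = 3964 <
 * double_blocks (1 << 20), so the function returns a depth-3 path:
 * { EXT4_DIND_BLOCK, 3964 >> 10 = 3, 3964 & 1023 = 892 }.
 */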
/**
 *	ext4_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Function fills the array of triples <key, p, bh> and returns %NULL
 *	if everything went OK or the pointer to the last filled triple
 *	(incomplete one) otherwise.  Upon the return chain[i].key contains
 *	the number of (i+1)-th block in the chain (as it is stored in memory,
 *	i.e. little-endian 32-bit), chain[i].p contains the address of that
 *	number (it points into struct inode for i==0 and into the bh->b_data
 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *	block for i>0 and NULL for i==0.  In other words, it holds the block
 *	numbers of the chain, the addresses they were taken from (and where
 *	we can verify that the chain did not change) and the buffer_heads
 *	hosting these numbers.
 *
 *	Function stops when it stumbles upon a zero pointer (absent block)
 *		(pointer to last triple returned, *@err == 0)
 *	or when it gets an IO error reading an indirect block
 *		(ditto, *@err == -EIO)
 *	or when it reads all @depth-1 indirect blocks successfully and finds
 *	the whole chain, all the way to the data (returns %NULL, *err == 0).
 *
 *	Needs to be called with
 *	down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh))
			goto failure;

		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = -EIO;
no_block:
	return p;
}
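/*
 * Sketch of how the two helpers compose (illustrative only): for a depth-2
 * path, the chain after a successful walk looks like
 *
 *	chain[0].p -> &EXT4_I(inode)->i_data[EXT4_IND_BLOCK], .bh == NULL
 *	chain[1].p -> indirect block's b_data + offsets[1],   .bh == that bh
 *
 * and le32_to_cpu(chain[1].key) is the physical number of the data block.
 */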
/**
 *	ext4_find_near - find a place for allocation with sufficient locality
 *	@inode: owner
 *	@ind: descriptor of indirect block.
 *
 *	This function returns the preferred place for block allocation.
 *	It is used when the heuristic for sequential allocation fails.
 *	Rules are:
 *	  + if there is a block to the left of our position - allocate near it.
 *	  + if pointer will live in indirect block - allocate near that block.
 *	  + if pointer will live in inode - allocate in the same
 *	    cylinder group.
 *
 *	In the latter case we colour the starting block by the caller's PID to
 *	prevent it from clashing with concurrent allocations for a different
 *	inode in the same block group.  The PID is used here so that
 *	functionally related files will be close-by on-disk.
 *
 *	Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * Is it going to be referred to from the inode itself?  OK, just put
	 * it into the same cylinder group then.
	 */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}
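/*
 * Illustrative arithmetic (assumed numbers, not from this file): with 4KB
 * blocks a group holds 32768 blocks, so the colour stride is 32768 / 16 =
 * 2048 blocks, and a task with pid % 16 == 5 gets bg_start + 10240.  Sixteen
 * concurrent writers therefore start in sixteen disjoint slices of the group.
 */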
/**
 *	ext4_find_goal - find a preferred place for allocation.
 *	@inode: owner
 *	@block:  block we want
 *	@partial: pointer to the last triple within a chain
 *
 *	Normally this function finds the preferred place for block allocation
 *	and returns it.
 *	Because this is only used for non-extent files, we limit the block nr
 *	to 32 bits.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	goal = ext4_find_near(inode, partial);
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}

/**
 *	ext4_blks_to_allocate - Look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 *	@branch: chain of indirect blocks
 *	@k: number of blocks needed for indirect blocks
 *	@blks: number of data blocks to be mapped.
 *	@blocks_to_boundary: the offset in the indirect block
 *
 *	return the total number of blocks to be allocated, including the
 *	direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) have not been allocated
	 * yet, so clearly no blocks on that path have been allocated either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
	       le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
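/*
 * Example (assumed values, for illustration): a request for blks == 8 data
 * blocks with k == 0, blocks_to_boundary == 5 and a fully unallocated map
 * scans forward from branch[0].p and stops at the boundary, returning 6 -
 * the caller allocates the remaining blocks in a later pass rather than
 * crossing the indirect-block boundary in one go.
 */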
/**
 *	ext4_alloc_blocks: multiple allocate blocks needed for a branch
 *	@handle: handle for this transaction
 *	@inode: inode which needs allocated blocks
 *	@iblock: the logical block to start allocating at
 *	@goal: preferred physical block of allocation
 *	@indirect_blks: the number of blocks that need to be allocated for
 *		indirect blocks
 *	@blks: number of desired blocks
 *	@new_blocks: on return it will store the new block numbers for
 *	the indirect blocks(if needed) and the first direct block,
 *	@err: on return it will store the error code
 *
 *	This function will return the number of blocks allocated as
 *	requested by the passed-in parameters.
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, ext4_fsblk_t goal,
			     int indirect_blks, int blks,
			     ext4_fsblk_t new_blocks[4], int *err)
{
	struct ext4_allocation_request ar;
	int target, i;
	unsigned long count = 0, blk_allocated = 0;
	int index = 0;
	ext4_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks(if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks we need to allocate (required).
	 */
	/* first we try to allocate the indirect blocks */
	target = indirect_blks;
	while (target > 0) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext4_new_meta_blocks(handle, inode, goal,
						     0, &count, err);
		if (*err)
			goto failed_out;

		if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) {
			EXT4_ERROR_INODE(inode,
					 "current_block %llu + count %lu > %d!",
					 current_block, count,
					 EXT4_MAX_BLOCK_FILE_PHYS);
			*err = -EIO;
			goto failed_out;
		}

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}
		if (count > 0) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
			printk(KERN_INFO "%s returned more blocks than "
						"requested\n", __func__);
			WARN_ON(1);
			break;
		}
	}

	target = blks - count;
	blk_allocated = count;
	if (!target)
		goto allocated;
	/* Now allocate data blocks */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.goal = goal;
	ar.len = target;
	ar.logical = iblock;
	if (S_ISREG(inode->i_mode))
		/* enable in-core preallocation only for regular files */
		ar.flags = EXT4_MB_HINT_DATA;

	current_block = ext4_mb_new_blocks(handle, &ar, err);
	if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) {
		EXT4_ERROR_INODE(inode,
				 "current_block %llu + ar.len %d > %d!",
				 current_block, ar.len,
				 EXT4_MAX_BLOCK_FILE_PHYS);
		*err = -EIO;
		goto failed_out;
	}

	if (*err && (target == blks)) {
		/*
		 * if the allocation failed and we didn't allocate
		 * any blocks before
		 */
		goto failed_out;
	}
	if (!*err) {
		if (target == blks) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
		}
		blk_allocated += ar.len;
	}
allocated:
	/* total number of blocks allocated for direct blocks */
	ret = blk_allocated;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
	return ret;
}
/**
 *	ext4_alloc_branch - allocate and set up a chain of blocks.
 *	@handle: handle for this transaction
 *	@inode: owner
 *	@indirect_blks: number of allocated indirect blocks
 *	@blks: number of allocated direct blocks
 *	@goal: preferred place for allocation
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
 *	This function allocates blocks, zeroes out all but the last one,
 *	links them into chain and (if we are synchronous) writes them to disk.
 *	In other words, it prepares a branch that can be spliced onto the
 *	inode.  It stores the information about that chain in the branch[], in
 *	the same format as ext4_get_branch() would do.  We are calling it after
 *	we had read the existing part of chain and partial points to the last
 *	triple of that (one with zero ->key).  Upon the exit we have the same
 *	picture as after the successful ext4_get_block(), except that in one
 *	place chain is disconnected - *branch->p is still zero (we did not
 *	set the last link), but branch->key contains the number that should
 *	be placed into *branch->p to fill that gap.
 *
 *	If allocation fails we free all blocks we've allocated (and forget
 *	their buffer_heads) and return the error value from the failed
 *	ext4_alloc_blocks() (normally -ENOSPC).  Otherwise we set the chain
 *	as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, int indirect_blks,
			     int *blks, ext4_fsblk_t goal,
			     ext4_lblk_t *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext4_fsblk_t new_blocks[4];
	ext4_fsblk_t current_block;

	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		if (unlikely(!bh)) {
			err = -EIO;
			goto failed;
		}

		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			/* Don't brelse(bh) here; it's done in
			 * ext4_journal_forget() below */
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the new allocated
			 * data blocks numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/* Allocation failed, free what we already allocated */
	ext4_free_blocks(handle, inode, NULL, new_blocks[0], 1, 0);
	for (i = 1; i <= n; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	for (i = n+1; i < indirect_blks; i++)
		ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);

	ext4_free_blocks(handle, inode, NULL, new_blocks[i], num, 0);

	return err;
}
/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @handle: handle for this transaction
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext4_alloc_branch)
 * @where: location of missing link
 * @num:   number of indirect blocks we are adding
 * @blks:  number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.).  In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			      ext4_lblk_t block, Indirect *where, int num,
			      int blks)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just-allocated
	 * direct blocks.
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		ext4_mark_inode_dirty(handle, inode);
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
			 blks, 0);

	return err;
}
/*
 * The ext4_ind_map_blocks() function handles non-extents inodes
 * (i.e., using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_map_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf.  So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * The ext4_ind_map_blocks() function should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks.
 */
static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
			       struct ext4_map_blocks *map,
			       int flags)
{
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
	J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* map more blocks */
		while (count < map->m_len && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	goal = ext4_find_goal(inode, map->m_lblk, partial);

	/* the number of blocks that need to be allocated for the
	 * [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext4_blks_to_allocate(partial, indirect_blks,
				      map->m_len, blocks_to_boundary);
	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
				&count, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case. --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, inode, map->m_lblk,
					 partial, indirect_blks, count);
	if (err)
		goto cleanup;

	map->m_flags |= EXT4_MAP_NEW;

	ext4_update_inode_fsync_trans(handle, inode, 1);
got_it:
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = le32_to_cpu(chain[depth-1].key);
	map->m_len = count;
	if (count > blocks_to_boundary)
		map->m_flags |= EXT4_MAP_BOUNDARY;
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
out:
	trace_ext4_ind_map_blocks_exit(inode, map->m_lblk,
				       map->m_pblk, map->m_len, err);
	return err;
}
#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate a new block at @lblock for a non-extent file.
 */
static int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
	int blk_bits;

	if (lblock < EXT4_NDIR_BLOCKS)
		return 0;

	lblock -= EXT4_NDIR_BLOCKS;

	if (ei->i_da_metadata_calc_len &&
	    (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
		ei->i_da_metadata_calc_len++;
		return 0;
	}
	ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
	ei->i_da_metadata_calc_len = 1;
	blk_bits = order_base_2(lblock);
	return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
}

/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate a block located at @lblock.
 */
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return ext4_ext_calc_metadata_amount(inode, lblock);

	return ext4_ind_calc_metadata_amount(inode, lblock);
}
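/*
 * Worked example (assumed geometry, not from this file): with 4KB blocks
 * EXT4_ADDR_PER_BLOCK_BITS is 10.  For lblock == 5012 we get 5012 - 12 =
 * 5000, order_base_2(5000) == 13, and 13 / 10 + 1 == 2: one indirect and
 * one double-indirect block may need to be allocated.  A second block in
 * the same dind-aligned window just bumps i_da_metadata_calc_len and
 * reserves nothing new.
 */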
/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks\n",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	percpu_counter_sub(&sbi->s_dirtyblocks_counter,
			   used + ei->i_allocated_meta_blocks);
	ei->i_allocated_meta_blocks = 0;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		percpu_counter_sub(&sbi->s_dirtyblocks_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, used);
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated.  So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, used);
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}

static int __check_block_validity(struct inode *inode, const char *func,
				unsigned int line,
				struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_len);
		return -EIO;
	}
	return 0;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

/*
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
				    unsigned int max_pages)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t	index;
	struct pagevec pvec;
	pgoff_t num = 0;
	int i, nr_pages, done = 0;

	if (max_pages == 0)
		return 0;
	pagevec_init(&pvec, 0);
	while (!done) {
		index = idx;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      (pgoff_t)PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page) ||
			    PageWriteback(page) ||
			    page->index != idx) {
				done = 1;
				unlock_page(page);
				break;
			}
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					if (!buffer_delay(bh) &&
					    !buffer_unwritten(bh))
						done = 1;
					bh = bh->b_this_page;
				} while (!done && (bh != head));
			}
			unlock_page(page);
			if (done)
				break;
			idx++;
			num++;
			if (num >= max_pages) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
	}
	return num;
}
/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extents based, it calls ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapped
 * files.
 *
 * On success, it returns the number of blocks mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and uninitialized,
 * the result buffer head is unmapped.  If create == 1, it will make sure
 * the buffer head is mapped.
 *
 * It returns 0 if a plain look up failed (blocks have not been allocated);
 * in that case, the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	int retval;

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_map_blocks() returns with create == 0 and the
	 * buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		return retval;

	/*
	 * When we call get_blocks without the create flag, the
	 * BH_Unwritten flag could have gotten set if the blocks
	 * requested were part of a uninitialized extent.  We need to
	 * clear this flag now that we are committed to convert all or
	 * part of the uninitialized extent to be an initialized
	 * extent.  This is because we need to avoid the combination
	 * of BH_Unwritten and BH_Mapped flags being simultaneously
	 * set on the buffer_head.
	 */
	map->m_flags &= ~EXT4_MAP_UNWRITTEN;

	/*
	 * Allocating new blocks and/or writing to an uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * if the caller is from delayed allocation writeout path
	 * we have already reserved fs blocks for allocation
	 * let the underlying get_block() function know to
	 * avoid double accounting
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now.  We don't
		 * support fallocate for non extent files.  So we can update
		 * reserve space here.
		 */
		if ((retval > 0) &&
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}
	return retval;
}
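/*
 * Usage sketch (illustrative, not part of this file): a caller that wants
 * up to 16 blocks starting at logical block 100, allocating on demand,
 * would do roughly
 *
 *	struct ext4_map_blocks map = { .m_lblk = 100, .m_len = 16 };
 *	int n = ext4_map_blocks(handle, inode, &map, EXT4_GET_BLOCKS_CREATE);
 *
 * and on n > 0 read the physical start from map.m_pblk and the mapped
 * extent length from map.m_len, checking map.m_flags for EXT4_MAP_NEW.
 */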
/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	handle_t *handle = ext4_journal_current_handle();
	struct ext4_map_blocks map;
	int ret = 0, started = 0;
	int dio_credits;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	if (flags && !handle) {
		/* Direct IO write... */
		if (map.m_len > DIO_MAX_BLOCKS)
			map.m_len = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			return ret;
		}
		started = 1;
	}

	ret = ext4_map_blocks(handle, inode, &map, flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}
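/*
 * Sizing note with assumed numbers: for a direct IO write the caller encodes
 * the request size in bh->b_size, so a 1MB request on a 4KB-block filesystem
 * arrives as m_len = (1 << 20) >> 12 = 256 blocks - well under the
 * DIO_MAX_BLOCKS cap of 4096, which exists to keep the transaction credits
 * from ext4_chunk_trans_blocks() bounded.
 */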
/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);

	if (err < 0)
		*errp = err;
	if (err <= 0)
		return NULL;
	*errp = 0;

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (!bh) {
		*errp = -EIO;
		return NULL;
	}
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		fatal = ext4_journal_get_create_access(handle, bh);
		if (!fatal && !buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (!fatal)
			fatal = err;
	} else {
		BUFFER_TRACE(bh, "not a new buffer");
	}
	if (fatal) {
		*errp = fatal;
		brelse(bh);
		bh = NULL;
	}
	return bh;
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ_META, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}

static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
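/*
 * Example of the from/to window (assumed geometry): on a 4KB page with 1KB
 * blocks, a write covering bytes 1500..2600 gives from == 1500, to == 2600,
 * so walk_page_buffers() applies fn to the second and third buffer_heads
 * (byte ranges 1024..2047 and 2048..3071), skips the first and fourth, and
 * notes in *partial that some untouched buffers are not uptodate.
 */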
/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page().  In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
				       struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers.  Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues.  Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}

/*
 * Truncate blocks that were not used by write.  We have to truncate the
 * pagecache as well so that corresponding buffers get properly unmapped.
 */
static void ext4_truncate_failed_write(struct inode *inode)
{
	truncate_inode_pages(inode->i_mapping, inode->i_size);
	ext4_truncate(inode);
}

static int ext4_get_block_write(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		page_cache_release(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again.  Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}
/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied > EXT4_I(inode)->i_disksize) {
		/* We need to mark inode dirty even if
		 * new_i_size is less than inode->i_size
		 * but greater than i_disksize. (hint: delalloc)
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock.  First, it unnecessarily
	 * makes the holding time of page lock longer.  Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	return copied;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_ordered_write_end(inode, pos, len, copied);
	ret = ext4_jbd2_file_inode(handle, inode);

	if (ret == 0) {
		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);
		copied = ret2;
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			/* if we have allocated more blocks than we copied,
			 * we will have blocks allocated outside
			 * inode->i_size, so truncate them
			 */
			ext4_orphan_add(handle, inode);
		if (ret2 < 0)
			ret = ret2;
	}
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
1752 */ 1753 if (inode->i_nlink) 1754 ext4_orphan_del(NULL, inode); 1755 } 1756 1757 1758 return ret ? ret : copied; 1759 } 1760 1761 static int ext4_writeback_write_end(struct file *file, 1762 struct address_space *mapping, 1763 loff_t pos, unsigned len, unsigned copied, 1764 struct page *page, void *fsdata) 1765 { 1766 handle_t *handle = ext4_journal_current_handle(); 1767 struct inode *inode = mapping->host; 1768 int ret = 0, ret2; 1769 1770 trace_ext4_writeback_write_end(inode, pos, len, copied); 1771 ret2 = ext4_generic_write_end(file, mapping, pos, len, copied, 1772 page, fsdata); 1773 copied = ret2; 1774 if (pos + len > inode->i_size && ext4_can_truncate(inode)) 1775 /* If we have allocated more blocks and copied 1776 * less, we will have blocks allocated outside 1777 * inode->i_size, so truncate them. 1778 */ 1779 ext4_orphan_add(handle, inode); 1780 1781 if (ret2 < 0) 1782 ret = ret2; 1783 1784 ret2 = ext4_journal_stop(handle); 1785 if (!ret) 1786 ret = ret2; 1787 1788 if (pos + len > inode->i_size) { 1789 ext4_truncate_failed_write(inode); 1790 /* 1791 * If truncate failed early the inode might still be 1792 * on the orphan list; we need to make sure the inode 1793 * is removed from the orphan list in that case. 1794 */ 1795 if (inode->i_nlink) 1796 ext4_orphan_del(NULL, inode); 1797 } 1798 1799 return ret ? ret : copied; 1800 } 1801 1802 static int ext4_journalled_write_end(struct file *file, 1803 struct address_space *mapping, 1804 loff_t pos, unsigned len, unsigned copied, 1805 struct page *page, void *fsdata) 1806 { 1807 handle_t *handle = ext4_journal_current_handle(); 1808 struct inode *inode = mapping->host; 1809 int ret = 0, ret2; 1810 int partial = 0; 1811 unsigned from, to; 1812 loff_t new_i_size; 1813 1814 trace_ext4_journalled_write_end(inode, pos, len, copied); 1815 from = pos & (PAGE_CACHE_SIZE - 1); 1816 to = from + len; 1817 1818 if (copied < len) { 1819 if (!PageUptodate(page)) 1820 copied = 0; 1821 page_zero_new_buffers(page, from+copied, to); 1822 } 1823 1824 ret = walk_page_buffers(handle, page_buffers(page), from, 1825 to, &partial, write_end_fn); 1826 if (!partial) 1827 SetPageUptodate(page); 1828 new_i_size = pos + copied; 1829 if (new_i_size > inode->i_size) 1830 i_size_write(inode, pos+copied); 1831 ext4_set_inode_state(inode, EXT4_STATE_JDATA); 1832 if (new_i_size > EXT4_I(inode)->i_disksize) { 1833 ext4_update_i_disksize(inode, new_i_size); 1834 ret2 = ext4_mark_inode_dirty(handle, inode); 1835 if (!ret) 1836 ret = ret2; 1837 } 1838 1839 unlock_page(page); 1840 page_cache_release(page); 1841 if (pos + len > inode->i_size && ext4_can_truncate(inode)) 1842 /* If we have allocated more blocks and copied 1843 * less, we will have blocks allocated outside 1844 * inode->i_size, so truncate them. 1845 */ 1846 ext4_orphan_add(handle, inode); 1847 1848 ret2 = ext4_journal_stop(handle); 1849 if (!ret) 1850 ret = ret2; 1851 if (pos + len > inode->i_size) { 1852 ext4_truncate_failed_write(inode); 1853 /* 1854 * If truncate failed early the inode might still be 1855 * on the orphan list; we need to make sure the inode 1856 * is removed from the orphan list in that case. 1857 */ 1858 if (inode->i_nlink) 1859 ext4_orphan_del(NULL, inode); 1860 } 1861 1862 return ret ? ret : copied; 1863 }
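/*
 * Illustrative note (added commentary, not part of the original source):
 * the orphan-list handling in the write_end paths above is a crash-safety
 * protocol. A hypothetical timeline for a short copy:
 *
 *   1. write_begin() allocates blocks for [pos, pos + len)
 *   2. the caller copies only `copied' < len bytes into the page
 *   3. write_end() adds the inode to the on-disk orphan list, stops the
 *      handle, then truncates the blocks beyond i_size
 *
 * If we crash between steps 1 and 3, journal recovery finds the inode on
 * the orphan list and truncates the stray blocks for us.
 */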
1864 1865 /* 1866 * Reserve a single block located at lblock 1867 */ 1868 static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) 1869 { 1870 int retries = 0; 1871 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1872 struct ext4_inode_info *ei = EXT4_I(inode); 1873 unsigned long md_needed; 1874 int ret; 1875 1876 /* 1877 * Recalculate the number of metadata blocks to reserve 1878 * in order to allocate nrblocks; the 1879 * worst case is one extent per block. 1880 */ 1881 repeat: 1882 spin_lock(&ei->i_block_reservation_lock); 1883 md_needed = ext4_calc_metadata_amount(inode, lblock); 1884 trace_ext4_da_reserve_space(inode, md_needed); 1885 spin_unlock(&ei->i_block_reservation_lock); 1886 1887 /* 1888 * We will charge metadata quota at writeout time; this saves 1889 * us from metadata over-estimation, though we may go over by 1890 * a small amount in the end. Here we just reserve for data. 1891 */ 1892 ret = dquot_reserve_block(inode, 1); 1893 if (ret) 1894 return ret; 1895 /* 1896 * We do still charge estimated metadata to the sb though; 1897 * we cannot afford to run out of free blocks. 1898 */ 1899 if (ext4_claim_free_blocks(sbi, md_needed + 1, 0)) { 1900 dquot_release_reservation_block(inode, 1); 1901 if (ext4_should_retry_alloc(inode->i_sb, &retries)) { 1902 yield(); 1903 goto repeat; 1904 } 1905 return -ENOSPC; 1906 } 1907 spin_lock(&ei->i_block_reservation_lock); 1908 ei->i_reserved_data_blocks++; 1909 ei->i_reserved_meta_blocks += md_needed; 1910 spin_unlock(&ei->i_block_reservation_lock); 1911 1912 return 0; /* success */ 1913 }
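/*
 * Worked example (added commentary, not part of the original source):
 * delayed-allocating one data block reserves `1 + md_needed' blocks
 * against the superblock's free-space accounting but only the one data
 * block against quota. With, say, md_needed == 1 (one new extent record
 * in the worst case), ext4_claim_free_blocks() is asked for 2 blocks;
 * the metadata quota charge is deferred to writeout, when the actual
 * metadata usage is known.
 */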
1914 1915 static void ext4_da_release_space(struct inode *inode, int to_free) 1916 { 1917 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1918 struct ext4_inode_info *ei = EXT4_I(inode); 1919 1920 if (!to_free) 1921 return; /* Nothing to release, exit */ 1922 1923 spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 1924 1925 trace_ext4_da_release_space(inode, to_free); 1926 if (unlikely(to_free > ei->i_reserved_data_blocks)) { 1927 /* 1928 * If there aren't enough reserved blocks, then the 1929 * counter is messed up somewhere. Since this 1930 * function is called from the invalidatepage path, it's 1931 * harmless to return without any action. 1932 */ 1933 ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: " 1934 "ino %lu, to_free %d with only %d reserved " 1935 "data blocks\n", inode->i_ino, to_free, 1936 ei->i_reserved_data_blocks); 1937 WARN_ON(1); 1938 to_free = ei->i_reserved_data_blocks; 1939 } 1940 ei->i_reserved_data_blocks -= to_free; 1941 1942 if (ei->i_reserved_data_blocks == 0) { 1943 /* 1944 * We can release all of the reserved metadata blocks 1945 * only when we have written all of the delayed 1946 * allocation blocks. 1947 */ 1948 percpu_counter_sub(&sbi->s_dirtyblocks_counter, 1949 ei->i_reserved_meta_blocks); 1950 ei->i_reserved_meta_blocks = 0; 1951 ei->i_da_metadata_calc_len = 0; 1952 } 1953 1954 /* update fs dirty data blocks counter */ 1955 percpu_counter_sub(&sbi->s_dirtyblocks_counter, to_free); 1956 1957 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1958 1959 dquot_release_reservation_block(inode, to_free); 1960 } 1961 1962 static void ext4_da_page_release_reservation(struct page *page, 1963 unsigned long offset) 1964 { 1965 int to_release = 0; 1966 struct buffer_head *head, *bh; 1967 unsigned int curr_off = 0; 1968 1969 head = page_buffers(page); 1970 bh = head; 1971 do { 1972 unsigned int next_off = curr_off + bh->b_size; 1973 1974 if ((offset <= curr_off) && (buffer_delay(bh))) { 1975 to_release++; 1976 clear_buffer_delay(bh); 1977 } 1978 curr_off = next_off; 1979 } while ((bh = bh->b_this_page) != head); 1980 ext4_da_release_space(page->mapping->host, to_release); 1981 } 1982 1983 /* 1984 * Delayed allocation stuff 1985 */ 1986 1987 /* 1988 * mpage_da_submit_io - walks through the extent of pages and tries to 1989 * write them with the writepage() callback 1990 * 1991 * @mpd->inode: inode 1992 * @mpd->first_page: first page of the extent 1993 * @mpd->next_page: page after the last page of the extent 1994 * 1995 * By the time mpage_da_submit_io() is called we expect all blocks 1996 * to be allocated. This may be wrong if allocation failed. 1997 * 1998 * As pages are already locked by write_cache_pages(), we can't use it 1999 */ 2000 static int mpage_da_submit_io(struct mpage_da_data *mpd, 2001 struct ext4_map_blocks *map) 2002 { 2003 struct pagevec pvec; 2004 unsigned long index, end; 2005 int ret = 0, err, nr_pages, i; 2006 struct inode *inode = mpd->inode; 2007 struct address_space *mapping = inode->i_mapping; 2008 loff_t size = i_size_read(inode); 2009 unsigned int len, block_start; 2010 struct buffer_head *bh, *page_bufs = NULL; 2011 int journal_data = ext4_should_journal_data(inode); 2012 sector_t pblock = 0, cur_logical = 0; 2013 struct ext4_io_submit io_submit; 2014 2015 BUG_ON(mpd->next_page <= mpd->first_page); 2016 memset(&io_submit, 0, sizeof(io_submit)); 2017 /* 2018 * We need to start from the first_page to the next_page - 1 2019 * to make sure we also write the mapped dirty buffer_heads. 2020 * If we look at mpd->b_blocknr we would only be looking 2021 * at the currently mapped buffer_heads. 2022 */ 2023 index = mpd->first_page; 2024 end = mpd->next_page - 1; 2025 2026 pagevec_init(&pvec, 0); 2027 while (index <= end) { 2028 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 2029 if (nr_pages == 0) 2030 break; 2031 for (i = 0; i < nr_pages; i++) { 2032 int commit_write = 0, skip_page = 0; 2033 struct page *page = pvec.pages[i]; 2034 2035 index = page->index; 2036 if (index > end) 2037 break; 2038 2039 if (index == size >> PAGE_CACHE_SHIFT) 2040 len = size & ~PAGE_CACHE_MASK; 2041 else 2042 len = PAGE_CACHE_SIZE; 2043 if (map) { 2044 cur_logical = index << (PAGE_CACHE_SHIFT - 2045 inode->i_blkbits); 2046 pblock = map->m_pblk + (cur_logical - 2047 map->m_lblk); 2048 } 2049 index++; 2050 2051 BUG_ON(!PageLocked(page)); 2052 BUG_ON(PageWriteback(page)); 2053 2054 /* 2055 * If the page does not have buffers (for 2056 * whatever reason), try to create them using 2057 * __block_write_begin. If this fails, 2058 * skip the page and move on.
2059 */ 2060 if (!page_has_buffers(page)) { 2061 if (__block_write_begin(page, 0, len, 2062 noalloc_get_block_write)) { 2063 skip_page: 2064 unlock_page(page); 2065 continue; 2066 } 2067 commit_write = 1; 2068 } 2069 2070 bh = page_bufs = page_buffers(page); 2071 block_start = 0; 2072 do { 2073 if (!bh) 2074 goto skip_page; 2075 if (map && (cur_logical >= map->m_lblk) && 2076 (cur_logical <= (map->m_lblk + 2077 (map->m_len - 1)))) { 2078 if (buffer_delay(bh)) { 2079 clear_buffer_delay(bh); 2080 bh->b_blocknr = pblock; 2081 } 2082 if (buffer_unwritten(bh) || 2083 buffer_mapped(bh)) 2084 BUG_ON(bh->b_blocknr != pblock); 2085 if (map->m_flags & EXT4_MAP_UNINIT) 2086 set_buffer_uninit(bh); 2087 clear_buffer_unwritten(bh); 2088 } 2089 2090 /* skip page if block allocation undone */ 2091 if (buffer_delay(bh) || buffer_unwritten(bh)) 2092 skip_page = 1; 2093 bh = bh->b_this_page; 2094 block_start += bh->b_size; 2095 cur_logical++; 2096 pblock++; 2097 } while (bh != page_bufs); 2098 2099 if (skip_page) 2100 goto skip_page; 2101 2102 if (commit_write) 2103 /* mark the buffer_heads as dirty & uptodate */ 2104 block_commit_write(page, 0, len); 2105 2106 clear_page_dirty_for_io(page); 2107 /* 2108 * Delalloc doesn't support data journalling, 2109 * but eventually maybe we'll lift this 2110 * restriction. 2111 */ 2112 if (unlikely(journal_data && PageChecked(page))) 2113 err = __ext4_journalled_writepage(page, len); 2114 else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT)) 2115 err = ext4_bio_write_page(&io_submit, page, 2116 len, mpd->wbc); 2117 else 2118 err = block_write_full_page(page, 2119 noalloc_get_block_write, mpd->wbc); 2120 2121 if (!err) 2122 mpd->pages_written++; 2123 /* 2124 * In error case, we have to continue because 2125 * remaining pages are still locked 2126 */ 2127 if (ret == 0) 2128 ret = err; 2129 } 2130 pagevec_release(&pvec); 2131 } 2132 ext4_io_submit(&io_submit); 2133 return ret; 2134 } 2135 2136 static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd) 2137 { 2138 int nr_pages, i; 2139 pgoff_t index, end; 2140 struct pagevec pvec; 2141 struct inode *inode = mpd->inode; 2142 struct address_space *mapping = inode->i_mapping; 2143 2144 index = mpd->first_page; 2145 end = mpd->next_page - 1; 2146 while (index <= end) { 2147 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 2148 if (nr_pages == 0) 2149 break; 2150 for (i = 0; i < nr_pages; i++) { 2151 struct page *page = pvec.pages[i]; 2152 if (page->index > end) 2153 break; 2154 BUG_ON(!PageLocked(page)); 2155 BUG_ON(PageWriteback(page)); 2156 block_invalidatepage(page, 0); 2157 ClearPageUptodate(page); 2158 unlock_page(page); 2159 } 2160 index = pvec.pages[nr_pages - 1]->index + 1; 2161 pagevec_release(&pvec); 2162 } 2163 return; 2164 } 2165 2166 static void ext4_print_free_blocks(struct inode *inode) 2167 { 2168 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2169 printk(KERN_CRIT "Total free blocks count %lld\n", 2170 ext4_count_free_blocks(inode->i_sb)); 2171 printk(KERN_CRIT "Free/Dirty block details\n"); 2172 printk(KERN_CRIT "free_blocks=%lld\n", 2173 (long long) percpu_counter_sum(&sbi->s_freeblocks_counter)); 2174 printk(KERN_CRIT "dirty_blocks=%lld\n", 2175 (long long) percpu_counter_sum(&sbi->s_dirtyblocks_counter)); 2176 printk(KERN_CRIT "Block reservation details\n"); 2177 printk(KERN_CRIT "i_reserved_data_blocks=%u\n", 2178 EXT4_I(inode)->i_reserved_data_blocks); 2179 printk(KERN_CRIT "i_reserved_meta_blocks=%u\n", 2180 EXT4_I(inode)->i_reserved_meta_blocks); 2181 return; 2182 } 2183 2184 /* 
2185 * mpage_da_map_and_submit - go through the given space, map it 2186 * if necessary, and then submit it for I/O 2187 * 2188 * @mpd - bh describing space 2189 * 2190 * The function skips space we know is already mapped to disk blocks. 2191 * 2192 */ 2193 static void mpage_da_map_and_submit(struct mpage_da_data *mpd) 2194 { 2195 int err, blks, get_blocks_flags; 2196 struct ext4_map_blocks map, *mapp = NULL; 2197 sector_t next = mpd->b_blocknr; 2198 unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits; 2199 loff_t disksize = EXT4_I(mpd->inode)->i_disksize; 2200 handle_t *handle = NULL; 2201 2202 /* 2203 * If the blocks are mapped already, or we couldn't accumulate 2204 * any blocks, then proceed immediately to the submission stage. 2205 */ 2206 if ((mpd->b_size == 0) || 2207 ((mpd->b_state & (1 << BH_Mapped)) && 2208 !(mpd->b_state & (1 << BH_Delay)) && 2209 !(mpd->b_state & (1 << BH_Unwritten)))) 2210 goto submit_io; 2211 2212 handle = ext4_journal_current_handle(); 2213 BUG_ON(!handle); 2214 2215 /* 2216 * Call ext4_map_blocks() to allocate any delayed allocation 2217 * blocks, or to convert an uninitialized extent to be 2218 * initialized (in the case where we have written into 2219 * one or more preallocated blocks). 2220 * 2221 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to 2222 * indicate that we are on the delayed allocation path. This 2223 * affects functions in many different parts of the allocation 2224 * call path. This flag exists primarily because we don't 2225 * want to change *many* callers, so ext4_map_blocks() 2226 * will set the EXT4_STATE_DELALLOC_RESERVED flag once the 2227 * inode's allocation semaphore is taken. 2228 * 2229 * If the blocks in question were delalloc blocks, set 2230 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting 2231 * variables are updated after the blocks have been allocated. 2232 */ 2233 map.m_lblk = next; 2234 map.m_len = max_blocks; 2235 get_blocks_flags = EXT4_GET_BLOCKS_CREATE; 2236 if (ext4_should_dioread_nolock(mpd->inode)) 2237 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; 2238 if (mpd->b_state & (1 << BH_Delay)) 2239 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; 2240 2241 blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags); 2242 if (blks < 0) { 2243 struct super_block *sb = mpd->inode->i_sb; 2244 2245 err = blks; 2246 /* 2247 * If get_block returns EAGAIN or ENOSPC and there 2248 * appear to be free blocks, we will just let 2249 * mpage_da_submit_io() unlock all of the pages. 2250 */ 2251 if (err == -EAGAIN) 2252 goto submit_io; 2253 2254 if (err == -ENOSPC && 2255 ext4_count_free_blocks(sb)) { 2256 mpd->retval = err; 2257 goto submit_io; 2258 } 2259 2260 /* 2261 * A get_block failure will cause us to loop in 2262 * writepages, because a_ops->writepage won't be able 2263 * to make progress. The page will be redirtied by 2264 * writepage and writepages will again try to write 2265 * the same. 2266 */ 2267 if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) { 2268 ext4_msg(sb, KERN_CRIT, 2269 "delayed block allocation failed for inode %lu " 2270 "at logical offset %llu with max blocks %zd " 2271 "with error %d", mpd->inode->i_ino, 2272 (unsigned long long) next, 2273 mpd->b_size >> mpd->inode->i_blkbits, err); 2274 ext4_msg(sb, KERN_CRIT, 2275 "This should not happen!!
Data will be lost\n"); 2276 if (err == -ENOSPC) 2277 ext4_print_free_blocks(mpd->inode); 2278 } 2279 /* invalidate all the pages */ 2280 ext4_da_block_invalidatepages(mpd); 2281 2282 /* Mark this page range as having been completed */ 2283 mpd->io_done = 1; 2284 return; 2285 } 2286 BUG_ON(blks == 0); 2287 2288 mapp = &map; 2289 if (map.m_flags & EXT4_MAP_NEW) { 2290 struct block_device *bdev = mpd->inode->i_sb->s_bdev; 2291 int i; 2292 2293 for (i = 0; i < map.m_len; i++) 2294 unmap_underlying_metadata(bdev, map.m_pblk + i); 2295 } 2296 2297 if (ext4_should_order_data(mpd->inode)) { 2298 err = ext4_jbd2_file_inode(handle, mpd->inode); 2299 if (err) 2300 /* This only happens if the journal is aborted */ 2301 return; 2302 } 2303 2304 /* 2305 * Update on-disk size along with block allocation. 2306 */ 2307 disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits; 2308 if (disksize > i_size_read(mpd->inode)) 2309 disksize = i_size_read(mpd->inode); 2310 if (disksize > EXT4_I(mpd->inode)->i_disksize) { 2311 ext4_update_i_disksize(mpd->inode, disksize); 2312 err = ext4_mark_inode_dirty(handle, mpd->inode); 2313 if (err) 2314 ext4_error(mpd->inode->i_sb, 2315 "Failed to mark inode %lu dirty", 2316 mpd->inode->i_ino); 2317 } 2318 2319 submit_io: 2320 mpage_da_submit_io(mpd, mapp); 2321 mpd->io_done = 1; 2322 } 2323 2324 #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \ 2325 (1 << BH_Delay) | (1 << BH_Unwritten)) 2326 2327 /* 2328 * mpage_add_bh_to_extent - try to add one more block to extent of blocks 2329 * 2330 * @mpd->lbh - extent of blocks 2331 * @logical - logical number of the block in the file 2332 * @bh - bh of the block (used to access block's state) 2333 * 2334 * The function is used to collect contiguous blocks in the same state. 2335 */ 2336 static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, 2337 sector_t logical, size_t b_size, 2338 unsigned long b_state) 2339 { 2340 sector_t next; 2341 int nrblocks = mpd->b_size >> mpd->inode->i_blkbits; 2342 2343 /* 2344 * XXX Don't go larger than mballoc is willing to allocate 2345 * This is a stopgap solution. We eventually need to fold 2346 * mpage_da_submit_io() into this function and then call 2347 * ext4_map_blocks() multiple times in a loop 2348 */ 2349 if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize) 2350 goto flush_it; 2351 2352 /* Check if the reserved journal credits might overflow */ 2353 if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) { 2354 if (nrblocks >= EXT4_MAX_TRANS_DATA) { 2355 /* 2356 * With the non-extent format we are limited by the 2357 * available journal credits. The total credit needed 2358 * to insert nrblocks contiguous blocks depends on 2359 * nrblocks, so limit nrblocks. 2360 */ 2361 goto flush_it; 2362 } else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) > 2363 EXT4_MAX_TRANS_DATA) { 2364 /* 2365 * Adding the new buffer_head would make it cross the 2366 * allowed limit for which we have journal credits 2367 * reserved, so limit the new bh->b_size 2368 */ 2369 b_size = (EXT4_MAX_TRANS_DATA - nrblocks) << 2370 mpd->inode->i_blkbits; 2371 /* we will do mpage_da_submit_io in the next loop */ 2372 } 2373 } 2374 /* 2375 * First block in the extent 2376 */ 2377 if (mpd->b_size == 0) { 2378 mpd->b_blocknr = logical; 2379 mpd->b_size = b_size; 2380 mpd->b_state = b_state & BH_FLAGS; 2381 return; 2382 } 2383 2384 next = mpd->b_blocknr + nrblocks; 2385 /* 2386 * Can we merge the block to our big extent? 2387 */ 2388 if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) { 2389 mpd->b_size += b_size; 2390 return; 2391 } 2392 2393 flush_it: 2394 /* 2395 * We couldn't merge the block to our extent, so we 2396 * need to flush current extent and start new one 2397 */ 2398 mpage_da_map_and_submit(mpd); 2399 return; 2400 }
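/*
 * Worked example (added commentary, not part of the original source):
 * suppose mpd->b_blocknr == 100 and mpd->b_size currently covers four
 * blocks, so `next' == 104. A dirty delayed buffer at logical block 104
 * whose (b_state & BH_FLAGS) equals mpd->b_state is merged and b_size
 * grows by the buffer's size; a buffer at logical 104 in a different
 * delay/unwritten state, or one at any other logical block, instead
 * flushes the accumulated extent via mpage_da_map_and_submit().
 */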
2401 2402 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh) 2403 { 2404 return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh); 2405 } 2406 2407 /* 2408 * This is a special get_blocks_t callback which is used by 2409 * ext4_da_write_begin(). It will either return a mapped block or 2410 * reserve space for a single block. 2411 * 2412 * For a delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set. 2413 * We also have b_blocknr = -1 and b_bdev initialized properly 2414 * 2415 * For an unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set. 2416 * We also have b_blocknr = the physical block mapping the unwritten extent 2417 * and b_bdev initialized properly. 2418 */ 2419 static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, 2420 struct buffer_head *bh, int create) 2421 { 2422 struct ext4_map_blocks map; 2423 int ret = 0; 2424 sector_t invalid_block = ~((sector_t) 0xffff); 2425 2426 if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es)) 2427 invalid_block = ~0; 2428 2429 BUG_ON(create == 0); 2430 BUG_ON(bh->b_size != inode->i_sb->s_blocksize); 2431 2432 map.m_lblk = iblock; 2433 map.m_len = 1; 2434 2435 /* 2436 * First, we need to know whether the block is already allocated; 2437 * preallocated blocks are unmapped but should be treated 2438 * the same as allocated blocks. 2439 */ 2440 ret = ext4_map_blocks(NULL, inode, &map, 0); 2441 if (ret < 0) 2442 return ret; 2443 if (ret == 0) { 2444 if (buffer_delay(bh)) 2445 return 0; /* Not sure this could or should happen */ 2446 /* 2447 * XXX: __block_write_begin() unmaps passed block, is it OK? 2448 */ 2449 ret = ext4_da_reserve_space(inode, iblock); 2450 if (ret) 2451 /* not enough space to reserve */ 2452 return ret; 2453 2454 map_bh(bh, inode->i_sb, invalid_block); 2455 set_buffer_new(bh); 2456 set_buffer_delay(bh); 2457 return 0; 2458 } 2459 2460 map_bh(bh, inode->i_sb, map.m_pblk); 2461 bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; 2462 2463 if (buffer_unwritten(bh)) { 2464 /* A delayed write to an unwritten bh should be marked 2465 * new and mapped. Mapped ensures that we don't do 2466 * get_block multiple times when we write to the same 2467 * offset, and new ensures that we do proper zero out 2468 * for partial write. 2469 */ 2470 set_buffer_new(bh); 2471 set_buffer_mapped(bh); 2472 } 2473 return 0; 2474 } 2475 2476 /* 2477 * This function is used as a standard get_block_t callback function 2478 * when there is no desire to allocate any blocks. It is used as a 2479 * callback function for block_write_begin() and block_write_full_page(). 2480 * These functions should only try to map a single block at a time. 2481 * 2482 * Since this function doesn't do block allocations even if the caller 2483 * requests it by passing in create=1, it is critically important that 2484 * any caller checks that all buffer heads returned by this function 2485 * are either already mapped or marked for 2486 * delayed allocation before calling block_write_full_page(). Otherwise, 2487 * b_blocknr could be left uninitialized, and the page write functions will 2488 * be taken by surprise.
2489 */ 2490 static int noalloc_get_block_write(struct inode *inode, sector_t iblock, 2491 struct buffer_head *bh_result, int create) 2492 { 2493 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize); 2494 return _ext4_get_block(inode, iblock, bh_result, 0); 2495 } 2496 2497 static int bget_one(handle_t *handle, struct buffer_head *bh) 2498 { 2499 get_bh(bh); 2500 return 0; 2501 } 2502 2503 static int bput_one(handle_t *handle, struct buffer_head *bh) 2504 { 2505 put_bh(bh); 2506 return 0; 2507 } 2508 2509 static int __ext4_journalled_writepage(struct page *page, 2510 unsigned int len) 2511 { 2512 struct address_space *mapping = page->mapping; 2513 struct inode *inode = mapping->host; 2514 struct buffer_head *page_bufs; 2515 handle_t *handle = NULL; 2516 int ret = 0; 2517 int err; 2518 2519 ClearPageChecked(page); 2520 page_bufs = page_buffers(page); 2521 BUG_ON(!page_bufs); 2522 walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one); 2523 /* As soon as we unlock the page, it can go away, but we have 2524 * references to buffers so we are safe */ 2525 unlock_page(page); 2526 2527 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); 2528 if (IS_ERR(handle)) { 2529 ret = PTR_ERR(handle); 2530 goto out; 2531 } 2532 2533 ret = walk_page_buffers(handle, page_bufs, 0, len, NULL, 2534 do_journal_get_write_access); 2535 2536 err = walk_page_buffers(handle, page_bufs, 0, len, NULL, 2537 write_end_fn); 2538 if (ret == 0) 2539 ret = err; 2540 err = ext4_journal_stop(handle); 2541 if (!ret) 2542 ret = err; 2543 2544 walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one); 2545 ext4_set_inode_state(inode, EXT4_STATE_JDATA); 2546 out: 2547 return ret; 2548 } 2549 2550 static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode); 2551 static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate); 2552 2553 /* 2554 * Note that we don't need to start a transaction unless we're journaling data 2555 * because we should have holes filled from ext4_page_mkwrite(). We even don't 2556 * need to file the inode to the transaction's list in ordered mode because if 2557 * we are writing back data added by write(), the inode is already there and if 2558 * we are writing back data modified via mmap(), no one guarantees in which 2559 * transaction the data will hit the disk. In case we are journaling data, we 2560 * cannot start transaction directly because transaction start ranks above page 2561 * lock so we have to do some magic. 2562 * 2563 * This function can get called via... 2564 * - ext4_da_writepages after taking page lock (have journal handle) 2565 * - journal_submit_inode_data_buffers (no journal handle) 2566 * - shrink_page_list via pdflush (no journal handle) 2567 * - grab_page_cache when doing write_begin (have journal handle) 2568 * 2569 * We don't do any block allocation in this function. If we have a page with 2570 * multiple blocks we need to write those buffer_heads that are mapped. This 2571 * is important for mmap-based writes. So if we do, with blocksize 1K, 2572 * truncate(f, 1024); 2573 * a = mmap(f, 0, 4096); 2574 * a[0] = 'a'; 2575 * truncate(f, 4096); 2576 * we have in the page the first buffer_head mapped via the page_mkwrite 2577 * callback but the other buffer_heads would be unmapped but dirty (dirtied 2578 * via do_wp_page). So writepage should write the first block.
If we modify 2579 * the mmap area beyond 1024 we will again get a page_fault and the 2580 * page_mkwrite callback will do the block allocation and mark the 2581 * buffer_heads mapped. 2582 * 2583 * We redirty the page if we have any buffer_heads that are either delayed 2584 * or unwritten in the page. 2585 * 2586 * We can get recursively called as shown below. 2587 * 2588 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() -> 2589 * ext4_writepage() 2590 * 2591 * But since we don't do any block allocation we should not deadlock. 2592 * The page also has the dirty flag cleared so we don't get a recursive 2593 * page_lock. 2594 static int ext4_writepage(struct page *page, 2595 struct writeback_control *wbc) 2596 { 2597 int ret = 0, commit_write = 0; 2598 loff_t size; 2599 unsigned int len; 2600 struct buffer_head *page_bufs = NULL; 2601 struct inode *inode = page->mapping->host; 2602 2603 trace_ext4_writepage(page); 2604 size = i_size_read(inode); 2605 if (page->index == size >> PAGE_CACHE_SHIFT) 2606 len = size & ~PAGE_CACHE_MASK; 2607 else 2608 len = PAGE_CACHE_SIZE; 2609 2610 /* 2611 * If the page does not have buffers (for whatever reason), 2612 * try to create them using __block_write_begin. If this 2613 * fails, redirty the page and move on. 2614 */ 2615 if (!page_has_buffers(page)) { 2616 if (__block_write_begin(page, 0, len, 2617 noalloc_get_block_write)) { 2618 redirty_page: 2619 redirty_page_for_writepage(wbc, page); 2620 unlock_page(page); 2621 return 0; 2622 } 2623 commit_write = 1; 2624 } 2625 page_bufs = page_buffers(page); 2626 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL, 2627 ext4_bh_delay_or_unwritten)) { 2628 /* 2629 * We don't want to do block allocation, so redirty 2630 * the page and return. We may reach here when we do 2631 * a journal commit via journal_submit_inode_data_buffers. 2632 * We can also reach here via shrink_page_list 2633 */ 2634 goto redirty_page; 2635 } 2636 if (commit_write) 2637 /* now mark the buffer_heads as dirty and uptodate */ 2638 block_commit_write(page, 0, len); 2639 2640 if (PageChecked(page) && ext4_should_journal_data(inode)) 2641 /* 2642 * It's mmapped pagecache. Add buffers and journal it. There 2643 * doesn't seem much point in redirtying the page here. 2644 */ 2645 return __ext4_journalled_writepage(page, len); 2646 2647 if (buffer_uninit(page_bufs)) { 2648 ext4_set_bh_endio(page_bufs, inode); 2649 ret = block_write_full_page_endio(page, noalloc_get_block_write, 2650 wbc, ext4_end_io_buffer_write); 2651 } else 2652 ret = block_write_full_page(page, noalloc_get_block_write, 2653 wbc); 2654 2655 return ret; 2656 } 2657 2658 /* 2659 * This is called via ext4_da_writepages() to 2660 * calculate the total number of credits to reserve to fit 2661 * a single extent allocation into a single transaction; 2662 * ext4_da_writepages() will loop calling this before 2663 * the block allocation. 2664 */ 2665 2666 static int ext4_da_writepages_trans_blocks(struct inode *inode) 2667 { 2668 int max_blocks = EXT4_I(inode)->i_reserved_data_blocks; 2669 2670 /* 2671 * With the non-extent format the journal credits needed to 2672 * insert nrblocks contiguous blocks depend on the 2673 * number of contiguous blocks.
So we will limit 2674 * the number of contiguous blocks to a sane value 2675 */ 2676 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) && 2677 (max_blocks > EXT4_MAX_TRANS_DATA)) 2678 max_blocks = EXT4_MAX_TRANS_DATA; 2679 2680 return ext4_chunk_trans_blocks(inode, max_blocks); 2681 } 2682 2683 /* 2684 * write_cache_pages_da - walk the list of dirty pages of the given 2685 * address space and accumulate pages that need writing, and call 2686 * mpage_da_map_and_submit to map a single contiguous memory region 2687 * and then write them. 2688 */ 2689 static int write_cache_pages_da(struct address_space *mapping, 2690 struct writeback_control *wbc, 2691 struct mpage_da_data *mpd, 2692 pgoff_t *done_index) 2693 { 2694 struct buffer_head *bh, *head; 2695 struct inode *inode = mapping->host; 2696 struct pagevec pvec; 2697 unsigned int nr_pages; 2698 sector_t logical; 2699 pgoff_t index, end; 2700 long nr_to_write = wbc->nr_to_write; 2701 int i, tag, ret = 0; 2702 2703 memset(mpd, 0, sizeof(struct mpage_da_data)); 2704 mpd->wbc = wbc; 2705 mpd->inode = inode; 2706 pagevec_init(&pvec, 0); 2707 index = wbc->range_start >> PAGE_CACHE_SHIFT; 2708 end = wbc->range_end >> PAGE_CACHE_SHIFT; 2709 2710 if (wbc->sync_mode == WB_SYNC_ALL) 2711 tag = PAGECACHE_TAG_TOWRITE; 2712 else 2713 tag = PAGECACHE_TAG_DIRTY; 2714 2715 *done_index = index; 2716 while (index <= end) { 2717 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, 2718 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); 2719 if (nr_pages == 0) 2720 return 0; 2721 2722 for (i = 0; i < nr_pages; i++) { 2723 struct page *page = pvec.pages[i]; 2724 2725 /* 2726 * At this point, the page may be truncated or 2727 * invalidated (changing page->mapping to NULL), or 2728 * even swizzled back from swapper_space to tmpfs file 2729 * mapping. However, page->index will not change 2730 * because we have a reference on the page. 2731 */ 2732 if (page->index > end) 2733 goto out; 2734 2735 *done_index = page->index + 1; 2736 2737 /* 2738 * If we can't merge this page, and we have 2739 * accumulated a contiguous region, write it 2740 */ 2741 if ((mpd->next_page != page->index) && 2742 (mpd->next_page != mpd->first_page)) { 2743 mpage_da_map_and_submit(mpd); 2744 goto ret_extent_tail; 2745 } 2746 2747 lock_page(page); 2748 2749 /* 2750 * If the page is no longer dirty, or its 2751 * mapping no longer corresponds to inode we 2752 * are writing (which means it has been 2753 * truncated or invalidated), or the page is 2754 * already under writeback and we are not 2755 * doing a data integrity writeback, skip the page 2756 */ 2757 if (!PageDirty(page) || 2758 (PageWriteback(page) && 2759 (wbc->sync_mode == WB_SYNC_NONE)) || 2760 unlikely(page->mapping != mapping)) { 2761 unlock_page(page); 2762 continue; 2763 } 2764 2765 wait_on_page_writeback(page); 2766 BUG_ON(PageWriteback(page)); 2767 2768 if (mpd->next_page != page->index) 2769 mpd->first_page = page->index; 2770 mpd->next_page = page->index + 1; 2771 logical = (sector_t) page->index << 2772 (PAGE_CACHE_SHIFT - inode->i_blkbits); 2773 2774 if (!page_has_buffers(page)) { 2775 mpage_add_bh_to_extent(mpd, logical, 2776 PAGE_CACHE_SIZE, 2777 (1 << BH_Dirty) | (1 << BH_Uptodate)); 2778 if (mpd->io_done) 2779 goto ret_extent_tail; 2780 } else { 2781 /* 2782 * Page with regular buffer heads, 2783 * just add all dirty ones 2784 */ 2785 head = page_buffers(page); 2786 bh = head; 2787 do { 2788 BUG_ON(buffer_locked(bh)); 2789 /* 2790 * We need to try to allocate 2791 * unmapped blocks in the same page.
2792 * Otherwise we won't make progress 2793 * with the page in ext4_writepage 2794 */ 2795 if (ext4_bh_delay_or_unwritten(NULL, bh)) { 2796 mpage_add_bh_to_extent(mpd, logical, 2797 bh->b_size, 2798 bh->b_state); 2799 if (mpd->io_done) 2800 goto ret_extent_tail; 2801 } else if (buffer_dirty(bh) && (buffer_mapped(bh))) { 2802 /* 2803 * mapped dirty buffer. We need 2804 * to update the b_state 2805 * because we look at b_state 2806 * in mpage_da_map_blocks. We 2807 * don't update b_size because 2808 * if we find an unmapped 2809 * buffer_head later we need to 2810 * use the b_state flag of that 2811 * buffer_head. 2812 */ 2813 if (mpd->b_size == 0) 2814 mpd->b_state = bh->b_state & BH_FLAGS; 2815 } 2816 logical++; 2817 } while ((bh = bh->b_this_page) != head); 2818 } 2819 2820 if (nr_to_write > 0) { 2821 nr_to_write--; 2822 if (nr_to_write == 0 && 2823 wbc->sync_mode == WB_SYNC_NONE) 2824 /* 2825 * We stop writing back only if we are 2826 * not doing integrity sync. In case of 2827 * integrity sync we have to keep going 2828 * because someone may be concurrently 2829 * dirtying pages, and we might have 2830 * synced a lot of newly appeared dirty 2831 * pages, but have not synced all of the 2832 * old dirty pages. 2833 */ 2834 goto out; 2835 } 2836 } 2837 pagevec_release(&pvec); 2838 cond_resched(); 2839 } 2840 return 0; 2841 ret_extent_tail: 2842 ret = MPAGE_DA_EXTENT_TAIL; 2843 out: 2844 pagevec_release(&pvec); 2845 cond_resched(); 2846 return ret; 2847 } 2848 2849 2850 static int ext4_da_writepages(struct address_space *mapping, 2851 struct writeback_control *wbc) 2852 { 2853 pgoff_t index; 2854 int range_whole = 0; 2855 handle_t *handle = NULL; 2856 struct mpage_da_data mpd; 2857 struct inode *inode = mapping->host; 2858 int pages_written = 0; 2859 unsigned int max_pages; 2860 int range_cyclic, cycled = 1, io_done = 0; 2861 int needed_blocks, ret = 0; 2862 long desired_nr_to_write, nr_to_writebump = 0; 2863 loff_t range_start = wbc->range_start; 2864 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); 2865 pgoff_t done_index = 0; 2866 pgoff_t end; 2867 2868 trace_ext4_da_writepages(inode, wbc); 2869 2870 /* 2871 * No pages to write? This is mainly a kludge to avoid starting 2872 * a transaction for special inodes like journal inode on last iput() 2873 * because that could violate lock ordering on umount 2874 */ 2875 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 2876 return 0; 2877 2878 /* 2879 * If the filesystem has aborted, it is read-only, so return 2880 * right away instead of dumping stack traces later on that 2881 * will obscure the real source of the problem. We test 2882 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because 2883 * the latter could be true if the filesystem is mounted 2884 * read-only, and in that case, ext4_da_writepages should 2885 * *never* be called, so if that ever happens, we would want 2886 * the stack trace. 
2887 */ 2888 if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) 2889 return -EROFS; 2890 2891 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 2892 range_whole = 1; 2893 2894 range_cyclic = wbc->range_cyclic; 2895 if (wbc->range_cyclic) { 2896 index = mapping->writeback_index; 2897 if (index) 2898 cycled = 0; 2899 wbc->range_start = index << PAGE_CACHE_SHIFT; 2900 wbc->range_end = LLONG_MAX; 2901 wbc->range_cyclic = 0; 2902 end = -1; 2903 } else { 2904 index = wbc->range_start >> PAGE_CACHE_SHIFT; 2905 end = wbc->range_end >> PAGE_CACHE_SHIFT; 2906 } 2907 2908 /* 2909 * This works around two forms of stupidity. The first is in 2910 * the writeback code, which caps the maximum number of pages 2911 * written to be 1024 pages. This is wrong on multiple 2912 * levels; different architectures have a different page size, 2913 * which changes the maximum amount of data which gets 2914 * written. Secondly, 4 megabytes is way too small. XFS 2915 * forces this value to be 16 megabytes by multiplying 2916 * nr_to_write parameter by four, and then relies on its 2917 * allocator to allocate larger extents to make them 2918 * contiguous. Unfortunately this brings us to the second 2919 * stupidity, which is that ext4's mballoc code only allocates 2920 * at most 2048 blocks. So we force contiguous writes up to 2921 * the number of dirty blocks in the inode, or 2922 * sbi->s_max_writeback_mb_bump, whichever is smaller. 2923 */ 2924 max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT); 2925 if (!range_cyclic && range_whole) { 2926 if (wbc->nr_to_write == LONG_MAX) 2927 desired_nr_to_write = wbc->nr_to_write; 2928 else 2929 desired_nr_to_write = wbc->nr_to_write * 8; 2930 } else 2931 desired_nr_to_write = ext4_num_dirty_pages(inode, index, 2932 max_pages); 2933 if (desired_nr_to_write > max_pages) 2934 desired_nr_to_write = max_pages; 2935 2936 if (wbc->nr_to_write < desired_nr_to_write) { 2937 nr_to_writebump = desired_nr_to_write - wbc->nr_to_write; 2938 wbc->nr_to_write = desired_nr_to_write; 2939 } 2940 2941 retry: 2942 if (wbc->sync_mode == WB_SYNC_ALL) 2943 tag_pages_for_writeback(mapping, index, end); 2944 2945 while (!ret && wbc->nr_to_write > 0) { 2946 2947 /* 2948 * We insert one extent at a time, so we need 2949 * the credits for a single extent allocation. 2950 * Journalled mode is currently not supported 2951 * by delalloc. 2952 */ 2953 BUG_ON(ext4_should_journal_data(inode)); 2954 needed_blocks = ext4_da_writepages_trans_blocks(inode); 2955 2956 /* start a new transaction */ 2957 handle = ext4_journal_start(inode, needed_blocks); 2958 if (IS_ERR(handle)) { 2959 ret = PTR_ERR(handle); 2960 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " 2961 "%ld pages, ino %lu; err %d", __func__, 2962 wbc->nr_to_write, inode->i_ino, ret); 2963 goto out_writepages; 2964 } 2965 2966 /* 2967 * Now call write_cache_pages_da() to find the next 2968 * contiguous region of logical blocks that need 2969 * blocks to be allocated by ext4 and submit them. 2970 */ 2971 ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index); 2972 /* 2973 * If we have a contiguous extent of pages and we 2974 * haven't done the I/O yet, map the blocks and submit 2975 * them for I/O.
2976 */ 2977 if (!mpd.io_done && mpd.next_page != mpd.first_page) { 2978 mpage_da_map_and_submit(&mpd); 2979 ret = MPAGE_DA_EXTENT_TAIL; 2980 } 2981 trace_ext4_da_write_pages(inode, &mpd); 2982 wbc->nr_to_write -= mpd.pages_written; 2983 2984 ext4_journal_stop(handle); 2985 2986 if ((mpd.retval == -ENOSPC) && sbi->s_journal) { 2987 /* commit the transaction which would 2988 * free blocks released in the transaction 2989 * and try again 2990 */ 2991 jbd2_journal_force_commit_nested(sbi->s_journal); 2992 ret = 0; 2993 } else if (ret == MPAGE_DA_EXTENT_TAIL) { 2994 /* 2995 * Got one extent; now try with the 2996 * rest of the pages 2997 */ 2998 pages_written += mpd.pages_written; 2999 ret = 0; 3000 io_done = 1; 3001 } else if (wbc->nr_to_write) 3002 /* 3003 * There is no more writeout needed, or we requested 3004 * a nonblocking writeout and found 3005 * the device congested 3006 */ 3007 break; 3008 } 3009 if (!io_done && !cycled) { 3010 cycled = 1; 3011 index = 0; 3012 wbc->range_start = index << PAGE_CACHE_SHIFT; 3013 wbc->range_end = mapping->writeback_index - 1; 3014 goto retry; 3015 } 3016 3017 /* Update index */ 3018 wbc->range_cyclic = range_cyclic; 3019 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 3020 /* 3021 * set the writeback_index so that range_cyclic 3022 * mode will write it back later 3023 */ 3024 mapping->writeback_index = done_index; 3025 3026 out_writepages: 3027 wbc->nr_to_write -= nr_to_writebump; 3028 wbc->range_start = range_start; 3029 trace_ext4_da_writepages_result(inode, wbc, ret, pages_written); 3030 return ret; 3031 } 3032 3033 #define FALL_BACK_TO_NONDELALLOC 1 3034 static int ext4_nonda_switch(struct super_block *sb) 3035 { 3036 s64 free_blocks, dirty_blocks; 3037 struct ext4_sb_info *sbi = EXT4_SB(sb); 3038 3039 /* 3040 * Switch to non-delalloc mode if we are running low 3041 * on free blocks. The free-block accounting via percpu 3042 * counters can get slightly wrong with percpu_counter_batch getting 3043 * accumulated on each CPU without updating global counters, and 3044 * delalloc needs accurate free-block accounting. So switch 3045 * to non-delalloc when we are near the error range. 3046 */ 3047 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); 3048 dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter); 3049 if (2 * free_blocks < 3 * dirty_blocks || 3050 free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) { 3051 /* 3052 * free block count is less than 150% of dirty blocks, 3053 * or free blocks are less than the watermark 3054 */ 3055 return 1; 3056 } 3057 /* 3058 * Even if we don't switch but are nearing capacity, 3059 * start pushing delalloc when 1/2 of free blocks are dirty. 3060 */ 3061 if (free_blocks < 2 * dirty_blocks) 3062 writeback_inodes_sb_if_idle(sb); 3063 3064 return 0; 3065 }
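/*
 * Worked example (added commentary, not part of the original source):
 * with free_blocks = 140 and dirty_blocks = 100, the first test fires
 * because 2 * 140 = 280 < 300 = 3 * 100, i.e. free space is below 150%
 * of the outstanding delalloc blocks, so the write falls back to
 * nondelalloc. With free_blocks = 190 the 150% test passes (380 >= 300)
 * and, assuming the watermark test also passes, we stay in delalloc
 * mode but still kick background writeback, since 190 < 2 * 100.
 */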
3066 3067 static int ext4_da_write_begin(struct file *file, struct address_space *mapping, 3068 loff_t pos, unsigned len, unsigned flags, 3069 struct page **pagep, void **fsdata) 3070 { 3071 int ret, retries = 0; 3072 struct page *page; 3073 pgoff_t index; 3074 struct inode *inode = mapping->host; 3075 handle_t *handle; 3076 3077 index = pos >> PAGE_CACHE_SHIFT; 3078 3079 if (ext4_nonda_switch(inode->i_sb)) { 3080 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; 3081 return ext4_write_begin(file, mapping, pos, 3082 len, flags, pagep, fsdata); 3083 } 3084 *fsdata = (void *)0; 3085 trace_ext4_da_write_begin(inode, pos, len, flags); 3086 retry: 3087 /* 3088 * With delayed allocation, we don't log the i_disksize update 3089 * if there is delayed block allocation. But we still need 3090 * to journal the i_disksize update if we write to the end of 3091 * a file that has an already mapped buffer. 3092 */ 3093 handle = ext4_journal_start(inode, 1); 3094 if (IS_ERR(handle)) { 3095 ret = PTR_ERR(handle); 3096 goto out; 3097 } 3098 /* We cannot recurse into the filesystem as the transaction is already 3099 * started */ 3100 flags |= AOP_FLAG_NOFS; 3101 3102 page = grab_cache_page_write_begin(mapping, index, flags); 3103 if (!page) { 3104 ext4_journal_stop(handle); 3105 ret = -ENOMEM; 3106 goto out; 3107 } 3108 *pagep = page; 3109 3110 ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep); 3111 if (ret < 0) { 3112 unlock_page(page); 3113 ext4_journal_stop(handle); 3114 page_cache_release(page); 3115 /* 3116 * block_write_begin may have instantiated a few blocks 3117 * outside i_size. Trim these off again. Don't need 3118 * i_size_read because we hold i_mutex. 3119 */ 3120 if (pos + len > inode->i_size) 3121 ext4_truncate_failed_write(inode); 3122 } 3123 3124 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 3125 goto retry; 3126 out: 3127 return ret; 3128 }
3129 3130 /* 3131 * Check whether we should update i_disksize 3132 * when writing to the end of the file without requiring block allocation 3133 */ 3134 static int ext4_da_should_update_i_disksize(struct page *page, 3135 unsigned long offset) 3136 { 3137 struct buffer_head *bh; 3138 struct inode *inode = page->mapping->host; 3139 unsigned int idx; 3140 int i; 3141 3142 bh = page_buffers(page); 3143 idx = offset >> inode->i_blkbits; 3144 3145 for (i = 0; i < idx; i++) 3146 bh = bh->b_this_page; 3147 3148 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh)) 3149 return 0; 3150 return 1; 3151 } 3152 3153 static int ext4_da_write_end(struct file *file, 3154 struct address_space *mapping, 3155 loff_t pos, unsigned len, unsigned copied, 3156 struct page *page, void *fsdata) 3157 { 3158 struct inode *inode = mapping->host; 3159 int ret = 0, ret2; 3160 handle_t *handle = ext4_journal_current_handle(); 3161 loff_t new_i_size; 3162 unsigned long start, end; 3163 int write_mode = (int)(unsigned long)fsdata; 3164 3165 if (write_mode == FALL_BACK_TO_NONDELALLOC) { 3166 if (ext4_should_order_data(inode)) { 3167 return ext4_ordered_write_end(file, mapping, pos, 3168 len, copied, page, fsdata); 3169 } else if (ext4_should_writeback_data(inode)) { 3170 return ext4_writeback_write_end(file, mapping, pos, 3171 len, copied, page, fsdata); 3172 } else { 3173 BUG(); 3174 } 3175 } 3176 3177 trace_ext4_da_write_end(inode, pos, len, copied); 3178 start = pos & (PAGE_CACHE_SIZE - 1); 3179 end = start + copied - 1; 3180 3181 /* 3182 * generic_write_end() will run mark_inode_dirty() if i_size 3183 * changes. So let's piggyback the i_disksize mark_inode_dirty 3184 * into that. 3185 */ 3186 3187 new_i_size = pos + copied; 3188 if (new_i_size > EXT4_I(inode)->i_disksize) { 3189 if (ext4_da_should_update_i_disksize(page, end)) { 3190 down_write(&EXT4_I(inode)->i_data_sem); 3191 if (new_i_size > EXT4_I(inode)->i_disksize) { 3192 /* 3193 * Updating i_disksize when extending the file 3194 * without needing block allocation 3195 */ 3196 if (ext4_should_order_data(inode)) 3197 ret = ext4_jbd2_file_inode(handle, 3198 inode); 3199 3200 EXT4_I(inode)->i_disksize = new_i_size; 3201 } 3202 up_write(&EXT4_I(inode)->i_data_sem); 3203 /* We need to mark the inode dirty even if 3204 * new_i_size is less than inode->i_size 3205 * but greater than i_disksize. (hint: delalloc) 3206 */ 3207 ext4_mark_inode_dirty(handle, inode); 3208 } 3209 } 3210 ret2 = generic_write_end(file, mapping, pos, len, copied, 3211 page, fsdata); 3212 copied = ret2; 3213 if (ret2 < 0) 3214 ret = ret2; 3215 ret2 = ext4_journal_stop(handle); 3216 if (!ret) 3217 ret = ret2; 3218 3219 return ret ? ret : copied; 3220 } 3221 3222 static void ext4_da_invalidatepage(struct page *page, unsigned long offset) 3223 { 3224 /* 3225 * Drop reserved blocks 3226 */ 3227 BUG_ON(!PageLocked(page)); 3228 if (!page_has_buffers(page)) 3229 goto out; 3230 3231 ext4_da_page_release_reservation(page, offset); 3232 3233 out: 3234 ext4_invalidatepage(page, offset); 3235 3236 return; 3237 }
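/*
 * Illustrative note (added commentary, not part of the original source):
 * under delalloc, i_disksize deliberately trails i_size. Appending 4K
 * into a hole bumps i_size at write time while the block is still only
 * delayed-allocated, so i_disksize is pushed forward later, when
 * writeback actually allocates the block. Appending over an already
 * mapped buffer needs no allocation, which is the case
 * ext4_da_should_update_i_disksize() detects above so that
 * ext4_da_write_end() can update i_disksize immediately.
 */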
3238 3239 /* 3240 * Force all delayed allocation blocks to be allocated for a given inode. 3241 */ 3242 int ext4_alloc_da_blocks(struct inode *inode) 3243 { 3244 trace_ext4_alloc_da_blocks(inode); 3245 3246 if (!EXT4_I(inode)->i_reserved_data_blocks && 3247 !EXT4_I(inode)->i_reserved_meta_blocks) 3248 return 0; 3249 3250 /* 3251 * We do something simple for now. The filemap_flush() will 3252 * also start triggering a write of the data blocks, which is 3253 * not strictly speaking necessary (and for users of 3254 * laptop_mode, not even desirable). However, to do otherwise 3255 * would require replicating code paths in: 3256 * 3257 * ext4_da_writepages() -> 3258 * write_cache_pages() ---> (via passed in callback function) 3259 * __mpage_da_writepage() --> 3260 * mpage_add_bh_to_extent() 3261 * mpage_da_map_blocks() 3262 * 3263 * The problem is that write_cache_pages(), located in 3264 * mm/page-writeback.c, marks pages clean in preparation for 3265 * doing I/O, which is not desirable if we're not planning on 3266 * doing I/O at all. 3267 * 3268 * We could call write_cache_pages(), and then redirty all of 3269 * the pages by calling redirty_page_for_writepage() but that 3270 * would be ugly in the extreme. So instead we would need to 3271 * replicate parts of the code in the above functions, 3272 * simplifying them because we wouldn't actually intend to 3273 * write out the pages, but rather only collect contiguous 3274 * logical block extents, call the multi-block allocator, and 3275 * then update the buffer heads with the block allocations. 3276 * 3277 * For now, though, we'll cheat by calling filemap_flush(), 3278 * which will map the blocks, and start the I/O, but not 3279 * actually wait for the I/O to complete. 3280 */ 3281 return filemap_flush(inode->i_mapping); 3282 } 3283 3284 /* 3285 * bmap() is special. It gets used by applications such as lilo and by 3286 * the swapper to find the on-disk block of a specific piece of data. 3287 * 3288 * Naturally, this is dangerous if the block concerned is still in the 3289 * journal. If somebody makes a swapfile on an ext4 data-journaling 3290 * filesystem and enables swap, then they may get a nasty shock when the 3291 * data getting swapped to that swapfile suddenly gets overwritten by 3292 * the original zeros written out previously to the journal and 3293 * awaiting writeback in the kernel's buffer cache. 3294 * 3295 * So, if we see any bmap calls here on a modified, data-journaled file, 3296 * take extra steps to flush any blocks which might be in the cache. 3297 */ 3298 static sector_t ext4_bmap(struct address_space *mapping, sector_t block) 3299 { 3300 struct inode *inode = mapping->host; 3301 journal_t *journal; 3302 int err; 3303 3304 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && 3305 test_opt(inode->i_sb, DELALLOC)) { 3306 /* 3307 * With delalloc we want to sync the file 3308 * so that we can make sure we allocate 3309 * blocks for the file 3310 */ 3311 filemap_write_and_wait(mapping); 3312 } 3313 3314 if (EXT4_JOURNAL(inode) && 3315 ext4_test_inode_state(inode, EXT4_STATE_JDATA)) { 3316 /* 3317 * This is a REALLY heavyweight approach, but the use of 3318 * bmap on dirty files is expected to be extremely rare: 3319 * only if we run lilo or swapon on a freshly made file 3320 * do we expect this to happen. 3321 * 3322 * (bmap requires CAP_SYS_RAWIO so this does not 3323 * represent an unprivileged user DOS attack --- we'd be 3324 * in trouble if mortal users could trigger this path at 3325 * will.) 3326 * 3327 * NB. EXT4_STATE_JDATA is not set on files other than 3328 * regular files.
If somebody wants to bmap a directory 3329 * or symlink and gets confused because the buffer 3330 * hasn't yet been flushed to disk, they deserve 3331 * everything they get. 3332 */ 3333 3334 ext4_clear_inode_state(inode, EXT4_STATE_JDATA); 3335 journal = EXT4_JOURNAL(inode); 3336 jbd2_journal_lock_updates(journal); 3337 err = jbd2_journal_flush(journal); 3338 jbd2_journal_unlock_updates(journal); 3339 3340 if (err) 3341 return 0; 3342 } 3343 3344 return generic_block_bmap(mapping, block, ext4_get_block); 3345 } 3346 3347 static int ext4_readpage(struct file *file, struct page *page) 3348 { 3349 trace_ext4_readpage(page); 3350 return mpage_readpage(page, ext4_get_block); 3351 } 3352 3353 static int 3354 ext4_readpages(struct file *file, struct address_space *mapping, 3355 struct list_head *pages, unsigned nr_pages) 3356 { 3357 return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); 3358 } 3359 3360 static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset) 3361 { 3362 struct buffer_head *head, *bh; 3363 unsigned int curr_off = 0; 3364 3365 if (!page_has_buffers(page)) 3366 return; 3367 head = bh = page_buffers(page); 3368 do { 3369 if (offset <= curr_off && test_clear_buffer_uninit(bh) 3370 && bh->b_private) { 3371 ext4_free_io_end(bh->b_private); 3372 bh->b_private = NULL; 3373 bh->b_end_io = NULL; 3374 } 3375 curr_off = curr_off + bh->b_size; 3376 bh = bh->b_this_page; 3377 } while (bh != head); 3378 } 3379 3380 static void ext4_invalidatepage(struct page *page, unsigned long offset) 3381 { 3382 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3383 3384 trace_ext4_invalidatepage(page, offset); 3385 3386 /* 3387 * free any io_end structure allocated for buffers to be discarded 3388 */ 3389 if (ext4_should_dioread_nolock(page->mapping->host)) 3390 ext4_invalidatepage_free_endio(page, offset); 3391 /* 3392 * If it's a full truncate we just forget about the pending dirtying 3393 */ 3394 if (offset == 0) 3395 ClearPageChecked(page); 3396 3397 if (journal) 3398 jbd2_journal_invalidatepage(journal, page, offset); 3399 else 3400 block_invalidatepage(page, offset); 3401 } 3402 3403 static int ext4_releasepage(struct page *page, gfp_t wait) 3404 { 3405 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3406 3407 trace_ext4_releasepage(page); 3408 3409 WARN_ON(PageChecked(page)); 3410 if (!page_has_buffers(page)) 3411 return 0; 3412 if (journal) 3413 return jbd2_journal_try_to_free_buffers(journal, page, wait); 3414 else 3415 return try_to_free_buffers(page); 3416 } 3417 3418 /* 3419 * O_DIRECT for ext3 (or indirect map) based files 3420 * 3421 * If the O_DIRECT write will extend the file then add this inode to the 3422 * orphan list. So recovery will truncate it back to the original size 3423 * if the machine crashes during the write. 3424 * 3425 * If the O_DIRECT write is instantiating holes inside i_size and the machine 3426 * crashes then stale disk data _may_ be exposed inside the file. But current 3427 * VFS code falls back into buffered path in that case so we are safe.
3428 */ 3429 static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, 3430 const struct iovec *iov, loff_t offset, 3431 unsigned long nr_segs) 3432 { 3433 struct file *file = iocb->ki_filp; 3434 struct inode *inode = file->f_mapping->host; 3435 struct ext4_inode_info *ei = EXT4_I(inode); 3436 handle_t *handle; 3437 ssize_t ret; 3438 int orphan = 0; 3439 size_t count = iov_length(iov, nr_segs); 3440 int retries = 0; 3441 3442 if (rw == WRITE) { 3443 loff_t final_size = offset + count; 3444 3445 if (final_size > inode->i_size) { 3446 /* Credits for sb + inode write */ 3447 handle = ext4_journal_start(inode, 2); 3448 if (IS_ERR(handle)) { 3449 ret = PTR_ERR(handle); 3450 goto out; 3451 } 3452 ret = ext4_orphan_add(handle, inode); 3453 if (ret) { 3454 ext4_journal_stop(handle); 3455 goto out; 3456 } 3457 orphan = 1; 3458 ei->i_disksize = inode->i_size; 3459 ext4_journal_stop(handle); 3460 } 3461 } 3462 3463 retry: 3464 if (rw == READ && ext4_should_dioread_nolock(inode)) 3465 ret = __blockdev_direct_IO(rw, iocb, inode, 3466 inode->i_sb->s_bdev, iov, 3467 offset, nr_segs, 3468 ext4_get_block, NULL, NULL, 0); 3469 else { 3470 ret = blockdev_direct_IO(rw, iocb, inode, 3471 inode->i_sb->s_bdev, iov, 3472 offset, nr_segs, 3473 ext4_get_block, NULL); 3474 3475 if (unlikely((rw & WRITE) && ret < 0)) { 3476 loff_t isize = i_size_read(inode); 3477 loff_t end = offset + iov_length(iov, nr_segs); 3478 3479 if (end > isize) 3480 ext4_truncate_failed_write(inode); 3481 } 3482 } 3483 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 3484 goto retry; 3485 3486 if (orphan) { 3487 int err; 3488 3489 /* Credits for sb + inode write */ 3490 handle = ext4_journal_start(inode, 2); 3491 if (IS_ERR(handle)) { 3492 /* This is really bad luck. We've written the data 3493 * but cannot extend i_size. Bail out and pretend 3494 * the write failed... */ 3495 ret = PTR_ERR(handle); 3496 if (inode->i_nlink) 3497 ext4_orphan_del(NULL, inode); 3498 3499 goto out; 3500 } 3501 if (inode->i_nlink) 3502 ext4_orphan_del(handle, inode); 3503 if (ret > 0) { 3504 loff_t end = offset + ret; 3505 if (end > inode->i_size) { 3506 ei->i_disksize = end; 3507 i_size_write(inode, end); 3508 /* 3509 * We're going to return a positive `ret' 3510 * here due to non-zero-length I/O, so there's 3511 * no way of reporting error returns from 3512 * ext4_mark_inode_dirty() to userspace. So 3513 * ignore it. 3514 */ 3515 ext4_mark_inode_dirty(handle, inode); 3516 } 3517 } 3518 err = ext4_journal_stop(handle); 3519 if (ret == 0) 3520 ret = err; 3521 } 3522 out: 3523 return ret; 3524 }
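/*
 * Illustrative sequence (added commentary, not part of the original
 * source) for the extent-based DIO write path implemented below:
 *
 *   1. ext4_get_block_write() allocates an uninitialized extent for the
 *      range being written
 *   2. the block layer performs the I/O into those blocks
 *   3. on completion, ext4_end_io_dio() queues the io_end to a workqueue
 *      (or, for sync DIO, the caller converts inline)
 *   4. the worker calls ext4_convert_unwritten_extents(), so readers only
 *      ever see the extent as initialized after the data is on disk
 */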
3525 3526 /* 3527 * ext4_get_block used when preparing for a DIO write or buffer write. 3528 * We allocate an uninitialized extent if blocks haven't been allocated. 3529 * The extent will be converted to initialized after the IO is complete. 3530 */ 3531 static int ext4_get_block_write(struct inode *inode, sector_t iblock, 3532 struct buffer_head *bh_result, int create) 3533 { 3534 ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n", 3535 inode->i_ino, create); 3536 return _ext4_get_block(inode, iblock, bh_result, 3537 EXT4_GET_BLOCKS_IO_CREATE_EXT); 3538 } 3539 3540 static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, 3541 ssize_t size, void *private, int ret, 3542 bool is_async) 3543 { 3544 ext4_io_end_t *io_end = iocb->private; 3545 struct workqueue_struct *wq; 3546 unsigned long flags; 3547 struct ext4_inode_info *ei; 3548 3549 /* if not async direct IO, or dio with 0 bytes written, just return */ 3550 if (!io_end || !size) 3551 goto out; 3552 3553 ext_debug("ext4_end_io_dio(): io_end 0x%p " 3554 "for inode %lu, iocb 0x%p, offset %llu, size %llu\n", 3555 iocb->private, io_end->inode->i_ino, iocb, offset, 3556 size); 3557 3558 /* if not AIO DIO with unwritten extents, just free the io_end and return */ 3559 if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) { 3560 ext4_free_io_end(io_end); 3561 iocb->private = NULL; 3562 out: 3563 if (is_async) 3564 aio_complete(iocb, ret, 0); 3565 return; 3566 } 3567 3568 io_end->offset = offset; 3569 io_end->size = size; 3570 if (is_async) { 3571 io_end->iocb = iocb; 3572 io_end->result = ret; 3573 } 3574 wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq; 3575 3576 /* Add the io_end to the per-inode completed aio dio list */ 3577 ei = EXT4_I(io_end->inode); 3578 spin_lock_irqsave(&ei->i_completed_io_lock, flags); 3579 list_add_tail(&io_end->list, &ei->i_completed_io_list); 3580 spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); 3581 3582 /* queue the work to convert unwritten extents to written */ 3583 queue_work(wq, &io_end->work); 3584 iocb->private = NULL; 3585 } 3586 3587 static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate) 3588 { 3589 ext4_io_end_t *io_end = bh->b_private; 3590 struct workqueue_struct *wq; 3591 struct inode *inode; 3592 unsigned long flags; 3593 3594 if (!test_clear_buffer_uninit(bh) || !io_end) 3595 goto out; 3596 3597 if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) { 3598 printk("sb umounted, discard end_io request for inode %lu\n", 3599 io_end->inode->i_ino); 3600 ext4_free_io_end(io_end); 3601 goto out; 3602 } 3603 3604 io_end->flag = EXT4_IO_END_UNWRITTEN; 3605 inode = io_end->inode; 3606 3607 /* Add the io_end to the per-inode completed io list */ 3608 spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags); 3609 list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list); 3610 spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags); 3611 3612 wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq; 3613 /* queue the work to convert unwritten extents to written */ 3614 queue_work(wq, &io_end->work); 3615 out: 3616 bh->b_private = NULL; 3617 bh->b_end_io = NULL; 3618 clear_buffer_uninit(bh); 3619 end_buffer_async_write(bh, uptodate); 3620 } 3621 3622 static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode) 3623 { 3624 ext4_io_end_t *io_end; 3625 struct page *page = bh->b_page; 3626 loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT; 3627 size_t size = bh->b_size; 3628 3629 retry: 3630 io_end = ext4_init_io_end(inode, GFP_ATOMIC); 3631 if (!io_end) { 3632 pr_warn_ratelimited("%s: allocation failed\n", __func__); 3633 schedule(); 3634 goto retry; 3635 } 3636 io_end->offset = offset; 3637 io_end->size = size; 3638 /* 3639 * We need to hold a reference to the page to make sure it 3640 * doesn't get evicted

static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
{
	ext4_io_end_t *io_end = bh->b_private;
	struct workqueue_struct *wq;
	struct inode *inode;
	unsigned long flags;

	if (!test_clear_buffer_uninit(bh) || !io_end)
		goto out;

	if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) {
		printk(KERN_ERR "sb umounted, discard end_io request for inode %lu\n",
		       io_end->inode->i_ino);
		ext4_free_io_end(io_end);
		goto out;
	}

	io_end->flag = EXT4_IO_END_UNWRITTEN;
	inode = io_end->inode;

	/* Add the io_end to the per-inode completed io list */
	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
	list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);

	wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
	/* queue the work to convert unwritten extents to written */
	queue_work(wq, &io_end->work);
out:
	bh->b_private = NULL;
	bh->b_end_io = NULL;
	clear_buffer_uninit(bh);
	end_buffer_async_write(bh, uptodate);
}

static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
{
	ext4_io_end_t *io_end;
	struct page *page = bh->b_page;
	loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT;
	size_t size = bh->b_size;

retry:
	io_end = ext4_init_io_end(inode, GFP_ATOMIC);
	if (!io_end) {
		pr_warn_ratelimited("%s: allocation fail\n", __func__);
		schedule();
		goto retry;
	}
	io_end->offset = offset;
	io_end->size = size;
	/*
	 * We need to hold a reference to the page to make sure it
	 * doesn't get evicted before ext4_end_io_work() has a chance
	 * to convert the extent from unwritten to written.
	 */
	io_end->page = page;
	get_page(io_end->page);

	bh->b_private = io_end;
	bh->b_end_io = ext4_end_io_buffer_write;
	return 0;
}
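
/*
 * Editorial note: a hedged userspace sketch of the scenario handled by the
 * end_io machinery above and by ext4_ext_direct_IO() below: a direct write
 * into a preallocated (unwritten) region, whose covering extent must later
 * be converted to written.  Not part of ext4; path and sizes are arbitrary.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	int fd = open("/mnt/ext4/prealloc", O_RDWR | O_CREAT | O_DIRECT, 0644);

	if (fd < 0)
		return 1;
	/* preallocate 1 MiB of unwritten extents */
	if (posix_fallocate(fd, 0, 1 << 20))
		return 1;
	if (posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 0x5a, 4096);
	/* DIO into the preallocated range; ext4 converts the covered
	 * extent from unwritten to written once the IO completes */
	if (pwrite(fd, buf, 4096, 0) != 4096)
		return 1;
	free(buf);
	return close(fd);
}
#endif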

/*
 * For ext4 extent files, ext4 will do direct-io writes to holes,
 * preallocated extents, and writes that extend the file; there is no need
 * to fall back to buffered IO.
 *
 * For holes, we fallocate those blocks and mark them as uninitialized.
 * If those blocks were preallocated, we make sure they are split, but
 * still keep the range to write as uninitialized.
 *
 * The unwritten extents will be converted to written when DIO is completed.
 * For async direct IO, since the IO may still be pending when we return, we
 * set up an end_io callback function, which will do the conversion
 * when the async direct IO completes.
 *
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list, so recovery will truncate it back to the original size
 * if the machine crashes during the write.
 *
 */
static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
				  const struct iovec *iov, loff_t offset,
				  unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;
	size_t count = iov_length(iov, nr_segs);

	loff_t final_size = offset + count;
	if (rw == WRITE && final_size <= inode->i_size) {
		/*
		 * We could direct write to holes and fallocate.
		 *
		 * Allocated blocks to fill the hole are marked as
		 * uninitialized to prevent a parallel buffered read
		 * from exposing stale data before DIO completes the
		 * data IO.
		 *
		 * As to previously fallocated extents, ext4 get_block
		 * will simply mark the buffer mapped but still
		 * keep the extents uninitialized.
		 *
		 * For the non-AIO case, we will convert those unwritten
		 * extents to written after returning from
		 * blockdev_direct_IO.
		 *
		 * For async DIO, the conversion needs to be deferred
		 * until the IO is completed.  The ext4 end_io callback
		 * function will be called to take care of the
		 * conversion work.  Here, for the async case, we
		 * allocate an io_end structure to hook to the iocb.
		 */
		iocb->private = NULL;
		EXT4_I(inode)->cur_aio_dio = NULL;
		if (!is_sync_kiocb(iocb)) {
			iocb->private = ext4_init_io_end(inode, GFP_NOFS);
			if (!iocb->private)
				return -ENOMEM;
			/*
			 * We save the io structure for the current async
			 * direct IO, so that later ext4_map_blocks()
			 * could flag the io structure if there
			 * are unwritten extents that need to be converted
			 * when the IO is completed.
			 */
			EXT4_I(inode)->cur_aio_dio = iocb->private;
		}

		ret = blockdev_direct_IO(rw, iocb, inode,
					 inode->i_sb->s_bdev, iov,
					 offset, nr_segs,
					 ext4_get_block_write,
					 ext4_end_io_dio);
		if (iocb->private)
			EXT4_I(inode)->cur_aio_dio = NULL;
		/*
		 * The io_end structure takes a reference to the inode;
		 * that structure needs to be destroyed and the
		 * reference to the inode needs to be dropped when the IO
		 * is complete, even for a 0 byte write, or on failure.
		 *
		 * In the successful AIO DIO case, the io_end structure will
		 * be destroyed and the reference to the inode will be
		 * dropped after the end_io callback function is called.
		 *
		 * In the case of a 0 byte write, or an error, since the
		 * VFS direct IO won't invoke the end_io callback function,
		 * we need to free the end_io structure here.
		 */
		if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
			ext4_free_io_end(iocb->private);
			iocb->private = NULL;
		} else if (ret > 0 && ext4_test_inode_state(inode,
						EXT4_STATE_DIO_UNWRITTEN)) {
			int err;
			/*
			 * For the non-AIO case, since the IO is already
			 * completed, we can do the conversion right here.
			 */
			err = ext4_convert_unwritten_extents(inode,
							     offset, ret);
			if (err < 0)
				ret = err;
			ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
		}
		return ret;
	}

	/* for writes that extend the file, we fall back to the old way */
	return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
}

static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
	else
		ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
	trace_ext4_direct_IO_exit(inode, offset,
				  iov_length(iov, nr_segs), rw, ret);
	return ret;
}
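
/*
 * Editorial note: a hedged sketch of an asynchronous direct write using
 * libaio (io_setup/io_submit/io_getevents), the submission style that makes
 * blockdev_direct_IO() return -EIOCBQUEUED and drives the ext4_end_io_dio()
 * completion path above.  Assumes libaio is installed (link with -laio);
 * not part of ext4, and the path and sizes are arbitrary.
 */
#if 0
#include <fcntl.h>
#include <libaio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	io_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	void *buf;
	int fd = open("/mnt/ext4/aiofile", O_WRONLY | O_CREAT | O_DIRECT, 0644);

	if (fd < 0 || io_setup(8, &ctx))
		return 1;
	if (posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 0xcd, 4096);
	io_prep_pwrite(&cb, fd, buf, 4096, 0);
	if (io_submit(ctx, 1, cbs) != 1)
		return 1;
	/* the end_io callback has run by the time the event is reaped */
	if (io_getevents(ctx, 1, 1, &ev, NULL) != 1)
		return 1;
	io_destroy(ctx);
	free(buf);
	return close(fd);
}
#endif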

/*
 * Pages can be marked dirty completely asynchronously from ext4's
 * journalling activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.
 * We cannot do much here because ->set_page_dirty is called under VFS
 * locks.  The page is not necessarily locked.
 *
 * We cannot just dirty the page and leave attached buffers clean, because
 * the buffers' dirty state is "definitive".  We cannot just set the
 * buffers dirty or jbddirty because all the journalling code will explode.
 *
 * So what we do is to mark the page "pending dirty" and next time
 * writepage is called, propagate that into the buffers appropriately.
 */
static int ext4_journalled_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations ext4_ordered_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_writepage,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_ordered_write_end,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= ext4_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

static const struct address_space_operations ext4_writeback_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_writepage,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_writeback_write_end,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= ext4_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

static const struct address_space_operations ext4_journalled_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_writepage,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_journalled_write_end,
	.set_page_dirty		= ext4_journalled_set_page_dirty,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_invalidatepage,
	.releasepage		= ext4_releasepage,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

static const struct address_space_operations ext4_da_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_writepage,
	.writepages		= ext4_da_writepages,
	.write_begin		= ext4_da_write_begin,
	.write_end		= ext4_da_write_end,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_da_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= ext4_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

void ext4_set_aops(struct inode *inode)
{
	if (ext4_should_order_data(inode) &&
	    test_opt(inode->i_sb, DELALLOC))
		inode->i_mapping->a_ops = &ext4_da_aops;
	else if (ext4_should_order_data(inode))
		inode->i_mapping->a_ops = &ext4_ordered_aops;
	else if (ext4_should_writeback_data(inode) &&
		 test_opt(inode->i_sb, DELALLOC))
		inode->i_mapping->a_ops = &ext4_da_aops;
	else if (ext4_should_writeback_data(inode))
		inode->i_mapping->a_ops = &ext4_writeback_aops;
	else
		inode->i_mapping->a_ops = &ext4_journalled_aops;
}

/*
 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
 * up to the end of the block which corresponds to `from'.
 * This is required during truncate.  We need to physically zero the tail
 * end of that block so it doesn't yield old data if the file is later
 * grown.
 */
int ext4_block_truncate_page(handle_t *handle,
			     struct address_space *mapping, loff_t from)
{
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned length;
	unsigned blocksize;
	struct inode *inode = mapping->host;

	blocksize = inode->i_sb->s_blocksize;
	length = blocksize - (offset & (blocksize - 1));

	return ext4_block_zero_page_range(handle, mapping, from, length);
}

/*
 * ext4_block_zero_page_range() zeros out a mapping of length 'length'
 * starting from file offset 'from'.  The range to be zeroed must
 * be contained within one block.  If the specified range exceeds
 * the end of the block it will be shortened to the end of the block
 * that corresponds to 'from'.
 */
int ext4_block_zero_page_range(handle_t *handle,
			       struct address_space *mapping,
			       loff_t from, loff_t length)
{
	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize, max, pos;
	ext4_lblk_t iblock;
	struct inode *inode = mapping->host;
	struct buffer_head *bh;
	struct page *page;
	int err = 0;

	page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
				   mapping_gfp_mask(mapping) & ~__GFP_FS);
	if (!page)
		return -EINVAL;

	blocksize = inode->i_sb->s_blocksize;
	max = blocksize - (offset & (blocksize - 1));

	/*
	 * correct length if it does not fall between
	 * 'from' and the end of the block
	 */
	if (length > max || length < 0)
		length = max;

	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (buffer_freed(bh)) {
		BUFFER_TRACE(bh, "freed: skip");
		goto unlock;
	}

	if (!buffer_mapped(bh)) {
		BUFFER_TRACE(bh, "unmapped");
		ext4_get_block(inode, iblock, bh, 0);
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh)) {
			BUFFER_TRACE(bh, "still unmapped");
			goto unlock;
		}
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	if (ext4_should_journal_data(inode)) {
		BUFFER_TRACE(bh, "get write access");
		err = ext4_journal_get_write_access(handle, bh);
		if (err)
			goto unlock;
	}

	zero_user(page, offset, length);

	BUFFER_TRACE(bh, "zeroed end of block");

	err = 0;
	if (ext4_should_journal_data(inode)) {
		err = ext4_handle_dirty_metadata(handle, inode, bh);
	} else {
		if (ext4_should_order_data(inode) && EXT4_I(inode)->jinode)
			err = ext4_jbd2_file_inode(handle, inode);
		mark_buffer_dirty(bh);
	}

unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
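
/*
 * Editorial note: a small standalone sketch of the arithmetic used by
 * ext4_block_truncate_page() above: given a truncation offset `from' and a
 * block size, compute the in-block range that must be zeroed.  Hedged
 * illustration only, with sample values; not part of ext4.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long from = 10000;	/* new i_size, for example */
	unsigned blocksize = 4096;
	unsigned offset = from & (blocksize - 1);	/* 10000 % 4096 = 1808 */
	unsigned length = blocksize - offset;		/* zero 2288 tail bytes */

	printf("zero %u bytes at in-block offset %u\n", length, offset);
	return 0;
}
#endif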

/*
 * Probably it should be a library function... search for the first non-zero
 * word or memcmp with zero_page, whatever is better for a particular
 * architecture.  Linus?
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

/**
 *	ext4_find_shared - find the indirect blocks for partial truncation.
 *	@inode:	  inode in question
 *	@depth:	  depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext4_block_to_path)
 *	@chain:	  place to store the pointers to partial indirect blocks
 *	@top:	  place to store the (detached) top of the branch
 *
 *	This is a helper function used by ext4_truncate().
 *
 *	When we do truncate() we may have to clean the ends of several
 *	indirect blocks but leave the blocks themselves alive.  A block is
 *	partially truncated if some data below the new i_size is referred
 *	to from it (and it is on the path to the first completely truncated
 *	data block, indeed).  We have to free the top of that path along
 *	with everything to the right of the path.  Since no allocation
 *	past the truncation point is possible until ext4_truncate()
 *	finishes, we may safely do the latter, but the top of the branch may
 *	require special attention - pageout below the truncation point
 *	might try to populate it.
 *
 *	We atomically detach the top of the branch from the tree, store the
 *	block number of its root in *@top, pointers to buffer_heads of
 *	partially truncated blocks - in @chain[].bh and pointers to
 *	their last elements that should not be removed - in
 *	@chain[].p.  The return value is the pointer to the last filled
 *	element of @chain.
 *
 *	The work left for the caller is the actual freeing of the subtrees:
 *		a) free the subtree starting from *@top
 *		b) free the subtrees whose roots are stored in
 *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 *		c) free the subtrees growing from the inode past the @chain[0].
 *			(no partially truncated stuff there).
 */
static Indirect *ext4_find_shared(struct inode *inode, int depth,
				  ext4_lblk_t offsets[4], Indirect chain[4],
				  __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext4.  Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}
	/* Writer: end */

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 *
 * Return 0 on success, 1 on invalid block range
 * and < 0 on fatal error.
 */
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
			     struct buffer_head *bh,
			     ext4_fsblk_t block_to_free,
			     unsigned long count, __le32 *first,
			     __le32 *last)
{
	__le32 *p;
	int	flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
	int	err;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_METADATA;

	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
				   count)) {
		EXT4_ERROR_INODE(inode, "attempt to clear invalid "
				 "blocks %llu len %lu",
				 (unsigned long long) block_to_free, count);
		return 1;
	}

	if (try_to_extend_transaction(handle, inode)) {
		if (bh) {
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			err = ext4_handle_dirty_metadata(handle, inode, bh);
			if (unlikely(err))
				goto out_err;
		}
		err = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(err))
			goto out_err;
		err = ext4_truncate_restart_trans(handle, inode,
						  blocks_for_truncate(inode));
		if (unlikely(err))
			goto out_err;
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			err = ext4_journal_get_write_access(handle, bh);
			if (unlikely(err))
				goto out_err;
		}
	}

	for (p = first; p < last; p++)
		*p = 0;

	ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
	return 0;
out_err:
	ext4_std_error(inode->i_sb, err);
	return err;
}

/**
 * ext4_free_data - free a list of data blocks
 * @handle:	handle for this transaction
 * @inode:	inode we are dealing with
 * @this_bh:	indirect buffer_head which contains *@first and *@last
 * @first:	array of block numbers
 * @last:	points immediately past the end of array
 *
 * We are freeing all blocks referred from that array (numbers are stored as
 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
 *
 * We accumulate contiguous runs of blocks to free.  Conveniently, if these
 * blocks are contiguous then releasing them at one time will only affect one
 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
 * actually use a lot of journal space.
 *
 * @this_bh will be %NULL if @first and @last point into the inode's direct
 * block pointers.
 */
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
	unsigned long count = 0;	   /* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;    /* Pointer into inode/ind
					      corresponding to
					      block_to_free */
	ext4_fsblk_t nr;		   /* Current block # */
	__le32 *p;			   /* Pointer into inode/ind
					      for current block */
	int err = 0;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				err = ext4_clear_blocks(handle, inode, this_bh,
							block_to_free, count,
							block_to_free_p, p);
				if (err)
					break;
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	if (!err && count > 0)
		err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
					count, block_to_free_p, p);
	if (err < 0)
		/* fatal error */
		return;

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at this
		 * point. However, if the data is corrupted and an indirect
		 * block pointed to itself, it would have been detached when
		 * the block was cleared. Check for this instead of OOPSing.
		 */
		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
			ext4_handle_dirty_metadata(handle, inode, this_bh);
		else
			EXT4_ERROR_INODE(inode,
					 "circular indirect block detected at "
					 "block %llu",
				(unsigned long long) this_bh->b_blocknr);
	}
}
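
/*
 * Editorial note: the run-coalescing pattern of ext4_free_data() above,
 * reduced to a standalone sketch: walk an array of block numbers and emit
 * maximal contiguous runs (skipping holes) instead of one call per block.
 * Hedged illustration; not part of ext4.
 */
#if 0
#include <stdio.h>

static void free_run(unsigned long start, unsigned long count)
{
	printf("free %lu block(s) starting at %lu\n", count, start);
}

int main(void)
{
	unsigned long blocks[] = { 100, 101, 102, 0, 200, 201, 57 };
	unsigned long start = 0, count = 0;
	unsigned i;

	for (i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++) {
		unsigned long nr = blocks[i];

		if (!nr)
			continue;		/* a hole */
		if (count && nr == start + count) {
			count++;		/* extends the current run */
		} else {
			if (count)
				free_run(start, count);
			start = nr;
			count = 1;
		}
	}
	if (count)
		free_run(start, count);
	return 0;
}
#endif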

/**
 *	ext4_free_branches - free an array of branches
 *	@handle: JBD handle for this transaction
 *	@inode:	inode we are dealing with
 *	@parent_bh: the buffer_head which contains *@first and *@last
 *	@first:	array of block numbers
 *	@last:	pointer immediately past the end of array
 *	@depth:	depth of the branches to free
 *
 *	We are freeing all blocks referred from these branches (numbers are
 *	stored as little-endian 32-bit) and updating @inode->i_blocks
 *	appropriately.
 */
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						   nr, 1)) {
				EXT4_ERROR_INODE(inode,
						 "invalid indirect mapped "
						 "block %lu (level %d)",
						 (unsigned long) nr, depth);
				break;
			}

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				EXT4_ERROR_INODE_BLOCK(inode, nr,
						       "Read failure");
				continue;
			}

			/* This zaps the entire block.  Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					   (__le32 *) bh->b_data,
					   (__le32 *) bh->b_data + addr_per_block,
					   depth);
			brelse(bh);

			/*
			 * Everything below this pointer has been
			 * released.  Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the updating of the
			 * bitmap block which owns it.  So make some room in
			 * the journal.
			 *
			 * We zero the parent pointer *after* freeing its
			 * pointee in the bitmaps, so if extend_transaction()
			 * for some reason fails to put the bitmap changes and
			 * the release into the same transaction, recovery
			 * will merely complain about releasing a free block,
			 * rather than leaking blocks.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext4_mark_inode_dirty(handle, inode);
				ext4_truncate_restart_trans(handle, inode,
					    blocks_for_truncate(inode));
			}

			/*
			 * The forget flag here is critical because if
			 * we are journaling (and not doing data
			 * journaling), we have to make sure a revoke
			 * record is written to prevent the journal
			 * replay from overwriting the (former)
			 * indirect block if it gets reallocated as a
			 * data block.  This must happen in the same
			 * transaction where the data blocks are
			 * actually freed.
			 */
			ext4_free_blocks(handle, inode, NULL, nr, 1,
					 EXT4_FREE_BLOCKS_METADATA|
					 EXT4_FREE_BLOCKS_FORGET);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
								   parent_bh)){
					*p = 0;
					BUFFER_TRACE(parent_bh,
					     "call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}

int ext4_can_truncate(struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		return 1;
	if (S_ISDIR(inode->i_mode))
		return 1;
	if (S_ISLNK(inode->i_mode))
		return !ext4_inode_is_fast_symlink(inode);
	return 0;
}

/*
 * ext4_punch_hole: punches a hole in a file by releasing the blocks
 * associated with the given offset and length
 *
 * @inode:  File inode
 * @offset: The offset where the hole will begin
 * @len:    The length of the hole
 *
 * Returns: 0 on success or negative on failure
 */

int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	if (!S_ISREG(inode->i_mode))
		return -ENOTSUPP;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		/* TODO: Add support for non extent hole punching */
		return -ENOTSUPP;
	}

	return ext4_ext_punch_hole(file, offset, length);
}
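
/*
 * Editorial note: the userspace entry point for ext4_punch_hole() above is
 * fallocate(2) with FALLOC_FL_PUNCH_HOLE, which must be combined with
 * FALLOC_FL_KEEP_SIZE.  A hedged sketch; not part of ext4, the path is
 * arbitrary, and older systems may need <linux/falloc.h> for the flags.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/ext4/sparse", O_RDWR);

	if (fd < 0)
		return 1;
	/* free the blocks backing bytes [1 MiB, 2 MiB) without changing i_size */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      1 << 20, 1 << 20))
		return 1;
	return close(fd);
}
#endif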

/*
 * ext4_truncate()
 *
 * We block out ext4_get_block() block instantiations across the entire
 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
 * simultaneously on behalf of the same inode.
 *
 * As we work through the truncate and commit bits of it to the journal there
 * is one core, guiding principle: the file's tree must always be consistent on
 * disk.  We must be able to restart the truncate after a crash.
 *
 * The file's tree may be transiently inconsistent in memory (although it
 * probably isn't), but whenever we close off and commit a journal transaction,
 * the contents of (the filesystem + the journal) must be consistent and
 * restartable.  It's pretty simple, really: bottom up, right to left (although
 * left-to-right works OK too).
 *
 * Note that at recovery time, journal replay occurs *before* the restart of
 * truncate against the orphan inode list.
 *
 * The committed inode has the new, desired i_size (which is the same as
 * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
 * that this inode's truncate did not complete and it will again call
 * ext4_truncate() to have another go.  So there will be instantiated blocks
 * to the right of the truncation point in a crashed ext4 filesystem.  But
 * that's fine - as long as they are linked from the inode, the post-crash
 * ext4_truncate() run will find them and release them.
 */
void ext4_truncate(struct inode *inode)
{
	trace_ext4_truncate_enter(inode);

	if (!ext4_can_truncate(inode))
		return;

	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);

	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
		ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ext4_ext_truncate(inode);
	else
		ext4_ind_truncate(inode);

	trace_ext4_truncate_exit(inode);
}

void ext4_ind_truncate(struct inode *inode)
{
	handle_t *handle;
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n = 0;
	ext4_lblk_t last_block, max_block;
	unsigned blocksize = inode->i_sb->s_blocksize;

	handle = start_transaction(inode);
	if (IS_ERR(handle))
		return;		/* AKPM: return what? */

	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (inode->i_size & (blocksize - 1))
		if (ext4_block_truncate_page(handle, mapping, inode->i_size))
			goto out_stop;

	if (last_block != max_block) {
		n = ext4_block_to_path(inode, last_block, offsets, NULL);
		if (n == 0)
			goto out_stop;	/* error */
	}

	/*
	 * OK.  This truncate is going to happen.  We add the inode to the
	 * orphan list, so that if this truncate spans multiple transactions,
	 * and we crash, we will resume the truncate when the filesystem
	 * recovers.  It also marks the inode dirty, to catch the new size.
	 *
	 * Implication: the file must always be in a sane, consistent
	 * truncatable state while each transaction commits.
	 */
	if (ext4_orphan_add(handle, inode))
		goto out_stop;

	/*
	 * From here we block out all ext4_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	down_write(&ei->i_data_sem);

	ext4_discard_preallocations(inode);

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to propagate
	 * the new, shorter inode size (held for now in i_size) into the
	 * on-disk inode.  We do this via i_disksize, which is the value which
	 * ext4 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	if (last_block == max_block) {
		/*
		 * It is unnecessary to free any data blocks if last_block is
		 * equal to the indirect block limit.
		 */
		goto out_unlock;
	} else if (n == 1) {		/* direct blocks */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop.  No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p,
					   partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
	case EXT4_TIND_BLOCK:
		;
	}

out_unlock:
	up_write(&ei->i_data_sem);
	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);

	/*
	 * In a multi-transaction truncate, we only make the final transaction
	 * synchronous
	 */
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
out_stop:
	/*
	 * If this was a simple ftruncate(), and the file will remain alive
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_evict_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	ext4_journal_stop(handle);
	trace_ext4_truncate_exit(inode);
}
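
/*
 * Editorial note: a standalone sketch of how a logical block number maps to
 * a branch depth in the indirect scheme torn down above, assuming 4 KiB
 * blocks (1024 block pointers per block, 12 direct slots).  The values
 * follow the classic ext2/3/4 indirect layout; hedged illustration, not
 * ext4 code.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long ndir = 12, per_block = 1024;	/* 4 KiB blocks */
	unsigned long block = 2000;			/* sample logical block */
	int depth;

	if (block < ndir)
		depth = 1;				/* direct */
	else if ((block -= ndir) < per_block)
		depth = 2;				/* indirect */
	else if ((block -= per_block) < per_block * per_block)
		depth = 3;				/* double indirect */
	else
		depth = 4;				/* triple indirect */
	printf("branch depth: %d\n", depth);		/* prints 3 here */
	return 0;
}
#endif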

/*
 * ext4_get_inode_loc returns with an extra refcount against the inode's
 * underlying buffer_head on success.  If 'in_mem' is true, we have all
 * data in memory that is needed to recreate the on-disk version of this
 * inode.
 */
static int __ext4_get_inode_loc(struct inode *inode,
				struct ext4_iloc *iloc, int in_mem)
{
	struct ext4_group_desc	*gdp;
	struct buffer_head	*bh;
	struct super_block	*sb = inode->i_sb;
	ext4_fsblk_t		block;
	int			inodes_per_block, inode_offset;

	iloc->bh = NULL;
	if (!ext4_valid_inum(sb, inode->i_ino))
		return -EIO;

	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
	if (!gdp)
		return -EIO;

	/*
	 * Figure out the offset within the block group inode table
	 */
	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
	inode_offset = ((inode->i_ino - 1) %
			EXT4_INODES_PER_GROUP(sb));
	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);

	bh = sb_getblk(sb, block);
	if (!bh) {
		EXT4_ERROR_INODE_BLOCK(inode, block,
				       "unable to read itable block");
		return -EIO;
	}
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);

		/*
		 * If the buffer has the write error flag, we have failed
		 * to write out another inode in the same block.  In this
		 * case, we don't re-read the block from disk: the read
		 * could succeed but return stale inode data, while the
		 * in-memory copy is still the most recent one.
		 */
		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
			set_buffer_uptodate(bh);

		if (buffer_uptodate(bh)) {
			/* someone brought it uptodate while we waited */
			unlock_buffer(bh);
			goto has_buffer;
		}

		/*
		 * If we have all information of the inode in memory and this
		 * is the only valid inode in the block, we need not read the
		 * block.
		 */
		if (in_mem) {
			struct buffer_head *bitmap_bh;
			int i, start;

			start = inode_offset & ~(inodes_per_block - 1);

			/* Is the inode bitmap in cache? */
			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
			if (!bitmap_bh)
				goto make_io;

			/*
			 * If the inode bitmap isn't in cache then the
			 * optimisation may end up performing two reads instead
			 * of one, so skip it.
			 */
			if (!buffer_uptodate(bitmap_bh)) {
				brelse(bitmap_bh);
				goto make_io;
			}
			for (i = start; i < start + inodes_per_block; i++) {
				if (i == inode_offset)
					continue;
				if (ext4_test_bit(i, bitmap_bh->b_data))
					break;
			}
			brelse(bitmap_bh);
			if (i == start + inodes_per_block) {
				/* all other inodes are free, so skip I/O */
				memset(bh->b_data, 0, bh->b_size);
				set_buffer_uptodate(bh);
				unlock_buffer(bh);
				goto has_buffer;
			}
		}

make_io:
		/*
		 * If we need to do any I/O, try to pre-readahead extra
		 * blocks from the inode table.
		 */
		if (EXT4_SB(sb)->s_inode_readahead_blks) {
			ext4_fsblk_t b, end, table;
			unsigned num;

			table = ext4_inode_table(sb, gdp);
			/* s_inode_readahead_blks is always a power of 2 */
			b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
			if (table > b)
				b = table;
			end = b + EXT4_SB(sb)->s_inode_readahead_blks;
			num = EXT4_INODES_PER_GROUP(sb);
			if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
				num -= ext4_itable_unused_count(sb, gdp);
			table += num / inodes_per_block;
			if (end > table)
				end = table;
			while (b <= end)
				sb_breadahead(sb, b++);
		}

		/*
		 * There are other valid inodes in the buffer, this inode
		 * has in-inode xattrs, or we don't have this inode in memory.
		 * Read the block from disk.
		 */
		trace_ext4_load_inode(inode);
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ_META, bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			EXT4_ERROR_INODE_BLOCK(inode, block,
					       "unable to read itable block");
			brelse(bh);
			return -EIO;
		}
	}
has_buffer:
	iloc->bh = bh;
	return 0;
}

int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
{
	/* We have all inode data except xattrs in memory here. */
	return __ext4_get_inode_loc(inode, iloc,
		!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
}
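
/*
 * Editorial note: the location arithmetic of __ext4_get_inode_loc() above,
 * as a standalone sketch.  The geometry values (inodes per group, inode
 * size, block size, inode table start) are made up for illustration; on a
 * real filesystem they come from the superblock and the group descriptor
 * of the group the inode lives in.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long ino = 8193;		/* sample inode number */
	unsigned long inodes_per_group = 8192;
	unsigned long inode_size = 256, block_size = 4096;
	unsigned long inode_table = 1059;	/* hypothetical table start */
	unsigned long inodes_per_block = block_size / inode_size;

	unsigned long group = (ino - 1) / inodes_per_group;
	unsigned long index = (ino - 1) % inodes_per_group;
	unsigned long block = inode_table + index / inodes_per_block;
	unsigned long offset = (index % inodes_per_block) * inode_size;

	printf("group %lu, block %lu, offset %lu\n", group, block, offset);
	return 0;
}
#endif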

void ext4_set_inode_flags(struct inode *inode)
{
	unsigned int flags = EXT4_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
	if (flags & EXT4_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & EXT4_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & EXT4_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & EXT4_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & EXT4_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
}

/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
void ext4_get_inode_flags(struct ext4_inode_info *ei)
{
	unsigned int vfs_fl;
	unsigned long old_fl, new_fl;

	do {
		vfs_fl = ei->vfs_inode.i_flags;
		old_fl = ei->i_flags;
		new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
				EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
				EXT4_DIRSYNC_FL);
		if (vfs_fl & S_SYNC)
			new_fl |= EXT4_SYNC_FL;
		if (vfs_fl & S_APPEND)
			new_fl |= EXT4_APPEND_FL;
		if (vfs_fl & S_IMMUTABLE)
			new_fl |= EXT4_IMMUTABLE_FL;
		if (vfs_fl & S_NOATIME)
			new_fl |= EXT4_NOATIME_FL;
		if (vfs_fl & S_DIRSYNC)
			new_fl |= EXT4_DIRSYNC_FL;
	} while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
}

static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
				  struct ext4_inode_info *ei)
{
	blkcnt_t i_blocks;
	struct inode *inode = &(ei->vfs_inode);
	struct super_block *sb = inode->i_sb;

	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
		/* we are using the combined 48 bit field */
		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
					le32_to_cpu(raw_inode->i_blocks_lo);
		if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
			/* i_blocks is in units of the file system block size */
			return i_blocks << (inode->i_blkbits - 9);
		} else {
			return i_blocks;
		}
	} else {
		return le32_to_cpu(raw_inode->i_blocks_lo);
	}
}

struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
{
	struct ext4_iloc iloc;
	struct ext4_inode *raw_inode;
	struct ext4_inode_info *ei;
	struct inode *inode;
	journal_t *journal = EXT4_SB(sb)->s_journal;
	long ret;
	int block;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ei = EXT4_I(inode);
	iloc.bh = NULL;

	ret = __ext4_get_inode_loc(inode, &iloc, 0);
	if (ret < 0)
		goto bad_inode;
	raw_inode = ext4_raw_inode(&iloc);
	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);

	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
	ei->i_dir_start_lookup = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes;
	 * the test is the same one that e2fsck uses.
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0) {
		if (inode->i_mode == 0 ||
		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
			/* this inode is deleted */
			ret = -ESTALE;
			goto bad_inode;
		}
		/* The only unlinked inodes we let through here have
		 * valid i_mode and are being read by the orphan
		 * recovery code: that's fine, we're about to complete
		 * the process of deleting those. */
	}
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
		ei->i_file_acl |=
			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
	inode->i_size = ext4_isize(raw_inode);
	ei->i_disksize = inode->i_size;
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
#endif
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_block_group = iloc.block_group;
	ei->i_last_alloc_group = ~0;
	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (block = 0; block < EXT4_N_BLOCKS; block++)
		ei->i_data[block] = raw_inode->i_block[block];
	INIT_LIST_HEAD(&ei->i_orphan);

	/*
	 * Set transaction id's of transactions that have to be committed
	 * to finish f[data]sync.  We set them to the currently running
	 * transaction as we cannot be sure that the inode or some of its
	 * metadata isn't part of the transaction - the inode could have
	 * been reclaimed and now it is reread from disk.
	 */
	if (journal) {
		transaction_t *transaction;
		tid_t tid;

		read_lock(&journal->j_state_lock);
		if (journal->j_running_transaction)
			transaction = journal->j_running_transaction;
		else
			transaction = journal->j_committing_transaction;
		if (transaction)
			tid = transaction->t_tid;
		else
			tid = journal->j_commit_sequence;
		read_unlock(&journal->j_state_lock);
		ei->i_sync_tid = tid;
		ei->i_datasync_tid = tid;
	}

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
		    EXT4_INODE_SIZE(inode->i_sb)) {
			ret = -EIO;
			goto bad_inode;
		}
		if (ei->i_extra_isize == 0) {
			/* The extra space is currently unused. Use it. */
			ei->i_extra_isize = sizeof(struct ext4_inode) -
					    EXT4_GOOD_OLD_INODE_SIZE;
		} else {
			__le32 *magic = (void *)raw_inode +
					EXT4_GOOD_OLD_INODE_SIZE +
					ei->i_extra_isize;
			if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
				ext4_set_inode_state(inode, EXT4_STATE_XATTR);
		}
	} else
		ei->i_extra_isize = 0;

	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);

	inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
			inode->i_version |=
			(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
	}

	ret = 0;
	if (ei->i_file_acl &&
	    !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
		EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
				 ei->i_file_acl);
		ret = -EIO;
		goto bad_inode;
	} else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		    (S_ISLNK(inode->i_mode) &&
		     !ext4_inode_is_fast_symlink(inode)))
			/* Validate extent which is part of inode */
			ret = ext4_ext_check_inode(inode);
	} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		   (S_ISLNK(inode->i_mode) &&
		    !ext4_inode_is_fast_symlink(inode))) {
		/* Validate block references which are part of inode */
		ret = ext4_ind_check_inode(inode);
	}
	if (ret)
		goto bad_inode;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext4_file_inode_operations;
		inode->i_fop = &ext4_file_operations;
		ext4_set_aops(inode);
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext4_dir_inode_operations;
		inode->i_fop = &ext4_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext4_inode_is_fast_symlink(inode)) {
			inode->i_op = &ext4_fast_symlink_inode_operations;
			nd_terminate_link(ei->i_data, inode->i_size,
				sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext4_symlink_inode_operations;
			ext4_set_aops(inode);
		}
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &ext4_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	} else {
		ret = -EIO;
		EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
		goto bad_inode;
	}
	brelse(iloc.bh);
	ext4_set_inode_flags(inode);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	brelse(iloc.bh);
	iget_failed(inode);
	return ERR_PTR(ret);
}

static int ext4_inode_blocks_set(handle_t *handle,
				 struct ext4_inode *raw_inode,
				 struct ext4_inode_info *ei)
{
	struct inode *inode = &(ei->vfs_inode);
	u64 i_blocks = inode->i_blocks;
	struct super_block *sb = inode->i_sb;

	if (i_blocks <= ~0U) {
		/*
		 * i_blocks can be represented in a 32 bit variable
		 * as a multiple of 512 bytes
		 */
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = 0;
		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
		return 0;
	}
	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
		return -EFBIG;

	if (i_blocks <= 0xffffffffffffULL) {
		/*
		 * i_blocks can be represented in a 48 bit variable
		 * as a multiple of 512 bytes
		 */
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
	} else {
		ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
		/* i_blocks is stored in units of the file system block size */
		i_blocks = i_blocks >> (inode->i_blkbits - 9);
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
	}
	return 0;
}
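
/*
 * Editorial note: the 48-bit i_blocks encoding used by
 * ext4_inode_blocks_set() above, as a standalone round-trip sketch.
 * Hedged illustration; the struct is a stand-in, not the on-disk inode.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

struct fake_raw_inode {
	uint32_t i_blocks_lo;
	uint16_t i_blocks_high;
};

int main(void)
{
	uint64_t i_blocks = 0x123456789aULL;	/* > 32 bits, <= 48 bits */
	struct fake_raw_inode raw;
	uint64_t decoded;

	raw.i_blocks_lo   = (uint32_t)i_blocks;
	raw.i_blocks_high = (uint16_t)(i_blocks >> 32);
	decoded = ((uint64_t)raw.i_blocks_high << 32) | raw.i_blocks_lo;
	printf("round trip %s\n", decoded == i_blocks ? "ok" : "BROKEN");
	return 0;
}
#endif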

/*
 * Post the struct inode info into an on-disk inode location in the
 * buffer-cache.  This gobbles the caller's reference to the
 * buffer_head in the inode location struct.
 *
 * The caller must have write access to iloc->bh.
 */
static int ext4_do_update_inode(handle_t *handle,
				struct inode *inode,
				struct ext4_iloc *iloc)
{
	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct buffer_head *bh = iloc->bh;
	int err = 0, rc, block;

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);

	ext4_get_inode_flags(ei);
	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
		/*
		 * Fix up interoperability with old kernels.  Otherwise,
		 * old inodes get re-used with the upper 16 bits of the
		 * uid/gid intact.
		 */
		if (!ei->i_dtime) {
			raw_inode->i_uid_high =
				cpu_to_le16(high_16_bits(inode->i_uid));
			raw_inode->i_gid_high =
				cpu_to_le16(high_16_bits(inode->i_gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low =
			cpu_to_le16(fs_high2lowuid(inode->i_uid));
		raw_inode->i_gid_low =
			cpu_to_le16(fs_high2lowgid(inode->i_gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);

	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);

	if (ext4_inode_blocks_set(handle, raw_inode, ei))
		goto out_brelse;
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_HURD))
		raw_inode->i_file_acl_high =
			cpu_to_le16(ei->i_file_acl >> 32);
	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
	ext4_isize_set(raw_inode, ei->i_disksize);
	if (ei->i_disksize > 0x7fffffffULL) {
		struct super_block *sb = inode->i_sb;
		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
				EXT4_SB(sb)->s_es->s_rev_level ==
				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
			/* If this is the first large file
			 * created, add a flag to the superblock.
			 */
			err = ext4_journal_get_write_access(handle,
					EXT4_SB(sb)->s_sbh);
			if (err)
				goto out_brelse;
			ext4_update_dynamic_rev(sb);
			EXT4_SET_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
			sb->s_dirt = 1;
			ext4_handle_sync(handle);
			err = ext4_handle_dirty_metadata(handle, NULL,
					EXT4_SB(sb)->s_sbh);
		}
	}
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else
		for (block = 0; block < EXT4_N_BLOCKS; block++)
			raw_inode->i_block[block] = ei->i_data[block];

	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
	if (ei->i_extra_isize) {
		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
			raw_inode->i_version_hi =
			cpu_to_le32(inode->i_version >> 32);
		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
	}

	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
	rc = ext4_handle_dirty_metadata(handle, NULL, bh);
	if (!err)
		err = rc;
	ext4_clear_inode_state(inode, EXT4_STATE_NEW);

	ext4_update_inode_fsync_trans(handle, inode, 0);
out_brelse:
	brelse(bh);
	ext4_std_error(inode->i_sb, err);
	return err;
}

/*
 * ext4_write_inode()
 *
 * We are called from a few places:
 *
 * - Within generic_file_write() for O_SYNC files.
 *   Here, there will be no transaction running.  We wait for any running
 *   transaction to commit.
 *
 * - Within sys_sync(), kupdate and such.
 *   We wait on commit, if told to.
 *
 * - Within prune_icache() (PF_MEMALLOC == true)
 *   Here we simply return.  We can't afford to block kswapd on the
 *   journal commit.
 *
 * In all cases it is actually safe for us to return without doing anything,
 * because the inode has been copied into a raw inode buffer in
 * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
 * knfsd.
 *
 * Note that we are absolutely dependent upon all inode dirtiers doing the
 * right thing: they *must* call mark_inode_dirty() after dirtying info in
 * which we are interested.
 *
 * It would be a bug for them to not do this.  The code:
 *
 *	mark_inode_dirty(inode)
 *	stuff();
 *	inode->i_size = expr;
 *
 * is in error because a kswapd-driven write_inode() could occur while
 * `stuff()' is running, and the new i_size will be lost.  Plus the inode
 * will no longer be on the superblock's dirty inode list.
 */
int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int err;

	if (current->flags & PF_MEMALLOC)
		return 0;

	if (EXT4_SB(inode->i_sb)->s_journal) {
		if (ext4_journal_current_handle()) {
			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
			dump_stack();
			return -EIO;
		}

		if (wbc->sync_mode != WB_SYNC_ALL)
			return 0;

		err = ext4_force_commit(inode->i_sb);
	} else {
		struct ext4_iloc iloc;

		err = __ext4_get_inode_loc(inode, &iloc, 0);
		if (err)
			return err;
		if (wbc->sync_mode == WB_SYNC_ALL)
			sync_dirty_buffer(iloc.bh);
		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
			EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
					       "IO error syncing inode");
			err = -EIO;
		}
		brelse(iloc.bh);
	}
	return err;
}
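
/*
 * Editorial note: for contrast with the buggy ordering quoted in the
 * comment above ext4_write_inode(), the safe pattern is to dirty the inode
 * only after all in-core fields are updated.  Hedged pseudocode sketch,
 * reusing the stuff()/expr placeholders from that comment.
 */
#if 0
	stuff();			/* update in-core inode fields... */
	inode->i_size = expr;		/* ...including the size... */
	mark_inode_dirty(inode);	/* ...then publish to writeback */
#endif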

/*
 * ext4_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible.  In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk.  (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Another thing we have to ensure is that if we are in ordered mode
 * and the inode is still attached to the committing transaction, we must
 * start writeout of all the dirty pages which are being truncated.
 * This way we are sure that all the data written in the previous
 * transaction are already on disk (truncate waits for pages under
 * writeback).
 *
 * Called with inode->i_mutex down.
 */
int ext4_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error, rc = 0;
	int orphan = 0;
	const unsigned int ia_valid = attr->ia_valid;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (is_quota_modification(inode, attr))
		dquot_initialize(inode);
	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
	    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
		handle_t *handle;

		/* (user+group)*(old+new) structure, inode write (sb,
		 * inode block, ? - but truncate inode update has it) */
		handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
					EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}
		error = dquot_transfer(inode, attr);
		if (error) {
			ext4_journal_stop(handle);
			return error;
		}
		/* Update corresponding info in inode so that everything is in
		 * one transaction */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		error = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	}

	if (attr->ia_valid & ATTR_SIZE) {
		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

			if (attr->ia_size > sbi->s_bitmap_maxbytes)
				return -EFBIG;
		}
	}

	if (S_ISREG(inode->i_mode) &&
	    attr->ia_valid & ATTR_SIZE &&
	    (attr->ia_size < inode->i_size)) {
		handle_t *handle;

		handle = ext4_journal_start(inode, 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}
		if (ext4_handle_valid(handle)) {
			error = ext4_orphan_add(handle, inode);
			orphan = 1;
		}
		EXT4_I(inode)->i_disksize = attr->ia_size;
		rc = ext4_mark_inode_dirty(handle, inode);
		if (!error)
			error = rc;
		ext4_journal_stop(handle);

		if (ext4_should_order_data(inode)) {
			error = ext4_begin_ordered_truncate(inode,
							    attr->ia_size);
			if (error) {
				/* Do as much error cleanup as possible */
				handle = ext4_journal_start(inode, 3);
				if (IS_ERR(handle)) {
					ext4_orphan_del(NULL, inode);
					goto err_out;
				}
				ext4_orphan_del(handle, inode);
				orphan = 0;
				ext4_journal_stop(handle);
				goto err_out;
			}
		}
	}

	if (attr->ia_valid & ATTR_SIZE) {
		if (attr->ia_size != i_size_read(inode)) {
			truncate_setsize(inode, attr->ia_size);
			ext4_truncate(inode);
		} else if (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
			ext4_truncate(inode);
	}

	if (!rc) {
		setattr_copy(inode, attr);
		mark_inode_dirty(inode);
	}

	/*
	 * If the call to ext4_truncate failed to get a transaction handle at
	 * all, we need to clean up the in-core orphan list manually.
	 */
	if (orphan && inode->i_nlink)
		ext4_orphan_del(NULL, inode);

	if (!rc && (ia_valid & ATTR_MODE))
		rc = ext4_acl_chmod(inode);

err_out:
	ext4_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	return error;
}

int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode;
	unsigned long delalloc_blocks;

	inode = dentry->d_inode;
	generic_fillattr(inode, stat);

	/*
	 * We can't update i_blocks if the block allocation is delayed,
	 * otherwise in the case of a system crash before the real block
	 * allocation is done, we would have i_blocks inconsistent with
	 * the on-disk file blocks.
	 * We always keep i_blocks updated together with the real
	 * allocation.  But to avoid confusing users, stat
	 * will return the blocks that include the delayed allocation
	 * blocks for this file.
	 */
	delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;

	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits) >> 9;
	return 0;
}

static int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	int indirects;

	/* if nrblocks are contiguous */
	if (chunk) {
		/*
		 * With N contiguous data blocks, we need at most
		 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
		 * 2 dindirect blocks, and 1 tindirect block
		 */
		return DIV_ROUND_UP(nrblocks,
				    EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
	}
	/*
	 * If nrblocks are not contiguous then, in the worst case, each
	 * block touches an indirect block, and each indirect block touches
	 * a double indirect block, plus a triple indirect block.
	 */
	indirects = nrblocks * 2 + 1;
	return indirects;
}

static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return ext4_ind_trans_blocks(inode, nrblocks, chunk);
	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
}
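
/*
 * Worked example for the indirect-block costs above (illustrative,
 * assuming a 4KB block size, so EXT4_ADDR_PER_BLOCK() == 1024):
 *
 *	contiguous chunk of 2048 blocks:
 *		DIV_ROUND_UP(2048, 1024) + 4 = 6 metadata blocks
 *	4 discontiguous blocks:
 *		4 * 2 + 1 = 9 metadata blocks
 *
 * The "+ 4" is the worst case of one extra indirect block, two
 * dindirect blocks and one tindirect block.
 */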

/*
 * Account for index blocks, block group bitmaps and block group
 * descriptor blocks if we modify data blocks and index blocks.
 * In the worst case, the index blocks spread over different block groups.
 *
 * If the data blocks are discontiguous, they may spread over
 * different block groups too.  If they are contiguous, with flexbg,
 * they could still cross a block group boundary.
 *
 * Also account for superblock, inode, quota and xattr blocks.
 */
static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
	int gdpblocks;
	int idxblocks;
	int ret = 0;

	/*
	 * How many index blocks do we need to touch to modify nrblocks?
	 * The "chunk" flag indicates whether the nrblocks are
	 * physically contiguous on disk.
	 *
	 * Direct IO and fallocate call get_block to allocate
	 * one single extent at a time, so they can set the "chunk" flag.
	 */
	idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);

	ret = idxblocks;

	/*
	 * Now let's see how many group bitmaps and group descriptors
	 * need to be accounted.
	 */
	groups = idxblocks;
	if (chunk)
		groups += 1;
	else
		groups += nrblocks;

	gdpblocks = groups;
	if (groups > ngroups)
		groups = ngroups;
	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;

	/* bitmaps and block group descriptor blocks */
	ret += groups + gdpblocks;

	/* Blocks for super block, inode, quota and xattr blocks */
	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);

	return ret;
}

/*
 * Calculate the total number of credits to reserve to fit
 * the modification of a single page into a single transaction,
 * which may include multiple chunks of block allocations.
 *
 * This could be called via ext4_write_begin().
 *
 * We need to consider the worst case, when
 * one new block is allocated per extent.
 */
int ext4_writepage_trans_blocks(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);
	int ret;

	ret = ext4_meta_trans_blocks(inode, bpp, 0);

	/* Account for data blocks for journalled mode */
	if (ext4_should_journal_data(inode))
		ret += bpp;
	return ret;
}

/*
 * Calculate the journal credits for a chunk of data modification.
 *
 * This is called from DIO, fallocate or whoever calls
 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
 *
 * Journal buffers for data blocks are not included here, as DIO
 * and fallocate do not need to journal data buffers.
 */
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
	return ext4_meta_trans_blocks(inode, nrblocks, 1);
}

/*
 * The caller must have previously called ext4_reserve_inode_write().
 * Given this, we know that the caller already has write access to iloc->bh.
 */
int ext4_mark_iloc_dirty(handle_t *handle,
			 struct inode *inode, struct ext4_iloc *iloc)
{
	int err = 0;

	if (test_opt(inode->i_sb, I_VERSION))
		inode_inc_iversion(inode);

	/* the do_update_inode consumes one bh->b_count */
	get_bh(iloc->bh);

	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
	err = ext4_do_update_inode(handle, inode, iloc);
	put_bh(iloc->bh);
	return err;
}

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh.  This _must_ be cleaned up later.
 */
int
ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			 struct ext4_iloc *iloc)
{
	int err;

	err = ext4_get_inode_loc(inode, iloc);
	if (!err) {
		BUFFER_TRACE(iloc->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, iloc->bh);
		if (err) {
			brelse(iloc->bh);
			iloc->bh = NULL;
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
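
/*
 * Typical usage of the two helpers above (an illustrative sketch modelled
 * on ext4_mark_inode_dirty(); example_update_inode() is not a real caller
 * in this file):
 */
#if 0
static int example_update_inode(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	int err;

	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		return err;
	/* ...modify the in-core inode under the handle here... */
	return ext4_mark_iloc_dirty(handle, inode, &iloc);
}
#endif
/*
 * Note that ext4_mark_iloc_dirty() consumes the bh reference which
 * ext4_reserve_inode_write() left behind, so no brelse() is needed on
 * this path.
 */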

/*
 * Expand an inode by new_extra_isize bytes.
 * Returns 0 on success or negative error number on failure.
 */
static int ext4_expand_extra_isize(struct inode *inode,
				   unsigned int new_extra_isize,
				   struct ext4_iloc iloc,
				   handle_t *handle)
{
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;

	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
		return 0;

	raw_inode = ext4_raw_inode(&iloc);

	header = IHDR(inode, raw_inode);

	/* No extended attributes present */
	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
		       new_extra_isize);
		EXT4_I(inode)->i_extra_isize = new_extra_isize;
		return 0;
	}

	/* try to expand with EAs present */
	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
					  raw_inode, handle);
}
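
/*
 * Worked example for ext4_expand_extra_isize() (illustrative, assuming
 * 256-byte on-disk inodes): the fixed fields end at
 * EXT4_GOOD_OLD_INODE_SIZE (128 bytes).  With no in-inode xattrs,
 * growing i_extra_isize to 32 just zeroes the bytes following the
 * fixed fields and records the new size; roughly 256 - 128 - 32 = 96
 * bytes then remain for in-inode extended attributes.
 */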
Delete" 5678 " some EAs or run e2fsck.", 5679 inode->i_ino); 5680 mnt_count = 5681 le16_to_cpu(sbi->s_es->s_mnt_count); 5682 } 5683 } 5684 } 5685 } 5686 if (!err) 5687 err = ext4_mark_iloc_dirty(handle, inode, &iloc); 5688 return err; 5689 } 5690 5691 /* 5692 * ext4_dirty_inode() is called from __mark_inode_dirty() 5693 * 5694 * We're really interested in the case where a file is being extended. 5695 * i_size has been changed by generic_commit_write() and we thus need 5696 * to include the updated inode in the current transaction. 5697 * 5698 * Also, dquot_alloc_block() will always dirty the inode when blocks 5699 * are allocated to the file. 5700 * 5701 * If the inode is marked synchronous, we don't honour that here - doing 5702 * so would cause a commit on atime updates, which we don't bother doing. 5703 * We handle synchronous inodes at the highest possible level. 5704 */ 5705 void ext4_dirty_inode(struct inode *inode, int flags) 5706 { 5707 handle_t *handle; 5708 5709 handle = ext4_journal_start(inode, 2); 5710 if (IS_ERR(handle)) 5711 goto out; 5712 5713 ext4_mark_inode_dirty(handle, inode); 5714 5715 ext4_journal_stop(handle); 5716 out: 5717 return; 5718 } 5719 5720 #if 0 5721 /* 5722 * Bind an inode's backing buffer_head into this transaction, to prevent 5723 * it from being flushed to disk early. Unlike 5724 * ext4_reserve_inode_write, this leaves behind no bh reference and 5725 * returns no iloc structure, so the caller needs to repeat the iloc 5726 * lookup to mark the inode dirty later. 5727 */ 5728 static int ext4_pin_inode(handle_t *handle, struct inode *inode) 5729 { 5730 struct ext4_iloc iloc; 5731 5732 int err = 0; 5733 if (handle) { 5734 err = ext4_get_inode_loc(inode, &iloc); 5735 if (!err) { 5736 BUFFER_TRACE(iloc.bh, "get_write_access"); 5737 err = jbd2_journal_get_write_access(handle, iloc.bh); 5738 if (!err) 5739 err = ext4_handle_dirty_metadata(handle, 5740 NULL, 5741 iloc.bh); 5742 brelse(iloc.bh); 5743 } 5744 } 5745 ext4_std_error(inode->i_sb, err); 5746 return err; 5747 } 5748 #endif 5749 5750 int ext4_change_inode_journal_flag(struct inode *inode, int val) 5751 { 5752 journal_t *journal; 5753 handle_t *handle; 5754 int err; 5755 5756 /* 5757 * We have to be very careful here: changing a data block's 5758 * journaling status dynamically is dangerous. If we write a 5759 * data block to the journal, change the status and then delete 5760 * that block, we risk forgetting to revoke the old log record 5761 * from the journal and so a subsequent replay can corrupt data. 5762 * So, first we make sure that the journal is empty and that 5763 * nobody is changing anything. 5764 */ 5765 5766 journal = EXT4_JOURNAL(inode); 5767 if (!journal) 5768 return 0; 5769 if (is_journal_aborted(journal)) 5770 return -EROFS; 5771 5772 jbd2_journal_lock_updates(journal); 5773 jbd2_journal_flush(journal); 5774 5775 /* 5776 * OK, there are no updates running now, and all cached data is 5777 * synced to disk. We are now in a completely consistent state 5778 * which doesn't have anything in the journal, and we know that 5779 * no filesystem updates are running, so it is safe to modify 5780 * the inode's in-core data-journaling state flag now. 5781 */ 5782 5783 if (val) 5784 ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); 5785 else 5786 ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); 5787 ext4_set_aops(inode); 5788 5789 jbd2_journal_unlock_updates(journal); 5790 5791 /* Finally we can mark the inode as dirty. 

int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;

	/*
	 * We have to be very careful here: changing a data block's
	 * journaling status dynamically is dangerous.  If we write a
	 * data block to the journal, change the status and then delete
	 * that block, we risk forgetting to revoke the old log record
	 * from the journal and so a subsequent replay can corrupt data.
	 * So, first we make sure that the journal is empty and that
	 * nobody is changing anything.
	 */

	journal = EXT4_JOURNAL(inode);
	if (!journal)
		return 0;
	if (is_journal_aborted(journal))
		return -EROFS;

	jbd2_journal_lock_updates(journal);
	jbd2_journal_flush(journal);

	/*
	 * OK, there are no updates running now, and all cached data is
	 * synced to disk.  We are now in a completely consistent state
	 * which doesn't have anything in the journal, and we know that
	 * no filesystem updates are running, so it is safe to modify
	 * the inode's in-core data-journaling state flag now.
	 */

	if (val)
		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	else
		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	ext4_set_aops(inode);

	jbd2_journal_unlock_updates(journal);

	/* Finally we can mark the inode as dirty. */

	handle = ext4_journal_start(inode, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_mark_inode_dirty(handle, inode);
	ext4_handle_sync(handle);
	ext4_journal_stop(handle);
	ext4_std_error(inode->i_sb, err);

	return err;
}

static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
{
	return !buffer_mapped(bh);
}

int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	loff_t size;
	unsigned long len;
	int ret = -EINVAL;
	void *fsdata;
	struct file *file = vma->vm_file;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;

	/*
	 * Get i_alloc_sem to stop truncates messing with the inode.  We
	 * cannot get i_mutex because we are already holding mmap_sem.
	 */
	down_read(&inode->i_alloc_sem);
	size = i_size_read(inode);
	if (page->mapping != mapping || size <= page_offset(page)
	    || !PageUptodate(page)) {
		/* page got truncated from under us? */
		goto out_unlock;
	}
	ret = 0;

	lock_page(page);
	wait_on_page_writeback(page);
	if (PageMappedToDisk(page)) {
		up_read(&inode->i_alloc_sem);
		return VM_FAULT_LOCKED;
	}

	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

	/*
	 * Return if we have all the buffers mapped.  This avoids the need
	 * to call write_begin/write_end, which does a journal_start/
	 * journal_stop and so can block for a long time.
	 */
	if (page_has_buffers(page)) {
		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
				       ext4_bh_unmapped)) {
			up_read(&inode->i_alloc_sem);
			return VM_FAULT_LOCKED;
		}
	}
	unlock_page(page);
	/*
	 * OK, we need to fill the hole...  Do write_begin/write_end to do
	 * the block allocation/reservation.  We are not holding
	 * inode->i_mutex here, which allows parallel write_begin/write_end
	 * calls.  lock_page prevents that from happening on the same page,
	 * though.
	 */
	ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
			len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
	if (ret < 0)
		goto out_unlock;
	ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
			len, len, page, fsdata);
	if (ret < 0)
		goto out_unlock;
	ret = 0;

	/*
	 * write_begin/end might have created a dirty page and someone
	 * could wander in and start the IO.  Make sure that hasn't
	 * happened.
	 */
	lock_page(page);
	wait_on_page_writeback(page);
	up_read(&inode->i_alloc_sem);
	return VM_FAULT_LOCKED;
out_unlock:
	if (ret)
		ret = VM_FAULT_SIGBUS;
	up_read(&inode->i_alloc_sem);
	return ret;
}