/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			     struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u16 csum_lo;
	__u16 csum_hi = 0;
	__u32 csum;

	csum_lo = raw->i_checksum_lo;
	raw->i_checksum_lo = 0;
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
		csum_hi = raw->i_checksum_hi;
		raw->i_checksum_hi = 0;
	}

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
			   EXT4_INODE_SIZE(inode->i_sb));

	raw->i_checksum_lo = csum_lo;
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = csum_hi;

	return csum;
}

static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}

static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
				struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}
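
/*
 * Worked example (added for clarity, not part of the original source):
 * the 32-bit checksum computed above is stored split across two 16-bit
 * on-disk fields.  Assuming csum == 0xAABBCCDD:
 *
 *	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);   -> 0xCCDD
 *	raw->i_checksum_hi = cpu_to_le16(csum >> 16);      -> 0xAABB
 *
 * The hi half is stored only when the inode is big enough to carry the
 * i_checksum_hi field; otherwise verification masks the calculated
 * value with 0xFFFF and compares the low half alone.
 */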
static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
		struct inode *inode, struct page *page, loff_t from,
		loff_t length, int flags);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}
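
/*
 * Caller-pattern sketch (added, illustrative only): a long-running
 * truncate checks its remaining credits and restarts the handle when
 * they run low, along the lines of
 *
 *	if (!ext4_handle_has_enough_credits(handle, needed)) {
 *		err = ext4_truncate_restart_trans(handle, inode, needed);
 *		if (err)
 *			goto out;
 *	}
 *
 * Everything dirtied so far must already be consistent, because the
 * restart commits the current transaction.
 */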
/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	trace_ext4_evict_inode(inode);

	ext4_ioend_wait(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data, dirty buffers are tracked only in the
		 * journal.  So although mm thinks everything is clean and
		 * ready for reaping, the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed.  Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss.  Also even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped and thus the user could see stale data if they
		 * try to read them before the transaction is checkpointed.
		 * So be careful and force everything to disk here...  We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use page cache.
		 */
		if (ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_log_start_commit(journal, commit_tid);
			jbd2_log_wait_commit(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages(&inode->i_data, 0);
		goto no_delete;
	}

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	/*
	 * Protect us against freezing - iput() caller didn't have to have any
	 * protection against it.
	 */
	sb_start_intwrite(inode->i_sb);
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
				    ext4_blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			ext4_orphan_del(NULL, inode);
			sb_end_intwrite(inode->i_sb);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	sb_end_intwrite(inode->i_sb);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate a block located at @lblock.
 */
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return ext4_ext_calc_metadata_amount(inode, lblock);

	return ext4_ind_calc_metadata_amount(inode, lblock);
}

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
			     "with only %d reserved data blocks",
			     __func__, inode->i_ino, used,
			     ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
		ext4_warning(inode->i_sb, "ino %lu, allocated %d "
			     "with only %d reserved metadata blocks "
			     "(releasing %d blocks with reserved %d data blocks)",
			     inode->i_ino, ei->i_allocated_meta_blocks,
			     ei->i_reserved_meta_blocks, used,
			     ei->i_reserved_data_blocks);
		WARN_ON(1);
		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
			   used + ei->i_allocated_meta_blocks);
	ei->i_allocated_meta_blocks = 0;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated, so on delayed-allocation writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}
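
/*
 * Worked example (added, illustrative): suppose 4 delalloc data blocks
 * are written out and 1 metadata block was allocated for them.  Then
 * used == 4 and i_allocated_meta_blocks == 1, so the function above
 * drops i_reserved_data_blocks by 4, i_reserved_meta_blocks by 1, and
 * s_dirtyclusters_counter by 5 (4 + 1).  Once i_reserved_data_blocks
 * hits zero, the remaining metadata reservation is released as well.
 */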
static int __check_block_validity(struct inode *inode, const char *func,
				  unsigned int line,
				  struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_len);
		return -EIO;
	}
	return 0;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

/*
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
				    unsigned int max_pages)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t	index;
	struct pagevec pvec;
	pgoff_t num = 0;
	int i, nr_pages, done = 0;

	if (max_pages == 0)
		return 0;
	pagevec_init(&pvec, 0);
	while (!done) {
		index = idx;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      (pgoff_t)PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page) ||
			    PageWriteback(page) ||
			    page->index != idx) {
				done = 1;
				unlock_page(page);
				break;
			}
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					if (!buffer_delay(bh) &&
					    !buffer_unwritten(bh))
						done = 1;
					bh = bh->b_this_page;
				} while (!done && (bh != head));
			}
			unlock_page(page);
			if (done)
				break;
			idx++;
			num++;
			if (num >= max_pages) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
	}
	return num;
}
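
/*
 * Usage note (added, names assumed from the delalloc writeback path):
 * a writeback heuristic can size its batch with this helper, e.g.
 *
 *	desired_nr_to_write = ext4_num_dirty_pages(inode, index, max_pages);
 *
 * Only pages whose buffers are all delayed or unwritten extend the
 * count; the first page containing an already-mapped buffer stops it.
 */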
/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns immediately if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head, and marks it
 * mapped.
 *
 * If the file is extent based, it will call ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapping
 * based files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and uninitialized,
 * the result buffer head is unmapped.  If create == 1, it will make sure
 * the buffer head is mapped.
 *
 * It returns 0 if a plain lookup fails (blocks have not been allocated);
 * in that case the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	struct extent_status es;
	int retval;

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);

	/* Lookup extent status tree firstly */
	if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
			map->m_pblk = ext4_es_pblock(&es) +
					map->m_lblk - es.es_lblk;
			map->m_flags |= ext4_es_is_written(&es) ?
					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
			retval = 0;
		} else {
			BUG_ON(1);
		}
		goto found;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	if (retval > 0) {
		int ret;
		unsigned long long status;

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    ext4_find_delalloc_range(inode, map->m_lblk,
					     map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk,
					    map->m_len, map->m_pblk, status);
		if (ret < 0)
			retval = ret;
	}
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		up_read((&EXT4_I(inode)->i_data_sem));

found:
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create = 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		return retval;

	/*
	 * Here we clear m_flags because after allocating a new extent,
	 * it will be set again.
	 */
	map->m_flags &= ~EXT4_MAP_FLAGS;

	/*
	 * Allocating new blocks and/or writing to an uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * If the caller is from the delayed allocation writeout path,
	 * we have already reserved fs blocks for allocation; let the
	 * underlying get_block() function know, to avoid double
	 * accounting.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between.
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags.
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now.  We don't
		 * support fallocate for non-extent files, so we can update
		 * the reserved space here.
		 */
		if ((retval > 0) &&
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

	if (retval > 0) {
		int ret;
		unsigned long long status;

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    ext4_find_delalloc_range(inode, map->m_lblk,
					     map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    map->m_pblk, status);
		if (ret < 0)
			retval = ret;
	}

	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}
	return retval;
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	handle_t *handle = ext4_journal_current_handle();
	struct ext4_map_blocks map;
	int ret = 0, started = 0;
	int dio_credits;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	if (flags && !(flags & EXT4_GET_BLOCKS_NO_LOCK) && !handle) {
		/* Direct IO write... */
		if (map.m_len > DIO_MAX_BLOCKS)
			map.m_len = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
					    dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			return ret;
		}
		started = 1;
	}

	ret = ext4_map_blocks(handle, inode, &map, flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}
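
/*
 * Usage sketch (added, illustrative): ext4_get_block() follows the
 * generic get_block_t contract, so generic helpers can drive it:
 *
 *	struct buffer_head bh = { .b_size = inode->i_sb->s_blocksize };
 *
 *	err = ext4_get_block(inode, iblock, &bh, 0);
 *
 * On success with a mapping, the bh is marked mapped and b_blocknr
 * holds the physical block; with create == 0 a hole simply leaves the
 * buffer head unmapped.
 */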
/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);

	/* ensure we send some value back into *errp */
	*errp = 0;

	if (create && err == 0)
		err = -ENOSPC;	/* should never happen */
	if (err < 0)
		*errp = err;
	if (err <= 0)
		return NULL;

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (unlikely(!bh)) {
		*errp = -ENOMEM;
		return NULL;
	}
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		fatal = ext4_journal_get_create_access(handle, bh);
		if (!fatal && !buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (!fatal)
			fatal = err;
	} else {
		BUFFER_TRACE(bh, "not a new buffer");
	}
	if (fatal) {
		*errp = fatal;
		brelse(bh);
		bh = NULL;
	}
	return bh;
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}

int ext4_walk_page_buffers(handle_t *handle,
			   struct buffer_head *head,
			   unsigned from,
			   unsigned to,
			   int *partial,
			   int (*fn)(handle_t *handle,
				     struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
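
/*
 * Usage sketch (added): ext4_walk_page_buffers() applies @fn to every
 * buffer overlapping [from, to) in the page; the data=journal write
 * path below uses it as
 *
 *	ret = ext4_walk_page_buffers(handle, page_buffers(page),
 *				     from, to, NULL,
 *				     do_journal_get_write_access);
 *
 * @partial, when non-NULL, is set if any buffer outside the range is
 * not uptodate, telling the caller the page as a whole cannot be
 * marked uptodate.
 */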
/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage().  In that case, we
 * *know* that ext4_writepage() has generated enough buffer credits to do the
 * whole page.  So we won't block on the journal in that case, which is good,
 * because the caller may be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
int do_journal_get_write_access(handle_t *handle,
				struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers.  Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues.  Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}
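
/*
 * Context note (added): the functions below implement the
 * ->write_begin/->write_end address_space operations pair.  The
 * generic VFS write path drives them roughly like this (sketch):
 *
 *	a_ops->write_begin(file, mapping, pos, len, flags, &page, &fsdata);
 *	(copy user data into the page)
 *	a_ops->write_end(file, mapping, pos, len, copied, page, fsdata);
 *
 * so write_begin must return with the page locked, and every
 * write_end path must unlock and release it.
 */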
static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason.
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
						    flags, pagep);
		if (ret < 0)
			return ret;
		if (ret == 1)
			return 0;
	}

	/*
	 * grab_cache_page_write_begin() can take a long time if the
	 * system is thrashing due to memory pressure, or if the page
	 * is being written back.  So grab it first before we start
	 * the transaction handle.  This also allows us to allocate
	 * the page (if needed) without using GFP_NOFS.
	 */
retry_grab:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	unlock_page(page);

retry_journal:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
	if (IS_ERR(handle)) {
		page_cache_release(page);
		return PTR_ERR(handle);
	}

	lock_page(page);
	if (page->mapping != mapping) {
		/* The page got truncated from under us */
		unlock_page(page);
		page_cache_release(page);
		ext4_journal_stop(handle);
		goto retry_grab;
	}
	wait_on_page_writeback(page);

	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = ext4_walk_page_buffers(handle, page_buffers(page),
					     from, to, NULL,
					     do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again.  Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes.
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}

		if (ret == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry_journal;
		page_cache_release(page);
		return ret;
	}
	*pagep = page;
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}
static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	if (ext4_has_inline_data(inode))
		copied = ext4_write_inline_data_end(inode, pos, len,
						    copied, page);
	else
		copied = block_write_end(file, mapping, pos,
					 len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied > EXT4_I(inode)->i_disksize) {
		/* We need to mark the inode dirty even if
		 * new_i_size is less than inode->i_size
		 * but greater than i_disksize. (hint: delalloc)
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock.  First, it unnecessarily
	 * makes the holding time of page lock longer.  Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	return copied;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  Metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_ordered_write_end(inode, pos, len, copied);
	ret = ext4_jbd2_file_inode(handle, inode);

	if (ret == 0) {
		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
					      page, fsdata);
		copied = ret2;
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			/* If we have allocated more blocks than copied,
			 * we will have blocks allocated outside
			 * inode->i_size, so truncate them.
			 */
			ext4_orphan_add(handle, inode);
		if (ret2 < 0)
			ret = ret2;
	} else {
		unlock_page(page);
		page_cache_release(page);
	}

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_writeback_write_end(inode, pos, len, copied);
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
				      page, fsdata);
	copied = ret2;
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* If we have allocated more blocks than copied,
		 * we will have blocks allocated outside
		 * inode->i_size, so truncate them.
		 */
		ext4_orphan_add(handle, inode);

	if (ret2 < 0)
		ret = ret2;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	loff_t new_i_size;

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (ext4_has_inline_data(inode))
		copied = ext4_write_inline_data_end(inode, pos, len,
						    copied, page);
	else {
		if (copied < len) {
			if (!PageUptodate(page))
				copied = 0;
			page_zero_new_buffers(page, from+copied, to);
		}

		ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
					     to, &partial, write_end_fn);
		if (!partial)
			SetPageUptodate(page);
	}
	new_i_size = pos + copied;
	if (new_i_size > inode->i_size)
		i_size_write(inode, pos+copied);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		ext4_update_i_disksize(inode, new_i_size);
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	unlock_page(page);
	page_cache_release(page);
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* If we have allocated more blocks than copied,
		 * we will have blocks allocated outside
		 * inode->i_size, so truncate them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
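
/*
 * Summary note (added): the three ->write_end variants above differ in
 * how data is ordered against metadata.  The "ordered" flavour files
 * the inode's data on the running transaction's ordered list via
 * ext4_jbd2_file_inode() before updating sizes; the "writeback"
 * flavour only updates sizes; the "journalled" flavour journals the
 * data buffers themselves and records the transaction in
 * i_datasync_tid.  All three share the same orphan-list dance for the
 * partially-copied case.
 */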
/*
 * Reserve a single cluster located at lblock
 */
static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
{
	int retries = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int md_needed;
	int ret;
	ext4_lblk_t save_last_lblock;
	int save_len;

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end.  Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
	if (ret)
		return ret;

	/*
	 * Recalculate the amount of metadata blocks to reserve
	 * in order to allocate nrblocks.
	 * The worst case is one extent per block.
	 */
repeat:
	spin_lock(&ei->i_block_reservation_lock);
	/*
	 * ext4_calc_metadata_amount() has side effects, which we have
	 * to be prepared to undo if we fail to claim space.
	 */
	save_len = ei->i_da_metadata_calc_len;
	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
	md_needed = EXT4_NUM_B2C(sbi,
				 ext4_calc_metadata_amount(inode, lblock));
	trace_ext4_da_reserve_space(inode, md_needed);

	/*
	 * We do still charge estimated metadata to the sb though;
	 * we cannot afford to run out of free blocks.
	 */
	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
		ei->i_da_metadata_calc_len = save_len;
		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
		spin_unlock(&ei->i_block_reservation_lock);
		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
			yield();
			goto repeat;
		}
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
		return -ENOSPC;
	}
	ei->i_reserved_data_blocks++;
	ei->i_reserved_meta_blocks += md_needed;
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;	/* success */
}
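
/*
 * Unit note (added): reservations above are counted in clusters, while
 * quota is charged in blocks; EXT4_C2B() converts between the two.
 * For example, with a bigalloc cluster ratio of 16, reserving one
 * cluster charges EXT4_C2B(sbi, 1) == 16 blocks of quota, and
 * EXT4_NUM_B2C() rounds a block count up to whole clusters.
 */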
static void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * If there aren't enough reserved blocks, then the
		 * counter is messed up somewhere.  Since this
		 * function is called from invalidatepage, it's
		 * harmless to return without any action.
		 */
		ext4_warning(inode->i_sb, "ext4_da_release_space: "
			 "ino %lu, to_free %d with only %d reserved "
			 "data blocks", inode->i_ino, to_free,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 * Note that in case of bigalloc, i_reserved_meta_blocks,
		 * i_reserved_data_blocks, etc. refer to number of clusters.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}

static void ext4_da_page_release_reservation(struct page *page,
					     unsigned long offset)
{
	int to_release = 0;
	struct buffer_head *head, *bh;
	unsigned int curr_off = 0;
	struct inode *inode = page->mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int num_clusters;
	ext4_fsblk_t lblk;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;

		if ((offset <= curr_off) && (buffer_delay(bh))) {
			to_release++;
			clear_buffer_delay(bh);
		}
		curr_off = next_off;
	} while ((bh = bh->b_this_page) != head);

	if (to_release) {
		lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
		ext4_es_remove_extent(inode, lblk, to_release);
	}

	/* If we have released all the blocks belonging to a cluster, then we
	 * need to release the reserved space for that cluster. */
	num_clusters = EXT4_NUM_B2C(sbi, to_release);
	while (num_clusters > 0) {
		lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
			((num_clusters - 1) << sbi->s_cluster_bits);
		if (sbi->s_cluster_ratio == 1 ||
		    !ext4_find_delalloc_cluster(inode, lblk))
			ext4_da_release_space(inode, 1);

		num_clusters--;
	}
}
/*
 * Delayed allocation stuff
 */

/*
 * mpage_da_submit_io - walks through the extent of pages and tries to
 * write them with the writepage() callback.
 *
 * @mpd->inode: inode
 * @mpd->first_page: first page of the extent
 * @mpd->next_page: page after the last page of the extent
 *
 * By the time mpage_da_submit_io() is called we expect all blocks
 * to be allocated.  This may be wrong if allocation failed.
 *
 * As pages are already locked by write_cache_pages(), we can't use it.
 */
static int mpage_da_submit_io(struct mpage_da_data *mpd,
			      struct ext4_map_blocks *map)
{
	struct pagevec pvec;
	unsigned long index, end;
	int ret = 0, err, nr_pages, i;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;
	loff_t size = i_size_read(inode);
	unsigned int len, block_start;
	struct buffer_head *bh, *page_bufs = NULL;
	sector_t pblock = 0, cur_logical = 0;
	struct ext4_io_submit io_submit;

	BUG_ON(mpd->next_page <= mpd->first_page);
	memset(&io_submit, 0, sizeof(io_submit));
	/*
	 * We need to start from the first_page to the next_page - 1
	 * to make sure we also write the mapped dirty buffer_heads.
	 * If we look at mpd->b_blocknr we would only be looking
	 * at the currently mapped buffer_heads.
	 */
	index = mpd->first_page;
	end = mpd->next_page - 1;

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			int skip_page = 0;
			struct page *page = pvec.pages[i];

			index = page->index;
			if (index > end)
				break;

			if (index == size >> PAGE_CACHE_SHIFT)
				len = size & ~PAGE_CACHE_MASK;
			else
				len = PAGE_CACHE_SIZE;
			if (map) {
				cur_logical = index << (PAGE_CACHE_SHIFT -
							inode->i_blkbits);
				pblock = map->m_pblk + (cur_logical -
							map->m_lblk);
			}
			index++;

			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));

			bh = page_bufs = page_buffers(page);
			block_start = 0;
			do {
				if (map && (cur_logical >= map->m_lblk) &&
				    (cur_logical <= (map->m_lblk +
						     (map->m_len - 1)))) {
					if (buffer_delay(bh)) {
						clear_buffer_delay(bh);
						bh->b_blocknr = pblock;
					}
					if (buffer_unwritten(bh) ||
					    buffer_mapped(bh))
						BUG_ON(bh->b_blocknr != pblock);
					if (map->m_flags & EXT4_MAP_UNINIT)
						set_buffer_uninit(bh);
					clear_buffer_unwritten(bh);
				}

				/*
				 * skip page if block allocation undone and
				 * block is dirty
				 */
				if (ext4_bh_delay_or_unwritten(NULL, bh))
					skip_page = 1;
				bh = bh->b_this_page;
				block_start += bh->b_size;
				cur_logical++;
				pblock++;
			} while (bh != page_bufs);

			if (skip_page) {
				unlock_page(page);
				continue;
			}

			clear_page_dirty_for_io(page);
			err = ext4_bio_write_page(&io_submit, page, len,
						  mpd->wbc);
			if (!err)
				mpd->pages_written++;
			/*
			 * In error case, we have to continue because
			 * remaining pages are still locked.
			 */
			if (ret == 0)
				ret = err;
		}
		pagevec_release(&pvec);
	}
	ext4_io_submit(&io_submit);
	return ret;
}

static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
{
	int nr_pages, i;
	pgoff_t index, end;
	struct pagevec pvec;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;
	ext4_lblk_t start, last;

	index = mpd->first_page;
	end   = mpd->next_page - 1;

	start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	ext4_es_remove_extent(inode, start, last - start + 1);

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			if (page->index > end)
				break;
			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			block_invalidatepage(page, 0);
			ClearPageUptodate(page);
			unlock_page(page);
		}
		index = pvec.pages[nr_pages - 1]->index + 1;
		pagevec_release(&pvec);
	}
	return;
}
static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct super_block *sb = inode->i_sb;

	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
		 EXT4_C2B(EXT4_SB(inode->i_sb),
			  ext4_count_free_clusters(inode->i_sb)));
	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
		percpu_counter_sum(&sbi->s_freeclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
		percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "Block reservation details");
	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
		 EXT4_I(inode)->i_reserved_data_blocks);
	ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u",
		 EXT4_I(inode)->i_reserved_meta_blocks);
	return;
}
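
/*
 * Pipeline note (added): the delayed-allocation writeback code below
 * works in three stages: mpage_add_bh_to_extent() accumulates
 * contiguous dirty buffers into mpd, mpage_da_map_and_submit() maps
 * them with a single ext4_map_blocks() call, and mpage_da_submit_io()
 * pushes the now-mapped pages to the bio layer.
 */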
/*
 * mpage_da_map_and_submit - go through given space, map it
 * if necessary, and then submit it for I/O.
 *
 * @mpd - bh describing space
 *
 * The function skips space we know is already mapped to disk blocks.
 */
static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
{
	int err, blks, get_blocks_flags;
	struct ext4_map_blocks map, *mapp = NULL;
	sector_t next = mpd->b_blocknr;
	unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
	loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
	handle_t *handle = NULL;

	/*
	 * If the blocks are mapped already, or we couldn't accumulate
	 * any blocks, then proceed immediately to the submission stage.
	 */
	if ((mpd->b_size == 0) ||
	    ((mpd->b_state & (1 << BH_Mapped)) &&
	     !(mpd->b_state & (1 << BH_Delay)) &&
	     !(mpd->b_state & (1 << BH_Unwritten))))
		goto submit_io;

	handle = ext4_journal_current_handle();
	BUG_ON(!handle);

	/*
	 * Call ext4_map_blocks() to allocate any delayed allocation
	 * blocks, or to convert an uninitialized extent to be
	 * initialized (in the case where we have written into
	 * one or more preallocated blocks).
	 *
	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
	 * indicate that we are on the delayed allocation path.  This
	 * affects functions in many different parts of the allocation
	 * call path.  This flag exists primarily because we don't
	 * want to change *many* call functions, so ext4_map_blocks()
	 * will set the EXT4_STATE_DELALLOC_RESERVED flag once the
	 * inode's allocation semaphore is taken.
	 *
	 * If the blocks in question were delalloc blocks, set
	 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
	 * variables are updated after the blocks have been allocated.
	 */
	map.m_lblk = next;
	map.m_len = max_blocks;
	get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
	if (ext4_should_dioread_nolock(mpd->inode))
		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
	if (mpd->b_state & (1 << BH_Delay))
		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;

	blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
	if (blks < 0) {
		struct super_block *sb = mpd->inode->i_sb;

		err = blks;
		/*
		 * If get block returns EAGAIN or ENOSPC and there
		 * appear to be free blocks we will just let
		 * mpage_da_submit_io() unlock all of the pages.
		 */
		if (err == -EAGAIN)
			goto submit_io;

		if (err == -ENOSPC && ext4_count_free_clusters(sb)) {
			mpd->retval = err;
			goto submit_io;
		}

		/*
		 * get block failure will cause us to loop in
		 * writepages, because a_ops->writepage won't be able
		 * to make progress.  The page will be redirtied by
		 * writepage and writepages will again try to write
		 * the same page.
		 */
		if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) {
			ext4_msg(sb, KERN_CRIT,
				 "delayed block allocation failed for inode %lu "
				 "at logical offset %llu with max blocks %zd "
				 "with error %d", mpd->inode->i_ino,
				 (unsigned long long) next,
				 mpd->b_size >> mpd->inode->i_blkbits, err);
			ext4_msg(sb, KERN_CRIT,
				"This should not happen!! Data will be lost");
			if (err == -ENOSPC)
				ext4_print_free_blocks(mpd->inode);
		}
		/* invalidate all the pages */
		ext4_da_block_invalidatepages(mpd);

		/* Mark this page range as having been completed */
		mpd->io_done = 1;
		return;
	}
	BUG_ON(blks == 0);

	mapp = &map;
	if (map.m_flags & EXT4_MAP_NEW) {
		struct block_device *bdev = mpd->inode->i_sb->s_bdev;
		int i;

		for (i = 0; i < map.m_len; i++)
			unmap_underlying_metadata(bdev, map.m_pblk + i);
	}

	/*
	 * Update on-disk size along with block allocation.
	 */
	disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
	if (disksize > i_size_read(mpd->inode))
		disksize = i_size_read(mpd->inode);
	if (disksize > EXT4_I(mpd->inode)->i_disksize) {
		ext4_update_i_disksize(mpd->inode, disksize);
		err = ext4_mark_inode_dirty(handle, mpd->inode);
		if (err)
			ext4_error(mpd->inode->i_sb,
				   "Failed to mark inode %lu dirty",
				   mpd->inode->i_ino);
	}

submit_io:
	mpage_da_submit_io(mpd, mapp);
	mpd->io_done = 1;
}

#define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
		(1 << BH_Delay) | (1 << BH_Unwritten))
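
/*
 * Example (added, illustrative): two buffers coalesce only when they
 * are logically adjacent and agree on all BH_FLAGS bits.  A delayed
 * buffer at logical block 100 extends an mpd covering blocks 96..99
 * whose accumulated b_state is also (1 << BH_Delay); an unwritten
 * buffer at block 100 would not match and forces a flush of the
 * current extent before a new one is started.
 */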
/*
 * mpage_add_bh_to_extent - try to add one more block to extent of blocks
 *
 * @mpd->lbh - extent of blocks
 * @logical - logical number of the block in the file
 * @b_state - b_state of the buffer head added
 *
 * The function is used to collect contiguous blocks in the same state.
 */
static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, sector_t logical,
				   unsigned long b_state)
{
	sector_t next;
	int blkbits = mpd->inode->i_blkbits;
	int nrblocks = mpd->b_size >> blkbits;

	/*
	 * XXX Don't go larger than mballoc is willing to allocate
	 * This is a stopgap solution.  We eventually need to fold
	 * mpage_da_submit_io() into this function and then call
	 * ext4_map_blocks() multiple times in a loop
	 */
	if (nrblocks >= (8*1024*1024 >> blkbits))
		goto flush_it;

	/* check if the reserved journal credits might overflow */
	if (!ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS)) {
		if (nrblocks >= EXT4_MAX_TRANS_DATA) {
			/*
			 * With non-extent format we are limited by the journal
			 * credit available.  Total credit needed to insert
			 * nrblocks contiguous blocks is dependent on the
			 * nrblocks.  So limit nrblocks.
			 */
			goto flush_it;
		}
	}
	/*
	 * First block in the extent
	 */
	if (mpd->b_size == 0) {
		mpd->b_blocknr = logical;
		mpd->b_size = 1 << blkbits;
		mpd->b_state = b_state & BH_FLAGS;
		return;
	}

	next = mpd->b_blocknr + nrblocks;
	/*
	 * Can we merge the block to our big extent?
	 */
	if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
		mpd->b_size += 1 << blkbits;
		return;
	}

flush_it:
	/*
	 * We couldn't merge the block to our extent, so we
	 * need to flush the current extent and start a new one.
	 */
	mpage_da_map_and_submit(mpd);
	return;
}

static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
}

/*
 * This function grabs code from the very beginning of
 * ext4_map_blocks, but assumes that the caller is from the delayed
 * write path.  This function looks up the requested blocks and sets
 * the buffer delay bit under the protection of i_data_sem.
 */
static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
			      struct ext4_map_blocks *map,
			      struct buffer_head *bh)
{
	struct extent_status es;
	int retval;
	sector_t invalid_block = ~((sector_t) 0xffff);

	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
		invalid_block = ~0;

	map->m_flags = 0;
	ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, map->m_len,
		  (unsigned long) map->m_lblk);

	/* Lookup extent status tree firstly */
	if (ext4_es_lookup_extent(inode, iblock, &es)) {

		if (ext4_es_is_hole(&es)) {
			retval = 0;
			down_read((&EXT4_I(inode)->i_data_sem));
			goto add_delayed;
		}

		/*
		 * A delayed extent could be allocated by fallocate,
		 * so we need to check it.
		 */
		if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
			map_bh(bh, inode->i_sb, invalid_block);
			set_buffer_new(bh);
			set_buffer_delay(bh);
			return 0;
		}

		map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
		retval = es.es_len - (iblock - es.es_lblk);
		if (retval > map->m_len)
			retval = map->m_len;
		map->m_len = retval;
		if (ext4_es_is_written(&es))
			map->m_flags |= EXT4_MAP_MAPPED;
		else if (ext4_es_is_unwritten(&es))
			map->m_flags |= EXT4_MAP_UNWRITTEN;
		else
			BUG_ON(1);

		return retval;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_has_inline_data(inode)) {
		/*
		 * We will soon create blocks for this page, and let
		 * us pretend as if the blocks aren't allocated yet.
		 * In case of clusters, we have to handle the work
		 * of mapping from cluster so that the reserved space
		 * is calculated properly.
		 */
		if ((EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) &&
		    ext4_find_delalloc_cluster(inode, map->m_lblk))
			map->m_flags |= EXT4_MAP_FROM_CLUSTER;
		retval = 0;
	} else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		retval = ext4_ext_map_blocks(NULL, inode, map,
					     EXT4_GET_BLOCKS_NO_PUT_HOLE);
	else
		retval = ext4_ind_map_blocks(NULL, inode, map,
					     EXT4_GET_BLOCKS_NO_PUT_HOLE);

add_delayed:
	if (retval == 0) {
		int ret;
		/*
		 * XXX: __block_prepare_write() unmaps passed block,
		 * is it OK?
		 */
		/* If the block was allocated from a previously allocated
		 * cluster, then we don't need to reserve it again.
		 */
        if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
            ret = ext4_da_reserve_space(inode, iblock);
            if (ret) {
                /* not enough space to reserve */
                retval = ret;
                goto out_unlock;
            }
        }

        ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
                                    ~0, EXTENT_STATUS_DELAYED);
        if (ret) {
            retval = ret;
            goto out_unlock;
        }

        /*
         * Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served
         * and it should not appear on the bh->b_state.
         */
        map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;

        map_bh(bh, inode->i_sb, invalid_block);
        set_buffer_new(bh);
        set_buffer_delay(bh);
    } else if (retval > 0) {
        int ret;
        unsigned long long status;

        status = map->m_flags & EXT4_MAP_UNWRITTEN ?
                 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
        ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
                                    map->m_pblk, status);
        if (ret != 0)
            retval = ret;
    }

out_unlock:
    up_read(&EXT4_I(inode)->i_data_sem);

    return retval;
}

/*
 * This is a special get_blocks_t callback which is used by
 * ext4_da_write_begin().  It will either return a mapped block or
 * reserve space for a single block.
 *
 * For a delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
 * We also have b_blocknr = -1 and b_bdev initialized properly.
 *
 * For an unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten
 * set.  We also have b_blocknr = the physical block mapping the
 * unwritten extent and b_bdev initialized properly.
 */
int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
                           struct buffer_head *bh, int create)
{
    struct ext4_map_blocks map;
    int ret = 0;

    BUG_ON(create == 0);
    BUG_ON(bh->b_size != inode->i_sb->s_blocksize);

    map.m_lblk = iblock;
    map.m_len = 1;

    /*
     * first, we need to know whether the block is allocated already;
     * preallocated blocks are unmapped but should be treated
     * the same as allocated blocks.
     */
    ret = ext4_da_map_blocks(inode, iblock, &map, bh);
    if (ret <= 0)
        return ret;

    map_bh(bh, inode->i_sb, map.m_pblk);
    bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;

    if (buffer_unwritten(bh)) {
        /*
         * A delayed write to an unwritten bh should be marked
         * new and mapped.  Mapped ensures that we don't do
         * get_block multiple times when we write to the same
         * offset and new ensures that we do a proper zero out
         * for a partial write.
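         * (An illustrative aside follows.)
         */

/*
 * [Editor's illustration -- not part of the original file.]
 * The buffer-head contract of ext4_da_get_block_prep(), restated in
 * one place.  Depending on what the lookup found, the bh ends up with
 * a different combination of flags; demo_flag_bh() is a hypothetical
 * helper, the set_buffer_*() calls are the real buffer-head ones:
 */
static void demo_flag_bh(struct buffer_head *bh, int delayed, int unwritten)
{
    if (delayed) {                  /* hole: space reserved, no block yet */
        set_buffer_new(bh);
        set_buffer_delay(bh);
    } else if (unwritten) {         /* preallocated but not yet written */
        set_buffer_new(bh);         /* partial writes must zero out */
        set_buffer_mapped(bh);      /* avoids repeated get_block calls */
    } else {                        /* allocated and written */
        set_buffer_mapped(bh);
    }
}

/*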
1932 */ 1933 set_buffer_new(bh); 1934 set_buffer_mapped(bh); 1935 } 1936 return 0; 1937 } 1938 1939 static int bget_one(handle_t *handle, struct buffer_head *bh) 1940 { 1941 get_bh(bh); 1942 return 0; 1943 } 1944 1945 static int bput_one(handle_t *handle, struct buffer_head *bh) 1946 { 1947 put_bh(bh); 1948 return 0; 1949 } 1950 1951 static int __ext4_journalled_writepage(struct page *page, 1952 unsigned int len) 1953 { 1954 struct address_space *mapping = page->mapping; 1955 struct inode *inode = mapping->host; 1956 struct buffer_head *page_bufs = NULL; 1957 handle_t *handle = NULL; 1958 int ret = 0, err = 0; 1959 int inline_data = ext4_has_inline_data(inode); 1960 struct buffer_head *inode_bh = NULL; 1961 1962 ClearPageChecked(page); 1963 1964 if (inline_data) { 1965 BUG_ON(page->index != 0); 1966 BUG_ON(len > ext4_get_max_inline_size(inode)); 1967 inode_bh = ext4_journalled_write_inline_data(inode, len, page); 1968 if (inode_bh == NULL) 1969 goto out; 1970 } else { 1971 page_bufs = page_buffers(page); 1972 if (!page_bufs) { 1973 BUG(); 1974 goto out; 1975 } 1976 ext4_walk_page_buffers(handle, page_bufs, 0, len, 1977 NULL, bget_one); 1978 } 1979 /* As soon as we unlock the page, it can go away, but we have 1980 * references to buffers so we are safe */ 1981 unlock_page(page); 1982 1983 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1984 ext4_writepage_trans_blocks(inode)); 1985 if (IS_ERR(handle)) { 1986 ret = PTR_ERR(handle); 1987 goto out; 1988 } 1989 1990 BUG_ON(!ext4_handle_valid(handle)); 1991 1992 if (inline_data) { 1993 ret = ext4_journal_get_write_access(handle, inode_bh); 1994 1995 err = ext4_handle_dirty_metadata(handle, inode, inode_bh); 1996 1997 } else { 1998 ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, 1999 do_journal_get_write_access); 2000 2001 err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, 2002 write_end_fn); 2003 } 2004 if (ret == 0) 2005 ret = err; 2006 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; 2007 err = ext4_journal_stop(handle); 2008 if (!ret) 2009 ret = err; 2010 2011 if (!ext4_has_inline_data(inode)) 2012 ext4_walk_page_buffers(handle, page_bufs, 0, len, 2013 NULL, bput_one); 2014 ext4_set_inode_state(inode, EXT4_STATE_JDATA); 2015 out: 2016 brelse(inode_bh); 2017 return ret; 2018 } 2019 2020 /* 2021 * Note that we don't need to start a transaction unless we're journaling data 2022 * because we should have holes filled from ext4_page_mkwrite(). We even don't 2023 * need to file the inode to the transaction's list in ordered mode because if 2024 * we are writing back data added by write(), the inode is already there and if 2025 * we are writing back data modified via mmap(), no one guarantees in which 2026 * transaction the data will hit the disk. In case we are journaling data, we 2027 * cannot start transaction directly because transaction start ranks above page 2028 * lock so we have to do some magic. 2029 * 2030 * This function can get called via... 2031 * - ext4_da_writepages after taking page lock (have journal handle) 2032 * - journal_submit_inode_data_buffers (no journal handle) 2033 * - shrink_page_list via the kswapd/direct reclaim (no journal handle) 2034 * - grab_page_cache when doing write_begin (have journal handle) 2035 * 2036 * We don't do any block allocation in this function. If we have page with 2037 * multiple blocks we need to write those buffer_heads that are mapped. This 2038 * is important for mmaped based write. 
So if we do, with blocksize 1K,
 *	truncate(f, 1024);
 *	a = mmap(f, 0, 4096);
 *	a[0] = 'a';
 *	truncate(f, 4096);
 * we have in the page the first buffer_head mapped via the page_mkwrite
 * callback, but the other buffer_heads would be unmapped but dirty
 * (dirtying done via do_wp_page).  So writepage should write the first
 * block.  If we modify the mmap area beyond 1024 we will again get a
 * page fault and the page_mkwrite callback will do the block allocation
 * and mark the buffer_heads mapped.
 *
 * We redirty the page if we have any buffer_heads that are either delay
 * or unwritten in the page.
 *
 * We can get recursively called as shown below.
 *
 *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *	ext4_writepage()
 *
 * But since we don't do any block allocation we should not deadlock.
 * The page also has the dirty flag cleared so we don't get a recursive
 * page_lock.
 */
static int ext4_writepage(struct page *page,
                          struct writeback_control *wbc)
{
    int ret = 0;
    loff_t size;
    unsigned int len;
    struct buffer_head *page_bufs = NULL;
    struct inode *inode = page->mapping->host;
    struct ext4_io_submit io_submit;

    trace_ext4_writepage(page);
    size = i_size_read(inode);
    if (page->index == size >> PAGE_CACHE_SHIFT)
        len = size & ~PAGE_CACHE_MASK;
    else
        len = PAGE_CACHE_SIZE;

    page_bufs = page_buffers(page);
    /*
     * We cannot do block allocation or other extent handling in this
     * function.  If there are buffers needing that, we have to redirty
     * the page.  But we may reach here when we do a journal commit via
     * journal_submit_inode_data_buffers() and in that case we must write
     * allocated buffers to achieve data=ordered mode guarantees.
     */
    if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
                               ext4_bh_delay_or_unwritten)) {
        redirty_page_for_writepage(wbc, page);
        if (current->flags & PF_MEMALLOC) {
            /*
             * For memory cleaning there's no point in writing only
             * some buffers.  So just bail out.  Warn if we came here
             * from direct reclaim.
             */
            WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
                         == PF_MEMALLOC);
            unlock_page(page);
            return 0;
        }
    }

    if (PageChecked(page) && ext4_should_journal_data(inode))
        /*
         * It's mmapped pagecache.  Add buffers and journal it.  There
         * doesn't seem much point in redirtying the page here.
         */
        return __ext4_journalled_writepage(page, len);

    memset(&io_submit, 0, sizeof(io_submit));
    ret = ext4_bio_write_page(&io_submit, page, len, wbc);
    ext4_io_submit(&io_submit);
    return ret;
}

/*
 * This is called via ext4_da_writepages() to
 * calculate the total number of credits to reserve to fit
 * a single extent allocation into a single transaction;
 * ext4_da_writepages() will loop calling this before
 * the block allocation.
 */

static int ext4_da_writepages_trans_blocks(struct inode *inode)
{
    int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;

    /*
     * With non-extent format the journal credit needed to
     * insert nrblocks contiguous blocks is dependent on the
     * number of contiguous blocks.
So we will limit 2131 * number of contiguous block to a sane value 2132 */ 2133 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) && 2134 (max_blocks > EXT4_MAX_TRANS_DATA)) 2135 max_blocks = EXT4_MAX_TRANS_DATA; 2136 2137 return ext4_chunk_trans_blocks(inode, max_blocks); 2138 } 2139 2140 /* 2141 * write_cache_pages_da - walk the list of dirty pages of the given 2142 * address space and accumulate pages that need writing, and call 2143 * mpage_da_map_and_submit to map a single contiguous memory region 2144 * and then write them. 2145 */ 2146 static int write_cache_pages_da(handle_t *handle, 2147 struct address_space *mapping, 2148 struct writeback_control *wbc, 2149 struct mpage_da_data *mpd, 2150 pgoff_t *done_index) 2151 { 2152 struct buffer_head *bh, *head; 2153 struct inode *inode = mapping->host; 2154 struct pagevec pvec; 2155 unsigned int nr_pages; 2156 sector_t logical; 2157 pgoff_t index, end; 2158 long nr_to_write = wbc->nr_to_write; 2159 int i, tag, ret = 0; 2160 2161 memset(mpd, 0, sizeof(struct mpage_da_data)); 2162 mpd->wbc = wbc; 2163 mpd->inode = inode; 2164 pagevec_init(&pvec, 0); 2165 index = wbc->range_start >> PAGE_CACHE_SHIFT; 2166 end = wbc->range_end >> PAGE_CACHE_SHIFT; 2167 2168 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 2169 tag = PAGECACHE_TAG_TOWRITE; 2170 else 2171 tag = PAGECACHE_TAG_DIRTY; 2172 2173 *done_index = index; 2174 while (index <= end) { 2175 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, 2176 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); 2177 if (nr_pages == 0) 2178 return 0; 2179 2180 for (i = 0; i < nr_pages; i++) { 2181 struct page *page = pvec.pages[i]; 2182 2183 /* 2184 * At this point, the page may be truncated or 2185 * invalidated (changing page->mapping to NULL), or 2186 * even swizzled back from swapper_space to tmpfs file 2187 * mapping. However, page->index will not change 2188 * because we have a reference on the page. 2189 */ 2190 if (page->index > end) 2191 goto out; 2192 2193 *done_index = page->index + 1; 2194 2195 /* 2196 * If we can't merge this page, and we have 2197 * accumulated an contiguous region, write it 2198 */ 2199 if ((mpd->next_page != page->index) && 2200 (mpd->next_page != mpd->first_page)) { 2201 mpage_da_map_and_submit(mpd); 2202 goto ret_extent_tail; 2203 } 2204 2205 lock_page(page); 2206 2207 /* 2208 * If the page is no longer dirty, or its 2209 * mapping no longer corresponds to inode we 2210 * are writing (which means it has been 2211 * truncated or invalidated), or the page is 2212 * already under writeback and we are not 2213 * doing a data integrity writeback, skip the page 2214 */ 2215 if (!PageDirty(page) || 2216 (PageWriteback(page) && 2217 (wbc->sync_mode == WB_SYNC_NONE)) || 2218 unlikely(page->mapping != mapping)) { 2219 unlock_page(page); 2220 continue; 2221 } 2222 2223 wait_on_page_writeback(page); 2224 BUG_ON(PageWriteback(page)); 2225 2226 /* 2227 * If we have inline data and arrive here, it means that 2228 * we will soon create the block for the 1st page, so 2229 * we'd better clear the inline data here. 
2230 */ 2231 if (ext4_has_inline_data(inode)) { 2232 BUG_ON(ext4_test_inode_state(inode, 2233 EXT4_STATE_MAY_INLINE_DATA)); 2234 ext4_destroy_inline_data(handle, inode); 2235 } 2236 2237 if (mpd->next_page != page->index) 2238 mpd->first_page = page->index; 2239 mpd->next_page = page->index + 1; 2240 logical = (sector_t) page->index << 2241 (PAGE_CACHE_SHIFT - inode->i_blkbits); 2242 2243 /* Add all dirty buffers to mpd */ 2244 head = page_buffers(page); 2245 bh = head; 2246 do { 2247 BUG_ON(buffer_locked(bh)); 2248 /* 2249 * We need to try to allocate unmapped blocks 2250 * in the same page. Otherwise we won't make 2251 * progress with the page in ext4_writepage 2252 */ 2253 if (ext4_bh_delay_or_unwritten(NULL, bh)) { 2254 mpage_add_bh_to_extent(mpd, logical, 2255 bh->b_state); 2256 if (mpd->io_done) 2257 goto ret_extent_tail; 2258 } else if (buffer_dirty(bh) && 2259 buffer_mapped(bh)) { 2260 /* 2261 * mapped dirty buffer. We need to 2262 * update the b_state because we look 2263 * at b_state in mpage_da_map_blocks. 2264 * We don't update b_size because if we 2265 * find an unmapped buffer_head later 2266 * we need to use the b_state flag of 2267 * that buffer_head. 2268 */ 2269 if (mpd->b_size == 0) 2270 mpd->b_state = 2271 bh->b_state & BH_FLAGS; 2272 } 2273 logical++; 2274 } while ((bh = bh->b_this_page) != head); 2275 2276 if (nr_to_write > 0) { 2277 nr_to_write--; 2278 if (nr_to_write == 0 && 2279 wbc->sync_mode == WB_SYNC_NONE) 2280 /* 2281 * We stop writing back only if we are 2282 * not doing integrity sync. In case of 2283 * integrity sync we have to keep going 2284 * because someone may be concurrently 2285 * dirtying pages, and we might have 2286 * synced a lot of newly appeared dirty 2287 * pages, but have not synced all of the 2288 * old dirty pages. 2289 */ 2290 goto out; 2291 } 2292 } 2293 pagevec_release(&pvec); 2294 cond_resched(); 2295 } 2296 return 0; 2297 ret_extent_tail: 2298 ret = MPAGE_DA_EXTENT_TAIL; 2299 out: 2300 pagevec_release(&pvec); 2301 cond_resched(); 2302 return ret; 2303 } 2304 2305 2306 static int ext4_da_writepages(struct address_space *mapping, 2307 struct writeback_control *wbc) 2308 { 2309 pgoff_t index; 2310 int range_whole = 0; 2311 handle_t *handle = NULL; 2312 struct mpage_da_data mpd; 2313 struct inode *inode = mapping->host; 2314 int pages_written = 0; 2315 unsigned int max_pages; 2316 int range_cyclic, cycled = 1, io_done = 0; 2317 int needed_blocks, ret = 0; 2318 long desired_nr_to_write, nr_to_writebump = 0; 2319 loff_t range_start = wbc->range_start; 2320 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); 2321 pgoff_t done_index = 0; 2322 pgoff_t end; 2323 struct blk_plug plug; 2324 2325 trace_ext4_da_writepages(inode, wbc); 2326 2327 /* 2328 * No pages to write? This is mainly a kludge to avoid starting 2329 * a transaction for special inodes like journal inode on last iput() 2330 * because that could violate lock ordering on umount 2331 */ 2332 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 2333 return 0; 2334 2335 /* 2336 * If the filesystem has aborted, it is read-only, so return 2337 * right away instead of dumping stack traces later on that 2338 * will obscure the real source of the problem. We test 2339 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because 2340 * the latter could be true if the filesystem is mounted 2341 * read-only, and in that case, ext4_da_writepages should 2342 * *never* be called, so if that ever happens, we would want 2343 * the stack trace. 
2344 */ 2345 if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) 2346 return -EROFS; 2347 2348 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 2349 range_whole = 1; 2350 2351 range_cyclic = wbc->range_cyclic; 2352 if (wbc->range_cyclic) { 2353 index = mapping->writeback_index; 2354 if (index) 2355 cycled = 0; 2356 wbc->range_start = index << PAGE_CACHE_SHIFT; 2357 wbc->range_end = LLONG_MAX; 2358 wbc->range_cyclic = 0; 2359 end = -1; 2360 } else { 2361 index = wbc->range_start >> PAGE_CACHE_SHIFT; 2362 end = wbc->range_end >> PAGE_CACHE_SHIFT; 2363 } 2364 2365 /* 2366 * This works around two forms of stupidity. The first is in 2367 * the writeback code, which caps the maximum number of pages 2368 * written to be 1024 pages. This is wrong on multiple 2369 * levels; different architectues have a different page size, 2370 * which changes the maximum amount of data which gets 2371 * written. Secondly, 4 megabytes is way too small. XFS 2372 * forces this value to be 16 megabytes by multiplying 2373 * nr_to_write parameter by four, and then relies on its 2374 * allocator to allocate larger extents to make them 2375 * contiguous. Unfortunately this brings us to the second 2376 * stupidity, which is that ext4's mballoc code only allocates 2377 * at most 2048 blocks. So we force contiguous writes up to 2378 * the number of dirty blocks in the inode, or 2379 * sbi->max_writeback_mb_bump whichever is smaller. 2380 */ 2381 max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT); 2382 if (!range_cyclic && range_whole) { 2383 if (wbc->nr_to_write == LONG_MAX) 2384 desired_nr_to_write = wbc->nr_to_write; 2385 else 2386 desired_nr_to_write = wbc->nr_to_write * 8; 2387 } else 2388 desired_nr_to_write = ext4_num_dirty_pages(inode, index, 2389 max_pages); 2390 if (desired_nr_to_write > max_pages) 2391 desired_nr_to_write = max_pages; 2392 2393 if (wbc->nr_to_write < desired_nr_to_write) { 2394 nr_to_writebump = desired_nr_to_write - wbc->nr_to_write; 2395 wbc->nr_to_write = desired_nr_to_write; 2396 } 2397 2398 retry: 2399 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 2400 tag_pages_for_writeback(mapping, index, end); 2401 2402 blk_start_plug(&plug); 2403 while (!ret && wbc->nr_to_write > 0) { 2404 2405 /* 2406 * we insert one extent at a time. So we need 2407 * credit needed for single extent allocation. 2408 * journalled mode is currently not supported 2409 * by delalloc 2410 */ 2411 BUG_ON(ext4_should_journal_data(inode)); 2412 needed_blocks = ext4_da_writepages_trans_blocks(inode); 2413 2414 /* start a new transaction*/ 2415 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 2416 needed_blocks); 2417 if (IS_ERR(handle)) { 2418 ret = PTR_ERR(handle); 2419 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " 2420 "%ld pages, ino %lu; err %d", __func__, 2421 wbc->nr_to_write, inode->i_ino, ret); 2422 blk_finish_plug(&plug); 2423 goto out_writepages; 2424 } 2425 2426 /* 2427 * Now call write_cache_pages_da() to find the next 2428 * contiguous region of logical blocks that need 2429 * blocks to be allocated by ext4 and submit them. 2430 */ 2431 ret = write_cache_pages_da(handle, mapping, 2432 wbc, &mpd, &done_index); 2433 /* 2434 * If we have a contiguous extent of pages and we 2435 * haven't done the I/O yet, map the blocks and submit 2436 * them for I/O. 
 */
        if (!mpd.io_done && mpd.next_page != mpd.first_page) {
            mpage_da_map_and_submit(&mpd);
            ret = MPAGE_DA_EXTENT_TAIL;
        }
        trace_ext4_da_write_pages(inode, &mpd);
        wbc->nr_to_write -= mpd.pages_written;

        ext4_journal_stop(handle);

        if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
            /* commit the transaction which would
             * free blocks released in the transaction
             * and try again
             */
            jbd2_journal_force_commit_nested(sbi->s_journal);
            ret = 0;
        } else if (ret == MPAGE_DA_EXTENT_TAIL) {
            /*
             * Got one extent; now try with the rest of the pages.
             * If mpd.retval is set to -EIO, the journal is aborted,
             * so we don't need to write any more.
             */
            pages_written += mpd.pages_written;
            ret = mpd.retval;
            io_done = 1;
        } else if (wbc->nr_to_write)
            /*
             * There is no more writeout needed,
             * or we requested a nonblocking writeout
             * and we found the device congested
             */
            break;
    }
    blk_finish_plug(&plug);
    if (!io_done && !cycled) {
        cycled = 1;
        index = 0;
        wbc->range_start = index << PAGE_CACHE_SHIFT;
        wbc->range_end = mapping->writeback_index - 1;
        goto retry;
    }

    /* Update index */
    wbc->range_cyclic = range_cyclic;
    if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
        /*
         * Set the writeback_index so that range_cyclic
         * mode will write it back later
         */
        mapping->writeback_index = done_index;

out_writepages:
    wbc->nr_to_write -= nr_to_writebump;
    wbc->range_start = range_start;
    trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
    return ret;
}

static int ext4_nonda_switch(struct super_block *sb)
{
    s64 free_blocks, dirty_blocks;
    struct ext4_sb_info *sbi = EXT4_SB(sb);

    /*
     * Switch to non-delalloc mode if we are running low
     * on free blocks.  The free block accounting via percpu
     * counters can get slightly wrong with percpu_counter_batch getting
     * accumulated on each CPU without updating global counters.
     * Delalloc needs an accurate free block accounting, so switch
     * to non-delalloc when we are near the error range.
     */
    free_blocks = EXT4_C2B(sbi,
        percpu_counter_read_positive(&sbi->s_freeclusters_counter));
    dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
    /*
     * Start pushing delalloc when 1/2 of free blocks are dirty.
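     * (A short aside on the arithmetic follows.)
     */

/*
 * [Editor's illustration -- not part of the original file.]
 * The two thresholds used below, in plain arithmetic: writeback is
 * kicked once free < 2 * dirty (more than half of the free blocks are
 * dirty), and the switch to nondelalloc happens once
 * 2 * free < 3 * dirty (free has dropped under 150% of dirty) or free
 * falls below dirty plus the fixed watermark.  Sketch with the
 * hypothetical name demo_nonda_switch():
 */
static int demo_nonda_switch(s64 free, s64 dirty, s64 watermark)
{
    /* the boundary case is free == 1.5 * dirty, i.e. 2*free == 3*dirty */
    if (2 * free < 3 * dirty)
        return 1;
    if (free < dirty + watermark)
        return 1;
    return 0;
}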
    if (dirty_blocks && (free_blocks < 2 * dirty_blocks))
        try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);

    if (2 * free_blocks < 3 * dirty_blocks ||
        free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
        /*
         * free block count is less than 150% of dirty blocks,
         * or free blocks are less than the watermark
         */
        return 1;
    }
    return 0;
}

static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
                               loff_t pos, unsigned len, unsigned flags,
                               struct page **pagep, void **fsdata)
{
    int ret, retries = 0;
    struct page *page;
    pgoff_t index;
    struct inode *inode = mapping->host;
    handle_t *handle;

    index = pos >> PAGE_CACHE_SHIFT;

    if (ext4_nonda_switch(inode->i_sb)) {
        *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
        return ext4_write_begin(file, mapping, pos,
                                len, flags, pagep, fsdata);
    }
    *fsdata = (void *)0;
    trace_ext4_da_write_begin(inode, pos, len, flags);

    if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
        ret = ext4_da_write_inline_data_begin(mapping, inode,
                                              pos, len, flags,
                                              pagep, fsdata);
        if (ret < 0)
            return ret;
        if (ret == 1)
            return 0;
    }

    /*
     * grab_cache_page_write_begin() can take a long time if the
     * system is thrashing due to memory pressure, or if the page
     * is being written back.  So grab it first before we start
     * the transaction handle.  This also allows us to allocate
     * the page (if needed) without using GFP_NOFS.
     */
retry_grab:
    page = grab_cache_page_write_begin(mapping, index, flags);
    if (!page)
        return -ENOMEM;
    unlock_page(page);

    /*
     * With delayed allocation, we don't log the i_disksize update
     * if there is delayed block allocation.  But we still need
     * to journal the i_disksize update if we write to the end
     * of a file that has an already mapped buffer.
     */
retry_journal:
    handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1);
    if (IS_ERR(handle)) {
        page_cache_release(page);
        return PTR_ERR(handle);
    }

    lock_page(page);
    if (page->mapping != mapping) {
        /* The page got truncated from under us */
        unlock_page(page);
        page_cache_release(page);
        ext4_journal_stop(handle);
        goto retry_grab;
    }
    /* In case writeback began while the page was unlocked */
    wait_on_page_writeback(page);

    ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
    if (ret < 0) {
        unlock_page(page);
        ext4_journal_stop(handle);
        /*
         * block_write_begin may have instantiated a few blocks
         * outside i_size.  Trim these off again.  We don't need
         * i_size_read because we hold i_mutex.
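         * (A short aside follows; the error path resumes below it.)
         */

/*
 * [Editor's illustration -- not part of the original file.]
 * The retry discipline used below, shown in isolation: an allocation
 * failure is retried only while it is -ENOSPC and
 * ext4_should_retry_alloc() still thinks a journal commit could free
 * enough space.  demo_try_alloc() is a hypothetical placeholder for
 * the failing step (here __block_write_begin()):
 */
static int demo_try_alloc(void)
{
    return -ENOSPC;     /* placeholder: pretend we ran out of space */
}

static int demo_retry_alloc(struct super_block *sb)
{
    int err, retries = 0;

    do {
        err = demo_try_alloc();
    } while (err == -ENOSPC && ext4_should_retry_alloc(sb, &retries));
    return err;
}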
        if (pos + len > inode->i_size)
            ext4_truncate_failed_write(inode);

        if (ret == -ENOSPC &&
            ext4_should_retry_alloc(inode->i_sb, &retries))
            goto retry_journal;

        page_cache_release(page);
        return ret;
    }

    *pagep = page;
    return ret;
}

/*
 * Check if we should update i_disksize
 * when writing to the end of file but not requiring block allocation
 */
static int ext4_da_should_update_i_disksize(struct page *page,
                                            unsigned long offset)
{
    struct buffer_head *bh;
    struct inode *inode = page->mapping->host;
    unsigned int idx;
    int i;

    bh = page_buffers(page);
    idx = offset >> inode->i_blkbits;

    for (i = 0; i < idx; i++)
        bh = bh->b_this_page;

    if (!buffer_mapped(bh) || buffer_delay(bh) || buffer_unwritten(bh))
        return 0;
    return 1;
}

static int ext4_da_write_end(struct file *file,
                             struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned copied,
                             struct page *page, void *fsdata)
{
    struct inode *inode = mapping->host;
    int ret = 0, ret2;
    handle_t *handle = ext4_journal_current_handle();
    loff_t new_i_size;
    unsigned long start, end;
    int write_mode = (int)(unsigned long)fsdata;

    if (write_mode == FALL_BACK_TO_NONDELALLOC) {
        switch (ext4_inode_journal_mode(inode)) {
        case EXT4_INODE_ORDERED_DATA_MODE:
            return ext4_ordered_write_end(file, mapping, pos,
                                          len, copied, page, fsdata);
        case EXT4_INODE_WRITEBACK_DATA_MODE:
            return ext4_writeback_write_end(file, mapping, pos,
                                            len, copied, page, fsdata);
        default:
            BUG();
        }
    }

    trace_ext4_da_write_end(inode, pos, len, copied);
    start = pos & (PAGE_CACHE_SIZE - 1);
    end = start + copied - 1;

    /*
     * generic_write_end() will run mark_inode_dirty() if i_size
     * changes.  So let's piggyback the i_disksize mark_inode_dirty
     * into that.
     */
    new_i_size = pos + copied;
    if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
        if (ext4_has_inline_data(inode) ||
            ext4_da_should_update_i_disksize(page, end)) {
            down_write(&EXT4_I(inode)->i_data_sem);
            if (new_i_size > EXT4_I(inode)->i_disksize)
                EXT4_I(inode)->i_disksize = new_i_size;
            up_write(&EXT4_I(inode)->i_data_sem);
            /*
             * We need to mark the inode dirty even if
             * new_i_size is less than inode->i_size
             * but greater than i_disksize. (hint: delalloc)
             */
            ext4_mark_inode_dirty(handle, inode);
        }
    }

    if (write_mode != CONVERT_INLINE_DATA &&
        ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
        ext4_has_inline_data(inode))
        ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
                                             page);
    else
        ret2 = generic_write_end(file, mapping, pos, len, copied,
                                 page, fsdata);

    copied = ret2;
    if (ret2 < 0)
        ret = ret2;
    ret2 = ext4_journal_stop(handle);
    if (!ret)
        ret = ret2;

    return ret ? ret : copied;
}

static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
{
    /*
     * Drop reserved blocks
     */
    BUG_ON(!PageLocked(page));
    if (!page_has_buffers(page))
        goto out;

    ext4_da_page_release_reservation(page, offset);

out:
    ext4_invalidatepage(page, offset);

    return;
}

/*
 * Force all delayed allocation blocks to be allocated for a given inode.
2731 */ 2732 int ext4_alloc_da_blocks(struct inode *inode) 2733 { 2734 trace_ext4_alloc_da_blocks(inode); 2735 2736 if (!EXT4_I(inode)->i_reserved_data_blocks && 2737 !EXT4_I(inode)->i_reserved_meta_blocks) 2738 return 0; 2739 2740 /* 2741 * We do something simple for now. The filemap_flush() will 2742 * also start triggering a write of the data blocks, which is 2743 * not strictly speaking necessary (and for users of 2744 * laptop_mode, not even desirable). However, to do otherwise 2745 * would require replicating code paths in: 2746 * 2747 * ext4_da_writepages() -> 2748 * write_cache_pages() ---> (via passed in callback function) 2749 * __mpage_da_writepage() --> 2750 * mpage_add_bh_to_extent() 2751 * mpage_da_map_blocks() 2752 * 2753 * The problem is that write_cache_pages(), located in 2754 * mm/page-writeback.c, marks pages clean in preparation for 2755 * doing I/O, which is not desirable if we're not planning on 2756 * doing I/O at all. 2757 * 2758 * We could call write_cache_pages(), and then redirty all of 2759 * the pages by calling redirty_page_for_writepage() but that 2760 * would be ugly in the extreme. So instead we would need to 2761 * replicate parts of the code in the above functions, 2762 * simplifying them because we wouldn't actually intend to 2763 * write out the pages, but rather only collect contiguous 2764 * logical block extents, call the multi-block allocator, and 2765 * then update the buffer heads with the block allocations. 2766 * 2767 * For now, though, we'll cheat by calling filemap_flush(), 2768 * which will map the blocks, and start the I/O, but not 2769 * actually wait for the I/O to complete. 2770 */ 2771 return filemap_flush(inode->i_mapping); 2772 } 2773 2774 /* 2775 * bmap() is special. It gets used by applications such as lilo and by 2776 * the swapper to find the on-disk block of a specific piece of data. 2777 * 2778 * Naturally, this is dangerous if the block concerned is still in the 2779 * journal. If somebody makes a swapfile on an ext4 data-journaling 2780 * filesystem and enables swap, then they may get a nasty shock when the 2781 * data getting swapped to that swapfile suddenly gets overwritten by 2782 * the original zero's written out previously to the journal and 2783 * awaiting writeback in the kernel's buffer cache. 2784 * 2785 * So, if we see any bmap calls here on a modified, data-journaled file, 2786 * take extra steps to flush any blocks which might be in the cache. 2787 */ 2788 static sector_t ext4_bmap(struct address_space *mapping, sector_t block) 2789 { 2790 struct inode *inode = mapping->host; 2791 journal_t *journal; 2792 int err; 2793 2794 /* 2795 * We can get here for an inline file via the FIBMAP ioctl 2796 */ 2797 if (ext4_has_inline_data(inode)) 2798 return 0; 2799 2800 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && 2801 test_opt(inode->i_sb, DELALLOC)) { 2802 /* 2803 * With delalloc we want to sync the file 2804 * so that we can make sure we allocate 2805 * blocks for file 2806 */ 2807 filemap_write_and_wait(mapping); 2808 } 2809 2810 if (EXT4_JOURNAL(inode) && 2811 ext4_test_inode_state(inode, EXT4_STATE_JDATA)) { 2812 /* 2813 * This is a REALLY heavyweight approach, but the use of 2814 * bmap on dirty files is expected to be extremely rare: 2815 * only if we run lilo or swapon on a freshly made file 2816 * do we expect this to happen. 
2817 * 2818 * (bmap requires CAP_SYS_RAWIO so this does not 2819 * represent an unprivileged user DOS attack --- we'd be 2820 * in trouble if mortal users could trigger this path at 2821 * will.) 2822 * 2823 * NB. EXT4_STATE_JDATA is not set on files other than 2824 * regular files. If somebody wants to bmap a directory 2825 * or symlink and gets confused because the buffer 2826 * hasn't yet been flushed to disk, they deserve 2827 * everything they get. 2828 */ 2829 2830 ext4_clear_inode_state(inode, EXT4_STATE_JDATA); 2831 journal = EXT4_JOURNAL(inode); 2832 jbd2_journal_lock_updates(journal); 2833 err = jbd2_journal_flush(journal); 2834 jbd2_journal_unlock_updates(journal); 2835 2836 if (err) 2837 return 0; 2838 } 2839 2840 return generic_block_bmap(mapping, block, ext4_get_block); 2841 } 2842 2843 static int ext4_readpage(struct file *file, struct page *page) 2844 { 2845 int ret = -EAGAIN; 2846 struct inode *inode = page->mapping->host; 2847 2848 trace_ext4_readpage(page); 2849 2850 if (ext4_has_inline_data(inode)) 2851 ret = ext4_readpage_inline(inode, page); 2852 2853 if (ret == -EAGAIN) 2854 return mpage_readpage(page, ext4_get_block); 2855 2856 return ret; 2857 } 2858 2859 static int 2860 ext4_readpages(struct file *file, struct address_space *mapping, 2861 struct list_head *pages, unsigned nr_pages) 2862 { 2863 struct inode *inode = mapping->host; 2864 2865 /* If the file has inline data, no need to do readpages. */ 2866 if (ext4_has_inline_data(inode)) 2867 return 0; 2868 2869 return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); 2870 } 2871 2872 static void ext4_invalidatepage(struct page *page, unsigned long offset) 2873 { 2874 trace_ext4_invalidatepage(page, offset); 2875 2876 /* No journalling happens on data buffers when this function is used */ 2877 WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page))); 2878 2879 block_invalidatepage(page, offset); 2880 } 2881 2882 static int __ext4_journalled_invalidatepage(struct page *page, 2883 unsigned long offset) 2884 { 2885 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 2886 2887 trace_ext4_journalled_invalidatepage(page, offset); 2888 2889 /* 2890 * If it's a full truncate we just forget about the pending dirtying 2891 */ 2892 if (offset == 0) 2893 ClearPageChecked(page); 2894 2895 return jbd2_journal_invalidatepage(journal, page, offset); 2896 } 2897 2898 /* Wrapper for aops... */ 2899 static void ext4_journalled_invalidatepage(struct page *page, 2900 unsigned long offset) 2901 { 2902 WARN_ON(__ext4_journalled_invalidatepage(page, offset) < 0); 2903 } 2904 2905 static int ext4_releasepage(struct page *page, gfp_t wait) 2906 { 2907 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 2908 2909 trace_ext4_releasepage(page); 2910 2911 WARN_ON(PageChecked(page)); 2912 if (!page_has_buffers(page)) 2913 return 0; 2914 if (journal) 2915 return jbd2_journal_try_to_free_buffers(journal, page, wait); 2916 else 2917 return try_to_free_buffers(page); 2918 } 2919 2920 /* 2921 * ext4_get_block used when preparing for a DIO write or buffer write. 2922 * We allocate an uinitialized extent if blocks haven't been allocated. 2923 * The extent will be converted to initialized after the IO is complete. 
int ext4_get_block_write(struct inode *inode, sector_t iblock,
                         struct buffer_head *bh_result, int create)
{
    ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
               inode->i_ino, create);
    return _ext4_get_block(inode, iblock, bh_result,
                           EXT4_GET_BLOCKS_IO_CREATE_EXT);
}

static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
                                       struct buffer_head *bh_result, int create)
{
    ext4_debug("ext4_get_block_write_nolock: inode %lu, create flag %d\n",
               inode->i_ino, create);
    return _ext4_get_block(inode, iblock, bh_result,
                           EXT4_GET_BLOCKS_NO_LOCK);
}

static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
                            ssize_t size, void *private, int ret,
                            bool is_async)
{
    struct inode *inode = file_inode(iocb->ki_filp);
    ext4_io_end_t *io_end = iocb->private;

    /* if not async direct IO or dio with 0 bytes write, just return */
    if (!io_end || !size)
        goto out;

    ext_debug("ext4_end_io_dio(): io_end 0x%p "
              "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
              iocb->private, io_end->inode->i_ino, iocb, offset,
              size);

    iocb->private = NULL;

    /* if not aio dio with unwritten extents, just free io and return */
    if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
        ext4_free_io_end(io_end);
out:
        inode_dio_done(inode);
        if (is_async)
            aio_complete(iocb, ret, 0);
        return;
    }

    io_end->offset = offset;
    io_end->size = size;
    if (is_async) {
        io_end->iocb = iocb;
        io_end->result = ret;
    }

    ext4_add_complete_io(io_end);
}

/*
 * For ext4 extent files, ext4 will do direct-io writes to holes,
 * preallocated extents, and writes that extend the file; there is no
 * need to fall back to buffered IO.
 *
 * For holes, we fallocate those blocks and mark them as uninitialized.
 * If those blocks were preallocated, we make sure they are split, but
 * still keep the range to write as uninitialized.
 *
 * The unwritten extents will be converted to written when DIO is
 * completed.  For async direct IO, since the IO may still be pending
 * when we return, we set up an end_io callback function, which will do
 * the conversion when the async direct IO is completed.
 *
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list, so recovery will truncate it back to the original size
 * if the machine crashes during the write.
 */
static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
                                  const struct iovec *iov, loff_t offset,
                                  unsigned long nr_segs)
{
    struct file *file = iocb->ki_filp;
    struct inode *inode = file->f_mapping->host;
    ssize_t ret;
    size_t count = iov_length(iov, nr_segs);
    int overwrite = 0;
    get_block_t *get_block_func = NULL;
    int dio_flags = 0;
    loff_t final_size = offset + count;

    /* Use the old path for reads and writes beyond i_size.
*/ 3014 if (rw != WRITE || final_size > inode->i_size) 3015 return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs); 3016 3017 BUG_ON(iocb->private == NULL); 3018 3019 /* If we do a overwrite dio, i_mutex locking can be released */ 3020 overwrite = *((int *)iocb->private); 3021 3022 if (overwrite) { 3023 atomic_inc(&inode->i_dio_count); 3024 down_read(&EXT4_I(inode)->i_data_sem); 3025 mutex_unlock(&inode->i_mutex); 3026 } 3027 3028 /* 3029 * We could direct write to holes and fallocate. 3030 * 3031 * Allocated blocks to fill the hole are marked as 3032 * uninitialized to prevent parallel buffered read to expose 3033 * the stale data before DIO complete the data IO. 3034 * 3035 * As to previously fallocated extents, ext4 get_block will 3036 * just simply mark the buffer mapped but still keep the 3037 * extents uninitialized. 3038 * 3039 * For non AIO case, we will convert those unwritten extents 3040 * to written after return back from blockdev_direct_IO. 3041 * 3042 * For async DIO, the conversion needs to be deferred when the 3043 * IO is completed. The ext4 end_io callback function will be 3044 * called to take care of the conversion work. Here for async 3045 * case, we allocate an io_end structure to hook to the iocb. 3046 */ 3047 iocb->private = NULL; 3048 ext4_inode_aio_set(inode, NULL); 3049 if (!is_sync_kiocb(iocb)) { 3050 ext4_io_end_t *io_end = ext4_init_io_end(inode, GFP_NOFS); 3051 if (!io_end) { 3052 ret = -ENOMEM; 3053 goto retake_lock; 3054 } 3055 io_end->flag |= EXT4_IO_END_DIRECT; 3056 iocb->private = io_end; 3057 /* 3058 * we save the io structure for current async direct 3059 * IO, so that later ext4_map_blocks() could flag the 3060 * io structure whether there is a unwritten extents 3061 * needs to be converted when IO is completed. 3062 */ 3063 ext4_inode_aio_set(inode, io_end); 3064 } 3065 3066 if (overwrite) { 3067 get_block_func = ext4_get_block_write_nolock; 3068 } else { 3069 get_block_func = ext4_get_block_write; 3070 dio_flags = DIO_LOCKING; 3071 } 3072 ret = __blockdev_direct_IO(rw, iocb, inode, 3073 inode->i_sb->s_bdev, iov, 3074 offset, nr_segs, 3075 get_block_func, 3076 ext4_end_io_dio, 3077 NULL, 3078 dio_flags); 3079 3080 if (iocb->private) 3081 ext4_inode_aio_set(inode, NULL); 3082 /* 3083 * The io_end structure takes a reference to the inode, that 3084 * structure needs to be destroyed and the reference to the 3085 * inode need to be dropped, when IO is complete, even with 0 3086 * byte write, or failed. 3087 * 3088 * In the successful AIO DIO case, the io_end structure will 3089 * be destroyed and the reference to the inode will be dropped 3090 * after the end_io call back function is called. 3091 * 3092 * In the case there is 0 byte write, or error case, since VFS 3093 * direct IO won't invoke the end_io call back function, we 3094 * need to free the end_io structure here. 
3095 */ 3096 if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) { 3097 ext4_free_io_end(iocb->private); 3098 iocb->private = NULL; 3099 } else if (ret > 0 && !overwrite && ext4_test_inode_state(inode, 3100 EXT4_STATE_DIO_UNWRITTEN)) { 3101 int err; 3102 /* 3103 * for non AIO case, since the IO is already 3104 * completed, we could do the conversion right here 3105 */ 3106 err = ext4_convert_unwritten_extents(inode, 3107 offset, ret); 3108 if (err < 0) 3109 ret = err; 3110 ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); 3111 } 3112 3113 retake_lock: 3114 /* take i_mutex locking again if we do a ovewrite dio */ 3115 if (overwrite) { 3116 inode_dio_done(inode); 3117 up_read(&EXT4_I(inode)->i_data_sem); 3118 mutex_lock(&inode->i_mutex); 3119 } 3120 3121 return ret; 3122 } 3123 3124 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, 3125 const struct iovec *iov, loff_t offset, 3126 unsigned long nr_segs) 3127 { 3128 struct file *file = iocb->ki_filp; 3129 struct inode *inode = file->f_mapping->host; 3130 ssize_t ret; 3131 3132 /* 3133 * If we are doing data journalling we don't support O_DIRECT 3134 */ 3135 if (ext4_should_journal_data(inode)) 3136 return 0; 3137 3138 /* Let buffer I/O handle the inline data case. */ 3139 if (ext4_has_inline_data(inode)) 3140 return 0; 3141 3142 trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw); 3143 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3144 ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs); 3145 else 3146 ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs); 3147 trace_ext4_direct_IO_exit(inode, offset, 3148 iov_length(iov, nr_segs), rw, ret); 3149 return ret; 3150 } 3151 3152 /* 3153 * Pages can be marked dirty completely asynchronously from ext4's journalling 3154 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do 3155 * much here because ->set_page_dirty is called under VFS locks. The page is 3156 * not necessarily locked. 3157 * 3158 * We cannot just dirty the page and leave attached buffers clean, because the 3159 * buffers' dirty state is "definitive". We cannot just set the buffers dirty 3160 * or jbddirty because all the journalling code will explode. 3161 * 3162 * So what we do is to mark the page "pending dirty" and next time writepage 3163 * is called, propagate that into the buffers appropriately. 
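 * (A short aside follows.)
 */

/*
 * [Editor's illustration -- not part of the original file.]
 * The producer/consumer pair behind the "pending dirty" trick:
 * set_page_dirty() only records the fact via PageChecked, and the next
 * writepage call consumes it (compare the PageChecked() test in
 * ext4_writepage() above).  demo_consume_pending_dirty() is a
 * hypothetical name; the page-flag helpers are the real ones:
 */
static int demo_consume_pending_dirty(struct page *page, int journal_data)
{
    if (PageChecked(page) && journal_data) {
        ClearPageChecked(page); /* consumed: journal the buffers */
        return 1;
    }
    return 0;                   /* ordinary block I/O path */
}

/*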
3164 */ 3165 static int ext4_journalled_set_page_dirty(struct page *page) 3166 { 3167 SetPageChecked(page); 3168 return __set_page_dirty_nobuffers(page); 3169 } 3170 3171 static const struct address_space_operations ext4_ordered_aops = { 3172 .readpage = ext4_readpage, 3173 .readpages = ext4_readpages, 3174 .writepage = ext4_writepage, 3175 .write_begin = ext4_write_begin, 3176 .write_end = ext4_ordered_write_end, 3177 .bmap = ext4_bmap, 3178 .invalidatepage = ext4_invalidatepage, 3179 .releasepage = ext4_releasepage, 3180 .direct_IO = ext4_direct_IO, 3181 .migratepage = buffer_migrate_page, 3182 .is_partially_uptodate = block_is_partially_uptodate, 3183 .error_remove_page = generic_error_remove_page, 3184 }; 3185 3186 static const struct address_space_operations ext4_writeback_aops = { 3187 .readpage = ext4_readpage, 3188 .readpages = ext4_readpages, 3189 .writepage = ext4_writepage, 3190 .write_begin = ext4_write_begin, 3191 .write_end = ext4_writeback_write_end, 3192 .bmap = ext4_bmap, 3193 .invalidatepage = ext4_invalidatepage, 3194 .releasepage = ext4_releasepage, 3195 .direct_IO = ext4_direct_IO, 3196 .migratepage = buffer_migrate_page, 3197 .is_partially_uptodate = block_is_partially_uptodate, 3198 .error_remove_page = generic_error_remove_page, 3199 }; 3200 3201 static const struct address_space_operations ext4_journalled_aops = { 3202 .readpage = ext4_readpage, 3203 .readpages = ext4_readpages, 3204 .writepage = ext4_writepage, 3205 .write_begin = ext4_write_begin, 3206 .write_end = ext4_journalled_write_end, 3207 .set_page_dirty = ext4_journalled_set_page_dirty, 3208 .bmap = ext4_bmap, 3209 .invalidatepage = ext4_journalled_invalidatepage, 3210 .releasepage = ext4_releasepage, 3211 .direct_IO = ext4_direct_IO, 3212 .is_partially_uptodate = block_is_partially_uptodate, 3213 .error_remove_page = generic_error_remove_page, 3214 }; 3215 3216 static const struct address_space_operations ext4_da_aops = { 3217 .readpage = ext4_readpage, 3218 .readpages = ext4_readpages, 3219 .writepage = ext4_writepage, 3220 .writepages = ext4_da_writepages, 3221 .write_begin = ext4_da_write_begin, 3222 .write_end = ext4_da_write_end, 3223 .bmap = ext4_bmap, 3224 .invalidatepage = ext4_da_invalidatepage, 3225 .releasepage = ext4_releasepage, 3226 .direct_IO = ext4_direct_IO, 3227 .migratepage = buffer_migrate_page, 3228 .is_partially_uptodate = block_is_partially_uptodate, 3229 .error_remove_page = generic_error_remove_page, 3230 }; 3231 3232 void ext4_set_aops(struct inode *inode) 3233 { 3234 switch (ext4_inode_journal_mode(inode)) { 3235 case EXT4_INODE_ORDERED_DATA_MODE: 3236 if (test_opt(inode->i_sb, DELALLOC)) 3237 inode->i_mapping->a_ops = &ext4_da_aops; 3238 else 3239 inode->i_mapping->a_ops = &ext4_ordered_aops; 3240 break; 3241 case EXT4_INODE_WRITEBACK_DATA_MODE: 3242 if (test_opt(inode->i_sb, DELALLOC)) 3243 inode->i_mapping->a_ops = &ext4_da_aops; 3244 else 3245 inode->i_mapping->a_ops = &ext4_writeback_aops; 3246 break; 3247 case EXT4_INODE_JOURNAL_DATA_MODE: 3248 inode->i_mapping->a_ops = &ext4_journalled_aops; 3249 break; 3250 default: 3251 BUG(); 3252 } 3253 } 3254 3255 3256 /* 3257 * ext4_discard_partial_page_buffers() 3258 * Wrapper function for ext4_discard_partial_page_buffers_no_lock. 3259 * This function finds and locks the page containing the offset 3260 * "from" and passes it to ext4_discard_partial_page_buffers_no_lock. 3261 * Calling functions that already have the page locked should call 3262 * ext4_discard_partial_page_buffers_no_lock directly. 
 */
int ext4_discard_partial_page_buffers(handle_t *handle,
        struct address_space *mapping, loff_t from,
        loff_t length, int flags)
{
    struct inode *inode = mapping->host;
    struct page *page;
    int err = 0;

    page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
                               mapping_gfp_mask(mapping) & ~__GFP_FS);
    if (!page)
        return -ENOMEM;

    err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page,
                                                    from, length, flags);

    unlock_page(page);
    page_cache_release(page);
    return err;
}

/*
 * ext4_discard_partial_page_buffers_no_lock()
 * Zeros a page range of length 'length' starting from offset 'from'.
 * Buffer heads that correspond to the block-aligned regions of the
 * zeroed range will be unmapped.  Non-block-aligned regions
 * will have the corresponding buffer head mapped if needed so that
 * that region of the page can be updated with the partial zero out.
 *
 * This function assumes that the page has already been locked.  The
 * range to be discarded must be contained within the given page.
 * If the specified range exceeds the end of the page it will be shortened
 * to the end of the page that corresponds to 'from'.  This function is
 * appropriate for updating a page and its buffer heads to be unmapped and
 * zeroed for blocks that have been either released, or are going to be
 * released.
 *
 * handle: The journal handle
 * inode:  The file's inode
 * page:   A locked page that contains the offset "from"
 * from:   The starting byte offset (from the beginning of the file)
 *         to begin discarding
 * len:    The length of bytes to discard
 * flags:  Optional flags that may be used:
 *
 *         EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED
 *         Only zero the regions of the page whose buffer heads
 *         have already been unmapped.  This flag is appropriate
 *         for updating the contents of a page whose blocks may
 *         have already been released, and we only want to zero
 *         out the regions that correspond to those released blocks.
 *
 * Returns zero on success or negative on failure.
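 *
 * (An illustrative aside precedes the code.)
 */

/*
 * [Editor's illustration -- not part of the original file.]
 * The heart of the loop below is per-buffer interval arithmetic:
 * clamp what is left of the range to the end of the current block,
 * then either discard the whole block or zero just the overlap.
 * demo_range_in_block() is a hypothetical name:
 */
static unsigned int demo_range_in_block(unsigned int blocksize,
                                        unsigned int pos,
                                        unsigned int remaining)
{
    /* bytes from pos up to the end of the block containing pos */
    unsigned int end_of_block = blocksize - (pos & (blocksize - 1));

    return remaining > end_of_block ? end_of_block : remaining;
}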
static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
        struct inode *inode, struct page *page, loff_t from,
        loff_t length, int flags)
{
    ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
    unsigned int offset = from & (PAGE_CACHE_SIZE-1);
    unsigned int blocksize, max, pos;
    ext4_lblk_t iblock;
    struct buffer_head *bh;
    int err = 0;

    blocksize = inode->i_sb->s_blocksize;
    max = PAGE_CACHE_SIZE - offset;

    if (index != page->index)
        return -EINVAL;

    /*
     * correct length if it does not fall between
     * 'from' and the end of the page
     */
    if (length > max || length < 0)
        length = max;

    iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

    if (!page_has_buffers(page))
        create_empty_buffers(page, blocksize, 0);

    /* Find the buffer that contains "offset" */
    bh = page_buffers(page);
    pos = blocksize;
    while (offset >= pos) {
        bh = bh->b_this_page;
        iblock++;
        pos += blocksize;
    }

    pos = offset;
    while (pos < offset + length) {
        unsigned int end_of_block, range_to_discard;

        err = 0;

        /* The length of space left to zero and unmap */
        range_to_discard = offset + length - pos;

        /* The length of space until the end of the block */
        end_of_block = blocksize - (pos & (blocksize-1));

        /*
         * Do not unmap or zero past end of block
         * for this buffer head
         */
        if (range_to_discard > end_of_block)
            range_to_discard = end_of_block;

        /*
         * Skip this buffer head if we are only zeroing unmapped
         * regions of the page
         */
        if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED &&
            buffer_mapped(bh))
            goto next;

        /* If the range is block aligned, unmap */
        if (range_to_discard == blocksize) {
            clear_buffer_dirty(bh);
            bh->b_bdev = NULL;
            clear_buffer_mapped(bh);
            clear_buffer_req(bh);
            clear_buffer_new(bh);
            clear_buffer_delay(bh);
            clear_buffer_unwritten(bh);
            clear_buffer_uptodate(bh);
            zero_user(page, pos, range_to_discard);
            BUFFER_TRACE(bh, "Buffer discarded");
            goto next;
        }

        /*
         * If this block is not completely contained in the range
         * to be discarded, then it is not going to be released.  Because
         * we need to keep this block, we need to make sure this part
         * of the page is uptodate before we modify it by writing
         * partial zeros on it.
         */
        if (!buffer_mapped(bh)) {
            /*
             * Buffer head must be mapped before we can read
             * from the block
             */
            BUFFER_TRACE(bh, "unmapped");
            ext4_get_block(inode, iblock, bh, 0);
            /* unmapped? It's a hole - nothing to do */
            if (!buffer_mapped(bh)) {
                BUFFER_TRACE(bh, "still unmapped");
                goto next;
            }
        }

        /* Ok, it's mapped. Make sure it's up-to-date */
        if (PageUptodate(page))
            set_buffer_uptodate(bh);

        if (!buffer_uptodate(bh)) {
            err = -EIO;
            ll_rw_block(READ, 1, &bh);
            wait_on_buffer(bh);
            /* Uhhuh. Read error.
Complain and punt.*/ 3429 if (!buffer_uptodate(bh)) 3430 goto next; 3431 } 3432 3433 if (ext4_should_journal_data(inode)) { 3434 BUFFER_TRACE(bh, "get write access"); 3435 err = ext4_journal_get_write_access(handle, bh); 3436 if (err) 3437 goto next; 3438 } 3439 3440 zero_user(page, pos, range_to_discard); 3441 3442 err = 0; 3443 if (ext4_should_journal_data(inode)) { 3444 err = ext4_handle_dirty_metadata(handle, inode, bh); 3445 } else 3446 mark_buffer_dirty(bh); 3447 3448 BUFFER_TRACE(bh, "Partial buffer zeroed"); 3449 next: 3450 bh = bh->b_this_page; 3451 iblock++; 3452 pos += range_to_discard; 3453 } 3454 3455 return err; 3456 } 3457 3458 int ext4_can_truncate(struct inode *inode) 3459 { 3460 if (S_ISREG(inode->i_mode)) 3461 return 1; 3462 if (S_ISDIR(inode->i_mode)) 3463 return 1; 3464 if (S_ISLNK(inode->i_mode)) 3465 return !ext4_inode_is_fast_symlink(inode); 3466 return 0; 3467 } 3468 3469 /* 3470 * ext4_punch_hole: punches a hole in a file by releaseing the blocks 3471 * associated with the given offset and length 3472 * 3473 * @inode: File inode 3474 * @offset: The offset where the hole will begin 3475 * @len: The length of the hole 3476 * 3477 * Returns: 0 on success or negative on failure 3478 */ 3479 3480 int ext4_punch_hole(struct file *file, loff_t offset, loff_t length) 3481 { 3482 struct inode *inode = file_inode(file); 3483 if (!S_ISREG(inode->i_mode)) 3484 return -EOPNOTSUPP; 3485 3486 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3487 return ext4_ind_punch_hole(file, offset, length); 3488 3489 if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) { 3490 /* TODO: Add support for bigalloc file systems */ 3491 return -EOPNOTSUPP; 3492 } 3493 3494 trace_ext4_punch_hole(inode, offset, length); 3495 3496 return ext4_ext_punch_hole(file, offset, length); 3497 } 3498 3499 /* 3500 * ext4_truncate() 3501 * 3502 * We block out ext4_get_block() block instantiations across the entire 3503 * transaction, and VFS/VM ensures that ext4_truncate() cannot run 3504 * simultaneously on behalf of the same inode. 3505 * 3506 * As we work through the truncate and commit bits of it to the journal there 3507 * is one core, guiding principle: the file's tree must always be consistent on 3508 * disk. We must be able to restart the truncate after a crash. 3509 * 3510 * The file's tree may be transiently inconsistent in memory (although it 3511 * probably isn't), but whenever we close off and commit a journal transaction, 3512 * the contents of (the filesystem + the journal) must be consistent and 3513 * restartable. It's pretty simple, really: bottom up, right to left (although 3514 * left-to-right works OK too). 3515 * 3516 * Note that at recovery time, journal replay occurs *before* the restart of 3517 * truncate against the orphan inode list. 3518 * 3519 * The committed inode has the new, desired i_size (which is the same as 3520 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see 3521 * that this inode's truncate did not complete and it will again call 3522 * ext4_truncate() to have another go. So there will be instantiated blocks 3523 * to the right of the truncation point in a crashed ext4 filesystem. But 3524 * that's fine - as long as they are linked from the inode, the post-crash 3525 * ext4_truncate() run will find them and release them. 
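 *
 * (A short aside follows.)
 */

/*
 * [Editor's illustration -- not part of the original file.]
 * The crash-safety protocol described above, in outline.  Callers of
 * the truncate machinery bracket it with the orphan list, so that a
 * crash mid-truncate is repaired by journal recovery followed by
 * orphan processing.  This is a hedged sketch of the ordering, not the
 * exact ext4_setattr() code path; demo_truncate_protocol() is a
 * hypothetical name:
 */
static int demo_truncate_protocol(handle_t *handle, struct inode *inode)
{
    int err;

    err = ext4_orphan_add(handle, inode);   /* survive a mid-way crash */
    if (err)
        return err;
    ext4_truncate(inode);                   /* may span many transactions */
    ext4_orphan_del(handle, inode);         /* fully done: unhook */
    return 0;
}

/*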
3526 */ 3527 void ext4_truncate(struct inode *inode) 3528 { 3529 trace_ext4_truncate_enter(inode); 3530 3531 if (!ext4_can_truncate(inode)) 3532 return; 3533 3534 ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 3535 3536 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) 3537 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); 3538 3539 if (ext4_has_inline_data(inode)) { 3540 int has_inline = 1; 3541 3542 ext4_inline_data_truncate(inode, &has_inline); 3543 if (has_inline) 3544 return; 3545 } 3546 3547 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3548 ext4_ext_truncate(inode); 3549 else 3550 ext4_ind_truncate(inode); 3551 3552 trace_ext4_truncate_exit(inode); 3553 } 3554 3555 /* 3556 * ext4_get_inode_loc returns with an extra refcount against the inode's 3557 * underlying buffer_head on success. If 'in_mem' is true, we have all 3558 * data in memory that is needed to recreate the on-disk version of this 3559 * inode. 3560 */ 3561 static int __ext4_get_inode_loc(struct inode *inode, 3562 struct ext4_iloc *iloc, int in_mem) 3563 { 3564 struct ext4_group_desc *gdp; 3565 struct buffer_head *bh; 3566 struct super_block *sb = inode->i_sb; 3567 ext4_fsblk_t block; 3568 int inodes_per_block, inode_offset; 3569 3570 iloc->bh = NULL; 3571 if (!ext4_valid_inum(sb, inode->i_ino)) 3572 return -EIO; 3573 3574 iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); 3575 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); 3576 if (!gdp) 3577 return -EIO; 3578 3579 /* 3580 * Figure out the offset within the block group inode table 3581 */ 3582 inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; 3583 inode_offset = ((inode->i_ino - 1) % 3584 EXT4_INODES_PER_GROUP(sb)); 3585 block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); 3586 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); 3587 3588 bh = sb_getblk(sb, block); 3589 if (unlikely(!bh)) 3590 return -ENOMEM; 3591 if (!buffer_uptodate(bh)) { 3592 lock_buffer(bh); 3593 3594 /* 3595 * If the buffer has the write error flag, we have failed 3596 * to write out another inode in the same block. In this 3597 * case, we don't have to read the block because we may 3598 * read the old inode data successfully. 3599 */ 3600 if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) 3601 set_buffer_uptodate(bh); 3602 3603 if (buffer_uptodate(bh)) { 3604 /* someone brought it uptodate while we waited */ 3605 unlock_buffer(bh); 3606 goto has_buffer; 3607 } 3608 3609 /* 3610 * If we have all information of the inode in memory and this 3611 * is the only valid inode in the block, we need not read the 3612 * block. 3613 */ 3614 if (in_mem) { 3615 struct buffer_head *bitmap_bh; 3616 int i, start; 3617 3618 start = inode_offset & ~(inodes_per_block - 1); 3619 3620 /* Is the inode bitmap in cache? */ 3621 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); 3622 if (unlikely(!bitmap_bh)) 3623 goto make_io; 3624 3625 /* 3626 * If the inode bitmap isn't in cache then the 3627 * optimisation may end up performing two reads instead 3628 * of one, so skip it. 

/*
 * ext4_get_inode_loc returns with an extra refcount against the inode's
 * underlying buffer_head on success.  If 'in_mem' is true, we have all
 * data in memory that is needed to recreate the on-disk version of this
 * inode.
 */
static int __ext4_get_inode_loc(struct inode *inode,
				struct ext4_iloc *iloc, int in_mem)
{
	struct ext4_group_desc *gdp;
	struct buffer_head *bh;
	struct super_block *sb = inode->i_sb;
	ext4_fsblk_t block;
	int inodes_per_block, inode_offset;

	iloc->bh = NULL;
	if (!ext4_valid_inum(sb, inode->i_ino))
		return -EIO;

	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
	if (!gdp)
		return -EIO;

	/*
	 * Figure out the offset within the block group inode table
	 */
	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
	inode_offset = ((inode->i_ino - 1) %
			EXT4_INODES_PER_GROUP(sb));
	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);

	bh = sb_getblk(sb, block);
	if (unlikely(!bh))
		return -ENOMEM;
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);

		/*
		 * If the buffer has the write error flag, we have failed
		 * to write out another inode in the same block.  In this
		 * case, we don't have to read the block because we may
		 * read the old inode data successfully.
		 */
		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
			set_buffer_uptodate(bh);

		if (buffer_uptodate(bh)) {
			/* someone brought it uptodate while we waited */
			unlock_buffer(bh);
			goto has_buffer;
		}

		/*
		 * If we have all information of the inode in memory and this
		 * is the only valid inode in the block, we need not read the
		 * block.
		 */
		if (in_mem) {
			struct buffer_head *bitmap_bh;
			int i, start;

			start = inode_offset & ~(inodes_per_block - 1);

			/* Is the inode bitmap in cache? */
			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
			if (unlikely(!bitmap_bh))
				goto make_io;

			/*
			 * If the inode bitmap isn't in cache then the
			 * optimisation may end up performing two reads instead
			 * of one, so skip it.
			 */
			if (!buffer_uptodate(bitmap_bh)) {
				brelse(bitmap_bh);
				goto make_io;
			}
			for (i = start; i < start + inodes_per_block; i++) {
				if (i == inode_offset)
					continue;
				if (ext4_test_bit(i, bitmap_bh->b_data))
					break;
			}
			brelse(bitmap_bh);
			if (i == start + inodes_per_block) {
				/* all other inodes are free, so skip I/O */
				memset(bh->b_data, 0, bh->b_size);
				set_buffer_uptodate(bh);
				unlock_buffer(bh);
				goto has_buffer;
			}
		}

make_io:
		/*
		 * If we need to do any I/O, try to pre-readahead extra
		 * blocks from the inode table.
		 */
		if (EXT4_SB(sb)->s_inode_readahead_blks) {
			ext4_fsblk_t b, end, table;
			unsigned num;

			table = ext4_inode_table(sb, gdp);
			/* s_inode_readahead_blks is always a power of 2 */
			b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
			if (table > b)
				b = table;
			end = b + EXT4_SB(sb)->s_inode_readahead_blks;
			num = EXT4_INODES_PER_GROUP(sb);
			if (ext4_has_group_desc_csum(sb))
				num -= ext4_itable_unused_count(sb, gdp);
			table += num / inodes_per_block;
			if (end > table)
				end = table;
			while (b <= end)
				sb_breadahead(sb, b++);
		}

		/*
		 * There are other valid inodes in the buffer, this inode
		 * has in-inode xattrs, or we don't have this inode in memory.
		 * Read the block from disk.
		 */
		trace_ext4_load_inode(inode);
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ | REQ_META | REQ_PRIO, bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			EXT4_ERROR_INODE_BLOCK(inode, block,
					       "unable to read itable block");
			brelse(bh);
			return -EIO;
		}
	}
has_buffer:
	iloc->bh = bh;
	return 0;
}

int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
{
	/* We have all inode data except xattrs in memory here. */
	return __ext4_get_inode_loc(inode, iloc,
		!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
}
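
/*
 * Worked example of the location arithmetic above, for a hypothetical
 * filesystem with 8192 inodes per group, 256-byte inodes and 4K blocks
 * (so 16 inodes per block):
 *
 *	ino = 10000  =>  block_group  = (10000 - 1) / 8192 = 1
 *			 inode_offset = (10000 - 1) % 8192 = 1807
 *			 block        = inode table of group 1 + 1807 / 16
 *				      = inode table of group 1 + 112
 *			 iloc->offset = (1807 % 16) * 256 = 3840
 */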

void ext4_set_inode_flags(struct inode *inode)
{
	unsigned int flags = EXT4_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
	if (flags & EXT4_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & EXT4_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & EXT4_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & EXT4_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & EXT4_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
}

/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
void ext4_get_inode_flags(struct ext4_inode_info *ei)
{
	unsigned int vfs_fl;
	unsigned long old_fl, new_fl;

	do {
		vfs_fl = ei->vfs_inode.i_flags;
		old_fl = ei->i_flags;
		new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
				EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
				EXT4_DIRSYNC_FL);
		if (vfs_fl & S_SYNC)
			new_fl |= EXT4_SYNC_FL;
		if (vfs_fl & S_APPEND)
			new_fl |= EXT4_APPEND_FL;
		if (vfs_fl & S_IMMUTABLE)
			new_fl |= EXT4_IMMUTABLE_FL;
		if (vfs_fl & S_NOATIME)
			new_fl |= EXT4_NOATIME_FL;
		if (vfs_fl & S_DIRSYNC)
			new_fl |= EXT4_DIRSYNC_FL;
	} while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
}

static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
				  struct ext4_inode_info *ei)
{
	blkcnt_t i_blocks;
	struct inode *inode = &(ei->vfs_inode);
	struct super_block *sb = inode->i_sb;

	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
		/* we are using combined 48 bit field */
		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
					le32_to_cpu(raw_inode->i_blocks_lo);
		if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
			/* i_blocks represent file system block size */
			return i_blocks << (inode->i_blkbits - 9);
		} else {
			return i_blocks;
		}
	} else {
		return le32_to_cpu(raw_inode->i_blocks_lo);
	}
}

static inline void ext4_iget_extra_inode(struct inode *inode,
					 struct ext4_inode *raw_inode,
					 struct ext4_inode_info *ei)
{
	__le32 *magic = (void *)raw_inode +
			EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
	if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
		ext4_find_inline_data_nolock(inode);
	} else
		EXT4_I(inode)->i_inline_off = 0;
}
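
/*
 * A minimal generic sketch (hypothetical helper) of the lock-free
 * read-modify-write pattern used by ext4_get_inode_flags() above:
 * recompute the new value from a snapshot and retry if another CPU
 * changed the word in the meantime.
 */
#if 0
static void set_bits_atomically(unsigned long *word, unsigned long bits)
{
	unsigned long old, new;

	do {
		old = *word;		/* snapshot */
		new = old | bits;	/* derive the update from it */
	} while (cmpxchg(word, old, new) != old);
}
#endif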

struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
{
	struct ext4_iloc iloc;
	struct ext4_inode *raw_inode;
	struct ext4_inode_info *ei;
	struct inode *inode;
	journal_t *journal = EXT4_SB(sb)->s_journal;
	long ret;
	int block;
	uid_t i_uid;
	gid_t i_gid;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ei = EXT4_I(inode);
	iloc.bh = NULL;

	ret = __ext4_get_inode_loc(inode, &iloc, 0);
	if (ret < 0)
		goto bad_inode;
	raw_inode = ext4_raw_inode(&iloc);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
		    EXT4_INODE_SIZE(inode->i_sb)) {
			EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)",
				EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize,
				EXT4_INODE_SIZE(inode->i_sb));
			ret = -EIO;
			goto bad_inode;
		}
	} else
		ei->i_extra_isize = 0;

	/* Precompute checksum seed for inode metadata */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		__u32 csum;
		__le32 inum = cpu_to_le32(inode->i_ino);
		__le32 gen = raw_inode->i_generation;
		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
				   sizeof(inum));
		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
					      sizeof(gen));
	}

	if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
		EXT4_ERROR_INODE(inode, "checksum invalid");
		ret = -EIO;
		goto bad_inode;
	}

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	i_uid_write(inode, i_uid);
	i_gid_write(inode, i_gid);
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));

	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
	ei->i_inline_off = 0;
	ei->i_dir_start_lookup = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/*
	 * We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes;
	 * the test is the same one that e2fsck uses.
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0) {
		if (inode->i_mode == 0 ||
		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
			/* this inode is deleted */
			ret = -ESTALE;
			goto bad_inode;
		}
		/* The only unlinked inodes we let through here have
		 * valid i_mode and are being read by the orphan
		 * recovery code: that's fine, we're about to complete
		 * the process of deleting those. */
	}
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
		ei->i_file_acl |=
			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
	inode->i_size = ext4_isize(raw_inode);
	ei->i_disksize = inode->i_size;
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
#endif
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_block_group = iloc.block_group;
	ei->i_last_alloc_group = ~0;
	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (block = 0; block < EXT4_N_BLOCKS; block++)
		ei->i_data[block] = raw_inode->i_block[block];
	INIT_LIST_HEAD(&ei->i_orphan);

	/*
	 * Set the transaction ids of transactions that have to be committed
	 * to finish f[data]sync.  We set them to the currently running
	 * transaction as we cannot be sure that the inode or some of its
	 * metadata isn't part of the transaction - the inode could have been
	 * reclaimed and now it is reread from disk.
	 */
	if (journal) {
		transaction_t *transaction;
		tid_t tid;

		read_lock(&journal->j_state_lock);
		if (journal->j_running_transaction)
			transaction = journal->j_running_transaction;
		else
			transaction = journal->j_committing_transaction;
		if (transaction)
			tid = transaction->t_tid;
		else
			tid = journal->j_commit_sequence;
		read_unlock(&journal->j_state_lock);
		ei->i_sync_tid = tid;
		ei->i_datasync_tid = tid;
	}

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		if (ei->i_extra_isize == 0) {
			/* The extra space is currently unused. Use it. */
			ei->i_extra_isize = sizeof(struct ext4_inode) -
					    EXT4_GOOD_OLD_INODE_SIZE;
		} else {
			ext4_iget_extra_inode(inode, raw_inode, ei);
		}
	}

	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);

	inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
			inode->i_version |=
			(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
	}

	ret = 0;
	if (ei->i_file_acl &&
	    !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
		EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
				 ei->i_file_acl);
		ret = -EIO;
		goto bad_inode;
	} else if (!ext4_has_inline_data(inode)) {
		if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
			if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
			    (S_ISLNK(inode->i_mode) &&
			     !ext4_inode_is_fast_symlink(inode))))
				/* Validate extent which is part of inode */
				ret = ext4_ext_check_inode(inode);
		} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
			   (S_ISLNK(inode->i_mode) &&
			    !ext4_inode_is_fast_symlink(inode))) {
			/* Validate block references which are part of inode */
			ret = ext4_ind_check_inode(inode);
		}
	}
	if (ret)
		goto bad_inode;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext4_file_inode_operations;
		inode->i_fop = &ext4_file_operations;
		ext4_set_aops(inode);
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext4_dir_inode_operations;
		inode->i_fop = &ext4_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext4_inode_is_fast_symlink(inode)) {
			inode->i_op = &ext4_fast_symlink_inode_operations;
			nd_terminate_link(ei->i_data, inode->i_size,
				sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext4_symlink_inode_operations;
			ext4_set_aops(inode);
		}
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
		   S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &ext4_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	} else {
		ret = -EIO;
		EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
		goto bad_inode;
	}
	brelse(iloc.bh);
	ext4_set_inode_flags(inode);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	brelse(iloc.bh);
	iget_failed(inode);
	return ERR_PTR(ret);
}
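
/*
 * A minimal usage sketch (hypothetical caller that already knows a valid
 * inode number): as the code above shows, ext4_iget() returns either the
 * inode or an ERR_PTR() value, never NULL, so callers must use IS_ERR().
 */
#if 0
static struct inode *lookup_example(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = ext4_iget(sb, ino);

	if (IS_ERR(inode))
		return inode;		/* -EIO, -ESTALE, -ENOMEM, ... */
	/* ... use the inode ... */
	iput(inode);			/* drop the reference when done */
	return NULL;
}
#endif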

static int ext4_inode_blocks_set(handle_t *handle,
				 struct ext4_inode *raw_inode,
				 struct ext4_inode_info *ei)
{
	struct inode *inode = &(ei->vfs_inode);
	u64 i_blocks = inode->i_blocks;
	struct super_block *sb = inode->i_sb;

	if (i_blocks <= ~0U) {
		/*
		 * i_blocks can be represented in a 32 bit variable
		 * as multiple of 512 bytes
		 */
		raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = 0;
		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
		return 0;
	}
	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
		return -EFBIG;

	if (i_blocks <= 0xffffffffffffULL) {
		/*
		 * i_blocks can be represented in a 48 bit variable
		 * as multiple of 512 bytes
		 */
		raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
	} else {
		ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
		/* i_blocks is stored in file system block size */
		i_blocks = i_blocks >> (inode->i_blkbits - 9);
		raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
	}
	return 0;
}
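
/*
 * Worked example for the encoding above: i_blocks counts 512-byte units
 * unless EXT4_INODE_HUGE_FILE is set, in which case it counts whole
 * filesystem blocks.  Assuming 4K blocks (i_blkbits == 12):
 *
 *	i_blocks <= 0xffffffff	   -> 32-bit field, 512-byte units
 *	i_blocks <= 0xffffffffffff -> 48-bit field, 512-byte units
 *	larger			   -> HUGE_FILE is set and the value is
 *				      stored as i_blocks >> (12 - 9),
 *				      i.e. in units of 4K blocks
 */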

/*
 * Post the struct inode info into an on-disk inode location in the
 * buffer-cache.  This gobbles the caller's reference to the
 * buffer_head in the inode location struct.
 *
 * The caller must have write access to iloc->bh.
 */
static int ext4_do_update_inode(handle_t *handle,
				struct inode *inode,
				struct ext4_iloc *iloc)
{
	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct buffer_head *bh = iloc->bh;
	int err = 0, rc, block;
	int need_datasync = 0;
	uid_t i_uid;
	gid_t i_gid;

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);

	ext4_get_inode_flags(ei);
	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	i_uid = i_uid_read(inode);
	i_gid = i_gid_read(inode);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
		/*
		 * Fix up interoperability with old kernels. Otherwise,
		 * old inodes get re-used with the upper 16 bits of the
		 * uid/gid intact.
		 */
		if (!ei->i_dtime) {
			raw_inode->i_uid_high =
				cpu_to_le16(high_16_bits(i_uid));
			raw_inode->i_gid_high =
				cpu_to_le16(high_16_bits(i_gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);

	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);

	if (ext4_inode_blocks_set(handle, raw_inode, ei))
		goto out_brelse;
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_HURD))
		raw_inode->i_file_acl_high =
			cpu_to_le16(ei->i_file_acl >> 32);
	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
	if (ei->i_disksize != ext4_isize(raw_inode)) {
		ext4_isize_set(raw_inode, ei->i_disksize);
		need_datasync = 1;
	}
	if (ei->i_disksize > 0x7fffffffULL) {
		struct super_block *sb = inode->i_sb;
		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
		    EXT4_SB(sb)->s_es->s_rev_level ==
		    cpu_to_le32(EXT4_GOOD_OLD_REV)) {
			/* If this is the first large file
			 * created, add a flag to the superblock.
			 */
			err = ext4_journal_get_write_access(handle,
					EXT4_SB(sb)->s_sbh);
			if (err)
				goto out_brelse;
			ext4_update_dynamic_rev(sb);
			EXT4_SET_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
			ext4_handle_sync(handle);
			err = ext4_handle_dirty_super(handle, sb);
		}
	}
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else if (!ext4_has_inline_data(inode)) {
		for (block = 0; block < EXT4_N_BLOCKS; block++)
			raw_inode->i_block[block] = ei->i_data[block];
	}

	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
	if (ei->i_extra_isize) {
		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
			raw_inode->i_version_hi =
				cpu_to_le32(inode->i_version >> 32);
		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
	}

	ext4_inode_csum_set(inode, raw_inode, ei);

	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
	rc = ext4_handle_dirty_metadata(handle, NULL, bh);
	if (!err)
		err = rc;
	ext4_clear_inode_state(inode, EXT4_STATE_NEW);

	ext4_update_inode_fsync_trans(handle, inode, need_datasync);
out_brelse:
	brelse(bh);
	ext4_std_error(inode->i_sb, err);
	return err;
}

/*
 * ext4_write_inode()
 *
 * We are called from a few places:
 *
 * - Within generic_file_write() for O_SYNC files.
 *   Here, there will be no transaction running.  We wait for any running
 *   transaction to commit.
 *
 * - Within sys_sync(), kupdate and such.
 *   We wait on commit, if told to.
 *
 * - Within prune_icache() (PF_MEMALLOC == true)
 *   Here we simply return.  We can't afford to block kswapd on the
 *   journal commit.
 *
 * In all cases it is actually safe for us to return without doing anything,
 * because the inode has been copied into a raw inode buffer in
 * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
 * knfsd.
 *
 * Note that we are absolutely dependent upon all inode dirtiers doing the
 * right thing: they *must* call mark_inode_dirty() after dirtying info in
 * which we are interested.
 *
 * It would be a bug for them to not do this.  The code:
 *
 *	mark_inode_dirty(inode)
 *	stuff();
 *	inode->i_size = expr;
 *
 * is in error because a kswapd-driven write_inode() could occur while
 * `stuff()' is running, and the new i_size will be lost.  Plus the inode
 * will no longer be on the superblock's dirty inode list.
 */
int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int err;

	if (current->flags & PF_MEMALLOC)
		return 0;

	if (EXT4_SB(inode->i_sb)->s_journal) {
		if (ext4_journal_current_handle()) {
			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
			dump_stack();
			return -EIO;
		}

		if (wbc->sync_mode != WB_SYNC_ALL)
			return 0;

		err = ext4_force_commit(inode->i_sb);
	} else {
		struct ext4_iloc iloc;

		err = __ext4_get_inode_loc(inode, &iloc, 0);
		if (err)
			return err;
		if (wbc->sync_mode == WB_SYNC_ALL)
			sync_dirty_buffer(iloc.bh);
		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
			EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
					       "IO error syncing inode");
			err = -EIO;
		}
		brelse(iloc.bh);
	}
	return err;
}

/*
 * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
 * buffers that are attached to a page straddling i_size and are undergoing
 * commit.  In that case we have to wait for commit to finish and try again.
 */
static void ext4_wait_for_tail_page_commit(struct inode *inode)
{
	struct page *page;
	unsigned offset;
	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
	tid_t commit_tid = 0;
	int ret;

	offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
	/*
	 * All buffers in the last page remain valid? Then there's nothing to
	 * do.  We do the check mainly to optimize the common
	 * PAGE_CACHE_SIZE == blocksize case
	 */
	if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits))
		return;
	while (1) {
		page = find_lock_page(inode->i_mapping,
				      inode->i_size >> PAGE_CACHE_SHIFT);
		if (!page)
			return;
		ret = __ext4_journalled_invalidatepage(page, offset);
		unlock_page(page);
		page_cache_release(page);
		if (ret != -EBUSY)
			return;
		commit_tid = 0;
		read_lock(&journal->j_state_lock);
		if (journal->j_committing_transaction)
			commit_tid = journal->j_committing_transaction->t_tid;
		read_unlock(&journal->j_state_lock);
		if (commit_tid)
			jbd2_log_wait_commit(journal, commit_tid);
	}
}
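
/*
 * For contrast with the buggy ordering shown in the comment above
 * ext4_write_inode(), a correct dirtier updates every interesting field
 * first and notifies the VFS last.  A minimal sketch (hypothetical
 * helper):
 */
#if 0
static void dirtier_example(struct inode *inode, loff_t expr)
{
	inode->i_size = expr;		/* update the interesting fields... */
	mark_inode_dirty(inode);	/* ...and only then dirty the inode */
}
#endif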

/*
 * ext4_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible.  In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk.  (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Another thing we have to ensure is that if we are in ordered mode and
 * the inode is still attached to the committing transaction, we must
 * start writeout of all the dirty pages which are being truncated.
 * This way we are sure that all the data written in the previous
 * transaction are already on disk (truncate waits for pages under
 * writeback).
 *
 * Called with inode->i_mutex down.
 */
int ext4_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error, rc = 0;
	int orphan = 0;
	const unsigned int ia_valid = attr->ia_valid;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (is_quota_modification(inode, attr))
		dquot_initialize(inode);
	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
		handle_t *handle;

		/* (user+group)*(old+new) structure, inode write (sb,
		 * inode block, ? - but truncate inode update has it) */
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
			(EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
			 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}
		error = dquot_transfer(inode, attr);
		if (error) {
			ext4_journal_stop(handle);
			return error;
		}
		/* Update corresponding info in inode so that everything is in
		 * one transaction */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		error = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	}

	if (attr->ia_valid & ATTR_SIZE) {

		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

			if (attr->ia_size > sbi->s_bitmap_maxbytes)
				return -EFBIG;
		}
	}

	if (S_ISREG(inode->i_mode) &&
	    attr->ia_valid & ATTR_SIZE &&
	    (attr->ia_size < inode->i_size)) {
		handle_t *handle;

		handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}
		if (ext4_handle_valid(handle)) {
			error = ext4_orphan_add(handle, inode);
			orphan = 1;
		}
		EXT4_I(inode)->i_disksize = attr->ia_size;
		rc = ext4_mark_inode_dirty(handle, inode);
		if (!error)
			error = rc;
		ext4_journal_stop(handle);

		if (ext4_should_order_data(inode)) {
			error = ext4_begin_ordered_truncate(inode,
							    attr->ia_size);
			if (error) {
				/* Do as much error cleanup as possible */
				handle = ext4_journal_start(inode,
							    EXT4_HT_INODE, 3);
				if (IS_ERR(handle)) {
					ext4_orphan_del(NULL, inode);
					goto err_out;
				}
				ext4_orphan_del(handle, inode);
				orphan = 0;
				ext4_journal_stop(handle);
				goto err_out;
			}
		}
	}

	if (attr->ia_valid & ATTR_SIZE) {
		if (attr->ia_size != inode->i_size) {
			loff_t oldsize = inode->i_size;

			i_size_write(inode, attr->ia_size);
			/*
			 * Blocks are going to be removed from the inode. Wait
			 * for dio in flight.  Temporarily disable
			 * dioread_nolock to prevent livelock.
			 */
			if (orphan) {
				if (!ext4_should_journal_data(inode)) {
					ext4_inode_block_unlocked_dio(inode);
					inode_dio_wait(inode);
					ext4_inode_resume_unlocked_dio(inode);
				} else
					ext4_wait_for_tail_page_commit(inode);
			}
			/*
			 * Truncate pagecache after we've waited for commit
			 * in data=journal mode to make pages freeable.
			 */
			truncate_pagecache(inode, oldsize, inode->i_size);
		}
		ext4_truncate(inode);
	}

	if (!rc) {
		setattr_copy(inode, attr);
		mark_inode_dirty(inode);
	}

	/*
	 * If the call to ext4_truncate failed to get a transaction handle at
	 * all, we need to clean up the in-core orphan list manually.
	 */
	if (orphan && inode->i_nlink)
		ext4_orphan_del(NULL, inode);

	if (!rc && (ia_valid & ATTR_MODE))
		rc = ext4_acl_chmod(inode);

err_out:
	ext4_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	return error;
}

int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode;
	unsigned long delalloc_blocks;

	inode = dentry->d_inode;
	generic_fillattr(inode, stat);

	/*
	 * We can't update i_blocks if the block allocation is delayed,
	 * otherwise in the case of a system crash before the real block
	 * allocation is done, we will have i_blocks inconsistent with
	 * on-disk file blocks.
	 * We always keep i_blocks updated together with the real
	 * allocation.  But to not confuse the user, stat
	 * will return the blocks that include the delayed allocation
	 * blocks for this file.
	 */
	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
				EXT4_I(inode)->i_reserved_data_blocks);

	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits) >> 9;
	return 0;
}

static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return ext4_ind_trans_blocks(inode, nrblocks, chunk);
	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
}

/*
 * Account for index blocks, block group bitmaps and block group
 * descriptor blocks if we modify data blocks and index blocks.
 * In the worst case, the index blocks spread over different block groups.
 *
 * If the data blocks are discontiguous, they may spread over different
 * block groups too.  Even if they are contiguous, with flexbg they could
 * still cross a block group boundary.
 *
 * Also account for superblock, inode, quota and xattr blocks.
 */
static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
	int gdpblocks;
	int idxblocks;
	int ret = 0;

	/*
	 * How many index blocks do we need to touch to modify nrblocks?
	 * The "chunk" flag indicates whether nrblocks is physically
	 * contiguous on disk.
	 *
	 * Direct IO and fallocate call get_block to allocate one single
	 * extent at a time, so they can set the "chunk" flag.
	 */
	idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);

	ret = idxblocks;

	/*
	 * Now let's see how many group bitmaps and group descriptors need
	 * to be accounted.
	 */
	groups = idxblocks;
	if (chunk)
		groups += 1;
	else
		groups += nrblocks;

	gdpblocks = groups;
	if (groups > ngroups)
		groups = ngroups;
	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;

	/* bitmaps and block group descriptor blocks */
	ret += groups + gdpblocks;

	/* Blocks for super block, inode, quota and xattr blocks */
	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);

	return ret;
}
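
/*
 * Worked example for the estimate above, under assumed inputs: one
 * contiguous chunk (chunk == 1) of nrblocks == 16 data blocks, with
 * ext4_index_trans_blocks() returning 3, on a filesystem where both
 * ngroups and s_gdb_count exceed 4:
 *
 *	idxblocks = 3
 *	groups    = 3 + 1 = 4		(index blocks + one data chunk)
 *	ret       = 3 + (4 + 4)		(indexes + bitmaps + gdp blocks)
 *		    + EXT4_META_TRANS_BLOCKS(sb)
 */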

/*
 * Calculate the total number of credits to reserve to fit
 * the modification of a single page into a single transaction,
 * which may include multiple chunks of block allocations.
 *
 * This could be called via ext4_write_begin()
 *
 * We need to consider the worst case, when
 * one new block is needed per extent.
 */
int ext4_writepage_trans_blocks(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);
	int ret;

	ret = ext4_meta_trans_blocks(inode, bpp, 0);

	/* Account for data blocks for journalled mode */
	if (ext4_should_journal_data(inode))
		ret += bpp;
	return ret;
}

/*
 * Calculate the journal credits for a chunk of data modification.
 *
 * This is called from DIO, fallocate or anything else that calls
 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
 *
 * Journal buffers for data blocks are not included here, as DIO
 * and fallocate do not need to journal data buffers.
 */
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
	return ext4_meta_trans_blocks(inode, nrblocks, 1);
}

/*
 * The caller must have previously called ext4_reserve_inode_write().
 * Given this, we know that the caller already has write access to iloc->bh.
 */
int ext4_mark_iloc_dirty(handle_t *handle,
			 struct inode *inode, struct ext4_iloc *iloc)
{
	int err = 0;

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);

	/* the do_update_inode consumes one bh->b_count */
	get_bh(iloc->bh);

	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
	err = ext4_do_update_inode(handle, inode, iloc);
	put_bh(iloc->bh);
	return err;
}

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh.  This _must_ be cleaned up later.
 */

int
ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			 struct ext4_iloc *iloc)
{
	int err;

	err = ext4_get_inode_loc(inode, iloc);
	if (!err) {
		BUFFER_TRACE(iloc->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, iloc->bh);
		if (err) {
			brelse(iloc->bh);
			iloc->bh = NULL;
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
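
/*
 * A minimal sketch of the reserve/modify/dirty pattern built from the
 * two helpers above (hypothetical caller, error handling trimmed);
 * ext4_mark_inode_dirty() below is the real consumer of this pattern.
 */
#if 0
static int update_example(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	int err;

	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		return err;
	/* ... modify the in-core inode here ... */
	return ext4_mark_iloc_dirty(handle, inode, &iloc);
}
#endif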
4623 */ 4624 static int ext4_expand_extra_isize(struct inode *inode, 4625 unsigned int new_extra_isize, 4626 struct ext4_iloc iloc, 4627 handle_t *handle) 4628 { 4629 struct ext4_inode *raw_inode; 4630 struct ext4_xattr_ibody_header *header; 4631 4632 if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) 4633 return 0; 4634 4635 raw_inode = ext4_raw_inode(&iloc); 4636 4637 header = IHDR(inode, raw_inode); 4638 4639 /* No extended attributes present */ 4640 if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) || 4641 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { 4642 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0, 4643 new_extra_isize); 4644 EXT4_I(inode)->i_extra_isize = new_extra_isize; 4645 return 0; 4646 } 4647 4648 /* try to expand with EAs present */ 4649 return ext4_expand_extra_isize_ea(inode, new_extra_isize, 4650 raw_inode, handle); 4651 } 4652 4653 /* 4654 * What we do here is to mark the in-core inode as clean with respect to inode 4655 * dirtiness (it may still be data-dirty). 4656 * This means that the in-core inode may be reaped by prune_icache 4657 * without having to perform any I/O. This is a very good thing, 4658 * because *any* task may call prune_icache - even ones which 4659 * have a transaction open against a different journal. 4660 * 4661 * Is this cheating? Not really. Sure, we haven't written the 4662 * inode out, but prune_icache isn't a user-visible syncing function. 4663 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) 4664 * we start and wait on commits. 4665 */ 4666 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) 4667 { 4668 struct ext4_iloc iloc; 4669 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4670 static unsigned int mnt_count; 4671 int err, ret; 4672 4673 might_sleep(); 4674 trace_ext4_mark_inode_dirty(inode, _RET_IP_); 4675 err = ext4_reserve_inode_write(handle, inode, &iloc); 4676 if (ext4_handle_valid(handle) && 4677 EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && 4678 !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) { 4679 /* 4680 * We need extra buffer credits since we may write into EA block 4681 * with this same handle. If journal_extend fails, then it will 4682 * only result in a minor loss of functionality for that inode. 4683 * If this is felt to be critical, then e2fsck should be run to 4684 * force a large enough s_min_extra_isize. 4685 */ 4686 if ((jbd2_journal_extend(handle, 4687 EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) { 4688 ret = ext4_expand_extra_isize(inode, 4689 sbi->s_want_extra_isize, 4690 iloc, handle); 4691 if (ret) { 4692 ext4_set_inode_state(inode, 4693 EXT4_STATE_NO_EXPAND); 4694 if (mnt_count != 4695 le16_to_cpu(sbi->s_es->s_mnt_count)) { 4696 ext4_warning(inode->i_sb, 4697 "Unable to expand inode %lu. Delete" 4698 " some EAs or run e2fsck.", 4699 inode->i_ino); 4700 mnt_count = 4701 le16_to_cpu(sbi->s_es->s_mnt_count); 4702 } 4703 } 4704 } 4705 } 4706 if (!err) 4707 err = ext4_mark_iloc_dirty(handle, inode, &iloc); 4708 return err; 4709 } 4710 4711 /* 4712 * ext4_dirty_inode() is called from __mark_inode_dirty() 4713 * 4714 * We're really interested in the case where a file is being extended. 4715 * i_size has been changed by generic_commit_write() and we thus need 4716 * to include the updated inode in the current transaction. 4717 * 4718 * Also, dquot_alloc_block() will always dirty the inode when blocks 4719 * are allocated to the file. 

/*
 * ext4_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Also, dquot_alloc_block() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
 * so would cause a commit on atime updates, which we don't bother doing.
 * We handle synchronous inodes at the highest possible level.
 */
void ext4_dirty_inode(struct inode *inode, int flags)
{
	handle_t *handle;

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (IS_ERR(handle))
		goto out;

	ext4_mark_inode_dirty(handle, inode);

	ext4_journal_stop(handle);
out:
	return;
}

#if 0
/*
 * Bind an inode's backing buffer_head into this transaction, to prevent
 * it from being flushed to disk early.  Unlike
 * ext4_reserve_inode_write, this leaves behind no bh reference and
 * returns no iloc structure, so the caller needs to repeat the iloc
 * lookup to mark the inode dirty later.
 */
static int ext4_pin_inode(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;

	int err = 0;
	if (handle) {
		err = ext4_get_inode_loc(inode, &iloc);
		if (!err) {
			BUFFER_TRACE(iloc.bh, "get_write_access");
			err = jbd2_journal_get_write_access(handle, iloc.bh);
			if (!err)
				err = ext4_handle_dirty_metadata(handle,
								 NULL,
								 iloc.bh);
			brelse(iloc.bh);
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
#endif

int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;

	/*
	 * We have to be very careful here: changing a data block's
	 * journaling status dynamically is dangerous.  If we write a
	 * data block to the journal, change the status and then delete
	 * that block, we risk forgetting to revoke the old log record
	 * from the journal and so a subsequent replay can corrupt data.
	 * So, first we make sure that the journal is empty and that
	 * nobody is changing anything.
	 */

	journal = EXT4_JOURNAL(inode);
	if (!journal)
		return 0;
	if (is_journal_aborted(journal))
		return -EROFS;
	/*
	 * We have to allocate physical blocks for delalloc blocks before
	 * flushing the journal; otherwise delalloc blocks cannot be
	 * allocated any more.  Worse, a truncate on delalloc blocks could
	 * trigger a BUG by flushing delalloc blocks in the journal.
	 * There are no delalloc blocks in non-journal data mode.
	 */
	if (val && test_opt(inode->i_sb, DELALLOC)) {
		err = ext4_alloc_da_blocks(inode);
		if (err < 0)
			return err;
	}

	/* Wait for all existing dio workers */
	ext4_inode_block_unlocked_dio(inode);
	inode_dio_wait(inode);

	jbd2_journal_lock_updates(journal);

	/*
	 * OK, there are no updates running now, and all cached data is
	 * synced to disk.  We are now in a completely consistent state
	 * which doesn't have anything in the journal, and we know that
	 * no filesystem updates are running, so it is safe to modify
	 * the inode's in-core data-journaling state flag now.
	 */

	if (val)
		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	else {
		jbd2_journal_flush(journal);
		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	}
	ext4_set_aops(inode);

	jbd2_journal_unlock_updates(journal);
	ext4_inode_resume_unlocked_dio(inode);

	/* Finally we can mark the inode as dirty. */

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_mark_inode_dirty(handle, inode);
	ext4_handle_sync(handle);
	ext4_journal_stop(handle);
	ext4_std_error(inode->i_sb, err);

	return err;
}
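
/*
 * Illustrative sketch (not kernel code): this path is normally driven
 * from userspace through the FS_IOC_SETFLAGS ioctl, which is what
 * chattr +j / chattr -j effectively does:
 */
#if 0
#include <sys/ioctl.h>
#include <linux/fs.h>

static int set_journal_data_example(int fd, int enable)
{
	int flags;

	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
		return -1;
	if (enable)
		flags |= FS_JOURNAL_DATA_FL;
	else
		flags &= ~FS_JOURNAL_DATA_FL;
	return ioctl(fd, FS_IOC_SETFLAGS, &flags);
}
#endif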

static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
{
	return !buffer_mapped(bh);
}

int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	loff_t size;
	unsigned long len;
	int ret;
	struct file *file = vma->vm_file;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	handle_t *handle;
	get_block_t *get_block;
	int retries = 0;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	/* Delalloc case is easy... */
	if (test_opt(inode->i_sb, DELALLOC) &&
	    !ext4_should_journal_data(inode) &&
	    !ext4_nonda_switch(inode->i_sb)) {
		do {
			ret = __block_page_mkwrite(vma, vmf,
						   ext4_da_get_block_prep);
		} while (ret == -ENOSPC &&
			 ext4_should_retry_alloc(inode->i_sb, &retries));
		goto out_ret;
	}

	lock_page(page);
	size = i_size_read(inode);
	/* Page got truncated from under us? */
	if (page->mapping != mapping || page_offset(page) > size) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;
	/*
	 * Return if we have all the buffers mapped.  This avoids the need to
	 * do journal_start/journal_stop which can block and take a long time
	 */
	if (page_has_buffers(page)) {
		if (!ext4_walk_page_buffers(NULL, page_buffers(page),
					    0, len, NULL,
					    ext4_bh_unmapped)) {
			/* Wait so that we don't change page under IO */
			wait_for_stable_page(page);
			ret = VM_FAULT_LOCKED;
			goto out;
		}
	}
	unlock_page(page);
	/* OK, we need to fill the hole... */
	if (ext4_should_dioread_nolock(inode))
		get_block = ext4_get_block_write;
	else
		get_block = ext4_get_block;
retry_alloc:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
				    ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}
	ret = __block_page_mkwrite(vma, vmf, get_block);
	if (!ret && ext4_should_journal_data(inode)) {
		if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
			  PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
			unlock_page(page);
			ret = VM_FAULT_SIGBUS;
			ext4_journal_stop(handle);
			goto out;
		}
		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	}
	ext4_journal_stop(handle);
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry_alloc;
out_ret:
	ret = block_page_mkwrite_return(ret);
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}
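
/*
 * Illustrative sketch (not kernel code): ext4_page_mkwrite() runs when a
 * process first writes through a shared writable mapping, e.g.:
 */
#if 0
#include <sys/mman.h>

static void mmap_write_example(int fd)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (p != MAP_FAILED)
		p[0] = 1;	/* first write faults -> ->page_mkwrite() */
}
#endif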