/*
 * linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/bitops.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
                             struct ext4_inode_info *ei)
{
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        __u16 csum_lo;
        __u16 csum_hi = 0;
        __u32 csum;

        csum_lo = le16_to_cpu(raw->i_checksum_lo);
        raw->i_checksum_lo = 0;
        if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
            EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
                csum_hi = le16_to_cpu(raw->i_checksum_hi);
                raw->i_checksum_hi = 0;
        }

        csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
                           EXT4_INODE_SIZE(inode->i_sb));

        raw->i_checksum_lo = cpu_to_le16(csum_lo);
        if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
            EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
                raw->i_checksum_hi = cpu_to_le16(csum_hi);

        return csum;
}

static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
                                  struct ext4_inode_info *ei)
{
        __u32 provided, calculated;

        if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
            cpu_to_le32(EXT4_OS_LINUX) ||
            !ext4_has_metadata_csum(inode->i_sb))
                return 1;

        provided = le16_to_cpu(raw->i_checksum_lo);
        calculated = ext4_inode_csum(inode, raw, ei);
        if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
            EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
                provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
        else
                calculated &= 0xFFFF;

        return provided == calculated;
}

static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
                                struct ext4_inode_info *ei)
{
        __u32 csum;

        if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
            cpu_to_le32(EXT4_OS_LINUX) ||
            !ext4_has_metadata_csum(inode->i_sb))
                return;

        csum = ext4_inode_csum(inode, raw, ei);
        raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
        if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
            EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
                raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}
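
/*
 * Illustrative note (the call sites are elsewhere in ext4, not in this
 * hunk): the three checksum helpers above are paired around raw inode
 * I/O - readers such as ext4_iget() verify after copying the on-disk
 * inode, and writers such as ext4_do_update_inode() recompute just
 * before dirtying the buffer, roughly:
 *
 *	if (!ext4_inode_csum_verify(inode, raw_inode, ei))
 *		// treat the inode as corrupt, e.g. fail with -EFSBADCRC
 *	...
 *	ext4_inode_csum_set(inode, raw_inode, ei);
 */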

static inline int ext4_begin_ordered_truncate(struct inode *inode,
                                              loff_t new_size)
{
        trace_ext4_begin_ordered_truncate(inode, new_size);
        /*
         * If jinode is zero, then we never opened the file for
         * writing, so there's no need to call
         * jbd2_journal_begin_ordered_truncate() since there are no
         * outstanding writes we need to flush.
         */
        if (!EXT4_I(inode)->jinode)
                return 0;
        return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
                                                   EXT4_I(inode)->jinode,
                                                   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned int offset,
                                unsigned int length);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
                                  int pextents);

/*
 * Test whether an inode is a fast symlink.
 */
int ext4_inode_is_fast_symlink(struct inode *inode)
{
        int ea_blocks = EXT4_I(inode)->i_file_acl ?
                EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;

        if (ext4_has_inline_data(inode))
                return 0;

        return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
                                int nblocks)
{
        int ret;

        /*
         * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
         * moment, get_block can be called only for blocks inside i_size since
         * the page cache has already been dropped and writes are blocked by
         * i_mutex.  So we can safely drop the i_data_sem here.
         */
        BUG_ON(EXT4_JOURNAL(inode) == NULL);
        jbd_debug(2, "restarting handle %p\n", handle);
        up_write(&EXT4_I(inode)->i_data_sem);
        ret = ext4_journal_restart(handle, nblocks);
        down_write(&EXT4_I(inode)->i_data_sem);
        ext4_discard_preallocations(inode);

        return ret;
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
        handle_t *handle;
        int err;

        trace_ext4_evict_inode(inode);

        if (inode->i_nlink) {
                /*
                 * When journalling data, dirty buffers are tracked only in the
                 * journal.  So although mm thinks everything is clean and
                 * ready for reaping, the inode might still have some pages to
                 * write in the running transaction or waiting to be
                 * checkpointed.  Thus calling jbd2_journal_invalidatepage()
                 * (via truncate_inode_pages()) to discard these buffers can
                 * cause data loss.  Also, even if we did not discard these
                 * buffers, we would have no way to find them after the inode
                 * is reaped and thus users could see stale data if they try to
                 * read them before the transaction is checkpointed.  So be
                 * careful and force everything to disk here...  We use
                 * ei->i_datasync_tid to store the newest transaction
                 * containing the inode's data.
                 *
                 * Note that directories do not have this problem because they
                 * don't use the page cache.
                 */
                if (ext4_should_journal_data(inode) &&
                    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
                    inode->i_ino != EXT4_JOURNAL_INO) {
                        journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
                        tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

                        jbd2_complete_transaction(journal, commit_tid);
                        filemap_write_and_wait(&inode->i_data);
                }
                truncate_inode_pages_final(&inode->i_data);

                WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count));
                goto no_delete;
        }

        if (is_bad_inode(inode))
                goto no_delete;
        dquot_initialize(inode);

        if (ext4_should_order_data(inode))
                ext4_begin_ordered_truncate(inode, 0);
        truncate_inode_pages_final(&inode->i_data);

        WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count));

        /*
         * Protect us against freezing - the iput() caller didn't have to
         * have any protection against it.
         */
        sb_start_intwrite(inode->i_sb);
        handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
                                    ext4_blocks_for_truncate(inode)+3);
        if (IS_ERR(handle)) {
                ext4_std_error(inode->i_sb, PTR_ERR(handle));
                /*
                 * If we're going to skip the normal cleanup, we still need to
                 * make sure that the in-core orphan linked list is properly
                 * cleaned up.
                 */
                ext4_orphan_del(NULL, inode);
                sb_end_intwrite(inode->i_sb);
                goto no_delete;
        }

        if (IS_SYNC(inode))
                ext4_handle_sync(handle);
        inode->i_size = 0;
        err = ext4_mark_inode_dirty(handle, inode);
        if (err) {
                ext4_warning(inode->i_sb,
                             "couldn't mark inode dirty (err %d)", err);
                goto stop_handle;
        }
        if (inode->i_blocks)
                ext4_truncate(inode);

        /*
         * ext4_ext_truncate() doesn't reserve any slop when it
         * restarts journal transactions; therefore there may not be
         * enough credits left in the handle to remove the inode from
         * the orphan list and set the dtime field.
         */
        if (!ext4_handle_has_enough_credits(handle, 3)) {
                err = ext4_journal_extend(handle, 3);
                if (err > 0)
                        err = ext4_journal_restart(handle, 3);
                if (err != 0) {
                        ext4_warning(inode->i_sb,
                                     "couldn't extend journal (err %d)", err);
                stop_handle:
                        ext4_journal_stop(handle);
                        ext4_orphan_del(NULL, inode);
                        sb_end_intwrite(inode->i_sb);
                        goto no_delete;
                }
        }

        /*
         * Kill off the orphan record which ext4_truncate created.
         * AKPM: I think this can be inside the above `if'.
         * Note that ext4_orphan_del() has to be able to cope with the
         * deletion of a non-existent orphan - this is because we don't
         * know if ext4_truncate() actually created an orphan record.
         * (Well, we could do this if we need to, but heck - it works)
         */
        ext4_orphan_del(handle, inode);
        EXT4_I(inode)->i_dtime = get_seconds();

        /*
         * One subtle ordering requirement: if anything has gone wrong
         * (transaction abort, IO errors, whatever), then we can still
         * do these next steps (the fs will already have been marked as
         * having errors), but we can't free the inode if the mark_dirty
         * fails.
         */
        if (ext4_mark_inode_dirty(handle, inode))
                /* If that failed, just do the required in-core inode clear. */
                ext4_clear_inode(inode);
        else
                ext4_free_inode(handle, inode);
        ext4_journal_stop(handle);
        sb_end_intwrite(inode->i_sb);
        return;
no_delete:
        ext4_clear_inode(inode);        /* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
        return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
                                  int used, int quota_claim)
{
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct ext4_inode_info *ei = EXT4_I(inode);

        spin_lock(&ei->i_block_reservation_lock);
        trace_ext4_da_update_reserve_space(inode, used, quota_claim);
        if (unlikely(used > ei->i_reserved_data_blocks)) {
                ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
                             "with only %d reserved data blocks",
                             __func__, inode->i_ino, used,
                             ei->i_reserved_data_blocks);
                WARN_ON(1);
                used = ei->i_reserved_data_blocks;
        }

        /* Update per-inode reservations */
        ei->i_reserved_data_blocks -= used;
        percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);

        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

        /* Update quota subsystem for data blocks */
        if (quota_claim)
                dquot_claim_block(inode, EXT4_C2B(sbi, used));
        else {
                /*
                 * We did fallocate at an offset that was already delayed
                 * allocated, so on delayed-allocation writeback we should
                 * not re-claim the quota for those fallocated blocks.
                 */
                dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
        }

        /*
         * If we have done all the pending block allocations and if
         * there aren't any writers on the inode, we can discard the
         * inode's preallocations.
         */
        if ((ei->i_reserved_data_blocks == 0) &&
            (atomic_read(&inode->i_writecount) == 0))
                ext4_discard_preallocations(inode);
}

static int __check_block_validity(struct inode *inode, const char *func,
                                  unsigned int line,
                                  struct ext4_map_blocks *map)
{
        if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
                                   map->m_len)) {
                ext4_error_inode(inode, func, line, map->m_pblk,
                                 "lblock %lu mapped to illegal pblock "
                                 "(length %d)", (unsigned long) map->m_lblk,
                                 map->m_len);
                return -EFSCORRUPTED;
        }
        return 0;
}

int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
                       ext4_lblk_t len)
{
        int ret;

        if (ext4_encrypted_inode(inode))
                return ext4_encrypted_zeroout(inode, lblk, pblk, len);

        ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
        if (ret > 0)
                ret = 0;

        return ret;
}

#define check_block_validity(inode, map)        \
        __check_block_validity((inode), __func__, __LINE__, (map))

#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
                                       struct inode *inode,
                                       struct ext4_map_blocks *es_map,
                                       struct ext4_map_blocks *map,
                                       int flags)
{
        int retval;

        map->m_flags = 0;
        /*
         * There is a race window in which the two results may differ:
         * we look up a block mapping in the extent status tree without
         * taking i_data_sem, so the unwritten extent could be converted
         * in the meantime (e.g. xfstests #223 with dioread_nolock
         * enabled).
         */
        down_read(&EXT4_I(inode)->i_data_sem);
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
                retval = ext4_ext_map_blocks(handle, inode, map, flags &
                                             EXT4_GET_BLOCKS_KEEP_SIZE);
        } else {
                retval = ext4_ind_map_blocks(handle, inode, map, flags &
                                             EXT4_GET_BLOCKS_KEEP_SIZE);
        }
        up_read((&EXT4_I(inode)->i_data_sem));

        /*
         * We don't check m_len because the extent will be collapsed in the
         * status tree, so the lengths might not be equal.
         */
        if (es_map->m_lblk != map->m_lblk ||
            es_map->m_flags != map->m_flags ||
            es_map->m_pblk != map->m_pblk) {
                printk("ES cache assertion failed for inode: %lu "
                       "es_cached ex [%d/%d/%llu/%x] != "
                       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
                       inode->i_ino, es_map->m_lblk, es_map->m_len,
                       es_map->m_pblk, es_map->m_flags, map->m_lblk,
                       map->m_len, map->m_pblk, map->m_flags,
                       retval, flags);
        }
}
#endif /* ES_AGGRESSIVE_TEST */

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extent based, it calls ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapping
 * based files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and unwritten, the
 * resulting buffer head is unmapped.  If create == 1, it will make sure
 * the buffer head is mapped.
 *
 * It returns 0 if a plain lookup failed (blocks have not been allocated);
 * in that case the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
                    struct ext4_map_blocks *map, int flags)
{
        struct extent_status es;
        int retval;
        int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
        struct ext4_map_blocks orig_map;

        memcpy(&orig_map, map, sizeof(*map));
#endif

        map->m_flags = 0;
        ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
                  "logical block %lu\n", inode->i_ino, flags, map->m_len,
                  (unsigned long) map->m_lblk);

        /*
         * ext4_map_blocks returns an int, and m_len is an unsigned int
         */
        if (unlikely(map->m_len > INT_MAX))
                map->m_len = INT_MAX;

        /* We can only handle block numbers less than EXT_MAX_BLOCKS */
        if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
                return -EFSCORRUPTED;

        /* Lookup extent status tree firstly */
        if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
                if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
                        map->m_pblk = ext4_es_pblock(&es) +
                                        map->m_lblk - es.es_lblk;
                        map->m_flags |= ext4_es_is_written(&es) ?
                                        EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
                        retval = es.es_len - (map->m_lblk - es.es_lblk);
                        if (retval > map->m_len)
                                retval = map->m_len;
                        map->m_len = retval;
                } else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
                        retval = 0;
                } else {
                        BUG_ON(1);
                }
#ifdef ES_AGGRESSIVE_TEST
                ext4_map_blocks_es_recheck(handle, inode, map,
                                           &orig_map, flags);
#endif
                goto found;
        }

        /*
         * Try to see if we can get the block without requesting a new
         * file system block.
         */
        down_read(&EXT4_I(inode)->i_data_sem);
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
                retval = ext4_ext_map_blocks(handle, inode, map, flags &
                                             EXT4_GET_BLOCKS_KEEP_SIZE);
        } else {
                retval = ext4_ind_map_blocks(handle, inode, map, flags &
                                             EXT4_GET_BLOCKS_KEEP_SIZE);
        }
        if (retval > 0) {
                unsigned int status;

                if (unlikely(retval != map->m_len)) {
                        ext4_warning(inode->i_sb,
                                     "ES len assertion failed for inode "
                                     "%lu: retval %d != map->m_len %d",
                                     inode->i_ino, retval, map->m_len);
                        WARN_ON(1);
                }

                status = map->m_flags & EXT4_MAP_UNWRITTEN ?
                                EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
                if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
                    !(status & EXTENT_STATUS_WRITTEN) &&
                    ext4_find_delalloc_range(inode, map->m_lblk,
                                             map->m_lblk + map->m_len - 1))
                        status |= EXTENT_STATUS_DELAYED;
                ret = ext4_es_insert_extent(inode, map->m_lblk,
                                            map->m_len, map->m_pblk, status);
                if (ret < 0)
                        retval = ret;
        }
        up_read((&EXT4_I(inode)->i_data_sem));

found:
        if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
                ret = check_block_validity(inode, map);
                if (ret != 0)
                        return ret;
        }

        /* If it is only a block(s) look up */
        if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
                return retval;

        /*
         * Return if the blocks have already been allocated.
         *
         * Note that if blocks have been preallocated,
         * ext4_ext_get_block() returns with create == 0
         * and the buffer head unmapped.
         */
        if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
                /*
                 * If we need to convert the extent to unwritten
                 * we continue and do the actual work in
                 * ext4_ext_map_blocks().
                 */
                if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
                        return retval;

        /*
         * Here we clear m_flags because after allocating a new extent,
         * it will be set again.
         */
        map->m_flags &= ~EXT4_MAP_FLAGS;

        /*
         * Allocating new blocks and/or writing to an unwritten extent
         * will possibly result in updating i_data, so we take
         * the write lock of i_data_sem, and call get_block()
         * with create == 1 flag.
         */
        down_write(&EXT4_I(inode)->i_data_sem);

        /*
         * We need to re-check EXT4_INODE_EXTENTS here because a migration
         * could have changed the inode type in between.
         */
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
                retval = ext4_ext_map_blocks(handle, inode, map, flags);
        } else {
                retval = ext4_ind_map_blocks(handle, inode, map, flags);

                if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
                        /*
                         * We allocated new blocks which will result in
                         * i_data's format changing.  Force the migrate
                         * to fail by clearing migrate flags.
                         */
                        ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
                }

                /*
                 * Update reserved blocks/metadata blocks after successful
                 * block allocation which had been deferred till now.  We
                 * don't support fallocate for non-extent files, so we can
                 * update the reserved space here.
                 */
                if ((retval > 0) &&
                    (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
                        ext4_da_update_reserve_space(inode, retval, 1);
        }

        if (retval > 0) {
                unsigned int status;

                if (unlikely(retval != map->m_len)) {
                        ext4_warning(inode->i_sb,
                                     "ES len assertion failed for inode "
                                     "%lu: retval %d != map->m_len %d",
                                     inode->i_ino, retval, map->m_len);
                        WARN_ON(1);
                }

                /*
                 * We have to zero out blocks before inserting them into the
                 * extent status tree.  Otherwise someone could look them up
                 * there and use them before they are really zeroed.
                 */
                if (flags & EXT4_GET_BLOCKS_ZERO &&
                    map->m_flags & EXT4_MAP_MAPPED &&
                    map->m_flags & EXT4_MAP_NEW) {
                        ret = ext4_issue_zeroout(inode, map->m_lblk,
                                                 map->m_pblk, map->m_len);
                        if (ret) {
                                retval = ret;
                                goto out_sem;
                        }
                }

                /*
                 * If the extent has been zeroed out, we don't need to update
                 * the extent status tree.
                 */
                if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
                    ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
                        if (ext4_es_is_written(&es))
                                goto out_sem;
                }
                status = map->m_flags & EXT4_MAP_UNWRITTEN ?
                                EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
                if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
                    !(status & EXTENT_STATUS_WRITTEN) &&
                    ext4_find_delalloc_range(inode, map->m_lblk,
                                             map->m_lblk + map->m_len - 1))
                        status |= EXTENT_STATUS_DELAYED;
                ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
                                            map->m_pblk, status);
                if (ret < 0) {
                        retval = ret;
                        goto out_sem;
                }
        }

out_sem:
        up_write((&EXT4_I(inode)->i_data_sem));
        if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
                ret = check_block_validity(inode, map);
                if (ret != 0)
                        return ret;
        }
        return retval;
}
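
/*
 * A minimal sketch of a read-only caller, for illustration only (assumed
 * caller-side code, not from this file): a plain lookup passes a NULL
 * handle and no EXT4_GET_BLOCKS_* flags.
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
 *	int ret = ext4_map_blocks(NULL, inode, &map, 0);
 *
 * ret > 0 with EXT4_MAP_MAPPED set means map.m_pblk holds the physical
 * block; ret == 0 means nothing is allocated there (a hole).
 */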

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

static int _ext4_get_block(struct inode *inode, sector_t iblock,
                           struct buffer_head *bh, int flags)
{
        handle_t *handle = ext4_journal_current_handle();
        struct ext4_map_blocks map;
        int ret = 0, started = 0;
        int dio_credits;

        if (ext4_has_inline_data(inode))
                return -ERANGE;

        map.m_lblk = iblock;
        map.m_len = bh->b_size >> inode->i_blkbits;

        if (flags && !handle) {
                /* Direct IO write... */
                if (map.m_len > DIO_MAX_BLOCKS)
                        map.m_len = DIO_MAX_BLOCKS;
                dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
                handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
                                            dio_credits);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        return ret;
                }
                started = 1;
        }

        ret = ext4_map_blocks(handle, inode, &map, flags);
        if (ret > 0) {
                ext4_io_end_t *io_end = ext4_inode_aio(inode);

                map_bh(bh, inode->i_sb, map.m_pblk);
                bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
                if (io_end && io_end->flag & EXT4_IO_END_UNWRITTEN)
                        set_buffer_defer_completion(bh);
                bh->b_size = inode->i_sb->s_blocksize * map.m_len;
                ret = 0;
        }
        if (started)
                ext4_journal_stop(handle);
        return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
                   struct buffer_head *bh, int create)
{
        return _ext4_get_block(inode, iblock, bh,
                               create ? EXT4_GET_BLOCKS_CREATE : 0);
}
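
/*
 * ext4_get_block() is the get_block_t callback handed to generic
 * buffer-layer helpers; e.g. ext4_write_begin() below passes it to
 * __block_write_begin(), which calls it once for each unmapped buffer
 * in the page that needs mapping (and allocation, since create is
 * non-zero on that path).
 */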

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
                                ext4_lblk_t block, int map_flags)
{
        struct ext4_map_blocks map;
        struct buffer_head *bh;
        int create = map_flags & EXT4_GET_BLOCKS_CREATE;
        int err;

        J_ASSERT(handle != NULL || create == 0);

        map.m_lblk = block;
        map.m_len = 1;
        err = ext4_map_blocks(handle, inode, &map, map_flags);

        if (err == 0)
                return create ? ERR_PTR(-ENOSPC) : NULL;
        if (err < 0)
                return ERR_PTR(err);

        bh = sb_getblk(inode->i_sb, map.m_pblk);
        if (unlikely(!bh))
                return ERR_PTR(-ENOMEM);
        if (map.m_flags & EXT4_MAP_NEW) {
                J_ASSERT(create != 0);
                J_ASSERT(handle != NULL);

                /*
                 * Now that we do not always journal data, we should
                 * keep in mind whether this should always journal the
                 * new buffer as metadata.  For now, regular file
                 * writes use ext4_get_block instead, so it's not a
                 * problem.
                 */
                lock_buffer(bh);
                BUFFER_TRACE(bh, "call get_create_access");
                err = ext4_journal_get_create_access(handle, bh);
                if (unlikely(err)) {
                        unlock_buffer(bh);
                        goto errout;
                }
                if (!buffer_uptodate(bh)) {
                        memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                        set_buffer_uptodate(bh);
                }
                unlock_buffer(bh);
                BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
                err = ext4_handle_dirty_metadata(handle, inode, bh);
                if (unlikely(err))
                        goto errout;
        } else
                BUFFER_TRACE(bh, "not a new buffer");
        return bh;
errout:
        brelse(bh);
        return ERR_PTR(err);
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
                               ext4_lblk_t block, int map_flags)
{
        struct buffer_head *bh;

        bh = ext4_getblk(handle, inode, block, map_flags);
        if (IS_ERR(bh))
                return bh;
        if (!bh || buffer_uptodate(bh))
                return bh;
        ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                return bh;
        put_bh(bh);
        return ERR_PTR(-EIO);
}

int ext4_walk_page_buffers(handle_t *handle,
                           struct buffer_head *head,
                           unsigned from,
                           unsigned to,
                           int *partial,
                           int (*fn)(handle_t *handle,
                                     struct buffer_head *bh))
{
        struct buffer_head *bh;
        unsigned block_start, block_end;
        unsigned blocksize = head->b_size;
        int err, ret = 0;
        struct buffer_head *next;

        for (bh = head, block_start = 0;
             ret == 0 && (bh != head || !block_start);
             block_start = block_end, bh = next) {
                next = bh->b_this_page;
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (partial && !buffer_uptodate(bh))
                                *partial = 1;
                        continue;
                }
                err = (*fn)(handle, bh);
                if (!ret)
                        ret = err;
        }
        return ret;
}

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage().  In that case, we
 * *know* that ext4_writepage() has generated enough buffer credits to do the
 * whole page.  So we won't block on the journal in that case, which is good,
 * because the caller may be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
int do_journal_get_write_access(handle_t *handle,
                                struct buffer_head *bh)
{
        int dirty = buffer_dirty(bh);
        int ret;

        if (!buffer_mapped(bh) || buffer_freed(bh))
                return 0;
        /*
         * __block_write_begin() could have dirtied some buffers.  Clean
         * the dirty bit as jbd2_journal_get_write_access() could complain
         * otherwise about fs integrity issues.  Setting of the dirty bit
         * by __block_write_begin() isn't a real problem here as we clear
         * the bit before releasing the page lock and thus writeback cannot
         * ever write the buffer.
         */
        if (dirty)
                clear_buffer_dirty(bh);
        BUFFER_TRACE(bh, "get write access");
        ret = ext4_journal_get_write_access(handle, bh);
        if (!ret && dirty)
                ret = ext4_handle_dirty_metadata(handle, NULL, bh);
        return ret;
}
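
/*
 * do_journal_get_write_access() is used as the callback argument of
 * ext4_walk_page_buffers(); see the data=journal branch of
 * ext4_write_begin() and __ext4_journalled_writepage() below for the
 * typical call sites.
 */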

#ifdef CONFIG_EXT4_FS_ENCRYPTION
static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
                                  get_block_t *get_block)
{
        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
        unsigned to = from + len;
        struct inode *inode = page->mapping->host;
        unsigned block_start, block_end;
        sector_t block;
        int err = 0;
        unsigned blocksize = inode->i_sb->s_blocksize;
        unsigned bbits;
        struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
        bool decrypt = false;

        BUG_ON(!PageLocked(page));
        BUG_ON(from > PAGE_CACHE_SIZE);
        BUG_ON(to > PAGE_CACHE_SIZE);
        BUG_ON(from > to);

        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);
        head = page_buffers(page);
        bbits = ilog2(blocksize);
        block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);

        for (bh = head, block_start = 0; bh != head || !block_start;
             block++, block_start = block_end, bh = bh->b_this_page) {
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (PageUptodate(page)) {
                                if (!buffer_uptodate(bh))
                                        set_buffer_uptodate(bh);
                        }
                        continue;
                }
                if (buffer_new(bh))
                        clear_buffer_new(bh);
                if (!buffer_mapped(bh)) {
                        WARN_ON(bh->b_size != blocksize);
                        err = get_block(inode, block, bh, 1);
                        if (err)
                                break;
                        if (buffer_new(bh)) {
                                unmap_underlying_metadata(bh->b_bdev,
                                                          bh->b_blocknr);
                                if (PageUptodate(page)) {
                                        clear_buffer_new(bh);
                                        set_buffer_uptodate(bh);
                                        mark_buffer_dirty(bh);
                                        continue;
                                }
                                if (block_end > to || block_start < from)
                                        zero_user_segments(page, to, block_end,
                                                           block_start, from);
                                continue;
                        }
                }
                if (PageUptodate(page)) {
                        if (!buffer_uptodate(bh))
                                set_buffer_uptodate(bh);
                        continue;
                }
                if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
                    !buffer_unwritten(bh) &&
                    (block_start < from || block_end > to)) {
                        ll_rw_block(READ, 1, &bh);
                        *wait_bh++ = bh;
                        decrypt = ext4_encrypted_inode(inode) &&
                                S_ISREG(inode->i_mode);
                }
        }
        /*
         * If we issued read requests, let them complete.
         */
        while (wait_bh > wait) {
                wait_on_buffer(*--wait_bh);
                if (!buffer_uptodate(*wait_bh))
                        err = -EIO;
        }
        if (unlikely(err))
                page_zero_new_buffers(page, from, to);
        else if (decrypt)
                err = ext4_decrypt(page);
        return err;
}
#endif
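
/*
 * ext4_block_write_begin() above is essentially __block_write_begin()
 * open-coded, so that on encrypted regular files the blocks read in
 * from disk can be decrypted (via ext4_decrypt()) before the caller
 * copies new data into the page.
 */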

static int ext4_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        int ret, needed_blocks;
        handle_t *handle;
        int retries = 0;
        struct page *page;
        pgoff_t index;
        unsigned from, to;

        trace_ext4_write_begin(inode, pos, len, flags);
        /*
         * Reserve one block more for addition to the orphan list in case
         * we allocate blocks but the write fails for some reason.
         */
        needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
        index = pos >> PAGE_CACHE_SHIFT;
        from = pos & (PAGE_CACHE_SIZE - 1);
        to = from + len;

        if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
                ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
                                                    flags, pagep);
                if (ret < 0)
                        return ret;
                if (ret == 1)
                        return 0;
        }

        /*
         * grab_cache_page_write_begin() can take a long time if the
         * system is thrashing due to memory pressure, or if the page
         * is being written back.  So grab it first before we start
         * the transaction handle.  This also allows us to allocate
         * the page (if needed) without using GFP_NOFS.
         */
retry_grab:
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;
        unlock_page(page);

retry_journal:
        handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
        if (IS_ERR(handle)) {
                page_cache_release(page);
                return PTR_ERR(handle);
        }

        lock_page(page);
        if (page->mapping != mapping) {
                /* The page got truncated from under us */
                unlock_page(page);
                page_cache_release(page);
                ext4_journal_stop(handle);
                goto retry_grab;
        }
        /* In case writeback began while the page was unlocked */
        wait_for_stable_page(page);

#ifdef CONFIG_EXT4_FS_ENCRYPTION
        if (ext4_should_dioread_nolock(inode))
                ret = ext4_block_write_begin(page, pos, len,
                                             ext4_get_block_write);
        else
                ret = ext4_block_write_begin(page, pos, len,
                                             ext4_get_block);
#else
        if (ext4_should_dioread_nolock(inode))
                ret = __block_write_begin(page, pos, len, ext4_get_block_write);
        else
                ret = __block_write_begin(page, pos, len, ext4_get_block);
#endif
        if (!ret && ext4_should_journal_data(inode)) {
                ret = ext4_walk_page_buffers(handle, page_buffers(page),
                                             from, to, NULL,
                                             do_journal_get_write_access);
        }

        if (ret) {
                unlock_page(page);
                /*
                 * __block_write_begin may have instantiated a few blocks
                 * outside i_size.  Trim these off again.  We don't need
                 * i_size_read because we hold i_mutex.
                 *
                 * Add the inode to the orphan list in case we crash before
                 * the truncate finishes.
                 */
                if (pos + len > inode->i_size && ext4_can_truncate(inode))
                        ext4_orphan_add(handle, inode);

                ext4_journal_stop(handle);
                if (pos + len > inode->i_size) {
                        ext4_truncate_failed_write(inode);
                        /*
                         * If the truncate failed early the inode might
                         * still be on the orphan list; we need to
                         * make sure the inode is removed from the
                         * orphan list in that case.
                         */
                        if (inode->i_nlink)
                                ext4_orphan_del(NULL, inode);
                }

                if (ret == -ENOSPC &&
                    ext4_should_retry_alloc(inode->i_sb, &retries))
                        goto retry_journal;
                page_cache_release(page);
                return ret;
        }
        *pagep = page;
        return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
        int ret;
        if (!buffer_mapped(bh) || buffer_freed(bh))
                return 0;
        set_buffer_uptodate(bh);
        ret = ext4_handle_dirty_metadata(handle, NULL, bh);
        clear_buffer_meta(bh);
        clear_buffer_prio(bh);
        return ret;
}
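
/*
 * write_end_fn() is the data=journal counterpart of
 * do_journal_get_write_access(): ext4_journalled_write_end() and
 * __ext4_journalled_writepage() walk the page's buffers with it so that
 * each buffer is filed with the running handle as metadata.
 */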

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - e.g., when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  Metadata
 * buffers are managed internally.
 */
static int ext4_write_end(struct file *file,
                          struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned copied,
                          struct page *page, void *fsdata)
{
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
        loff_t old_size = inode->i_size;
        int ret = 0, ret2;
        int i_size_changed = 0;

        trace_ext4_write_end(inode, pos, len, copied);
        if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) {
                ret = ext4_jbd2_file_inode(handle, inode);
                if (ret) {
                        unlock_page(page);
                        page_cache_release(page);
                        goto errout;
                }
        }

        if (ext4_has_inline_data(inode)) {
                ret = ext4_write_inline_data_end(inode, pos, len,
                                                 copied, page);
                if (ret < 0)
                        goto errout;
                copied = ret;
        } else
                copied = block_write_end(file, mapping, pos,
                                         len, copied, page, fsdata);
        /*
         * It's important to update i_size while still holding the page lock:
         * page writeout could otherwise come in and zero beyond i_size.
         */
        i_size_changed = ext4_update_inode_size(inode, pos + copied);
        unlock_page(page);
        page_cache_release(page);

        if (old_size < pos)
                pagecache_isize_extended(inode, old_size, pos);
        /*
         * Don't mark the inode dirty under page lock.  First, it unnecessarily
         * makes the holding time of page lock longer.  Second, it forces lock
         * ordering of page lock and transaction start for journaling
         * filesystems.
         */
        if (i_size_changed)
                ext4_mark_inode_dirty(handle, inode);

        if (pos + len > inode->i_size && ext4_can_truncate(inode))
                /*
                 * If we have allocated more blocks than we copied, we will
                 * have blocks allocated outside inode->i_size, so truncate
                 * them.
                 */
                ext4_orphan_add(handle, inode);
errout:
        ret2 = ext4_journal_stop(handle);
        if (!ret)
                ret = ret2;

        if (pos + len > inode->i_size) {
                ext4_truncate_failed_write(inode);
                /*
                 * If the truncate failed early the inode might still be
                 * on the orphan list; we need to make sure the inode
                 * is removed from the orphan list in that case.
                 */
                if (inode->i_nlink)
                        ext4_orphan_del(NULL, inode);
        }

        return ret ? ret : copied;
}

/*
 * This is a private version of page_zero_new_buffers() which doesn't
 * set the buffer to be dirty, since in data=journalled mode we need
 * to call ext4_handle_dirty_metadata() instead.
 */
static void zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
        unsigned int block_start = 0, block_end;
        struct buffer_head *head, *bh;

        bh = head = page_buffers(page);
        do {
                block_end = block_start + bh->b_size;
                if (buffer_new(bh)) {
                        if (block_end > from && block_start < to) {
                                if (!PageUptodate(page)) {
                                        unsigned start, size;

                                        start = max(from, block_start);
                                        size = min(to, block_end) - start;

                                        zero_user(page, start, size);
                                        set_buffer_uptodate(bh);
                                }
                                clear_buffer_new(bh);
                        }
                }
                block_start = block_end;
                bh = bh->b_this_page;
        } while (bh != head);
}

static int ext4_journalled_write_end(struct file *file,
                                     struct address_space *mapping,
                                     loff_t pos, unsigned len, unsigned copied,
                                     struct page *page, void *fsdata)
{
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
        loff_t old_size = inode->i_size;
        int ret = 0, ret2;
        int partial = 0;
        unsigned from, to;
        int size_changed = 0;

        trace_ext4_journalled_write_end(inode, pos, len, copied);
        from = pos & (PAGE_CACHE_SIZE - 1);
        to = from + len;

        BUG_ON(!ext4_handle_valid(handle));

        if (ext4_has_inline_data(inode))
                copied = ext4_write_inline_data_end(inode, pos, len,
                                                    copied, page);
        else {
                if (copied < len) {
                        if (!PageUptodate(page))
                                copied = 0;
                        zero_new_buffers(page, from+copied, to);
                }

                ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
                                             to, &partial, write_end_fn);
                if (!partial)
                        SetPageUptodate(page);
        }
        size_changed = ext4_update_inode_size(inode, pos + copied);
        ext4_set_inode_state(inode, EXT4_STATE_JDATA);
        EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
        unlock_page(page);
        page_cache_release(page);

        if (old_size < pos)
                pagecache_isize_extended(inode, old_size, pos);

        if (size_changed) {
                ret2 = ext4_mark_inode_dirty(handle, inode);
                if (!ret)
                        ret = ret2;
        }

        if (pos + len > inode->i_size && ext4_can_truncate(inode))
                /*
                 * If we have allocated more blocks than we copied, we will
                 * have blocks allocated outside inode->i_size, so truncate
                 * them.
                 */
                ext4_orphan_add(handle, inode);

        ret2 = ext4_journal_stop(handle);
        if (!ret)
                ret = ret2;
        if (pos + len > inode->i_size) {
                ext4_truncate_failed_write(inode);
                /*
                 * If the truncate failed early the inode might still be
                 * on the orphan list; we need to make sure the inode
                 * is removed from the orphan list in that case.
                 */
                if (inode->i_nlink)
                        ext4_orphan_del(NULL, inode);
        }

        return ret ? ret : copied;
}

/*
 * Reserve space for a single cluster
 */
static int ext4_da_reserve_space(struct inode *inode)
{
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct ext4_inode_info *ei = EXT4_I(inode);
        int ret;

        /*
         * We will charge metadata quota at writeout time; this saves
         * us from metadata over-estimation, though we may go over by
         * a small amount in the end.  Here we just reserve for data.
         */
        ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
        if (ret)
                return ret;

        spin_lock(&ei->i_block_reservation_lock);
        if (ext4_claim_free_clusters(sbi, 1, 0)) {
                spin_unlock(&ei->i_block_reservation_lock);
                dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
                return -ENOSPC;
        }
        ei->i_reserved_data_blocks++;
        trace_ext4_da_reserve_space(inode);
        spin_unlock(&ei->i_block_reservation_lock);

        return 0;       /* success */
}

static void ext4_da_release_space(struct inode *inode, int to_free)
{
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct ext4_inode_info *ei = EXT4_I(inode);

        if (!to_free)
                return;         /* Nothing to release, exit */

        spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

        trace_ext4_da_release_space(inode, to_free);
        if (unlikely(to_free > ei->i_reserved_data_blocks)) {
                /*
                 * If there aren't enough reserved blocks, then the
                 * counter is messed up somewhere.  Since this
                 * function is called from invalidate page, it's
                 * harmless to return without any action.
                 */
                ext4_warning(inode->i_sb, "ext4_da_release_space: "
                             "ino %lu, to_free %d with only %d reserved "
                             "data blocks", inode->i_ino, to_free,
                             ei->i_reserved_data_blocks);
                WARN_ON(1);
                to_free = ei->i_reserved_data_blocks;
        }
        ei->i_reserved_data_blocks -= to_free;

        /* update fs dirty data blocks counter */
        percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

        dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}
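
/*
 * Note the cluster vs. block units in the helpers above: reservations
 * are tracked in clusters while quota is charged in blocks, so
 * EXT4_C2B() converts on the way in and out.  As a worked example, on a
 * bigalloc filesystem with s_cluster_ratio == 16, releasing to_free == 2
 * clusters returns 2 * 16 = 32 blocks' worth of quota reservation.
 */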

static void ext4_da_page_release_reservation(struct page *page,
                                             unsigned int offset,
                                             unsigned int length)
{
        int to_release = 0, contiguous_blks = 0;
        struct buffer_head *head, *bh;
        unsigned int curr_off = 0;
        struct inode *inode = page->mapping->host;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        unsigned int stop = offset + length;
        int num_clusters;
        ext4_fsblk_t lblk;

        BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);

        head = page_buffers(page);
        bh = head;
        do {
                unsigned int next_off = curr_off + bh->b_size;

                if (next_off > stop)
                        break;

                if ((offset <= curr_off) && (buffer_delay(bh))) {
                        to_release++;
                        contiguous_blks++;
                        clear_buffer_delay(bh);
                } else if (contiguous_blks) {
                        lblk = page->index <<
                               (PAGE_CACHE_SHIFT - inode->i_blkbits);
                        lblk += (curr_off >> inode->i_blkbits) -
                                contiguous_blks;
                        ext4_es_remove_extent(inode, lblk, contiguous_blks);
                        contiguous_blks = 0;
                }
                curr_off = next_off;
        } while ((bh = bh->b_this_page) != head);

        if (contiguous_blks) {
                lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
                lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
                ext4_es_remove_extent(inode, lblk, contiguous_blks);
        }

        /* If we have released all the blocks belonging to a cluster, then we
         * need to release the reserved space for that cluster. */
        num_clusters = EXT4_NUM_B2C(sbi, to_release);
        while (num_clusters > 0) {
                lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
                        ((num_clusters - 1) << sbi->s_cluster_bits);
                if (sbi->s_cluster_ratio == 1 ||
                    !ext4_find_delalloc_cluster(inode, lblk))
                        ext4_da_release_space(inode, 1);

                num_clusters--;
        }
}

/*
 * Delayed allocation stuff
 */

struct mpage_da_data {
        struct inode *inode;
        struct writeback_control *wbc;

        pgoff_t first_page;     /* The first page to write */
        pgoff_t next_page;      /* Current page to examine */
        pgoff_t last_page;      /* Last page to examine */
        /*
         * Extent to map - this can be after first_page because that can be
         * fully mapped.  We somewhat abuse m_flags to store whether the
         * extent is delalloc or unwritten.
         */
        struct ext4_map_blocks map;
        struct ext4_io_submit io_submit;        /* IO submission data */
};

static void mpage_release_unused_pages(struct mpage_da_data *mpd,
                                       bool invalidate)
{
        int nr_pages, i;
        pgoff_t index, end;
        struct pagevec pvec;
        struct inode *inode = mpd->inode;
        struct address_space *mapping = inode->i_mapping;

        /* This is necessary when next_page == 0. */
        if (mpd->first_page >= mpd->next_page)
                return;

        index = mpd->first_page;
        end = mpd->next_page - 1;
        if (invalidate) {
                ext4_lblk_t start, last;
                start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
                last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits);
                ext4_es_remove_extent(inode, start, last - start + 1);
        }

        pagevec_init(&pvec, 0);
        while (index <= end) {
                nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
                if (nr_pages == 0)
                        break;
                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
                        if (page->index > end)
                                break;
                        BUG_ON(!PageLocked(page));
                        BUG_ON(PageWriteback(page));
                        if (invalidate) {
                                block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
                                ClearPageUptodate(page);
                        }
                        unlock_page(page);
                }
                index = pvec.pages[nr_pages - 1]->index + 1;
                pagevec_release(&pvec);
        }
}

static void ext4_print_free_blocks(struct inode *inode)
{
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct super_block *sb = inode->i_sb;
        struct ext4_inode_info *ei = EXT4_I(inode);

        ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
                 EXT4_C2B(EXT4_SB(inode->i_sb),
                          ext4_count_free_clusters(sb)));
        ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
        ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
                 (long long) EXT4_C2B(EXT4_SB(sb),
                        percpu_counter_sum(&sbi->s_freeclusters_counter)));
        ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
                 (long long) EXT4_C2B(EXT4_SB(sb),
                        percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
        ext4_msg(sb, KERN_CRIT, "Block reservation details");
        ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
                 ei->i_reserved_data_blocks);
        return;
}
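
/*
 * Callback for ext4_walk_page_buffers().  ext4_writepage() below uses it
 * to detect dirty buffers that still need allocation or extent
 * conversion (delayed or unwritten) and therefore cannot be written out
 * directly from that context.
 */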
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
        return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
}

/*
 * This function grabs code from the very beginning of
 * ext4_map_blocks, but assumes that the caller is from delayed write
 * time.  This function looks up the requested blocks and sets the
 * buffer delay bit under the protection of i_data_sem.
 */
static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
                              struct ext4_map_blocks *map,
                              struct buffer_head *bh)
{
        struct extent_status es;
        int retval;
        sector_t invalid_block = ~((sector_t) 0xffff);
#ifdef ES_AGGRESSIVE_TEST
        struct ext4_map_blocks orig_map;

        memcpy(&orig_map, map, sizeof(*map));
#endif

        if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
                invalid_block = ~0;

        map->m_flags = 0;
        ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
                  "logical block %lu\n", inode->i_ino, map->m_len,
                  (unsigned long) map->m_lblk);

        /* Lookup extent status tree firstly */
        if (ext4_es_lookup_extent(inode, iblock, &es)) {
                if (ext4_es_is_hole(&es)) {
                        retval = 0;
                        down_read(&EXT4_I(inode)->i_data_sem);
                        goto add_delayed;
                }

                /*
                 * A delayed extent could be allocated by fallocate,
                 * so we need to check it.
                 */
                if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
                        map_bh(bh, inode->i_sb, invalid_block);
                        set_buffer_new(bh);
                        set_buffer_delay(bh);
                        return 0;
                }

                map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
                retval = es.es_len - (iblock - es.es_lblk);
                if (retval > map->m_len)
                        retval = map->m_len;
                map->m_len = retval;
                if (ext4_es_is_written(&es))
                        map->m_flags |= EXT4_MAP_MAPPED;
                else if (ext4_es_is_unwritten(&es))
                        map->m_flags |= EXT4_MAP_UNWRITTEN;
                else
                        BUG_ON(1);

#ifdef ES_AGGRESSIVE_TEST
                ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
#endif
                return retval;
        }

        /*
         * Try to see if we can get the block without requesting a new
         * file system block.
         */
        down_read(&EXT4_I(inode)->i_data_sem);
        if (ext4_has_inline_data(inode))
                retval = 0;
        else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
                retval = ext4_ext_map_blocks(NULL, inode, map, 0);
        else
                retval = ext4_ind_map_blocks(NULL, inode, map, 0);

add_delayed:
        if (retval == 0) {
                int ret;
                /*
                 * XXX: __block_prepare_write() unmaps passed block,
                 * is it OK?
                 */
                /*
                 * If the block was allocated from a previously allocated
                 * cluster, then we don't need to reserve it again.  However
                 * we still need to reserve metadata for every block we're
                 * going to write.
                 */
                if (EXT4_SB(inode->i_sb)->s_cluster_ratio == 1 ||
                    !ext4_find_delalloc_cluster(inode, map->m_lblk)) {
                        ret = ext4_da_reserve_space(inode);
                        if (ret) {
                                /* not enough space to reserve */
                                retval = ret;
                                goto out_unlock;
                        }
                }

                ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
                                            ~0, EXTENT_STATUS_DELAYED);
                if (ret) {
                        retval = ret;
                        goto out_unlock;
                }

                map_bh(bh, inode->i_sb, invalid_block);
                set_buffer_new(bh);
                set_buffer_delay(bh);
        } else if (retval > 0) {
                int ret;
                unsigned int status;

                if (unlikely(retval != map->m_len)) {
                        ext4_warning(inode->i_sb,
                                     "ES len assertion failed for inode "
                                     "%lu: retval %d != map->m_len %d",
                                     inode->i_ino, retval, map->m_len);
                        WARN_ON(1);
                }

                status = map->m_flags & EXT4_MAP_UNWRITTEN ?
                                EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
                ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
                                            map->m_pblk, status);
                if (ret != 0)
                        retval = ret;
        }

out_unlock:
        up_read((&EXT4_I(inode)->i_data_sem));

        return retval;
}
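
/*
 * Illustrative delalloc flow (a simplified sketch, not literal code from
 * this file): a buffered write ends up in ext4_da_get_block_prep() below,
 * which via ext4_da_map_blocks() only reserves space and marks the bh
 * delayed; the actual block allocation is deferred until writeback, when
 * the mpage_* machinery later in this file maps the accumulated extent.
 */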

/*
 * This is a special get_block_t callback which is used by
 * ext4_da_write_begin().  It will either return a mapped block or
 * reserve space for a single block.
 *
 * For a delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
 * We also have b_blocknr = -1 and b_bdev initialized properly.
 *
 * For an unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
 * We also have b_blocknr = the physical block mapping the unwritten extent
 * and b_bdev initialized properly.
 */
int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
                           struct buffer_head *bh, int create)
{
        struct ext4_map_blocks map;
        int ret = 0;

        BUG_ON(create == 0);
        BUG_ON(bh->b_size != inode->i_sb->s_blocksize);

        map.m_lblk = iblock;
        map.m_len = 1;

        /*
         * First, we need to know whether the block is allocated already;
         * preallocated blocks are unmapped but should be treated
         * the same as allocated blocks.
         */
        ret = ext4_da_map_blocks(inode, iblock, &map, bh);
        if (ret <= 0)
                return ret;

        map_bh(bh, inode->i_sb, map.m_pblk);
        bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;

        if (buffer_unwritten(bh)) {
                /*
                 * A delayed write to an unwritten bh should be marked
                 * new and mapped.  Mapped ensures that we don't do
                 * get_block multiple times when we write to the same
                 * offset, and new ensures that we do a proper zero out
                 * for partial writes.
                 */
                set_buffer_new(bh);
                set_buffer_mapped(bh);
        }
        return 0;
}

static int bget_one(handle_t *handle, struct buffer_head *bh)
{
        get_bh(bh);
        return 0;
}

static int bput_one(handle_t *handle, struct buffer_head *bh)
{
        put_bh(bh);
        return 0;
}

static int __ext4_journalled_writepage(struct page *page,
                                       unsigned int len)
{
        struct address_space *mapping = page->mapping;
        struct inode *inode = mapping->host;
        struct buffer_head *page_bufs = NULL;
        handle_t *handle = NULL;
        int ret = 0, err = 0;
        int inline_data = ext4_has_inline_data(inode);
        struct buffer_head *inode_bh = NULL;

        ClearPageChecked(page);

        if (inline_data) {
                BUG_ON(page->index != 0);
                BUG_ON(len > ext4_get_max_inline_size(inode));
                inode_bh = ext4_journalled_write_inline_data(inode, len, page);
                if (inode_bh == NULL)
                        goto out;
        } else {
                page_bufs = page_buffers(page);
                if (!page_bufs) {
                        BUG();
                        goto out;
                }
                ext4_walk_page_buffers(handle, page_bufs, 0, len,
                                       NULL, bget_one);
        }
        /*
         * We need to release the page lock before we start the
         * journal, so grab a reference so the page won't disappear
         * out from under us.
         */
        get_page(page);
        unlock_page(page);

        handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
                                    ext4_writepage_trans_blocks(inode));
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                put_page(page);
                goto out_no_pagelock;
        }
        BUG_ON(!ext4_handle_valid(handle));

        lock_page(page);
        put_page(page);
        if (page->mapping != mapping) {
                /* The page got truncated from under us */
                ext4_journal_stop(handle);
                ret = 0;
                goto out;
        }

        if (inline_data) {
                BUFFER_TRACE(inode_bh, "get write access");
                ret = ext4_journal_get_write_access(handle, inode_bh);

                err = ext4_handle_dirty_metadata(handle, inode, inode_bh);

        } else {
                ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
                                             do_journal_get_write_access);

                err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
                                             write_end_fn);
        }
        if (ret == 0)
                ret = err;
        EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
        err = ext4_journal_stop(handle);
        if (!ret)
                ret = err;

        if (!ext4_has_inline_data(inode))
                ext4_walk_page_buffers(NULL, page_bufs, 0, len,
                                       NULL, bput_one);
        ext4_set_inode_state(inode, EXT4_STATE_JDATA);
out:
        unlock_page(page);
out_no_pagelock:
        brelse(inode_bh);
        return ret;
}

/*
 * Note that we don't need to start a transaction unless we're journaling data
 * because we should have holes filled from ext4_page_mkwrite().  We don't
 * even need to file the inode to the transaction's list in ordered mode
 * because if we are writing back data added by write(), the inode is already
 * there, and if we are writing back data modified via mmap(), no one
 * guarantees in which transaction the data will hit the disk.  In case we are
 * journaling data, we cannot start a transaction directly because transaction
 * start ranks above the page lock, so we have to do some magic.
 *
 * This function can get called via...
 *   - ext4_writepages after taking page lock (have journal handle)
 *   - journal_submit_inode_data_buffers (no journal handle)
 *   - shrink_page_list via the kswapd/direct reclaim (no journal handle)
 *   - grab_page_cache when doing write_begin (have journal handle)
 *
 * We don't do any block allocation in this function.  If we have a page with
 * multiple blocks, we need to write those buffer_heads that are mapped.  This
 * is important for mmap-based writes.  So if we do, with blocksize 1K,
 *	truncate(f, 1024);
 *	a = mmap(f, 0, 4096);
 *	a[0] = 'a';
 *	truncate(f, 4096);
 * we have in the page the first buffer_head mapped via the page_mkwrite
 * callback, but the other buffer_heads would be unmapped but dirty (dirtied
 * via do_wp_page).  So writepage should write the first block.  If we modify
 * the mmap area beyond 1024 we will again get a page fault and the
 * page_mkwrite callback will do the block allocation and mark the
 * buffer_heads mapped.
 *
 * We redirty the page if we have any buffer_heads that are either delayed or
 * unwritten in the page.
 *
 * We can get recursively called as shown below.
 *
 *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *		ext4_writepage()
 *
 * But since we don't do any block allocation we should not deadlock.
 * The page also has its dirty flag cleared, so we don't get a recursive
 * page_lock.
 */
static int ext4_writepage(struct page *page,
                          struct writeback_control *wbc)
{
        int ret = 0;
        loff_t size;
        unsigned int len;
        struct buffer_head *page_bufs = NULL;
        struct inode *inode = page->mapping->host;
        struct ext4_io_submit io_submit;
        bool keep_towrite = false;

        trace_ext4_writepage(page);
        size = i_size_read(inode);
        if (page->index == size >> PAGE_CACHE_SHIFT)
                len = size & ~PAGE_CACHE_MASK;
        else
                len = PAGE_CACHE_SIZE;

        page_bufs = page_buffers(page);
        /*
         * We cannot do block allocation or other extent handling in this
         * function.  If there are buffers needing that, we have to redirty
         * the page.  But we may reach here when we do a journal commit via
         * journal_submit_inode_data_buffers() and in that case we must write
         * allocated buffers to achieve data=ordered mode guarantees.
         *
         * Also, if there is only one buffer per page (the fs block
         * size == the page size), if one buffer needs block
         * allocation or needs to modify the extent tree to clear the
         * unwritten flag, we know that the page can't be written at
         * all, so we might as well refuse the write immediately.
         * Unfortunately if the block size != page size, we can't as
         * easily detect this case using ext4_walk_page_buffers(), but
         * for the extremely common case, this is an optimization that
         * skips a useless round trip through ext4_bio_write_page().
         */
        if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
                                   ext4_bh_delay_or_unwritten)) {
                redirty_page_for_writepage(wbc, page);
                if ((current->flags & PF_MEMALLOC) ||
                    (inode->i_sb->s_blocksize == PAGE_CACHE_SIZE)) {
                        /*
                         * For memory cleaning there's no point in writing only
                         * some buffers.  So just bail out.  Warn if we came
                         * here from direct reclaim.
1886 */ 1887 WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) 1888 == PF_MEMALLOC); 1889 unlock_page(page); 1890 return 0; 1891 } 1892 keep_towrite = true; 1893 } 1894 1895 if (PageChecked(page) && ext4_should_journal_data(inode)) 1896 /* 1897 * It's mmapped pagecache. Add buffers and journal it. There 1898 * doesn't seem much point in redirtying the page here. 1899 */ 1900 return __ext4_journalled_writepage(page, len); 1901 1902 ext4_io_submit_init(&io_submit, wbc); 1903 io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS); 1904 if (!io_submit.io_end) { 1905 redirty_page_for_writepage(wbc, page); 1906 unlock_page(page); 1907 return -ENOMEM; 1908 } 1909 ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite); 1910 ext4_io_submit(&io_submit); 1911 /* Drop io_end reference we got from init */ 1912 ext4_put_io_end_defer(io_submit.io_end); 1913 return ret; 1914 } 1915 1916 static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page) 1917 { 1918 int len; 1919 loff_t size = i_size_read(mpd->inode); 1920 int err; 1921 1922 BUG_ON(page->index != mpd->first_page); 1923 if (page->index == size >> PAGE_CACHE_SHIFT) 1924 len = size & ~PAGE_CACHE_MASK; 1925 else 1926 len = PAGE_CACHE_SIZE; 1927 clear_page_dirty_for_io(page); 1928 err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false); 1929 if (!err) 1930 mpd->wbc->nr_to_write--; 1931 mpd->first_page++; 1932 1933 return err; 1934 } 1935 1936 #define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay)) 1937 1938 /* 1939 * mballoc gives us at most this number of blocks... 1940 * XXX: That seems to be only a limitation of ext4_mb_normalize_request(). 1941 * The rest of mballoc seems to handle chunks up to full group size. 1942 */ 1943 #define MAX_WRITEPAGES_EXTENT_LEN 2048 1944 1945 /* 1946 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map 1947 * 1948 * @mpd - extent of blocks 1949 * @lblk - logical number of the block in the file 1950 * @bh - buffer head we want to add to the extent 1951 * 1952 * The function is used to collect contig. blocks in the same state. If the 1953 * buffer doesn't require mapping for writeback and we haven't started the 1954 * extent of buffers to map yet, the function returns 'true' immediately - the 1955 * caller can write the buffer right away. Otherwise the function returns true 1956 * if the block has been added to the extent, false if the block couldn't be 1957 * added. 1958 */ 1959 static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk, 1960 struct buffer_head *bh) 1961 { 1962 struct ext4_map_blocks *map = &mpd->map; 1963 1964 /* Buffer that doesn't need mapping for writeback? */ 1965 if (!buffer_dirty(bh) || !buffer_mapped(bh) || 1966 (!buffer_delay(bh) && !buffer_unwritten(bh))) { 1967 /* So far no extent to map => we write the buffer right away */ 1968 if (map->m_len == 0) 1969 return true; 1970 return false; 1971 } 1972 1973 /* First block in the extent? */ 1974 if (map->m_len == 0) { 1975 map->m_lblk = lblk; 1976 map->m_len = 1; 1977 map->m_flags = bh->b_state & BH_FLAGS; 1978 return true; 1979 } 1980 1981 /* Don't go larger than mballoc is willing to allocate */ 1982 if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN) 1983 return false; 1984 1985 /* Can we merge the block to our big extent? 
*/ 1986 if (lblk == map->m_lblk + map->m_len && 1987 (bh->b_state & BH_FLAGS) == map->m_flags) { 1988 map->m_len++; 1989 return true; 1990 } 1991 return false; 1992 } 1993 1994 /* 1995 * mpage_process_page_bufs - submit page buffers for IO or add them to extent 1996 * 1997 * @mpd - extent of blocks for mapping 1998 * @head - the first buffer in the page 1999 * @bh - buffer we should start processing from 2000 * @lblk - logical number of the block in the file corresponding to @bh 2001 * 2002 * Walk through page buffers from @bh up to @head (exclusive) and either submit 2003 * the page for IO if all buffers in this page were mapped and there's no 2004 * accumulated extent of buffers to map or add buffers in the page to the 2005 * extent of buffers to map. The function returns 1 if the caller can continue 2006 * by processing the next page, 0 if it should stop adding buffers to the 2007 * extent to map because we cannot extend it anymore. It can also return a value 2008 * < 0 in case of error during IO submission. 2009 */ 2010 static int mpage_process_page_bufs(struct mpage_da_data *mpd, 2011 struct buffer_head *head, 2012 struct buffer_head *bh, 2013 ext4_lblk_t lblk) 2014 { 2015 struct inode *inode = mpd->inode; 2016 int err; 2017 ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1) 2018 >> inode->i_blkbits; 2019 2020 do { 2021 BUG_ON(buffer_locked(bh)); 2022 2023 if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) { 2024 /* Found extent to map? */ 2025 if (mpd->map.m_len) 2026 return 0; 2027 /* Everything mapped so far and we hit EOF */ 2028 break; 2029 } 2030 } while (lblk++, (bh = bh->b_this_page) != head); 2031 /* So far everything mapped? Submit the page for IO. */ 2032 if (mpd->map.m_len == 0) { 2033 err = mpage_submit_page(mpd, head->b_page); 2034 if (err < 0) 2035 return err; 2036 } 2037 return lblk < blocks; 2038 } 2039 2040 /* 2041 * mpage_map_and_submit_buffers - update buffers corresponding to changed extent and 2042 * submit fully mapped pages for IO 2043 * 2044 * @mpd - description of extent to map, on return next extent to map 2045 * 2046 * Scan buffers corresponding to changed extent (we expect corresponding pages 2047 * to be already locked) and update buffer state according to new extent state. 2048 * We map delalloc buffers to their physical location, clear unwritten bits, 2049 * and mark buffers as uninit when we perform writes to unwritten extents 2050 * and do extent conversion after IO is finished. If the last page is not fully 2051 * mapped, we update @map to the next extent in the last page that needs 2052 * mapping. Otherwise we submit the page for IO.
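 *
 * (A worked example with illustrative numbers: with 4K pages and 1K blocks,
 * bpp_bits == PAGE_CACHE_SHIFT - i_blkbits == 2, so an extent with
 * m_lblk == 8 and m_len == 6 spans pages 8 >> 2 == 2 through
 * (8 + 6 - 1) >> 2 == 3, and the buffer walk starts at lblk == 2 << 2 == 8.)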
2053 */ 2054 static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd) 2055 { 2056 struct pagevec pvec; 2057 int nr_pages, i; 2058 struct inode *inode = mpd->inode; 2059 struct buffer_head *head, *bh; 2060 int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits; 2061 pgoff_t start, end; 2062 ext4_lblk_t lblk; 2063 sector_t pblock; 2064 int err; 2065 2066 start = mpd->map.m_lblk >> bpp_bits; 2067 end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits; 2068 lblk = start << bpp_bits; 2069 pblock = mpd->map.m_pblk; 2070 2071 pagevec_init(&pvec, 0); 2072 while (start <= end) { 2073 nr_pages = pagevec_lookup(&pvec, inode->i_mapping, start, 2074 PAGEVEC_SIZE); 2075 if (nr_pages == 0) 2076 break; 2077 for (i = 0; i < nr_pages; i++) { 2078 struct page *page = pvec.pages[i]; 2079 2080 if (page->index > end) 2081 break; 2082 /* Up to 'end' pages must be contiguous */ 2083 BUG_ON(page->index != start); 2084 bh = head = page_buffers(page); 2085 do { 2086 if (lblk < mpd->map.m_lblk) 2087 continue; 2088 if (lblk >= mpd->map.m_lblk + mpd->map.m_len) { 2089 /* 2090 * Buffer after end of mapped extent. 2091 * Find next buffer in the page to map. 2092 */ 2093 mpd->map.m_len = 0; 2094 mpd->map.m_flags = 0; 2095 /* 2096 * FIXME: If dioread_nolock supports 2097 * blocksize < pagesize, we need to make 2098 * sure we add size mapped so far to 2099 * io_end->size as the following call 2100 * can submit the page for IO. 2101 */ 2102 err = mpage_process_page_bufs(mpd, head, 2103 bh, lblk); 2104 pagevec_release(&pvec); 2105 if (err > 0) 2106 err = 0; 2107 return err; 2108 } 2109 if (buffer_delay(bh)) { 2110 clear_buffer_delay(bh); 2111 bh->b_blocknr = pblock++; 2112 } 2113 clear_buffer_unwritten(bh); 2114 } while (lblk++, (bh = bh->b_this_page) != head); 2115 2116 /* 2117 * FIXME: This is going to break if dioread_nolock 2118 * supports blocksize < pagesize as we will try to 2119 * convert potentially unmapped parts of inode. 2120 */ 2121 mpd->io_submit.io_end->size += PAGE_CACHE_SIZE; 2122 /* Page fully mapped - let IO run! */ 2123 err = mpage_submit_page(mpd, page); 2124 if (err < 0) { 2125 pagevec_release(&pvec); 2126 return err; 2127 } 2128 start++; 2129 } 2130 pagevec_release(&pvec); 2131 } 2132 /* Extent fully mapped and matches with page boundary. We are done. */ 2133 mpd->map.m_len = 0; 2134 mpd->map.m_flags = 0; 2135 return 0; 2136 } 2137 2138 static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd) 2139 { 2140 struct inode *inode = mpd->inode; 2141 struct ext4_map_blocks *map = &mpd->map; 2142 int get_blocks_flags; 2143 int err, dioread_nolock; 2144 2145 trace_ext4_da_write_pages_extent(inode, map); 2146 /* 2147 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or 2148 * to convert an unwritten extent to be initialized (in the case 2149 * where we have written into one or more preallocated blocks). It is 2150 * possible that we're going to need more metadata blocks than 2151 * previously reserved. However, we must not fail because we're in 2152 * writeback and there is nothing we can do about it, so it might result 2153 * in data loss. So use reserved blocks to allocate metadata if 2154 * possible. 2155 * 2156 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if 2157 * the blocks in question are delalloc blocks. This indicates 2158 * that the blocks and quotas have already been checked when 2159 * the data was copied into the page cache.
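 *
 * (For illustration: on a dioread_nolock mount, a delayed buffer is thus
 * mapped below with EXT4_GET_BLOCKS_CREATE | EXT4_GET_BLOCKS_METADATA_NOFAIL
 * | EXT4_GET_BLOCKS_IO_CREATE_EXT | EXT4_GET_BLOCKS_DELALLOC_RESERVE.)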
2160 */ 2161 get_blocks_flags = EXT4_GET_BLOCKS_CREATE | 2162 EXT4_GET_BLOCKS_METADATA_NOFAIL; 2163 dioread_nolock = ext4_should_dioread_nolock(inode); 2164 if (dioread_nolock) 2165 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; 2166 if (map->m_flags & (1 << BH_Delay)) 2167 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; 2168 2169 err = ext4_map_blocks(handle, inode, map, get_blocks_flags); 2170 if (err < 0) 2171 return err; 2172 if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) { 2173 if (!mpd->io_submit.io_end->handle && 2174 ext4_handle_valid(handle)) { 2175 mpd->io_submit.io_end->handle = handle->h_rsv_handle; 2176 handle->h_rsv_handle = NULL; 2177 } 2178 ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end); 2179 } 2180 2181 BUG_ON(map->m_len == 0); 2182 if (map->m_flags & EXT4_MAP_NEW) { 2183 struct block_device *bdev = inode->i_sb->s_bdev; 2184 int i; 2185 2186 for (i = 0; i < map->m_len; i++) 2187 unmap_underlying_metadata(bdev, map->m_pblk + i); 2188 } 2189 return 0; 2190 } 2191 2192 /* 2193 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length 2194 * mpd->len and submit pages underlying it for IO 2195 * 2196 * @handle - handle for journal operations 2197 * @mpd - extent to map 2198 * @give_up_on_write - we set this to true iff there is a fatal error and there 2199 * is no hope of writing the data. The caller should discard 2200 * dirty pages to avoid infinite loops. 2201 * 2202 * The function maps extent starting at mpd->lblk of length mpd->len. If it is 2203 * delayed, blocks are allocated, if it is unwritten, we may need to convert 2204 * them to initialized or split the described range from a larger unwritten 2205 * extent. Note that we need not map all the described range since allocation 2206 * can return fewer blocks or the range is covered by more unwritten extents. We 2207 * cannot map more because we are limited by reserved transaction credits. On 2208 * the other hand we always make sure that the last touched page is fully 2209 * mapped so that it can be written out (and thus forward progress is 2210 * guaranteed). After mapping we submit all mapped pages for IO. 2211 */ 2212 static int mpage_map_and_submit_extent(handle_t *handle, 2213 struct mpage_da_data *mpd, 2214 bool *give_up_on_write) 2215 { 2216 struct inode *inode = mpd->inode; 2217 struct ext4_map_blocks *map = &mpd->map; 2218 int err; 2219 loff_t disksize; 2220 int progress = 0; 2221 2222 mpd->io_submit.io_end->offset = 2223 ((loff_t)map->m_lblk) << inode->i_blkbits; 2224 do { 2225 err = mpage_map_one_extent(handle, mpd); 2226 if (err < 0) { 2227 struct super_block *sb = inode->i_sb; 2228 2229 if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED) 2230 goto invalidate_dirty_pages; 2231 /* 2232 * Let the upper layers retry transient errors. 2233 * In the case of ENOSPC, if ext4_count_free_clusters() 2234 * is non-zero, a commit should free up blocks. 2235 */ 2236 if ((err == -ENOMEM) || 2237 (err == -ENOSPC && ext4_count_free_clusters(sb))) { 2238 if (progress) 2239 goto update_disksize; 2240 return err; 2241 } 2242 ext4_msg(sb, KERN_CRIT, 2243 "Delayed block allocation failed for " 2244 "inode %lu at logical offset %llu with" 2245 " max blocks %u with error %d", 2246 inode->i_ino, 2247 (unsigned long long)map->m_lblk, 2248 (unsigned)map->m_len, -err); 2249 ext4_msg(sb, KERN_CRIT, 2250 "This should not happen!!
Data will " 2251 "be lost\n"); 2252 if (err == -ENOSPC) 2253 ext4_print_free_blocks(inode); 2254 invalidate_dirty_pages: 2255 *give_up_on_write = true; 2256 return err; 2257 } 2258 progress = 1; 2259 /* 2260 * Update buffer state, submit mapped pages, and get us new 2261 * extent to map 2262 */ 2263 err = mpage_map_and_submit_buffers(mpd); 2264 if (err < 0) 2265 goto update_disksize; 2266 } while (map->m_len); 2267 2268 update_disksize: 2269 /* 2270 * Update on-disk size after IO is submitted. Races with 2271 * truncate are avoided by checking i_size under i_data_sem. 2272 */ 2273 disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT; 2274 if (disksize > EXT4_I(inode)->i_disksize) { 2275 int err2; 2276 loff_t i_size; 2277 2278 down_write(&EXT4_I(inode)->i_data_sem); 2279 i_size = i_size_read(inode); 2280 if (disksize > i_size) 2281 disksize = i_size; 2282 if (disksize > EXT4_I(inode)->i_disksize) 2283 EXT4_I(inode)->i_disksize = disksize; 2284 err2 = ext4_mark_inode_dirty(handle, inode); 2285 up_write(&EXT4_I(inode)->i_data_sem); 2286 if (err2) 2287 ext4_error(inode->i_sb, 2288 "Failed to mark inode %lu dirty", 2289 inode->i_ino); 2290 if (!err) 2291 err = err2; 2292 } 2293 return err; 2294 } 2295 2296 /* 2297 * Calculate the total number of credits to reserve for one writepages 2298 * iteration. This is called from ext4_writepages(). We map an extent of 2299 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping 2300 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN + 2301 * bpp - 1 blocks in bpp different extents. 2302 */ 2303 static int ext4_da_writepages_trans_blocks(struct inode *inode) 2304 { 2305 int bpp = ext4_journal_blocks_per_page(inode); 2306 2307 return ext4_meta_trans_blocks(inode, 2308 MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp); 2309 } 2310 2311 /* 2312 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages 2313 * and underlying extent to map 2314 * 2315 * @mpd - where to look for pages 2316 * 2317 * Walk dirty pages in the mapping. If they are fully mapped, submit them for 2318 * IO immediately. When we find a page which isn't mapped we start accumulating 2319 * extent of buffers underlying these pages that needs mapping (formed by 2320 * either delayed or unwritten buffers). We also lock the pages containing 2321 * these buffers. The extent found is returned in @mpd structure (starting at 2322 * mpd->lblk with length mpd->len blocks). 2323 * 2324 * Note that this function can attach bios to one io_end structure which are 2325 * neither logically nor physically contiguous. Although it may seem as an 2326 * unnecessary complication, it is actually inevitable in blocksize < pagesize 2327 * case as we need to track IO to all buffers underlying a page in one io_end. 
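 *
 * (Concretely: with 1K blocks and 4K pages, the four buffers of a single
 * page may be mapped by several physically discontiguous extents, so
 * several bios can end up attached to the one io_end tracking that page.)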
2328 */ 2329 static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) 2330 { 2331 struct address_space *mapping = mpd->inode->i_mapping; 2332 struct pagevec pvec; 2333 unsigned int nr_pages; 2334 long left = mpd->wbc->nr_to_write; 2335 pgoff_t index = mpd->first_page; 2336 pgoff_t end = mpd->last_page; 2337 int tag; 2338 int i, err = 0; 2339 int blkbits = mpd->inode->i_blkbits; 2340 ext4_lblk_t lblk; 2341 struct buffer_head *head; 2342 2343 if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages) 2344 tag = PAGECACHE_TAG_TOWRITE; 2345 else 2346 tag = PAGECACHE_TAG_DIRTY; 2347 2348 pagevec_init(&pvec, 0); 2349 mpd->map.m_len = 0; 2350 mpd->next_page = index; 2351 while (index <= end) { 2352 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, 2353 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); 2354 if (nr_pages == 0) 2355 goto out; 2356 2357 for (i = 0; i < nr_pages; i++) { 2358 struct page *page = pvec.pages[i]; 2359 2360 /* 2361 * At this point, the page may be truncated or 2362 * invalidated (changing page->mapping to NULL), or 2363 * even swizzled back from swapper_space to tmpfs file 2364 * mapping. However, page->index will not change 2365 * because we have a reference on the page. 2366 */ 2367 if (page->index > end) 2368 goto out; 2369 2370 /* 2371 * Accumulated enough dirty pages? This doesn't apply 2372 * to WB_SYNC_ALL mode. For integrity sync we have to 2373 * keep going because someone may be concurrently 2374 * dirtying pages, and we might have synced a lot of 2375 * newly appeared dirty pages, but have not synced all 2376 * of the old dirty pages. 2377 */ 2378 if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0) 2379 goto out; 2380 2381 /* If we can't merge this page, we are done. */ 2382 if (mpd->map.m_len > 0 && mpd->next_page != page->index) 2383 goto out; 2384 2385 lock_page(page); 2386 /* 2387 * If the page is no longer dirty, or its mapping no 2388 * longer corresponds to inode we are writing (which 2389 * means it has been truncated or invalidated), or the 2390 * page is already under writeback and we are not doing 2391 * a data integrity writeback, skip the page 2392 */ 2393 if (!PageDirty(page) || 2394 (PageWriteback(page) && 2395 (mpd->wbc->sync_mode == WB_SYNC_NONE)) || 2396 unlikely(page->mapping != mapping)) { 2397 unlock_page(page); 2398 continue; 2399 } 2400 2401 wait_on_page_writeback(page); 2402 BUG_ON(PageWriteback(page)); 2403 2404 if (mpd->map.m_len == 0) 2405 mpd->first_page = page->index; 2406 mpd->next_page = page->index + 1; 2407 /* Add all dirty buffers to mpd */ 2408 lblk = ((ext4_lblk_t)page->index) << 2409 (PAGE_CACHE_SHIFT - blkbits); 2410 head = page_buffers(page); 2411 err = mpage_process_page_bufs(mpd, head, head, lblk); 2412 if (err <= 0) 2413 goto out; 2414 err = 0; 2415 left--; 2416 } 2417 pagevec_release(&pvec); 2418 cond_resched(); 2419 } 2420 return 0; 2421 out: 2422 pagevec_release(&pvec); 2423 return err; 2424 } 2425 2426 static int __writepage(struct page *page, struct writeback_control *wbc, 2427 void *data) 2428 { 2429 struct address_space *mapping = data; 2430 int ret = ext4_writepage(page, wbc); 2431 mapping_set_error(mapping, ret); 2432 return ret; 2433 } 2434 2435 static int ext4_writepages(struct address_space *mapping, 2436 struct writeback_control *wbc) 2437 { 2438 pgoff_t writeback_index = 0; 2439 long nr_to_write = wbc->nr_to_write; 2440 int range_whole = 0; 2441 int cycled = 1; 2442 handle_t *handle = NULL; 2443 struct mpage_da_data mpd; 2444 struct inode *inode = mapping->host; 2445 int 
needed_blocks, rsv_blocks = 0, ret = 0; 2446 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); 2447 bool done; 2448 struct blk_plug plug; 2449 bool give_up_on_write = false; 2450 2451 trace_ext4_writepages(inode, wbc); 2452 2453 /* 2454 * No pages to write? This is mainly a kludge to avoid starting 2455 * a transaction for special inodes like journal inode on last iput() 2456 * because that could violate lock ordering on umount 2457 */ 2458 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 2459 goto out_writepages; 2460 2461 if (ext4_should_journal_data(inode)) { 2462 struct blk_plug plug; 2463 2464 blk_start_plug(&plug); 2465 ret = write_cache_pages(mapping, wbc, __writepage, mapping); 2466 blk_finish_plug(&plug); 2467 goto out_writepages; 2468 } 2469 2470 /* 2471 * If the filesystem has aborted, it is read-only, so return 2472 * right away instead of dumping stack traces later on that 2473 * will obscure the real source of the problem. We test 2474 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because 2475 * the latter could be true if the filesystem is mounted 2476 * read-only, and in that case, ext4_writepages should 2477 * *never* be called, so if that ever happens, we would want 2478 * the stack trace. 2479 */ 2480 if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) { 2481 ret = -EROFS; 2482 goto out_writepages; 2483 } 2484 2485 if (ext4_should_dioread_nolock(inode)) { 2486 /* 2487 * We may need to convert up to one extent per block in 2488 * the page and we may dirty the inode. 2489 */ 2490 rsv_blocks = 1 + (PAGE_CACHE_SIZE >> inode->i_blkbits); 2491 } 2492 2493 /* 2494 * If we have inline data and arrive here, it means that 2495 * we will soon create the block for the 1st page, so 2496 * we'd better clear the inline data here. 2497 */ 2498 if (ext4_has_inline_data(inode)) { 2499 /* Just inode will be modified... */ 2500 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1); 2501 if (IS_ERR(handle)) { 2502 ret = PTR_ERR(handle); 2503 goto out_writepages; 2504 } 2505 BUG_ON(ext4_test_inode_state(inode, 2506 EXT4_STATE_MAY_INLINE_DATA)); 2507 ext4_destroy_inline_data(handle, inode); 2508 ext4_journal_stop(handle); 2509 } 2510 2511 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 2512 range_whole = 1; 2513 2514 if (wbc->range_cyclic) { 2515 writeback_index = mapping->writeback_index; 2516 if (writeback_index) 2517 cycled = 0; 2518 mpd.first_page = writeback_index; 2519 mpd.last_page = -1; 2520 } else { 2521 mpd.first_page = wbc->range_start >> PAGE_CACHE_SHIFT; 2522 mpd.last_page = wbc->range_end >> PAGE_CACHE_SHIFT; 2523 } 2524 2525 mpd.inode = inode; 2526 mpd.wbc = wbc; 2527 ext4_io_submit_init(&mpd.io_submit, wbc); 2528 retry: 2529 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 2530 tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page); 2531 done = false; 2532 blk_start_plug(&plug); 2533 while (!done && mpd.first_page <= mpd.last_page) { 2534 /* For each extent of pages we use new io_end */ 2535 mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL); 2536 if (!mpd.io_submit.io_end) { 2537 ret = -ENOMEM; 2538 break; 2539 } 2540 2541 /* 2542 * We have two constraints: We find one extent to map and we 2543 * must always write out whole page (makes a difference when 2544 * blocksize < pagesize) so that we don't block on IO when we 2545 * try to write out the rest of the page. Journalled mode is 2546 * not supported by delalloc. 
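 *
 * (Worked example, illustrative numbers only: with 4K pages and 1K blocks,
 * bpp == 4, so ext4_da_writepages_trans_blocks() below sizes the handle
 * for mapping up to MAX_WRITEPAGES_EXTENT_LEN + 4 - 1 == 2051 blocks in
 * at most 4 separate extents.)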
2547 */ 2548 BUG_ON(ext4_should_journal_data(inode)); 2549 needed_blocks = ext4_da_writepages_trans_blocks(inode); 2550 2551 /* start a new transaction */ 2552 handle = ext4_journal_start_with_reserve(inode, 2553 EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks); 2554 if (IS_ERR(handle)) { 2555 ret = PTR_ERR(handle); 2556 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " 2557 "%ld pages, ino %lu; err %d", __func__, 2558 wbc->nr_to_write, inode->i_ino, ret); 2559 /* Release allocated io_end */ 2560 ext4_put_io_end(mpd.io_submit.io_end); 2561 break; 2562 } 2563 2564 trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc); 2565 ret = mpage_prepare_extent_to_map(&mpd); 2566 if (!ret) { 2567 if (mpd.map.m_len) 2568 ret = mpage_map_and_submit_extent(handle, &mpd, 2569 &give_up_on_write); 2570 else { 2571 /* 2572 * We scanned the whole range (or exhausted 2573 * nr_to_write), submitted what was mapped and 2574 * didn't find anything needing mapping. We are 2575 * done. 2576 */ 2577 done = true; 2578 } 2579 } 2580 ext4_journal_stop(handle); 2581 /* Submit prepared bio */ 2582 ext4_io_submit(&mpd.io_submit); 2583 /* Unlock pages we didn't use */ 2584 mpage_release_unused_pages(&mpd, give_up_on_write); 2585 /* Drop our io_end reference we got from init */ 2586 ext4_put_io_end(mpd.io_submit.io_end); 2587 2588 if (ret == -ENOSPC && sbi->s_journal) { 2589 /* 2590 * Commit the transaction which would 2591 * free blocks released in the transaction 2592 * and try again 2593 */ 2594 jbd2_journal_force_commit_nested(sbi->s_journal); 2595 ret = 0; 2596 continue; 2597 } 2598 /* Fatal error - ENOMEM, EIO... */ 2599 if (ret) 2600 break; 2601 } 2602 blk_finish_plug(&plug); 2603 if (!ret && !cycled && wbc->nr_to_write > 0) { 2604 cycled = 1; 2605 mpd.last_page = writeback_index - 1; 2606 mpd.first_page = 0; 2607 goto retry; 2608 } 2609 2610 /* Update index */ 2611 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 2612 /* 2613 * Set the writeback_index so that range_cyclic 2614 * mode will write it back later 2615 */ 2616 mapping->writeback_index = mpd.first_page; 2617 2618 out_writepages: 2619 trace_ext4_writepages_result(inode, wbc, ret, 2620 nr_to_write - wbc->nr_to_write); 2621 return ret; 2622 } 2623 2624 static int ext4_nonda_switch(struct super_block *sb) 2625 { 2626 s64 free_clusters, dirty_clusters; 2627 struct ext4_sb_info *sbi = EXT4_SB(sb); 2628 2629 /* 2630 * Switch to non-delalloc mode if we are running low 2631 * on free blocks. The free-block accounting via percpu 2632 * counters can get slightly wrong with percpu_counter_batch getting 2633 * accumulated on each CPU without updating global counters. 2634 * Delalloc needs accurate free-block accounting. So switch 2635 * to non-delalloc when we are near the error range. 2636 */ 2637 free_clusters = 2638 percpu_counter_read_positive(&sbi->s_freeclusters_counter); 2639 dirty_clusters = 2640 percpu_counter_read_positive(&sbi->s_dirtyclusters_counter); 2641 /* 2642 * Start pushing delalloc when 1/2 of free blocks are dirty.
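 * (Illustrative numbers: with free_clusters == 1000, writeback is kicked
 * once dirty_clusters exceeds 500; the switch to nodelalloc below happens
 * once 2 * free < 3 * dirty, i.e. dirty_clusters >= 667, or once
 * free_clusters drops under dirty_clusters + EXT4_FREECLUSTERS_WATERMARK.)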
2643 */ 2644 if (dirty_clusters && (free_clusters < 2 * dirty_clusters)) 2645 try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE); 2646 2647 if (2 * free_clusters < 3 * dirty_clusters || 2648 free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) { 2649 /* 2650 * free-block count is less than 150% of dirty blocks 2651 * or free blocks are less than the watermark 2652 */ 2653 return 1; 2654 } 2655 return 0; 2656 } 2657 2658 /* We always reserve for an inode update; the superblock could be there too */ 2659 static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len) 2660 { 2661 if (likely(ext4_has_feature_large_file(inode->i_sb))) 2662 return 1; 2663 2664 if (pos + len <= 0x7fffffffULL) 2665 return 1; 2666 2667 /* We might need to update the superblock to set LARGE_FILE */ 2668 return 2; 2669 } 2670 2671 static int ext4_da_write_begin(struct file *file, struct address_space *mapping, 2672 loff_t pos, unsigned len, unsigned flags, 2673 struct page **pagep, void **fsdata) 2674 { 2675 int ret, retries = 0; 2676 struct page *page; 2677 pgoff_t index; 2678 struct inode *inode = mapping->host; 2679 handle_t *handle; 2680 2681 index = pos >> PAGE_CACHE_SHIFT; 2682 2683 if (ext4_nonda_switch(inode->i_sb)) { 2684 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; 2685 return ext4_write_begin(file, mapping, pos, 2686 len, flags, pagep, fsdata); 2687 } 2688 *fsdata = (void *)0; 2689 trace_ext4_da_write_begin(inode, pos, len, flags); 2690 2691 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { 2692 ret = ext4_da_write_inline_data_begin(mapping, inode, 2693 pos, len, flags, 2694 pagep, fsdata); 2695 if (ret < 0) 2696 return ret; 2697 if (ret == 1) 2698 return 0; 2699 } 2700 2701 /* 2702 * grab_cache_page_write_begin() can take a long time if the 2703 * system is thrashing due to memory pressure, or if the page 2704 * is being written back. So grab it first before we start 2705 * the transaction handle. This also allows us to allocate 2706 * the page (if needed) without using GFP_NOFS. 2707 */ 2708 retry_grab: 2709 page = grab_cache_page_write_begin(mapping, index, flags); 2710 if (!page) 2711 return -ENOMEM; 2712 unlock_page(page); 2713 2714 /* 2715 * With delayed allocation, we don't log the i_disksize update 2716 * if there is delayed block allocation. But we still need 2717 * to journal the i_disksize update for writes to the end of 2718 * the file that hit an already-mapped buffer. 2719 */ 2720 retry_journal: 2721 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 2722 ext4_da_write_credits(inode, pos, len)); 2723 if (IS_ERR(handle)) { 2724 page_cache_release(page); 2725 return PTR_ERR(handle); 2726 } 2727 2728 lock_page(page); 2729 if (page->mapping != mapping) { 2730 /* The page got truncated from under us */ 2731 unlock_page(page); 2732 page_cache_release(page); 2733 ext4_journal_stop(handle); 2734 goto retry_grab; 2735 } 2736 /* In case writeback began while the page was unlocked */ 2737 wait_for_stable_page(page); 2738 2739 #ifdef CONFIG_EXT4_FS_ENCRYPTION 2740 ret = ext4_block_write_begin(page, pos, len, 2741 ext4_da_get_block_prep); 2742 #else 2743 ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep); 2744 #endif 2745 if (ret < 0) { 2746 unlock_page(page); 2747 ext4_journal_stop(handle); 2748 /* 2749 * block_write_begin may have instantiated a few blocks 2750 * outside i_size. Trim these off again. Don't need 2751 * i_size_read because we hold i_mutex.
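 *
 * (For example, with 1K blocks and illustrative offsets: a failed write at
 * pos == 4094 with len == 4 against a 4095-byte file may have instantiated
 * the block at offset 4096; since pos + len > i_size, the call below
 * trims it back.)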
2752 */ 2753 if (pos + len > inode->i_size) 2754 ext4_truncate_failed_write(inode); 2755 2756 if (ret == -ENOSPC && 2757 ext4_should_retry_alloc(inode->i_sb, &retries)) 2758 goto retry_journal; 2759 2760 page_cache_release(page); 2761 return ret; 2762 } 2763 2764 *pagep = page; 2765 return ret; 2766 } 2767 2768 /* 2769 * Check if we should update i_disksize 2770 * when writing to the end of the file without requiring block allocation 2771 */ 2772 static int ext4_da_should_update_i_disksize(struct page *page, 2773 unsigned long offset) 2774 { 2775 struct buffer_head *bh; 2776 struct inode *inode = page->mapping->host; 2777 unsigned int idx; 2778 int i; 2779 2780 bh = page_buffers(page); 2781 idx = offset >> inode->i_blkbits; 2782 2783 for (i = 0; i < idx; i++) 2784 bh = bh->b_this_page; 2785 2786 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh)) 2787 return 0; 2788 return 1; 2789 } 2790 2791 static int ext4_da_write_end(struct file *file, 2792 struct address_space *mapping, 2793 loff_t pos, unsigned len, unsigned copied, 2794 struct page *page, void *fsdata) 2795 { 2796 struct inode *inode = mapping->host; 2797 int ret = 0, ret2; 2798 handle_t *handle = ext4_journal_current_handle(); 2799 loff_t new_i_size; 2800 unsigned long start, end; 2801 int write_mode = (int)(unsigned long)fsdata; 2802 2803 if (write_mode == FALL_BACK_TO_NONDELALLOC) 2804 return ext4_write_end(file, mapping, pos, 2805 len, copied, page, fsdata); 2806 2807 trace_ext4_da_write_end(inode, pos, len, copied); 2808 start = pos & (PAGE_CACHE_SIZE - 1); 2809 end = start + copied - 1; 2810 2811 /* 2812 * generic_write_end() will run mark_inode_dirty() if i_size 2813 * changes. So let's piggyback the i_disksize mark_inode_dirty 2814 * into that. 2815 */ 2816 new_i_size = pos + copied; 2817 if (copied && new_i_size > EXT4_I(inode)->i_disksize) { 2818 if (ext4_has_inline_data(inode) || 2819 ext4_da_should_update_i_disksize(page, end)) { 2820 ext4_update_i_disksize(inode, new_i_size); 2821 /* We need to mark the inode dirty even if 2822 * new_i_size is less than inode->i_size 2823 * but greater than i_disksize (hint: delalloc) 2824 */ 2825 ext4_mark_inode_dirty(handle, inode); 2826 } 2827 } 2828 2829 if (write_mode != CONVERT_INLINE_DATA && 2830 ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) && 2831 ext4_has_inline_data(inode)) 2832 ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied, 2833 page); 2834 else 2835 ret2 = generic_write_end(file, mapping, pos, len, copied, 2836 page, fsdata); 2837 2838 copied = ret2; 2839 if (ret2 < 0) 2840 ret = ret2; 2841 ret2 = ext4_journal_stop(handle); 2842 if (!ret) 2843 ret = ret2; 2844 2845 return ret ? ret : copied; 2846 } 2847 2848 static void ext4_da_invalidatepage(struct page *page, unsigned int offset, 2849 unsigned int length) 2850 { 2851 /* 2852 * Drop reserved blocks 2853 */ 2854 BUG_ON(!PageLocked(page)); 2855 if (!page_has_buffers(page)) 2856 goto out; 2857 2858 ext4_da_page_release_reservation(page, offset, length); 2859 2860 out: 2861 ext4_invalidatepage(page, offset, length); 2862 2863 return; 2864 } 2865 2866 /* 2867 * Force all delayed allocation blocks to be allocated for a given inode. 2868 */ 2869 int ext4_alloc_da_blocks(struct inode *inode) 2870 { 2871 trace_ext4_alloc_da_blocks(inode); 2872 2873 if (!EXT4_I(inode)->i_reserved_data_blocks) 2874 return 0; 2875 2876 /* 2877 * We do something simple for now.
The filemap_flush() will 2878 * also start triggering a write of the data blocks, which is 2879 * not strictly speaking necessary (and for users of 2880 * laptop_mode, not even desirable). However, to do otherwise 2881 * would require replicating code paths in: 2882 * 2883 * ext4_writepages() -> 2884 * write_cache_pages() ---> (via passed in callback function) 2885 * __mpage_da_writepage() --> 2886 * mpage_add_bh_to_extent() 2887 * mpage_da_map_blocks() 2888 * 2889 * The problem is that write_cache_pages(), located in 2890 * mm/page-writeback.c, marks pages clean in preparation for 2891 * doing I/O, which is not desirable if we're not planning on 2892 * doing I/O at all. 2893 * 2894 * We could call write_cache_pages(), and then redirty all of 2895 * the pages by calling redirty_page_for_writepage() but that 2896 * would be ugly in the extreme. So instead we would need to 2897 * replicate parts of the code in the above functions, 2898 * simplifying them because we wouldn't actually intend to 2899 * write out the pages, but rather only collect contiguous 2900 * logical block extents, call the multi-block allocator, and 2901 * then update the buffer heads with the block allocations. 2902 * 2903 * For now, though, we'll cheat by calling filemap_flush(), 2904 * which will map the blocks, and start the I/O, but not 2905 * actually wait for the I/O to complete. 2906 */ 2907 return filemap_flush(inode->i_mapping); 2908 } 2909 2910 /* 2911 * bmap() is special. It gets used by applications such as lilo and by 2912 * the swapper to find the on-disk block of a specific piece of data. 2913 * 2914 * Naturally, this is dangerous if the block concerned is still in the 2915 * journal. If somebody makes a swapfile on an ext4 data-journaling 2916 * filesystem and enables swap, then they may get a nasty shock when the 2917 * data getting swapped to that swapfile suddenly gets overwritten by 2918 * the original zeros written out previously to the journal and 2919 * awaiting writeback in the kernel's buffer cache. 2920 * 2921 * So, if we see any bmap calls here on a modified, data-journaled file, 2922 * take extra steps to flush any blocks which might be in the cache. 2923 */ 2924 static sector_t ext4_bmap(struct address_space *mapping, sector_t block) 2925 { 2926 struct inode *inode = mapping->host; 2927 journal_t *journal; 2928 int err; 2929 2930 /* 2931 * We can get here for an inline file via the FIBMAP ioctl 2932 */ 2933 if (ext4_has_inline_data(inode)) 2934 return 0; 2935 2936 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && 2937 test_opt(inode->i_sb, DELALLOC)) { 2938 /* 2939 * With delalloc we want to sync the file 2940 * so that we can make sure we allocate 2941 * blocks for the file 2942 */ 2943 filemap_write_and_wait(mapping); 2944 } 2945 2946 if (EXT4_JOURNAL(inode) && 2947 ext4_test_inode_state(inode, EXT4_STATE_JDATA)) { 2948 /* 2949 * This is a REALLY heavyweight approach, but the use of 2950 * bmap on dirty files is expected to be extremely rare: 2951 * only if we run lilo or swapon on a freshly made file 2952 * do we expect this to happen. 2953 * 2954 * (bmap requires CAP_SYS_RAWIO so this does not 2955 * represent an unprivileged user DoS attack --- we'd be 2956 * in trouble if mortal users could trigger this path at 2957 * will.) 2958 * 2959 * NB. EXT4_STATE_JDATA is not set on files other than 2960 * regular files. If somebody wants to bmap a directory 2961 * or symlink and gets confused because the buffer 2962 * hasn't yet been flushed to disk, they deserve 2963 * everything they get.
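 *
 * (For illustration only, a minimal userspace sketch of the kind of
 * caller that reaches this path; 'fd' and error handling are assumed.
 * FIBMAP takes a logical block number in and returns the physical
 * block number, and needs CAP_SYS_RAWIO:
 *
 *	int blk = 0;
 *	if (ioctl(fd, FIBMAP, &blk) == 0)
 *		printf("logical block 0 is on-disk block %d\n", blk);
 * )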
2964 */ 2965 2966 ext4_clear_inode_state(inode, EXT4_STATE_JDATA); 2967 journal = EXT4_JOURNAL(inode); 2968 jbd2_journal_lock_updates(journal); 2969 err = jbd2_journal_flush(journal); 2970 jbd2_journal_unlock_updates(journal); 2971 2972 if (err) 2973 return 0; 2974 } 2975 2976 return generic_block_bmap(mapping, block, ext4_get_block); 2977 } 2978 2979 static int ext4_readpage(struct file *file, struct page *page) 2980 { 2981 int ret = -EAGAIN; 2982 struct inode *inode = page->mapping->host; 2983 2984 trace_ext4_readpage(page); 2985 2986 if (ext4_has_inline_data(inode)) 2987 ret = ext4_readpage_inline(inode, page); 2988 2989 if (ret == -EAGAIN) 2990 return ext4_mpage_readpages(page->mapping, NULL, page, 1); 2991 2992 return ret; 2993 } 2994 2995 static int 2996 ext4_readpages(struct file *file, struct address_space *mapping, 2997 struct list_head *pages, unsigned nr_pages) 2998 { 2999 struct inode *inode = mapping->host; 3000 3001 /* If the file has inline data, no need to do readpages. */ 3002 if (ext4_has_inline_data(inode)) 3003 return 0; 3004 3005 return ext4_mpage_readpages(mapping, pages, NULL, nr_pages); 3006 } 3007 3008 static void ext4_invalidatepage(struct page *page, unsigned int offset, 3009 unsigned int length) 3010 { 3011 trace_ext4_invalidatepage(page, offset, length); 3012 3013 /* No journalling happens on data buffers when this function is used */ 3014 WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page))); 3015 3016 block_invalidatepage(page, offset, length); 3017 } 3018 3019 static int __ext4_journalled_invalidatepage(struct page *page, 3020 unsigned int offset, 3021 unsigned int length) 3022 { 3023 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3024 3025 trace_ext4_journalled_invalidatepage(page, offset, length); 3026 3027 /* 3028 * If it's a full truncate we just forget about the pending dirtying 3029 */ 3030 if (offset == 0 && length == PAGE_CACHE_SIZE) 3031 ClearPageChecked(page); 3032 3033 return jbd2_journal_invalidatepage(journal, page, offset, length); 3034 } 3035 3036 /* Wrapper for aops... */ 3037 static void ext4_journalled_invalidatepage(struct page *page, 3038 unsigned int offset, 3039 unsigned int length) 3040 { 3041 WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0); 3042 } 3043 3044 static int ext4_releasepage(struct page *page, gfp_t wait) 3045 { 3046 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3047 3048 trace_ext4_releasepage(page); 3049 3050 /* Page has dirty journalled data -> cannot release */ 3051 if (PageChecked(page)) 3052 return 0; 3053 if (journal) 3054 return jbd2_journal_try_to_free_buffers(journal, page, wait); 3055 else 3056 return try_to_free_buffers(page); 3057 } 3058 3059 /* 3060 * The ext4_get_block() variant used when preparing for a DIO write or buffer 3061 * write. We allocate an uninitialized extent if blocks haven't been allocated. 3062 * The extent will be converted to initialized after the IO is complete.
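 *
 * (A sketch of the expected behaviour, under the assumptions stated above:
 * a DIO write into a hole comes back with the buffer mapped and unwritten,
 * and the unwritten->written conversion happens once the IO completes.)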
3063 */ 3064 int ext4_get_block_write(struct inode *inode, sector_t iblock, 3065 struct buffer_head *bh_result, int create) 3066 { 3067 ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n", 3068 inode->i_ino, create); 3069 return _ext4_get_block(inode, iblock, bh_result, 3070 EXT4_GET_BLOCKS_IO_CREATE_EXT); 3071 } 3072 3073 static int ext4_get_block_overwrite(struct inode *inode, sector_t iblock, 3074 struct buffer_head *bh_result, int create) 3075 { 3076 int ret; 3077 3078 ext4_debug("ext4_get_block_overwrite: inode %lu, create flag %d\n", 3079 inode->i_ino, create); 3080 ret = _ext4_get_block(inode, iblock, bh_result, 0); 3081 /* 3082 * Blocks should have been preallocated! ext4_file_write_iter() checks 3083 * that. 3084 */ 3085 WARN_ON_ONCE(!buffer_mapped(bh_result)); 3086 3087 return ret; 3088 } 3089 3090 #ifdef CONFIG_FS_DAX 3091 int ext4_dax_mmap_get_block(struct inode *inode, sector_t iblock, 3092 struct buffer_head *bh_result, int create) 3093 { 3094 int ret, err; 3095 int credits; 3096 struct ext4_map_blocks map; 3097 handle_t *handle = NULL; 3098 int flags = 0; 3099 3100 ext4_debug("ext4_dax_mmap_get_block: inode %lu, create flag %d\n", 3101 inode->i_ino, create); 3102 map.m_lblk = iblock; 3103 map.m_len = bh_result->b_size >> inode->i_blkbits; 3104 credits = ext4_chunk_trans_blocks(inode, map.m_len); 3105 if (create) { 3106 flags |= EXT4_GET_BLOCKS_PRE_IO | EXT4_GET_BLOCKS_CREATE_ZERO; 3107 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits); 3108 if (IS_ERR(handle)) { 3109 ret = PTR_ERR(handle); 3110 return ret; 3111 } 3112 } 3113 3114 ret = ext4_map_blocks(handle, inode, &map, flags); 3115 if (create) { 3116 err = ext4_journal_stop(handle); 3117 if (ret >= 0 && err < 0) 3118 ret = err; 3119 } 3120 if (ret <= 0) 3121 goto out; 3122 if (map.m_flags & EXT4_MAP_UNWRITTEN) { 3123 int err2; 3124 3125 /* 3126 * We are protected by i_mmap_sem so we know block cannot go 3127 * away from under us even though we dropped i_data_sem. 3128 * Convert extent to written and write zeros there. 3129 * 3130 * Note: We may get here even when create == 0. 3131 */ 3132 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits); 3133 if (IS_ERR(handle)) { 3134 ret = PTR_ERR(handle); 3135 goto out; 3136 } 3137 3138 err = ext4_map_blocks(handle, inode, &map, 3139 EXT4_GET_BLOCKS_CONVERT | EXT4_GET_BLOCKS_CREATE_ZERO); 3140 if (err < 0) 3141 ret = err; 3142 err2 = ext4_journal_stop(handle); 3143 if (err2 < 0 && ret > 0) 3144 ret = err2; 3145 } 3146 out: 3147 WARN_ON_ONCE(ret == 0 && create); 3148 if (ret > 0) { 3149 map_bh(bh_result, inode->i_sb, map.m_pblk); 3150 bh_result->b_state = (bh_result->b_state & ~EXT4_MAP_FLAGS) | 3151 map.m_flags; 3152 /* 3153 * At least for now we have to clear BH_New so that DAX code 3154 * doesn't attempt to zero blocks again in a racy way. 
3155 */ 3156 bh_result->b_state &= ~(1 << BH_New); 3157 bh_result->b_size = map.m_len << inode->i_blkbits; 3158 ret = 0; 3159 } 3160 return ret; 3161 } 3162 #endif 3163 3164 static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, 3165 ssize_t size, void *private) 3166 { 3167 ext4_io_end_t *io_end = iocb->private; 3168 3169 /* if not async direct IO just return */ 3170 if (!io_end) 3171 return; 3172 3173 ext_debug("ext4_end_io_dio(): io_end 0x%p " 3174 "for inode %lu, iocb 0x%p, offset %llu, size %zd\n", 3175 iocb->private, io_end->inode->i_ino, iocb, offset, 3176 size); 3177 3178 iocb->private = NULL; 3179 io_end->offset = offset; 3180 io_end->size = size; 3181 ext4_put_io_end(io_end); 3182 } 3183 3184 /* 3185 * For ext4 extent files, ext4 will do direct-io writes to holes, 3186 * preallocated extents, and writes that extend the file; there is no need to 3187 * fall back to buffered IO. 3188 * 3189 * For holes, we fallocate those blocks and mark them as unwritten. 3190 * If those blocks were preallocated, we make sure they are split, but 3191 * still keep the range to write as unwritten. 3192 * 3193 * The unwritten extents will be converted to written when DIO is completed. 3194 * For async direct IO, since the IO may still be pending when we return, we 3195 * set up an end_io callback function, which will do the conversion 3196 * when the async direct IO has completed. 3197 * 3198 * If the O_DIRECT write will extend the file then add this inode to the 3199 * orphan list, so recovery will truncate it back to the original size 3200 * if the machine crashes during the write. 3201 * 3202 */ 3203 static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter, 3204 loff_t offset) 3205 { 3206 struct file *file = iocb->ki_filp; 3207 struct inode *inode = file->f_mapping->host; 3208 ssize_t ret; 3209 size_t count = iov_iter_count(iter); 3210 int overwrite = 0; 3211 get_block_t *get_block_func = NULL; 3212 int dio_flags = 0; 3213 loff_t final_size = offset + count; 3214 ext4_io_end_t *io_end = NULL; 3215 3216 /* Use the old path for reads and writes beyond i_size. */ 3217 if (iov_iter_rw(iter) != WRITE || final_size > inode->i_size) 3218 return ext4_ind_direct_IO(iocb, iter, offset); 3219 3220 BUG_ON(iocb->private == NULL); 3221 3222 /* 3223 * Make all waiters for direct IO properly wait also for extent 3224 * conversion. This also disallows race between truncate() and 3225 * overwrite DIO as i_dio_count needs to be incremented under i_mutex. 3226 */ 3227 if (iov_iter_rw(iter) == WRITE) 3228 inode_dio_begin(inode); 3229 3230 /* If we do an overwrite DIO, i_mutex locking can be released */ 3231 overwrite = *((int *)iocb->private); 3232 3233 if (overwrite) 3234 inode_unlock(inode); 3235 3236 /* 3237 * We can direct-write to holes and to fallocated extents. 3238 * 3239 * Blocks allocated to fill the hole are marked as 3240 * unwritten to prevent a parallel buffered read from exposing 3241 * the stale data before the DIO completes the data IO. 3242 * 3243 * As for previously fallocated extents, ext4's get_block will 3244 * simply mark the buffer mapped but still keep the 3245 * extents unwritten. 3246 * 3247 * In the non-AIO case, we will convert those unwritten extents 3248 * to written after returning from blockdev_direct_IO. 3249 * 3250 * For async DIO, the conversion needs to be deferred when the 3251 * IO is completed. The ext4 end_io callback function will be 3252 * called to take care of the conversion work. Here for async 3253 * case, we allocate an io_end structure to hook to the iocb.
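 *
 * (In short: for sync DIO the conversion is done inline below after
 * __blockdev_direct_IO() returns; for async DIO it is deferred to
 * ext4_end_io_dio() via the io_end hooked to iocb->private.)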
3254 */ 3255 iocb->private = NULL; 3256 ext4_inode_aio_set(inode, NULL); 3257 if (!is_sync_kiocb(iocb)) { 3258 io_end = ext4_init_io_end(inode, GFP_NOFS); 3259 if (!io_end) { 3260 ret = -ENOMEM; 3261 goto retake_lock; 3262 } 3263 /* 3264 * Grab reference for DIO. Will be dropped in ext4_end_io_dio() 3265 */ 3266 iocb->private = ext4_get_io_end(io_end); 3267 /* 3268 * We save the io structure for the current async direct 3269 * IO, so that later ext4_map_blocks() can flag in the 3270 * io structure whether there are unwritten extents that 3271 * need to be converted when the IO is completed. 3272 */ 3273 ext4_inode_aio_set(inode, io_end); 3274 } 3275 3276 if (overwrite) { 3277 get_block_func = ext4_get_block_overwrite; 3278 } else { 3279 get_block_func = ext4_get_block_write; 3280 dio_flags = DIO_LOCKING; 3281 } 3282 #ifdef CONFIG_EXT4_FS_ENCRYPTION 3283 BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)); 3284 #endif 3285 if (IS_DAX(inode)) 3286 ret = dax_do_io(iocb, inode, iter, offset, get_block_func, 3287 ext4_end_io_dio, dio_flags); 3288 else 3289 ret = __blockdev_direct_IO(iocb, inode, 3290 inode->i_sb->s_bdev, iter, offset, 3291 get_block_func, 3292 ext4_end_io_dio, NULL, dio_flags); 3293 3294 /* 3295 * Put our reference to io_end. This can free the io_end structure e.g. 3296 * in sync IO case or in case of error. It can even perform extent 3297 * conversion if all bios we submitted finished before we got here. 3298 * Note that in that case iocb->private can be already set to NULL 3299 * here. 3300 */ 3301 if (io_end) { 3302 ext4_inode_aio_set(inode, NULL); 3303 ext4_put_io_end(io_end); 3304 /* 3305 * When no IO was submitted ext4_end_io_dio() was not 3306 * called so we have to put iocb's reference. 3307 */ 3308 if (ret <= 0 && ret != -EIOCBQUEUED && iocb->private) { 3309 WARN_ON(iocb->private != io_end); 3310 WARN_ON(io_end->flag & EXT4_IO_END_UNWRITTEN); 3311 ext4_put_io_end(io_end); 3312 iocb->private = NULL; 3313 } 3314 } 3315 if (ret > 0 && !overwrite && ext4_test_inode_state(inode, 3316 EXT4_STATE_DIO_UNWRITTEN)) { 3317 int err; 3318 /* 3319 * For the non-AIO case, since the IO has already 3320 * completed, we can do the conversion right here 3321 */ 3322 err = ext4_convert_unwritten_extents(NULL, inode, 3323 offset, ret); 3324 if (err < 0) 3325 ret = err; 3326 ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); 3327 } 3328 3329 retake_lock: 3330 if (iov_iter_rw(iter) == WRITE) 3331 inode_dio_end(inode); 3332 /* take i_mutex locking again if we did an overwrite DIO */ 3333 if (overwrite) 3334 inode_lock(inode); 3335 3336 return ret; 3337 } 3338 3339 static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter, 3340 loff_t offset) 3341 { 3342 struct file *file = iocb->ki_filp; 3343 struct inode *inode = file->f_mapping->host; 3344 size_t count = iov_iter_count(iter); 3345 ssize_t ret; 3346 3347 #ifdef CONFIG_EXT4_FS_ENCRYPTION 3348 if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) 3349 return 0; 3350 #endif 3351 3352 /* 3353 * If we are doing data journalling we don't support O_DIRECT 3354 */ 3355 if (ext4_should_journal_data(inode)) 3356 return 0; 3357 3358 /* Let buffer I/O handle the inline data case.
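 * (Returning 0 from ->direct_IO makes the generic read/write paths fall
 * back to buffered I/O; the early returns above and below rely on that.)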
*/ 3359 if (ext4_has_inline_data(inode)) 3360 return 0; 3361 3362 trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter)); 3363 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3364 ret = ext4_ext_direct_IO(iocb, iter, offset); 3365 else 3366 ret = ext4_ind_direct_IO(iocb, iter, offset); 3367 trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret); 3368 return ret; 3369 } 3370 3371 /* 3372 * Pages can be marked dirty completely asynchronously from ext4's journalling 3373 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do 3374 * much here because ->set_page_dirty is called under VFS locks. The page is 3375 * not necessarily locked. 3376 * 3377 * We cannot just dirty the page and leave attached buffers clean, because the 3378 * buffers' dirty state is "definitive". We cannot just set the buffers dirty 3379 * or jbddirty because all the journalling code will explode. 3380 * 3381 * So what we do is to mark the page "pending dirty" and next time writepage 3382 * is called, propagate that into the buffers appropriately. 3383 */ 3384 static int ext4_journalled_set_page_dirty(struct page *page) 3385 { 3386 SetPageChecked(page); 3387 return __set_page_dirty_nobuffers(page); 3388 } 3389 3390 static const struct address_space_operations ext4_aops = { 3391 .readpage = ext4_readpage, 3392 .readpages = ext4_readpages, 3393 .writepage = ext4_writepage, 3394 .writepages = ext4_writepages, 3395 .write_begin = ext4_write_begin, 3396 .write_end = ext4_write_end, 3397 .bmap = ext4_bmap, 3398 .invalidatepage = ext4_invalidatepage, 3399 .releasepage = ext4_releasepage, 3400 .direct_IO = ext4_direct_IO, 3401 .migratepage = buffer_migrate_page, 3402 .is_partially_uptodate = block_is_partially_uptodate, 3403 .error_remove_page = generic_error_remove_page, 3404 }; 3405 3406 static const struct address_space_operations ext4_journalled_aops = { 3407 .readpage = ext4_readpage, 3408 .readpages = ext4_readpages, 3409 .writepage = ext4_writepage, 3410 .writepages = ext4_writepages, 3411 .write_begin = ext4_write_begin, 3412 .write_end = ext4_journalled_write_end, 3413 .set_page_dirty = ext4_journalled_set_page_dirty, 3414 .bmap = ext4_bmap, 3415 .invalidatepage = ext4_journalled_invalidatepage, 3416 .releasepage = ext4_releasepage, 3417 .direct_IO = ext4_direct_IO, 3418 .is_partially_uptodate = block_is_partially_uptodate, 3419 .error_remove_page = generic_error_remove_page, 3420 }; 3421 3422 static const struct address_space_operations ext4_da_aops = { 3423 .readpage = ext4_readpage, 3424 .readpages = ext4_readpages, 3425 .writepage = ext4_writepage, 3426 .writepages = ext4_writepages, 3427 .write_begin = ext4_da_write_begin, 3428 .write_end = ext4_da_write_end, 3429 .bmap = ext4_bmap, 3430 .invalidatepage = ext4_da_invalidatepage, 3431 .releasepage = ext4_releasepage, 3432 .direct_IO = ext4_direct_IO, 3433 .migratepage = buffer_migrate_page, 3434 .is_partially_uptodate = block_is_partially_uptodate, 3435 .error_remove_page = generic_error_remove_page, 3436 }; 3437 3438 void ext4_set_aops(struct inode *inode) 3439 { 3440 switch (ext4_inode_journal_mode(inode)) { 3441 case EXT4_INODE_ORDERED_DATA_MODE: 3442 ext4_set_inode_state(inode, EXT4_STATE_ORDERED_MODE); 3443 break; 3444 case EXT4_INODE_WRITEBACK_DATA_MODE: 3445 ext4_clear_inode_state(inode, EXT4_STATE_ORDERED_MODE); 3446 break; 3447 case EXT4_INODE_JOURNAL_DATA_MODE: 3448 inode->i_mapping->a_ops = &ext4_journalled_aops; 3449 return; 3450 default: 3451 BUG(); 3452 } 3453 if (test_opt(inode->i_sb, DELALLOC)) 3454 
inode->i_mapping->a_ops = &ext4_da_aops; 3455 else 3456 inode->i_mapping->a_ops = &ext4_aops; 3457 } 3458 3459 static int __ext4_block_zero_page_range(handle_t *handle, 3460 struct address_space *mapping, loff_t from, loff_t length) 3461 { 3462 ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; 3463 unsigned offset = from & (PAGE_CACHE_SIZE-1); 3464 unsigned blocksize, pos; 3465 ext4_lblk_t iblock; 3466 struct inode *inode = mapping->host; 3467 struct buffer_head *bh; 3468 struct page *page; 3469 int err = 0; 3470 3471 page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, 3472 mapping_gfp_constraint(mapping, ~__GFP_FS)); 3473 if (!page) 3474 return -ENOMEM; 3475 3476 blocksize = inode->i_sb->s_blocksize; 3477 3478 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 3479 3480 if (!page_has_buffers(page)) 3481 create_empty_buffers(page, blocksize, 0); 3482 3483 /* Find the buffer that contains "offset" */ 3484 bh = page_buffers(page); 3485 pos = blocksize; 3486 while (offset >= pos) { 3487 bh = bh->b_this_page; 3488 iblock++; 3489 pos += blocksize; 3490 } 3491 if (buffer_freed(bh)) { 3492 BUFFER_TRACE(bh, "freed: skip"); 3493 goto unlock; 3494 } 3495 if (!buffer_mapped(bh)) { 3496 BUFFER_TRACE(bh, "unmapped"); 3497 ext4_get_block(inode, iblock, bh, 0); 3498 /* unmapped? It's a hole - nothing to do */ 3499 if (!buffer_mapped(bh)) { 3500 BUFFER_TRACE(bh, "still unmapped"); 3501 goto unlock; 3502 } 3503 } 3504 3505 /* Ok, it's mapped. Make sure it's up-to-date */ 3506 if (PageUptodate(page)) 3507 set_buffer_uptodate(bh); 3508 3509 if (!buffer_uptodate(bh)) { 3510 err = -EIO; 3511 ll_rw_block(READ, 1, &bh); 3512 wait_on_buffer(bh); 3513 /* Uhhuh. Read error. Complain and punt. */ 3514 if (!buffer_uptodate(bh)) 3515 goto unlock; 3516 if (S_ISREG(inode->i_mode) && 3517 ext4_encrypted_inode(inode)) { 3518 /* We expect the key to be set. */ 3519 BUG_ON(!ext4_has_encryption_key(inode)); 3520 BUG_ON(blocksize != PAGE_CACHE_SIZE); 3521 WARN_ON_ONCE(ext4_decrypt(page)); 3522 } 3523 } 3524 if (ext4_should_journal_data(inode)) { 3525 BUFFER_TRACE(bh, "get write access"); 3526 err = ext4_journal_get_write_access(handle, bh); 3527 if (err) 3528 goto unlock; 3529 } 3530 zero_user(page, offset, length); 3531 BUFFER_TRACE(bh, "zeroed end of block"); 3532 3533 if (ext4_should_journal_data(inode)) { 3534 err = ext4_handle_dirty_metadata(handle, inode, bh); 3535 } else { 3536 err = 0; 3537 mark_buffer_dirty(bh); 3538 if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) 3539 err = ext4_jbd2_file_inode(handle, inode); 3540 } 3541 3542 unlock: 3543 unlock_page(page); 3544 page_cache_release(page); 3545 return err; 3546 } 3547 3548 /* 3549 * ext4_block_zero_page_range() zeros out a mapping of length 'length' 3550 * starting from file offset 'from'. The range to be zeroed must 3551 * be contained within one block.
If the specified range exceeds 3552 * the end of the block it will be shortened to the end of the block 3553 * that corresponds to 'from'. 3554 */ 3555 static int ext4_block_zero_page_range(handle_t *handle, 3556 struct address_space *mapping, loff_t from, loff_t length) 3557 { 3558 struct inode *inode = mapping->host; 3559 unsigned offset = from & (PAGE_CACHE_SIZE-1); 3560 unsigned blocksize = inode->i_sb->s_blocksize; 3561 unsigned max = blocksize - (offset & (blocksize - 1)); 3562 3563 /* 3564 * Correct the length if it does not fall between 3565 * 'from' and the end of the block 3566 */ 3567 if (length > max || length < 0) 3568 length = max; 3569 3570 if (IS_DAX(inode)) 3571 return dax_zero_page_range(inode, from, length, ext4_get_block); 3572 return __ext4_block_zero_page_range(handle, mapping, from, length); 3573 } 3574 3575 /* 3576 * ext4_block_truncate_page() zeroes out a mapping from file offset `from' 3577 * up to the end of the block which corresponds to `from'. 3578 * This is required during truncate. We need to physically zero the tail end 3579 * of that block so it doesn't yield old data if the file is later grown. 3580 */ 3581 static int ext4_block_truncate_page(handle_t *handle, 3582 struct address_space *mapping, loff_t from) 3583 { 3584 unsigned offset = from & (PAGE_CACHE_SIZE-1); 3585 unsigned length; 3586 unsigned blocksize; 3587 struct inode *inode = mapping->host; 3588 3589 blocksize = inode->i_sb->s_blocksize; 3590 length = blocksize - (offset & (blocksize - 1)); 3591 3592 return ext4_block_zero_page_range(handle, mapping, from, length); 3593 } 3594 3595 int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode, 3596 loff_t lstart, loff_t length) 3597 { 3598 struct super_block *sb = inode->i_sb; 3599 struct address_space *mapping = inode->i_mapping; 3600 unsigned partial_start, partial_end; 3601 ext4_fsblk_t start, end; 3602 loff_t byte_end = (lstart + length - 1); 3603 int err = 0; 3604 3605 partial_start = lstart & (sb->s_blocksize - 1); 3606 partial_end = byte_end & (sb->s_blocksize - 1); 3607 3608 start = lstart >> sb->s_blocksize_bits; 3609 end = byte_end >> sb->s_blocksize_bits; 3610 3611 /* Handle partial zero within the single block */ 3612 if (start == end && 3613 (partial_start || (partial_end != sb->s_blocksize - 1))) { 3614 err = ext4_block_zero_page_range(handle, mapping, 3615 lstart, length); 3616 return err; 3617 } 3618 /* Handle partial zero out on the start of the range */ 3619 if (partial_start) { 3620 err = ext4_block_zero_page_range(handle, mapping, 3621 lstart, sb->s_blocksize); 3622 if (err) 3623 return err; 3624 } 3625 /* Handle partial zero out on the end of the range */ 3626 if (partial_end != sb->s_blocksize - 1) 3627 err = ext4_block_zero_page_range(handle, mapping, 3628 byte_end - partial_end, 3629 partial_end + 1); 3630 return err; 3631 } 3632 3633 int ext4_can_truncate(struct inode *inode) 3634 { 3635 if (S_ISREG(inode->i_mode)) 3636 return 1; 3637 if (S_ISDIR(inode->i_mode)) 3638 return 1; 3639 if (S_ISLNK(inode->i_mode)) 3640 return !ext4_inode_is_fast_symlink(inode); 3641 return 0; 3642 } 3643 3644 /* 3645 * We have to make sure i_disksize gets properly updated before we truncate 3646 * page cache due to hole punching or zero range. Otherwise the i_disksize 3647 * update can get lost, as it may have been postponed until writeback 3648 * submission, which will never happen once we truncate the page cache.
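 *
 * (Example with illustrative sizes: i_size == 8192 but i_disksize still
 * 4096 because writeback is pending; a punch whose range covers i_size
 * must first push i_disksize to 8192, or the size update would die with
 * the truncated pages.)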
/*
 * We have to make sure i_disksize gets properly updated before we truncate
 * page cache due to hole punching or zero range. Otherwise i_disksize update
 * can get lost as it may have been postponed to submission of writeback but
 * that will never happen after we truncate page cache.
 */
int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
				      loff_t len)
{
	handle_t *handle;
	loff_t size = i_size_read(inode);

	WARN_ON(!inode_is_locked(inode));
	if (offset > size || offset + len < size)
		return 0;

	if (EXT4_I(inode)->i_disksize >= size)
		return 0;

	handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ext4_update_i_disksize(inode, size);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);

	return 0;
}

/*
 * ext4_punch_hole: punches a hole in a file by releasing the blocks
 * associated with the given offset and length
 *
 * @inode:  File inode
 * @offset: The offset where the hole will begin
 * @len:    The length of the hole
 *
 * Returns: 0 on success or negative on failure
 */

int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
{
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t first_block, stop_block;
	struct address_space *mapping = inode->i_mapping;
	loff_t first_block_offset, last_block_offset;
	handle_t *handle;
	unsigned int credits;
	int ret = 0;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	trace_ext4_punch_hole(inode, offset, length, 0);

	/*
	 * Write out all dirty pages to avoid race conditions
	 * Then release them.
	 */
	if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
		ret = filemap_write_and_wait_range(mapping, offset,
						   offset + length - 1);
		if (ret)
			return ret;
	}

	inode_lock(inode);

	/* No need to punch hole beyond i_size */
	if (offset >= inode->i_size)
		goto out_mutex;

	/*
	 * If the hole extends beyond i_size, set the hole
	 * to end after the page that contains i_size
	 */
	if (offset + length > inode->i_size) {
		length = inode->i_size +
		    PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
		    offset;
	}

	if (offset & (sb->s_blocksize - 1) ||
	    (offset + length) & (sb->s_blocksize - 1)) {
		/*
		 * Attach jinode to inode for jbd2 if we do any zeroing of
		 * partial block
		 */
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			goto out_mutex;

	}

	/* Wait for all existing dio workers, newcomers will block on i_mutex */
	ext4_inode_block_unlocked_dio(inode);
	inode_dio_wait(inode);

	/*
	 * Prevent page faults from reinstantiating pages we have released from
	 * page cache.
	 */
	down_write(&EXT4_I(inode)->i_mmap_sem);
	first_block_offset = round_up(offset, sb->s_blocksize);
	last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;

	/* Now release the pages and zero block aligned part of pages */
	if (last_block_offset > first_block_offset) {
		ret = ext4_update_disksize_before_punch(inode, offset, length);
		if (ret)
			goto out_dio;
		truncate_pagecache_range(inode, first_block_offset,
					 last_block_offset);
	}

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		credits = ext4_writepage_trans_blocks(inode);
	else
		credits = ext4_blocks_for_truncate(inode);
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		ext4_std_error(sb, ret);
		goto out_dio;
	}

	ret = ext4_zero_partial_blocks(handle, inode, offset,
				       length);
	if (ret)
		goto out_stop;

	first_block = (offset + sb->s_blocksize - 1) >>
		EXT4_BLOCK_SIZE_BITS(sb);
	stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);

	/* If there are no blocks to remove, return now */
	if (first_block >= stop_block)
		goto out_stop;

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	ret = ext4_es_remove_extent(inode, first_block,
				    stop_block - first_block);
	if (ret) {
		up_write(&EXT4_I(inode)->i_data_sem);
		goto out_stop;
	}

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ret = ext4_ext_remove_space(inode, first_block,
					    stop_block - 1);
	else
		ret = ext4_ind_remove_space(handle, inode, first_block,
					    stop_block);

	up_write(&EXT4_I(inode)->i_data_sem);
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
out_stop:
	ext4_journal_stop(handle);
out_dio:
	up_write(&EXT4_I(inode)->i_mmap_sem);
	ext4_inode_resume_unlocked_dio(inode);
out_mutex:
	inode_unlock(inode);
	return ret;
}

int ext4_inode_attach_jinode(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct jbd2_inode *jinode;

	if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
		return 0;

	jinode = jbd2_alloc_inode(GFP_KERNEL);
	spin_lock(&inode->i_lock);
	if (!ei->jinode) {
		if (!jinode) {
			spin_unlock(&inode->i_lock);
			return -ENOMEM;
		}
		ei->jinode = jinode;
		jbd2_journal_init_jbd_inode(ei->jinode, inode);
		jinode = NULL;
	}
	spin_unlock(&inode->i_lock);
	if (unlikely(jinode != NULL))
		jbd2_free_inode(jinode);
	return 0;
}
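/*
 * For context: ext4_punch_hole() above is reached through the VFS from the
 * fallocate(2) system call. A minimal userspace sketch, for illustration
 * only ("testfile" and the offsets are made up):
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>		/* open(), fallocate() */
#include <linux/falloc.h>	/* FALLOC_FL_PUNCH_HOLE, FALLOC_FL_KEEP_SIZE */
#include <unistd.h>

static int punch_example(void)
{
	int fd = open("testfile", O_RDWR);
	int ret;

	if (fd < 0)
		return -1;
	/* Deallocate 1 MiB starting at offset 4096; i_size is unchanged. */
	ret = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			4096, 1024 * 1024);
	close(fd);
	return ret;
}
#endif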
/*
 * ext4_truncate()
 *
 * We block out ext4_get_block() block instantiations across the entire
 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
 * simultaneously on behalf of the same inode.
 *
 * As we work through the truncate and commit bits of it to the journal there
 * is one core, guiding principle: the file's tree must always be consistent on
 * disk. We must be able to restart the truncate after a crash.
 *
 * The file's tree may be transiently inconsistent in memory (although it
 * probably isn't), but whenever we close off and commit a journal transaction,
 * the contents of (the filesystem + the journal) must be consistent and
 * restartable. It's pretty simple, really: bottom up, right to left (although
 * left-to-right works OK too).
 *
 * Note that at recovery time, journal replay occurs *before* the restart of
 * truncate against the orphan inode list.
 *
 * The committed inode has the new, desired i_size (which is the same as
 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
 * that this inode's truncate did not complete and it will again call
 * ext4_truncate() to have another go. So there will be instantiated blocks
 * to the right of the truncation point in a crashed ext4 filesystem. But
 * that's fine - as long as they are linked from the inode, the post-crash
 * ext4_truncate() run will find them and release them.
 */
void ext4_truncate(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int credits;
	handle_t *handle;
	struct address_space *mapping = inode->i_mapping;

	/*
	 * There is a possibility that we're either freeing the inode
	 * or it's a completely new inode. In those cases we might not
	 * have i_mutex locked because it's not necessary.
	 */
	if (!(inode->i_state & (I_NEW|I_FREEING)))
		WARN_ON(!inode_is_locked(inode));
	trace_ext4_truncate_enter(inode);

	if (!ext4_can_truncate(inode))
		return;

	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);

	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
		ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);

	if (ext4_has_inline_data(inode)) {
		int has_inline = 1;

		ext4_inline_data_truncate(inode, &has_inline);
		if (has_inline)
			return;
	}

	/* If we zero-out tail of the page, we have to create jinode for jbd2 */
	if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
		if (ext4_inode_attach_jinode(inode) < 0)
			return;
	}

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		credits = ext4_writepage_trans_blocks(inode);
	else
		credits = ext4_blocks_for_truncate(inode);

	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		return;
	}

	if (inode->i_size & (inode->i_sb->s_blocksize - 1))
		ext4_block_truncate_page(handle, mapping, inode->i_size);

	/*
	 * We add the inode to the orphan list, so that if this
	 * truncate spans multiple transactions, and we crash, we will
	 * resume the truncate when the filesystem recovers. It also
	 * marks the inode dirty, to catch the new size.
	 *
	 * Implication: the file must always be in a sane, consistent
	 * truncatable state while each transaction commits.
	 */
	if (ext4_orphan_add(handle, inode))
		goto out_stop;

	down_write(&EXT4_I(inode)->i_data_sem);

	ext4_discard_preallocations(inode);

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ext4_ext_truncate(handle, inode);
	else
		ext4_ind_truncate(handle, inode);

	up_write(&ei->i_data_sem);

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

out_stop:
	/*
	 * If this was a simple ftruncate() and the file will remain alive,
	 * then we need to clean up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_evict_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);

	trace_ext4_truncate_exit(inode);
}

/*
 * ext4_get_inode_loc returns with an extra refcount against the inode's
 * underlying buffer_head on success. If 'in_mem' is true, we have all
 * data in memory that is needed to recreate the on-disk version of this
 * inode.
 */
static int __ext4_get_inode_loc(struct inode *inode,
				struct ext4_iloc *iloc, int in_mem)
{
	struct ext4_group_desc *gdp;
	struct buffer_head *bh;
	struct super_block *sb = inode->i_sb;
	ext4_fsblk_t block;
	int inodes_per_block, inode_offset;

	iloc->bh = NULL;
	if (!ext4_valid_inum(sb, inode->i_ino))
		return -EFSCORRUPTED;

	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
	if (!gdp)
		return -EIO;

	/*
	 * Figure out the offset within the block group inode table
	 */
	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
	inode_offset = ((inode->i_ino - 1) %
			EXT4_INODES_PER_GROUP(sb));
	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);

	bh = sb_getblk(sb, block);
	if (unlikely(!bh))
		return -ENOMEM;
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);

		/*
		 * If the buffer has the write error flag, we have failed
		 * to write out another inode in the same block. In this
		 * case, we don't have to read the block because we may
		 * read the old inode data successfully.
		 */
		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
			set_buffer_uptodate(bh);

		if (buffer_uptodate(bh)) {
			/* someone brought it uptodate while we waited */
			unlock_buffer(bh);
			goto has_buffer;
		}

		/*
		 * If we have all information of the inode in memory and this
		 * is the only valid inode in the block, we need not read the
		 * block.
		 */
		if (in_mem) {
			struct buffer_head *bitmap_bh;
			int i, start;

			start = inode_offset & ~(inodes_per_block - 1);

			/* Is the inode bitmap in cache? */
			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
			if (unlikely(!bitmap_bh))
				goto make_io;

			/*
			 * If the inode bitmap isn't in cache then the
			 * optimisation may end up performing two reads instead
			 * of one, so skip it.
			 */
			if (!buffer_uptodate(bitmap_bh)) {
				brelse(bitmap_bh);
				goto make_io;
			}
			for (i = start; i < start + inodes_per_block; i++) {
				if (i == inode_offset)
					continue;
				if (ext4_test_bit(i, bitmap_bh->b_data))
					break;
			}
			brelse(bitmap_bh);
			if (i == start + inodes_per_block) {
				/* all other inodes are free, so skip I/O */
				memset(bh->b_data, 0, bh->b_size);
				set_buffer_uptodate(bh);
				unlock_buffer(bh);
				goto has_buffer;
			}
		}

make_io:
		/*
		 * If we need to do any I/O, try to pre-readahead extra
		 * blocks from the inode table.
		 */
		if (EXT4_SB(sb)->s_inode_readahead_blks) {
			ext4_fsblk_t b, end, table;
			unsigned num;
			__u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;

			table = ext4_inode_table(sb, gdp);
			/* s_inode_readahead_blks is always a power of 2 */
			b = block & ~((ext4_fsblk_t) ra_blks - 1);
			if (table > b)
				b = table;
			end = b + ra_blks;
			num = EXT4_INODES_PER_GROUP(sb);
			if (ext4_has_group_desc_csum(sb))
				num -= ext4_itable_unused_count(sb, gdp);
			table += num / inodes_per_block;
			if (end > table)
				end = table;
			while (b <= end)
				sb_breadahead(sb, b++);
		}

		/*
		 * There are other valid inodes in the buffer, this inode
		 * has in-inode xattrs, or we don't have this inode in memory.
		 * Read the block from disk.
		 */
		trace_ext4_load_inode(inode);
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ | REQ_META | REQ_PRIO, bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			EXT4_ERROR_INODE_BLOCK(inode, block,
					       "unable to read itable block");
			brelse(bh);
			return -EIO;
		}
	}
has_buffer:
	iloc->bh = bh;
	return 0;
}

int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
{
	/* We have all inode data except xattrs in memory here. */
	return __ext4_get_inode_loc(inode, iloc,
		!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
}
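/*
 * Worked example of the inode-table lookup above (illustrative numbers
 * only, assuming 4k blocks, 256-byte inodes and 8192 inodes per group,
 * i.e. 16 inodes per block): for ino 8195,
 *
 *	block_group  = (8195 - 1) / 8192 = 1
 *	inode_offset = (8195 - 1) % 8192 = 2
 *	block        = itable_base + 2 / 16 = itable_base (first block)
 *	iloc->offset = (2 % 16) * 256 = 512
 *
 * so the inode lives in slot 2 (zero-based) of group 1's first inode
 * table block, 512 bytes into that block.
 */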
void ext4_set_inode_flags(struct inode *inode)
{
	unsigned int flags = EXT4_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & EXT4_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & EXT4_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & EXT4_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & EXT4_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & EXT4_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (test_opt(inode->i_sb, DAX))
		new_fl |= S_DAX;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX);
}

/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
void ext4_get_inode_flags(struct ext4_inode_info *ei)
{
	unsigned int vfs_fl;
	unsigned long old_fl, new_fl;

	do {
		vfs_fl = ei->vfs_inode.i_flags;
		old_fl = ei->i_flags;
		new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
				EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
				EXT4_DIRSYNC_FL);
		if (vfs_fl & S_SYNC)
			new_fl |= EXT4_SYNC_FL;
		if (vfs_fl & S_APPEND)
			new_fl |= EXT4_APPEND_FL;
		if (vfs_fl & S_IMMUTABLE)
			new_fl |= EXT4_IMMUTABLE_FL;
		if (vfs_fl & S_NOATIME)
			new_fl |= EXT4_NOATIME_FL;
		if (vfs_fl & S_DIRSYNC)
			new_fl |= EXT4_DIRSYNC_FL;
	} while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
}

static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
				  struct ext4_inode_info *ei)
{
	blkcnt_t i_blocks;
	struct inode *inode = &(ei->vfs_inode);
	struct super_block *sb = inode->i_sb;

	if (ext4_has_feature_huge_file(sb)) {
		/* we are using combined 48 bit field */
		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
					le32_to_cpu(raw_inode->i_blocks_lo);
		if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
			/* i_blocks is stored in units of file system blocks */
			return i_blocks << (inode->i_blkbits - 9);
		} else {
			return i_blocks;
		}
	} else {
		return le32_to_cpu(raw_inode->i_blocks_lo);
	}
}
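/*
 * Worked example of the 48-bit decode above (illustrative values,
 * assuming 4k blocks, i.e. i_blkbits == 12): with huge_file enabled,
 * i_blocks_high = 0x0001 and i_blocks_lo = 0x00000000 combine to
 * i_blocks = 1 << 32 = 4294967296. Without EXT4_INODE_HUGE_FILE that is
 * already a count of 512-byte sectors (2 TiB of data); with the flag set
 * it is a count of file system blocks and is scaled by
 * 1 << (12 - 9) = 8 before being returned in sectors.
 */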
static inline void ext4_iget_extra_inode(struct inode *inode,
					 struct ext4_inode *raw_inode,
					 struct ext4_inode_info *ei)
{
	__le32 *magic = (void *)raw_inode +
			EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
	if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
		ext4_find_inline_data_nolock(inode);
	} else
		EXT4_I(inode)->i_inline_off = 0;
}

int ext4_get_projid(struct inode *inode, kprojid_t *projid)
{
	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_PROJECT))
		return -EOPNOTSUPP;
	*projid = EXT4_I(inode)->i_projid;
	return 0;
}

struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
{
	struct ext4_iloc iloc;
	struct ext4_inode *raw_inode;
	struct ext4_inode_info *ei;
	struct inode *inode;
	journal_t *journal = EXT4_SB(sb)->s_journal;
	long ret;
	int block;
	uid_t i_uid;
	gid_t i_gid;
	projid_t i_projid;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ei = EXT4_I(inode);
	iloc.bh = NULL;

	ret = __ext4_get_inode_loc(inode, &iloc, 0);
	if (ret < 0)
		goto bad_inode;
	raw_inode = ext4_raw_inode(&iloc);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
		    EXT4_INODE_SIZE(inode->i_sb)) {
			EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)",
				EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize,
				EXT4_INODE_SIZE(inode->i_sb));
			ret = -EFSCORRUPTED;
			goto bad_inode;
		}
	} else
		ei->i_extra_isize = 0;

	/* Precompute checksum seed for inode metadata */
	if (ext4_has_metadata_csum(sb)) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		__u32 csum;
		__le32 inum = cpu_to_le32(inode->i_ino);
		__le32 gen = raw_inode->i_generation;
		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
				   sizeof(inum));
		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
					      sizeof(gen));
	}

	if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
		EXT4_ERROR_INODE(inode, "checksum invalid");
		ret = -EFSBADCRC;
		goto bad_inode;
	}

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_PROJECT) &&
	    EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
		i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid);
	else
		i_projid = EXT4_DEF_PROJID;

	if (!(test_opt(inode->i_sb, NO_UID32))) {
		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	i_uid_write(inode, i_uid);
	i_gid_write(inode, i_gid);
	ei->i_projid = make_kprojid(&init_user_ns, i_projid);
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));

	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
	ei->i_inline_off = 0;
	ei->i_dir_start_lookup = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes.
	 * The test is the same one that e2fsck uses.
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0) {
		if ((inode->i_mode == 0 ||
		     !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
		    ino != EXT4_BOOT_LOADER_INO) {
			/* this inode is deleted */
			ret = -ESTALE;
			goto bad_inode;
		}
		/* The only unlinked inodes we let through here have
		 * valid i_mode and are being read by the orphan
		 * recovery code: that's fine, we're about to complete
		 * the process of deleting those.
		 * OR it is the EXT4_BOOT_LOADER_INO which is
		 * not initialized on a new filesystem. */
	}
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
	if (ext4_has_feature_64bit(sb))
		ei->i_file_acl |=
			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
	inode->i_size = ext4_isize(raw_inode);
	ei->i_disksize = inode->i_size;
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
#endif
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_block_group = iloc.block_group;
	ei->i_last_alloc_group = ~0;
	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (block = 0; block < EXT4_N_BLOCKS; block++)
		ei->i_data[block] = raw_inode->i_block[block];
	INIT_LIST_HEAD(&ei->i_orphan);

	/*
	 * Set transaction id's of transactions that have to be committed
	 * to finish f[data]sync. We set them to currently running transaction
	 * as we cannot be sure that the inode or some of its metadata isn't
	 * part of the transaction - the inode could have been reclaimed and
	 * now it is reread from disk.
	 */
	if (journal) {
		transaction_t *transaction;
		tid_t tid;

		read_lock(&journal->j_state_lock);
		if (journal->j_running_transaction)
			transaction = journal->j_running_transaction;
		else
			transaction = journal->j_committing_transaction;
		if (transaction)
			tid = transaction->t_tid;
		else
			tid = journal->j_commit_sequence;
		read_unlock(&journal->j_state_lock);
		ei->i_sync_tid = tid;
		ei->i_datasync_tid = tid;
	}

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		if (ei->i_extra_isize == 0) {
			/* The extra space is currently unused. Use it. */
			ei->i_extra_isize = sizeof(struct ext4_inode) -
					    EXT4_GOOD_OLD_INODE_SIZE;
		} else {
			ext4_iget_extra_inode(inode, raw_inode, ei);
		}
	}

	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);

	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
		inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
		if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
				inode->i_version |=
		    (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
		}
	}

	ret = 0;
	if (ei->i_file_acl &&
	    !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
		EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
				 ei->i_file_acl);
		ret = -EFSCORRUPTED;
		goto bad_inode;
	} else if (!ext4_has_inline_data(inode)) {
		if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
			if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
			    (S_ISLNK(inode->i_mode) &&
			     !ext4_inode_is_fast_symlink(inode))))
				/* Validate extent which is part of inode */
				ret = ext4_ext_check_inode(inode);
		} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
			   (S_ISLNK(inode->i_mode) &&
			    !ext4_inode_is_fast_symlink(inode))) {
			/* Validate block references which are part of inode */
			ret = ext4_ind_check_inode(inode);
		}
	}
	if (ret)
		goto bad_inode;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext4_file_inode_operations;
		inode->i_fop = &ext4_file_operations;
		ext4_set_aops(inode);
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext4_dir_inode_operations;
		inode->i_fop = &ext4_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext4_encrypted_inode(inode)) {
			inode->i_op = &ext4_encrypted_symlink_inode_operations;
			ext4_set_aops(inode);
		} else if (ext4_inode_is_fast_symlink(inode)) {
			inode->i_link = (char *)ei->i_data;
			inode->i_op = &ext4_fast_symlink_inode_operations;
			nd_terminate_link(ei->i_data, inode->i_size,
					  sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext4_symlink_inode_operations;
			ext4_set_aops(inode);
		}
		inode_nohighmem(inode);
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &ext4_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	} else if (ino == EXT4_BOOT_LOADER_INO) {
		make_bad_inode(inode);
	} else {
		ret = -EFSCORRUPTED;
		EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
		goto bad_inode;
	}
	brelse(iloc.bh);
	ext4_set_inode_flags(inode);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	brelse(iloc.bh);
	iget_failed(inode);
	return ERR_PTR(ret);
}

struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino)
{
	if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
		return ERR_PTR(-EFSCORRUPTED);
	return ext4_iget(sb, ino);
}
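/*
 * Typical caller pattern (a sketch, not a quote of any particular caller):
 * ext4_iget()/ext4_iget_normal() return either a valid inode or an
 * ERR_PTR() value, never NULL, so callers branch on IS_ERR():
 */
#if 0
	struct inode *inode = ext4_iget_normal(sb, ino);

	if (IS_ERR(inode))
		return PTR_ERR(inode);	/* e.g. -ESTALE or -EFSCORRUPTED */
	/* ... use the inode ... */
	iput(inode);
#endif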
static int ext4_inode_blocks_set(handle_t *handle,
				 struct ext4_inode *raw_inode,
				 struct ext4_inode_info *ei)
{
	struct inode *inode = &(ei->vfs_inode);
	u64 i_blocks = inode->i_blocks;
	struct super_block *sb = inode->i_sb;

	if (i_blocks <= ~0U) {
		/*
		 * i_blocks can be represented in a 32 bit variable
		 * as multiple of 512 bytes
		 */
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = 0;
		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
		return 0;
	}
	if (!ext4_has_feature_huge_file(sb))
		return -EFBIG;

	if (i_blocks <= 0xffffffffffffULL) {
		/*
		 * i_blocks can be represented in a 48 bit variable
		 * as multiple of 512 bytes
		 */
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
	} else {
		ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
		/* i_blocks is stored in file system block size */
		i_blocks = i_blocks >> (inode->i_blkbits - 9);
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
	}
	return 0;
}

struct other_inode {
	unsigned long		orig_ino;
	struct ext4_inode	*raw_inode;
};

static int other_inode_match(struct inode * inode, unsigned long ino,
			     void *data)
{
	struct other_inode *oi = (struct other_inode *) data;

	if ((inode->i_ino != ino) ||
	    (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
			       I_DIRTY_SYNC | I_DIRTY_DATASYNC)) ||
	    ((inode->i_state & I_DIRTY_TIME) == 0))
		return 0;
	spin_lock(&inode->i_lock);
	if (((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
				I_DIRTY_SYNC | I_DIRTY_DATASYNC)) == 0) &&
	    (inode->i_state & I_DIRTY_TIME)) {
		struct ext4_inode_info	*ei = EXT4_I(inode);

		inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED);
		spin_unlock(&inode->i_lock);

		spin_lock(&ei->i_raw_lock);
		EXT4_INODE_SET_XTIME(i_ctime, inode, oi->raw_inode);
		EXT4_INODE_SET_XTIME(i_mtime, inode, oi->raw_inode);
		EXT4_INODE_SET_XTIME(i_atime, inode, oi->raw_inode);
		ext4_inode_csum_set(inode, oi->raw_inode, ei);
		spin_unlock(&ei->i_raw_lock);
		trace_ext4_other_inode_update_time(inode, oi->orig_ino);
		return -1;
	}
	spin_unlock(&inode->i_lock);
	return -1;
}

/*
 * Opportunistically update the other time fields for other inodes in
 * the same inode table block.
 */
static void ext4_update_other_inodes_time(struct super_block *sb,
					  unsigned long orig_ino, char *buf)
{
	struct other_inode oi;
	unsigned long ino;
	int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
	int inode_size = EXT4_INODE_SIZE(sb);

	oi.orig_ino = orig_ino;
	/*
	 * Calculate the first inode in the inode table block.  Inode
	 * numbers are one-based.  That is, the first inode in a block
	 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
	 */
	ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
	for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
		if (ino == orig_ino)
			continue;
		oi.raw_inode = (struct ext4_inode *) buf;
		(void) find_inode_nowait(sb, ino, other_inode_match, &oi);
	}
}
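/*
 * Worked example for the base-inode calculation above (illustrative only,
 * with 16 inodes per block): for orig_ino = 35,
 *
 *	((35 - 1) & ~15) + 1 = (34 & ~15) + 1 = 32 + 1 = 33
 *
 * so the block holds inodes 33..48 and the loop visits every slot in it
 * except 35 itself.
 */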
/*
 * Post the struct inode info into an on-disk inode location in the
 * buffer-cache.  This gobbles the caller's reference to the
 * buffer_head in the inode location struct.
 *
 * The caller must have write access to iloc->bh.
 */
static int ext4_do_update_inode(handle_t *handle,
				struct inode *inode,
				struct ext4_iloc *iloc)
{
	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct buffer_head *bh = iloc->bh;
	struct super_block *sb = inode->i_sb;
	int err = 0, rc, block;
	int need_datasync = 0, set_large_file = 0;
	uid_t i_uid;
	gid_t i_gid;
	projid_t i_projid;

	spin_lock(&ei->i_raw_lock);

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);

	ext4_get_inode_flags(ei);
	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	i_uid = i_uid_read(inode);
	i_gid = i_gid_read(inode);
	i_projid = from_kprojid(&init_user_ns, ei->i_projid);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
		/*
		 * Fix up interoperability with old kernels. Otherwise, old
		 * inodes get re-used with the upper 16 bits of the uid/gid
		 * intact.
		 */
		if (!ei->i_dtime) {
			raw_inode->i_uid_high =
				cpu_to_le16(high_16_bits(i_uid));
			raw_inode->i_gid_high =
				cpu_to_le16(high_16_bits(i_gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);

	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);

	err = ext4_inode_blocks_set(handle, raw_inode, ei);
	if (err) {
		spin_unlock(&ei->i_raw_lock);
		goto out_brelse;
	}
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
		raw_inode->i_file_acl_high =
			cpu_to_le16(ei->i_file_acl >> 32);
	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
	if (ei->i_disksize != ext4_isize(raw_inode)) {
		ext4_isize_set(raw_inode, ei->i_disksize);
		need_datasync = 1;
	}
	if (ei->i_disksize > 0x7fffffffULL) {
		if (!ext4_has_feature_large_file(sb) ||
				EXT4_SB(sb)->s_es->s_rev_level ==
		    cpu_to_le32(EXT4_GOOD_OLD_REV))
			set_large_file = 1;
	}
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else if (!ext4_has_inline_data(inode)) {
		for (block = 0; block < EXT4_N_BLOCKS; block++)
			raw_inode->i_block[block] = ei->i_data[block];
	}

	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
		raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
		if (ei->i_extra_isize) {
			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
				raw_inode->i_version_hi =
					cpu_to_le32(inode->i_version >> 32);
			raw_inode->i_extra_isize =
				cpu_to_le16(ei->i_extra_isize);
		}
	}

	BUG_ON(!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
			EXT4_FEATURE_RO_COMPAT_PROJECT) &&
	       i_projid != EXT4_DEF_PROJID);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
		raw_inode->i_projid = cpu_to_le32(i_projid);

	ext4_inode_csum_set(inode, raw_inode, ei);
	spin_unlock(&ei->i_raw_lock);
	if (inode->i_sb->s_flags & MS_LAZYTIME)
		ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
					      bh->b_data);

	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
	rc = ext4_handle_dirty_metadata(handle, NULL, bh);
	if (!err)
		err = rc;
	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
	if (set_large_file) {
		BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
		err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
		if (err)
			goto out_brelse;
		ext4_update_dynamic_rev(sb);
		ext4_set_feature_large_file(sb);
		ext4_handle_sync(handle);
		err = ext4_handle_dirty_super(handle, sb);
	}
	ext4_update_inode_fsync_trans(handle, inode, need_datasync);
out_brelse:
	brelse(bh);
	ext4_std_error(inode->i_sb, err);
	return err;
}

/*
 * ext4_write_inode()
 *
 * We are called from a few places:
 *
 * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
 *   Here, there will be no transaction running. We wait for any running
 *   transaction to commit.
 *
 * - Within flush work (sys_sync(), kupdate and such).
 *   We wait on commit, if told to.
 *
 * - Within iput_final() -> write_inode_now()
 *   We wait on commit, if told to.
 *
 * In all cases it is actually safe for us to return without doing anything,
 * because the inode has been copied into a raw inode buffer in
 * ext4_mark_inode_dirty().  This is a correctness thing for WB_SYNC_ALL
 * writeback.
 *
 * Note that we are absolutely dependent upon all inode dirtiers doing the
 * right thing: they *must* call mark_inode_dirty() after dirtying info in
 * which we are interested.
 *
 * It would be a bug for them to not do this.  The code:
 *
 *	mark_inode_dirty(inode)
 *	stuff();
 *	inode->i_size = expr;
 *
 * is in error because write_inode() could occur while `stuff()' is running,
 * and the new i_size will be lost.  Plus the inode will no longer be on the
 * superblock's dirty inode list.
 */
int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int err;

	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
		return 0;

	if (EXT4_SB(inode->i_sb)->s_journal) {
		if (ext4_journal_current_handle()) {
			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
			dump_stack();
			return -EIO;
		}

		/*
		 * No need to force transaction in WB_SYNC_NONE mode. Also
		 * ext4_sync_fs() will force the commit after everything is
		 * written.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
			return 0;

		err = ext4_force_commit(inode->i_sb);
	} else {
		struct ext4_iloc iloc;

		err = __ext4_get_inode_loc(inode, &iloc, 0);
		if (err)
			return err;
		/*
		 * sync(2) will flush the whole buffer cache. No need to do
		 * it here separately for each inode.
		 */
		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
			sync_dirty_buffer(iloc.bh);
		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
			EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
					       "IO error syncing inode");
			err = -EIO;
		}
		brelse(iloc.bh);
	}
	return err;
}

/*
 * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
 * buffers that are attached to a page straddling i_size and are undergoing
 * commit. In that case we have to wait for commit to finish and try again.
 */
static void ext4_wait_for_tail_page_commit(struct inode *inode)
{
	struct page *page;
	unsigned offset;
	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
	tid_t commit_tid = 0;
	int ret;

	offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
	/*
	 * All buffers in the last page remain valid? Then there's nothing to
	 * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE ==
	 * blocksize case
	 */
	if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits))
		return;
	while (1) {
		page = find_lock_page(inode->i_mapping,
				      inode->i_size >> PAGE_CACHE_SHIFT);
		if (!page)
			return;
		ret = __ext4_journalled_invalidatepage(page, offset,
						PAGE_CACHE_SIZE - offset);
		unlock_page(page);
		page_cache_release(page);
		if (ret != -EBUSY)
			return;
		commit_tid = 0;
		read_lock(&journal->j_state_lock);
		if (journal->j_committing_transaction)
			commit_tid = journal->j_committing_transaction->t_tid;
		read_unlock(&journal->j_state_lock);
		if (commit_tid)
			jbd2_log_wait_commit(journal, commit_tid);
	}
}

/*
 * ext4_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible.  In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk.  (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Another thing we have to assure is that if we are in ordered mode
 * and inode is still attached to the committing transaction, we must
 * start writeout of all the dirty pages which are being truncated.
 * This way we are sure that all the data written in the previous
 * transaction are already on disk (truncate waits for pages under
 * writeback).
 *
 * Called with inode->i_mutex down.
 */
int ext4_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int error, rc = 0;
	int orphan = 0;
	const unsigned int ia_valid = attr->ia_valid;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (is_quota_modification(inode, attr)) {
		error = dquot_initialize(inode);
		if (error)
			return error;
	}
	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
		handle_t *handle;

		/* (user+group)*(old+new) structure, inode write (sb,
		 * inode block, ? - but truncate inode update has it) */
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
			(EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
			 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}
		error = dquot_transfer(inode, attr);
		if (error) {
			ext4_journal_stop(handle);
			return error;
		}
		/* Update corresponding info in inode so that everything is in
		 * one transaction */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		error = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	}

	if (attr->ia_valid & ATTR_SIZE) {
		handle_t *handle;
		loff_t oldsize = inode->i_size;
		int shrink = (attr->ia_size <= inode->i_size);

		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

			if (attr->ia_size > sbi->s_bitmap_maxbytes)
				return -EFBIG;
		}
		if (!S_ISREG(inode->i_mode))
			return -EINVAL;

		if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
			inode_inc_iversion(inode);

		if (ext4_should_order_data(inode) &&
		    (attr->ia_size < inode->i_size)) {
			error = ext4_begin_ordered_truncate(inode,
							    attr->ia_size);
			if (error)
				goto err_out;
		}
		if (attr->ia_size != inode->i_size) {
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
			if (IS_ERR(handle)) {
				error = PTR_ERR(handle);
				goto err_out;
			}
			if (ext4_handle_valid(handle) && shrink) {
				error = ext4_orphan_add(handle, inode);
				orphan = 1;
			}
			/*
			 * Update c/mtime on truncate up, ext4_truncate() will
			 * update c/mtime in shrink case below
			 */
			if (!shrink) {
				inode->i_mtime = ext4_current_time(inode);
				inode->i_ctime = inode->i_mtime;
			}
			down_write(&EXT4_I(inode)->i_data_sem);
			EXT4_I(inode)->i_disksize = attr->ia_size;
			rc = ext4_mark_inode_dirty(handle, inode);
			if (!error)
				error = rc;
			/*
			 * We have to update i_size under i_data_sem together
			 * with i_disksize to avoid races with writeback code
			 * running ext4_wb_update_i_disksize().
			 */
			if (!error)
				i_size_write(inode, attr->ia_size);
			up_write(&EXT4_I(inode)->i_data_sem);
			ext4_journal_stop(handle);
			if (error) {
				if (orphan)
					ext4_orphan_del(NULL, inode);
				goto err_out;
			}
		}
		if (!shrink)
			pagecache_isize_extended(inode, oldsize, inode->i_size);

		/*
		 * Blocks are going to be removed from the inode. Wait
		 * for dio in flight. Temporarily disable
		 * dioread_nolock to prevent livelock.
		 */
		if (orphan) {
			if (!ext4_should_journal_data(inode)) {
				ext4_inode_block_unlocked_dio(inode);
				inode_dio_wait(inode);
				ext4_inode_resume_unlocked_dio(inode);
			} else
				ext4_wait_for_tail_page_commit(inode);
		}
		down_write(&EXT4_I(inode)->i_mmap_sem);
		/*
		 * Truncate pagecache after we've waited for commit
		 * in data=journal mode to make pages freeable.
		 */
		truncate_pagecache(inode, inode->i_size);
		if (shrink)
			ext4_truncate(inode);
		up_write(&EXT4_I(inode)->i_mmap_sem);
	}

	if (!rc) {
		setattr_copy(inode, attr);
		mark_inode_dirty(inode);
	}

	/*
	 * If the call to ext4_truncate failed to get a transaction handle at
	 * all, we need to clean up the in-core orphan list manually.
	 */
	if (orphan && inode->i_nlink)
		ext4_orphan_del(NULL, inode);

	if (!rc && (ia_valid & ATTR_MODE))
		rc = posix_acl_chmod(inode, inode->i_mode);

err_out:
	ext4_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	return error;
}

int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode;
	unsigned long long delalloc_blocks;

	inode = d_inode(dentry);
	generic_fillattr(inode, stat);

	/*
	 * If there is inline data in the inode, the inode will normally not
	 * have data blocks allocated (it may have an external xattr block).
	 * Report at least one sector for such files, so tools like tar, rsync
	 * and others don't incorrectly think the file is completely sparse.
	 */
	if (unlikely(ext4_has_inline_data(inode)))
		stat->blocks += (stat->size + 511) >> 9;

	/*
	 * We can't update i_blocks if the block allocation is delayed,
	 * otherwise in the case of a system crash before the real block
	 * allocation is done, we will have i_blocks inconsistent with
	 * on-disk file blocks.
	 * We always keep i_blocks updated together with real
	 * allocation. But so as not to confuse userspace, stat
	 * will return the blocks that include the delayed allocation
	 * blocks for this file.
	 */
	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
				   EXT4_I(inode)->i_reserved_data_blocks);
	stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
	return 0;
}

static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
				   int pextents)
{
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return ext4_ind_trans_blocks(inode, lblocks);
	return ext4_ext_index_trans_blocks(inode, pextents);
}

/*
 * Account for index blocks, block group bitmaps and block group
 * descriptor blocks when modifying data blocks and index blocks.
 * In the worst case, the index blocks are spread over different
 * block groups.
 *
 * If the data blocks are discontiguous, they can spread over
 * different block groups too. Even if they are contiguous, with
 * flexbg they could still cross a block group boundary.
 *
 * Also account for superblock, inode, quota and xattr blocks.
 */
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents)
{
	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
	int gdpblocks;
	int idxblocks;
	int ret = 0;

	/*
	 * How many index blocks do we need to touch to map @lblocks logical
	 * blocks to @pextents physical extents?
	 */
	idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);

	ret = idxblocks;

	/*
	 * Now let's see how many group bitmaps and group descriptors need
	 * to be accounted.
	 */
	groups = idxblocks + pextents;
	gdpblocks = groups;
	if (groups > ngroups)
		groups = ngroups;
	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;

	/* bitmaps and block group descriptor blocks */
	ret += groups + gdpblocks;

	/* Blocks for super block, inode, quota and xattr blocks */
	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);

	return ret;
}

/*
 * Calculate the total number of credits to reserve to fit
 * the modification of a single page into a single transaction,
 * which may include multiple chunks of block allocations.
 *
 * This could be called via ext4_write_begin()
 *
 * We need to consider the worst case, when
 * one new block is needed per extent.
 */
int ext4_writepage_trans_blocks(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);
	int ret;

	ret = ext4_meta_trans_blocks(inode, bpp, bpp);

	/* Account for data blocks for journalled mode */
	if (ext4_should_journal_data(inode))
		ret += bpp;
	return ret;
}

/*
 * Calculate the journal credits for a chunk of data modification.
 *
 * This is called from DIO, fallocate or whoever calls
 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
 *
 * journal buffers for data blocks are not included here, as DIO
 * and fallocate do not need to journal data buffers.
 */
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
	return ext4_meta_trans_blocks(inode, nrblocks, 1);
}

/*
 * The caller must have previously called ext4_reserve_inode_write().
 * Given this, we know that the caller already has write access to iloc->bh.
 */
int ext4_mark_iloc_dirty(handle_t *handle,
			 struct inode *inode, struct ext4_iloc *iloc)
{
	int err = 0;

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);

	/* the do_update_inode consumes one bh->b_count */
	get_bh(iloc->bh);

	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
	err = ext4_do_update_inode(handle, inode, iloc);
	put_bh(iloc->bh);
	return err;
}

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh.  This _must_ be cleaned up later.
 */

int
ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			 struct ext4_iloc *iloc)
{
	int err;

	err = ext4_get_inode_loc(inode, iloc);
	if (!err) {
		BUFFER_TRACE(iloc->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, iloc->bh);
		if (err) {
			brelse(iloc->bh);
			iloc->bh = NULL;
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
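/*
 * Worked example for the credit accounting above (illustrative numbers
 * only): on an extent-mapped inode with 4k blocks and 4k pages,
 * ext4_journal_blocks_per_page() is 1, so ext4_writepage_trans_blocks()
 * asks ext4_meta_trans_blocks() for 1 logical block in 1 extent. If that
 * costs, say, idxblocks = 1, then groups = gdpblocks = 1 + 1 = 2 (each
 * capped by ngroups and s_gdb_count respectively), and the reservation is
 * 1 + 2 + 2 + EXT4_META_TRANS_BLOCKS(sb) credits, plus one more block per
 * page in data=journal mode.
 */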
/*
 * Expand an inode by new_extra_isize bytes.
 * Returns 0 on success or negative error number on failure.
 */
static int ext4_expand_extra_isize(struct inode *inode,
				   unsigned int new_extra_isize,
				   struct ext4_iloc iloc,
				   handle_t *handle)
{
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;

	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
		return 0;

	raw_inode = ext4_raw_inode(&iloc);

	header = IHDR(inode, raw_inode);

	/* No extended attributes present */
	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
			new_extra_isize);
		EXT4_I(inode)->i_extra_isize = new_extra_isize;
		return 0;
	}

	/* try to expand with EAs present */
	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
					  raw_inode, handle);
}

/*
 * What we do here is to mark the in-core inode as clean with respect to inode
 * dirtiness (it may still be data-dirty).
 * This means that the in-core inode may be reaped by prune_icache
 * without having to perform any I/O.  This is a very good thing,
 * because *any* task may call prune_icache - even ones which
 * have a transaction open against a different journal.
 *
 * Is this cheating?  Not really.  Sure, we haven't written the
 * inode out, but prune_icache isn't a user-visible syncing function.
 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
 * we start and wait on commits.
 */
int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	static unsigned int mnt_count;
	int err, ret;

	might_sleep();
	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (ext4_handle_valid(handle) &&
	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
		/*
		 * We need extra buffer credits since we may write into EA block
		 * with this same handle. If journal_extend fails, then it will
		 * only result in a minor loss of functionality for that inode.
		 * If this is felt to be critical, then e2fsck should be run to
		 * force a large enough s_min_extra_isize.
		 */
		if ((jbd2_journal_extend(handle,
			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
			ret = ext4_expand_extra_isize(inode,
						      sbi->s_want_extra_isize,
						      iloc, handle);
			if (ret) {
				ext4_set_inode_state(inode,
						     EXT4_STATE_NO_EXPAND);
				if (mnt_count !=
					le16_to_cpu(sbi->s_es->s_mnt_count)) {
					ext4_warning(inode->i_sb,
					"Unable to expand inode %lu. Delete"
					" some EAs or run e2fsck.",
					inode->i_ino);
					mnt_count =
					  le16_to_cpu(sbi->s_es->s_mnt_count);
				}
			}
		}
	}
	if (!err)
		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
	return err;
}

/*
 * ext4_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Also, dquot_alloc_block() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
 * so would cause a commit on atime updates, which we don't bother doing.
 * We handle synchronous inodes at the highest possible level.
 *
 * If only the I_DIRTY_TIME flag is set, we can skip everything.  If
 * I_DIRTY_TIME and I_DIRTY_SYNC are set, the only inode fields we need
 * to copy into the on-disk inode structure are the timestamp fields.
 */
void ext4_dirty_inode(struct inode *inode, int flags)
{
	handle_t *handle;

	if (flags == I_DIRTY_TIME)
		return;
	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (IS_ERR(handle))
		goto out;

	ext4_mark_inode_dirty(handle, inode);

	ext4_journal_stop(handle);
out:
	return;
}
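/*
 * For reference, the two-step pattern that ext4_mark_inode_dirty() wraps
 * (a sketch, not a quote of a specific caller): reserve write access to
 * the inode's buffer first, modify the in-core inode, then mark the iloc
 * dirty, which consumes the buffer_head reference.
 */
#if 0
	struct ext4_iloc iloc;
	int err = ext4_reserve_inode_write(handle, inode, &iloc);

	if (!err) {
		/* ... update fields in the in-core inode ... */
		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
	}
#endif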
int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;

	/*
	 * We have to be very careful here: changing a data block's
	 * journaling status dynamically is dangerous.  If we write a
	 * data block to the journal, change the status and then delete
	 * that block, we risk forgetting to revoke the old log record
	 * from the journal and so a subsequent replay can corrupt data.
	 * So, first we make sure that the journal is empty and that
	 * nobody is changing anything.
	 */

	journal = EXT4_JOURNAL(inode);
	if (!journal)
		return 0;
	if (is_journal_aborted(journal))
		return -EROFS;
	/*
	 * We have to allocate physical blocks for delalloc blocks before
	 * flushing the journal; otherwise delalloc blocks can no longer
	 * be allocated.  Worse, truncating delalloc blocks could trigger
	 * a BUG by flushing delalloc blocks in the journal.  There are no
	 * delalloc blocks in non-journal data mode.
	 */
	if (val && test_opt(inode->i_sb, DELALLOC)) {
		err = ext4_alloc_da_blocks(inode);
		if (err < 0)
			return err;
	}

	/* Wait for all existing dio workers */
	ext4_inode_block_unlocked_dio(inode);
	inode_dio_wait(inode);

	jbd2_journal_lock_updates(journal);

	/*
	 * OK, there are no updates running now, and all cached data is
	 * synced to disk.  We are now in a completely consistent state
	 * which doesn't have anything in the journal, and we know that
	 * no filesystem updates are running, so it is safe to modify
	 * the inode's in-core data-journaling state flag now.
	 */

	if (val)
		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	else {
		err = jbd2_journal_flush(journal);
		if (err < 0) {
			jbd2_journal_unlock_updates(journal);
			ext4_inode_resume_unlocked_dio(inode);
			return err;
		}
		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	}
	ext4_set_aops(inode);

	jbd2_journal_unlock_updates(journal);
	ext4_inode_resume_unlocked_dio(inode);

	/* Finally we can mark the inode as dirty. */

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_mark_inode_dirty(handle, inode);
	ext4_handle_sync(handle);
	ext4_journal_stop(handle);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/* Callback for ext4_walk_page_buffers(): true if @bh has no disk mapping. */
static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
{
	return !buffer_mapped(bh);
}

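/*
 * ext4's ->page_mkwrite() handler.  Called when a write fault hits a
 * page of a shared writable mapping; we must make sure any missing
 * blocks are allocated (and, in data=journal mode, that the buffers
 * are journalled) before the page may be marked writable.
 */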
int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	loff_t size;
	unsigned long len;
	int ret;
	struct file *file = vma->vm_file;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	handle_t *handle;
	get_block_t *get_block;
	int retries = 0;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	down_read(&EXT4_I(inode)->i_mmap_sem);
	/* Delalloc case is easy... */
	if (test_opt(inode->i_sb, DELALLOC) &&
	    !ext4_should_journal_data(inode) &&
	    !ext4_nonda_switch(inode->i_sb)) {
		do {
			ret = block_page_mkwrite(vma, vmf,
						 ext4_da_get_block_prep);
		} while (ret == -ENOSPC &&
			 ext4_should_retry_alloc(inode->i_sb, &retries));
		goto out_ret;
	}

	lock_page(page);
	size = i_size_read(inode);
	/* Page got truncated from under us? */
	if (page->mapping != mapping || page_offset(page) > size) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;
	/*
	 * Return if we have all the buffers mapped.  This avoids the need to
	 * do journal_start/journal_stop, which can block and take a long time.
	 */
	if (page_has_buffers(page)) {
		if (!ext4_walk_page_buffers(NULL, page_buffers(page),
					    0, len, NULL,
					    ext4_bh_unmapped)) {
			/* Wait so that we don't change page under IO */
			wait_for_stable_page(page);
			ret = VM_FAULT_LOCKED;
			goto out;
		}
	}
	unlock_page(page);
	/* OK, we need to fill the hole... */
	if (ext4_should_dioread_nolock(inode))
		get_block = ext4_get_block_write;
	else
		get_block = ext4_get_block;
retry_alloc:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
				    ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}
	ret = block_page_mkwrite(vma, vmf, get_block);
	if (!ret && ext4_should_journal_data(inode)) {
		if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
			  PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
			unlock_page(page);
			ret = VM_FAULT_SIGBUS;
			ext4_journal_stop(handle);
			goto out;
		}
		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	}
	ext4_journal_stop(handle);
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry_alloc;
out_ret:
	ret = block_page_mkwrite_return(ret);
out:
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(inode->i_sb);
	return ret;
}

int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	int err;

	down_read(&EXT4_I(inode)->i_mmap_sem);
	err = filemap_fault(vma, vmf);
	up_read(&EXT4_I(inode)->i_mmap_sem);

	return err;
}

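/*
 * A minimal sketch of how the two fault handlers above get wired into
 * a mapping.  The real vm_operations_struct for ext4 files lives in
 * fs/ext4/file.c; the field set shown here is illustrative, not
 * necessarily complete:
 *
 *	static const struct vm_operations_struct ext4_file_vm_ops = {
 *		.fault		= ext4_filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= ext4_page_mkwrite,
 *	};
 */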