/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/iomap.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			     struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;
	__u16 dummy_csum = 0;
	int offset = offsetof(struct ext4_inode, i_checksum_lo);
	unsigned int csum_size = sizeof(dummy_csum);

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
	offset += csum_size;
	csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
			   EXT4_GOOD_OLD_INODE_SIZE - offset);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		offset = offsetof(struct ext4_inode, i_checksum_hi);
		csum = ext4_chksum(sbi, csum, (__u8 *)raw +
				   EXT4_GOOD_OLD_INODE_SIZE,
				   offset - EXT4_GOOD_OLD_INODE_SIZE);
		if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
			csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
					   csum_size);
			offset += csum_size;
		}
		csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
				   EXT4_INODE_SIZE(inode->i_sb) - offset);
	}

	return csum;
}

static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}

static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
				struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}
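/*
 * Worked example for the checksum split above (illustrative, not from
 * the original source): if the computed checksum is csum == 0x89ABCDEF,
 * the helpers store i_checksum_lo == 0xCDEF and, when the large-inode
 * i_checksum_hi field fits in the on-disk inode, i_checksum_hi == 0x89AB;
 * otherwise only the low 16 bits are stored and compared.
 */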
static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents);

/*
 * Test whether an inode is a fast symlink.
 */
int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;

	if (ext4_has_inline_data(inode))
		return 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				 int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * the page cache has already been dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	trace_ext4_evict_inode(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data, dirty buffers are tracked only in the
		 * journal.  So although mm thinks everything is clean and
		 * ready for reaping, the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed.  Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss.  Also even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped and thus a user could see stale data when reading
		 * them before the transaction is checkpointed.  So be careful
		 * and force everything to disk here...  We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use the page cache.
		 */
		if (inode->i_ino != EXT4_JOURNAL_INO &&
		    ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_complete_transaction(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages_final(&inode->i_data);

		goto no_delete;
	}

	if (is_bad_inode(inode))
		goto no_delete;
	dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages_final(&inode->i_data);

	/*
	 * Protect us against freezing - iput() caller didn't have to have any
	 * protection against it
	 */
	sb_start_intwrite(inode->i_sb);
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
				    ext4_blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks) {
		err = ext4_truncate(inode);
		if (err) {
			ext4_error(inode->i_sb,
				   "couldn't truncate inode %lu (err %d)",
				   inode->i_ino, err);
			goto stop_handle;
		}
	}

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			ext4_orphan_del(NULL, inode);
			sb_end_intwrite(inode->i_sb);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	sb_end_intwrite(inode->i_sb);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated.  So on delayed-allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}

static int __check_block_validity(struct inode *inode, const char *func,
				unsigned int line,
				struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_len);
		return -EFSCORRUPTED;
	}
	return 0;
}

int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
		       ext4_lblk_t len)
{
	int ret;

	if (ext4_encrypted_inode(inode))
		return fscrypt_zeroout_range(inode, lblk, pblk, len);

	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))
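/*
 * Illustrative call sequence (a sketch; the real call site is in
 * ext4_map_blocks() below): blocks freshly allocated with
 * EXT4_GET_BLOCKS_ZERO are zeroed before they become visible, after any
 * stale block-device aliases have been dropped:
 *
 *	clean_bdev_aliases(inode->i_sb->s_bdev, map->m_pblk, map->m_len);
 *	ret = ext4_issue_zeroout(inode, map->m_lblk, map->m_pblk,
 *				 map->m_len);
 */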
#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
				       struct inode *inode,
				       struct ext4_map_blocks *es_map,
				       struct ext4_map_blocks *map,
				       int flags)
{
	int retval;

	map->m_flags = 0;
	/*
	 * There is a race window in which the result is not the same,
	 * e.g. xfstests #223 when dioread_nolock is enabled.  The reason
	 * is that we look up a block mapping in the extent status tree
	 * without taking i_data_sem, so by this time the unwritten extent
	 * could already have been converted.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	/*
	 * We don't check m_len because the extent will be collapsed in the
	 * status tree, so the lengths might not be equal.
	 */
	if (es_map->m_lblk != map->m_lblk ||
	    es_map->m_flags != map->m_flags ||
	    es_map->m_pblk != map->m_pblk) {
		printk("ES cache assertion failed for inode: %lu "
		       "es_cached ex [%d/%d/%llu/%x] != "
		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
		       inode->i_ino, es_map->m_lblk, es_map->m_len,
		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
		       map->m_len, map->m_pblk, map->m_flags,
		       retval, flags);
	}
}
#endif /* ES_AGGRESSIVE_TEST */

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head, and marks it
 * mapped.
 *
 * If the file is extent based, it will call ext4_ext_map_blocks();
 * otherwise, ext4_ind_map_blocks() is called to handle indirect-mapping
 * based files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and unwritten, the
 * resulting @map is marked as unwritten.  If create == 1, it will mark
 * @map as mapped.
 *
 * It returns 0 if a plain lookup fails (the blocks have not been
 * allocated); in that case, @map is returned as unmapped but we still
 * fill in map->m_len to indicate the length of the hole starting at
 * map->m_lblk.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	struct extent_status es;
	int retval;
	int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);

	/*
	 * ext4_map_blocks returns an int, and m_len is an unsigned int
	 */
	if (unlikely(map->m_len > INT_MAX))
		map->m_len = INT_MAX;

	/* We can only handle block numbers less than EXT_MAX_BLOCKS */
	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
		return -EFSCORRUPTED;

	/* First, look up the extent status tree */
	if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
			map->m_pblk = ext4_es_pblock(&es) +
					map->m_lblk - es.es_lblk;
			map->m_flags |= ext4_es_is_written(&es) ?
					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
			map->m_pblk = 0;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
			retval = 0;
		} else {
			BUG_ON(1);
		}
#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(handle, inode, map,
					   &orig_map, flags);
#endif
		goto found;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_find_delalloc_range(inode, map->m_lblk,
					     map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk,
					    map->m_len, map->m_pblk, status);
		if (ret < 0)
			retval = ret;
	}
	up_read((&EXT4_I(inode)->i_data_sem));

found:
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() called with create == 0 returns the
	 * buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		/*
		 * If we need to convert the extent to unwritten
		 * we continue and do the actual work in
		 * ext4_ext_map_blocks()
		 */
		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
			return retval;

	/*
	 * Here we clear m_flags because after allocating a new extent,
	 * it will be set again.
	 */
	map->m_flags &= ~EXT4_MAP_FLAGS;

	/*
	 * Allocating new blocks and/or writing to an unwritten extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_block()
	 * with create == 1 flag.
	 */
	down_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We need to check the extents flag again here because a migration
	 * could have changed the inode type in between.
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.
			 * Force the migrate to fail by clearing the
			 * migrate flag.
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now.  We don't
		 * support fallocate for non-extent files, so we can update
		 * the reserved space here.
		 */
		if ((retval > 0) &&
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}

	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		/*
		 * We have to zero out blocks before inserting them into the
		 * extent status tree.  Otherwise someone could look them up
		 * there and use them before they are really zeroed.  We also
		 * have to unmap metadata before zeroing as otherwise writeback
		 * can overwrite zeros with stale data from the block device.
		 */
		if (flags & EXT4_GET_BLOCKS_ZERO &&
		    map->m_flags & EXT4_MAP_MAPPED &&
		    map->m_flags & EXT4_MAP_NEW) {
			clean_bdev_aliases(inode->i_sb->s_bdev, map->m_pblk,
					   map->m_len);
			ret = ext4_issue_zeroout(inode, map->m_lblk,
						 map->m_pblk, map->m_len);
			if (ret) {
				retval = ret;
				goto out_sem;
			}
		}

		/*
		 * If the extent has been zeroed out, we don't need to update
		 * the extent status tree.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
		    ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
			if (ext4_es_is_written(&es))
				goto out_sem;
		}
		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_find_delalloc_range(inode, map->m_lblk,
					     map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    map->m_pblk, status);
		if (ret < 0) {
			retval = ret;
			goto out_sem;
		}
	}

out_sem:
	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;

		/*
		 * Inodes with freshly allocated blocks where contents will be
		 * visible after transaction commit must be on transaction's
		 * ordered data list.
		 */
		if (map->m_flags & EXT4_MAP_NEW &&
		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
		    !(flags & EXT4_GET_BLOCKS_ZERO) &&
		    !IS_NOQUOTA(inode) &&
		    ext4_should_order_data(inode)) {
			if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
				ret = ext4_jbd2_inode_add_wait(handle, inode);
			else
				ret = ext4_jbd2_inode_add_write(handle, inode);
			if (ret)
				return ret;
		}
	}
	return retval;
}
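/*
 * Illustrative use of ext4_map_blocks() (a sketch mirroring
 * _ext4_get_block() below, not an additional call site):
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
 *	int err = ext4_map_blocks(handle, inode, &map, flags);
 *
 *	err > 0:  map.m_pblk/map.m_len describe the mapped extent
 *	err == 0: a hole; map.m_len is the length of the hole
 *	err < 0:  error
 */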
/*
 * Update EXT4_MAP_FLAGS in bh->b_state.  For buffer heads attached to pages
 * we have to be careful as someone else may be manipulating b_state as well.
 */
static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
{
	unsigned long old_state;
	unsigned long new_state;

	flags &= EXT4_MAP_FLAGS;

	/* Dummy buffer_head? Set non-atomically. */
	if (!bh->b_page) {
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
		return;
	}
	/*
	 * Someone else may be modifying b_state.  Be careful!  This is ugly
	 * but once we get rid of using bh as a container for mapping
	 * information to pass to / from get_block functions, this can go away.
	 */
	do {
		old_state = READ_ONCE(bh->b_state);
		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
	} while (unlikely(
		 cmpxchg(&bh->b_state, old_state, new_state) != old_state));
}

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	struct ext4_map_blocks map;
	int ret = 0;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
			      flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		ext4_update_bh_state(bh, map.m_flags);
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	} else if (ret == 0) {
		/* hole case, need to fill in bh->b_size */
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
	}
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}

/*
 * Get block function used when preparing for buffered write if we require
 * creating an unwritten extent if blocks haven't been allocated.  The extent
 * will be converted to written after the IO is complete.
 */
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	return _ext4_get_block(inode, iblock, bh_result,
			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

/*
 * Get blocks function for the cases that need to start a transaction -
 * generally different cases of direct IO and DAX IO.  It also handles retries
 * in case of ENOSPC.
 */
static int ext4_get_block_trans(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int flags)
{
	int dio_credits;
	handle_t *handle;
	int retries = 0;
	int ret;

	/* Trim mapping request to maximum we can map at once for DIO */
	if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS)
		bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits;
	dio_credits = ext4_chunk_trans_blocks(inode,
				      bh_result->b_size >> inode->i_blkbits);
retry:
	handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	ret = _ext4_get_block(inode, iblock, bh_result, flags);
	ext4_journal_stop(handle);

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
	return ret;
}

/* Get block function for DIO reads and writes to inodes without extents */
int ext4_dio_get_block(struct inode *inode, sector_t iblock,
		       struct buffer_head *bh, int create)
{
	/* We don't expect handle for direct IO */
	WARN_ON_ONCE(ext4_journal_current_handle());

	if (!create)
		return _ext4_get_block(inode, iblock, bh, 0);
	return ext4_get_block_trans(inode, iblock, bh, EXT4_GET_BLOCKS_CREATE);
}
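/*
 * Worked example for the DIO_MAX_BLOCKS cap above (illustrative): with a
 * 4 KiB block size, a single direct IO mapping request is trimmed to at
 * most 4096 blocks, i.e. 16 MiB, which keeps the journal credits computed
 * by ext4_chunk_trans_blocks() for one handle bounded.
 */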
/*
 * Get block function for AIO DIO writes when we create an unwritten extent
 * if blocks are not yet allocated.  The extent will be converted to written
 * after IO is complete.
 */
static int ext4_dio_get_block_unwritten_async(struct inode *inode,
		sector_t iblock, struct buffer_head *bh_result, int create)
{
	int ret;

	/* We don't expect handle for direct IO */
	WARN_ON_ONCE(ext4_journal_current_handle());

	ret = ext4_get_block_trans(inode, iblock, bh_result,
				   EXT4_GET_BLOCKS_IO_CREATE_EXT);

	/*
	 * When doing DIO using unwritten extents, we need io_end to convert
	 * unwritten extents to written on IO completion.  We allocate io_end
	 * once we spot an unwritten extent and store it in b_private.  Generic
	 * DIO code keeps b_private set and furthermore passes the value to
	 * our completion callback in the 'private' argument.
	 */
	if (!ret && buffer_unwritten(bh_result)) {
		if (!bh_result->b_private) {
			ext4_io_end_t *io_end;

			io_end = ext4_init_io_end(inode, GFP_KERNEL);
			if (!io_end)
				return -ENOMEM;
			bh_result->b_private = io_end;
			ext4_set_io_unwritten_flag(inode, io_end);
		}
		set_buffer_defer_completion(bh_result);
	}

	return ret;
}

/*
 * Get block function for non-AIO DIO writes when we create an unwritten
 * extent if blocks are not yet allocated.  The extent will be converted to
 * written after IO is complete, from the ext4_ext_direct_IO() function.
 */
static int ext4_dio_get_block_unwritten_sync(struct inode *inode,
		sector_t iblock, struct buffer_head *bh_result, int create)
{
	int ret;

	/* We don't expect handle for direct IO */
	WARN_ON_ONCE(ext4_journal_current_handle());

	ret = ext4_get_block_trans(inode, iblock, bh_result,
				   EXT4_GET_BLOCKS_IO_CREATE_EXT);

	/*
	 * Mark inode as having pending DIO writes to unwritten extents.
	 * ext4_ext_direct_IO() checks this flag and converts extents to
	 * written.
	 */
	if (!ret && buffer_unwritten(bh_result))
		ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);

	return ret;
}

static int ext4_dio_get_block_overwrite(struct inode *inode, sector_t iblock,
		struct buffer_head *bh_result, int create)
{
	int ret;

	ext4_debug("ext4_dio_get_block_overwrite: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	/* We don't expect handle for direct IO */
	WARN_ON_ONCE(ext4_journal_current_handle());

	ret = _ext4_get_block(inode, iblock, bh_result, 0);
	/*
	 * Blocks should have been preallocated! ext4_file_write_iter() checks
	 * that.
	 */
	WARN_ON_ONCE(!buffer_mapped(bh_result) || buffer_unwritten(bh_result));

	return ret;
}
/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int map_flags)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int create = map_flags & EXT4_GET_BLOCKS_CREATE;
	int err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map, map_flags);

	if (err == 0)
		return create ? ERR_PTR(-ENOSPC) : NULL;
	if (err < 0)
		return ERR_PTR(err);

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (unlikely(err)) {
			unlock_buffer(bh);
			goto errout;
		}
		if (!buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			goto errout;
	} else
		BUFFER_TRACE(bh, "not a new buffer");
	return bh;
errout:
	brelse(bh);
	return ERR_PTR(err);
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int map_flags)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, map_flags);
	if (IS_ERR(bh))
		return bh;
	if (!bh || buffer_uptodate(bh))
		return bh;
	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	return ERR_PTR(-EIO);
}

int ext4_walk_page_buffers(handle_t *handle,
			   struct buffer_head *head,
			   unsigned from,
			   unsigned to,
			   int *partial,
			   int (*fn)(handle_t *handle,
				     struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
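/*
 * Typical use of ext4_walk_page_buffers() (a sketch; the same pattern
 * appears in ext4_write_begin() below): apply a callback to every buffer
 * overlapping [from, to) in a page, e.g.
 *
 *	ret = ext4_walk_page_buffers(handle, page_buffers(page),
 *				     from, to, NULL,
 *				     do_journal_get_write_access);
 */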
/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage().  In that case, we
 * *know* that ext4_writepage() has generated enough buffer credits to do the
 * whole page.  So we won't block on the journal in that case, which is good,
 * because the caller may be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
int do_journal_get_write_access(handle_t *handle,
				struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers.  Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues.  Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	BUFFER_TRACE(bh, "get write access");
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}
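/*
 * ext4_block_write_begin() below is, in essence, an open-coded variant of
 * __block_write_begin() (which the !CONFIG_EXT4_FS_ENCRYPTION build uses
 * instead; see ext4_write_begin()) that additionally schedules fscrypt
 * decryption for encrypted regular files once the reads complete.
 */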
#ifdef CONFIG_EXT4_FS_ENCRYPTION
static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
				  get_block_t *get_block)
{
	unsigned from = pos & (PAGE_SIZE - 1);
	unsigned to = from + len;
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;
	unsigned bbits;
	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
	bool decrypt = false;

	BUG_ON(!PageLocked(page));
	BUG_ON(from > PAGE_SIZE);
	BUG_ON(to > PAGE_SIZE);
	BUG_ON(from > to);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);
	bbits = ilog2(blocksize);
	block = (sector_t)page->index << (PAGE_SHIFT - bbits);

	for (bh = head, block_start = 0; bh != head || !block_start;
	    block++, block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				break;
			if (buffer_new(bh)) {
				clean_bdev_bh_alias(bh);
				if (PageUptodate(page)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
					continue;
				}
				if (block_end > to || block_start < from)
					zero_user_segments(page, to, block_end,
							   block_start, from);
				continue;
			}
		}
		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		    (block_start < from || block_end > to)) {
			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
			*wait_bh++ = bh;
			decrypt = ext4_encrypted_inode(inode) &&
				S_ISREG(inode->i_mode);
		}
	}
	/*
	 * If we issued read requests, let them complete.
	 */
	while (wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			err = -EIO;
	}
	if (unlikely(err))
		page_zero_new_buffers(page, from, to);
	else if (decrypt)
		err = fscrypt_decrypt_page(page->mapping->host, page,
					   PAGE_SIZE, 0, page->index);
	return err;
}
#endif

static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_SHIFT;
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
						    flags, pagep);
		if (ret < 0)
			return ret;
		if (ret == 1)
			return 0;
	}

	/*
	 * grab_cache_page_write_begin() can take a long time if the
	 * system is thrashing due to memory pressure, or if the page
	 * is being written back.  So grab it first before we start
	 * the transaction handle.  This also allows us to allocate
	 * the page (if needed) without using GFP_NOFS.
	 */
retry_grab:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	unlock_page(page);

retry_journal:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
	if (IS_ERR(handle)) {
		put_page(page);
		return PTR_ERR(handle);
	}

	lock_page(page);
	if (page->mapping != mapping) {
		/* The page got truncated from under us */
		unlock_page(page);
		put_page(page);
		ext4_journal_stop(handle);
		goto retry_grab;
	}
	/* In case writeback began while the page was unlocked */
	wait_for_stable_page(page);

#ifdef CONFIG_EXT4_FS_ENCRYPTION
	if (ext4_should_dioread_nolock(inode))
		ret = ext4_block_write_begin(page, pos, len,
					     ext4_get_block_unwritten);
	else
		ret = ext4_block_write_begin(page, pos, len,
					     ext4_get_block);
#else
	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len,
					  ext4_get_block_unwritten);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);
#endif
	if (!ret && ext4_should_journal_data(inode)) {
		ret = ext4_walk_page_buffers(handle, page_buffers(page),
					     from, to, NULL,
					     do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again.  Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}

		if (ret == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry_journal;
		put_page(page);
		return ret;
	}
	*pagep = page;
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	int ret;
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	clear_buffer_meta(bh);
	clear_buffer_prio(bh);
	return ret;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext4_write_end(struct file *file,
			  struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int i_size_changed = 0;

	trace_ext4_write_end(inode, pos, len, copied);
	if (ext4_has_inline_data(inode)) {
		ret = ext4_write_inline_data_end(inode, pos, len,
						 copied, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			goto errout;
		}
		copied = ret;
	} else
		copied = block_write_end(file, mapping, pos,
					 len, copied, page, fsdata);
	/*
	 * It's important to update i_size while still holding the page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	i_size_changed = ext4_update_inode_size(inode, pos + copied);
	unlock_page(page);
	put_page(page);

	if (old_size < pos)
		pagecache_isize_extended(inode, old_size, pos);
	/*
	 * Don't mark the inode dirty under page lock.  First, it unnecessarily
	 * makes the holding time of page lock longer.  Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks than we copied, we will
		 * have blocks allocated outside inode->i_size, so truncate
		 * them.
		 */
		ext4_orphan_add(handle, inode);
errout:
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
/*
 * This is a private version of page_zero_new_buffers() which doesn't
 * set the buffer to be dirty, since in data=journalled mode we need
 * to call ext4_handle_dirty_metadata() instead.
 */
static void ext4_journalled_zero_new_buffers(handle_t *handle,
					     struct page *page,
					     unsigned from, unsigned to)
{
	unsigned int block_start = 0, block_end;
	struct buffer_head *head, *bh;

	bh = head = page_buffers(page);
	do {
		block_end = block_start + bh->b_size;
		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, size;

					start = max(from, block_start);
					size = min(to, block_end) - start;

					zero_user(page, start, size);
					write_end_fn(handle, bh);
				}
				clear_buffer_new(bh);
			}
		}
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}

static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	int size_changed = 0;

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (ext4_has_inline_data(inode)) {
		ret = ext4_write_inline_data_end(inode, pos, len,
						 copied, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			goto errout;
		}
		copied = ret;
	} else if (unlikely(copied < len) && !PageUptodate(page)) {
		copied = 0;
		ext4_journalled_zero_new_buffers(handle, page, from, to);
	} else {
		if (unlikely(copied < len))
			ext4_journalled_zero_new_buffers(handle, page,
							 from + copied, to);
		ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
					     from + copied, &partial,
					     write_end_fn);
		if (!partial)
			SetPageUptodate(page);
	}
	size_changed = ext4_update_inode_size(inode, pos + copied);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	unlock_page(page);
	put_page(page);

	if (old_size < pos)
		pagecache_isize_extended(inode, old_size, pos);

	if (size_changed) {
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks than we copied, we will
		 * have blocks allocated outside inode->i_size, so truncate
		 * them.
		 */
		ext4_orphan_add(handle, inode);

errout:
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
/*
 * Reserve space for a single cluster
 */
static int ext4_da_reserve_space(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	int ret;

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end.  Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
	if (ret)
		return ret;

	spin_lock(&ei->i_block_reservation_lock);
	if (ext4_claim_free_clusters(sbi, 1, 0)) {
		spin_unlock(&ei->i_block_reservation_lock);
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
		return -ENOSPC;
	}
	ei->i_reserved_data_blocks++;
	trace_ext4_da_reserve_space(inode);
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;       /* success */
}

static void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * If there aren't enough reserved blocks, then the
		 * counter is messed up somewhere.  Since this
		 * function is called from invalidatepage, it's
		 * harmless to return without any action.
		 */
		ext4_warning(inode->i_sb, "ext4_da_release_space: "
			 "ino %lu, to_free %d with only %d reserved "
			 "data blocks", inode->i_ino, to_free,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}
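/*
 * Worked example for the reservation helpers above (illustrative): on a
 * bigalloc file system with s_cluster_ratio == 4, EXT4_C2B(sbi, 1) == 4,
 * so reserving one cluster in ext4_da_reserve_space() reserves quota for
 * four file system blocks, and releasing it gives the same amount back.
 */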
static void ext4_da_page_release_reservation(struct page *page,
					     unsigned int offset,
					     unsigned int length)
{
	int to_release = 0, contiguous_blks = 0;
	struct buffer_head *head, *bh;
	unsigned int curr_off = 0;
	struct inode *inode = page->mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	unsigned int stop = offset + length;
	int num_clusters;
	ext4_fsblk_t lblk;

	BUG_ON(stop > PAGE_SIZE || stop < length);

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;

		if (next_off > stop)
			break;

		if ((offset <= curr_off) && (buffer_delay(bh))) {
			to_release++;
			contiguous_blks++;
			clear_buffer_delay(bh);
		} else if (contiguous_blks) {
			lblk = page->index <<
			       (PAGE_SHIFT - inode->i_blkbits);
			lblk += (curr_off >> inode->i_blkbits) -
				contiguous_blks;
			ext4_es_remove_extent(inode, lblk, contiguous_blks);
			contiguous_blks = 0;
		}
		curr_off = next_off;
	} while ((bh = bh->b_this_page) != head);

	if (contiguous_blks) {
		lblk = page->index << (PAGE_SHIFT - inode->i_blkbits);
		lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
		ext4_es_remove_extent(inode, lblk, contiguous_blks);
	}

	/* If we have released all the blocks belonging to a cluster, then we
	 * need to release the reserved space for that cluster. */
	num_clusters = EXT4_NUM_B2C(sbi, to_release);
	while (num_clusters > 0) {
		lblk = (page->index << (PAGE_SHIFT - inode->i_blkbits)) +
			((num_clusters - 1) << sbi->s_cluster_bits);
		if (sbi->s_cluster_ratio == 1 ||
		    !ext4_find_delalloc_cluster(inode, lblk))
			ext4_da_release_space(inode, 1);

		num_clusters--;
	}
}

/*
 * Delayed allocation stuff
 */

struct mpage_da_data {
	struct inode *inode;
	struct writeback_control *wbc;

	pgoff_t first_page;	/* The first page to write */
	pgoff_t next_page;	/* Current page to examine */
	pgoff_t last_page;	/* Last page to examine */
	/*
	 * Extent to map - this can be after first_page because that can be
	 * fully mapped.  We somewhat abuse m_flags to store whether the extent
	 * is delalloc or unwritten.
	 */
	struct ext4_map_blocks map;
	struct ext4_io_submit io_submit;	/* IO submission data */
	unsigned int do_map:1;
};

static void mpage_release_unused_pages(struct mpage_da_data *mpd,
				       bool invalidate)
{
	int nr_pages, i;
	pgoff_t index, end;
	struct pagevec pvec;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	/* This is necessary when next_page == 0. */
	if (mpd->first_page >= mpd->next_page)
		return;

	index = mpd->first_page;
	end   = mpd->next_page - 1;
	if (invalidate) {
		ext4_lblk_t start, last;
		start = index << (PAGE_SHIFT - inode->i_blkbits);
		last = end << (PAGE_SHIFT - inode->i_blkbits);
		ext4_es_remove_extent(inode, start, last - start + 1);
	}

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			if (page->index > end)
				break;
			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			if (invalidate) {
				if (page_mapped(page))
					clear_page_dirty_for_io(page);
				block_invalidatepage(page, 0, PAGE_SIZE);
				ClearPageUptodate(page);
			}
			unlock_page(page);
		}
		index = pvec.pages[nr_pages - 1]->index + 1;
		pagevec_release(&pvec);
	}
}

static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct super_block *sb = inode->i_sb;
	struct ext4_inode_info *ei = EXT4_I(inode);

	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
	       EXT4_C2B(EXT4_SB(inode->i_sb),
			ext4_count_free_clusters(sb)));
	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
	       (long long) EXT4_C2B(EXT4_SB(sb),
		percpu_counter_sum(&sbi->s_freeclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
	       (long long) EXT4_C2B(EXT4_SB(sb),
		percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "Block reservation details");
	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
		 ei->i_reserved_data_blocks);
	return;
}

static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
}
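/*
 * Delalloc life cycle, in brief (a sketch of how the helpers here fit
 * together): a buffered write calls ext4_da_get_block_prep(), which marks
 * the buffer delayed instead of allocating a block; writeback later maps
 * the accumulated delayed extent for real, and the reservation made by
 * ext4_da_reserve_space() is settled in ext4_da_update_reserve_space().
 */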
/*
 * This function borrows code from the very beginning of
 * ext4_map_blocks, but assumes that the caller is from delayed write
 * time.  This function looks up the requested blocks and sets the
 * buffer delay bit under the protection of i_data_sem.
 */
static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
			      struct ext4_map_blocks *map,
			      struct buffer_head *bh)
{
	struct extent_status es;
	int retval;
	sector_t invalid_block = ~((sector_t) 0xffff);
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
		invalid_block = ~0;

	map->m_flags = 0;
	ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, map->m_len,
		  (unsigned long) map->m_lblk);

	/* First, look up the extent status tree */
	if (ext4_es_lookup_extent(inode, iblock, &es)) {
		if (ext4_es_is_hole(&es)) {
			retval = 0;
			down_read(&EXT4_I(inode)->i_data_sem);
			goto add_delayed;
		}

		/*
		 * A delayed extent could be allocated by fallocate,
		 * so we need to check it.
		 */
		if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
			map_bh(bh, inode->i_sb, invalid_block);
			set_buffer_new(bh);
			set_buffer_delay(bh);
			return 0;
		}

		map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
		retval = es.es_len - (iblock - es.es_lblk);
		if (retval > map->m_len)
			retval = map->m_len;
		map->m_len = retval;
		if (ext4_es_is_written(&es))
			map->m_flags |= EXT4_MAP_MAPPED;
		else if (ext4_es_is_unwritten(&es))
			map->m_flags |= EXT4_MAP_UNWRITTEN;
		else
			BUG_ON(1);

#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
#endif
		return retval;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_has_inline_data(inode))
		retval = 0;
	else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		retval = ext4_ext_map_blocks(NULL, inode, map, 0);
	else
		retval = ext4_ind_map_blocks(NULL, inode, map, 0);

add_delayed:
	if (retval == 0) {
		int ret;
		/*
		 * XXX: __block_prepare_write() unmaps passed block,
		 * is it OK?
		 */
		/*
		 * If the block was allocated from a previously allocated
		 * cluster, then we don't need to reserve it again.  However
		 * we still need to reserve metadata for every block we're
		 * going to write.
		 */
		if (EXT4_SB(inode->i_sb)->s_cluster_ratio == 1 ||
		    !ext4_find_delalloc_cluster(inode, map->m_lblk)) {
			ret = ext4_da_reserve_space(inode);
			if (ret) {
				/* not enough space to reserve */
				retval = ret;
				goto out_unlock;
			}
		}

		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    ~0, EXTENT_STATUS_DELAYED);
		if (ret) {
			retval = ret;
			goto out_unlock;
		}

		map_bh(bh, inode->i_sb, invalid_block);
		set_buffer_new(bh);
		set_buffer_delay(bh);
	} else if (retval > 0) {
		int ret;
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    map->m_pblk, status);
		if (ret != 0)
			retval = ret;
	}

out_unlock:
	up_read((&EXT4_I(inode)->i_data_sem));

	return retval;
}

/*
 * This is a special get_block_t callback which is used by
 * ext4_da_write_begin().  It will either return mapped block or
 * reserve space for a single block.
 *
 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
 * We also have b_blocknr = -1 and b_bdev initialized properly
 *
 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
 * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
 * initialized properly.
 */
int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct ext4_map_blocks map;
	int ret = 0;

	BUG_ON(create == 0);
	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);

	map.m_lblk = iblock;
	map.m_len = 1;

	/*
	 * First, we need to know whether the block is already allocated;
	 * preallocated blocks are unmapped but should be treated
	 * the same as allocated blocks.
	 */
	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
	if (ret <= 0)
		return ret;

	map_bh(bh, inode->i_sb, map.m_pblk);
	ext4_update_bh_state(bh, map.m_flags);

	if (buffer_unwritten(bh)) {
		/*
		 * A delayed write to an unwritten bh should be marked
		 * new and mapped.  Mapped ensures that we don't do
		 * get_block multiple times when we write to the same
		 * offset and new ensures that we do proper zero out
		 * for partial write.
		 */
		set_buffer_new(bh);
		set_buffer_mapped(bh);
	}
	return 0;
}

static int bget_one(handle_t *handle, struct buffer_head *bh)
{
	get_bh(bh);
	return 0;
}

static int bput_one(handle_t *handle, struct buffer_head *bh)
{
	put_bh(bh);
	return 0;
}

static int __ext4_journalled_writepage(struct page *page,
				       unsigned int len)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct buffer_head *page_bufs = NULL;
	handle_t *handle = NULL;
	int ret = 0, err = 0;
	int inline_data = ext4_has_inline_data(inode);
	struct buffer_head *inode_bh = NULL;

	ClearPageChecked(page);

	if (inline_data) {
		BUG_ON(page->index != 0);
		BUG_ON(len > ext4_get_max_inline_size(inode));
		inode_bh = ext4_journalled_write_inline_data(inode, len, page);
		if (inode_bh == NULL)
			goto out;
	} else {
		page_bufs = page_buffers(page);
		if (!page_bufs) {
			BUG();
			goto out;
		}
		ext4_walk_page_buffers(handle, page_bufs, 0, len,
				       NULL, bget_one);
	}
	/*
	 * We need to release the page lock before we start the
	 * journal, so grab a reference so the page won't disappear
	 * out from under us.
	 */
	get_page(page);
	unlock_page(page);

	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
				    ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		put_page(page);
		goto out_no_pagelock;
	}
	BUG_ON(!ext4_handle_valid(handle));

	lock_page(page);
	put_page(page);
	if (page->mapping != mapping) {
		/* The page got truncated from under us */
		ext4_journal_stop(handle);
		ret = 0;
		goto out;
	}

	if (inline_data) {
		BUFFER_TRACE(inode_bh, "get write access");
		ret = ext4_journal_get_write_access(handle, inode_bh);

		err = ext4_handle_dirty_metadata(handle, inode, inode_bh);

	} else {
		ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
					     do_journal_get_write_access);

		err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
					     write_end_fn);
	}
	if (ret == 0)
		ret = err;
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;

	if (!ext4_has_inline_data(inode))
		ext4_walk_page_buffers(NULL, page_bufs, 0, len,
				       NULL, bput_one);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
out:
	unlock_page(page);
out_no_pagelock:
	brelse(inode_bh);
	return ret;
}

/*
 * Note that we don't need to start a transaction unless we're journaling data
 * because we should have holes filled from ext4_page_mkwrite().  We even don't
 * need to file the inode to the transaction's list in ordered mode because if
 * we are writing back data added by write(), the inode is already there and if
 * we are writing back data modified via mmap(), no one guarantees in which
 * transaction the data will hit the disk.  In case we are journaling data, we
 * cannot start transaction directly because transaction start ranks above page
 * lock so we have to do some magic.
 *
 * This function can get called via...
1809 */ 1810 if (EXT4_SB(inode->i_sb)->s_cluster_ratio == 1 || 1811 !ext4_find_delalloc_cluster(inode, map->m_lblk)) { 1812 ret = ext4_da_reserve_space(inode); 1813 if (ret) { 1814 /* not enough space to reserve */ 1815 retval = ret; 1816 goto out_unlock; 1817 } 1818 } 1819 1820 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, 1821 ~0, EXTENT_STATUS_DELAYED); 1822 if (ret) { 1823 retval = ret; 1824 goto out_unlock; 1825 } 1826 1827 map_bh(bh, inode->i_sb, invalid_block); 1828 set_buffer_new(bh); 1829 set_buffer_delay(bh); 1830 } else if (retval > 0) { 1831 int ret; 1832 unsigned int status; 1833 1834 if (unlikely(retval != map->m_len)) { 1835 ext4_warning(inode->i_sb, 1836 "ES len assertion failed for inode " 1837 "%lu: retval %d != map->m_len %d", 1838 inode->i_ino, retval, map->m_len); 1839 WARN_ON(1); 1840 } 1841 1842 status = map->m_flags & EXT4_MAP_UNWRITTEN ? 1843 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; 1844 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, 1845 map->m_pblk, status); 1846 if (ret != 0) 1847 retval = ret; 1848 } 1849 1850 out_unlock: 1851 up_read((&EXT4_I(inode)->i_data_sem)); 1852 1853 return retval; 1854 } 1855 1856 /* 1857 * This is a special get_block_t callback which is used by 1858 * ext4_da_write_begin(). It will either return a mapped block or 1859 * reserve space for a single block. 1860 * 1861 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set. 1862 * We also have b_blocknr = -1 and b_bdev initialized properly 1863 * 1864 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set. 1865 * We also have b_blocknr set to the physical block of the unwritten extent 1866 * and b_bdev initialized properly. 1867 */ 1868 int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, 1869 struct buffer_head *bh, int create) 1870 { 1871 struct ext4_map_blocks map; 1872 int ret = 0; 1873 1874 BUG_ON(create == 0); 1875 BUG_ON(bh->b_size != inode->i_sb->s_blocksize); 1876 1877 map.m_lblk = iblock; 1878 map.m_len = 1; 1879 1880 /* 1881 * first, we need to know whether the block is allocated already; 1882 * preallocated blocks are unmapped but should be treated 1883 * the same as allocated blocks. 1884 */ 1885 ret = ext4_da_map_blocks(inode, iblock, &map, bh); 1886 if (ret <= 0) 1887 return ret; 1888 1889 map_bh(bh, inode->i_sb, map.m_pblk); 1890 ext4_update_bh_state(bh, map.m_flags); 1891 1892 if (buffer_unwritten(bh)) { 1893 /* A delayed write to unwritten bh should be marked 1894 * new and mapped. Mapped ensures that we don't do 1895 * get_block multiple times when we write to the same 1896 * offset and new ensures that we do proper zero out 1897 * for partial write.
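	 *
	 * Illustrative example (numbers are made up): with a 4k
	 * block size, a 100-byte write into a fallocated (unwritten)
	 * block must not fill the rest of the block with stale data
	 * read from disk; BH_New tells the generic write path to
	 * zero out the parts of the block the write does not cover.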
1898 */ 1899 set_buffer_new(bh); 1900 set_buffer_mapped(bh); 1901 } 1902 return 0; 1903 } 1904 1905 static int bget_one(handle_t *handle, struct buffer_head *bh) 1906 { 1907 get_bh(bh); 1908 return 0; 1909 } 1910 1911 static int bput_one(handle_t *handle, struct buffer_head *bh) 1912 { 1913 put_bh(bh); 1914 return 0; 1915 } 1916 1917 static int __ext4_journalled_writepage(struct page *page, 1918 unsigned int len) 1919 { 1920 struct address_space *mapping = page->mapping; 1921 struct inode *inode = mapping->host; 1922 struct buffer_head *page_bufs = NULL; 1923 handle_t *handle = NULL; 1924 int ret = 0, err = 0; 1925 int inline_data = ext4_has_inline_data(inode); 1926 struct buffer_head *inode_bh = NULL; 1927 1928 ClearPageChecked(page); 1929 1930 if (inline_data) { 1931 BUG_ON(page->index != 0); 1932 BUG_ON(len > ext4_get_max_inline_size(inode)); 1933 inode_bh = ext4_journalled_write_inline_data(inode, len, page); 1934 if (inode_bh == NULL) 1935 goto out; 1936 } else { 1937 page_bufs = page_buffers(page); 1938 if (!page_bufs) { 1939 BUG(); 1940 goto out; 1941 } 1942 ext4_walk_page_buffers(handle, page_bufs, 0, len, 1943 NULL, bget_one); 1944 } 1945 /* 1946 * We need to release the page lock before we start the 1947 * journal, so grab a reference so the page won't disappear 1948 * out from under us. 1949 */ 1950 get_page(page); 1951 unlock_page(page); 1952 1953 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1954 ext4_writepage_trans_blocks(inode)); 1955 if (IS_ERR(handle)) { 1956 ret = PTR_ERR(handle); 1957 put_page(page); 1958 goto out_no_pagelock; 1959 } 1960 BUG_ON(!ext4_handle_valid(handle)); 1961 1962 lock_page(page); 1963 put_page(page); 1964 if (page->mapping != mapping) { 1965 /* The page got truncated from under us */ 1966 ext4_journal_stop(handle); 1967 ret = 0; 1968 goto out; 1969 } 1970 1971 if (inline_data) { 1972 BUFFER_TRACE(inode_bh, "get write access"); 1973 ret = ext4_journal_get_write_access(handle, inode_bh); 1974 1975 err = ext4_handle_dirty_metadata(handle, inode, inode_bh); 1976 1977 } else { 1978 ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, 1979 do_journal_get_write_access); 1980 1981 err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, 1982 write_end_fn); 1983 } 1984 if (ret == 0) 1985 ret = err; 1986 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; 1987 err = ext4_journal_stop(handle); 1988 if (!ret) 1989 ret = err; 1990 1991 if (!ext4_has_inline_data(inode)) 1992 ext4_walk_page_buffers(NULL, page_bufs, 0, len, 1993 NULL, bput_one); 1994 ext4_set_inode_state(inode, EXT4_STATE_JDATA); 1995 out: 1996 unlock_page(page); 1997 out_no_pagelock: 1998 brelse(inode_bh); 1999 return ret; 2000 } 2001 2002 /* 2003 * Note that we don't need to start a transaction unless we're journaling data 2004 * because we should have holes filled from ext4_page_mkwrite(). We don't even 2005 * need to add the inode to the transaction's list in ordered mode because if 2006 * we are writing back data added by write(), the inode is already there and if 2007 * we are writing back data modified via mmap(), no one guarantees in which 2008 * transaction the data will hit the disk. In case we are journaling data, we 2009 * cannot start a transaction directly because transaction start ranks above page 2010 * lock so we have to do some magic. 2011 * 2012 * This function can get called via...
2013 * - ext4_writepages after taking page lock (have journal handle) 2014 * - journal_submit_inode_data_buffers (no journal handle) 2015 * - shrink_page_list via the kswapd/direct reclaim (no journal handle) 2016 * - grab_page_cache when doing write_begin (have journal handle) 2017 * 2018 * We don't do any block allocation in this function. If we have a page with 2019 * multiple blocks we need to write those buffer_heads that are mapped. This 2020 * is important for mmap-based writes. So if, with blocksize 1K, we do 2021 * truncate(f, 1024); 2022 * a = mmap(f, 0, 4096); 2023 * a[0] = 'a'; 2024 * truncate(f, 4096); 2025 * then the first buffer_head in the page is mapped via the page_mkwrite 2026 * callback but the other buffer_heads would be unmapped but dirty (dirtied 2027 * via do_wp_page). So writepage should write the first block. If we modify 2028 * the mmap area beyond 1024 we will again get a page_fault and the 2029 * page_mkwrite callback will do the block allocation and mark the 2030 * buffer_heads mapped. 2031 * 2032 * We redirty the page if we have any buffer_heads that are either delayed or 2033 * unwritten in the page. 2034 * 2035 * We can get recursively called as shown below. 2036 * 2037 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() -> 2038 * ext4_writepage() 2039 * 2040 * But since we don't do any block allocation we should not deadlock. 2041 * The page also has the dirty flag cleared so we don't get a recursive page_lock. 2042 */ 2043 static int ext4_writepage(struct page *page, 2044 struct writeback_control *wbc) 2045 { 2046 int ret = 0; 2047 loff_t size; 2048 unsigned int len; 2049 struct buffer_head *page_bufs = NULL; 2050 struct inode *inode = page->mapping->host; 2051 struct ext4_io_submit io_submit; 2052 bool keep_towrite = false; 2053 2054 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) { 2055 ext4_invalidatepage(page, 0, PAGE_SIZE); 2056 unlock_page(page); 2057 return -EIO; 2058 } 2059 2060 trace_ext4_writepage(page); 2061 size = i_size_read(inode); 2062 if (page->index == size >> PAGE_SHIFT) 2063 len = size & ~PAGE_MASK; 2064 else 2065 len = PAGE_SIZE; 2066 2067 page_bufs = page_buffers(page); 2068 /* 2069 * We cannot do block allocation or other extent handling in this 2070 * function. If there are buffers needing that, we have to redirty 2071 * the page. But we may reach here when we do a journal commit via 2072 * journal_submit_inode_data_buffers() and in that case we must write 2073 * allocated buffers to achieve data=ordered mode guarantees. 2074 * 2075 * Also, if there is only one buffer per page (the fs block 2076 * size == the page size), if one buffer needs block 2077 * allocation or needs to modify the extent tree to clear the 2078 * unwritten flag, we know that the page can't be written at 2079 * all, so we might as well refuse the write immediately. 2080 * Unfortunately if the block size != page size, we can't as 2081 * easily detect this case using ext4_walk_page_buffers(), but 2082 * for the extremely common case, this is an optimization that 2083 * skips a useless round trip through ext4_bio_write_page(). 2084 */ 2085 if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL, 2086 ext4_bh_delay_or_unwritten)) { 2087 redirty_page_for_writepage(wbc, page); 2088 if ((current->flags & PF_MEMALLOC) || 2089 (inode->i_sb->s_blocksize == PAGE_SIZE)) { 2090 /* 2091 * For memory cleaning there's no point in writing only 2092 * some buffers. So just bail out. Warn if we came here 2093 * from direct reclaim.
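			 * (PF_MEMALLOC set while PF_KSWAPD is clear is how
			 * the check below distinguishes direct reclaim from
			 * kswapd writeback.)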
2094 */ 2095 WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) 2096 == PF_MEMALLOC); 2097 unlock_page(page); 2098 return 0; 2099 } 2100 keep_towrite = true; 2101 } 2102 2103 if (PageChecked(page) && ext4_should_journal_data(inode)) 2104 /* 2105 * It's mmapped pagecache. Add buffers and journal it. There 2106 * doesn't seem much point in redirtying the page here. 2107 */ 2108 return __ext4_journalled_writepage(page, len); 2109 2110 ext4_io_submit_init(&io_submit, wbc); 2111 io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS); 2112 if (!io_submit.io_end) { 2113 redirty_page_for_writepage(wbc, page); 2114 unlock_page(page); 2115 return -ENOMEM; 2116 } 2117 ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite); 2118 ext4_io_submit(&io_submit); 2119 /* Drop io_end reference we got from init */ 2120 ext4_put_io_end_defer(io_submit.io_end); 2121 return ret; 2122 } 2123 2124 static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page) 2125 { 2126 int len; 2127 loff_t size = i_size_read(mpd->inode); 2128 int err; 2129 2130 BUG_ON(page->index != mpd->first_page); 2131 if (page->index == size >> PAGE_SHIFT) 2132 len = size & ~PAGE_MASK; 2133 else 2134 len = PAGE_SIZE; 2135 clear_page_dirty_for_io(page); 2136 err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false); 2137 if (!err) 2138 mpd->wbc->nr_to_write--; 2139 mpd->first_page++; 2140 2141 return err; 2142 } 2143 2144 #define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay)) 2145 2146 /* 2147 * mballoc gives us at most this number of blocks... 2148 * XXX: That seems to be only a limitation of ext4_mb_normalize_request(). 2149 * The rest of mballoc seems to handle chunks up to full group size. 2150 */ 2151 #define MAX_WRITEPAGES_EXTENT_LEN 2048 2152 2153 /* 2154 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map 2155 * 2156 * @mpd - extent of blocks 2157 * @lblk - logical number of the block in the file 2158 * @bh - buffer head we want to add to the extent 2159 * 2160 * The function is used to collect contig. blocks in the same state. If the 2161 * buffer doesn't require mapping for writeback and we haven't started the 2162 * extent of buffers to map yet, the function returns 'true' immediately - the 2163 * caller can write the buffer right away. Otherwise the function returns true 2164 * if the block has been added to the extent, false if the block couldn't be 2165 * added. 2166 */ 2167 static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk, 2168 struct buffer_head *bh) 2169 { 2170 struct ext4_map_blocks *map = &mpd->map; 2171 2172 /* Buffer that doesn't need mapping for writeback? */ 2173 if (!buffer_dirty(bh) || !buffer_mapped(bh) || 2174 (!buffer_delay(bh) && !buffer_unwritten(bh))) { 2175 /* So far no extent to map => we write the buffer right away */ 2176 if (map->m_len == 0) 2177 return true; 2178 return false; 2179 } 2180 2181 /* First block in the extent? */ 2182 if (map->m_len == 0) { 2183 /* We cannot map unless handle is started... */ 2184 if (!mpd->do_map) 2185 return false; 2186 map->m_lblk = lblk; 2187 map->m_len = 1; 2188 map->m_flags = bh->b_state & BH_FLAGS; 2189 return true; 2190 } 2191 2192 /* Don't go larger than mballoc is willing to allocate */ 2193 if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN) 2194 return false; 2195 2196 /* Can we merge the block to our big extent? 
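	 * Only if it is logically contiguous with what we have collected so
	 * far and its delay/unwritten state (BH_FLAGS) matches - e.g. a
	 * delayed buffer cannot be merged into an extent of unwritten
	 * buffers, because the two need different mapping treatment.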
*/ 2197 if (lblk == map->m_lblk + map->m_len && 2198 (bh->b_state & BH_FLAGS) == map->m_flags) { 2199 map->m_len++; 2200 return true; 2201 } 2202 return false; 2203 } 2204 2205 /* 2206 * mpage_process_page_bufs - submit page buffers for IO or add them to extent 2207 * 2208 * @mpd - extent of blocks for mapping 2209 * @head - the first buffer in the page 2210 * @bh - buffer we should start processing from 2211 * @lblk - logical number of the block in the file corresponding to @bh 2212 * 2213 * Walk through page buffers from @bh up to @head (exclusive) and either submit 2214 * the page for IO if all buffers in this page were mapped and there's no 2215 * accumulated extent of buffers to map or add buffers in the page to the 2216 * extent of buffers to map. The function returns 1 if the caller can continue 2217 * by processing the next page, 0 if it should stop adding buffers to the 2218 * extent to map because we cannot extend it anymore. It can also return a 2219 * value < 0 in case of error during IO submission. 2220 */ 2221 static int mpage_process_page_bufs(struct mpage_da_data *mpd, 2222 struct buffer_head *head, 2223 struct buffer_head *bh, 2224 ext4_lblk_t lblk) 2225 { 2226 struct inode *inode = mpd->inode; 2227 int err; 2228 ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1) 2229 >> inode->i_blkbits; 2230 2231 do { 2232 BUG_ON(buffer_locked(bh)); 2233 2234 if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) { 2235 /* Found extent to map? */ 2236 if (mpd->map.m_len) 2237 return 0; 2238 /* Buffer needs mapping and handle is not started? */ 2239 if (!mpd->do_map) 2240 return 0; 2241 /* Everything mapped so far and we hit EOF */ 2242 break; 2243 } 2244 } while (lblk++, (bh = bh->b_this_page) != head); 2245 /* So far everything mapped? Submit the page for IO. */ 2246 if (mpd->map.m_len == 0) { 2247 err = mpage_submit_page(mpd, head->b_page); 2248 if (err < 0) 2249 return err; 2250 } 2251 return lblk < blocks; 2252 } 2253 2254 /* 2255 * mpage_map_buffers - update buffers corresponding to changed extent and 2256 * submit fully mapped pages for IO 2257 * 2258 * @mpd - description of extent to map, on return next extent to map 2259 * 2260 * Scan buffers corresponding to changed extent (we expect corresponding pages 2261 * to be already locked) and update buffer state according to new extent state. 2262 * We map delalloc buffers to their physical location, clear unwritten bits, 2263 * and mark buffers as uninit when we perform writes to unwritten extents 2264 * and do extent conversion after IO is finished. If the last page is not fully 2265 * mapped, we update @map to the next extent in the last page that needs 2266 * mapping. Otherwise we submit the page for IO.
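 *
 * A blocksize < pagesize sketch (illustrative numbers): with 1k blocks
 * and 4k pages, a mapped extent may end at the second buffer of the
 * last locked page; if the remaining buffers of that page still need
 * mapping, @mpd->map is pointed at them and the page is only submitted
 * once a later iteration has mapped the rest of it.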
2267 */ 2268 static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd) 2269 { 2270 struct pagevec pvec; 2271 int nr_pages, i; 2272 struct inode *inode = mpd->inode; 2273 struct buffer_head *head, *bh; 2274 int bpp_bits = PAGE_SHIFT - inode->i_blkbits; 2275 pgoff_t start, end; 2276 ext4_lblk_t lblk; 2277 sector_t pblock; 2278 int err; 2279 2280 start = mpd->map.m_lblk >> bpp_bits; 2281 end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits; 2282 lblk = start << bpp_bits; 2283 pblock = mpd->map.m_pblk; 2284 2285 pagevec_init(&pvec, 0); 2286 while (start <= end) { 2287 nr_pages = pagevec_lookup(&pvec, inode->i_mapping, start, 2288 PAGEVEC_SIZE); 2289 if (nr_pages == 0) 2290 break; 2291 for (i = 0; i < nr_pages; i++) { 2292 struct page *page = pvec.pages[i]; 2293 2294 if (page->index > end) 2295 break; 2296 /* Up to 'end' pages must be contiguous */ 2297 BUG_ON(page->index != start); 2298 bh = head = page_buffers(page); 2299 do { 2300 if (lblk < mpd->map.m_lblk) 2301 continue; 2302 if (lblk >= mpd->map.m_lblk + mpd->map.m_len) { 2303 /* 2304 * Buffer after end of mapped extent. 2305 * Find next buffer in the page to map. 2306 */ 2307 mpd->map.m_len = 0; 2308 mpd->map.m_flags = 0; 2309 /* 2310 * FIXME: If dioread_nolock supports 2311 * blocksize < pagesize, we need to make 2312 * sure we add size mapped so far to 2313 * io_end->size as the following call 2314 * can submit the page for IO. 2315 */ 2316 err = mpage_process_page_bufs(mpd, head, 2317 bh, lblk); 2318 pagevec_release(&pvec); 2319 if (err > 0) 2320 err = 0; 2321 return err; 2322 } 2323 if (buffer_delay(bh)) { 2324 clear_buffer_delay(bh); 2325 bh->b_blocknr = pblock++; 2326 } 2327 clear_buffer_unwritten(bh); 2328 } while (lblk++, (bh = bh->b_this_page) != head); 2329 2330 /* 2331 * FIXME: This is going to break if dioread_nolock 2332 * supports blocksize < pagesize as we will try to 2333 * convert potentially unmapped parts of inode. 2334 */ 2335 mpd->io_submit.io_end->size += PAGE_SIZE; 2336 /* Page fully mapped - let IO run! */ 2337 err = mpage_submit_page(mpd, page); 2338 if (err < 0) { 2339 pagevec_release(&pvec); 2340 return err; 2341 } 2342 start++; 2343 } 2344 pagevec_release(&pvec); 2345 } 2346 /* Extent fully mapped and matches with page boundary. We are done. */ 2347 mpd->map.m_len = 0; 2348 mpd->map.m_flags = 0; 2349 return 0; 2350 } 2351 2352 static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd) 2353 { 2354 struct inode *inode = mpd->inode; 2355 struct ext4_map_blocks *map = &mpd->map; 2356 int get_blocks_flags; 2357 int err, dioread_nolock; 2358 2359 trace_ext4_da_write_pages_extent(inode, map); 2360 /* 2361 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or 2362 * to convert an unwritten extent to be initialized (in the case 2363 * where we have written into one or more preallocated blocks). It is 2364 * possible that we're going to need more metadata blocks than 2365 * previously reserved. However we must not fail because we're in 2366 * writeback and there is nothing we can do about it so it might result 2367 * in data loss. So use reserved blocks to allocate metadata if 2368 * possible. 2369 * 2370 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if 2371 * the blocks in question are delalloc blocks. This indicates 2372 * that the blocks and quotas have already been checked when 2373 * the data was copied into the page cache.
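	 *
	 * So a typical delalloc writeback request below ends up passing
	 * roughly this flag combination (a summary of the code that
	 * follows, not an additional knob):
	 *
	 *	EXT4_GET_BLOCKS_CREATE | EXT4_GET_BLOCKS_METADATA_NOFAIL |
	 *	EXT4_GET_BLOCKS_IO_SUBMIT | EXT4_GET_BLOCKS_DELALLOC_RESERVE
	 *
	 * with EXT4_GET_BLOCKS_IO_CREATE_EXT added when dioread_nolock is
	 * enabled, so that freshly allocated extents stay unwritten until
	 * IO completion.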
2374 */ 2375 get_blocks_flags = EXT4_GET_BLOCKS_CREATE | 2376 EXT4_GET_BLOCKS_METADATA_NOFAIL | 2377 EXT4_GET_BLOCKS_IO_SUBMIT; 2378 dioread_nolock = ext4_should_dioread_nolock(inode); 2379 if (dioread_nolock) 2380 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; 2381 if (map->m_flags & (1 << BH_Delay)) 2382 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; 2383 2384 err = ext4_map_blocks(handle, inode, map, get_blocks_flags); 2385 if (err < 0) 2386 return err; 2387 if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) { 2388 if (!mpd->io_submit.io_end->handle && 2389 ext4_handle_valid(handle)) { 2390 mpd->io_submit.io_end->handle = handle->h_rsv_handle; 2391 handle->h_rsv_handle = NULL; 2392 } 2393 ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end); 2394 } 2395 2396 BUG_ON(map->m_len == 0); 2397 if (map->m_flags & EXT4_MAP_NEW) { 2398 clean_bdev_aliases(inode->i_sb->s_bdev, map->m_pblk, 2399 map->m_len); 2400 } 2401 return 0; 2402 } 2403 2404 /* 2405 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length 2406 * mpd->len and submit pages underlying it for IO 2407 * 2408 * @handle - handle for journal operations 2409 * @mpd - extent to map 2410 * @give_up_on_write - we set this to true iff there is a fatal error and there 2411 * is no hope of writing the data. The caller should discard 2412 * dirty pages to avoid infinite loops. 2413 * 2414 * The function maps extent starting at mpd->lblk of length mpd->len. If it is 2415 * delayed, blocks are allocated, if it is unwritten, we may need to convert 2416 * them to initialized or split the described range from larger unwritten 2417 * extent. Note that we need not map all the described range since allocation 2418 * can return less blocks or the range is covered by more unwritten extents. We 2419 * cannot map more because we are limited by reserved transaction credits. On 2420 * the other hand we always make sure that the last touched page is fully 2421 * mapped so that it can be written out (and thus forward progress is 2422 * guaranteed). After mapping we submit all mapped pages for IO. 2423 */ 2424 static int mpage_map_and_submit_extent(handle_t *handle, 2425 struct mpage_da_data *mpd, 2426 bool *give_up_on_write) 2427 { 2428 struct inode *inode = mpd->inode; 2429 struct ext4_map_blocks *map = &mpd->map; 2430 int err; 2431 loff_t disksize; 2432 int progress = 0; 2433 2434 mpd->io_submit.io_end->offset = 2435 ((loff_t)map->m_lblk) << inode->i_blkbits; 2436 do { 2437 err = mpage_map_one_extent(handle, mpd); 2438 if (err < 0) { 2439 struct super_block *sb = inode->i_sb; 2440 2441 if (ext4_forced_shutdown(EXT4_SB(sb)) || 2442 EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED) 2443 goto invalidate_dirty_pages; 2444 /* 2445 * Let the upper layers retry transient errors. 2446 * In the case of ENOSPC, if ext4_count_free_clusters() 2447 * is non-zero, a commit should free up blocks. 2448 */ 2449 if ((err == -ENOMEM) || 2450 (err == -ENOSPC && ext4_count_free_clusters(sb))) { 2451 if (progress) 2452 goto update_disksize; 2453 return err; 2454 } 2455 ext4_msg(sb, KERN_CRIT, 2456 "Delayed block allocation failed for " 2457 "inode %lu at logical offset %llu with" 2458 " max blocks %u with error %d", 2459 inode->i_ino, 2460 (unsigned long long)map->m_lblk, 2461 (unsigned)map->m_len, -err); 2462 ext4_msg(sb, KERN_CRIT, 2463 "This should not happen!!
Data will " 2464 "be lost\n"); 2465 if (err == -ENOSPC) 2466 ext4_print_free_blocks(inode); 2467 invalidate_dirty_pages: 2468 *give_up_on_write = true; 2469 return err; 2470 } 2471 progress = 1; 2472 /* 2473 * Update buffer state, submit mapped pages, and get us new 2474 * extent to map 2475 */ 2476 err = mpage_map_and_submit_buffers(mpd); 2477 if (err < 0) 2478 goto update_disksize; 2479 } while (map->m_len); 2480 2481 update_disksize: 2482 /* 2483 * Update on-disk size after IO is submitted. Races with 2484 * truncate are avoided by checking i_size under i_data_sem. 2485 */ 2486 disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT; 2487 if (disksize > EXT4_I(inode)->i_disksize) { 2488 int err2; 2489 loff_t i_size; 2490 2491 down_write(&EXT4_I(inode)->i_data_sem); 2492 i_size = i_size_read(inode); 2493 if (disksize > i_size) 2494 disksize = i_size; 2495 if (disksize > EXT4_I(inode)->i_disksize) 2496 EXT4_I(inode)->i_disksize = disksize; 2497 up_write(&EXT4_I(inode)->i_data_sem); 2498 err2 = ext4_mark_inode_dirty(handle, inode); 2499 if (err2) 2500 ext4_error(inode->i_sb, 2501 "Failed to mark inode %lu dirty", 2502 inode->i_ino); 2503 if (!err) 2504 err = err2; 2505 } 2506 return err; 2507 } 2508 2509 /* 2510 * Calculate the total number of credits to reserve for one writepages 2511 * iteration. This is called from ext4_writepages(). We map an extent of 2512 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping 2513 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN + 2514 * bpp - 1 blocks in bpp different extents. 2515 */ 2516 static int ext4_da_writepages_trans_blocks(struct inode *inode) 2517 { 2518 int bpp = ext4_journal_blocks_per_page(inode); 2519 2520 return ext4_meta_trans_blocks(inode, 2521 MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp); 2522 } 2523 2524 /* 2525 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages 2526 * and underlying extent to map 2527 * 2528 * @mpd - where to look for pages 2529 * 2530 * Walk dirty pages in the mapping. If they are fully mapped, submit them for 2531 * IO immediately. When we find a page which isn't mapped we start accumulating 2532 * an extent of buffers underlying these pages that needs mapping (formed by 2533 * either delayed or unwritten buffers). We also lock the pages containing 2534 * these buffers. The extent found is returned in @mpd structure (starting at 2535 * mpd->lblk with length mpd->len blocks). 2536 * 2537 * Note that this function can attach bios to one io_end structure which are 2538 * neither logically nor physically contiguous. Although it may seem an 2539 * unnecessary complication, it is actually inevitable in blocksize < pagesize 2540 * case as we need to track IO to all buffers underlying a page in one io_end.
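 *
 * For instance (an illustrative 1k-block / 4k-page sketch), one buffer
 * of a page may be submitted as part of one extent and a later buffer
 * of the same page as part of another, physically distant extent; both
 * bios must be accounted to the same io_end so that the page is only
 * considered written back once all of its buffers are.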
2541 */ 2542 static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) 2543 { 2544 struct address_space *mapping = mpd->inode->i_mapping; 2545 struct pagevec pvec; 2546 unsigned int nr_pages; 2547 long left = mpd->wbc->nr_to_write; 2548 pgoff_t index = mpd->first_page; 2549 pgoff_t end = mpd->last_page; 2550 int tag; 2551 int i, err = 0; 2552 int blkbits = mpd->inode->i_blkbits; 2553 ext4_lblk_t lblk; 2554 struct buffer_head *head; 2555 2556 if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages) 2557 tag = PAGECACHE_TAG_TOWRITE; 2558 else 2559 tag = PAGECACHE_TAG_DIRTY; 2560 2561 pagevec_init(&pvec, 0); 2562 mpd->map.m_len = 0; 2563 mpd->next_page = index; 2564 while (index <= end) { 2565 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, 2566 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); 2567 if (nr_pages == 0) 2568 goto out; 2569 2570 for (i = 0; i < nr_pages; i++) { 2571 struct page *page = pvec.pages[i]; 2572 2573 /* 2574 * At this point, the page may be truncated or 2575 * invalidated (changing page->mapping to NULL), or 2576 * even swizzled back from swapper_space to tmpfs file 2577 * mapping. However, page->index will not change 2578 * because we have a reference on the page. 2579 */ 2580 if (page->index > end) 2581 goto out; 2582 2583 /* 2584 * Accumulated enough dirty pages? This doesn't apply 2585 * to WB_SYNC_ALL mode. For integrity sync we have to 2586 * keep going because someone may be concurrently 2587 * dirtying pages, and we might have synced a lot of 2588 * newly appeared dirty pages, but have not synced all 2589 * of the old dirty pages. 2590 */ 2591 if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0) 2592 goto out; 2593 2594 /* If we can't merge this page, we are done. */ 2595 if (mpd->map.m_len > 0 && mpd->next_page != page->index) 2596 goto out; 2597 2598 lock_page(page); 2599 /* 2600 * If the page is no longer dirty, or its mapping no 2601 * longer corresponds to inode we are writing (which 2602 * means it has been truncated or invalidated), or the 2603 * page is already under writeback and we are not doing 2604 * a data integrity writeback, skip the page 2605 */ 2606 if (!PageDirty(page) || 2607 (PageWriteback(page) && 2608 (mpd->wbc->sync_mode == WB_SYNC_NONE)) || 2609 unlikely(page->mapping != mapping)) { 2610 unlock_page(page); 2611 continue; 2612 } 2613 2614 wait_on_page_writeback(page); 2615 BUG_ON(PageWriteback(page)); 2616 2617 if (mpd->map.m_len == 0) 2618 mpd->first_page = page->index; 2619 mpd->next_page = page->index + 1; 2620 /* Add all dirty buffers to mpd */ 2621 lblk = ((ext4_lblk_t)page->index) << 2622 (PAGE_SHIFT - blkbits); 2623 head = page_buffers(page); 2624 err = mpage_process_page_bufs(mpd, head, head, lblk); 2625 if (err <= 0) 2626 goto out; 2627 err = 0; 2628 left--; 2629 } 2630 pagevec_release(&pvec); 2631 cond_resched(); 2632 } 2633 return 0; 2634 out: 2635 pagevec_release(&pvec); 2636 return err; 2637 } 2638 2639 static int __writepage(struct page *page, struct writeback_control *wbc, 2640 void *data) 2641 { 2642 struct address_space *mapping = data; 2643 int ret = ext4_writepage(page, wbc); 2644 mapping_set_error(mapping, ret); 2645 return ret; 2646 } 2647 2648 static int ext4_writepages(struct address_space *mapping, 2649 struct writeback_control *wbc) 2650 { 2651 pgoff_t writeback_index = 0; 2652 long nr_to_write = wbc->nr_to_write; 2653 int range_whole = 0; 2654 int cycled = 1; 2655 handle_t *handle = NULL; 2656 struct mpage_da_data mpd; 2657 struct inode *inode = mapping->host; 2658 int 
needed_blocks, rsv_blocks = 0, ret = 0; 2659 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); 2660 bool done; 2661 struct blk_plug plug; 2662 bool give_up_on_write = false; 2663 2664 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) 2665 return -EIO; 2666 2667 percpu_down_read(&sbi->s_journal_flag_rwsem); 2668 trace_ext4_writepages(inode, wbc); 2669 2670 if (dax_mapping(mapping)) { 2671 ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, 2672 wbc); 2673 goto out_writepages; 2674 } 2675 2676 /* 2677 * No pages to write? This is mainly a kludge to avoid starting 2678 * a transaction for special inodes like journal inode on last iput() 2679 * because that could violate lock ordering on umount 2680 */ 2681 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 2682 goto out_writepages; 2683 2684 if (ext4_should_journal_data(inode)) { 2685 struct blk_plug plug; 2686 2687 blk_start_plug(&plug); 2688 ret = write_cache_pages(mapping, wbc, __writepage, mapping); 2689 blk_finish_plug(&plug); 2690 goto out_writepages; 2691 } 2692 2693 /* 2694 * If the filesystem has aborted, it is read-only, so return 2695 * right away instead of dumping stack traces later on that 2696 * will obscure the real source of the problem. We test 2697 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because 2698 * the latter could be true if the filesystem is mounted 2699 * read-only, and in that case, ext4_writepages should 2700 * *never* be called, so if that ever happens, we would want 2701 * the stack trace. 2702 */ 2703 if (unlikely(ext4_forced_shutdown(EXT4_SB(mapping->host->i_sb)) || 2704 sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) { 2705 ret = -EROFS; 2706 goto out_writepages; 2707 } 2708 2709 if (ext4_should_dioread_nolock(inode)) { 2710 /* 2711 * We may need to convert up to one extent per block in 2712 * the page and we may dirty the inode. 2713 */ 2714 rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits); 2715 } 2716 2717 /* 2718 * If we have inline data and arrive here, it means that 2719 * we will soon create the block for the 1st page, so 2720 * we'd better clear the inline data here. 2721 */ 2722 if (ext4_has_inline_data(inode)) { 2723 /* Just inode will be modified... */ 2724 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1); 2725 if (IS_ERR(handle)) { 2726 ret = PTR_ERR(handle); 2727 goto out_writepages; 2728 } 2729 BUG_ON(ext4_test_inode_state(inode, 2730 EXT4_STATE_MAY_INLINE_DATA)); 2731 ext4_destroy_inline_data(handle, inode); 2732 ext4_journal_stop(handle); 2733 } 2734 2735 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 2736 range_whole = 1; 2737 2738 if (wbc->range_cyclic) { 2739 writeback_index = mapping->writeback_index; 2740 if (writeback_index) 2741 cycled = 0; 2742 mpd.first_page = writeback_index; 2743 mpd.last_page = -1; 2744 } else { 2745 mpd.first_page = wbc->range_start >> PAGE_SHIFT; 2746 mpd.last_page = wbc->range_end >> PAGE_SHIFT; 2747 } 2748 2749 mpd.inode = inode; 2750 mpd.wbc = wbc; 2751 ext4_io_submit_init(&mpd.io_submit, wbc); 2752 retry: 2753 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 2754 tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page); 2755 done = false; 2756 blk_start_plug(&plug); 2757 2758 /* 2759 * First writeback pages that don't need mapping - we can avoid 2760 * starting a transaction unnecessarily and also avoid being blocked 2761 * in the block layer on device congestion while having transaction 2762 * started. 
2763 */ 2764 mpd.do_map = 0; 2765 mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL); 2766 if (!mpd.io_submit.io_end) { 2767 ret = -ENOMEM; 2768 goto unplug; 2769 } 2770 ret = mpage_prepare_extent_to_map(&mpd); 2771 /* Submit prepared bio */ 2772 ext4_io_submit(&mpd.io_submit); 2773 ext4_put_io_end_defer(mpd.io_submit.io_end); 2774 mpd.io_submit.io_end = NULL; 2775 /* Unlock pages we didn't use */ 2776 mpage_release_unused_pages(&mpd, false); 2777 if (ret < 0) 2778 goto unplug; 2779 2780 while (!done && mpd.first_page <= mpd.last_page) { 2781 /* For each extent of pages we use new io_end */ 2782 mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL); 2783 if (!mpd.io_submit.io_end) { 2784 ret = -ENOMEM; 2785 break; 2786 } 2787 2788 /* 2789 * We have two constraints: We find one extent to map and we 2790 * must always write out the whole page (makes a difference when 2791 * blocksize < pagesize) so that we don't block on IO when we 2792 * try to write out the rest of the page. Journalled mode is 2793 * not supported by delalloc. 2794 */ 2795 BUG_ON(ext4_should_journal_data(inode)); 2796 needed_blocks = ext4_da_writepages_trans_blocks(inode); 2797 2798 /* start a new transaction */ 2799 handle = ext4_journal_start_with_reserve(inode, 2800 EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks); 2801 if (IS_ERR(handle)) { 2802 ret = PTR_ERR(handle); 2803 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " 2804 "%ld pages, ino %lu; err %d", __func__, 2805 wbc->nr_to_write, inode->i_ino, ret); 2806 /* Release allocated io_end */ 2807 ext4_put_io_end(mpd.io_submit.io_end); 2808 mpd.io_submit.io_end = NULL; 2809 break; 2810 } 2811 mpd.do_map = 1; 2812 2813 trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc); 2814 ret = mpage_prepare_extent_to_map(&mpd); 2815 if (!ret) { 2816 if (mpd.map.m_len) 2817 ret = mpage_map_and_submit_extent(handle, &mpd, 2818 &give_up_on_write); 2819 else { 2820 /* 2821 * We scanned the whole range (or exhausted 2822 * nr_to_write), submitted what was mapped and 2823 * didn't find anything needing mapping. We are 2824 * done. 2825 */ 2826 done = true; 2827 } 2828 } 2829 /* 2830 * Caution: If the handle is synchronous, 2831 * ext4_journal_stop() can wait for transaction commit 2832 * to finish which may depend on writeback of pages to 2833 * complete or on page lock to be released. In that 2834 * case, we have to wait until after we have 2835 * submitted all the IO, released page locks we hold, 2836 * and dropped io_end reference (for extent conversion 2837 * to be able to complete) before stopping the handle. 2838 */ 2839 if (!ext4_handle_valid(handle) || handle->h_sync == 0) { 2840 ext4_journal_stop(handle); 2841 handle = NULL; 2842 mpd.do_map = 0; 2843 } 2844 /* Submit prepared bio */ 2845 ext4_io_submit(&mpd.io_submit); 2846 /* Unlock pages we didn't use */ 2847 mpage_release_unused_pages(&mpd, give_up_on_write); 2848 /* 2849 * Drop our io_end reference we got from init. We have 2850 * to be careful and use deferred io_end finishing if 2851 * we are still holding the transaction as we can 2852 * release the last reference to io_end which may end 2853 * up doing unwritten extent conversion.
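		 * (Finishing the io_end may itself need to start a
		 * transaction for the conversion, which must not happen
		 * while this handle is still held - hence the deferred
		 * variant below.)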
2854 */ 2855 if (handle) { 2856 ext4_put_io_end_defer(mpd.io_submit.io_end); 2857 ext4_journal_stop(handle); 2858 } else 2859 ext4_put_io_end(mpd.io_submit.io_end); 2860 mpd.io_submit.io_end = NULL; 2861 2862 if (ret == -ENOSPC && sbi->s_journal) { 2863 /* 2864 * Commit the transaction which would 2865 * free blocks released in the transaction 2866 * and try again 2867 */ 2868 jbd2_journal_force_commit_nested(sbi->s_journal); 2869 ret = 0; 2870 continue; 2871 } 2872 /* Fatal error - ENOMEM, EIO... */ 2873 if (ret) 2874 break; 2875 } 2876 unplug: 2877 blk_finish_plug(&plug); 2878 if (!ret && !cycled && wbc->nr_to_write > 0) { 2879 cycled = 1; 2880 mpd.last_page = writeback_index - 1; 2881 mpd.first_page = 0; 2882 goto retry; 2883 } 2884 2885 /* Update index */ 2886 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 2887 /* 2888 * Set the writeback_index so that range_cyclic 2889 * mode will write it back later 2890 */ 2891 mapping->writeback_index = mpd.first_page; 2892 2893 out_writepages: 2894 trace_ext4_writepages_result(inode, wbc, ret, 2895 nr_to_write - wbc->nr_to_write); 2896 percpu_up_read(&sbi->s_journal_flag_rwsem); 2897 return ret; 2898 } 2899 2900 static int ext4_nonda_switch(struct super_block *sb) 2901 { 2902 s64 free_clusters, dirty_clusters; 2903 struct ext4_sb_info *sbi = EXT4_SB(sb); 2904 2905 /* 2906 * Switch to non-delalloc mode if we are running low 2907 * on free blocks. The free block accounting via percpu 2908 * counters can get slightly wrong because percpu_counter_batch 2909 * amounts accumulate on each CPU without updating the global counters. 2910 * Delalloc needs accurate free block accounting, so switch 2911 * to non-delalloc when we are near the error range. 2912 */ 2913 free_clusters = 2914 percpu_counter_read_positive(&sbi->s_freeclusters_counter); 2915 dirty_clusters = 2916 percpu_counter_read_positive(&sbi->s_dirtyclusters_counter); 2917 /* 2918 * Start pushing delalloc when 1/2 of free blocks are dirty.
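	 *
	 * Worked example (illustrative numbers): with free_clusters == 100,
	 * writeback is kicked below once dirty_clusters exceeds 50, and the
	 * fallback to non-delalloc triggers once dirty_clusters exceeds ~66
	 * (i.e. free < 1.5 * dirty) or free_clusters drops under
	 * dirty_clusters + EXT4_FREECLUSTERS_WATERMARK.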
2919 */ 2920 if (dirty_clusters && (free_clusters < 2 * dirty_clusters)) 2921 try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE); 2922 2923 if (2 * free_clusters < 3 * dirty_clusters || 2924 free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) { 2925 /* 2926 * free block count is less than 150% of dirty blocks 2927 * or free blocks is less than watermark 2928 */ 2929 return 1; 2930 } 2931 return 0; 2932 } 2933 2934 /* We always reserve for an inode update; the superblock could be there too */ 2935 static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len) 2936 { 2937 if (likely(ext4_has_feature_large_file(inode->i_sb))) 2938 return 1; 2939 2940 if (pos + len <= 0x7fffffffULL) 2941 return 1; 2942 2943 /* We might need to update the superblock to set LARGE_FILE */ 2944 return 2; 2945 } 2946 2947 static int ext4_da_write_begin(struct file *file, struct address_space *mapping, 2948 loff_t pos, unsigned len, unsigned flags, 2949 struct page **pagep, void **fsdata) 2950 { 2951 int ret, retries = 0; 2952 struct page *page; 2953 pgoff_t index; 2954 struct inode *inode = mapping->host; 2955 handle_t *handle; 2956 2957 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) 2958 return -EIO; 2959 2960 index = pos >> PAGE_SHIFT; 2961 2962 if (ext4_nonda_switch(inode->i_sb) || 2963 S_ISLNK(inode->i_mode)) { 2964 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; 2965 return ext4_write_begin(file, mapping, pos, 2966 len, flags, pagep, fsdata); 2967 } 2968 *fsdata = (void *)0; 2969 trace_ext4_da_write_begin(inode, pos, len, flags); 2970 2971 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { 2972 ret = ext4_da_write_inline_data_begin(mapping, inode, 2973 pos, len, flags, 2974 pagep, fsdata); 2975 if (ret < 0) 2976 return ret; 2977 if (ret == 1) 2978 return 0; 2979 } 2980 2981 /* 2982 * grab_cache_page_write_begin() can take a long time if the 2983 * system is thrashing due to memory pressure, or if the page 2984 * is being written back. So grab it first before we start 2985 * the transaction handle. This also allows us to allocate 2986 * the page (if needed) without using GFP_NOFS. 2987 */ 2988 retry_grab: 2989 page = grab_cache_page_write_begin(mapping, index, flags); 2990 if (!page) 2991 return -ENOMEM; 2992 unlock_page(page); 2993 2994 /* 2995 * With delayed allocation, we don't log the i_disksize update 2996 * if there is delayed block allocation. But we still need 2997 * to journal the i_disksize update if we write to the end 2998 * of a file through an already mapped buffer. 2999 */ 3000 retry_journal: 3001 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 3002 ext4_da_write_credits(inode, pos, len)); 3003 if (IS_ERR(handle)) { 3004 put_page(page); 3005 return PTR_ERR(handle); 3006 } 3007 3008 lock_page(page); 3009 if (page->mapping != mapping) { 3010 /* The page got truncated from under us */ 3011 unlock_page(page); 3012 put_page(page); 3013 ext4_journal_stop(handle); 3014 goto retry_grab; 3015 } 3016 /* In case writeback began while the page was unlocked */ 3017 wait_for_stable_page(page); 3018 3019 #ifdef CONFIG_EXT4_FS_ENCRYPTION 3020 ret = ext4_block_write_begin(page, pos, len, 3021 ext4_da_get_block_prep); 3022 #else 3023 ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep); 3024 #endif 3025 if (ret < 0) { 3026 unlock_page(page); 3027 ext4_journal_stop(handle); 3028 /* 3029 * block_write_begin may have instantiated a few blocks 3030 * outside i_size. Trim these off again. Don't need 3031 * i_size_read because we hold i_mutex.
3032 */ 3033 if (pos + len > inode->i_size) 3034 ext4_truncate_failed_write(inode); 3035 3036 if (ret == -ENOSPC && 3037 ext4_should_retry_alloc(inode->i_sb, &retries)) 3038 goto retry_journal; 3039 3040 put_page(page); 3041 return ret; 3042 } 3043 3044 *pagep = page; 3045 return ret; 3046 } 3047 3048 /* 3049 * Check if we should update i_disksize 3050 * when a write to the end of the file does not require block allocation 3051 */ 3052 static int ext4_da_should_update_i_disksize(struct page *page, 3053 unsigned long offset) 3054 { 3055 struct buffer_head *bh; 3056 struct inode *inode = page->mapping->host; 3057 unsigned int idx; 3058 int i; 3059 3060 bh = page_buffers(page); 3061 idx = offset >> inode->i_blkbits; 3062 3063 for (i = 0; i < idx; i++) 3064 bh = bh->b_this_page; 3065 3066 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh)) 3067 return 0; 3068 return 1; 3069 } 3070 3071 static int ext4_da_write_end(struct file *file, 3072 struct address_space *mapping, 3073 loff_t pos, unsigned len, unsigned copied, 3074 struct page *page, void *fsdata) 3075 { 3076 struct inode *inode = mapping->host; 3077 int ret = 0, ret2; 3078 handle_t *handle = ext4_journal_current_handle(); 3079 loff_t new_i_size; 3080 unsigned long start, end; 3081 int write_mode = (int)(unsigned long)fsdata; 3082 3083 if (write_mode == FALL_BACK_TO_NONDELALLOC) 3084 return ext4_write_end(file, mapping, pos, 3085 len, copied, page, fsdata); 3086 3087 trace_ext4_da_write_end(inode, pos, len, copied); 3088 start = pos & (PAGE_SIZE - 1); 3089 end = start + copied - 1; 3090 3091 /* 3092 * generic_write_end() will run mark_inode_dirty() if i_size 3093 * changes. So let's piggyback the i_disksize mark_inode_dirty 3094 * into that. 3095 */ 3096 new_i_size = pos + copied; 3097 if (copied && new_i_size > EXT4_I(inode)->i_disksize) { 3098 if (ext4_has_inline_data(inode) || 3099 ext4_da_should_update_i_disksize(page, end)) { 3100 ext4_update_i_disksize(inode, new_i_size); 3101 /* We need to mark the inode dirty even if 3102 * new_i_size is less than inode->i_size 3103 * but greater than i_disksize. (hint: delalloc) 3104 */ 3105 ext4_mark_inode_dirty(handle, inode); 3106 } 3107 } 3108 3109 if (write_mode != CONVERT_INLINE_DATA && 3110 ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) && 3111 ext4_has_inline_data(inode)) 3112 ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied, 3113 page); 3114 else 3115 ret2 = generic_write_end(file, mapping, pos, len, copied, 3116 page, fsdata); 3117 3118 copied = ret2; 3119 if (ret2 < 0) 3120 ret = ret2; 3121 ret2 = ext4_journal_stop(handle); 3122 if (!ret) 3123 ret = ret2; 3124 3125 return ret ? ret : copied; 3126 } 3127 3128 static void ext4_da_invalidatepage(struct page *page, unsigned int offset, 3129 unsigned int length) 3130 { 3131 /* 3132 * Drop reserved blocks 3133 */ 3134 BUG_ON(!PageLocked(page)); 3135 if (!page_has_buffers(page)) 3136 goto out; 3137 3138 ext4_da_page_release_reservation(page, offset, length); 3139 3140 out: 3141 ext4_invalidatepage(page, offset, length); 3142 3143 return; 3144 } 3145 3146 /* 3147 * Force all delayed allocation blocks to be allocated for a given inode. 3148 */ 3149 int ext4_alloc_da_blocks(struct inode *inode) 3150 { 3151 trace_ext4_alloc_da_blocks(inode); 3152 3153 if (!EXT4_I(inode)->i_reserved_data_blocks) 3154 return 0; 3155 3156 /* 3157 * We do something simple for now.
The filemap_flush() will 3158 * also start triggering a write of the data blocks, which is 3159 * not strictly speaking necessary (and for users of 3160 * laptop_mode, not even desirable). However, to do otherwise 3161 * would require replicating code paths in: 3162 * 3163 * ext4_writepages() -> 3164 * write_cache_pages() ---> (via passed in callback function) 3165 * __mpage_da_writepage() --> 3166 * mpage_add_bh_to_extent() 3167 * mpage_da_map_blocks() 3168 * 3169 * The problem is that write_cache_pages(), located in 3170 * mm/page-writeback.c, marks pages clean in preparation for 3171 * doing I/O, which is not desirable if we're not planning on 3172 * doing I/O at all. 3173 * 3174 * We could call write_cache_pages(), and then redirty all of 3175 * the pages by calling redirty_page_for_writepage() but that 3176 * would be ugly in the extreme. So instead we would need to 3177 * replicate parts of the code in the above functions, 3178 * simplifying them because we wouldn't actually intend to 3179 * write out the pages, but rather only collect contiguous 3180 * logical block extents, call the multi-block allocator, and 3181 * then update the buffer heads with the block allocations. 3182 * 3183 * For now, though, we'll cheat by calling filemap_flush(), 3184 * which will map the blocks, and start the I/O, but not 3185 * actually wait for the I/O to complete. 3186 */ 3187 return filemap_flush(inode->i_mapping); 3188 } 3189 3190 /* 3191 * bmap() is special. It gets used by applications such as lilo and by 3192 * the swapper to find the on-disk block of a specific piece of data. 3193 * 3194 * Naturally, this is dangerous if the block concerned is still in the 3195 * journal. If somebody makes a swapfile on an ext4 data-journaling 3196 * filesystem and enables swap, then they may get a nasty shock when the 3197 * data getting swapped to that swapfile suddenly gets overwritten by 3198 * the original zeros written out previously to the journal and 3199 * awaiting writeback in the kernel's buffer cache. 3200 * 3201 * So, if we see any bmap calls here on a modified, data-journaled file, 3202 * take extra steps to flush any blocks which might be in the cache. 3203 */ 3204 static sector_t ext4_bmap(struct address_space *mapping, sector_t block) 3205 { 3206 struct inode *inode = mapping->host; 3207 journal_t *journal; 3208 int err; 3209 3210 /* 3211 * We can get here for an inline file via the FIBMAP ioctl 3212 */ 3213 if (ext4_has_inline_data(inode)) 3214 return 0; 3215 3216 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && 3217 test_opt(inode->i_sb, DELALLOC)) { 3218 /* 3219 * With delalloc we want to sync the file 3220 * so that we can make sure we allocate 3221 * blocks for the file 3222 */ 3223 filemap_write_and_wait(mapping); 3224 } 3225 3226 if (EXT4_JOURNAL(inode) && 3227 ext4_test_inode_state(inode, EXT4_STATE_JDATA)) { 3228 /* 3229 * This is a REALLY heavyweight approach, but the use of 3230 * bmap on dirty files is expected to be extremely rare: 3231 * only if we run lilo or swapon on a freshly made file 3232 * do we expect this to happen. 3233 * 3234 * (bmap requires CAP_SYS_RAWIO so this does not 3235 * represent an unprivileged user DOS attack --- we'd be 3236 * in trouble if mortal users could trigger this path at 3237 * will.) 3238 * 3239 * NB. EXT4_STATE_JDATA is not set on files other than 3240 * regular files. If somebody wants to bmap a directory 3241 * or symlink and gets confused because the buffer 3242 * hasn't yet been flushed to disk, they deserve 3243 * everything they get.
3244 */ 3245 3246 ext4_clear_inode_state(inode, EXT4_STATE_JDATA); 3247 journal = EXT4_JOURNAL(inode); 3248 jbd2_journal_lock_updates(journal); 3249 err = jbd2_journal_flush(journal); 3250 jbd2_journal_unlock_updates(journal); 3251 3252 if (err) 3253 return 0; 3254 } 3255 3256 return generic_block_bmap(mapping, block, ext4_get_block); 3257 } 3258 3259 static int ext4_readpage(struct file *file, struct page *page) 3260 { 3261 int ret = -EAGAIN; 3262 struct inode *inode = page->mapping->host; 3263 3264 trace_ext4_readpage(page); 3265 3266 if (ext4_has_inline_data(inode)) 3267 ret = ext4_readpage_inline(inode, page); 3268 3269 if (ret == -EAGAIN) 3270 return ext4_mpage_readpages(page->mapping, NULL, page, 1); 3271 3272 return ret; 3273 } 3274 3275 static int 3276 ext4_readpages(struct file *file, struct address_space *mapping, 3277 struct list_head *pages, unsigned nr_pages) 3278 { 3279 struct inode *inode = mapping->host; 3280 3281 /* If the file has inline data, no need to do readpages. */ 3282 if (ext4_has_inline_data(inode)) 3283 return 0; 3284 3285 return ext4_mpage_readpages(mapping, pages, NULL, nr_pages); 3286 } 3287 3288 static void ext4_invalidatepage(struct page *page, unsigned int offset, 3289 unsigned int length) 3290 { 3291 trace_ext4_invalidatepage(page, offset, length); 3292 3293 /* No journalling happens on data buffers when this function is used */ 3294 WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page))); 3295 3296 block_invalidatepage(page, offset, length); 3297 } 3298 3299 static int __ext4_journalled_invalidatepage(struct page *page, 3300 unsigned int offset, 3301 unsigned int length) 3302 { 3303 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3304 3305 trace_ext4_journalled_invalidatepage(page, offset, length); 3306 3307 /* 3308 * If it's a full truncate we just forget about the pending dirtying 3309 */ 3310 if (offset == 0 && length == PAGE_SIZE) 3311 ClearPageChecked(page); 3312 3313 return jbd2_journal_invalidatepage(journal, page, offset, length); 3314 } 3315 3316 /* Wrapper for aops... 
*/ 3317 static void ext4_journalled_invalidatepage(struct page *page, 3318 unsigned int offset, 3319 unsigned int length) 3320 { 3321 WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0); 3322 } 3323 3324 static int ext4_releasepage(struct page *page, gfp_t wait) 3325 { 3326 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3327 3328 trace_ext4_releasepage(page); 3329 3330 /* Page has dirty journalled data -> cannot release */ 3331 if (PageChecked(page)) 3332 return 0; 3333 if (journal) 3334 return jbd2_journal_try_to_free_buffers(journal, page, wait); 3335 else 3336 return try_to_free_buffers(page); 3337 } 3338 3339 #ifdef CONFIG_FS_DAX 3340 static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length, 3341 unsigned flags, struct iomap *iomap) 3342 { 3343 struct block_device *bdev; 3344 unsigned int blkbits = inode->i_blkbits; 3345 unsigned long first_block = offset >> blkbits; 3346 unsigned long last_block = (offset + length - 1) >> blkbits; 3347 struct ext4_map_blocks map; 3348 int ret; 3349 3350 if (WARN_ON_ONCE(ext4_has_inline_data(inode))) 3351 return -ERANGE; 3352 3353 map.m_lblk = first_block; 3354 map.m_len = last_block - first_block + 1; 3355 3356 if (!(flags & IOMAP_WRITE)) { 3357 ret = ext4_map_blocks(NULL, inode, &map, 0); 3358 } else { 3359 int dio_credits; 3360 handle_t *handle; 3361 int retries = 0; 3362 3363 /* Trim mapping request to maximum we can map at once for DIO */ 3364 if (map.m_len > DIO_MAX_BLOCKS) 3365 map.m_len = DIO_MAX_BLOCKS; 3366 dio_credits = ext4_chunk_trans_blocks(inode, map.m_len); 3367 retry: 3368 /* 3369 * Either we allocate blocks and then we don't get unwritten 3370 * extent so we have reserved enough credits, or the blocks 3371 * are already allocated and unwritten and in that case 3372 * extent conversion fits in the credits as well. 3373 */ 3374 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 3375 dio_credits); 3376 if (IS_ERR(handle)) 3377 return PTR_ERR(handle); 3378 3379 ret = ext4_map_blocks(handle, inode, &map, 3380 EXT4_GET_BLOCKS_CREATE_ZERO); 3381 if (ret < 0) { 3382 ext4_journal_stop(handle); 3383 if (ret == -ENOSPC && 3384 ext4_should_retry_alloc(inode->i_sb, &retries)) 3385 goto retry; 3386 return ret; 3387 } 3388 3389 /* 3390 * If we added blocks beyond i_size, we need to make sure they 3391 * will get truncated if we crash before updating i_size in 3392 * ext4_iomap_end(). For faults we don't need to do that (and 3393 * even cannot because for orphan list operations inode_lock is 3394 * required) - if we happen to instantiate block beyond i_size, 3395 * it is because we race with truncate which has already added 3396 * the inode to the orphan list. 
3397 */ 3398 if (!(flags & IOMAP_FAULT) && first_block + map.m_len > 3399 (i_size_read(inode) + (1 << blkbits) - 1) >> blkbits) { 3400 int err; 3401 3402 err = ext4_orphan_add(handle, inode); 3403 if (err < 0) { 3404 ext4_journal_stop(handle); 3405 return err; 3406 } 3407 } 3408 ext4_journal_stop(handle); 3409 } 3410 3411 iomap->flags = 0; 3412 bdev = inode->i_sb->s_bdev; 3413 iomap->bdev = bdev; 3414 if (blk_queue_dax(bdev->bd_queue)) 3415 iomap->dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); 3416 else 3417 iomap->dax_dev = NULL; 3418 iomap->offset = first_block << blkbits; 3419 3420 if (ret == 0) { 3421 iomap->type = IOMAP_HOLE; 3422 iomap->blkno = IOMAP_NULL_BLOCK; 3423 iomap->length = (u64)map.m_len << blkbits; 3424 } else { 3425 if (map.m_flags & EXT4_MAP_MAPPED) { 3426 iomap->type = IOMAP_MAPPED; 3427 } else if (map.m_flags & EXT4_MAP_UNWRITTEN) { 3428 iomap->type = IOMAP_UNWRITTEN; 3429 } else { 3430 WARN_ON_ONCE(1); 3431 return -EIO; 3432 } 3433 iomap->blkno = (sector_t)map.m_pblk << (blkbits - 9); 3434 iomap->length = (u64)map.m_len << blkbits; 3435 } 3436 3437 if (map.m_flags & EXT4_MAP_NEW) 3438 iomap->flags |= IOMAP_F_NEW; 3439 return 0; 3440 } 3441 3442 static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length, 3443 ssize_t written, unsigned flags, struct iomap *iomap) 3444 { 3445 int ret = 0; 3446 handle_t *handle; 3447 int blkbits = inode->i_blkbits; 3448 bool truncate = false; 3449 3450 put_dax(iomap->dax_dev); 3451 if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT)) 3452 return 0; 3453 3454 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); 3455 if (IS_ERR(handle)) { 3456 ret = PTR_ERR(handle); 3457 goto orphan_del; 3458 } 3459 if (ext4_update_inode_size(inode, offset + written)) 3460 ext4_mark_inode_dirty(handle, inode); 3461 /* 3462 * We may need to truncate allocated but not written blocks beyond EOF. 3463 */ 3464 if (iomap->offset + iomap->length > 3465 ALIGN(inode->i_size, 1 << blkbits)) { 3466 ext4_lblk_t written_blk, end_blk; 3467 3468 written_blk = (offset + written) >> blkbits; 3469 end_blk = (offset + length) >> blkbits; 3470 if (written_blk < end_blk && ext4_can_truncate(inode)) 3471 truncate = true; 3472 } 3473 /* 3474 * Remove the inode from the orphan list if we were extending an inode 3475 * and everything went fine. 3476 */ 3477 if (!truncate && inode->i_nlink && 3478 !list_empty(&EXT4_I(inode)->i_orphan)) 3479 ext4_orphan_del(handle, inode); 3480 ext4_journal_stop(handle); 3481 if (truncate) { 3482 ext4_truncate_failed_write(inode); 3483 orphan_del: 3484 /* 3485 * If truncate failed early the inode might still be on the 3486 * orphan list; we need to make sure the inode is removed from 3487 * the orphan list in that case. 3488 */ 3489 if (inode->i_nlink) 3490 ext4_orphan_del(NULL, inode); 3491 } 3492 return ret; 3493 } 3494 3495 const struct iomap_ops ext4_iomap_ops = { 3496 .iomap_begin = ext4_iomap_begin, 3497 .iomap_end = ext4_iomap_end, 3498 }; 3499 3500 #endif 3501 3502 static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset, 3503 ssize_t size, void *private) 3504 { 3505 ext4_io_end_t *io_end = private; 3506 3507 /* if not async direct IO just return */ 3508 if (!io_end) 3509 return 0; 3510 3511 ext_debug("ext4_end_io_dio(): io_end 0x%p " 3512 "for inode %lu, iocb 0x%p, offset %llu, size %zd\n", 3513 io_end, io_end->inode->i_ino, iocb, offset, size); 3514 3515 /* 3516 * Error during AIO DIO. We cannot convert unwritten extents as the 3517 * data was not written. Just clear the unwritten flag and drop io_end.
3518 */ 3519 if (size <= 0) { 3520 ext4_clear_io_unwritten_flag(io_end); 3521 size = 0; 3522 } 3523 io_end->offset = offset; 3524 io_end->size = size; 3525 ext4_put_io_end(io_end); 3526 3527 return 0; 3528 } 3529 3530 /* 3531 * Handling of direct IO writes. 3532 * 3533 * For ext4 extent files, ext4 will do direct-io writes even to holes, 3534 * to preallocated extents, and for writes that extend the file, with 3535 * no need to fall back to buffered IO. 3536 * 3537 * For holes, we allocate blocks and mark them as unwritten. 3538 * If those blocks were preallocated, we make sure they are split, but 3539 * still keep the range to write as unwritten. 3540 * 3541 * The unwritten extents will be converted to written when DIO is completed. 3542 * For async direct IO, since the IO may still be pending when we return, we 3543 * set up an end_io callback function, which will do the conversion 3544 * when the async direct IO is completed. 3545 * 3546 * If the O_DIRECT write will extend the file then add this inode to the 3547 * orphan list. So recovery will truncate it back to the original size 3548 * if the machine crashes during the write. 3549 * 3550 */ 3551 static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter) 3552 { 3553 struct file *file = iocb->ki_filp; 3554 struct inode *inode = file->f_mapping->host; 3555 struct ext4_inode_info *ei = EXT4_I(inode); 3556 ssize_t ret; 3557 loff_t offset = iocb->ki_pos; 3558 size_t count = iov_iter_count(iter); 3559 int overwrite = 0; 3560 get_block_t *get_block_func = NULL; 3561 int dio_flags = 0; 3562 loff_t final_size = offset + count; 3563 int orphan = 0; 3564 handle_t *handle; 3565 3566 if (final_size > inode->i_size) { 3567 /* Credits for sb + inode write */ 3568 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); 3569 if (IS_ERR(handle)) { 3570 ret = PTR_ERR(handle); 3571 goto out; 3572 } 3573 ret = ext4_orphan_add(handle, inode); 3574 if (ret) { 3575 ext4_journal_stop(handle); 3576 goto out; 3577 } 3578 orphan = 1; 3579 ei->i_disksize = inode->i_size; 3580 ext4_journal_stop(handle); 3581 } 3582 3583 BUG_ON(iocb->private == NULL); 3584 3585 /* 3586 * Make all waiters for direct IO properly wait also for extent 3587 * conversion. This also disallows race between truncate() and 3588 * overwrite DIO as i_dio_count needs to be incremented under i_mutex. 3589 */ 3590 inode_dio_begin(inode); 3591 3592 /* If we do an overwrite dio, i_mutex locking can be released */ 3593 overwrite = *((int *)iocb->private); 3594 3595 if (overwrite) 3596 inode_unlock(inode); 3597 3598 /* 3599 * For extent-mapped files we can do direct writes to holes and to 3600 * fallocated ranges. 3601 * 3602 * Allocated blocks to fill the hole are marked as unwritten to prevent 3603 * a parallel buffered read from exposing the stale data before DIO 3604 * completes the data IO. 3605 * 3606 * As for previously fallocated extents, ext4's get_block will simply 3607 * mark the buffer mapped but still keep the extents unwritten. 3608 * 3609 * For the non-AIO case, we will convert those unwritten extents to 3610 * written after returning from blockdev_direct_IO. That way we avoid 3611 * allocating an io_end structure and also the overhead of offloading 3612 * the extent conversion to a workqueue. 3613 * 3614 * For async DIO, the conversion needs to be deferred when the 3615 * IO is completed. The ext4 end_io callback function will be 3616 * called to take care of the conversion work. Here for async 3617 * case, we allocate an io_end structure to hook to the iocb.
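	 *
	 * The get_block callback chosen below reflects these cases (a
	 * summary of the code that follows):
	 *  - overwrite DIO:  ext4_dio_get_block_overwrite
	 *  - indirect-block files, or writes entirely beyond i_size:
	 *    ext4_dio_get_block with DIO_LOCKING | DIO_SKIP_HOLES
	 *  - sync kiocb:     ext4_dio_get_block_unwritten_sync
	 *  - async kiocb:    ext4_dio_get_block_unwritten_async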
	 */
	iocb->private = NULL;
	if (overwrite)
		get_block_func = ext4_dio_get_block_overwrite;
	else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
		 round_down(offset, i_blocksize(inode)) >= inode->i_size) {
		get_block_func = ext4_dio_get_block;
		dio_flags = DIO_LOCKING | DIO_SKIP_HOLES;
	} else if (is_sync_kiocb(iocb)) {
		get_block_func = ext4_dio_get_block_unwritten_sync;
		dio_flags = DIO_LOCKING;
	} else {
		get_block_func = ext4_dio_get_block_unwritten_async;
		dio_flags = DIO_LOCKING;
	}
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
#endif
	ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
				   get_block_func, ext4_end_io_dio, NULL,
				   dio_flags);

	if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
						EXT4_STATE_DIO_UNWRITTEN)) {
		int err;
		/*
		 * In the non-AIO case the IO is already completed,
		 * so we can do the conversion right here.
		 */
		err = ext4_convert_unwritten_extents(NULL, inode,
						     offset, ret);
		if (err < 0)
			ret = err;
		ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
	}

	inode_dio_end(inode);
	/* Retake i_mutex if we did an overwrite DIO */
	if (overwrite)
		inode_lock(inode);

	if (ret < 0 && final_size > inode->i_size)
		ext4_truncate_failed_write(inode);

	/* Handle extending of i_size after direct IO write */
	if (orphan) {
		int err;

		/* Credits for sb + inode write */
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			/* This is really bad luck. We've written the data
			 * but cannot extend i_size. Bail out and pretend
			 * the write failed... */
			ret = PTR_ERR(handle);
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);

			goto out;
		}
		if (inode->i_nlink)
			ext4_orphan_del(handle, inode);
		if (ret > 0) {
			loff_t end = offset + ret;
			if (end > inode->i_size) {
				ei->i_disksize = end;
				i_size_write(inode, end);
				/*
				 * We're going to return a positive `ret'
				 * here due to non-zero-length I/O, so there's
				 * no way of reporting error returns from
				 * ext4_mark_inode_dirty() to userspace. So
				 * ignore it.
				 */
				ext4_mark_inode_dirty(handle, inode);
			}
		}
		err = ext4_journal_stop(handle);
		if (ret == 0)
			ret = err;
	}
out:
	return ret;
}

static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	/*
	 * A shared inode_lock is enough for us - it protects against
	 * concurrent writes and truncates, and since we take care of
	 * writing back the page cache, we are protected against page
	 * writeback as well.
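	 *
	 * Purely as an illustrative aside (not code from this file): a
	 * userspace O_DIRECT reader is expected to use suitably aligned
	 * buffers and offsets, e.g.:
	 *
	 *	void *buf;
	 *	if (posix_memalign(&buf, 4096, 4096) == 0)
	 *		pread(fd, buf, 4096, 0);  // fd opened with O_DIRECT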
3713 */ 3714 inode_lock_shared(inode); 3715 ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, 3716 iocb->ki_pos + count); 3717 if (ret) 3718 goto out_unlock; 3719 ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, 3720 iter, ext4_dio_get_block, NULL, NULL, 0); 3721 out_unlock: 3722 inode_unlock_shared(inode); 3723 return ret; 3724 } 3725 3726 static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter) 3727 { 3728 struct file *file = iocb->ki_filp; 3729 struct inode *inode = file->f_mapping->host; 3730 size_t count = iov_iter_count(iter); 3731 loff_t offset = iocb->ki_pos; 3732 ssize_t ret; 3733 3734 #ifdef CONFIG_EXT4_FS_ENCRYPTION 3735 if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) 3736 return 0; 3737 #endif 3738 3739 /* 3740 * If we are doing data journalling we don't support O_DIRECT 3741 */ 3742 if (ext4_should_journal_data(inode)) 3743 return 0; 3744 3745 /* Let buffer I/O handle the inline data case. */ 3746 if (ext4_has_inline_data(inode)) 3747 return 0; 3748 3749 /* DAX uses iomap path now */ 3750 if (WARN_ON_ONCE(IS_DAX(inode))) 3751 return 0; 3752 3753 trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter)); 3754 if (iov_iter_rw(iter) == READ) 3755 ret = ext4_direct_IO_read(iocb, iter); 3756 else 3757 ret = ext4_direct_IO_write(iocb, iter); 3758 trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret); 3759 return ret; 3760 } 3761 3762 /* 3763 * Pages can be marked dirty completely asynchronously from ext4's journalling 3764 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do 3765 * much here because ->set_page_dirty is called under VFS locks. The page is 3766 * not necessarily locked. 3767 * 3768 * We cannot just dirty the page and leave attached buffers clean, because the 3769 * buffers' dirty state is "definitive". We cannot just set the buffers dirty 3770 * or jbddirty because all the journalling code will explode. 3771 * 3772 * So what we do is to mark the page "pending dirty" and next time writepage 3773 * is called, propagate that into the buffers appropriately. 
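 *
 * In the code below, the PG_checked page flag is what implements "pending
 * dirty": ext4_journalled_set_page_dirty() sets it, and the journalled
 * writepage path later uses it to decide how to dirty the attached buffers.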
3774 */ 3775 static int ext4_journalled_set_page_dirty(struct page *page) 3776 { 3777 SetPageChecked(page); 3778 return __set_page_dirty_nobuffers(page); 3779 } 3780 3781 static int ext4_set_page_dirty(struct page *page) 3782 { 3783 WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page)); 3784 WARN_ON_ONCE(!page_has_buffers(page)); 3785 return __set_page_dirty_buffers(page); 3786 } 3787 3788 static const struct address_space_operations ext4_aops = { 3789 .readpage = ext4_readpage, 3790 .readpages = ext4_readpages, 3791 .writepage = ext4_writepage, 3792 .writepages = ext4_writepages, 3793 .write_begin = ext4_write_begin, 3794 .write_end = ext4_write_end, 3795 .set_page_dirty = ext4_set_page_dirty, 3796 .bmap = ext4_bmap, 3797 .invalidatepage = ext4_invalidatepage, 3798 .releasepage = ext4_releasepage, 3799 .direct_IO = ext4_direct_IO, 3800 .migratepage = buffer_migrate_page, 3801 .is_partially_uptodate = block_is_partially_uptodate, 3802 .error_remove_page = generic_error_remove_page, 3803 }; 3804 3805 static const struct address_space_operations ext4_journalled_aops = { 3806 .readpage = ext4_readpage, 3807 .readpages = ext4_readpages, 3808 .writepage = ext4_writepage, 3809 .writepages = ext4_writepages, 3810 .write_begin = ext4_write_begin, 3811 .write_end = ext4_journalled_write_end, 3812 .set_page_dirty = ext4_journalled_set_page_dirty, 3813 .bmap = ext4_bmap, 3814 .invalidatepage = ext4_journalled_invalidatepage, 3815 .releasepage = ext4_releasepage, 3816 .direct_IO = ext4_direct_IO, 3817 .is_partially_uptodate = block_is_partially_uptodate, 3818 .error_remove_page = generic_error_remove_page, 3819 }; 3820 3821 static const struct address_space_operations ext4_da_aops = { 3822 .readpage = ext4_readpage, 3823 .readpages = ext4_readpages, 3824 .writepage = ext4_writepage, 3825 .writepages = ext4_writepages, 3826 .write_begin = ext4_da_write_begin, 3827 .write_end = ext4_da_write_end, 3828 .set_page_dirty = ext4_set_page_dirty, 3829 .bmap = ext4_bmap, 3830 .invalidatepage = ext4_da_invalidatepage, 3831 .releasepage = ext4_releasepage, 3832 .direct_IO = ext4_direct_IO, 3833 .migratepage = buffer_migrate_page, 3834 .is_partially_uptodate = block_is_partially_uptodate, 3835 .error_remove_page = generic_error_remove_page, 3836 }; 3837 3838 void ext4_set_aops(struct inode *inode) 3839 { 3840 switch (ext4_inode_journal_mode(inode)) { 3841 case EXT4_INODE_ORDERED_DATA_MODE: 3842 case EXT4_INODE_WRITEBACK_DATA_MODE: 3843 break; 3844 case EXT4_INODE_JOURNAL_DATA_MODE: 3845 inode->i_mapping->a_ops = &ext4_journalled_aops; 3846 return; 3847 default: 3848 BUG(); 3849 } 3850 if (test_opt(inode->i_sb, DELALLOC)) 3851 inode->i_mapping->a_ops = &ext4_da_aops; 3852 else 3853 inode->i_mapping->a_ops = &ext4_aops; 3854 } 3855 3856 static int __ext4_block_zero_page_range(handle_t *handle, 3857 struct address_space *mapping, loff_t from, loff_t length) 3858 { 3859 ext4_fsblk_t index = from >> PAGE_SHIFT; 3860 unsigned offset = from & (PAGE_SIZE-1); 3861 unsigned blocksize, pos; 3862 ext4_lblk_t iblock; 3863 struct inode *inode = mapping->host; 3864 struct buffer_head *bh; 3865 struct page *page; 3866 int err = 0; 3867 3868 page = find_or_create_page(mapping, from >> PAGE_SHIFT, 3869 mapping_gfp_constraint(mapping, ~__GFP_FS)); 3870 if (!page) 3871 return -ENOMEM; 3872 3873 blocksize = inode->i_sb->s_blocksize; 3874 3875 iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits); 3876 3877 if (!page_has_buffers(page)) 3878 create_empty_buffers(page, blocksize, 0); 3879 3880 /* Find the buffer that contains 
"offset" */ 3881 bh = page_buffers(page); 3882 pos = blocksize; 3883 while (offset >= pos) { 3884 bh = bh->b_this_page; 3885 iblock++; 3886 pos += blocksize; 3887 } 3888 if (buffer_freed(bh)) { 3889 BUFFER_TRACE(bh, "freed: skip"); 3890 goto unlock; 3891 } 3892 if (!buffer_mapped(bh)) { 3893 BUFFER_TRACE(bh, "unmapped"); 3894 ext4_get_block(inode, iblock, bh, 0); 3895 /* unmapped? It's a hole - nothing to do */ 3896 if (!buffer_mapped(bh)) { 3897 BUFFER_TRACE(bh, "still unmapped"); 3898 goto unlock; 3899 } 3900 } 3901 3902 /* Ok, it's mapped. Make sure it's up-to-date */ 3903 if (PageUptodate(page)) 3904 set_buffer_uptodate(bh); 3905 3906 if (!buffer_uptodate(bh)) { 3907 err = -EIO; 3908 ll_rw_block(REQ_OP_READ, 0, 1, &bh); 3909 wait_on_buffer(bh); 3910 /* Uhhuh. Read error. Complain and punt. */ 3911 if (!buffer_uptodate(bh)) 3912 goto unlock; 3913 if (S_ISREG(inode->i_mode) && 3914 ext4_encrypted_inode(inode)) { 3915 /* We expect the key to be set. */ 3916 BUG_ON(!fscrypt_has_encryption_key(inode)); 3917 BUG_ON(blocksize != PAGE_SIZE); 3918 WARN_ON_ONCE(fscrypt_decrypt_page(page->mapping->host, 3919 page, PAGE_SIZE, 0, page->index)); 3920 } 3921 } 3922 if (ext4_should_journal_data(inode)) { 3923 BUFFER_TRACE(bh, "get write access"); 3924 err = ext4_journal_get_write_access(handle, bh); 3925 if (err) 3926 goto unlock; 3927 } 3928 zero_user(page, offset, length); 3929 BUFFER_TRACE(bh, "zeroed end of block"); 3930 3931 if (ext4_should_journal_data(inode)) { 3932 err = ext4_handle_dirty_metadata(handle, inode, bh); 3933 } else { 3934 err = 0; 3935 mark_buffer_dirty(bh); 3936 if (ext4_should_order_data(inode)) 3937 err = ext4_jbd2_inode_add_write(handle, inode); 3938 } 3939 3940 unlock: 3941 unlock_page(page); 3942 put_page(page); 3943 return err; 3944 } 3945 3946 /* 3947 * ext4_block_zero_page_range() zeros out a mapping of length 'length' 3948 * starting from file offset 'from'. The range to be zero'd must 3949 * be contained with in one block. If the specified range exceeds 3950 * the end of the block it will be shortened to end of the block 3951 * that cooresponds to 'from' 3952 */ 3953 static int ext4_block_zero_page_range(handle_t *handle, 3954 struct address_space *mapping, loff_t from, loff_t length) 3955 { 3956 struct inode *inode = mapping->host; 3957 unsigned offset = from & (PAGE_SIZE-1); 3958 unsigned blocksize = inode->i_sb->s_blocksize; 3959 unsigned max = blocksize - (offset & (blocksize - 1)); 3960 3961 /* 3962 * correct length if it does not fall between 3963 * 'from' and the end of the block 3964 */ 3965 if (length > max || length < 0) 3966 length = max; 3967 3968 if (IS_DAX(inode)) { 3969 return iomap_zero_range(inode, from, length, NULL, 3970 &ext4_iomap_ops); 3971 } 3972 return __ext4_block_zero_page_range(handle, mapping, from, length); 3973 } 3974 3975 /* 3976 * ext4_block_truncate_page() zeroes out a mapping from file offset `from' 3977 * up to the end of the block which corresponds to `from'. 3978 * This required during truncate. We need to physically zero the tail end 3979 * of that block so it doesn't yield old data if the file is later grown. 
3980 */ 3981 static int ext4_block_truncate_page(handle_t *handle, 3982 struct address_space *mapping, loff_t from) 3983 { 3984 unsigned offset = from & (PAGE_SIZE-1); 3985 unsigned length; 3986 unsigned blocksize; 3987 struct inode *inode = mapping->host; 3988 3989 /* If we are processing an encrypted inode during orphan list handling */ 3990 if (ext4_encrypted_inode(inode) && !fscrypt_has_encryption_key(inode)) 3991 return 0; 3992 3993 blocksize = inode->i_sb->s_blocksize; 3994 length = blocksize - (offset & (blocksize - 1)); 3995 3996 return ext4_block_zero_page_range(handle, mapping, from, length); 3997 } 3998 3999 int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode, 4000 loff_t lstart, loff_t length) 4001 { 4002 struct super_block *sb = inode->i_sb; 4003 struct address_space *mapping = inode->i_mapping; 4004 unsigned partial_start, partial_end; 4005 ext4_fsblk_t start, end; 4006 loff_t byte_end = (lstart + length - 1); 4007 int err = 0; 4008 4009 partial_start = lstart & (sb->s_blocksize - 1); 4010 partial_end = byte_end & (sb->s_blocksize - 1); 4011 4012 start = lstart >> sb->s_blocksize_bits; 4013 end = byte_end >> sb->s_blocksize_bits; 4014 4015 /* Handle partial zero within the single block */ 4016 if (start == end && 4017 (partial_start || (partial_end != sb->s_blocksize - 1))) { 4018 err = ext4_block_zero_page_range(handle, mapping, 4019 lstart, length); 4020 return err; 4021 } 4022 /* Handle partial zero out on the start of the range */ 4023 if (partial_start) { 4024 err = ext4_block_zero_page_range(handle, mapping, 4025 lstart, sb->s_blocksize); 4026 if (err) 4027 return err; 4028 } 4029 /* Handle partial zero out on the end of the range */ 4030 if (partial_end != sb->s_blocksize - 1) 4031 err = ext4_block_zero_page_range(handle, mapping, 4032 byte_end - partial_end, 4033 partial_end + 1); 4034 return err; 4035 } 4036 4037 int ext4_can_truncate(struct inode *inode) 4038 { 4039 if (S_ISREG(inode->i_mode)) 4040 return 1; 4041 if (S_ISDIR(inode->i_mode)) 4042 return 1; 4043 if (S_ISLNK(inode->i_mode)) 4044 return !ext4_inode_is_fast_symlink(inode); 4045 return 0; 4046 } 4047 4048 /* 4049 * We have to make sure i_disksize gets properly updated before we truncate 4050 * page cache due to hole punching or zero range. Otherwise i_disksize update 4051 * can get lost as it may have been postponed to submission of writeback but 4052 * that will never happen after we truncate page cache. 
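 *
 * For example, a delayed-allocation write can push i_size past i_disksize
 * while leaving the i_disksize update to writeback; if hole punching then
 * discards those dirty pages, that writeback never happens, and after a
 * crash the file would appear truncated to the stale on-disk size.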
 */
int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
				      loff_t len)
{
	handle_t *handle;
	loff_t size = i_size_read(inode);

	WARN_ON(!inode_is_locked(inode));
	if (offset > size || offset + len < size)
		return 0;

	if (EXT4_I(inode)->i_disksize >= size)
		return 0;

	handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ext4_update_i_disksize(inode, size);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);

	return 0;
}

/*
 * ext4_punch_hole: punches a hole in a file by releasing the blocks
 * associated with the given offset and length
 *
 * @inode:  File inode
 * @offset: The offset where the hole will begin
 * @len:    The length of the hole
 *
 * Returns: 0 on success or negative on failure
 */

int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
{
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t first_block, stop_block;
	struct address_space *mapping = inode->i_mapping;
	loff_t first_block_offset, last_block_offset;
	handle_t *handle;
	unsigned int credits;
	int ret = 0;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	trace_ext4_punch_hole(inode, offset, length, 0);

	/*
	 * Write out all dirty pages to avoid race conditions,
	 * then release them.
	 */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
		ret = filemap_write_and_wait_range(mapping, offset,
						   offset + length - 1);
		if (ret)
			return ret;
	}

	inode_lock(inode);

	/* No need to punch hole beyond i_size */
	if (offset >= inode->i_size)
		goto out_mutex;

	/*
	 * If the hole extends beyond i_size, set the hole
	 * to end after the page that contains i_size
	 */
	if (offset + length > inode->i_size) {
		length = inode->i_size +
			 PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
			 offset;
	}

	if (offset & (sb->s_blocksize - 1) ||
	    (offset + length) & (sb->s_blocksize - 1)) {
		/*
		 * Attach jinode to inode for jbd2 if we do any zeroing of
		 * a partial block.
		 */
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			goto out_mutex;

	}

	/* Wait for all existing dio workers; newcomers will block on i_mutex */
	ext4_inode_block_unlocked_dio(inode);
	inode_dio_wait(inode);

	/*
	 * Prevent page faults from reinstantiating pages we have released
	 * from the page cache.
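	 *
	 * (For orientation, this path is what userspace reaches via, e.g.,
	 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
	 *		  offset, len);
	 * the fallocate plumbing itself lives outside this file.)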
4149 */ 4150 down_write(&EXT4_I(inode)->i_mmap_sem); 4151 first_block_offset = round_up(offset, sb->s_blocksize); 4152 last_block_offset = round_down((offset + length), sb->s_blocksize) - 1; 4153 4154 /* Now release the pages and zero block aligned part of pages*/ 4155 if (last_block_offset > first_block_offset) { 4156 ret = ext4_update_disksize_before_punch(inode, offset, length); 4157 if (ret) 4158 goto out_dio; 4159 truncate_pagecache_range(inode, first_block_offset, 4160 last_block_offset); 4161 } 4162 4163 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4164 credits = ext4_writepage_trans_blocks(inode); 4165 else 4166 credits = ext4_blocks_for_truncate(inode); 4167 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 4168 if (IS_ERR(handle)) { 4169 ret = PTR_ERR(handle); 4170 ext4_std_error(sb, ret); 4171 goto out_dio; 4172 } 4173 4174 ret = ext4_zero_partial_blocks(handle, inode, offset, 4175 length); 4176 if (ret) 4177 goto out_stop; 4178 4179 first_block = (offset + sb->s_blocksize - 1) >> 4180 EXT4_BLOCK_SIZE_BITS(sb); 4181 stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb); 4182 4183 /* If there are no blocks to remove, return now */ 4184 if (first_block >= stop_block) 4185 goto out_stop; 4186 4187 down_write(&EXT4_I(inode)->i_data_sem); 4188 ext4_discard_preallocations(inode); 4189 4190 ret = ext4_es_remove_extent(inode, first_block, 4191 stop_block - first_block); 4192 if (ret) { 4193 up_write(&EXT4_I(inode)->i_data_sem); 4194 goto out_stop; 4195 } 4196 4197 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4198 ret = ext4_ext_remove_space(inode, first_block, 4199 stop_block - 1); 4200 else 4201 ret = ext4_ind_remove_space(handle, inode, first_block, 4202 stop_block); 4203 4204 up_write(&EXT4_I(inode)->i_data_sem); 4205 if (IS_SYNC(inode)) 4206 ext4_handle_sync(handle); 4207 4208 inode->i_mtime = inode->i_ctime = current_time(inode); 4209 ext4_mark_inode_dirty(handle, inode); 4210 out_stop: 4211 ext4_journal_stop(handle); 4212 out_dio: 4213 up_write(&EXT4_I(inode)->i_mmap_sem); 4214 ext4_inode_resume_unlocked_dio(inode); 4215 out_mutex: 4216 inode_unlock(inode); 4217 return ret; 4218 } 4219 4220 int ext4_inode_attach_jinode(struct inode *inode) 4221 { 4222 struct ext4_inode_info *ei = EXT4_I(inode); 4223 struct jbd2_inode *jinode; 4224 4225 if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal) 4226 return 0; 4227 4228 jinode = jbd2_alloc_inode(GFP_KERNEL); 4229 spin_lock(&inode->i_lock); 4230 if (!ei->jinode) { 4231 if (!jinode) { 4232 spin_unlock(&inode->i_lock); 4233 return -ENOMEM; 4234 } 4235 ei->jinode = jinode; 4236 jbd2_journal_init_jbd_inode(ei->jinode, inode); 4237 jinode = NULL; 4238 } 4239 spin_unlock(&inode->i_lock); 4240 if (unlikely(jinode != NULL)) 4241 jbd2_free_inode(jinode); 4242 return 0; 4243 } 4244 4245 /* 4246 * ext4_truncate() 4247 * 4248 * We block out ext4_get_block() block instantiations across the entire 4249 * transaction, and VFS/VM ensures that ext4_truncate() cannot run 4250 * simultaneously on behalf of the same inode. 4251 * 4252 * As we work through the truncate and commit bits of it to the journal there 4253 * is one core, guiding principle: the file's tree must always be consistent on 4254 * disk. We must be able to restart the truncate after a crash. 4255 * 4256 * The file's tree may be transiently inconsistent in memory (although it 4257 * probably isn't), but whenever we close off and commit a journal transaction, 4258 * the contents of (the filesystem + the journal) must be consistent and 4259 * restartable. 
It's pretty simple, really: bottom up, right to left (although 4260 * left-to-right works OK too). 4261 * 4262 * Note that at recovery time, journal replay occurs *before* the restart of 4263 * truncate against the orphan inode list. 4264 * 4265 * The committed inode has the new, desired i_size (which is the same as 4266 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see 4267 * that this inode's truncate did not complete and it will again call 4268 * ext4_truncate() to have another go. So there will be instantiated blocks 4269 * to the right of the truncation point in a crashed ext4 filesystem. But 4270 * that's fine - as long as they are linked from the inode, the post-crash 4271 * ext4_truncate() run will find them and release them. 4272 */ 4273 int ext4_truncate(struct inode *inode) 4274 { 4275 struct ext4_inode_info *ei = EXT4_I(inode); 4276 unsigned int credits; 4277 int err = 0; 4278 handle_t *handle; 4279 struct address_space *mapping = inode->i_mapping; 4280 4281 /* 4282 * There is a possibility that we're either freeing the inode 4283 * or it's a completely new inode. In those cases we might not 4284 * have i_mutex locked because it's not necessary. 4285 */ 4286 if (!(inode->i_state & (I_NEW|I_FREEING))) 4287 WARN_ON(!inode_is_locked(inode)); 4288 trace_ext4_truncate_enter(inode); 4289 4290 if (!ext4_can_truncate(inode)) 4291 return 0; 4292 4293 ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 4294 4295 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) 4296 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); 4297 4298 if (ext4_has_inline_data(inode)) { 4299 int has_inline = 1; 4300 4301 err = ext4_inline_data_truncate(inode, &has_inline); 4302 if (err) 4303 return err; 4304 if (has_inline) 4305 return 0; 4306 } 4307 4308 /* If we zero-out tail of the page, we have to create jinode for jbd2 */ 4309 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) { 4310 if (ext4_inode_attach_jinode(inode) < 0) 4311 return 0; 4312 } 4313 4314 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4315 credits = ext4_writepage_trans_blocks(inode); 4316 else 4317 credits = ext4_blocks_for_truncate(inode); 4318 4319 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 4320 if (IS_ERR(handle)) 4321 return PTR_ERR(handle); 4322 4323 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) 4324 ext4_block_truncate_page(handle, mapping, inode->i_size); 4325 4326 /* 4327 * We add the inode to the orphan list, so that if this 4328 * truncate spans multiple transactions, and we crash, we will 4329 * resume the truncate when the filesystem recovers. It also 4330 * marks the inode dirty, to catch the new size. 4331 * 4332 * Implication: the file must always be in a sane, consistent 4333 * truncatable state while each transaction commits. 4334 */ 4335 err = ext4_orphan_add(handle, inode); 4336 if (err) 4337 goto out_stop; 4338 4339 down_write(&EXT4_I(inode)->i_data_sem); 4340 4341 ext4_discard_preallocations(inode); 4342 4343 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4344 err = ext4_ext_truncate(handle, inode); 4345 else 4346 ext4_ind_truncate(handle, inode); 4347 4348 up_write(&ei->i_data_sem); 4349 if (err) 4350 goto out_stop; 4351 4352 if (IS_SYNC(inode)) 4353 ext4_handle_sync(handle); 4354 4355 out_stop: 4356 /* 4357 * If this was a simple ftruncate() and the file will remain alive, 4358 * then we need to clear up the orphan record which we created above. 
4359 * However, if this was a real unlink then we were called by 4360 * ext4_evict_inode(), and we allow that function to clean up the 4361 * orphan info for us. 4362 */ 4363 if (inode->i_nlink) 4364 ext4_orphan_del(handle, inode); 4365 4366 inode->i_mtime = inode->i_ctime = current_time(inode); 4367 ext4_mark_inode_dirty(handle, inode); 4368 ext4_journal_stop(handle); 4369 4370 trace_ext4_truncate_exit(inode); 4371 return err; 4372 } 4373 4374 /* 4375 * ext4_get_inode_loc returns with an extra refcount against the inode's 4376 * underlying buffer_head on success. If 'in_mem' is true, we have all 4377 * data in memory that is needed to recreate the on-disk version of this 4378 * inode. 4379 */ 4380 static int __ext4_get_inode_loc(struct inode *inode, 4381 struct ext4_iloc *iloc, int in_mem) 4382 { 4383 struct ext4_group_desc *gdp; 4384 struct buffer_head *bh; 4385 struct super_block *sb = inode->i_sb; 4386 ext4_fsblk_t block; 4387 int inodes_per_block, inode_offset; 4388 4389 iloc->bh = NULL; 4390 if (!ext4_valid_inum(sb, inode->i_ino)) 4391 return -EFSCORRUPTED; 4392 4393 iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); 4394 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); 4395 if (!gdp) 4396 return -EIO; 4397 4398 /* 4399 * Figure out the offset within the block group inode table 4400 */ 4401 inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; 4402 inode_offset = ((inode->i_ino - 1) % 4403 EXT4_INODES_PER_GROUP(sb)); 4404 block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); 4405 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); 4406 4407 bh = sb_getblk(sb, block); 4408 if (unlikely(!bh)) 4409 return -ENOMEM; 4410 if (!buffer_uptodate(bh)) { 4411 lock_buffer(bh); 4412 4413 /* 4414 * If the buffer has the write error flag, we have failed 4415 * to write out another inode in the same block. In this 4416 * case, we don't have to read the block because we may 4417 * read the old inode data successfully. 4418 */ 4419 if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) 4420 set_buffer_uptodate(bh); 4421 4422 if (buffer_uptodate(bh)) { 4423 /* someone brought it uptodate while we waited */ 4424 unlock_buffer(bh); 4425 goto has_buffer; 4426 } 4427 4428 /* 4429 * If we have all information of the inode in memory and this 4430 * is the only valid inode in the block, we need not read the 4431 * block. 4432 */ 4433 if (in_mem) { 4434 struct buffer_head *bitmap_bh; 4435 int i, start; 4436 4437 start = inode_offset & ~(inodes_per_block - 1); 4438 4439 /* Is the inode bitmap in cache? */ 4440 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); 4441 if (unlikely(!bitmap_bh)) 4442 goto make_io; 4443 4444 /* 4445 * If the inode bitmap isn't in cache then the 4446 * optimisation may end up performing two reads instead 4447 * of one, so skip it. 4448 */ 4449 if (!buffer_uptodate(bitmap_bh)) { 4450 brelse(bitmap_bh); 4451 goto make_io; 4452 } 4453 for (i = start; i < start + inodes_per_block; i++) { 4454 if (i == inode_offset) 4455 continue; 4456 if (ext4_test_bit(i, bitmap_bh->b_data)) 4457 break; 4458 } 4459 brelse(bitmap_bh); 4460 if (i == start + inodes_per_block) { 4461 /* all other inodes are free, so skip I/O */ 4462 memset(bh->b_data, 0, bh->b_size); 4463 set_buffer_uptodate(bh); 4464 unlock_buffer(bh); 4465 goto has_buffer; 4466 } 4467 } 4468 4469 make_io: 4470 /* 4471 * If we need to do any I/O, try to pre-readahead extra 4472 * blocks from the inode table. 
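		 *
		 * For example, with s_inode_readahead_blks = 32 and the
		 * wanted block at 1037, b is rounded down to 1024 and
		 * readahead is issued from there through 1024 + 32, clipped
		 * so that it neither starts before the inode table nor runs
		 * past its in-use portion.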
4473 */ 4474 if (EXT4_SB(sb)->s_inode_readahead_blks) { 4475 ext4_fsblk_t b, end, table; 4476 unsigned num; 4477 __u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks; 4478 4479 table = ext4_inode_table(sb, gdp); 4480 /* s_inode_readahead_blks is always a power of 2 */ 4481 b = block & ~((ext4_fsblk_t) ra_blks - 1); 4482 if (table > b) 4483 b = table; 4484 end = b + ra_blks; 4485 num = EXT4_INODES_PER_GROUP(sb); 4486 if (ext4_has_group_desc_csum(sb)) 4487 num -= ext4_itable_unused_count(sb, gdp); 4488 table += num / inodes_per_block; 4489 if (end > table) 4490 end = table; 4491 while (b <= end) 4492 sb_breadahead(sb, b++); 4493 } 4494 4495 /* 4496 * There are other valid inodes in the buffer, this inode 4497 * has in-inode xattrs, or we don't have this inode in memory. 4498 * Read the block from disk. 4499 */ 4500 trace_ext4_load_inode(inode); 4501 get_bh(bh); 4502 bh->b_end_io = end_buffer_read_sync; 4503 submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh); 4504 wait_on_buffer(bh); 4505 if (!buffer_uptodate(bh)) { 4506 EXT4_ERROR_INODE_BLOCK(inode, block, 4507 "unable to read itable block"); 4508 brelse(bh); 4509 return -EIO; 4510 } 4511 } 4512 has_buffer: 4513 iloc->bh = bh; 4514 return 0; 4515 } 4516 4517 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) 4518 { 4519 /* We have all inode data except xattrs in memory here. */ 4520 return __ext4_get_inode_loc(inode, iloc, 4521 !ext4_test_inode_state(inode, EXT4_STATE_XATTR)); 4522 } 4523 4524 void ext4_set_inode_flags(struct inode *inode) 4525 { 4526 unsigned int flags = EXT4_I(inode)->i_flags; 4527 unsigned int new_fl = 0; 4528 4529 if (flags & EXT4_SYNC_FL) 4530 new_fl |= S_SYNC; 4531 if (flags & EXT4_APPEND_FL) 4532 new_fl |= S_APPEND; 4533 if (flags & EXT4_IMMUTABLE_FL) 4534 new_fl |= S_IMMUTABLE; 4535 if (flags & EXT4_NOATIME_FL) 4536 new_fl |= S_NOATIME; 4537 if (flags & EXT4_DIRSYNC_FL) 4538 new_fl |= S_DIRSYNC; 4539 if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode) && 4540 !ext4_should_journal_data(inode) && !ext4_has_inline_data(inode) && 4541 !ext4_encrypted_inode(inode)) 4542 new_fl |= S_DAX; 4543 inode_set_flags(inode, new_fl, 4544 S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX); 4545 } 4546 4547 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, 4548 struct ext4_inode_info *ei) 4549 { 4550 blkcnt_t i_blocks ; 4551 struct inode *inode = &(ei->vfs_inode); 4552 struct super_block *sb = inode->i_sb; 4553 4554 if (ext4_has_feature_huge_file(sb)) { 4555 /* we are using combined 48 bit field */ 4556 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | 4557 le32_to_cpu(raw_inode->i_blocks_lo); 4558 if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) { 4559 /* i_blocks represent file system block size */ 4560 return i_blocks << (inode->i_blkbits - 9); 4561 } else { 4562 return i_blocks; 4563 } 4564 } else { 4565 return le32_to_cpu(raw_inode->i_blocks_lo); 4566 } 4567 } 4568 4569 static inline void ext4_iget_extra_inode(struct inode *inode, 4570 struct ext4_inode *raw_inode, 4571 struct ext4_inode_info *ei) 4572 { 4573 __le32 *magic = (void *)raw_inode + 4574 EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize; 4575 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <= 4576 EXT4_INODE_SIZE(inode->i_sb) && 4577 *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { 4578 ext4_set_inode_state(inode, EXT4_STATE_XATTR); 4579 ext4_find_inline_data_nolock(inode); 4580 } else 4581 EXT4_I(inode)->i_inline_off = 0; 4582 } 4583 4584 int ext4_get_projid(struct inode *inode, kprojid_t *projid) 4585 { 
4586 if (!ext4_has_feature_project(inode->i_sb)) 4587 return -EOPNOTSUPP; 4588 *projid = EXT4_I(inode)->i_projid; 4589 return 0; 4590 } 4591 4592 struct inode *ext4_iget(struct super_block *sb, unsigned long ino) 4593 { 4594 struct ext4_iloc iloc; 4595 struct ext4_inode *raw_inode; 4596 struct ext4_inode_info *ei; 4597 struct inode *inode; 4598 journal_t *journal = EXT4_SB(sb)->s_journal; 4599 long ret; 4600 loff_t size; 4601 int block; 4602 uid_t i_uid; 4603 gid_t i_gid; 4604 projid_t i_projid; 4605 4606 inode = iget_locked(sb, ino); 4607 if (!inode) 4608 return ERR_PTR(-ENOMEM); 4609 if (!(inode->i_state & I_NEW)) 4610 return inode; 4611 4612 ei = EXT4_I(inode); 4613 iloc.bh = NULL; 4614 4615 ret = __ext4_get_inode_loc(inode, &iloc, 0); 4616 if (ret < 0) 4617 goto bad_inode; 4618 raw_inode = ext4_raw_inode(&iloc); 4619 4620 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4621 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 4622 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 4623 EXT4_INODE_SIZE(inode->i_sb) || 4624 (ei->i_extra_isize & 3)) { 4625 EXT4_ERROR_INODE(inode, 4626 "bad extra_isize %u (inode size %u)", 4627 ei->i_extra_isize, 4628 EXT4_INODE_SIZE(inode->i_sb)); 4629 ret = -EFSCORRUPTED; 4630 goto bad_inode; 4631 } 4632 } else 4633 ei->i_extra_isize = 0; 4634 4635 /* Precompute checksum seed for inode metadata */ 4636 if (ext4_has_metadata_csum(sb)) { 4637 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4638 __u32 csum; 4639 __le32 inum = cpu_to_le32(inode->i_ino); 4640 __le32 gen = raw_inode->i_generation; 4641 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, 4642 sizeof(inum)); 4643 ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, 4644 sizeof(gen)); 4645 } 4646 4647 if (!ext4_inode_csum_verify(inode, raw_inode, ei)) { 4648 EXT4_ERROR_INODE(inode, "checksum invalid"); 4649 ret = -EFSBADCRC; 4650 goto bad_inode; 4651 } 4652 4653 inode->i_mode = le16_to_cpu(raw_inode->i_mode); 4654 i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 4655 i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 4656 if (ext4_has_feature_project(sb) && 4657 EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE && 4658 EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) 4659 i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid); 4660 else 4661 i_projid = EXT4_DEF_PROJID; 4662 4663 if (!(test_opt(inode->i_sb, NO_UID32))) { 4664 i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; 4665 i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; 4666 } 4667 i_uid_write(inode, i_uid); 4668 i_gid_write(inode, i_gid); 4669 ei->i_projid = make_kprojid(&init_user_ns, i_projid); 4670 set_nlink(inode, le16_to_cpu(raw_inode->i_links_count)); 4671 4672 ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ 4673 ei->i_inline_off = 0; 4674 ei->i_dir_start_lookup = 0; 4675 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); 4676 /* We now have enough fields to check if the inode was active or not. 4677 * This is needed because nfsd might try to access dead inodes 4678 * the test is that same one that e2fsck uses 4679 * NeilBrown 1999oct15 4680 */ 4681 if (inode->i_nlink == 0) { 4682 if ((inode->i_mode == 0 || 4683 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) && 4684 ino != EXT4_BOOT_LOADER_INO) { 4685 /* this inode is deleted */ 4686 ret = -ESTALE; 4687 goto bad_inode; 4688 } 4689 /* The only unlinked inodes we let through here have 4690 * valid i_mode and are being read by the orphan 4691 * recovery code: that's fine, we're about to complete 4692 * the process of deleting those. 
4693 * OR it is the EXT4_BOOT_LOADER_INO which is 4694 * not initialized on a new filesystem. */ 4695 } 4696 ei->i_flags = le32_to_cpu(raw_inode->i_flags); 4697 inode->i_blocks = ext4_inode_blocks(raw_inode, ei); 4698 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); 4699 if (ext4_has_feature_64bit(sb)) 4700 ei->i_file_acl |= 4701 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; 4702 inode->i_size = ext4_isize(raw_inode); 4703 if ((size = i_size_read(inode)) < 0) { 4704 EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size); 4705 ret = -EFSCORRUPTED; 4706 goto bad_inode; 4707 } 4708 ei->i_disksize = inode->i_size; 4709 #ifdef CONFIG_QUOTA 4710 ei->i_reserved_quota = 0; 4711 #endif 4712 inode->i_generation = le32_to_cpu(raw_inode->i_generation); 4713 ei->i_block_group = iloc.block_group; 4714 ei->i_last_alloc_group = ~0; 4715 /* 4716 * NOTE! The in-memory inode i_data array is in little-endian order 4717 * even on big-endian machines: we do NOT byteswap the block numbers! 4718 */ 4719 for (block = 0; block < EXT4_N_BLOCKS; block++) 4720 ei->i_data[block] = raw_inode->i_block[block]; 4721 INIT_LIST_HEAD(&ei->i_orphan); 4722 4723 /* 4724 * Set transaction id's of transactions that have to be committed 4725 * to finish f[data]sync. We set them to currently running transaction 4726 * as we cannot be sure that the inode or some of its metadata isn't 4727 * part of the transaction - the inode could have been reclaimed and 4728 * now it is reread from disk. 4729 */ 4730 if (journal) { 4731 transaction_t *transaction; 4732 tid_t tid; 4733 4734 read_lock(&journal->j_state_lock); 4735 if (journal->j_running_transaction) 4736 transaction = journal->j_running_transaction; 4737 else 4738 transaction = journal->j_committing_transaction; 4739 if (transaction) 4740 tid = transaction->t_tid; 4741 else 4742 tid = journal->j_commit_sequence; 4743 read_unlock(&journal->j_state_lock); 4744 ei->i_sync_tid = tid; 4745 ei->i_datasync_tid = tid; 4746 } 4747 4748 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4749 if (ei->i_extra_isize == 0) { 4750 /* The extra space is currently unused. Use it. 
*/ 4751 BUILD_BUG_ON(sizeof(struct ext4_inode) & 3); 4752 ei->i_extra_isize = sizeof(struct ext4_inode) - 4753 EXT4_GOOD_OLD_INODE_SIZE; 4754 } else { 4755 ext4_iget_extra_inode(inode, raw_inode, ei); 4756 } 4757 } 4758 4759 EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); 4760 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); 4761 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); 4762 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); 4763 4764 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) { 4765 inode->i_version = le32_to_cpu(raw_inode->i_disk_version); 4766 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4767 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 4768 inode->i_version |= 4769 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; 4770 } 4771 } 4772 4773 ret = 0; 4774 if (ei->i_file_acl && 4775 !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { 4776 EXT4_ERROR_INODE(inode, "bad extended attribute block %llu", 4777 ei->i_file_acl); 4778 ret = -EFSCORRUPTED; 4779 goto bad_inode; 4780 } else if (!ext4_has_inline_data(inode)) { 4781 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 4782 if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 4783 (S_ISLNK(inode->i_mode) && 4784 !ext4_inode_is_fast_symlink(inode)))) 4785 /* Validate extent which is part of inode */ 4786 ret = ext4_ext_check_inode(inode); 4787 } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 4788 (S_ISLNK(inode->i_mode) && 4789 !ext4_inode_is_fast_symlink(inode))) { 4790 /* Validate block references which are part of inode */ 4791 ret = ext4_ind_check_inode(inode); 4792 } 4793 } 4794 if (ret) 4795 goto bad_inode; 4796 4797 if (S_ISREG(inode->i_mode)) { 4798 inode->i_op = &ext4_file_inode_operations; 4799 inode->i_fop = &ext4_file_operations; 4800 ext4_set_aops(inode); 4801 } else if (S_ISDIR(inode->i_mode)) { 4802 inode->i_op = &ext4_dir_inode_operations; 4803 inode->i_fop = &ext4_dir_operations; 4804 } else if (S_ISLNK(inode->i_mode)) { 4805 if (ext4_encrypted_inode(inode)) { 4806 inode->i_op = &ext4_encrypted_symlink_inode_operations; 4807 ext4_set_aops(inode); 4808 } else if (ext4_inode_is_fast_symlink(inode)) { 4809 inode->i_link = (char *)ei->i_data; 4810 inode->i_op = &ext4_fast_symlink_inode_operations; 4811 nd_terminate_link(ei->i_data, inode->i_size, 4812 sizeof(ei->i_data) - 1); 4813 } else { 4814 inode->i_op = &ext4_symlink_inode_operations; 4815 ext4_set_aops(inode); 4816 } 4817 inode_nohighmem(inode); 4818 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || 4819 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { 4820 inode->i_op = &ext4_special_inode_operations; 4821 if (raw_inode->i_block[0]) 4822 init_special_inode(inode, inode->i_mode, 4823 old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 4824 else 4825 init_special_inode(inode, inode->i_mode, 4826 new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 4827 } else if (ino == EXT4_BOOT_LOADER_INO) { 4828 make_bad_inode(inode); 4829 } else { 4830 ret = -EFSCORRUPTED; 4831 EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode); 4832 goto bad_inode; 4833 } 4834 brelse(iloc.bh); 4835 ext4_set_inode_flags(inode); 4836 unlock_new_inode(inode); 4837 return inode; 4838 4839 bad_inode: 4840 brelse(iloc.bh); 4841 iget_failed(inode); 4842 return ERR_PTR(ret); 4843 } 4844 4845 struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino) 4846 { 4847 if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) 4848 return ERR_PTR(-EFSCORRUPTED); 4849 return ext4_iget(sb, ino); 4850 } 4851 4852 
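/*
 * ext4_inode_blocks_set() below is the encode-side counterpart of
 * ext4_inode_blocks() above: together they pack/unpack a 48-bit block
 * count into i_blocks_lo/i_blocks_high. A minimal sketch of the decode,
 * assuming huge_file_flag mirrors EXT4_INODE_HUGE_FILE:
 *
 *	u64 blocks = ((u64)le16_to_cpu(raw->i_blocks_high) << 32) |
 *		     le32_to_cpu(raw->i_blocks_lo);
 *	if (huge_file_flag)		// units are filesystem blocks
 *		blocks <<= inode->i_blkbits - 9;	// back to 512-byte units
 */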
static int ext4_inode_blocks_set(handle_t *handle, 4853 struct ext4_inode *raw_inode, 4854 struct ext4_inode_info *ei) 4855 { 4856 struct inode *inode = &(ei->vfs_inode); 4857 u64 i_blocks = inode->i_blocks; 4858 struct super_block *sb = inode->i_sb; 4859 4860 if (i_blocks <= ~0U) { 4861 /* 4862 * i_blocks can be represented in a 32 bit variable 4863 * as multiple of 512 bytes 4864 */ 4865 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 4866 raw_inode->i_blocks_high = 0; 4867 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 4868 return 0; 4869 } 4870 if (!ext4_has_feature_huge_file(sb)) 4871 return -EFBIG; 4872 4873 if (i_blocks <= 0xffffffffffffULL) { 4874 /* 4875 * i_blocks can be represented in a 48 bit variable 4876 * as multiple of 512 bytes 4877 */ 4878 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 4879 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 4880 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 4881 } else { 4882 ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE); 4883 /* i_block is stored in file system block size */ 4884 i_blocks = i_blocks >> (inode->i_blkbits - 9); 4885 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 4886 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 4887 } 4888 return 0; 4889 } 4890 4891 struct other_inode { 4892 unsigned long orig_ino; 4893 struct ext4_inode *raw_inode; 4894 }; 4895 4896 static int other_inode_match(struct inode * inode, unsigned long ino, 4897 void *data) 4898 { 4899 struct other_inode *oi = (struct other_inode *) data; 4900 4901 if ((inode->i_ino != ino) || 4902 (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW | 4903 I_DIRTY_SYNC | I_DIRTY_DATASYNC)) || 4904 ((inode->i_state & I_DIRTY_TIME) == 0)) 4905 return 0; 4906 spin_lock(&inode->i_lock); 4907 if (((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW | 4908 I_DIRTY_SYNC | I_DIRTY_DATASYNC)) == 0) && 4909 (inode->i_state & I_DIRTY_TIME)) { 4910 struct ext4_inode_info *ei = EXT4_I(inode); 4911 4912 inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED); 4913 spin_unlock(&inode->i_lock); 4914 4915 spin_lock(&ei->i_raw_lock); 4916 EXT4_INODE_SET_XTIME(i_ctime, inode, oi->raw_inode); 4917 EXT4_INODE_SET_XTIME(i_mtime, inode, oi->raw_inode); 4918 EXT4_INODE_SET_XTIME(i_atime, inode, oi->raw_inode); 4919 ext4_inode_csum_set(inode, oi->raw_inode, ei); 4920 spin_unlock(&ei->i_raw_lock); 4921 trace_ext4_other_inode_update_time(inode, oi->orig_ino); 4922 return -1; 4923 } 4924 spin_unlock(&inode->i_lock); 4925 return -1; 4926 } 4927 4928 /* 4929 * Opportunistically update the other time fields for other inodes in 4930 * the same inode table block. 4931 */ 4932 static void ext4_update_other_inodes_time(struct super_block *sb, 4933 unsigned long orig_ino, char *buf) 4934 { 4935 struct other_inode oi; 4936 unsigned long ino; 4937 int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; 4938 int inode_size = EXT4_INODE_SIZE(sb); 4939 4940 oi.orig_ino = orig_ino; 4941 /* 4942 * Calculate the first inode in the inode table block. Inode 4943 * numbers are one-based. That is, the first inode in a block 4944 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1). 4945 */ 4946 ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1; 4947 for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) { 4948 if (ino == orig_ino) 4949 continue; 4950 oi.raw_inode = (struct ext4_inode *) buf; 4951 (void) find_inode_nowait(sb, ino, other_inode_match, &oi); 4952 } 4953 } 4954 4955 /* 4956 * Post the struct inode info into an on-disk inode location in the 4957 * buffer-cache. 
This gobbles the caller's reference to the 4958 * buffer_head in the inode location struct. 4959 * 4960 * The caller must have write access to iloc->bh. 4961 */ 4962 static int ext4_do_update_inode(handle_t *handle, 4963 struct inode *inode, 4964 struct ext4_iloc *iloc) 4965 { 4966 struct ext4_inode *raw_inode = ext4_raw_inode(iloc); 4967 struct ext4_inode_info *ei = EXT4_I(inode); 4968 struct buffer_head *bh = iloc->bh; 4969 struct super_block *sb = inode->i_sb; 4970 int err = 0, rc, block; 4971 int need_datasync = 0, set_large_file = 0; 4972 uid_t i_uid; 4973 gid_t i_gid; 4974 projid_t i_projid; 4975 4976 spin_lock(&ei->i_raw_lock); 4977 4978 /* For fields not tracked in the in-memory inode, 4979 * initialise them to zero for new inodes. */ 4980 if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) 4981 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); 4982 4983 raw_inode->i_mode = cpu_to_le16(inode->i_mode); 4984 i_uid = i_uid_read(inode); 4985 i_gid = i_gid_read(inode); 4986 i_projid = from_kprojid(&init_user_ns, ei->i_projid); 4987 if (!(test_opt(inode->i_sb, NO_UID32))) { 4988 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid)); 4989 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid)); 4990 /* 4991 * Fix up interoperability with old kernels. Otherwise, old inodes get 4992 * re-used with the upper 16 bits of the uid/gid intact 4993 */ 4994 if (ei->i_dtime && list_empty(&ei->i_orphan)) { 4995 raw_inode->i_uid_high = 0; 4996 raw_inode->i_gid_high = 0; 4997 } else { 4998 raw_inode->i_uid_high = 4999 cpu_to_le16(high_16_bits(i_uid)); 5000 raw_inode->i_gid_high = 5001 cpu_to_le16(high_16_bits(i_gid)); 5002 } 5003 } else { 5004 raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid)); 5005 raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid)); 5006 raw_inode->i_uid_high = 0; 5007 raw_inode->i_gid_high = 0; 5008 } 5009 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 5010 5011 EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); 5012 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); 5013 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); 5014 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); 5015 5016 err = ext4_inode_blocks_set(handle, raw_inode, ei); 5017 if (err) { 5018 spin_unlock(&ei->i_raw_lock); 5019 goto out_brelse; 5020 } 5021 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); 5022 raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF); 5023 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) 5024 raw_inode->i_file_acl_high = 5025 cpu_to_le16(ei->i_file_acl >> 32); 5026 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); 5027 if (ei->i_disksize != ext4_isize(raw_inode)) { 5028 ext4_isize_set(raw_inode, ei->i_disksize); 5029 need_datasync = 1; 5030 } 5031 if (ei->i_disksize > 0x7fffffffULL) { 5032 if (!ext4_has_feature_large_file(sb) || 5033 EXT4_SB(sb)->s_es->s_rev_level == 5034 cpu_to_le32(EXT4_GOOD_OLD_REV)) 5035 set_large_file = 1; 5036 } 5037 raw_inode->i_generation = cpu_to_le32(inode->i_generation); 5038 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 5039 if (old_valid_dev(inode->i_rdev)) { 5040 raw_inode->i_block[0] = 5041 cpu_to_le32(old_encode_dev(inode->i_rdev)); 5042 raw_inode->i_block[1] = 0; 5043 } else { 5044 raw_inode->i_block[0] = 0; 5045 raw_inode->i_block[1] = 5046 cpu_to_le32(new_encode_dev(inode->i_rdev)); 5047 raw_inode->i_block[2] = 0; 5048 } 5049 } else if (!ext4_has_inline_data(inode)) { 5050 for (block = 0; block < EXT4_N_BLOCKS; block++) 5051 raw_inode->i_block[block] = ei->i_data[block]; 5052 } 5053 5054 if 
(likely(!test_opt2(inode->i_sb, HURD_COMPAT))) { 5055 raw_inode->i_disk_version = cpu_to_le32(inode->i_version); 5056 if (ei->i_extra_isize) { 5057 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 5058 raw_inode->i_version_hi = 5059 cpu_to_le32(inode->i_version >> 32); 5060 raw_inode->i_extra_isize = 5061 cpu_to_le16(ei->i_extra_isize); 5062 } 5063 } 5064 5065 BUG_ON(!ext4_has_feature_project(inode->i_sb) && 5066 i_projid != EXT4_DEF_PROJID); 5067 5068 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && 5069 EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) 5070 raw_inode->i_projid = cpu_to_le32(i_projid); 5071 5072 ext4_inode_csum_set(inode, raw_inode, ei); 5073 spin_unlock(&ei->i_raw_lock); 5074 if (inode->i_sb->s_flags & MS_LAZYTIME) 5075 ext4_update_other_inodes_time(inode->i_sb, inode->i_ino, 5076 bh->b_data); 5077 5078 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 5079 rc = ext4_handle_dirty_metadata(handle, NULL, bh); 5080 if (!err) 5081 err = rc; 5082 ext4_clear_inode_state(inode, EXT4_STATE_NEW); 5083 if (set_large_file) { 5084 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access"); 5085 err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh); 5086 if (err) 5087 goto out_brelse; 5088 ext4_update_dynamic_rev(sb); 5089 ext4_set_feature_large_file(sb); 5090 ext4_handle_sync(handle); 5091 err = ext4_handle_dirty_super(handle, sb); 5092 } 5093 ext4_update_inode_fsync_trans(handle, inode, need_datasync); 5094 out_brelse: 5095 brelse(bh); 5096 ext4_std_error(inode->i_sb, err); 5097 return err; 5098 } 5099 5100 /* 5101 * ext4_write_inode() 5102 * 5103 * We are called from a few places: 5104 * 5105 * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files. 5106 * Here, there will be no transaction running. We wait for any running 5107 * transaction to commit. 5108 * 5109 * - Within flush work (sys_sync(), kupdate and such). 5110 * We wait on commit, if told to. 5111 * 5112 * - Within iput_final() -> write_inode_now() 5113 * We wait on commit, if told to. 5114 * 5115 * In all cases it is actually safe for us to return without doing anything, 5116 * because the inode has been copied into a raw inode buffer in 5117 * ext4_mark_inode_dirty(). This is a correctness thing for WB_SYNC_ALL 5118 * writeback. 5119 * 5120 * Note that we are absolutely dependent upon all inode dirtiers doing the 5121 * right thing: they *must* call mark_inode_dirty() after dirtying info in 5122 * which we are interested. 5123 * 5124 * It would be a bug for them to not do this. The code: 5125 * 5126 * mark_inode_dirty(inode) 5127 * stuff(); 5128 * inode->i_size = expr; 5129 * 5130 * is in error because write_inode() could occur while `stuff()' is running, 5131 * and the new i_size will be lost. Plus the inode will no longer be on the 5132 * superblock's dirty inode list. 5133 */ 5134 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) 5135 { 5136 int err; 5137 5138 if (WARN_ON_ONCE(current->flags & PF_MEMALLOC)) 5139 return 0; 5140 5141 if (EXT4_SB(inode->i_sb)->s_journal) { 5142 if (ext4_journal_current_handle()) { 5143 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); 5144 dump_stack(); 5145 return -EIO; 5146 } 5147 5148 /* 5149 * No need to force transaction in WB_SYNC_NONE mode. Also 5150 * ext4_sync_fs() will force the commit after everything is 5151 * written. 
		 */
		if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
			return 0;

		err = ext4_force_commit(inode->i_sb);
	} else {
		struct ext4_iloc iloc;

		err = __ext4_get_inode_loc(inode, &iloc, 0);
		if (err)
			return err;
		/*
		 * sync(2) will flush the whole buffer cache. No need to do
		 * it here separately for each inode.
		 */
		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
			sync_dirty_buffer(iloc.bh);
		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
			EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
					       "IO error syncing inode");
			err = -EIO;
		}
		brelse(iloc.bh);
	}
	return err;
}

/*
 * In data=journal mode ext4_journalled_invalidatepage() may fail to
 * invalidate buffers that are attached to a page straddling i_size and
 * are undergoing commit. In that case we have to wait for commit to
 * finish and try again.
 */
static void ext4_wait_for_tail_page_commit(struct inode *inode)
{
	struct page *page;
	unsigned offset;
	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
	tid_t commit_tid = 0;
	int ret;

	offset = inode->i_size & (PAGE_SIZE - 1);
	/*
	 * If all buffers in the last page remain valid, there is nothing to
	 * do. We do the check mainly to optimize the common PAGE_SIZE ==
	 * blocksize case.
	 */
	if (offset > PAGE_SIZE - i_blocksize(inode))
		return;
	while (1) {
		page = find_lock_page(inode->i_mapping,
				      inode->i_size >> PAGE_SHIFT);
		if (!page)
			return;
		ret = __ext4_journalled_invalidatepage(page, offset,
						       PAGE_SIZE - offset);
		unlock_page(page);
		put_page(page);
		if (ret != -EBUSY)
			return;
		commit_tid = 0;
		read_lock(&journal->j_state_lock);
		if (journal->j_committing_transaction)
			commit_tid = journal->j_committing_transaction->t_tid;
		read_unlock(&journal->j_state_lock);
		if (commit_tid)
			jbd2_log_wait_commit(journal, commit_tid);
	}
}

/*
 * ext4_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible. In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk. (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Another thing we have to assure is that if we are in ordered mode and
 * the inode is still attached to the committing transaction, then we
 * must start writeout of all the dirty pages which are being truncated.
 * This way we are sure that all the data written in the previous
 * transaction is already on disk (truncate waits for pages under
 * writeback).
 *
 * Called with inode->i_mutex down.
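 *
 * So, for a shrinking truncate, the code below proceeds roughly as: put
 * the inode on the orphan list, update i_disksize and i_size together
 * under i_data_sem, then truncate the page cache and call ext4_truncate().
 * An ftruncate() from 1 MiB down to 4 KiB that crashes midway is finished
 * off by orphan cleanup at the next mount.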
/*
 * ext4_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible. In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk. (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Another thing we have to assure is that if we are in ordered mode
 * and inode is still attached to the committing transaction, we must
 * start writeout of all the dirty pages which are being truncated.
 * This way we are sure that all the data written in the previous
 * transaction are already on disk (truncate waits for pages under
 * writeback).
 *
 * Called with inode->i_mutex down.
 */
int ext4_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int error, rc = 0;
	int orphan = 0;
	const unsigned int ia_valid = attr->ia_valid;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (is_quota_modification(inode, attr)) {
		error = dquot_initialize(inode);
		if (error)
			return error;
	}
	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
		handle_t *handle;

		/* (user+group)*(old+new) structure, inode write (sb,
		 * inode block, ? - but truncate inode update has it) */
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
			(EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
			 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}
		error = dquot_transfer(inode, attr);
		if (error) {
			ext4_journal_stop(handle);
			return error;
		}
		/* Update corresponding info in inode so that everything is in
		 * one transaction */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		error = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	}

	if (attr->ia_valid & ATTR_SIZE) {
		handle_t *handle;
		loff_t oldsize = inode->i_size;
		int shrink = (attr->ia_size <= inode->i_size);

		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

			if (attr->ia_size > sbi->s_bitmap_maxbytes)
				return -EFBIG;
		}
		if (!S_ISREG(inode->i_mode))
			return -EINVAL;

		if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
			inode_inc_iversion(inode);

		if (ext4_should_order_data(inode) &&
		    (attr->ia_size < inode->i_size)) {
			error = ext4_begin_ordered_truncate(inode,
							    attr->ia_size);
			if (error)
				goto err_out;
		}
		if (attr->ia_size != inode->i_size) {
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
			if (IS_ERR(handle)) {
				error = PTR_ERR(handle);
				goto err_out;
			}
			if (ext4_handle_valid(handle) && shrink) {
				error = ext4_orphan_add(handle, inode);
				orphan = 1;
			}
			/*
			 * Update c/mtime on truncate up, ext4_truncate() will
			 * update c/mtime in the shrink case below
			 */
			if (!shrink) {
				inode->i_mtime = current_time(inode);
				inode->i_ctime = inode->i_mtime;
			}
			down_write(&EXT4_I(inode)->i_data_sem);
			EXT4_I(inode)->i_disksize = attr->ia_size;
			rc = ext4_mark_inode_dirty(handle, inode);
			if (!error)
				error = rc;
			/*
			 * We have to update i_size under i_data_sem together
			 * with i_disksize to avoid races with writeback code
			 * running ext4_wb_update_i_disksize().
			 */
			if (!error)
				i_size_write(inode, attr->ia_size);
			up_write(&EXT4_I(inode)->i_data_sem);
			ext4_journal_stop(handle);
			if (error) {
				if (orphan)
					ext4_orphan_del(NULL, inode);
				goto err_out;
			}
		}
		if (!shrink)
			pagecache_isize_extended(inode, oldsize, inode->i_size);

		/*
		 * Blocks are going to be removed from the inode. Wait
		 * for dio in flight. Temporarily disable
		 * dioread_nolock to prevent livelock.
		 */
		if (orphan) {
			if (!ext4_should_journal_data(inode)) {
				ext4_inode_block_unlocked_dio(inode);
				inode_dio_wait(inode);
				ext4_inode_resume_unlocked_dio(inode);
			} else
				ext4_wait_for_tail_page_commit(inode);
		}
		down_write(&EXT4_I(inode)->i_mmap_sem);
		/*
		 * Truncate pagecache after we've waited for commit
		 * in data=journal mode to make pages freeable.
		 */
		truncate_pagecache(inode, inode->i_size);
		if (shrink) {
			rc = ext4_truncate(inode);
			if (rc)
				error = rc;
		}
		up_write(&EXT4_I(inode)->i_mmap_sem);
	}

	if (!error) {
		setattr_copy(inode, attr);
		mark_inode_dirty(inode);
	}

	/*
	 * If the call to ext4_truncate failed to get a transaction handle at
	 * all, we need to clean up the in-core orphan list manually.
	 */
	if (orphan && inode->i_nlink)
		ext4_orphan_del(NULL, inode);

	if (!error && (ia_valid & ATTR_MODE))
		rc = posix_acl_chmod(inode, inode->i_mode);

err_out:
	ext4_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	return error;
}

int ext4_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ext4_inode *raw_inode;
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int flags;

	if (EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = ei->i_crtime.tv_sec;
		stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
	}

	flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
	if (flags & EXT4_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & EXT4_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & EXT4_ENCRYPT_FL)
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & EXT4_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & EXT4_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(inode, stat);
	return 0;
}
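/*
 * Illustrative userspace counterpart (a sketch, not kernel code): how the
 * STATX_BTIME result and STATX_ATTR_* bits filled in above are consumed
 * through statx(2). Assumes a glibc (>= 2.28) that provides the statx()
 * wrapper; hypothetical example program, not part of this file's build.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;

	if (argc < 2)
		return 2;
	/* Ask for the birth time; stx_mask reports what was filled in */
	if (statx(AT_FDCWD, argv[1], 0, STATX_BTIME, &stx) != 0)
		return 1;
	if (stx.stx_mask & STATX_BTIME)
		printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
	if (stx.stx_attributes & STATX_ATTR_IMMUTABLE)
		printf("immutable\n");
	return 0;
}
#endif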
int ext4_file_getattr(const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	u64 delalloc_blocks;

	ext4_getattr(path, stat, request_mask, query_flags);

	/*
	 * If there is inline data in the inode, the inode will normally not
	 * have data blocks allocated (it may have an external xattr block).
	 * Report at least one sector for such files, so tools like tar and
	 * rsync don't incorrectly think the file is completely sparse.
	 */
	if (unlikely(ext4_has_inline_data(inode)))
		stat->blocks += (stat->size + 511) >> 9;

	/*
	 * We can't update i_blocks if the block allocation is delayed,
	 * otherwise in the case of a system crash before the real block
	 * allocation is done, we would have i_blocks inconsistent with
	 * the on-disk file blocks.
	 * We always keep i_blocks updated together with the real
	 * allocation. But to avoid confusing userspace, stat
	 * will return the blocks that include the delayed allocation
	 * blocks for this file.
	 */
	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
				   EXT4_I(inode)->i_reserved_data_blocks);
	stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
	return 0;
}

static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
				   int pextents)
{
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return ext4_ind_trans_blocks(inode, lblocks);
	return ext4_ext_index_trans_blocks(inode, pextents);
}

/*
 * Account for index blocks, block group bitmaps and block group descriptor
 * blocks if both data blocks and index blocks are modified. In the worst
 * case, the index blocks are spread over different block groups.
 *
 * If the data blocks are discontiguous, they may spread over different
 * block groups too. Even if they are contiguous, with flexbg they could
 * still cross a block group boundary.
 *
 * Also account for superblock, inode, quota and xattr blocks.
 */
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents)
{
	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
	int gdpblocks;
	int idxblocks;
	int ret = 0;

	/*
	 * How many index blocks do we need to touch to map @lblocks logical
	 * blocks to @pextents physical extents?
	 */
	idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);

	ret = idxblocks;

	/*
	 * Now let's see how many group bitmaps and group descriptors need
	 * to be accounted.
	 */
	groups = idxblocks + pextents;
	gdpblocks = groups;
	if (groups > ngroups)
		groups = ngroups;
	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;

	/* bitmaps and block group descriptor blocks */
	ret += groups + gdpblocks;

	/* Blocks for super block, inode, quota and xattr blocks */
	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);

	return ret;
}

/*
 * Calculate the total number of credits to reserve to fit
 * the modification of a single page into a single transaction,
 * which may include multiple chunks of block allocations.
 *
 * This could be called via ext4_write_begin()
 *
 * We need to consider the worst case, when we allocate
 * one new block per extent.
 */
int ext4_writepage_trans_blocks(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);
	int ret;

	ret = ext4_meta_trans_blocks(inode, bpp, bpp);

	/* Account for data blocks for journalled mode */
	if (ext4_should_journal_data(inode))
		ret += bpp;
	return ret;
}

/*
 * Calculate the journal credits for a chunk of data modification.
 *
 * This is called from DIO, fallocate or whoever calls
 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
 *
 * Journal buffers for data blocks are not included here, as DIO
 * and fallocate do not need to journal data buffers.
 */
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
	return ext4_meta_trans_blocks(inode, nrblocks, 1);
}
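/*
 * Illustrative sketch only (hypothetical helper, not in the tree): the
 * typical way the credit calculations above are consumed, sizing a handle
 * for a one-page write. ext4_page_mkwrite() further below uses exactly
 * this ext4_journal_start() pattern.
 */
#if 0
static handle_t *ext4_start_one_page_handle(struct inode *inode)
{
	/* Worst-case credits to modify every block backing a single page */
	int credits = ext4_writepage_trans_blocks(inode);

	return ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, credits);
}
#endif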
/*
 * The caller must have previously called ext4_reserve_inode_write().
 * Given this, we know that the caller already has write access to iloc->bh.
 */
int ext4_mark_iloc_dirty(handle_t *handle,
			 struct inode *inode, struct ext4_iloc *iloc)
{
	int err = 0;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);

	/* the do_update_inode consumes one bh->b_count */
	get_bh(iloc->bh);

	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
	err = ext4_do_update_inode(handle, inode, iloc);
	put_bh(iloc->bh);
	return err;
}

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh. This _must_ be cleaned up later.
 */

int
ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			 struct ext4_iloc *iloc)
{
	int err;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	err = ext4_get_inode_loc(inode, iloc);
	if (!err) {
		BUFFER_TRACE(iloc->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, iloc->bh);
		if (err) {
			brelse(iloc->bh);
			iloc->bh = NULL;
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
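/*
 * Illustrative sketch only (hypothetical, not in the tree): the canonical
 * reserve / modify / dirty sequence built from the two helpers above.
 * ext4_mark_inode_dirty() below is the real in-tree user of this pattern.
 */
#if 0
static int ext4_update_inode_example(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	int err;

	/* Map the inode and get journal write access to its buffer */
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		return err;
	/* ... modify the in-core inode here ... */

	/* Copy to the raw inode; drops the reference iloc holds on its bh */
	return ext4_mark_iloc_dirty(handle, inode, &iloc);
}
#endif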
/*
 * Expand an inode by new_extra_isize bytes.
 * Returns 0 on success or negative error number on failure.
 */
static int ext4_expand_extra_isize(struct inode *inode,
				   unsigned int new_extra_isize,
				   struct ext4_iloc iloc,
				   handle_t *handle)
{
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;

	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
		return 0;

	raw_inode = ext4_raw_inode(&iloc);

	header = IHDR(inode, raw_inode);

	/* No extended attributes present */
	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
		       new_extra_isize);
		EXT4_I(inode)->i_extra_isize = new_extra_isize;
		return 0;
	}

	/* try to expand with EAs present */
	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
					  raw_inode, handle);
}

/*
 * What we do here is to mark the in-core inode as clean with respect to inode
 * dirtiness (it may still be data-dirty).
 * This means that the in-core inode may be reaped by prune_icache
 * without having to perform any I/O. This is a very good thing,
 * because *any* task may call prune_icache - even ones which
 * have a transaction open against a different journal.
 *
 * Is this cheating? Not really. Sure, we haven't written the
 * inode out, but prune_icache isn't a user-visible syncing function.
 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
 * we start and wait on commits.
 */
int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	static unsigned int mnt_count;
	int err, ret;

	might_sleep();
	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		return err;
	if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
		/*
		 * In nojournal mode, we can immediately attempt to expand
		 * the inode. When journaled, we first need to obtain extra
		 * buffer credits since we may write into the EA block
		 * with this same handle. If journal_extend fails, then it
		 * will only result in a minor loss of functionality for
		 * that inode. If this is felt to be critical, then e2fsck
		 * should be run to force a large enough s_min_extra_isize.
		 */
		if (!ext4_handle_valid(handle) ||
		    jbd2_journal_extend(handle,
			    EXT4_DATA_TRANS_BLOCKS(inode->i_sb)) == 0) {
			ret = ext4_expand_extra_isize(inode,
						      sbi->s_want_extra_isize,
						      iloc, handle);
			if (ret) {
				if (mnt_count !=
					le16_to_cpu(sbi->s_es->s_mnt_count)) {
					ext4_warning(inode->i_sb,
					"Unable to expand inode %lu. Delete"
					" some EAs or run e2fsck.",
						inode->i_ino);
					mnt_count =
					  le16_to_cpu(sbi->s_es->s_mnt_count);
				}
			}
		}
	}
	return ext4_mark_iloc_dirty(handle, inode, &iloc);
}

/*
 * ext4_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Also, dquot_alloc_block() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
 * so would cause a commit on atime updates, which we don't bother doing.
 * We handle synchronous inodes at the highest possible level.
 *
 * If only the I_DIRTY_TIME flag is set, we can skip everything. If
 * I_DIRTY_TIME and I_DIRTY_SYNC is set, the only inode fields we need
 * to copy into the on-disk inode structure are the timestamp fields.
 */
void ext4_dirty_inode(struct inode *inode, int flags)
{
	handle_t *handle;

	if (flags == I_DIRTY_TIME)
		return;
	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (IS_ERR(handle))
		goto out;

	ext4_mark_inode_dirty(handle, inode);

	ext4_journal_stop(handle);
out:
	return;
}

#if 0
/*
 * Bind an inode's backing buffer_head into this transaction, to prevent
 * it from being flushed to disk early. Unlike
 * ext4_reserve_inode_write, this leaves behind no bh reference and
 * returns no iloc structure, so the caller needs to repeat the iloc
 * lookup to mark the inode dirty later.
 */
static int ext4_pin_inode(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;

	int err = 0;
	if (handle) {
		err = ext4_get_inode_loc(inode, &iloc);
		if (!err) {
			BUFFER_TRACE(iloc.bh, "get_write_access");
			err = jbd2_journal_get_write_access(handle, iloc.bh);
			if (!err)
				err = ext4_handle_dirty_metadata(handle,
								 NULL,
								 iloc.bh);
			brelse(iloc.bh);
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
#endif
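/*
 * Illustrative userspace counterpart (a sketch, not kernel code):
 * ext4_change_inode_journal_flag() below is reached via the FS_IOC_SETFLAGS
 * ioctl when an inode's journal-data flag changes, i.e. what "chattr +j"
 * does. Hypothetical example, not part of this file's build.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/fs.h>

static int set_journal_data(int fd)
{
	long flags;

	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) != 0)
		return -1;
	flags |= FS_JOURNAL_DATA_FL;	/* journal data for this inode */
	return ioctl(fd, FS_IOC_SETFLAGS, &flags);
}
#endif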
int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	/*
	 * We have to be very careful here: changing a data block's
	 * journaling status dynamically is dangerous. If we write a
	 * data block to the journal, change the status and then delete
	 * that block, we risk forgetting to revoke the old log record
	 * from the journal and so a subsequent replay can corrupt data.
	 * So, first we make sure that the journal is empty and that
	 * nobody is changing anything.
	 */

	journal = EXT4_JOURNAL(inode);
	if (!journal)
		return 0;
	if (is_journal_aborted(journal))
		return -EROFS;

	/* Wait for all existing dio workers */
	ext4_inode_block_unlocked_dio(inode);
	inode_dio_wait(inode);

	/*
	 * Before flushing the journal and switching the inode's aops, we have
	 * to flush all dirty data the inode has. There can be outstanding
	 * delayed allocations, and there can be unwritten extents created by
	 * fallocate or buffered writes in dioread_nolock mode covered by
	 * dirty data, which can be converted only after flushing the dirty
	 * data (and journalled aops don't know how to handle these cases).
	 */
	if (val) {
		down_write(&EXT4_I(inode)->i_mmap_sem);
		err = filemap_write_and_wait(inode->i_mapping);
		if (err < 0) {
			up_write(&EXT4_I(inode)->i_mmap_sem);
			ext4_inode_resume_unlocked_dio(inode);
			return err;
		}
	}

	percpu_down_write(&sbi->s_journal_flag_rwsem);
	jbd2_journal_lock_updates(journal);

	/*
	 * OK, there are no updates running now, and all cached data is
	 * synced to disk. We are now in a completely consistent state
	 * which doesn't have anything in the journal, and we know that
	 * no filesystem updates are running, so it is safe to modify
	 * the inode's in-core data-journaling state flag now.
	 */

	if (val)
		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	else {
		err = jbd2_journal_flush(journal);
		if (err < 0) {
			jbd2_journal_unlock_updates(journal);
			percpu_up_write(&sbi->s_journal_flag_rwsem);
			ext4_inode_resume_unlocked_dio(inode);
			return err;
		}
		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	}
	ext4_set_aops(inode);
	/*
	 * Update inode->i_flags after EXT4_INODE_JOURNAL_DATA was updated.
	 * E.g. S_DAX may get cleared / set.
	 */
	ext4_set_inode_flags(inode);

	jbd2_journal_unlock_updates(journal);
	percpu_up_write(&sbi->s_journal_flag_rwsem);

	if (val)
		up_write(&EXT4_I(inode)->i_mmap_sem);
	ext4_inode_resume_unlocked_dio(inode);

	/* Finally we can mark the inode as dirty. */

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_mark_inode_dirty(handle, inode);
	ext4_handle_sync(handle);
	ext4_journal_stop(handle);
	ext4_std_error(inode->i_sb, err);

	return err;
}

static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
{
	return !buffer_mapped(bh);
}

int ext4_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct page *page = vmf->page;
	loff_t size;
	unsigned long len;
	int ret;
	struct file *file = vma->vm_file;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	handle_t *handle;
	get_block_t *get_block;
	int retries = 0;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	down_read(&EXT4_I(inode)->i_mmap_sem);

	ret = ext4_convert_inline_data(inode);
	if (ret)
		goto out_ret;

	/* Delalloc case is easy... */
	if (test_opt(inode->i_sb, DELALLOC) &&
	    !ext4_should_journal_data(inode) &&
	    !ext4_nonda_switch(inode->i_sb)) {
		do {
			ret = block_page_mkwrite(vma, vmf,
						 ext4_da_get_block_prep);
		} while (ret == -ENOSPC &&
			 ext4_should_retry_alloc(inode->i_sb, &retries));
		goto out_ret;
	}

	lock_page(page);
	size = i_size_read(inode);
	/* Page got truncated from under us? */
	if (page->mapping != mapping || page_offset(page) > size) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (page->index == size >> PAGE_SHIFT)
		len = size & ~PAGE_MASK;
	else
		len = PAGE_SIZE;
	/*
	 * Return if we have all the buffers mapped. This avoids the need to
	 * do journal_start/journal_stop, which can block and take a long time.
	 */
	if (page_has_buffers(page)) {
		if (!ext4_walk_page_buffers(NULL, page_buffers(page),
					    0, len, NULL,
					    ext4_bh_unmapped)) {
			/* Wait so that we don't change page under IO */
			wait_for_stable_page(page);
			ret = VM_FAULT_LOCKED;
			goto out;
		}
	}
	unlock_page(page);
	/* OK, we need to fill the hole... */
	if (ext4_should_dioread_nolock(inode))
		get_block = ext4_get_block_unwritten;
	else
		get_block = ext4_get_block;
retry_alloc:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
				    ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}
	ret = block_page_mkwrite(vma, vmf, get_block);
	if (!ret && ext4_should_journal_data(inode)) {
		if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
			  PAGE_SIZE, NULL, do_journal_get_write_access)) {
			unlock_page(page);
			ret = VM_FAULT_SIGBUS;
			ext4_journal_stop(handle);
			goto out;
		}
		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	}
	ext4_journal_stop(handle);
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry_alloc;
out_ret:
	ret = block_page_mkwrite_return(ret);
out:
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(inode->i_sb);
	return ret;
}

int ext4_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int err;

	down_read(&EXT4_I(inode)->i_mmap_sem);
	err = filemap_fault(vmf);
	up_read(&EXT4_I(inode)->i_mmap_sem);

	return err;
}

/*
 * Find the first extent at or after @lblk in an inode that is not a hole.
 * Search for @map_len blocks at most. The extent is returned in @result.
 *
 * The function returns 1 if we found an extent. The function returns 0 in
 * case there is no extent at or after @lblk and in that case also sets
 * @result->es_len to 0. In case of error, the error code is returned.
 */
int ext4_get_next_extent(struct inode *inode, ext4_lblk_t lblk,
			 unsigned int map_len, struct extent_status *result)
{
	struct ext4_map_blocks map;
	struct extent_status es = {};
	int ret;

	map.m_lblk = lblk;
	map.m_len = map_len;

	/*
	 * For non-extent based files this loop may iterate several times since
	 * we do not determine the full hole size.
	 */
	while (map.m_len > 0) {
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret < 0)
			return ret;
		/* There's an extent covering m_lblk? Just return it. */
		if (ret > 0) {
			int status;

			ext4_es_store_pblock(result, map.m_pblk);
			result->es_lblk = map.m_lblk;
			result->es_len = map.m_len;
			if (map.m_flags & EXT4_MAP_UNWRITTEN)
				status = EXTENT_STATUS_UNWRITTEN;
			else
				status = EXTENT_STATUS_WRITTEN;
			ext4_es_store_status(result, status);
			return 1;
		}
		ext4_es_find_delayed_extent_range(inode, map.m_lblk,
						  map.m_lblk + map.m_len - 1,
						  &es);
		/* Is there delalloc data before the next block in the extent tree? */
		if (es.es_len && es.es_lblk < map.m_lblk + map.m_len) {
			ext4_lblk_t offset = 0;

			if (es.es_lblk < lblk)
				offset = lblk - es.es_lblk;
			result->es_lblk = es.es_lblk + offset;
			ext4_es_store_pblock(result,
					     ext4_es_pblock(&es) + offset);
			result->es_len = es.es_len - offset;
			ext4_es_store_status(result, ext4_es_status(&es));

			return 1;
		}
		/* There's a hole at m_lblk, advance us after it */
		map.m_lblk += map.m_len;
		map_len -= map.m_len;
		map.m_len = map_len;
		cond_resched();
	}
	result->es_len = 0;
	return 0;
}
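/*
 * Illustrative sketch only (hypothetical helper, not in the tree): walking
 * the non-hole extents in a range with ext4_get_next_extent(), following
 * the 1 / 0 / <0 return convention documented above.
 */
#if 0
static int ext4_walk_extents_example(struct inode *inode, ext4_lblk_t lblk,
				     unsigned int len)
{
	ext4_lblk_t end = lblk + len;
	struct extent_status es;
	int ret;

	while (lblk < end) {
		ret = ext4_get_next_extent(inode, lblk, end - lblk, &es);
		if (ret <= 0)	/* error, or no further extents */
			return ret;
		/* ... inspect es.es_lblk / es.es_len here ... */
		lblk = es.es_lblk + es.es_len;
	}
	return 0;
}
#endif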