1 /* 2 * linux/fs/ext4/inode.c 3 * 4 * Copyright (C) 1992, 1993, 1994, 1995 5 * Remy Card (card@masi.ibp.fr) 6 * Laboratoire MASI - Institut Blaise Pascal 7 * Universite Pierre et Marie Curie (Paris VI) 8 * 9 * from 10 * 11 * linux/fs/minix/inode.c 12 * 13 * Copyright (C) 1991, 1992 Linus Torvalds 14 * 15 * 64-bit file support on 64-bit platforms by Jakub Jelinek 16 * (jj@sunsite.ms.mff.cuni.cz) 17 * 18 * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000 19 */ 20 21 #include <linux/fs.h> 22 #include <linux/time.h> 23 #include <linux/highuid.h> 24 #include <linux/pagemap.h> 25 #include <linux/dax.h> 26 #include <linux/quotaops.h> 27 #include <linux/string.h> 28 #include <linux/buffer_head.h> 29 #include <linux/writeback.h> 30 #include <linux/pagevec.h> 31 #include <linux/mpage.h> 32 #include <linux/namei.h> 33 #include <linux/uio.h> 34 #include <linux/bio.h> 35 #include <linux/workqueue.h> 36 #include <linux/kernel.h> 37 #include <linux/printk.h> 38 #include <linux/slab.h> 39 #include <linux/bitops.h> 40 #include <linux/iomap.h> 41 42 #include "ext4_jbd2.h" 43 #include "xattr.h" 44 #include "acl.h" 45 #include "truncate.h" 46 47 #include <trace/events/ext4.h> 48 49 #define MPAGE_DA_EXTENT_TAIL 0x01 50 51 static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw, 52 struct ext4_inode_info *ei) 53 { 54 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 55 __u32 csum; 56 __u16 dummy_csum = 0; 57 int offset = offsetof(struct ext4_inode, i_checksum_lo); 58 unsigned int csum_size = sizeof(dummy_csum); 59 60 csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset); 61 csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size); 62 offset += csum_size; 63 csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset, 64 EXT4_GOOD_OLD_INODE_SIZE - offset); 65 66 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 67 offset = offsetof(struct ext4_inode, i_checksum_hi); 68 csum = ext4_chksum(sbi, csum, (__u8 *)raw + 69 EXT4_GOOD_OLD_INODE_SIZE, 70 offset - EXT4_GOOD_OLD_INODE_SIZE); 71 if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) { 72 csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, 73 csum_size); 74 offset += csum_size; 75 } 76 csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset, 77 EXT4_INODE_SIZE(inode->i_sb) - offset); 78 } 79 80 return csum; 81 } 82 83 static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw, 84 struct ext4_inode_info *ei) 85 { 86 __u32 provided, calculated; 87 88 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != 89 cpu_to_le32(EXT4_OS_LINUX) || 90 !ext4_has_metadata_csum(inode->i_sb)) 91 return 1; 92 93 provided = le16_to_cpu(raw->i_checksum_lo); 94 calculated = ext4_inode_csum(inode, raw, ei); 95 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && 96 EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) 97 provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16; 98 else 99 calculated &= 0xFFFF; 100 101 return provided == calculated; 102 } 103 104 static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw, 105 struct ext4_inode_info *ei) 106 { 107 __u32 csum; 108 109 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != 110 cpu_to_le32(EXT4_OS_LINUX) || 111 !ext4_has_metadata_csum(inode->i_sb)) 112 return; 113 114 csum = ext4_inode_csum(inode, raw, ei); 115 raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF); 116 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && 117 EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) 118 raw->i_checksum_hi = cpu_to_le16(csum >> 16); 119 } 120 121 static inline int ext4_begin_ordered_truncate(struct inode *inode, 122 loff_t new_size) 123 { 124 trace_ext4_begin_ordered_truncate(inode, new_size); 125 /* 126 * If jinode is zero, then we never opened the file for 127 * writing, so there's no need to call 128 * jbd2_journal_begin_ordered_truncate() since there's no 129 * outstanding writes we need to flush. 130 */ 131 if (!EXT4_I(inode)->jinode) 132 return 0; 133 return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode), 134 EXT4_I(inode)->jinode, 135 new_size); 136 } 137 138 static void ext4_invalidatepage(struct page *page, unsigned int offset, 139 unsigned int length); 140 static int __ext4_journalled_writepage(struct page *page, unsigned int len); 141 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh); 142 static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, 143 int pextents); 144 145 /* 146 * Test whether an inode is a fast symlink. 147 * A fast symlink has its symlink data stored in ext4_inode_info->i_data. 148 */ 149 int ext4_inode_is_fast_symlink(struct inode *inode) 150 { 151 return S_ISLNK(inode->i_mode) && inode->i_size && 152 (inode->i_size < EXT4_N_BLOCKS * 4); 153 } 154 155 /* 156 * Restart the transaction associated with *handle. This does a commit, 157 * so before we call here everything must be consistently dirtied against 158 * this transaction. 159 */ 160 int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode, 161 int nblocks) 162 { 163 int ret; 164 165 /* 166 * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this 167 * moment, get_block can be called only for blocks inside i_size since 168 * page cache has been already dropped and writes are blocked by 169 * i_mutex. So we can safely drop the i_data_sem here. 170 */ 171 BUG_ON(EXT4_JOURNAL(inode) == NULL); 172 jbd_debug(2, "restarting handle %p\n", handle); 173 up_write(&EXT4_I(inode)->i_data_sem); 174 ret = ext4_journal_restart(handle, nblocks); 175 down_write(&EXT4_I(inode)->i_data_sem); 176 ext4_discard_preallocations(inode); 177 178 return ret; 179 } 180 181 /* 182 * Called at the last iput() if i_nlink is zero. 183 */ 184 void ext4_evict_inode(struct inode *inode) 185 { 186 handle_t *handle; 187 int err; 188 int extra_credits = 3; 189 struct ext4_xattr_inode_array *ea_inode_array = NULL; 190 191 trace_ext4_evict_inode(inode); 192 193 if (inode->i_nlink) { 194 /* 195 * When journalling data dirty buffers are tracked only in the 196 * journal. So although mm thinks everything is clean and 197 * ready for reaping the inode might still have some pages to 198 * write in the running transaction or waiting to be 199 * checkpointed. Thus calling jbd2_journal_invalidatepage() 200 * (via truncate_inode_pages()) to discard these buffers can 201 * cause data loss. Also even if we did not discard these 202 * buffers, we would have no way to find them after the inode 203 * is reaped and thus user could see stale data if he tries to 204 * read them before the transaction is checkpointed. So be 205 * careful and force everything to disk here... We use 206 * ei->i_datasync_tid to store the newest transaction 207 * containing inode's data. 208 * 209 * Note that directories do not have this problem because they 210 * don't use page cache. 211 */ 212 if (inode->i_ino != EXT4_JOURNAL_INO && 213 ext4_should_journal_data(inode) && 214 (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) && 215 inode->i_data.nrpages) { 216 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; 217 tid_t commit_tid = EXT4_I(inode)->i_datasync_tid; 218 219 jbd2_complete_transaction(journal, commit_tid); 220 filemap_write_and_wait(&inode->i_data); 221 } 222 truncate_inode_pages_final(&inode->i_data); 223 224 goto no_delete; 225 } 226 227 if (is_bad_inode(inode)) 228 goto no_delete; 229 dquot_initialize(inode); 230 231 if (ext4_should_order_data(inode)) 232 ext4_begin_ordered_truncate(inode, 0); 233 truncate_inode_pages_final(&inode->i_data); 234 235 /* 236 * Protect us against freezing - iput() caller didn't have to have any 237 * protection against it 238 */ 239 sb_start_intwrite(inode->i_sb); 240 241 if (!IS_NOQUOTA(inode)) 242 extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb); 243 244 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, 245 ext4_blocks_for_truncate(inode)+extra_credits); 246 if (IS_ERR(handle)) { 247 ext4_std_error(inode->i_sb, PTR_ERR(handle)); 248 /* 249 * If we're going to skip the normal cleanup, we still need to 250 * make sure that the in-core orphan linked list is properly 251 * cleaned up. 252 */ 253 ext4_orphan_del(NULL, inode); 254 sb_end_intwrite(inode->i_sb); 255 goto no_delete; 256 } 257 258 if (IS_SYNC(inode)) 259 ext4_handle_sync(handle); 260 261 /* 262 * Set inode->i_size to 0 before calling ext4_truncate(). We need 263 * special handling of symlinks here because i_size is used to 264 * determine whether ext4_inode_info->i_data contains symlink data or 265 * block mappings. Setting i_size to 0 will remove its fast symlink 266 * status. Erase i_data so that it becomes a valid empty block map. 267 */ 268 if (ext4_inode_is_fast_symlink(inode)) 269 memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data)); 270 inode->i_size = 0; 271 err = ext4_mark_inode_dirty(handle, inode); 272 if (err) { 273 ext4_warning(inode->i_sb, 274 "couldn't mark inode dirty (err %d)", err); 275 goto stop_handle; 276 } 277 if (inode->i_blocks) { 278 err = ext4_truncate(inode); 279 if (err) { 280 ext4_error(inode->i_sb, 281 "couldn't truncate inode %lu (err %d)", 282 inode->i_ino, err); 283 goto stop_handle; 284 } 285 } 286 287 /* Remove xattr references. */ 288 err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array, 289 extra_credits); 290 if (err) { 291 ext4_warning(inode->i_sb, "xattr delete (err %d)", err); 292 stop_handle: 293 ext4_journal_stop(handle); 294 ext4_orphan_del(NULL, inode); 295 sb_end_intwrite(inode->i_sb); 296 ext4_xattr_inode_array_free(ea_inode_array); 297 goto no_delete; 298 } 299 300 /* 301 * Kill off the orphan record which ext4_truncate created. 302 * AKPM: I think this can be inside the above `if'. 303 * Note that ext4_orphan_del() has to be able to cope with the 304 * deletion of a non-existent orphan - this is because we don't 305 * know if ext4_truncate() actually created an orphan record. 306 * (Well, we could do this if we need to, but heck - it works) 307 */ 308 ext4_orphan_del(handle, inode); 309 EXT4_I(inode)->i_dtime = get_seconds(); 310 311 /* 312 * One subtle ordering requirement: if anything has gone wrong 313 * (transaction abort, IO errors, whatever), then we can still 314 * do these next steps (the fs will already have been marked as 315 * having errors), but we can't free the inode if the mark_dirty 316 * fails. 317 */ 318 if (ext4_mark_inode_dirty(handle, inode)) 319 /* If that failed, just do the required in-core inode clear. */ 320 ext4_clear_inode(inode); 321 else 322 ext4_free_inode(handle, inode); 323 ext4_journal_stop(handle); 324 sb_end_intwrite(inode->i_sb); 325 ext4_xattr_inode_array_free(ea_inode_array); 326 return; 327 no_delete: 328 ext4_clear_inode(inode); /* We must guarantee clearing of inode... */ 329 } 330 331 #ifdef CONFIG_QUOTA 332 qsize_t *ext4_get_reserved_space(struct inode *inode) 333 { 334 return &EXT4_I(inode)->i_reserved_quota; 335 } 336 #endif 337 338 /* 339 * Called with i_data_sem down, which is important since we can call 340 * ext4_discard_preallocations() from here. 341 */ 342 void ext4_da_update_reserve_space(struct inode *inode, 343 int used, int quota_claim) 344 { 345 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 346 struct ext4_inode_info *ei = EXT4_I(inode); 347 348 spin_lock(&ei->i_block_reservation_lock); 349 trace_ext4_da_update_reserve_space(inode, used, quota_claim); 350 if (unlikely(used > ei->i_reserved_data_blocks)) { 351 ext4_warning(inode->i_sb, "%s: ino %lu, used %d " 352 "with only %d reserved data blocks", 353 __func__, inode->i_ino, used, 354 ei->i_reserved_data_blocks); 355 WARN_ON(1); 356 used = ei->i_reserved_data_blocks; 357 } 358 359 /* Update per-inode reservations */ 360 ei->i_reserved_data_blocks -= used; 361 percpu_counter_sub(&sbi->s_dirtyclusters_counter, used); 362 363 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 364 365 /* Update quota subsystem for data blocks */ 366 if (quota_claim) 367 dquot_claim_block(inode, EXT4_C2B(sbi, used)); 368 else { 369 /* 370 * We did fallocate with an offset that is already delayed 371 * allocated. So on delayed allocated writeback we should 372 * not re-claim the quota for fallocated blocks. 373 */ 374 dquot_release_reservation_block(inode, EXT4_C2B(sbi, used)); 375 } 376 377 /* 378 * If we have done all the pending block allocations and if 379 * there aren't any writers on the inode, we can discard the 380 * inode's preallocations. 381 */ 382 if ((ei->i_reserved_data_blocks == 0) && 383 (atomic_read(&inode->i_writecount) == 0)) 384 ext4_discard_preallocations(inode); 385 } 386 387 static int __check_block_validity(struct inode *inode, const char *func, 388 unsigned int line, 389 struct ext4_map_blocks *map) 390 { 391 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk, 392 map->m_len)) { 393 ext4_error_inode(inode, func, line, map->m_pblk, 394 "lblock %lu mapped to illegal pblock " 395 "(length %d)", (unsigned long) map->m_lblk, 396 map->m_len); 397 return -EFSCORRUPTED; 398 } 399 return 0; 400 } 401 402 int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk, 403 ext4_lblk_t len) 404 { 405 int ret; 406 407 if (ext4_encrypted_inode(inode)) 408 return fscrypt_zeroout_range(inode, lblk, pblk, len); 409 410 ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS); 411 if (ret > 0) 412 ret = 0; 413 414 return ret; 415 } 416 417 #define check_block_validity(inode, map) \ 418 __check_block_validity((inode), __func__, __LINE__, (map)) 419 420 #ifdef ES_AGGRESSIVE_TEST 421 static void ext4_map_blocks_es_recheck(handle_t *handle, 422 struct inode *inode, 423 struct ext4_map_blocks *es_map, 424 struct ext4_map_blocks *map, 425 int flags) 426 { 427 int retval; 428 429 map->m_flags = 0; 430 /* 431 * There is a race window that the result is not the same. 432 * e.g. xfstests #223 when dioread_nolock enables. The reason 433 * is that we lookup a block mapping in extent status tree with 434 * out taking i_data_sem. So at the time the unwritten extent 435 * could be converted. 436 */ 437 down_read(&EXT4_I(inode)->i_data_sem); 438 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 439 retval = ext4_ext_map_blocks(handle, inode, map, flags & 440 EXT4_GET_BLOCKS_KEEP_SIZE); 441 } else { 442 retval = ext4_ind_map_blocks(handle, inode, map, flags & 443 EXT4_GET_BLOCKS_KEEP_SIZE); 444 } 445 up_read((&EXT4_I(inode)->i_data_sem)); 446 447 /* 448 * We don't check m_len because extent will be collpased in status 449 * tree. So the m_len might not equal. 450 */ 451 if (es_map->m_lblk != map->m_lblk || 452 es_map->m_flags != map->m_flags || 453 es_map->m_pblk != map->m_pblk) { 454 printk("ES cache assertion failed for inode: %lu " 455 "es_cached ex [%d/%d/%llu/%x] != " 456 "found ex [%d/%d/%llu/%x] retval %d flags %x\n", 457 inode->i_ino, es_map->m_lblk, es_map->m_len, 458 es_map->m_pblk, es_map->m_flags, map->m_lblk, 459 map->m_len, map->m_pblk, map->m_flags, 460 retval, flags); 461 } 462 } 463 #endif /* ES_AGGRESSIVE_TEST */ 464 465 /* 466 * The ext4_map_blocks() function tries to look up the requested blocks, 467 * and returns if the blocks are already mapped. 468 * 469 * Otherwise it takes the write lock of the i_data_sem and allocate blocks 470 * and store the allocated blocks in the result buffer head and mark it 471 * mapped. 472 * 473 * If file type is extents based, it will call ext4_ext_map_blocks(), 474 * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping 475 * based files 476 * 477 * On success, it returns the number of blocks being mapped or allocated. if 478 * create==0 and the blocks are pre-allocated and unwritten, the resulting @map 479 * is marked as unwritten. If the create == 1, it will mark @map as mapped. 480 * 481 * It returns 0 if plain look up failed (blocks have not been allocated), in 482 * that case, @map is returned as unmapped but we still do fill map->m_len to 483 * indicate the length of a hole starting at map->m_lblk. 484 * 485 * It returns the error in case of allocation failure. 486 */ 487 int ext4_map_blocks(handle_t *handle, struct inode *inode, 488 struct ext4_map_blocks *map, int flags) 489 { 490 struct extent_status es; 491 int retval; 492 int ret = 0; 493 #ifdef ES_AGGRESSIVE_TEST 494 struct ext4_map_blocks orig_map; 495 496 memcpy(&orig_map, map, sizeof(*map)); 497 #endif 498 499 map->m_flags = 0; 500 ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u," 501 "logical block %lu\n", inode->i_ino, flags, map->m_len, 502 (unsigned long) map->m_lblk); 503 504 /* 505 * ext4_map_blocks returns an int, and m_len is an unsigned int 506 */ 507 if (unlikely(map->m_len > INT_MAX)) 508 map->m_len = INT_MAX; 509 510 /* We can handle the block number less than EXT_MAX_BLOCKS */ 511 if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS)) 512 return -EFSCORRUPTED; 513 514 /* Lookup extent status tree firstly */ 515 if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) { 516 if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) { 517 map->m_pblk = ext4_es_pblock(&es) + 518 map->m_lblk - es.es_lblk; 519 map->m_flags |= ext4_es_is_written(&es) ? 520 EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN; 521 retval = es.es_len - (map->m_lblk - es.es_lblk); 522 if (retval > map->m_len) 523 retval = map->m_len; 524 map->m_len = retval; 525 } else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) { 526 map->m_pblk = 0; 527 retval = es.es_len - (map->m_lblk - es.es_lblk); 528 if (retval > map->m_len) 529 retval = map->m_len; 530 map->m_len = retval; 531 retval = 0; 532 } else { 533 BUG_ON(1); 534 } 535 #ifdef ES_AGGRESSIVE_TEST 536 ext4_map_blocks_es_recheck(handle, inode, map, 537 &orig_map, flags); 538 #endif 539 goto found; 540 } 541 542 /* 543 * Try to see if we can get the block without requesting a new 544 * file system block. 545 */ 546 down_read(&EXT4_I(inode)->i_data_sem); 547 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 548 retval = ext4_ext_map_blocks(handle, inode, map, flags & 549 EXT4_GET_BLOCKS_KEEP_SIZE); 550 } else { 551 retval = ext4_ind_map_blocks(handle, inode, map, flags & 552 EXT4_GET_BLOCKS_KEEP_SIZE); 553 } 554 if (retval > 0) { 555 unsigned int status; 556 557 if (unlikely(retval != map->m_len)) { 558 ext4_warning(inode->i_sb, 559 "ES len assertion failed for inode " 560 "%lu: retval %d != map->m_len %d", 561 inode->i_ino, retval, map->m_len); 562 WARN_ON(1); 563 } 564 565 status = map->m_flags & EXT4_MAP_UNWRITTEN ? 566 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; 567 if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && 568 !(status & EXTENT_STATUS_WRITTEN) && 569 ext4_find_delalloc_range(inode, map->m_lblk, 570 map->m_lblk + map->m_len - 1)) 571 status |= EXTENT_STATUS_DELAYED; 572 ret = ext4_es_insert_extent(inode, map->m_lblk, 573 map->m_len, map->m_pblk, status); 574 if (ret < 0) 575 retval = ret; 576 } 577 up_read((&EXT4_I(inode)->i_data_sem)); 578 579 found: 580 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) { 581 ret = check_block_validity(inode, map); 582 if (ret != 0) 583 return ret; 584 } 585 586 /* If it is only a block(s) look up */ 587 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) 588 return retval; 589 590 /* 591 * Returns if the blocks have already allocated 592 * 593 * Note that if blocks have been preallocated 594 * ext4_ext_get_block() returns the create = 0 595 * with buffer head unmapped. 596 */ 597 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) 598 /* 599 * If we need to convert extent to unwritten 600 * we continue and do the actual work in 601 * ext4_ext_map_blocks() 602 */ 603 if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) 604 return retval; 605 606 /* 607 * Here we clear m_flags because after allocating an new extent, 608 * it will be set again. 609 */ 610 map->m_flags &= ~EXT4_MAP_FLAGS; 611 612 /* 613 * New blocks allocate and/or writing to unwritten extent 614 * will possibly result in updating i_data, so we take 615 * the write lock of i_data_sem, and call get_block() 616 * with create == 1 flag. 617 */ 618 down_write(&EXT4_I(inode)->i_data_sem); 619 620 /* 621 * We need to check for EXT4 here because migrate 622 * could have changed the inode type in between 623 */ 624 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 625 retval = ext4_ext_map_blocks(handle, inode, map, flags); 626 } else { 627 retval = ext4_ind_map_blocks(handle, inode, map, flags); 628 629 if (retval > 0 && map->m_flags & EXT4_MAP_NEW) { 630 /* 631 * We allocated new blocks which will result in 632 * i_data's format changing. Force the migrate 633 * to fail by clearing migrate flags 634 */ 635 ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE); 636 } 637 638 /* 639 * Update reserved blocks/metadata blocks after successful 640 * block allocation which had been deferred till now. We don't 641 * support fallocate for non extent files. So we can update 642 * reserve space here. 643 */ 644 if ((retval > 0) && 645 (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)) 646 ext4_da_update_reserve_space(inode, retval, 1); 647 } 648 649 if (retval > 0) { 650 unsigned int status; 651 652 if (unlikely(retval != map->m_len)) { 653 ext4_warning(inode->i_sb, 654 "ES len assertion failed for inode " 655 "%lu: retval %d != map->m_len %d", 656 inode->i_ino, retval, map->m_len); 657 WARN_ON(1); 658 } 659 660 /* 661 * We have to zeroout blocks before inserting them into extent 662 * status tree. Otherwise someone could look them up there and 663 * use them before they are really zeroed. We also have to 664 * unmap metadata before zeroing as otherwise writeback can 665 * overwrite zeros with stale data from block device. 666 */ 667 if (flags & EXT4_GET_BLOCKS_ZERO && 668 map->m_flags & EXT4_MAP_MAPPED && 669 map->m_flags & EXT4_MAP_NEW) { 670 clean_bdev_aliases(inode->i_sb->s_bdev, map->m_pblk, 671 map->m_len); 672 ret = ext4_issue_zeroout(inode, map->m_lblk, 673 map->m_pblk, map->m_len); 674 if (ret) { 675 retval = ret; 676 goto out_sem; 677 } 678 } 679 680 /* 681 * If the extent has been zeroed out, we don't need to update 682 * extent status tree. 683 */ 684 if ((flags & EXT4_GET_BLOCKS_PRE_IO) && 685 ext4_es_lookup_extent(inode, map->m_lblk, &es)) { 686 if (ext4_es_is_written(&es)) 687 goto out_sem; 688 } 689 status = map->m_flags & EXT4_MAP_UNWRITTEN ? 690 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; 691 if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && 692 !(status & EXTENT_STATUS_WRITTEN) && 693 ext4_find_delalloc_range(inode, map->m_lblk, 694 map->m_lblk + map->m_len - 1)) 695 status |= EXTENT_STATUS_DELAYED; 696 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, 697 map->m_pblk, status); 698 if (ret < 0) { 699 retval = ret; 700 goto out_sem; 701 } 702 } 703 704 out_sem: 705 up_write((&EXT4_I(inode)->i_data_sem)); 706 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) { 707 ret = check_block_validity(inode, map); 708 if (ret != 0) 709 return ret; 710 711 /* 712 * Inodes with freshly allocated blocks where contents will be 713 * visible after transaction commit must be on transaction's 714 * ordered data list. 715 */ 716 if (map->m_flags & EXT4_MAP_NEW && 717 !(map->m_flags & EXT4_MAP_UNWRITTEN) && 718 !(flags & EXT4_GET_BLOCKS_ZERO) && 719 !ext4_is_quota_file(inode) && 720 ext4_should_order_data(inode)) { 721 if (flags & EXT4_GET_BLOCKS_IO_SUBMIT) 722 ret = ext4_jbd2_inode_add_wait(handle, inode); 723 else 724 ret = ext4_jbd2_inode_add_write(handle, inode); 725 if (ret) 726 return ret; 727 } 728 } 729 return retval; 730 } 731 732 /* 733 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages 734 * we have to be careful as someone else may be manipulating b_state as well. 735 */ 736 static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags) 737 { 738 unsigned long old_state; 739 unsigned long new_state; 740 741 flags &= EXT4_MAP_FLAGS; 742 743 /* Dummy buffer_head? Set non-atomically. */ 744 if (!bh->b_page) { 745 bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags; 746 return; 747 } 748 /* 749 * Someone else may be modifying b_state. Be careful! This is ugly but 750 * once we get rid of using bh as a container for mapping information 751 * to pass to / from get_block functions, this can go away. 752 */ 753 do { 754 old_state = READ_ONCE(bh->b_state); 755 new_state = (old_state & ~EXT4_MAP_FLAGS) | flags; 756 } while (unlikely( 757 cmpxchg(&bh->b_state, old_state, new_state) != old_state)); 758 } 759 760 static int _ext4_get_block(struct inode *inode, sector_t iblock, 761 struct buffer_head *bh, int flags) 762 { 763 struct ext4_map_blocks map; 764 int ret = 0; 765 766 if (ext4_has_inline_data(inode)) 767 return -ERANGE; 768 769 map.m_lblk = iblock; 770 map.m_len = bh->b_size >> inode->i_blkbits; 771 772 ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map, 773 flags); 774 if (ret > 0) { 775 map_bh(bh, inode->i_sb, map.m_pblk); 776 ext4_update_bh_state(bh, map.m_flags); 777 bh->b_size = inode->i_sb->s_blocksize * map.m_len; 778 ret = 0; 779 } else if (ret == 0) { 780 /* hole case, need to fill in bh->b_size */ 781 bh->b_size = inode->i_sb->s_blocksize * map.m_len; 782 } 783 return ret; 784 } 785 786 int ext4_get_block(struct inode *inode, sector_t iblock, 787 struct buffer_head *bh, int create) 788 { 789 return _ext4_get_block(inode, iblock, bh, 790 create ? EXT4_GET_BLOCKS_CREATE : 0); 791 } 792 793 /* 794 * Get block function used when preparing for buffered write if we require 795 * creating an unwritten extent if blocks haven't been allocated. The extent 796 * will be converted to written after the IO is complete. 797 */ 798 int ext4_get_block_unwritten(struct inode *inode, sector_t iblock, 799 struct buffer_head *bh_result, int create) 800 { 801 ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n", 802 inode->i_ino, create); 803 return _ext4_get_block(inode, iblock, bh_result, 804 EXT4_GET_BLOCKS_IO_CREATE_EXT); 805 } 806 807 /* Maximum number of blocks we map for direct IO at once. */ 808 #define DIO_MAX_BLOCKS 4096 809 810 /* 811 * Get blocks function for the cases that need to start a transaction - 812 * generally difference cases of direct IO and DAX IO. It also handles retries 813 * in case of ENOSPC. 814 */ 815 static int ext4_get_block_trans(struct inode *inode, sector_t iblock, 816 struct buffer_head *bh_result, int flags) 817 { 818 int dio_credits; 819 handle_t *handle; 820 int retries = 0; 821 int ret; 822 823 /* Trim mapping request to maximum we can map at once for DIO */ 824 if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS) 825 bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits; 826 dio_credits = ext4_chunk_trans_blocks(inode, 827 bh_result->b_size >> inode->i_blkbits); 828 retry: 829 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits); 830 if (IS_ERR(handle)) 831 return PTR_ERR(handle); 832 833 ret = _ext4_get_block(inode, iblock, bh_result, flags); 834 ext4_journal_stop(handle); 835 836 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 837 goto retry; 838 return ret; 839 } 840 841 /* Get block function for DIO reads and writes to inodes without extents */ 842 int ext4_dio_get_block(struct inode *inode, sector_t iblock, 843 struct buffer_head *bh, int create) 844 { 845 /* We don't expect handle for direct IO */ 846 WARN_ON_ONCE(ext4_journal_current_handle()); 847 848 if (!create) 849 return _ext4_get_block(inode, iblock, bh, 0); 850 return ext4_get_block_trans(inode, iblock, bh, EXT4_GET_BLOCKS_CREATE); 851 } 852 853 /* 854 * Get block function for AIO DIO writes when we create unwritten extent if 855 * blocks are not allocated yet. The extent will be converted to written 856 * after IO is complete. 857 */ 858 static int ext4_dio_get_block_unwritten_async(struct inode *inode, 859 sector_t iblock, struct buffer_head *bh_result, int create) 860 { 861 int ret; 862 863 /* We don't expect handle for direct IO */ 864 WARN_ON_ONCE(ext4_journal_current_handle()); 865 866 ret = ext4_get_block_trans(inode, iblock, bh_result, 867 EXT4_GET_BLOCKS_IO_CREATE_EXT); 868 869 /* 870 * When doing DIO using unwritten extents, we need io_end to convert 871 * unwritten extents to written on IO completion. We allocate io_end 872 * once we spot unwritten extent and store it in b_private. Generic 873 * DIO code keeps b_private set and furthermore passes the value to 874 * our completion callback in 'private' argument. 875 */ 876 if (!ret && buffer_unwritten(bh_result)) { 877 if (!bh_result->b_private) { 878 ext4_io_end_t *io_end; 879 880 io_end = ext4_init_io_end(inode, GFP_KERNEL); 881 if (!io_end) 882 return -ENOMEM; 883 bh_result->b_private = io_end; 884 ext4_set_io_unwritten_flag(inode, io_end); 885 } 886 set_buffer_defer_completion(bh_result); 887 } 888 889 return ret; 890 } 891 892 /* 893 * Get block function for non-AIO DIO writes when we create unwritten extent if 894 * blocks are not allocated yet. The extent will be converted to written 895 * after IO is complete from ext4_ext_direct_IO() function. 896 */ 897 static int ext4_dio_get_block_unwritten_sync(struct inode *inode, 898 sector_t iblock, struct buffer_head *bh_result, int create) 899 { 900 int ret; 901 902 /* We don't expect handle for direct IO */ 903 WARN_ON_ONCE(ext4_journal_current_handle()); 904 905 ret = ext4_get_block_trans(inode, iblock, bh_result, 906 EXT4_GET_BLOCKS_IO_CREATE_EXT); 907 908 /* 909 * Mark inode as having pending DIO writes to unwritten extents. 910 * ext4_ext_direct_IO() checks this flag and converts extents to 911 * written. 912 */ 913 if (!ret && buffer_unwritten(bh_result)) 914 ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); 915 916 return ret; 917 } 918 919 static int ext4_dio_get_block_overwrite(struct inode *inode, sector_t iblock, 920 struct buffer_head *bh_result, int create) 921 { 922 int ret; 923 924 ext4_debug("ext4_dio_get_block_overwrite: inode %lu, create flag %d\n", 925 inode->i_ino, create); 926 /* We don't expect handle for direct IO */ 927 WARN_ON_ONCE(ext4_journal_current_handle()); 928 929 ret = _ext4_get_block(inode, iblock, bh_result, 0); 930 /* 931 * Blocks should have been preallocated! ext4_file_write_iter() checks 932 * that. 933 */ 934 WARN_ON_ONCE(!buffer_mapped(bh_result) || buffer_unwritten(bh_result)); 935 936 return ret; 937 } 938 939 940 /* 941 * `handle' can be NULL if create is zero 942 */ 943 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode, 944 ext4_lblk_t block, int map_flags) 945 { 946 struct ext4_map_blocks map; 947 struct buffer_head *bh; 948 int create = map_flags & EXT4_GET_BLOCKS_CREATE; 949 int err; 950 951 J_ASSERT(handle != NULL || create == 0); 952 953 map.m_lblk = block; 954 map.m_len = 1; 955 err = ext4_map_blocks(handle, inode, &map, map_flags); 956 957 if (err == 0) 958 return create ? ERR_PTR(-ENOSPC) : NULL; 959 if (err < 0) 960 return ERR_PTR(err); 961 962 bh = sb_getblk(inode->i_sb, map.m_pblk); 963 if (unlikely(!bh)) 964 return ERR_PTR(-ENOMEM); 965 if (map.m_flags & EXT4_MAP_NEW) { 966 J_ASSERT(create != 0); 967 J_ASSERT(handle != NULL); 968 969 /* 970 * Now that we do not always journal data, we should 971 * keep in mind whether this should always journal the 972 * new buffer as metadata. For now, regular file 973 * writes use ext4_get_block instead, so it's not a 974 * problem. 975 */ 976 lock_buffer(bh); 977 BUFFER_TRACE(bh, "call get_create_access"); 978 err = ext4_journal_get_create_access(handle, bh); 979 if (unlikely(err)) { 980 unlock_buffer(bh); 981 goto errout; 982 } 983 if (!buffer_uptodate(bh)) { 984 memset(bh->b_data, 0, inode->i_sb->s_blocksize); 985 set_buffer_uptodate(bh); 986 } 987 unlock_buffer(bh); 988 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 989 err = ext4_handle_dirty_metadata(handle, inode, bh); 990 if (unlikely(err)) 991 goto errout; 992 } else 993 BUFFER_TRACE(bh, "not a new buffer"); 994 return bh; 995 errout: 996 brelse(bh); 997 return ERR_PTR(err); 998 } 999 1000 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode, 1001 ext4_lblk_t block, int map_flags) 1002 { 1003 struct buffer_head *bh; 1004 1005 bh = ext4_getblk(handle, inode, block, map_flags); 1006 if (IS_ERR(bh)) 1007 return bh; 1008 if (!bh || buffer_uptodate(bh)) 1009 return bh; 1010 ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh); 1011 wait_on_buffer(bh); 1012 if (buffer_uptodate(bh)) 1013 return bh; 1014 put_bh(bh); 1015 return ERR_PTR(-EIO); 1016 } 1017 1018 int ext4_walk_page_buffers(handle_t *handle, 1019 struct buffer_head *head, 1020 unsigned from, 1021 unsigned to, 1022 int *partial, 1023 int (*fn)(handle_t *handle, 1024 struct buffer_head *bh)) 1025 { 1026 struct buffer_head *bh; 1027 unsigned block_start, block_end; 1028 unsigned blocksize = head->b_size; 1029 int err, ret = 0; 1030 struct buffer_head *next; 1031 1032 for (bh = head, block_start = 0; 1033 ret == 0 && (bh != head || !block_start); 1034 block_start = block_end, bh = next) { 1035 next = bh->b_this_page; 1036 block_end = block_start + blocksize; 1037 if (block_end <= from || block_start >= to) { 1038 if (partial && !buffer_uptodate(bh)) 1039 *partial = 1; 1040 continue; 1041 } 1042 err = (*fn)(handle, bh); 1043 if (!ret) 1044 ret = err; 1045 } 1046 return ret; 1047 } 1048 1049 /* 1050 * To preserve ordering, it is essential that the hole instantiation and 1051 * the data write be encapsulated in a single transaction. We cannot 1052 * close off a transaction and start a new one between the ext4_get_block() 1053 * and the commit_write(). So doing the jbd2_journal_start at the start of 1054 * prepare_write() is the right place. 1055 * 1056 * Also, this function can nest inside ext4_writepage(). In that case, we 1057 * *know* that ext4_writepage() has generated enough buffer credits to do the 1058 * whole page. So we won't block on the journal in that case, which is good, 1059 * because the caller may be PF_MEMALLOC. 1060 * 1061 * By accident, ext4 can be reentered when a transaction is open via 1062 * quota file writes. If we were to commit the transaction while thus 1063 * reentered, there can be a deadlock - we would be holding a quota 1064 * lock, and the commit would never complete if another thread had a 1065 * transaction open and was blocking on the quota lock - a ranking 1066 * violation. 1067 * 1068 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start 1069 * will _not_ run commit under these circumstances because handle->h_ref 1070 * is elevated. We'll still have enough credits for the tiny quotafile 1071 * write. 1072 */ 1073 int do_journal_get_write_access(handle_t *handle, 1074 struct buffer_head *bh) 1075 { 1076 int dirty = buffer_dirty(bh); 1077 int ret; 1078 1079 if (!buffer_mapped(bh) || buffer_freed(bh)) 1080 return 0; 1081 /* 1082 * __block_write_begin() could have dirtied some buffers. Clean 1083 * the dirty bit as jbd2_journal_get_write_access() could complain 1084 * otherwise about fs integrity issues. Setting of the dirty bit 1085 * by __block_write_begin() isn't a real problem here as we clear 1086 * the bit before releasing a page lock and thus writeback cannot 1087 * ever write the buffer. 1088 */ 1089 if (dirty) 1090 clear_buffer_dirty(bh); 1091 BUFFER_TRACE(bh, "get write access"); 1092 ret = ext4_journal_get_write_access(handle, bh); 1093 if (!ret && dirty) 1094 ret = ext4_handle_dirty_metadata(handle, NULL, bh); 1095 return ret; 1096 } 1097 1098 #ifdef CONFIG_EXT4_FS_ENCRYPTION 1099 static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, 1100 get_block_t *get_block) 1101 { 1102 unsigned from = pos & (PAGE_SIZE - 1); 1103 unsigned to = from + len; 1104 struct inode *inode = page->mapping->host; 1105 unsigned block_start, block_end; 1106 sector_t block; 1107 int err = 0; 1108 unsigned blocksize = inode->i_sb->s_blocksize; 1109 unsigned bbits; 1110 struct buffer_head *bh, *head, *wait[2], **wait_bh = wait; 1111 bool decrypt = false; 1112 1113 BUG_ON(!PageLocked(page)); 1114 BUG_ON(from > PAGE_SIZE); 1115 BUG_ON(to > PAGE_SIZE); 1116 BUG_ON(from > to); 1117 1118 if (!page_has_buffers(page)) 1119 create_empty_buffers(page, blocksize, 0); 1120 head = page_buffers(page); 1121 bbits = ilog2(blocksize); 1122 block = (sector_t)page->index << (PAGE_SHIFT - bbits); 1123 1124 for (bh = head, block_start = 0; bh != head || !block_start; 1125 block++, block_start = block_end, bh = bh->b_this_page) { 1126 block_end = block_start + blocksize; 1127 if (block_end <= from || block_start >= to) { 1128 if (PageUptodate(page)) { 1129 if (!buffer_uptodate(bh)) 1130 set_buffer_uptodate(bh); 1131 } 1132 continue; 1133 } 1134 if (buffer_new(bh)) 1135 clear_buffer_new(bh); 1136 if (!buffer_mapped(bh)) { 1137 WARN_ON(bh->b_size != blocksize); 1138 err = get_block(inode, block, bh, 1); 1139 if (err) 1140 break; 1141 if (buffer_new(bh)) { 1142 clean_bdev_bh_alias(bh); 1143 if (PageUptodate(page)) { 1144 clear_buffer_new(bh); 1145 set_buffer_uptodate(bh); 1146 mark_buffer_dirty(bh); 1147 continue; 1148 } 1149 if (block_end > to || block_start < from) 1150 zero_user_segments(page, to, block_end, 1151 block_start, from); 1152 continue; 1153 } 1154 } 1155 if (PageUptodate(page)) { 1156 if (!buffer_uptodate(bh)) 1157 set_buffer_uptodate(bh); 1158 continue; 1159 } 1160 if (!buffer_uptodate(bh) && !buffer_delay(bh) && 1161 !buffer_unwritten(bh) && 1162 (block_start < from || block_end > to)) { 1163 ll_rw_block(REQ_OP_READ, 0, 1, &bh); 1164 *wait_bh++ = bh; 1165 decrypt = ext4_encrypted_inode(inode) && 1166 S_ISREG(inode->i_mode); 1167 } 1168 } 1169 /* 1170 * If we issued read requests, let them complete. 1171 */ 1172 while (wait_bh > wait) { 1173 wait_on_buffer(*--wait_bh); 1174 if (!buffer_uptodate(*wait_bh)) 1175 err = -EIO; 1176 } 1177 if (unlikely(err)) 1178 page_zero_new_buffers(page, from, to); 1179 else if (decrypt) 1180 err = fscrypt_decrypt_page(page->mapping->host, page, 1181 PAGE_SIZE, 0, page->index); 1182 return err; 1183 } 1184 #endif 1185 1186 static int ext4_write_begin(struct file *file, struct address_space *mapping, 1187 loff_t pos, unsigned len, unsigned flags, 1188 struct page **pagep, void **fsdata) 1189 { 1190 struct inode *inode = mapping->host; 1191 int ret, needed_blocks; 1192 handle_t *handle; 1193 int retries = 0; 1194 struct page *page; 1195 pgoff_t index; 1196 unsigned from, to; 1197 1198 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) 1199 return -EIO; 1200 1201 trace_ext4_write_begin(inode, pos, len, flags); 1202 /* 1203 * Reserve one block more for addition to orphan list in case 1204 * we allocate blocks but write fails for some reason 1205 */ 1206 needed_blocks = ext4_writepage_trans_blocks(inode) + 1; 1207 index = pos >> PAGE_SHIFT; 1208 from = pos & (PAGE_SIZE - 1); 1209 to = from + len; 1210 1211 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { 1212 ret = ext4_try_to_write_inline_data(mapping, inode, pos, len, 1213 flags, pagep); 1214 if (ret < 0) 1215 return ret; 1216 if (ret == 1) 1217 return 0; 1218 } 1219 1220 /* 1221 * grab_cache_page_write_begin() can take a long time if the 1222 * system is thrashing due to memory pressure, or if the page 1223 * is being written back. So grab it first before we start 1224 * the transaction handle. This also allows us to allocate 1225 * the page (if needed) without using GFP_NOFS. 1226 */ 1227 retry_grab: 1228 page = grab_cache_page_write_begin(mapping, index, flags); 1229 if (!page) 1230 return -ENOMEM; 1231 unlock_page(page); 1232 1233 retry_journal: 1234 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks); 1235 if (IS_ERR(handle)) { 1236 put_page(page); 1237 return PTR_ERR(handle); 1238 } 1239 1240 lock_page(page); 1241 if (page->mapping != mapping) { 1242 /* The page got truncated from under us */ 1243 unlock_page(page); 1244 put_page(page); 1245 ext4_journal_stop(handle); 1246 goto retry_grab; 1247 } 1248 /* In case writeback began while the page was unlocked */ 1249 wait_for_stable_page(page); 1250 1251 #ifdef CONFIG_EXT4_FS_ENCRYPTION 1252 if (ext4_should_dioread_nolock(inode)) 1253 ret = ext4_block_write_begin(page, pos, len, 1254 ext4_get_block_unwritten); 1255 else 1256 ret = ext4_block_write_begin(page, pos, len, 1257 ext4_get_block); 1258 #else 1259 if (ext4_should_dioread_nolock(inode)) 1260 ret = __block_write_begin(page, pos, len, 1261 ext4_get_block_unwritten); 1262 else 1263 ret = __block_write_begin(page, pos, len, ext4_get_block); 1264 #endif 1265 if (!ret && ext4_should_journal_data(inode)) { 1266 ret = ext4_walk_page_buffers(handle, page_buffers(page), 1267 from, to, NULL, 1268 do_journal_get_write_access); 1269 } 1270 1271 if (ret) { 1272 unlock_page(page); 1273 /* 1274 * __block_write_begin may have instantiated a few blocks 1275 * outside i_size. Trim these off again. Don't need 1276 * i_size_read because we hold i_mutex. 1277 * 1278 * Add inode to orphan list in case we crash before 1279 * truncate finishes 1280 */ 1281 if (pos + len > inode->i_size && ext4_can_truncate(inode)) 1282 ext4_orphan_add(handle, inode); 1283 1284 ext4_journal_stop(handle); 1285 if (pos + len > inode->i_size) { 1286 ext4_truncate_failed_write(inode); 1287 /* 1288 * If truncate failed early the inode might 1289 * still be on the orphan list; we need to 1290 * make sure the inode is removed from the 1291 * orphan list in that case. 1292 */ 1293 if (inode->i_nlink) 1294 ext4_orphan_del(NULL, inode); 1295 } 1296 1297 if (ret == -ENOSPC && 1298 ext4_should_retry_alloc(inode->i_sb, &retries)) 1299 goto retry_journal; 1300 put_page(page); 1301 return ret; 1302 } 1303 *pagep = page; 1304 return ret; 1305 } 1306 1307 /* For write_end() in data=journal mode */ 1308 static int write_end_fn(handle_t *handle, struct buffer_head *bh) 1309 { 1310 int ret; 1311 if (!buffer_mapped(bh) || buffer_freed(bh)) 1312 return 0; 1313 set_buffer_uptodate(bh); 1314 ret = ext4_handle_dirty_metadata(handle, NULL, bh); 1315 clear_buffer_meta(bh); 1316 clear_buffer_prio(bh); 1317 return ret; 1318 } 1319 1320 /* 1321 * We need to pick up the new inode size which generic_commit_write gave us 1322 * `file' can be NULL - eg, when called from page_symlink(). 1323 * 1324 * ext4 never places buffers on inode->i_mapping->private_list. metadata 1325 * buffers are managed internally. 1326 */ 1327 static int ext4_write_end(struct file *file, 1328 struct address_space *mapping, 1329 loff_t pos, unsigned len, unsigned copied, 1330 struct page *page, void *fsdata) 1331 { 1332 handle_t *handle = ext4_journal_current_handle(); 1333 struct inode *inode = mapping->host; 1334 loff_t old_size = inode->i_size; 1335 int ret = 0, ret2; 1336 int i_size_changed = 0; 1337 1338 trace_ext4_write_end(inode, pos, len, copied); 1339 if (ext4_has_inline_data(inode)) { 1340 ret = ext4_write_inline_data_end(inode, pos, len, 1341 copied, page); 1342 if (ret < 0) { 1343 unlock_page(page); 1344 put_page(page); 1345 goto errout; 1346 } 1347 copied = ret; 1348 } else 1349 copied = block_write_end(file, mapping, pos, 1350 len, copied, page, fsdata); 1351 /* 1352 * it's important to update i_size while still holding page lock: 1353 * page writeout could otherwise come in and zero beyond i_size. 1354 */ 1355 i_size_changed = ext4_update_inode_size(inode, pos + copied); 1356 unlock_page(page); 1357 put_page(page); 1358 1359 if (old_size < pos) 1360 pagecache_isize_extended(inode, old_size, pos); 1361 /* 1362 * Don't mark the inode dirty under page lock. First, it unnecessarily 1363 * makes the holding time of page lock longer. Second, it forces lock 1364 * ordering of page lock and transaction start for journaling 1365 * filesystems. 1366 */ 1367 if (i_size_changed) 1368 ext4_mark_inode_dirty(handle, inode); 1369 1370 if (pos + len > inode->i_size && ext4_can_truncate(inode)) 1371 /* if we have allocated more blocks and copied 1372 * less. We will have blocks allocated outside 1373 * inode->i_size. So truncate them 1374 */ 1375 ext4_orphan_add(handle, inode); 1376 errout: 1377 ret2 = ext4_journal_stop(handle); 1378 if (!ret) 1379 ret = ret2; 1380 1381 if (pos + len > inode->i_size) { 1382 ext4_truncate_failed_write(inode); 1383 /* 1384 * If truncate failed early the inode might still be 1385 * on the orphan list; we need to make sure the inode 1386 * is removed from the orphan list in that case. 1387 */ 1388 if (inode->i_nlink) 1389 ext4_orphan_del(NULL, inode); 1390 } 1391 1392 return ret ? ret : copied; 1393 } 1394 1395 /* 1396 * This is a private version of page_zero_new_buffers() which doesn't 1397 * set the buffer to be dirty, since in data=journalled mode we need 1398 * to call ext4_handle_dirty_metadata() instead. 1399 */ 1400 static void ext4_journalled_zero_new_buffers(handle_t *handle, 1401 struct page *page, 1402 unsigned from, unsigned to) 1403 { 1404 unsigned int block_start = 0, block_end; 1405 struct buffer_head *head, *bh; 1406 1407 bh = head = page_buffers(page); 1408 do { 1409 block_end = block_start + bh->b_size; 1410 if (buffer_new(bh)) { 1411 if (block_end > from && block_start < to) { 1412 if (!PageUptodate(page)) { 1413 unsigned start, size; 1414 1415 start = max(from, block_start); 1416 size = min(to, block_end) - start; 1417 1418 zero_user(page, start, size); 1419 write_end_fn(handle, bh); 1420 } 1421 clear_buffer_new(bh); 1422 } 1423 } 1424 block_start = block_end; 1425 bh = bh->b_this_page; 1426 } while (bh != head); 1427 } 1428 1429 static int ext4_journalled_write_end(struct file *file, 1430 struct address_space *mapping, 1431 loff_t pos, unsigned len, unsigned copied, 1432 struct page *page, void *fsdata) 1433 { 1434 handle_t *handle = ext4_journal_current_handle(); 1435 struct inode *inode = mapping->host; 1436 loff_t old_size = inode->i_size; 1437 int ret = 0, ret2; 1438 int partial = 0; 1439 unsigned from, to; 1440 int size_changed = 0; 1441 1442 trace_ext4_journalled_write_end(inode, pos, len, copied); 1443 from = pos & (PAGE_SIZE - 1); 1444 to = from + len; 1445 1446 BUG_ON(!ext4_handle_valid(handle)); 1447 1448 if (ext4_has_inline_data(inode)) { 1449 ret = ext4_write_inline_data_end(inode, pos, len, 1450 copied, page); 1451 if (ret < 0) { 1452 unlock_page(page); 1453 put_page(page); 1454 goto errout; 1455 } 1456 copied = ret; 1457 } else if (unlikely(copied < len) && !PageUptodate(page)) { 1458 copied = 0; 1459 ext4_journalled_zero_new_buffers(handle, page, from, to); 1460 } else { 1461 if (unlikely(copied < len)) 1462 ext4_journalled_zero_new_buffers(handle, page, 1463 from + copied, to); 1464 ret = ext4_walk_page_buffers(handle, page_buffers(page), from, 1465 from + copied, &partial, 1466 write_end_fn); 1467 if (!partial) 1468 SetPageUptodate(page); 1469 } 1470 size_changed = ext4_update_inode_size(inode, pos + copied); 1471 ext4_set_inode_state(inode, EXT4_STATE_JDATA); 1472 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; 1473 unlock_page(page); 1474 put_page(page); 1475 1476 if (old_size < pos) 1477 pagecache_isize_extended(inode, old_size, pos); 1478 1479 if (size_changed) { 1480 ret2 = ext4_mark_inode_dirty(handle, inode); 1481 if (!ret) 1482 ret = ret2; 1483 } 1484 1485 if (pos + len > inode->i_size && ext4_can_truncate(inode)) 1486 /* if we have allocated more blocks and copied 1487 * less. We will have blocks allocated outside 1488 * inode->i_size. So truncate them 1489 */ 1490 ext4_orphan_add(handle, inode); 1491 1492 errout: 1493 ret2 = ext4_journal_stop(handle); 1494 if (!ret) 1495 ret = ret2; 1496 if (pos + len > inode->i_size) { 1497 ext4_truncate_failed_write(inode); 1498 /* 1499 * If truncate failed early the inode might still be 1500 * on the orphan list; we need to make sure the inode 1501 * is removed from the orphan list in that case. 1502 */ 1503 if (inode->i_nlink) 1504 ext4_orphan_del(NULL, inode); 1505 } 1506 1507 return ret ? ret : copied; 1508 } 1509 1510 /* 1511 * Reserve space for a single cluster 1512 */ 1513 static int ext4_da_reserve_space(struct inode *inode) 1514 { 1515 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1516 struct ext4_inode_info *ei = EXT4_I(inode); 1517 int ret; 1518 1519 /* 1520 * We will charge metadata quota at writeout time; this saves 1521 * us from metadata over-estimation, though we may go over by 1522 * a small amount in the end. Here we just reserve for data. 1523 */ 1524 ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1)); 1525 if (ret) 1526 return ret; 1527 1528 spin_lock(&ei->i_block_reservation_lock); 1529 if (ext4_claim_free_clusters(sbi, 1, 0)) { 1530 spin_unlock(&ei->i_block_reservation_lock); 1531 dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); 1532 return -ENOSPC; 1533 } 1534 ei->i_reserved_data_blocks++; 1535 trace_ext4_da_reserve_space(inode); 1536 spin_unlock(&ei->i_block_reservation_lock); 1537 1538 return 0; /* success */ 1539 } 1540 1541 static void ext4_da_release_space(struct inode *inode, int to_free) 1542 { 1543 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1544 struct ext4_inode_info *ei = EXT4_I(inode); 1545 1546 if (!to_free) 1547 return; /* Nothing to release, exit */ 1548 1549 spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 1550 1551 trace_ext4_da_release_space(inode, to_free); 1552 if (unlikely(to_free > ei->i_reserved_data_blocks)) { 1553 /* 1554 * if there aren't enough reserved blocks, then the 1555 * counter is messed up somewhere. Since this 1556 * function is called from invalidate page, it's 1557 * harmless to return without any action. 1558 */ 1559 ext4_warning(inode->i_sb, "ext4_da_release_space: " 1560 "ino %lu, to_free %d with only %d reserved " 1561 "data blocks", inode->i_ino, to_free, 1562 ei->i_reserved_data_blocks); 1563 WARN_ON(1); 1564 to_free = ei->i_reserved_data_blocks; 1565 } 1566 ei->i_reserved_data_blocks -= to_free; 1567 1568 /* update fs dirty data blocks counter */ 1569 percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free); 1570 1571 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1572 1573 dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free)); 1574 } 1575 1576 static void ext4_da_page_release_reservation(struct page *page, 1577 unsigned int offset, 1578 unsigned int length) 1579 { 1580 int to_release = 0, contiguous_blks = 0; 1581 struct buffer_head *head, *bh; 1582 unsigned int curr_off = 0; 1583 struct inode *inode = page->mapping->host; 1584 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1585 unsigned int stop = offset + length; 1586 int num_clusters; 1587 ext4_fsblk_t lblk; 1588 1589 BUG_ON(stop > PAGE_SIZE || stop < length); 1590 1591 head = page_buffers(page); 1592 bh = head; 1593 do { 1594 unsigned int next_off = curr_off + bh->b_size; 1595 1596 if (next_off > stop) 1597 break; 1598 1599 if ((offset <= curr_off) && (buffer_delay(bh))) { 1600 to_release++; 1601 contiguous_blks++; 1602 clear_buffer_delay(bh); 1603 } else if (contiguous_blks) { 1604 lblk = page->index << 1605 (PAGE_SHIFT - inode->i_blkbits); 1606 lblk += (curr_off >> inode->i_blkbits) - 1607 contiguous_blks; 1608 ext4_es_remove_extent(inode, lblk, contiguous_blks); 1609 contiguous_blks = 0; 1610 } 1611 curr_off = next_off; 1612 } while ((bh = bh->b_this_page) != head); 1613 1614 if (contiguous_blks) { 1615 lblk = page->index << (PAGE_SHIFT - inode->i_blkbits); 1616 lblk += (curr_off >> inode->i_blkbits) - contiguous_blks; 1617 ext4_es_remove_extent(inode, lblk, contiguous_blks); 1618 } 1619 1620 /* If we have released all the blocks belonging to a cluster, then we 1621 * need to release the reserved space for that cluster. */ 1622 num_clusters = EXT4_NUM_B2C(sbi, to_release); 1623 while (num_clusters > 0) { 1624 lblk = (page->index << (PAGE_SHIFT - inode->i_blkbits)) + 1625 ((num_clusters - 1) << sbi->s_cluster_bits); 1626 if (sbi->s_cluster_ratio == 1 || 1627 !ext4_find_delalloc_cluster(inode, lblk)) 1628 ext4_da_release_space(inode, 1); 1629 1630 num_clusters--; 1631 } 1632 } 1633 1634 /* 1635 * Delayed allocation stuff 1636 */ 1637 1638 struct mpage_da_data { 1639 struct inode *inode; 1640 struct writeback_control *wbc; 1641 1642 pgoff_t first_page; /* The first page to write */ 1643 pgoff_t next_page; /* Current page to examine */ 1644 pgoff_t last_page; /* Last page to examine */ 1645 /* 1646 * Extent to map - this can be after first_page because that can be 1647 * fully mapped. We somewhat abuse m_flags to store whether the extent 1648 * is delalloc or unwritten. 1649 */ 1650 struct ext4_map_blocks map; 1651 struct ext4_io_submit io_submit; /* IO submission data */ 1652 unsigned int do_map:1; 1653 }; 1654 1655 static void mpage_release_unused_pages(struct mpage_da_data *mpd, 1656 bool invalidate) 1657 { 1658 int nr_pages, i; 1659 pgoff_t index, end; 1660 struct pagevec pvec; 1661 struct inode *inode = mpd->inode; 1662 struct address_space *mapping = inode->i_mapping; 1663 1664 /* This is necessary when next_page == 0. */ 1665 if (mpd->first_page >= mpd->next_page) 1666 return; 1667 1668 index = mpd->first_page; 1669 end = mpd->next_page - 1; 1670 if (invalidate) { 1671 ext4_lblk_t start, last; 1672 start = index << (PAGE_SHIFT - inode->i_blkbits); 1673 last = end << (PAGE_SHIFT - inode->i_blkbits); 1674 ext4_es_remove_extent(inode, start, last - start + 1); 1675 } 1676 1677 pagevec_init(&pvec, 0); 1678 while (index <= end) { 1679 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 1680 if (nr_pages == 0) 1681 break; 1682 for (i = 0; i < nr_pages; i++) { 1683 struct page *page = pvec.pages[i]; 1684 if (page->index > end) 1685 break; 1686 BUG_ON(!PageLocked(page)); 1687 BUG_ON(PageWriteback(page)); 1688 if (invalidate) { 1689 if (page_mapped(page)) 1690 clear_page_dirty_for_io(page); 1691 block_invalidatepage(page, 0, PAGE_SIZE); 1692 ClearPageUptodate(page); 1693 } 1694 unlock_page(page); 1695 } 1696 index = pvec.pages[nr_pages - 1]->index + 1; 1697 pagevec_release(&pvec); 1698 } 1699 } 1700 1701 static void ext4_print_free_blocks(struct inode *inode) 1702 { 1703 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1704 struct super_block *sb = inode->i_sb; 1705 struct ext4_inode_info *ei = EXT4_I(inode); 1706 1707 ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld", 1708 EXT4_C2B(EXT4_SB(inode->i_sb), 1709 ext4_count_free_clusters(sb))); 1710 ext4_msg(sb, KERN_CRIT, "Free/Dirty block details"); 1711 ext4_msg(sb, KERN_CRIT, "free_blocks=%lld", 1712 (long long) EXT4_C2B(EXT4_SB(sb), 1713 percpu_counter_sum(&sbi->s_freeclusters_counter))); 1714 ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld", 1715 (long long) EXT4_C2B(EXT4_SB(sb), 1716 percpu_counter_sum(&sbi->s_dirtyclusters_counter))); 1717 ext4_msg(sb, KERN_CRIT, "Block reservation details"); 1718 ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u", 1719 ei->i_reserved_data_blocks); 1720 return; 1721 } 1722 1723 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh) 1724 { 1725 return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh); 1726 } 1727 1728 /* 1729 * This function is grabs code from the very beginning of 1730 * ext4_map_blocks, but assumes that the caller is from delayed write 1731 * time. This function looks up the requested blocks and sets the 1732 * buffer delay bit under the protection of i_data_sem. 1733 */ 1734 static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, 1735 struct ext4_map_blocks *map, 1736 struct buffer_head *bh) 1737 { 1738 struct extent_status es; 1739 int retval; 1740 sector_t invalid_block = ~((sector_t) 0xffff); 1741 #ifdef ES_AGGRESSIVE_TEST 1742 struct ext4_map_blocks orig_map; 1743 1744 memcpy(&orig_map, map, sizeof(*map)); 1745 #endif 1746 1747 if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es)) 1748 invalid_block = ~0; 1749 1750 map->m_flags = 0; 1751 ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u," 1752 "logical block %lu\n", inode->i_ino, map->m_len, 1753 (unsigned long) map->m_lblk); 1754 1755 /* Lookup extent status tree firstly */ 1756 if (ext4_es_lookup_extent(inode, iblock, &es)) { 1757 if (ext4_es_is_hole(&es)) { 1758 retval = 0; 1759 down_read(&EXT4_I(inode)->i_data_sem); 1760 goto add_delayed; 1761 } 1762 1763 /* 1764 * Delayed extent could be allocated by fallocate. 1765 * So we need to check it. 1766 */ 1767 if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) { 1768 map_bh(bh, inode->i_sb, invalid_block); 1769 set_buffer_new(bh); 1770 set_buffer_delay(bh); 1771 return 0; 1772 } 1773 1774 map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk; 1775 retval = es.es_len - (iblock - es.es_lblk); 1776 if (retval > map->m_len) 1777 retval = map->m_len; 1778 map->m_len = retval; 1779 if (ext4_es_is_written(&es)) 1780 map->m_flags |= EXT4_MAP_MAPPED; 1781 else if (ext4_es_is_unwritten(&es)) 1782 map->m_flags |= EXT4_MAP_UNWRITTEN; 1783 else 1784 BUG_ON(1); 1785 1786 #ifdef ES_AGGRESSIVE_TEST 1787 ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0); 1788 #endif 1789 return retval; 1790 } 1791 1792 /* 1793 * Try to see if we can get the block without requesting a new 1794 * file system block. 1795 */ 1796 down_read(&EXT4_I(inode)->i_data_sem); 1797 if (ext4_has_inline_data(inode)) 1798 retval = 0; 1799 else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 1800 retval = ext4_ext_map_blocks(NULL, inode, map, 0); 1801 else 1802 retval = ext4_ind_map_blocks(NULL, inode, map, 0); 1803 1804 add_delayed: 1805 if (retval == 0) { 1806 int ret; 1807 /* 1808 * XXX: __block_prepare_write() unmaps passed block, 1809 * is it OK? 1810 */ 1811 /* 1812 * If the block was allocated from previously allocated cluster, 1813 * then we don't need to reserve it again. However we still need 1814 * to reserve metadata for every block we're going to write. 1815 */ 1816 if (EXT4_SB(inode->i_sb)->s_cluster_ratio == 1 || 1817 !ext4_find_delalloc_cluster(inode, map->m_lblk)) { 1818 ret = ext4_da_reserve_space(inode); 1819 if (ret) { 1820 /* not enough space to reserve */ 1821 retval = ret; 1822 goto out_unlock; 1823 } 1824 } 1825 1826 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, 1827 ~0, EXTENT_STATUS_DELAYED); 1828 if (ret) { 1829 retval = ret; 1830 goto out_unlock; 1831 } 1832 1833 map_bh(bh, inode->i_sb, invalid_block); 1834 set_buffer_new(bh); 1835 set_buffer_delay(bh); 1836 } else if (retval > 0) { 1837 int ret; 1838 unsigned int status; 1839 1840 if (unlikely(retval != map->m_len)) { 1841 ext4_warning(inode->i_sb, 1842 "ES len assertion failed for inode " 1843 "%lu: retval %d != map->m_len %d", 1844 inode->i_ino, retval, map->m_len); 1845 WARN_ON(1); 1846 } 1847 1848 status = map->m_flags & EXT4_MAP_UNWRITTEN ? 1849 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; 1850 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, 1851 map->m_pblk, status); 1852 if (ret != 0) 1853 retval = ret; 1854 } 1855 1856 out_unlock: 1857 up_read((&EXT4_I(inode)->i_data_sem)); 1858 1859 return retval; 1860 } 1861 1862 /* 1863 * This is a special get_block_t callback which is used by 1864 * ext4_da_write_begin(). It will either return mapped block or 1865 * reserve space for a single block. 1866 * 1867 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set. 1868 * We also have b_blocknr = -1 and b_bdev initialized properly 1869 * 1870 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set. 1871 * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev 1872 * initialized properly. 1873 */ 1874 int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, 1875 struct buffer_head *bh, int create) 1876 { 1877 struct ext4_map_blocks map; 1878 int ret = 0; 1879 1880 BUG_ON(create == 0); 1881 BUG_ON(bh->b_size != inode->i_sb->s_blocksize); 1882 1883 map.m_lblk = iblock; 1884 map.m_len = 1; 1885 1886 /* 1887 * first, we need to know whether the block is allocated already 1888 * preallocated blocks are unmapped but should treated 1889 * the same as allocated blocks. 1890 */ 1891 ret = ext4_da_map_blocks(inode, iblock, &map, bh); 1892 if (ret <= 0) 1893 return ret; 1894 1895 map_bh(bh, inode->i_sb, map.m_pblk); 1896 ext4_update_bh_state(bh, map.m_flags); 1897 1898 if (buffer_unwritten(bh)) { 1899 /* A delayed write to unwritten bh should be marked 1900 * new and mapped. Mapped ensures that we don't do 1901 * get_block multiple times when we write to the same 1902 * offset and new ensures that we do proper zero out 1903 * for partial write. 1904 */ 1905 set_buffer_new(bh); 1906 set_buffer_mapped(bh); 1907 } 1908 return 0; 1909 } 1910 1911 static int bget_one(handle_t *handle, struct buffer_head *bh) 1912 { 1913 get_bh(bh); 1914 return 0; 1915 } 1916 1917 static int bput_one(handle_t *handle, struct buffer_head *bh) 1918 { 1919 put_bh(bh); 1920 return 0; 1921 } 1922 1923 static int __ext4_journalled_writepage(struct page *page, 1924 unsigned int len) 1925 { 1926 struct address_space *mapping = page->mapping; 1927 struct inode *inode = mapping->host; 1928 struct buffer_head *page_bufs = NULL; 1929 handle_t *handle = NULL; 1930 int ret = 0, err = 0; 1931 int inline_data = ext4_has_inline_data(inode); 1932 struct buffer_head *inode_bh = NULL; 1933 1934 ClearPageChecked(page); 1935 1936 if (inline_data) { 1937 BUG_ON(page->index != 0); 1938 BUG_ON(len > ext4_get_max_inline_size(inode)); 1939 inode_bh = ext4_journalled_write_inline_data(inode, len, page); 1940 if (inode_bh == NULL) 1941 goto out; 1942 } else { 1943 page_bufs = page_buffers(page); 1944 if (!page_bufs) { 1945 BUG(); 1946 goto out; 1947 } 1948 ext4_walk_page_buffers(handle, page_bufs, 0, len, 1949 NULL, bget_one); 1950 } 1951 /* 1952 * We need to release the page lock before we start the 1953 * journal, so grab a reference so the page won't disappear 1954 * out from under us. 1955 */ 1956 get_page(page); 1957 unlock_page(page); 1958 1959 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1960 ext4_writepage_trans_blocks(inode)); 1961 if (IS_ERR(handle)) { 1962 ret = PTR_ERR(handle); 1963 put_page(page); 1964 goto out_no_pagelock; 1965 } 1966 BUG_ON(!ext4_handle_valid(handle)); 1967 1968 lock_page(page); 1969 put_page(page); 1970 if (page->mapping != mapping) { 1971 /* The page got truncated from under us */ 1972 ext4_journal_stop(handle); 1973 ret = 0; 1974 goto out; 1975 } 1976 1977 if (inline_data) { 1978 BUFFER_TRACE(inode_bh, "get write access"); 1979 ret = ext4_journal_get_write_access(handle, inode_bh); 1980 1981 err = ext4_handle_dirty_metadata(handle, inode, inode_bh); 1982 1983 } else { 1984 ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, 1985 do_journal_get_write_access); 1986 1987 err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, 1988 write_end_fn); 1989 } 1990 if (ret == 0) 1991 ret = err; 1992 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; 1993 err = ext4_journal_stop(handle); 1994 if (!ret) 1995 ret = err; 1996 1997 if (!ext4_has_inline_data(inode)) 1998 ext4_walk_page_buffers(NULL, page_bufs, 0, len, 1999 NULL, bput_one); 2000 ext4_set_inode_state(inode, EXT4_STATE_JDATA); 2001 out: 2002 unlock_page(page); 2003 out_no_pagelock: 2004 brelse(inode_bh); 2005 return ret; 2006 } 2007 2008 /* 2009 * Note that we don't need to start a transaction unless we're journaling data 2010 * because we should have holes filled from ext4_page_mkwrite(). We even don't 2011 * need to file the inode to the transaction's list in ordered mode because if 2012 * we are writing back data added by write(), the inode is already there and if 2013 * we are writing back data modified via mmap(), no one guarantees in which 2014 * transaction the data will hit the disk. In case we are journaling data, we 2015 * cannot start transaction directly because transaction start ranks above page 2016 * lock so we have to do some magic. 2017 * 2018 * This function can get called via... 2019 * - ext4_writepages after taking page lock (have journal handle) 2020 * - journal_submit_inode_data_buffers (no journal handle) 2021 * - shrink_page_list via the kswapd/direct reclaim (no journal handle) 2022 * - grab_page_cache when doing write_begin (have journal handle) 2023 * 2024 * We don't do any block allocation in this function. If we have page with 2025 * multiple blocks we need to write those buffer_heads that are mapped. This 2026 * is important for mmaped based write. So if we do with blocksize 1K 2027 * truncate(f, 1024); 2028 * a = mmap(f, 0, 4096); 2029 * a[0] = 'a'; 2030 * truncate(f, 4096); 2031 * we have in the page first buffer_head mapped via page_mkwrite call back 2032 * but other buffer_heads would be unmapped but dirty (dirty done via the 2033 * do_wp_page). So writepage should write the first block. If we modify 2034 * the mmap area beyond 1024 we will again get a page_fault and the 2035 * page_mkwrite callback will do the block allocation and mark the 2036 * buffer_heads mapped. 2037 * 2038 * We redirty the page if we have any buffer_heads that is either delay or 2039 * unwritten in the page. 2040 * 2041 * We can get recursively called as show below. 2042 * 2043 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() -> 2044 * ext4_writepage() 2045 * 2046 * But since we don't do any block allocation we should not deadlock. 2047 * Page also have the dirty flag cleared so we don't get recurive page_lock. 2048 */ 2049 static int ext4_writepage(struct page *page, 2050 struct writeback_control *wbc) 2051 { 2052 int ret = 0; 2053 loff_t size; 2054 unsigned int len; 2055 struct buffer_head *page_bufs = NULL; 2056 struct inode *inode = page->mapping->host; 2057 struct ext4_io_submit io_submit; 2058 bool keep_towrite = false; 2059 2060 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) { 2061 ext4_invalidatepage(page, 0, PAGE_SIZE); 2062 unlock_page(page); 2063 return -EIO; 2064 } 2065 2066 trace_ext4_writepage(page); 2067 size = i_size_read(inode); 2068 if (page->index == size >> PAGE_SHIFT) 2069 len = size & ~PAGE_MASK; 2070 else 2071 len = PAGE_SIZE; 2072 2073 page_bufs = page_buffers(page); 2074 /* 2075 * We cannot do block allocation or other extent handling in this 2076 * function. If there are buffers needing that, we have to redirty 2077 * the page. But we may reach here when we do a journal commit via 2078 * journal_submit_inode_data_buffers() and in that case we must write 2079 * allocated buffers to achieve data=ordered mode guarantees. 2080 * 2081 * Also, if there is only one buffer per page (the fs block 2082 * size == the page size), if one buffer needs block 2083 * allocation or needs to modify the extent tree to clear the 2084 * unwritten flag, we know that the page can't be written at 2085 * all, so we might as well refuse the write immediately. 2086 * Unfortunately if the block size != page size, we can't as 2087 * easily detect this case using ext4_walk_page_buffers(), but 2088 * for the extremely common case, this is an optimization that 2089 * skips a useless round trip through ext4_bio_write_page(). 2090 */ 2091 if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL, 2092 ext4_bh_delay_or_unwritten)) { 2093 redirty_page_for_writepage(wbc, page); 2094 if ((current->flags & PF_MEMALLOC) || 2095 (inode->i_sb->s_blocksize == PAGE_SIZE)) { 2096 /* 2097 * For memory cleaning there's no point in writing only 2098 * some buffers. So just bail out. Warn if we came here 2099 * from direct reclaim. 2100 */ 2101 WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) 2102 == PF_MEMALLOC); 2103 unlock_page(page); 2104 return 0; 2105 } 2106 keep_towrite = true; 2107 } 2108 2109 if (PageChecked(page) && ext4_should_journal_data(inode)) 2110 /* 2111 * It's mmapped pagecache. Add buffers and journal it. There 2112 * doesn't seem much point in redirtying the page here. 2113 */ 2114 return __ext4_journalled_writepage(page, len); 2115 2116 ext4_io_submit_init(&io_submit, wbc); 2117 io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS); 2118 if (!io_submit.io_end) { 2119 redirty_page_for_writepage(wbc, page); 2120 unlock_page(page); 2121 return -ENOMEM; 2122 } 2123 ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite); 2124 ext4_io_submit(&io_submit); 2125 /* Drop io_end reference we got from init */ 2126 ext4_put_io_end_defer(io_submit.io_end); 2127 return ret; 2128 } 2129 2130 static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page) 2131 { 2132 int len; 2133 loff_t size; 2134 int err; 2135 2136 BUG_ON(page->index != mpd->first_page); 2137 clear_page_dirty_for_io(page); 2138 /* 2139 * We have to be very careful here! Nothing protects writeback path 2140 * against i_size changes and the page can be writeably mapped into 2141 * page tables. So an application can be growing i_size and writing 2142 * data through mmap while writeback runs. clear_page_dirty_for_io() 2143 * write-protects our page in page tables and the page cannot get 2144 * written to again until we release page lock. So only after 2145 * clear_page_dirty_for_io() we are safe to sample i_size for 2146 * ext4_bio_write_page() to zero-out tail of the written page. We rely 2147 * on the barrier provided by TestClearPageDirty in 2148 * clear_page_dirty_for_io() to make sure i_size is really sampled only 2149 * after page tables are updated. 2150 */ 2151 size = i_size_read(mpd->inode); 2152 if (page->index == size >> PAGE_SHIFT) 2153 len = size & ~PAGE_MASK; 2154 else 2155 len = PAGE_SIZE; 2156 err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false); 2157 if (!err) 2158 mpd->wbc->nr_to_write--; 2159 mpd->first_page++; 2160 2161 return err; 2162 } 2163 2164 #define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay)) 2165 2166 /* 2167 * mballoc gives us at most this number of blocks... 2168 * XXX: That seems to be only a limitation of ext4_mb_normalize_request(). 2169 * The rest of mballoc seems to handle chunks up to full group size. 2170 */ 2171 #define MAX_WRITEPAGES_EXTENT_LEN 2048 2172 2173 /* 2174 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map 2175 * 2176 * @mpd - extent of blocks 2177 * @lblk - logical number of the block in the file 2178 * @bh - buffer head we want to add to the extent 2179 * 2180 * The function is used to collect contig. blocks in the same state. If the 2181 * buffer doesn't require mapping for writeback and we haven't started the 2182 * extent of buffers to map yet, the function returns 'true' immediately - the 2183 * caller can write the buffer right away. Otherwise the function returns true 2184 * if the block has been added to the extent, false if the block couldn't be 2185 * added. 2186 */ 2187 static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk, 2188 struct buffer_head *bh) 2189 { 2190 struct ext4_map_blocks *map = &mpd->map; 2191 2192 /* Buffer that doesn't need mapping for writeback? */ 2193 if (!buffer_dirty(bh) || !buffer_mapped(bh) || 2194 (!buffer_delay(bh) && !buffer_unwritten(bh))) { 2195 /* So far no extent to map => we write the buffer right away */ 2196 if (map->m_len == 0) 2197 return true; 2198 return false; 2199 } 2200 2201 /* First block in the extent? */ 2202 if (map->m_len == 0) { 2203 /* We cannot map unless handle is started... */ 2204 if (!mpd->do_map) 2205 return false; 2206 map->m_lblk = lblk; 2207 map->m_len = 1; 2208 map->m_flags = bh->b_state & BH_FLAGS; 2209 return true; 2210 } 2211 2212 /* Don't go larger than mballoc is willing to allocate */ 2213 if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN) 2214 return false; 2215 2216 /* Can we merge the block to our big extent? */ 2217 if (lblk == map->m_lblk + map->m_len && 2218 (bh->b_state & BH_FLAGS) == map->m_flags) { 2219 map->m_len++; 2220 return true; 2221 } 2222 return false; 2223 } 2224 2225 /* 2226 * mpage_process_page_bufs - submit page buffers for IO or add them to extent 2227 * 2228 * @mpd - extent of blocks for mapping 2229 * @head - the first buffer in the page 2230 * @bh - buffer we should start processing from 2231 * @lblk - logical number of the block in the file corresponding to @bh 2232 * 2233 * Walk through page buffers from @bh upto @head (exclusive) and either submit 2234 * the page for IO if all buffers in this page were mapped and there's no 2235 * accumulated extent of buffers to map or add buffers in the page to the 2236 * extent of buffers to map. The function returns 1 if the caller can continue 2237 * by processing the next page, 0 if it should stop adding buffers to the 2238 * extent to map because we cannot extend it anymore. It can also return value 2239 * < 0 in case of error during IO submission. 2240 */ 2241 static int mpage_process_page_bufs(struct mpage_da_data *mpd, 2242 struct buffer_head *head, 2243 struct buffer_head *bh, 2244 ext4_lblk_t lblk) 2245 { 2246 struct inode *inode = mpd->inode; 2247 int err; 2248 ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1) 2249 >> inode->i_blkbits; 2250 2251 do { 2252 BUG_ON(buffer_locked(bh)); 2253 2254 if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) { 2255 /* Found extent to map? */ 2256 if (mpd->map.m_len) 2257 return 0; 2258 /* Buffer needs mapping and handle is not started? */ 2259 if (!mpd->do_map) 2260 return 0; 2261 /* Everything mapped so far and we hit EOF */ 2262 break; 2263 } 2264 } while (lblk++, (bh = bh->b_this_page) != head); 2265 /* So far everything mapped? Submit the page for IO. */ 2266 if (mpd->map.m_len == 0) { 2267 err = mpage_submit_page(mpd, head->b_page); 2268 if (err < 0) 2269 return err; 2270 } 2271 return lblk < blocks; 2272 } 2273 2274 /* 2275 * mpage_map_buffers - update buffers corresponding to changed extent and 2276 * submit fully mapped pages for IO 2277 * 2278 * @mpd - description of extent to map, on return next extent to map 2279 * 2280 * Scan buffers corresponding to changed extent (we expect corresponding pages 2281 * to be already locked) and update buffer state according to new extent state. 2282 * We map delalloc buffers to their physical location, clear unwritten bits, 2283 * and mark buffers as uninit when we perform writes to unwritten extents 2284 * and do extent conversion after IO is finished. If the last page is not fully 2285 * mapped, we update @map to the next extent in the last page that needs 2286 * mapping. Otherwise we submit the page for IO. 2287 */ 2288 static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd) 2289 { 2290 struct pagevec pvec; 2291 int nr_pages, i; 2292 struct inode *inode = mpd->inode; 2293 struct buffer_head *head, *bh; 2294 int bpp_bits = PAGE_SHIFT - inode->i_blkbits; 2295 pgoff_t start, end; 2296 ext4_lblk_t lblk; 2297 sector_t pblock; 2298 int err; 2299 2300 start = mpd->map.m_lblk >> bpp_bits; 2301 end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits; 2302 lblk = start << bpp_bits; 2303 pblock = mpd->map.m_pblk; 2304 2305 pagevec_init(&pvec, 0); 2306 while (start <= end) { 2307 nr_pages = pagevec_lookup(&pvec, inode->i_mapping, start, 2308 PAGEVEC_SIZE); 2309 if (nr_pages == 0) 2310 break; 2311 for (i = 0; i < nr_pages; i++) { 2312 struct page *page = pvec.pages[i]; 2313 2314 if (page->index > end) 2315 break; 2316 /* Up to 'end' pages must be contiguous */ 2317 BUG_ON(page->index != start); 2318 bh = head = page_buffers(page); 2319 do { 2320 if (lblk < mpd->map.m_lblk) 2321 continue; 2322 if (lblk >= mpd->map.m_lblk + mpd->map.m_len) { 2323 /* 2324 * Buffer after end of mapped extent. 2325 * Find next buffer in the page to map. 2326 */ 2327 mpd->map.m_len = 0; 2328 mpd->map.m_flags = 0; 2329 /* 2330 * FIXME: If dioread_nolock supports 2331 * blocksize < pagesize, we need to make 2332 * sure we add size mapped so far to 2333 * io_end->size as the following call 2334 * can submit the page for IO. 2335 */ 2336 err = mpage_process_page_bufs(mpd, head, 2337 bh, lblk); 2338 pagevec_release(&pvec); 2339 if (err > 0) 2340 err = 0; 2341 return err; 2342 } 2343 if (buffer_delay(bh)) { 2344 clear_buffer_delay(bh); 2345 bh->b_blocknr = pblock++; 2346 } 2347 clear_buffer_unwritten(bh); 2348 } while (lblk++, (bh = bh->b_this_page) != head); 2349 2350 /* 2351 * FIXME: This is going to break if dioread_nolock 2352 * supports blocksize < pagesize as we will try to 2353 * convert potentially unmapped parts of inode. 2354 */ 2355 mpd->io_submit.io_end->size += PAGE_SIZE; 2356 /* Page fully mapped - let IO run! */ 2357 err = mpage_submit_page(mpd, page); 2358 if (err < 0) { 2359 pagevec_release(&pvec); 2360 return err; 2361 } 2362 start++; 2363 } 2364 pagevec_release(&pvec); 2365 } 2366 /* Extent fully mapped and matches with page boundary. We are done. */ 2367 mpd->map.m_len = 0; 2368 mpd->map.m_flags = 0; 2369 return 0; 2370 } 2371 2372 static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd) 2373 { 2374 struct inode *inode = mpd->inode; 2375 struct ext4_map_blocks *map = &mpd->map; 2376 int get_blocks_flags; 2377 int err, dioread_nolock; 2378 2379 trace_ext4_da_write_pages_extent(inode, map); 2380 /* 2381 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or 2382 * to convert an unwritten extent to be initialized (in the case 2383 * where we have written into one or more preallocated blocks). It is 2384 * possible that we're going to need more metadata blocks than 2385 * previously reserved. However we must not fail because we're in 2386 * writeback and there is nothing we can do about it so it might result 2387 * in data loss. So use reserved blocks to allocate metadata if 2388 * possible. 2389 * 2390 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if 2391 * the blocks in question are delalloc blocks. This indicates 2392 * that the blocks and quotas has already been checked when 2393 * the data was copied into the page cache. 2394 */ 2395 get_blocks_flags = EXT4_GET_BLOCKS_CREATE | 2396 EXT4_GET_BLOCKS_METADATA_NOFAIL | 2397 EXT4_GET_BLOCKS_IO_SUBMIT; 2398 dioread_nolock = ext4_should_dioread_nolock(inode); 2399 if (dioread_nolock) 2400 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; 2401 if (map->m_flags & (1 << BH_Delay)) 2402 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; 2403 2404 err = ext4_map_blocks(handle, inode, map, get_blocks_flags); 2405 if (err < 0) 2406 return err; 2407 if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) { 2408 if (!mpd->io_submit.io_end->handle && 2409 ext4_handle_valid(handle)) { 2410 mpd->io_submit.io_end->handle = handle->h_rsv_handle; 2411 handle->h_rsv_handle = NULL; 2412 } 2413 ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end); 2414 } 2415 2416 BUG_ON(map->m_len == 0); 2417 if (map->m_flags & EXT4_MAP_NEW) { 2418 clean_bdev_aliases(inode->i_sb->s_bdev, map->m_pblk, 2419 map->m_len); 2420 } 2421 return 0; 2422 } 2423 2424 /* 2425 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length 2426 * mpd->len and submit pages underlying it for IO 2427 * 2428 * @handle - handle for journal operations 2429 * @mpd - extent to map 2430 * @give_up_on_write - we set this to true iff there is a fatal error and there 2431 * is no hope of writing the data. The caller should discard 2432 * dirty pages to avoid infinite loops. 2433 * 2434 * The function maps extent starting at mpd->lblk of length mpd->len. If it is 2435 * delayed, blocks are allocated, if it is unwritten, we may need to convert 2436 * them to initialized or split the described range from larger unwritten 2437 * extent. Note that we need not map all the described range since allocation 2438 * can return less blocks or the range is covered by more unwritten extents. We 2439 * cannot map more because we are limited by reserved transaction credits. On 2440 * the other hand we always make sure that the last touched page is fully 2441 * mapped so that it can be written out (and thus forward progress is 2442 * guaranteed). After mapping we submit all mapped pages for IO. 2443 */ 2444 static int mpage_map_and_submit_extent(handle_t *handle, 2445 struct mpage_da_data *mpd, 2446 bool *give_up_on_write) 2447 { 2448 struct inode *inode = mpd->inode; 2449 struct ext4_map_blocks *map = &mpd->map; 2450 int err; 2451 loff_t disksize; 2452 int progress = 0; 2453 2454 mpd->io_submit.io_end->offset = 2455 ((loff_t)map->m_lblk) << inode->i_blkbits; 2456 do { 2457 err = mpage_map_one_extent(handle, mpd); 2458 if (err < 0) { 2459 struct super_block *sb = inode->i_sb; 2460 2461 if (ext4_forced_shutdown(EXT4_SB(sb)) || 2462 EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED) 2463 goto invalidate_dirty_pages; 2464 /* 2465 * Let the uper layers retry transient errors. 2466 * In the case of ENOSPC, if ext4_count_free_blocks() 2467 * is non-zero, a commit should free up blocks. 2468 */ 2469 if ((err == -ENOMEM) || 2470 (err == -ENOSPC && ext4_count_free_clusters(sb))) { 2471 if (progress) 2472 goto update_disksize; 2473 return err; 2474 } 2475 ext4_msg(sb, KERN_CRIT, 2476 "Delayed block allocation failed for " 2477 "inode %lu at logical offset %llu with" 2478 " max blocks %u with error %d", 2479 inode->i_ino, 2480 (unsigned long long)map->m_lblk, 2481 (unsigned)map->m_len, -err); 2482 ext4_msg(sb, KERN_CRIT, 2483 "This should not happen!! Data will " 2484 "be lost\n"); 2485 if (err == -ENOSPC) 2486 ext4_print_free_blocks(inode); 2487 invalidate_dirty_pages: 2488 *give_up_on_write = true; 2489 return err; 2490 } 2491 progress = 1; 2492 /* 2493 * Update buffer state, submit mapped pages, and get us new 2494 * extent to map 2495 */ 2496 err = mpage_map_and_submit_buffers(mpd); 2497 if (err < 0) 2498 goto update_disksize; 2499 } while (map->m_len); 2500 2501 update_disksize: 2502 /* 2503 * Update on-disk size after IO is submitted. Races with 2504 * truncate are avoided by checking i_size under i_data_sem. 2505 */ 2506 disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT; 2507 if (disksize > EXT4_I(inode)->i_disksize) { 2508 int err2; 2509 loff_t i_size; 2510 2511 down_write(&EXT4_I(inode)->i_data_sem); 2512 i_size = i_size_read(inode); 2513 if (disksize > i_size) 2514 disksize = i_size; 2515 if (disksize > EXT4_I(inode)->i_disksize) 2516 EXT4_I(inode)->i_disksize = disksize; 2517 up_write(&EXT4_I(inode)->i_data_sem); 2518 err2 = ext4_mark_inode_dirty(handle, inode); 2519 if (err2) 2520 ext4_error(inode->i_sb, 2521 "Failed to mark inode %lu dirty", 2522 inode->i_ino); 2523 if (!err) 2524 err = err2; 2525 } 2526 return err; 2527 } 2528 2529 /* 2530 * Calculate the total number of credits to reserve for one writepages 2531 * iteration. This is called from ext4_writepages(). We map an extent of 2532 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping 2533 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN + 2534 * bpp - 1 blocks in bpp different extents. 2535 */ 2536 static int ext4_da_writepages_trans_blocks(struct inode *inode) 2537 { 2538 int bpp = ext4_journal_blocks_per_page(inode); 2539 2540 return ext4_meta_trans_blocks(inode, 2541 MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp); 2542 } 2543 2544 /* 2545 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages 2546 * and underlying extent to map 2547 * 2548 * @mpd - where to look for pages 2549 * 2550 * Walk dirty pages in the mapping. If they are fully mapped, submit them for 2551 * IO immediately. When we find a page which isn't mapped we start accumulating 2552 * extent of buffers underlying these pages that needs mapping (formed by 2553 * either delayed or unwritten buffers). We also lock the pages containing 2554 * these buffers. The extent found is returned in @mpd structure (starting at 2555 * mpd->lblk with length mpd->len blocks). 2556 * 2557 * Note that this function can attach bios to one io_end structure which are 2558 * neither logically nor physically contiguous. Although it may seem as an 2559 * unnecessary complication, it is actually inevitable in blocksize < pagesize 2560 * case as we need to track IO to all buffers underlying a page in one io_end. 2561 */ 2562 static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) 2563 { 2564 struct address_space *mapping = mpd->inode->i_mapping; 2565 struct pagevec pvec; 2566 unsigned int nr_pages; 2567 long left = mpd->wbc->nr_to_write; 2568 pgoff_t index = mpd->first_page; 2569 pgoff_t end = mpd->last_page; 2570 int tag; 2571 int i, err = 0; 2572 int blkbits = mpd->inode->i_blkbits; 2573 ext4_lblk_t lblk; 2574 struct buffer_head *head; 2575 2576 if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages) 2577 tag = PAGECACHE_TAG_TOWRITE; 2578 else 2579 tag = PAGECACHE_TAG_DIRTY; 2580 2581 pagevec_init(&pvec, 0); 2582 mpd->map.m_len = 0; 2583 mpd->next_page = index; 2584 while (index <= end) { 2585 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, 2586 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); 2587 if (nr_pages == 0) 2588 goto out; 2589 2590 for (i = 0; i < nr_pages; i++) { 2591 struct page *page = pvec.pages[i]; 2592 2593 /* 2594 * At this point, the page may be truncated or 2595 * invalidated (changing page->mapping to NULL), or 2596 * even swizzled back from swapper_space to tmpfs file 2597 * mapping. However, page->index will not change 2598 * because we have a reference on the page. 2599 */ 2600 if (page->index > end) 2601 goto out; 2602 2603 /* 2604 * Accumulated enough dirty pages? This doesn't apply 2605 * to WB_SYNC_ALL mode. For integrity sync we have to 2606 * keep going because someone may be concurrently 2607 * dirtying pages, and we might have synced a lot of 2608 * newly appeared dirty pages, but have not synced all 2609 * of the old dirty pages. 2610 */ 2611 if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0) 2612 goto out; 2613 2614 /* If we can't merge this page, we are done. */ 2615 if (mpd->map.m_len > 0 && mpd->next_page != page->index) 2616 goto out; 2617 2618 lock_page(page); 2619 /* 2620 * If the page is no longer dirty, or its mapping no 2621 * longer corresponds to inode we are writing (which 2622 * means it has been truncated or invalidated), or the 2623 * page is already under writeback and we are not doing 2624 * a data integrity writeback, skip the page 2625 */ 2626 if (!PageDirty(page) || 2627 (PageWriteback(page) && 2628 (mpd->wbc->sync_mode == WB_SYNC_NONE)) || 2629 unlikely(page->mapping != mapping)) { 2630 unlock_page(page); 2631 continue; 2632 } 2633 2634 wait_on_page_writeback(page); 2635 BUG_ON(PageWriteback(page)); 2636 2637 if (mpd->map.m_len == 0) 2638 mpd->first_page = page->index; 2639 mpd->next_page = page->index + 1; 2640 /* Add all dirty buffers to mpd */ 2641 lblk = ((ext4_lblk_t)page->index) << 2642 (PAGE_SHIFT - blkbits); 2643 head = page_buffers(page); 2644 err = mpage_process_page_bufs(mpd, head, head, lblk); 2645 if (err <= 0) 2646 goto out; 2647 err = 0; 2648 left--; 2649 } 2650 pagevec_release(&pvec); 2651 cond_resched(); 2652 } 2653 return 0; 2654 out: 2655 pagevec_release(&pvec); 2656 return err; 2657 } 2658 2659 static int __writepage(struct page *page, struct writeback_control *wbc, 2660 void *data) 2661 { 2662 struct address_space *mapping = data; 2663 int ret = ext4_writepage(page, wbc); 2664 mapping_set_error(mapping, ret); 2665 return ret; 2666 } 2667 2668 static int ext4_writepages(struct address_space *mapping, 2669 struct writeback_control *wbc) 2670 { 2671 pgoff_t writeback_index = 0; 2672 long nr_to_write = wbc->nr_to_write; 2673 int range_whole = 0; 2674 int cycled = 1; 2675 handle_t *handle = NULL; 2676 struct mpage_da_data mpd; 2677 struct inode *inode = mapping->host; 2678 int needed_blocks, rsv_blocks = 0, ret = 0; 2679 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); 2680 bool done; 2681 struct blk_plug plug; 2682 bool give_up_on_write = false; 2683 2684 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) 2685 return -EIO; 2686 2687 percpu_down_read(&sbi->s_journal_flag_rwsem); 2688 trace_ext4_writepages(inode, wbc); 2689 2690 if (dax_mapping(mapping)) { 2691 ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, 2692 wbc); 2693 goto out_writepages; 2694 } 2695 2696 /* 2697 * No pages to write? This is mainly a kludge to avoid starting 2698 * a transaction for special inodes like journal inode on last iput() 2699 * because that could violate lock ordering on umount 2700 */ 2701 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 2702 goto out_writepages; 2703 2704 if (ext4_should_journal_data(inode)) { 2705 struct blk_plug plug; 2706 2707 blk_start_plug(&plug); 2708 ret = write_cache_pages(mapping, wbc, __writepage, mapping); 2709 blk_finish_plug(&plug); 2710 goto out_writepages; 2711 } 2712 2713 /* 2714 * If the filesystem has aborted, it is read-only, so return 2715 * right away instead of dumping stack traces later on that 2716 * will obscure the real source of the problem. We test 2717 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because 2718 * the latter could be true if the filesystem is mounted 2719 * read-only, and in that case, ext4_writepages should 2720 * *never* be called, so if that ever happens, we would want 2721 * the stack trace. 2722 */ 2723 if (unlikely(ext4_forced_shutdown(EXT4_SB(mapping->host->i_sb)) || 2724 sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) { 2725 ret = -EROFS; 2726 goto out_writepages; 2727 } 2728 2729 if (ext4_should_dioread_nolock(inode)) { 2730 /* 2731 * We may need to convert up to one extent per block in 2732 * the page and we may dirty the inode. 2733 */ 2734 rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits); 2735 } 2736 2737 /* 2738 * If we have inline data and arrive here, it means that 2739 * we will soon create the block for the 1st page, so 2740 * we'd better clear the inline data here. 2741 */ 2742 if (ext4_has_inline_data(inode)) { 2743 /* Just inode will be modified... */ 2744 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1); 2745 if (IS_ERR(handle)) { 2746 ret = PTR_ERR(handle); 2747 goto out_writepages; 2748 } 2749 BUG_ON(ext4_test_inode_state(inode, 2750 EXT4_STATE_MAY_INLINE_DATA)); 2751 ext4_destroy_inline_data(handle, inode); 2752 ext4_journal_stop(handle); 2753 } 2754 2755 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 2756 range_whole = 1; 2757 2758 if (wbc->range_cyclic) { 2759 writeback_index = mapping->writeback_index; 2760 if (writeback_index) 2761 cycled = 0; 2762 mpd.first_page = writeback_index; 2763 mpd.last_page = -1; 2764 } else { 2765 mpd.first_page = wbc->range_start >> PAGE_SHIFT; 2766 mpd.last_page = wbc->range_end >> PAGE_SHIFT; 2767 } 2768 2769 mpd.inode = inode; 2770 mpd.wbc = wbc; 2771 ext4_io_submit_init(&mpd.io_submit, wbc); 2772 retry: 2773 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 2774 tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page); 2775 done = false; 2776 blk_start_plug(&plug); 2777 2778 /* 2779 * First writeback pages that don't need mapping - we can avoid 2780 * starting a transaction unnecessarily and also avoid being blocked 2781 * in the block layer on device congestion while having transaction 2782 * started. 2783 */ 2784 mpd.do_map = 0; 2785 mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL); 2786 if (!mpd.io_submit.io_end) { 2787 ret = -ENOMEM; 2788 goto unplug; 2789 } 2790 ret = mpage_prepare_extent_to_map(&mpd); 2791 /* Submit prepared bio */ 2792 ext4_io_submit(&mpd.io_submit); 2793 ext4_put_io_end_defer(mpd.io_submit.io_end); 2794 mpd.io_submit.io_end = NULL; 2795 /* Unlock pages we didn't use */ 2796 mpage_release_unused_pages(&mpd, false); 2797 if (ret < 0) 2798 goto unplug; 2799 2800 while (!done && mpd.first_page <= mpd.last_page) { 2801 /* For each extent of pages we use new io_end */ 2802 mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL); 2803 if (!mpd.io_submit.io_end) { 2804 ret = -ENOMEM; 2805 break; 2806 } 2807 2808 /* 2809 * We have two constraints: We find one extent to map and we 2810 * must always write out whole page (makes a difference when 2811 * blocksize < pagesize) so that we don't block on IO when we 2812 * try to write out the rest of the page. Journalled mode is 2813 * not supported by delalloc. 2814 */ 2815 BUG_ON(ext4_should_journal_data(inode)); 2816 needed_blocks = ext4_da_writepages_trans_blocks(inode); 2817 2818 /* start a new transaction */ 2819 handle = ext4_journal_start_with_reserve(inode, 2820 EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks); 2821 if (IS_ERR(handle)) { 2822 ret = PTR_ERR(handle); 2823 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " 2824 "%ld pages, ino %lu; err %d", __func__, 2825 wbc->nr_to_write, inode->i_ino, ret); 2826 /* Release allocated io_end */ 2827 ext4_put_io_end(mpd.io_submit.io_end); 2828 mpd.io_submit.io_end = NULL; 2829 break; 2830 } 2831 mpd.do_map = 1; 2832 2833 trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc); 2834 ret = mpage_prepare_extent_to_map(&mpd); 2835 if (!ret) { 2836 if (mpd.map.m_len) 2837 ret = mpage_map_and_submit_extent(handle, &mpd, 2838 &give_up_on_write); 2839 else { 2840 /* 2841 * We scanned the whole range (or exhausted 2842 * nr_to_write), submitted what was mapped and 2843 * didn't find anything needing mapping. We are 2844 * done. 2845 */ 2846 done = true; 2847 } 2848 } 2849 /* 2850 * Caution: If the handle is synchronous, 2851 * ext4_journal_stop() can wait for transaction commit 2852 * to finish which may depend on writeback of pages to 2853 * complete or on page lock to be released. In that 2854 * case, we have to wait until after after we have 2855 * submitted all the IO, released page locks we hold, 2856 * and dropped io_end reference (for extent conversion 2857 * to be able to complete) before stopping the handle. 2858 */ 2859 if (!ext4_handle_valid(handle) || handle->h_sync == 0) { 2860 ext4_journal_stop(handle); 2861 handle = NULL; 2862 mpd.do_map = 0; 2863 } 2864 /* Submit prepared bio */ 2865 ext4_io_submit(&mpd.io_submit); 2866 /* Unlock pages we didn't use */ 2867 mpage_release_unused_pages(&mpd, give_up_on_write); 2868 /* 2869 * Drop our io_end reference we got from init. We have 2870 * to be careful and use deferred io_end finishing if 2871 * we are still holding the transaction as we can 2872 * release the last reference to io_end which may end 2873 * up doing unwritten extent conversion. 2874 */ 2875 if (handle) { 2876 ext4_put_io_end_defer(mpd.io_submit.io_end); 2877 ext4_journal_stop(handle); 2878 } else 2879 ext4_put_io_end(mpd.io_submit.io_end); 2880 mpd.io_submit.io_end = NULL; 2881 2882 if (ret == -ENOSPC && sbi->s_journal) { 2883 /* 2884 * Commit the transaction which would 2885 * free blocks released in the transaction 2886 * and try again 2887 */ 2888 jbd2_journal_force_commit_nested(sbi->s_journal); 2889 ret = 0; 2890 continue; 2891 } 2892 /* Fatal error - ENOMEM, EIO... */ 2893 if (ret) 2894 break; 2895 } 2896 unplug: 2897 blk_finish_plug(&plug); 2898 if (!ret && !cycled && wbc->nr_to_write > 0) { 2899 cycled = 1; 2900 mpd.last_page = writeback_index - 1; 2901 mpd.first_page = 0; 2902 goto retry; 2903 } 2904 2905 /* Update index */ 2906 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 2907 /* 2908 * Set the writeback_index so that range_cyclic 2909 * mode will write it back later 2910 */ 2911 mapping->writeback_index = mpd.first_page; 2912 2913 out_writepages: 2914 trace_ext4_writepages_result(inode, wbc, ret, 2915 nr_to_write - wbc->nr_to_write); 2916 percpu_up_read(&sbi->s_journal_flag_rwsem); 2917 return ret; 2918 } 2919 2920 static int ext4_nonda_switch(struct super_block *sb) 2921 { 2922 s64 free_clusters, dirty_clusters; 2923 struct ext4_sb_info *sbi = EXT4_SB(sb); 2924 2925 /* 2926 * switch to non delalloc mode if we are running low 2927 * on free block. The free block accounting via percpu 2928 * counters can get slightly wrong with percpu_counter_batch getting 2929 * accumulated on each CPU without updating global counters 2930 * Delalloc need an accurate free block accounting. So switch 2931 * to non delalloc when we are near to error range. 2932 */ 2933 free_clusters = 2934 percpu_counter_read_positive(&sbi->s_freeclusters_counter); 2935 dirty_clusters = 2936 percpu_counter_read_positive(&sbi->s_dirtyclusters_counter); 2937 /* 2938 * Start pushing delalloc when 1/2 of free blocks are dirty. 2939 */ 2940 if (dirty_clusters && (free_clusters < 2 * dirty_clusters)) 2941 try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE); 2942 2943 if (2 * free_clusters < 3 * dirty_clusters || 2944 free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) { 2945 /* 2946 * free block count is less than 150% of dirty blocks 2947 * or free blocks is less than watermark 2948 */ 2949 return 1; 2950 } 2951 return 0; 2952 } 2953 2954 /* We always reserve for an inode update; the superblock could be there too */ 2955 static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len) 2956 { 2957 if (likely(ext4_has_feature_large_file(inode->i_sb))) 2958 return 1; 2959 2960 if (pos + len <= 0x7fffffffULL) 2961 return 1; 2962 2963 /* We might need to update the superblock to set LARGE_FILE */ 2964 return 2; 2965 } 2966 2967 static int ext4_da_write_begin(struct file *file, struct address_space *mapping, 2968 loff_t pos, unsigned len, unsigned flags, 2969 struct page **pagep, void **fsdata) 2970 { 2971 int ret, retries = 0; 2972 struct page *page; 2973 pgoff_t index; 2974 struct inode *inode = mapping->host; 2975 handle_t *handle; 2976 2977 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) 2978 return -EIO; 2979 2980 index = pos >> PAGE_SHIFT; 2981 2982 if (ext4_nonda_switch(inode->i_sb) || 2983 S_ISLNK(inode->i_mode)) { 2984 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; 2985 return ext4_write_begin(file, mapping, pos, 2986 len, flags, pagep, fsdata); 2987 } 2988 *fsdata = (void *)0; 2989 trace_ext4_da_write_begin(inode, pos, len, flags); 2990 2991 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { 2992 ret = ext4_da_write_inline_data_begin(mapping, inode, 2993 pos, len, flags, 2994 pagep, fsdata); 2995 if (ret < 0) 2996 return ret; 2997 if (ret == 1) 2998 return 0; 2999 } 3000 3001 /* 3002 * grab_cache_page_write_begin() can take a long time if the 3003 * system is thrashing due to memory pressure, or if the page 3004 * is being written back. So grab it first before we start 3005 * the transaction handle. This also allows us to allocate 3006 * the page (if needed) without using GFP_NOFS. 3007 */ 3008 retry_grab: 3009 page = grab_cache_page_write_begin(mapping, index, flags); 3010 if (!page) 3011 return -ENOMEM; 3012 unlock_page(page); 3013 3014 /* 3015 * With delayed allocation, we don't log the i_disksize update 3016 * if there is delayed block allocation. But we still need 3017 * to journalling the i_disksize update if writes to the end 3018 * of file which has an already mapped buffer. 3019 */ 3020 retry_journal: 3021 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 3022 ext4_da_write_credits(inode, pos, len)); 3023 if (IS_ERR(handle)) { 3024 put_page(page); 3025 return PTR_ERR(handle); 3026 } 3027 3028 lock_page(page); 3029 if (page->mapping != mapping) { 3030 /* The page got truncated from under us */ 3031 unlock_page(page); 3032 put_page(page); 3033 ext4_journal_stop(handle); 3034 goto retry_grab; 3035 } 3036 /* In case writeback began while the page was unlocked */ 3037 wait_for_stable_page(page); 3038 3039 #ifdef CONFIG_EXT4_FS_ENCRYPTION 3040 ret = ext4_block_write_begin(page, pos, len, 3041 ext4_da_get_block_prep); 3042 #else 3043 ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep); 3044 #endif 3045 if (ret < 0) { 3046 unlock_page(page); 3047 ext4_journal_stop(handle); 3048 /* 3049 * block_write_begin may have instantiated a few blocks 3050 * outside i_size. Trim these off again. Don't need 3051 * i_size_read because we hold i_mutex. 3052 */ 3053 if (pos + len > inode->i_size) 3054 ext4_truncate_failed_write(inode); 3055 3056 if (ret == -ENOSPC && 3057 ext4_should_retry_alloc(inode->i_sb, &retries)) 3058 goto retry_journal; 3059 3060 put_page(page); 3061 return ret; 3062 } 3063 3064 *pagep = page; 3065 return ret; 3066 } 3067 3068 /* 3069 * Check if we should update i_disksize 3070 * when write to the end of file but not require block allocation 3071 */ 3072 static int ext4_da_should_update_i_disksize(struct page *page, 3073 unsigned long offset) 3074 { 3075 struct buffer_head *bh; 3076 struct inode *inode = page->mapping->host; 3077 unsigned int idx; 3078 int i; 3079 3080 bh = page_buffers(page); 3081 idx = offset >> inode->i_blkbits; 3082 3083 for (i = 0; i < idx; i++) 3084 bh = bh->b_this_page; 3085 3086 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh)) 3087 return 0; 3088 return 1; 3089 } 3090 3091 static int ext4_da_write_end(struct file *file, 3092 struct address_space *mapping, 3093 loff_t pos, unsigned len, unsigned copied, 3094 struct page *page, void *fsdata) 3095 { 3096 struct inode *inode = mapping->host; 3097 int ret = 0, ret2; 3098 handle_t *handle = ext4_journal_current_handle(); 3099 loff_t new_i_size; 3100 unsigned long start, end; 3101 int write_mode = (int)(unsigned long)fsdata; 3102 3103 if (write_mode == FALL_BACK_TO_NONDELALLOC) 3104 return ext4_write_end(file, mapping, pos, 3105 len, copied, page, fsdata); 3106 3107 trace_ext4_da_write_end(inode, pos, len, copied); 3108 start = pos & (PAGE_SIZE - 1); 3109 end = start + copied - 1; 3110 3111 /* 3112 * generic_write_end() will run mark_inode_dirty() if i_size 3113 * changes. So let's piggyback the i_disksize mark_inode_dirty 3114 * into that. 3115 */ 3116 new_i_size = pos + copied; 3117 if (copied && new_i_size > EXT4_I(inode)->i_disksize) { 3118 if (ext4_has_inline_data(inode) || 3119 ext4_da_should_update_i_disksize(page, end)) { 3120 ext4_update_i_disksize(inode, new_i_size); 3121 /* We need to mark inode dirty even if 3122 * new_i_size is less that inode->i_size 3123 * bu greater than i_disksize.(hint delalloc) 3124 */ 3125 ext4_mark_inode_dirty(handle, inode); 3126 } 3127 } 3128 3129 if (write_mode != CONVERT_INLINE_DATA && 3130 ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) && 3131 ext4_has_inline_data(inode)) 3132 ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied, 3133 page); 3134 else 3135 ret2 = generic_write_end(file, mapping, pos, len, copied, 3136 page, fsdata); 3137 3138 copied = ret2; 3139 if (ret2 < 0) 3140 ret = ret2; 3141 ret2 = ext4_journal_stop(handle); 3142 if (!ret) 3143 ret = ret2; 3144 3145 return ret ? ret : copied; 3146 } 3147 3148 static void ext4_da_invalidatepage(struct page *page, unsigned int offset, 3149 unsigned int length) 3150 { 3151 /* 3152 * Drop reserved blocks 3153 */ 3154 BUG_ON(!PageLocked(page)); 3155 if (!page_has_buffers(page)) 3156 goto out; 3157 3158 ext4_da_page_release_reservation(page, offset, length); 3159 3160 out: 3161 ext4_invalidatepage(page, offset, length); 3162 3163 return; 3164 } 3165 3166 /* 3167 * Force all delayed allocation blocks to be allocated for a given inode. 3168 */ 3169 int ext4_alloc_da_blocks(struct inode *inode) 3170 { 3171 trace_ext4_alloc_da_blocks(inode); 3172 3173 if (!EXT4_I(inode)->i_reserved_data_blocks) 3174 return 0; 3175 3176 /* 3177 * We do something simple for now. The filemap_flush() will 3178 * also start triggering a write of the data blocks, which is 3179 * not strictly speaking necessary (and for users of 3180 * laptop_mode, not even desirable). However, to do otherwise 3181 * would require replicating code paths in: 3182 * 3183 * ext4_writepages() -> 3184 * write_cache_pages() ---> (via passed in callback function) 3185 * __mpage_da_writepage() --> 3186 * mpage_add_bh_to_extent() 3187 * mpage_da_map_blocks() 3188 * 3189 * The problem is that write_cache_pages(), located in 3190 * mm/page-writeback.c, marks pages clean in preparation for 3191 * doing I/O, which is not desirable if we're not planning on 3192 * doing I/O at all. 3193 * 3194 * We could call write_cache_pages(), and then redirty all of 3195 * the pages by calling redirty_page_for_writepage() but that 3196 * would be ugly in the extreme. So instead we would need to 3197 * replicate parts of the code in the above functions, 3198 * simplifying them because we wouldn't actually intend to 3199 * write out the pages, but rather only collect contiguous 3200 * logical block extents, call the multi-block allocator, and 3201 * then update the buffer heads with the block allocations. 3202 * 3203 * For now, though, we'll cheat by calling filemap_flush(), 3204 * which will map the blocks, and start the I/O, but not 3205 * actually wait for the I/O to complete. 3206 */ 3207 return filemap_flush(inode->i_mapping); 3208 } 3209 3210 /* 3211 * bmap() is special. It gets used by applications such as lilo and by 3212 * the swapper to find the on-disk block of a specific piece of data. 3213 * 3214 * Naturally, this is dangerous if the block concerned is still in the 3215 * journal. If somebody makes a swapfile on an ext4 data-journaling 3216 * filesystem and enables swap, then they may get a nasty shock when the 3217 * data getting swapped to that swapfile suddenly gets overwritten by 3218 * the original zero's written out previously to the journal and 3219 * awaiting writeback in the kernel's buffer cache. 3220 * 3221 * So, if we see any bmap calls here on a modified, data-journaled file, 3222 * take extra steps to flush any blocks which might be in the cache. 3223 */ 3224 static sector_t ext4_bmap(struct address_space *mapping, sector_t block) 3225 { 3226 struct inode *inode = mapping->host; 3227 journal_t *journal; 3228 int err; 3229 3230 /* 3231 * We can get here for an inline file via the FIBMAP ioctl 3232 */ 3233 if (ext4_has_inline_data(inode)) 3234 return 0; 3235 3236 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && 3237 test_opt(inode->i_sb, DELALLOC)) { 3238 /* 3239 * With delalloc we want to sync the file 3240 * so that we can make sure we allocate 3241 * blocks for file 3242 */ 3243 filemap_write_and_wait(mapping); 3244 } 3245 3246 if (EXT4_JOURNAL(inode) && 3247 ext4_test_inode_state(inode, EXT4_STATE_JDATA)) { 3248 /* 3249 * This is a REALLY heavyweight approach, but the use of 3250 * bmap on dirty files is expected to be extremely rare: 3251 * only if we run lilo or swapon on a freshly made file 3252 * do we expect this to happen. 3253 * 3254 * (bmap requires CAP_SYS_RAWIO so this does not 3255 * represent an unprivileged user DOS attack --- we'd be 3256 * in trouble if mortal users could trigger this path at 3257 * will.) 3258 * 3259 * NB. EXT4_STATE_JDATA is not set on files other than 3260 * regular files. If somebody wants to bmap a directory 3261 * or symlink and gets confused because the buffer 3262 * hasn't yet been flushed to disk, they deserve 3263 * everything they get. 3264 */ 3265 3266 ext4_clear_inode_state(inode, EXT4_STATE_JDATA); 3267 journal = EXT4_JOURNAL(inode); 3268 jbd2_journal_lock_updates(journal); 3269 err = jbd2_journal_flush(journal); 3270 jbd2_journal_unlock_updates(journal); 3271 3272 if (err) 3273 return 0; 3274 } 3275 3276 return generic_block_bmap(mapping, block, ext4_get_block); 3277 } 3278 3279 static int ext4_readpage(struct file *file, struct page *page) 3280 { 3281 int ret = -EAGAIN; 3282 struct inode *inode = page->mapping->host; 3283 3284 trace_ext4_readpage(page); 3285 3286 if (ext4_has_inline_data(inode)) 3287 ret = ext4_readpage_inline(inode, page); 3288 3289 if (ret == -EAGAIN) 3290 return ext4_mpage_readpages(page->mapping, NULL, page, 1); 3291 3292 return ret; 3293 } 3294 3295 static int 3296 ext4_readpages(struct file *file, struct address_space *mapping, 3297 struct list_head *pages, unsigned nr_pages) 3298 { 3299 struct inode *inode = mapping->host; 3300 3301 /* If the file has inline data, no need to do readpages. */ 3302 if (ext4_has_inline_data(inode)) 3303 return 0; 3304 3305 return ext4_mpage_readpages(mapping, pages, NULL, nr_pages); 3306 } 3307 3308 static void ext4_invalidatepage(struct page *page, unsigned int offset, 3309 unsigned int length) 3310 { 3311 trace_ext4_invalidatepage(page, offset, length); 3312 3313 /* No journalling happens on data buffers when this function is used */ 3314 WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page))); 3315 3316 block_invalidatepage(page, offset, length); 3317 } 3318 3319 static int __ext4_journalled_invalidatepage(struct page *page, 3320 unsigned int offset, 3321 unsigned int length) 3322 { 3323 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3324 3325 trace_ext4_journalled_invalidatepage(page, offset, length); 3326 3327 /* 3328 * If it's a full truncate we just forget about the pending dirtying 3329 */ 3330 if (offset == 0 && length == PAGE_SIZE) 3331 ClearPageChecked(page); 3332 3333 return jbd2_journal_invalidatepage(journal, page, offset, length); 3334 } 3335 3336 /* Wrapper for aops... */ 3337 static void ext4_journalled_invalidatepage(struct page *page, 3338 unsigned int offset, 3339 unsigned int length) 3340 { 3341 WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0); 3342 } 3343 3344 static int ext4_releasepage(struct page *page, gfp_t wait) 3345 { 3346 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3347 3348 trace_ext4_releasepage(page); 3349 3350 /* Page has dirty journalled data -> cannot release */ 3351 if (PageChecked(page)) 3352 return 0; 3353 if (journal) 3354 return jbd2_journal_try_to_free_buffers(journal, page, wait); 3355 else 3356 return try_to_free_buffers(page); 3357 } 3358 3359 #ifdef CONFIG_FS_DAX 3360 static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length, 3361 unsigned flags, struct iomap *iomap) 3362 { 3363 struct block_device *bdev; 3364 unsigned int blkbits = inode->i_blkbits; 3365 unsigned long first_block = offset >> blkbits; 3366 unsigned long last_block = (offset + length - 1) >> blkbits; 3367 struct ext4_map_blocks map; 3368 int ret; 3369 3370 if (WARN_ON_ONCE(ext4_has_inline_data(inode))) 3371 return -ERANGE; 3372 3373 map.m_lblk = first_block; 3374 map.m_len = last_block - first_block + 1; 3375 3376 if (!(flags & IOMAP_WRITE)) { 3377 ret = ext4_map_blocks(NULL, inode, &map, 0); 3378 } else { 3379 int dio_credits; 3380 handle_t *handle; 3381 int retries = 0; 3382 3383 /* Trim mapping request to maximum we can map at once for DIO */ 3384 if (map.m_len > DIO_MAX_BLOCKS) 3385 map.m_len = DIO_MAX_BLOCKS; 3386 dio_credits = ext4_chunk_trans_blocks(inode, map.m_len); 3387 retry: 3388 /* 3389 * Either we allocate blocks and then we don't get unwritten 3390 * extent so we have reserved enough credits, or the blocks 3391 * are already allocated and unwritten and in that case 3392 * extent conversion fits in the credits as well. 3393 */ 3394 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 3395 dio_credits); 3396 if (IS_ERR(handle)) 3397 return PTR_ERR(handle); 3398 3399 ret = ext4_map_blocks(handle, inode, &map, 3400 EXT4_GET_BLOCKS_CREATE_ZERO); 3401 if (ret < 0) { 3402 ext4_journal_stop(handle); 3403 if (ret == -ENOSPC && 3404 ext4_should_retry_alloc(inode->i_sb, &retries)) 3405 goto retry; 3406 return ret; 3407 } 3408 3409 /* 3410 * If we added blocks beyond i_size, we need to make sure they 3411 * will get truncated if we crash before updating i_size in 3412 * ext4_iomap_end(). For faults we don't need to do that (and 3413 * even cannot because for orphan list operations inode_lock is 3414 * required) - if we happen to instantiate block beyond i_size, 3415 * it is because we race with truncate which has already added 3416 * the inode to the orphan list. 3417 */ 3418 if (!(flags & IOMAP_FAULT) && first_block + map.m_len > 3419 (i_size_read(inode) + (1 << blkbits) - 1) >> blkbits) { 3420 int err; 3421 3422 err = ext4_orphan_add(handle, inode); 3423 if (err < 0) { 3424 ext4_journal_stop(handle); 3425 return err; 3426 } 3427 } 3428 ext4_journal_stop(handle); 3429 } 3430 3431 iomap->flags = 0; 3432 bdev = inode->i_sb->s_bdev; 3433 iomap->bdev = bdev; 3434 if (blk_queue_dax(bdev->bd_queue)) 3435 iomap->dax_dev = fs_dax_get_by_host(bdev->bd_disk->disk_name); 3436 else 3437 iomap->dax_dev = NULL; 3438 iomap->offset = first_block << blkbits; 3439 3440 if (ret == 0) { 3441 iomap->type = IOMAP_HOLE; 3442 iomap->blkno = IOMAP_NULL_BLOCK; 3443 iomap->length = (u64)map.m_len << blkbits; 3444 } else { 3445 if (map.m_flags & EXT4_MAP_MAPPED) { 3446 iomap->type = IOMAP_MAPPED; 3447 } else if (map.m_flags & EXT4_MAP_UNWRITTEN) { 3448 iomap->type = IOMAP_UNWRITTEN; 3449 } else { 3450 WARN_ON_ONCE(1); 3451 return -EIO; 3452 } 3453 iomap->blkno = (sector_t)map.m_pblk << (blkbits - 9); 3454 iomap->length = (u64)map.m_len << blkbits; 3455 } 3456 3457 if (map.m_flags & EXT4_MAP_NEW) 3458 iomap->flags |= IOMAP_F_NEW; 3459 return 0; 3460 } 3461 3462 static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length, 3463 ssize_t written, unsigned flags, struct iomap *iomap) 3464 { 3465 int ret = 0; 3466 handle_t *handle; 3467 int blkbits = inode->i_blkbits; 3468 bool truncate = false; 3469 3470 fs_put_dax(iomap->dax_dev); 3471 if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT)) 3472 return 0; 3473 3474 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); 3475 if (IS_ERR(handle)) { 3476 ret = PTR_ERR(handle); 3477 goto orphan_del; 3478 } 3479 if (ext4_update_inode_size(inode, offset + written)) 3480 ext4_mark_inode_dirty(handle, inode); 3481 /* 3482 * We may need to truncate allocated but not written blocks beyond EOF. 3483 */ 3484 if (iomap->offset + iomap->length > 3485 ALIGN(inode->i_size, 1 << blkbits)) { 3486 ext4_lblk_t written_blk, end_blk; 3487 3488 written_blk = (offset + written) >> blkbits; 3489 end_blk = (offset + length) >> blkbits; 3490 if (written_blk < end_blk && ext4_can_truncate(inode)) 3491 truncate = true; 3492 } 3493 /* 3494 * Remove inode from orphan list if we were extending a inode and 3495 * everything went fine. 3496 */ 3497 if (!truncate && inode->i_nlink && 3498 !list_empty(&EXT4_I(inode)->i_orphan)) 3499 ext4_orphan_del(handle, inode); 3500 ext4_journal_stop(handle); 3501 if (truncate) { 3502 ext4_truncate_failed_write(inode); 3503 orphan_del: 3504 /* 3505 * If truncate failed early the inode might still be on the 3506 * orphan list; we need to make sure the inode is removed from 3507 * the orphan list in that case. 3508 */ 3509 if (inode->i_nlink) 3510 ext4_orphan_del(NULL, inode); 3511 } 3512 return ret; 3513 } 3514 3515 const struct iomap_ops ext4_iomap_ops = { 3516 .iomap_begin = ext4_iomap_begin, 3517 .iomap_end = ext4_iomap_end, 3518 }; 3519 3520 #endif 3521 3522 static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset, 3523 ssize_t size, void *private) 3524 { 3525 ext4_io_end_t *io_end = private; 3526 3527 /* if not async direct IO just return */ 3528 if (!io_end) 3529 return 0; 3530 3531 ext_debug("ext4_end_io_dio(): io_end 0x%p " 3532 "for inode %lu, iocb 0x%p, offset %llu, size %zd\n", 3533 io_end, io_end->inode->i_ino, iocb, offset, size); 3534 3535 /* 3536 * Error during AIO DIO. We cannot convert unwritten extents as the 3537 * data was not written. Just clear the unwritten flag and drop io_end. 3538 */ 3539 if (size <= 0) { 3540 ext4_clear_io_unwritten_flag(io_end); 3541 size = 0; 3542 } 3543 io_end->offset = offset; 3544 io_end->size = size; 3545 ext4_put_io_end(io_end); 3546 3547 return 0; 3548 } 3549 3550 /* 3551 * Handling of direct IO writes. 3552 * 3553 * For ext4 extent files, ext4 will do direct-io write even to holes, 3554 * preallocated extents, and those write extend the file, no need to 3555 * fall back to buffered IO. 3556 * 3557 * For holes, we fallocate those blocks, mark them as unwritten 3558 * If those blocks were preallocated, we mark sure they are split, but 3559 * still keep the range to write as unwritten. 3560 * 3561 * The unwritten extents will be converted to written when DIO is completed. 3562 * For async direct IO, since the IO may still pending when return, we 3563 * set up an end_io call back function, which will do the conversion 3564 * when async direct IO completed. 3565 * 3566 * If the O_DIRECT write will extend the file then add this inode to the 3567 * orphan list. So recovery will truncate it back to the original size 3568 * if the machine crashes during the write. 3569 * 3570 */ 3571 static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter) 3572 { 3573 struct file *file = iocb->ki_filp; 3574 struct inode *inode = file->f_mapping->host; 3575 struct ext4_inode_info *ei = EXT4_I(inode); 3576 ssize_t ret; 3577 loff_t offset = iocb->ki_pos; 3578 size_t count = iov_iter_count(iter); 3579 int overwrite = 0; 3580 get_block_t *get_block_func = NULL; 3581 int dio_flags = 0; 3582 loff_t final_size = offset + count; 3583 int orphan = 0; 3584 handle_t *handle; 3585 3586 if (final_size > inode->i_size) { 3587 /* Credits for sb + inode write */ 3588 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); 3589 if (IS_ERR(handle)) { 3590 ret = PTR_ERR(handle); 3591 goto out; 3592 } 3593 ret = ext4_orphan_add(handle, inode); 3594 if (ret) { 3595 ext4_journal_stop(handle); 3596 goto out; 3597 } 3598 orphan = 1; 3599 ei->i_disksize = inode->i_size; 3600 ext4_journal_stop(handle); 3601 } 3602 3603 BUG_ON(iocb->private == NULL); 3604 3605 /* 3606 * Make all waiters for direct IO properly wait also for extent 3607 * conversion. This also disallows race between truncate() and 3608 * overwrite DIO as i_dio_count needs to be incremented under i_mutex. 3609 */ 3610 inode_dio_begin(inode); 3611 3612 /* If we do a overwrite dio, i_mutex locking can be released */ 3613 overwrite = *((int *)iocb->private); 3614 3615 if (overwrite) 3616 inode_unlock(inode); 3617 3618 /* 3619 * For extent mapped files we could direct write to holes and fallocate. 3620 * 3621 * Allocated blocks to fill the hole are marked as unwritten to prevent 3622 * parallel buffered read to expose the stale data before DIO complete 3623 * the data IO. 3624 * 3625 * As to previously fallocated extents, ext4 get_block will just simply 3626 * mark the buffer mapped but still keep the extents unwritten. 3627 * 3628 * For non AIO case, we will convert those unwritten extents to written 3629 * after return back from blockdev_direct_IO. That way we save us from 3630 * allocating io_end structure and also the overhead of offloading 3631 * the extent convertion to a workqueue. 3632 * 3633 * For async DIO, the conversion needs to be deferred when the 3634 * IO is completed. The ext4 end_io callback function will be 3635 * called to take care of the conversion work. Here for async 3636 * case, we allocate an io_end structure to hook to the iocb. 3637 */ 3638 iocb->private = NULL; 3639 if (overwrite) 3640 get_block_func = ext4_dio_get_block_overwrite; 3641 else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) || 3642 round_down(offset, i_blocksize(inode)) >= inode->i_size) { 3643 get_block_func = ext4_dio_get_block; 3644 dio_flags = DIO_LOCKING | DIO_SKIP_HOLES; 3645 } else if (is_sync_kiocb(iocb)) { 3646 get_block_func = ext4_dio_get_block_unwritten_sync; 3647 dio_flags = DIO_LOCKING; 3648 } else { 3649 get_block_func = ext4_dio_get_block_unwritten_async; 3650 dio_flags = DIO_LOCKING; 3651 } 3652 ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, 3653 get_block_func, ext4_end_io_dio, NULL, 3654 dio_flags); 3655 3656 if (ret > 0 && !overwrite && ext4_test_inode_state(inode, 3657 EXT4_STATE_DIO_UNWRITTEN)) { 3658 int err; 3659 /* 3660 * for non AIO case, since the IO is already 3661 * completed, we could do the conversion right here 3662 */ 3663 err = ext4_convert_unwritten_extents(NULL, inode, 3664 offset, ret); 3665 if (err < 0) 3666 ret = err; 3667 ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); 3668 } 3669 3670 inode_dio_end(inode); 3671 /* take i_mutex locking again if we do a ovewrite dio */ 3672 if (overwrite) 3673 inode_lock(inode); 3674 3675 if (ret < 0 && final_size > inode->i_size) 3676 ext4_truncate_failed_write(inode); 3677 3678 /* Handle extending of i_size after direct IO write */ 3679 if (orphan) { 3680 int err; 3681 3682 /* Credits for sb + inode write */ 3683 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); 3684 if (IS_ERR(handle)) { 3685 /* This is really bad luck. We've written the data 3686 * but cannot extend i_size. Bail out and pretend 3687 * the write failed... */ 3688 ret = PTR_ERR(handle); 3689 if (inode->i_nlink) 3690 ext4_orphan_del(NULL, inode); 3691 3692 goto out; 3693 } 3694 if (inode->i_nlink) 3695 ext4_orphan_del(handle, inode); 3696 if (ret > 0) { 3697 loff_t end = offset + ret; 3698 if (end > inode->i_size) { 3699 ei->i_disksize = end; 3700 i_size_write(inode, end); 3701 /* 3702 * We're going to return a positive `ret' 3703 * here due to non-zero-length I/O, so there's 3704 * no way of reporting error returns from 3705 * ext4_mark_inode_dirty() to userspace. So 3706 * ignore it. 3707 */ 3708 ext4_mark_inode_dirty(handle, inode); 3709 } 3710 } 3711 err = ext4_journal_stop(handle); 3712 if (ret == 0) 3713 ret = err; 3714 } 3715 out: 3716 return ret; 3717 } 3718 3719 static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter) 3720 { 3721 struct address_space *mapping = iocb->ki_filp->f_mapping; 3722 struct inode *inode = mapping->host; 3723 size_t count = iov_iter_count(iter); 3724 ssize_t ret; 3725 3726 /* 3727 * Shared inode_lock is enough for us - it protects against concurrent 3728 * writes & truncates and since we take care of writing back page cache, 3729 * we are protected against page writeback as well. 3730 */ 3731 inode_lock_shared(inode); 3732 ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, 3733 iocb->ki_pos + count - 1); 3734 if (ret) 3735 goto out_unlock; 3736 ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, 3737 iter, ext4_dio_get_block, NULL, NULL, 0); 3738 out_unlock: 3739 inode_unlock_shared(inode); 3740 return ret; 3741 } 3742 3743 static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter) 3744 { 3745 struct file *file = iocb->ki_filp; 3746 struct inode *inode = file->f_mapping->host; 3747 size_t count = iov_iter_count(iter); 3748 loff_t offset = iocb->ki_pos; 3749 ssize_t ret; 3750 3751 #ifdef CONFIG_EXT4_FS_ENCRYPTION 3752 if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) 3753 return 0; 3754 #endif 3755 3756 /* 3757 * If we are doing data journalling we don't support O_DIRECT 3758 */ 3759 if (ext4_should_journal_data(inode)) 3760 return 0; 3761 3762 /* Let buffer I/O handle the inline data case. */ 3763 if (ext4_has_inline_data(inode)) 3764 return 0; 3765 3766 /* DAX uses iomap path now */ 3767 if (WARN_ON_ONCE(IS_DAX(inode))) 3768 return 0; 3769 3770 trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter)); 3771 if (iov_iter_rw(iter) == READ) 3772 ret = ext4_direct_IO_read(iocb, iter); 3773 else 3774 ret = ext4_direct_IO_write(iocb, iter); 3775 trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret); 3776 return ret; 3777 } 3778 3779 /* 3780 * Pages can be marked dirty completely asynchronously from ext4's journalling 3781 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do 3782 * much here because ->set_page_dirty is called under VFS locks. The page is 3783 * not necessarily locked. 3784 * 3785 * We cannot just dirty the page and leave attached buffers clean, because the 3786 * buffers' dirty state is "definitive". We cannot just set the buffers dirty 3787 * or jbddirty because all the journalling code will explode. 3788 * 3789 * So what we do is to mark the page "pending dirty" and next time writepage 3790 * is called, propagate that into the buffers appropriately. 3791 */ 3792 static int ext4_journalled_set_page_dirty(struct page *page) 3793 { 3794 SetPageChecked(page); 3795 return __set_page_dirty_nobuffers(page); 3796 } 3797 3798 static int ext4_set_page_dirty(struct page *page) 3799 { 3800 WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page)); 3801 WARN_ON_ONCE(!page_has_buffers(page)); 3802 return __set_page_dirty_buffers(page); 3803 } 3804 3805 static const struct address_space_operations ext4_aops = { 3806 .readpage = ext4_readpage, 3807 .readpages = ext4_readpages, 3808 .writepage = ext4_writepage, 3809 .writepages = ext4_writepages, 3810 .write_begin = ext4_write_begin, 3811 .write_end = ext4_write_end, 3812 .set_page_dirty = ext4_set_page_dirty, 3813 .bmap = ext4_bmap, 3814 .invalidatepage = ext4_invalidatepage, 3815 .releasepage = ext4_releasepage, 3816 .direct_IO = ext4_direct_IO, 3817 .migratepage = buffer_migrate_page, 3818 .is_partially_uptodate = block_is_partially_uptodate, 3819 .error_remove_page = generic_error_remove_page, 3820 }; 3821 3822 static const struct address_space_operations ext4_journalled_aops = { 3823 .readpage = ext4_readpage, 3824 .readpages = ext4_readpages, 3825 .writepage = ext4_writepage, 3826 .writepages = ext4_writepages, 3827 .write_begin = ext4_write_begin, 3828 .write_end = ext4_journalled_write_end, 3829 .set_page_dirty = ext4_journalled_set_page_dirty, 3830 .bmap = ext4_bmap, 3831 .invalidatepage = ext4_journalled_invalidatepage, 3832 .releasepage = ext4_releasepage, 3833 .direct_IO = ext4_direct_IO, 3834 .is_partially_uptodate = block_is_partially_uptodate, 3835 .error_remove_page = generic_error_remove_page, 3836 }; 3837 3838 static const struct address_space_operations ext4_da_aops = { 3839 .readpage = ext4_readpage, 3840 .readpages = ext4_readpages, 3841 .writepage = ext4_writepage, 3842 .writepages = ext4_writepages, 3843 .write_begin = ext4_da_write_begin, 3844 .write_end = ext4_da_write_end, 3845 .set_page_dirty = ext4_set_page_dirty, 3846 .bmap = ext4_bmap, 3847 .invalidatepage = ext4_da_invalidatepage, 3848 .releasepage = ext4_releasepage, 3849 .direct_IO = ext4_direct_IO, 3850 .migratepage = buffer_migrate_page, 3851 .is_partially_uptodate = block_is_partially_uptodate, 3852 .error_remove_page = generic_error_remove_page, 3853 }; 3854 3855 void ext4_set_aops(struct inode *inode) 3856 { 3857 switch (ext4_inode_journal_mode(inode)) { 3858 case EXT4_INODE_ORDERED_DATA_MODE: 3859 case EXT4_INODE_WRITEBACK_DATA_MODE: 3860 break; 3861 case EXT4_INODE_JOURNAL_DATA_MODE: 3862 inode->i_mapping->a_ops = &ext4_journalled_aops; 3863 return; 3864 default: 3865 BUG(); 3866 } 3867 if (test_opt(inode->i_sb, DELALLOC)) 3868 inode->i_mapping->a_ops = &ext4_da_aops; 3869 else 3870 inode->i_mapping->a_ops = &ext4_aops; 3871 } 3872 3873 static int __ext4_block_zero_page_range(handle_t *handle, 3874 struct address_space *mapping, loff_t from, loff_t length) 3875 { 3876 ext4_fsblk_t index = from >> PAGE_SHIFT; 3877 unsigned offset = from & (PAGE_SIZE-1); 3878 unsigned blocksize, pos; 3879 ext4_lblk_t iblock; 3880 struct inode *inode = mapping->host; 3881 struct buffer_head *bh; 3882 struct page *page; 3883 int err = 0; 3884 3885 page = find_or_create_page(mapping, from >> PAGE_SHIFT, 3886 mapping_gfp_constraint(mapping, ~__GFP_FS)); 3887 if (!page) 3888 return -ENOMEM; 3889 3890 blocksize = inode->i_sb->s_blocksize; 3891 3892 iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits); 3893 3894 if (!page_has_buffers(page)) 3895 create_empty_buffers(page, blocksize, 0); 3896 3897 /* Find the buffer that contains "offset" */ 3898 bh = page_buffers(page); 3899 pos = blocksize; 3900 while (offset >= pos) { 3901 bh = bh->b_this_page; 3902 iblock++; 3903 pos += blocksize; 3904 } 3905 if (buffer_freed(bh)) { 3906 BUFFER_TRACE(bh, "freed: skip"); 3907 goto unlock; 3908 } 3909 if (!buffer_mapped(bh)) { 3910 BUFFER_TRACE(bh, "unmapped"); 3911 ext4_get_block(inode, iblock, bh, 0); 3912 /* unmapped? It's a hole - nothing to do */ 3913 if (!buffer_mapped(bh)) { 3914 BUFFER_TRACE(bh, "still unmapped"); 3915 goto unlock; 3916 } 3917 } 3918 3919 /* Ok, it's mapped. Make sure it's up-to-date */ 3920 if (PageUptodate(page)) 3921 set_buffer_uptodate(bh); 3922 3923 if (!buffer_uptodate(bh)) { 3924 err = -EIO; 3925 ll_rw_block(REQ_OP_READ, 0, 1, &bh); 3926 wait_on_buffer(bh); 3927 /* Uhhuh. Read error. Complain and punt. */ 3928 if (!buffer_uptodate(bh)) 3929 goto unlock; 3930 if (S_ISREG(inode->i_mode) && 3931 ext4_encrypted_inode(inode)) { 3932 /* We expect the key to be set. */ 3933 BUG_ON(!fscrypt_has_encryption_key(inode)); 3934 BUG_ON(blocksize != PAGE_SIZE); 3935 WARN_ON_ONCE(fscrypt_decrypt_page(page->mapping->host, 3936 page, PAGE_SIZE, 0, page->index)); 3937 } 3938 } 3939 if (ext4_should_journal_data(inode)) { 3940 BUFFER_TRACE(bh, "get write access"); 3941 err = ext4_journal_get_write_access(handle, bh); 3942 if (err) 3943 goto unlock; 3944 } 3945 zero_user(page, offset, length); 3946 BUFFER_TRACE(bh, "zeroed end of block"); 3947 3948 if (ext4_should_journal_data(inode)) { 3949 err = ext4_handle_dirty_metadata(handle, inode, bh); 3950 } else { 3951 err = 0; 3952 mark_buffer_dirty(bh); 3953 if (ext4_should_order_data(inode)) 3954 err = ext4_jbd2_inode_add_write(handle, inode); 3955 } 3956 3957 unlock: 3958 unlock_page(page); 3959 put_page(page); 3960 return err; 3961 } 3962 3963 /* 3964 * ext4_block_zero_page_range() zeros out a mapping of length 'length' 3965 * starting from file offset 'from'. The range to be zero'd must 3966 * be contained with in one block. If the specified range exceeds 3967 * the end of the block it will be shortened to end of the block 3968 * that cooresponds to 'from' 3969 */ 3970 static int ext4_block_zero_page_range(handle_t *handle, 3971 struct address_space *mapping, loff_t from, loff_t length) 3972 { 3973 struct inode *inode = mapping->host; 3974 unsigned offset = from & (PAGE_SIZE-1); 3975 unsigned blocksize = inode->i_sb->s_blocksize; 3976 unsigned max = blocksize - (offset & (blocksize - 1)); 3977 3978 /* 3979 * correct length if it does not fall between 3980 * 'from' and the end of the block 3981 */ 3982 if (length > max || length < 0) 3983 length = max; 3984 3985 if (IS_DAX(inode)) { 3986 return iomap_zero_range(inode, from, length, NULL, 3987 &ext4_iomap_ops); 3988 } 3989 return __ext4_block_zero_page_range(handle, mapping, from, length); 3990 } 3991 3992 /* 3993 * ext4_block_truncate_page() zeroes out a mapping from file offset `from' 3994 * up to the end of the block which corresponds to `from'. 3995 * This required during truncate. We need to physically zero the tail end 3996 * of that block so it doesn't yield old data if the file is later grown. 3997 */ 3998 static int ext4_block_truncate_page(handle_t *handle, 3999 struct address_space *mapping, loff_t from) 4000 { 4001 unsigned offset = from & (PAGE_SIZE-1); 4002 unsigned length; 4003 unsigned blocksize; 4004 struct inode *inode = mapping->host; 4005 4006 /* If we are processing an encrypted inode during orphan list handling */ 4007 if (ext4_encrypted_inode(inode) && !fscrypt_has_encryption_key(inode)) 4008 return 0; 4009 4010 blocksize = inode->i_sb->s_blocksize; 4011 length = blocksize - (offset & (blocksize - 1)); 4012 4013 return ext4_block_zero_page_range(handle, mapping, from, length); 4014 } 4015 4016 int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode, 4017 loff_t lstart, loff_t length) 4018 { 4019 struct super_block *sb = inode->i_sb; 4020 struct address_space *mapping = inode->i_mapping; 4021 unsigned partial_start, partial_end; 4022 ext4_fsblk_t start, end; 4023 loff_t byte_end = (lstart + length - 1); 4024 int err = 0; 4025 4026 partial_start = lstart & (sb->s_blocksize - 1); 4027 partial_end = byte_end & (sb->s_blocksize - 1); 4028 4029 start = lstart >> sb->s_blocksize_bits; 4030 end = byte_end >> sb->s_blocksize_bits; 4031 4032 /* Handle partial zero within the single block */ 4033 if (start == end && 4034 (partial_start || (partial_end != sb->s_blocksize - 1))) { 4035 err = ext4_block_zero_page_range(handle, mapping, 4036 lstart, length); 4037 return err; 4038 } 4039 /* Handle partial zero out on the start of the range */ 4040 if (partial_start) { 4041 err = ext4_block_zero_page_range(handle, mapping, 4042 lstart, sb->s_blocksize); 4043 if (err) 4044 return err; 4045 } 4046 /* Handle partial zero out on the end of the range */ 4047 if (partial_end != sb->s_blocksize - 1) 4048 err = ext4_block_zero_page_range(handle, mapping, 4049 byte_end - partial_end, 4050 partial_end + 1); 4051 return err; 4052 } 4053 4054 int ext4_can_truncate(struct inode *inode) 4055 { 4056 if (S_ISREG(inode->i_mode)) 4057 return 1; 4058 if (S_ISDIR(inode->i_mode)) 4059 return 1; 4060 if (S_ISLNK(inode->i_mode)) 4061 return !ext4_inode_is_fast_symlink(inode); 4062 return 0; 4063 } 4064 4065 /* 4066 * We have to make sure i_disksize gets properly updated before we truncate 4067 * page cache due to hole punching or zero range. Otherwise i_disksize update 4068 * can get lost as it may have been postponed to submission of writeback but 4069 * that will never happen after we truncate page cache. 4070 */ 4071 int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset, 4072 loff_t len) 4073 { 4074 handle_t *handle; 4075 loff_t size = i_size_read(inode); 4076 4077 WARN_ON(!inode_is_locked(inode)); 4078 if (offset > size || offset + len < size) 4079 return 0; 4080 4081 if (EXT4_I(inode)->i_disksize >= size) 4082 return 0; 4083 4084 handle = ext4_journal_start(inode, EXT4_HT_MISC, 1); 4085 if (IS_ERR(handle)) 4086 return PTR_ERR(handle); 4087 ext4_update_i_disksize(inode, size); 4088 ext4_mark_inode_dirty(handle, inode); 4089 ext4_journal_stop(handle); 4090 4091 return 0; 4092 } 4093 4094 /* 4095 * ext4_punch_hole: punches a hole in a file by releasing the blocks 4096 * associated with the given offset and length 4097 * 4098 * @inode: File inode 4099 * @offset: The offset where the hole will begin 4100 * @len: The length of the hole 4101 * 4102 * Returns: 0 on success or negative on failure 4103 */ 4104 4105 int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) 4106 { 4107 struct super_block *sb = inode->i_sb; 4108 ext4_lblk_t first_block, stop_block; 4109 struct address_space *mapping = inode->i_mapping; 4110 loff_t first_block_offset, last_block_offset; 4111 handle_t *handle; 4112 unsigned int credits; 4113 int ret = 0; 4114 4115 if (!S_ISREG(inode->i_mode)) 4116 return -EOPNOTSUPP; 4117 4118 trace_ext4_punch_hole(inode, offset, length, 0); 4119 4120 /* 4121 * Write out all dirty pages to avoid race conditions 4122 * Then release them. 4123 */ 4124 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { 4125 ret = filemap_write_and_wait_range(mapping, offset, 4126 offset + length - 1); 4127 if (ret) 4128 return ret; 4129 } 4130 4131 inode_lock(inode); 4132 4133 /* No need to punch hole beyond i_size */ 4134 if (offset >= inode->i_size) 4135 goto out_mutex; 4136 4137 /* 4138 * If the hole extends beyond i_size, set the hole 4139 * to end after the page that contains i_size 4140 */ 4141 if (offset + length > inode->i_size) { 4142 length = inode->i_size + 4143 PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) - 4144 offset; 4145 } 4146 4147 if (offset & (sb->s_blocksize - 1) || 4148 (offset + length) & (sb->s_blocksize - 1)) { 4149 /* 4150 * Attach jinode to inode for jbd2 if we do any zeroing of 4151 * partial block 4152 */ 4153 ret = ext4_inode_attach_jinode(inode); 4154 if (ret < 0) 4155 goto out_mutex; 4156 4157 } 4158 4159 /* Wait all existing dio workers, newcomers will block on i_mutex */ 4160 ext4_inode_block_unlocked_dio(inode); 4161 inode_dio_wait(inode); 4162 4163 /* 4164 * Prevent page faults from reinstantiating pages we have released from 4165 * page cache. 4166 */ 4167 down_write(&EXT4_I(inode)->i_mmap_sem); 4168 first_block_offset = round_up(offset, sb->s_blocksize); 4169 last_block_offset = round_down((offset + length), sb->s_blocksize) - 1; 4170 4171 /* Now release the pages and zero block aligned part of pages*/ 4172 if (last_block_offset > first_block_offset) { 4173 ret = ext4_update_disksize_before_punch(inode, offset, length); 4174 if (ret) 4175 goto out_dio; 4176 truncate_pagecache_range(inode, first_block_offset, 4177 last_block_offset); 4178 } 4179 4180 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4181 credits = ext4_writepage_trans_blocks(inode); 4182 else 4183 credits = ext4_blocks_for_truncate(inode); 4184 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 4185 if (IS_ERR(handle)) { 4186 ret = PTR_ERR(handle); 4187 ext4_std_error(sb, ret); 4188 goto out_dio; 4189 } 4190 4191 ret = ext4_zero_partial_blocks(handle, inode, offset, 4192 length); 4193 if (ret) 4194 goto out_stop; 4195 4196 first_block = (offset + sb->s_blocksize - 1) >> 4197 EXT4_BLOCK_SIZE_BITS(sb); 4198 stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb); 4199 4200 /* If there are no blocks to remove, return now */ 4201 if (first_block >= stop_block) 4202 goto out_stop; 4203 4204 down_write(&EXT4_I(inode)->i_data_sem); 4205 ext4_discard_preallocations(inode); 4206 4207 ret = ext4_es_remove_extent(inode, first_block, 4208 stop_block - first_block); 4209 if (ret) { 4210 up_write(&EXT4_I(inode)->i_data_sem); 4211 goto out_stop; 4212 } 4213 4214 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4215 ret = ext4_ext_remove_space(inode, first_block, 4216 stop_block - 1); 4217 else 4218 ret = ext4_ind_remove_space(handle, inode, first_block, 4219 stop_block); 4220 4221 up_write(&EXT4_I(inode)->i_data_sem); 4222 if (IS_SYNC(inode)) 4223 ext4_handle_sync(handle); 4224 4225 inode->i_mtime = inode->i_ctime = current_time(inode); 4226 ext4_mark_inode_dirty(handle, inode); 4227 if (ret >= 0) 4228 ext4_update_inode_fsync_trans(handle, inode, 1); 4229 out_stop: 4230 ext4_journal_stop(handle); 4231 out_dio: 4232 up_write(&EXT4_I(inode)->i_mmap_sem); 4233 ext4_inode_resume_unlocked_dio(inode); 4234 out_mutex: 4235 inode_unlock(inode); 4236 return ret; 4237 } 4238 4239 int ext4_inode_attach_jinode(struct inode *inode) 4240 { 4241 struct ext4_inode_info *ei = EXT4_I(inode); 4242 struct jbd2_inode *jinode; 4243 4244 if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal) 4245 return 0; 4246 4247 jinode = jbd2_alloc_inode(GFP_KERNEL); 4248 spin_lock(&inode->i_lock); 4249 if (!ei->jinode) { 4250 if (!jinode) { 4251 spin_unlock(&inode->i_lock); 4252 return -ENOMEM; 4253 } 4254 ei->jinode = jinode; 4255 jbd2_journal_init_jbd_inode(ei->jinode, inode); 4256 jinode = NULL; 4257 } 4258 spin_unlock(&inode->i_lock); 4259 if (unlikely(jinode != NULL)) 4260 jbd2_free_inode(jinode); 4261 return 0; 4262 } 4263 4264 /* 4265 * ext4_truncate() 4266 * 4267 * We block out ext4_get_block() block instantiations across the entire 4268 * transaction, and VFS/VM ensures that ext4_truncate() cannot run 4269 * simultaneously on behalf of the same inode. 4270 * 4271 * As we work through the truncate and commit bits of it to the journal there 4272 * is one core, guiding principle: the file's tree must always be consistent on 4273 * disk. We must be able to restart the truncate after a crash. 4274 * 4275 * The file's tree may be transiently inconsistent in memory (although it 4276 * probably isn't), but whenever we close off and commit a journal transaction, 4277 * the contents of (the filesystem + the journal) must be consistent and 4278 * restartable. It's pretty simple, really: bottom up, right to left (although 4279 * left-to-right works OK too). 4280 * 4281 * Note that at recovery time, journal replay occurs *before* the restart of 4282 * truncate against the orphan inode list. 4283 * 4284 * The committed inode has the new, desired i_size (which is the same as 4285 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see 4286 * that this inode's truncate did not complete and it will again call 4287 * ext4_truncate() to have another go. So there will be instantiated blocks 4288 * to the right of the truncation point in a crashed ext4 filesystem. But 4289 * that's fine - as long as they are linked from the inode, the post-crash 4290 * ext4_truncate() run will find them and release them. 4291 */ 4292 int ext4_truncate(struct inode *inode) 4293 { 4294 struct ext4_inode_info *ei = EXT4_I(inode); 4295 unsigned int credits; 4296 int err = 0; 4297 handle_t *handle; 4298 struct address_space *mapping = inode->i_mapping; 4299 4300 /* 4301 * There is a possibility that we're either freeing the inode 4302 * or it's a completely new inode. In those cases we might not 4303 * have i_mutex locked because it's not necessary. 4304 */ 4305 if (!(inode->i_state & (I_NEW|I_FREEING))) 4306 WARN_ON(!inode_is_locked(inode)); 4307 trace_ext4_truncate_enter(inode); 4308 4309 if (!ext4_can_truncate(inode)) 4310 return 0; 4311 4312 ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 4313 4314 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) 4315 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); 4316 4317 if (ext4_has_inline_data(inode)) { 4318 int has_inline = 1; 4319 4320 err = ext4_inline_data_truncate(inode, &has_inline); 4321 if (err) 4322 return err; 4323 if (has_inline) 4324 return 0; 4325 } 4326 4327 /* If we zero-out tail of the page, we have to create jinode for jbd2 */ 4328 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) { 4329 if (ext4_inode_attach_jinode(inode) < 0) 4330 return 0; 4331 } 4332 4333 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4334 credits = ext4_writepage_trans_blocks(inode); 4335 else 4336 credits = ext4_blocks_for_truncate(inode); 4337 4338 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 4339 if (IS_ERR(handle)) 4340 return PTR_ERR(handle); 4341 4342 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) 4343 ext4_block_truncate_page(handle, mapping, inode->i_size); 4344 4345 /* 4346 * We add the inode to the orphan list, so that if this 4347 * truncate spans multiple transactions, and we crash, we will 4348 * resume the truncate when the filesystem recovers. It also 4349 * marks the inode dirty, to catch the new size. 4350 * 4351 * Implication: the file must always be in a sane, consistent 4352 * truncatable state while each transaction commits. 4353 */ 4354 err = ext4_orphan_add(handle, inode); 4355 if (err) 4356 goto out_stop; 4357 4358 down_write(&EXT4_I(inode)->i_data_sem); 4359 4360 ext4_discard_preallocations(inode); 4361 4362 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4363 err = ext4_ext_truncate(handle, inode); 4364 else 4365 ext4_ind_truncate(handle, inode); 4366 4367 up_write(&ei->i_data_sem); 4368 if (err) 4369 goto out_stop; 4370 4371 if (IS_SYNC(inode)) 4372 ext4_handle_sync(handle); 4373 4374 out_stop: 4375 /* 4376 * If this was a simple ftruncate() and the file will remain alive, 4377 * then we need to clear up the orphan record which we created above. 4378 * However, if this was a real unlink then we were called by 4379 * ext4_evict_inode(), and we allow that function to clean up the 4380 * orphan info for us. 4381 */ 4382 if (inode->i_nlink) 4383 ext4_orphan_del(handle, inode); 4384 4385 inode->i_mtime = inode->i_ctime = current_time(inode); 4386 ext4_mark_inode_dirty(handle, inode); 4387 ext4_journal_stop(handle); 4388 4389 trace_ext4_truncate_exit(inode); 4390 return err; 4391 } 4392 4393 /* 4394 * ext4_get_inode_loc returns with an extra refcount against the inode's 4395 * underlying buffer_head on success. If 'in_mem' is true, we have all 4396 * data in memory that is needed to recreate the on-disk version of this 4397 * inode. 4398 */ 4399 static int __ext4_get_inode_loc(struct inode *inode, 4400 struct ext4_iloc *iloc, int in_mem) 4401 { 4402 struct ext4_group_desc *gdp; 4403 struct buffer_head *bh; 4404 struct super_block *sb = inode->i_sb; 4405 ext4_fsblk_t block; 4406 int inodes_per_block, inode_offset; 4407 4408 iloc->bh = NULL; 4409 if (!ext4_valid_inum(sb, inode->i_ino)) 4410 return -EFSCORRUPTED; 4411 4412 iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); 4413 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); 4414 if (!gdp) 4415 return -EIO; 4416 4417 /* 4418 * Figure out the offset within the block group inode table 4419 */ 4420 inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; 4421 inode_offset = ((inode->i_ino - 1) % 4422 EXT4_INODES_PER_GROUP(sb)); 4423 block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); 4424 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); 4425 4426 bh = sb_getblk(sb, block); 4427 if (unlikely(!bh)) 4428 return -ENOMEM; 4429 if (!buffer_uptodate(bh)) { 4430 lock_buffer(bh); 4431 4432 /* 4433 * If the buffer has the write error flag, we have failed 4434 * to write out another inode in the same block. In this 4435 * case, we don't have to read the block because we may 4436 * read the old inode data successfully. 4437 */ 4438 if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) 4439 set_buffer_uptodate(bh); 4440 4441 if (buffer_uptodate(bh)) { 4442 /* someone brought it uptodate while we waited */ 4443 unlock_buffer(bh); 4444 goto has_buffer; 4445 } 4446 4447 /* 4448 * If we have all information of the inode in memory and this 4449 * is the only valid inode in the block, we need not read the 4450 * block. 4451 */ 4452 if (in_mem) { 4453 struct buffer_head *bitmap_bh; 4454 int i, start; 4455 4456 start = inode_offset & ~(inodes_per_block - 1); 4457 4458 /* Is the inode bitmap in cache? */ 4459 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); 4460 if (unlikely(!bitmap_bh)) 4461 goto make_io; 4462 4463 /* 4464 * If the inode bitmap isn't in cache then the 4465 * optimisation may end up performing two reads instead 4466 * of one, so skip it. 4467 */ 4468 if (!buffer_uptodate(bitmap_bh)) { 4469 brelse(bitmap_bh); 4470 goto make_io; 4471 } 4472 for (i = start; i < start + inodes_per_block; i++) { 4473 if (i == inode_offset) 4474 continue; 4475 if (ext4_test_bit(i, bitmap_bh->b_data)) 4476 break; 4477 } 4478 brelse(bitmap_bh); 4479 if (i == start + inodes_per_block) { 4480 /* all other inodes are free, so skip I/O */ 4481 memset(bh->b_data, 0, bh->b_size); 4482 set_buffer_uptodate(bh); 4483 unlock_buffer(bh); 4484 goto has_buffer; 4485 } 4486 } 4487 4488 make_io: 4489 /* 4490 * If we need to do any I/O, try to pre-readahead extra 4491 * blocks from the inode table. 4492 */ 4493 if (EXT4_SB(sb)->s_inode_readahead_blks) { 4494 ext4_fsblk_t b, end, table; 4495 unsigned num; 4496 __u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks; 4497 4498 table = ext4_inode_table(sb, gdp); 4499 /* s_inode_readahead_blks is always a power of 2 */ 4500 b = block & ~((ext4_fsblk_t) ra_blks - 1); 4501 if (table > b) 4502 b = table; 4503 end = b + ra_blks; 4504 num = EXT4_INODES_PER_GROUP(sb); 4505 if (ext4_has_group_desc_csum(sb)) 4506 num -= ext4_itable_unused_count(sb, gdp); 4507 table += num / inodes_per_block; 4508 if (end > table) 4509 end = table; 4510 while (b <= end) 4511 sb_breadahead(sb, b++); 4512 } 4513 4514 /* 4515 * There are other valid inodes in the buffer, this inode 4516 * has in-inode xattrs, or we don't have this inode in memory. 4517 * Read the block from disk. 4518 */ 4519 trace_ext4_load_inode(inode); 4520 get_bh(bh); 4521 bh->b_end_io = end_buffer_read_sync; 4522 submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh); 4523 wait_on_buffer(bh); 4524 if (!buffer_uptodate(bh)) { 4525 EXT4_ERROR_INODE_BLOCK(inode, block, 4526 "unable to read itable block"); 4527 brelse(bh); 4528 return -EIO; 4529 } 4530 } 4531 has_buffer: 4532 iloc->bh = bh; 4533 return 0; 4534 } 4535 4536 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) 4537 { 4538 /* We have all inode data except xattrs in memory here. */ 4539 return __ext4_get_inode_loc(inode, iloc, 4540 !ext4_test_inode_state(inode, EXT4_STATE_XATTR)); 4541 } 4542 4543 void ext4_set_inode_flags(struct inode *inode) 4544 { 4545 unsigned int flags = EXT4_I(inode)->i_flags; 4546 unsigned int new_fl = 0; 4547 4548 if (flags & EXT4_SYNC_FL) 4549 new_fl |= S_SYNC; 4550 if (flags & EXT4_APPEND_FL) 4551 new_fl |= S_APPEND; 4552 if (flags & EXT4_IMMUTABLE_FL) 4553 new_fl |= S_IMMUTABLE; 4554 if (flags & EXT4_NOATIME_FL) 4555 new_fl |= S_NOATIME; 4556 if (flags & EXT4_DIRSYNC_FL) 4557 new_fl |= S_DIRSYNC; 4558 if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode) && 4559 !ext4_should_journal_data(inode) && !ext4_has_inline_data(inode) && 4560 !ext4_encrypted_inode(inode)) 4561 new_fl |= S_DAX; 4562 inode_set_flags(inode, new_fl, 4563 S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX); 4564 } 4565 4566 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, 4567 struct ext4_inode_info *ei) 4568 { 4569 blkcnt_t i_blocks ; 4570 struct inode *inode = &(ei->vfs_inode); 4571 struct super_block *sb = inode->i_sb; 4572 4573 if (ext4_has_feature_huge_file(sb)) { 4574 /* we are using combined 48 bit field */ 4575 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | 4576 le32_to_cpu(raw_inode->i_blocks_lo); 4577 if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) { 4578 /* i_blocks represent file system block size */ 4579 return i_blocks << (inode->i_blkbits - 9); 4580 } else { 4581 return i_blocks; 4582 } 4583 } else { 4584 return le32_to_cpu(raw_inode->i_blocks_lo); 4585 } 4586 } 4587 4588 static inline void ext4_iget_extra_inode(struct inode *inode, 4589 struct ext4_inode *raw_inode, 4590 struct ext4_inode_info *ei) 4591 { 4592 __le32 *magic = (void *)raw_inode + 4593 EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize; 4594 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <= 4595 EXT4_INODE_SIZE(inode->i_sb) && 4596 *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { 4597 ext4_set_inode_state(inode, EXT4_STATE_XATTR); 4598 ext4_find_inline_data_nolock(inode); 4599 } else 4600 EXT4_I(inode)->i_inline_off = 0; 4601 } 4602 4603 int ext4_get_projid(struct inode *inode, kprojid_t *projid) 4604 { 4605 if (!ext4_has_feature_project(inode->i_sb)) 4606 return -EOPNOTSUPP; 4607 *projid = EXT4_I(inode)->i_projid; 4608 return 0; 4609 } 4610 4611 struct inode *ext4_iget(struct super_block *sb, unsigned long ino) 4612 { 4613 struct ext4_iloc iloc; 4614 struct ext4_inode *raw_inode; 4615 struct ext4_inode_info *ei; 4616 struct inode *inode; 4617 journal_t *journal = EXT4_SB(sb)->s_journal; 4618 long ret; 4619 loff_t size; 4620 int block; 4621 uid_t i_uid; 4622 gid_t i_gid; 4623 projid_t i_projid; 4624 4625 inode = iget_locked(sb, ino); 4626 if (!inode) 4627 return ERR_PTR(-ENOMEM); 4628 if (!(inode->i_state & I_NEW)) 4629 return inode; 4630 4631 ei = EXT4_I(inode); 4632 iloc.bh = NULL; 4633 4634 ret = __ext4_get_inode_loc(inode, &iloc, 0); 4635 if (ret < 0) 4636 goto bad_inode; 4637 raw_inode = ext4_raw_inode(&iloc); 4638 4639 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4640 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 4641 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 4642 EXT4_INODE_SIZE(inode->i_sb) || 4643 (ei->i_extra_isize & 3)) { 4644 EXT4_ERROR_INODE(inode, 4645 "bad extra_isize %u (inode size %u)", 4646 ei->i_extra_isize, 4647 EXT4_INODE_SIZE(inode->i_sb)); 4648 ret = -EFSCORRUPTED; 4649 goto bad_inode; 4650 } 4651 } else 4652 ei->i_extra_isize = 0; 4653 4654 /* Precompute checksum seed for inode metadata */ 4655 if (ext4_has_metadata_csum(sb)) { 4656 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4657 __u32 csum; 4658 __le32 inum = cpu_to_le32(inode->i_ino); 4659 __le32 gen = raw_inode->i_generation; 4660 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, 4661 sizeof(inum)); 4662 ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, 4663 sizeof(gen)); 4664 } 4665 4666 if (!ext4_inode_csum_verify(inode, raw_inode, ei)) { 4667 EXT4_ERROR_INODE(inode, "checksum invalid"); 4668 ret = -EFSBADCRC; 4669 goto bad_inode; 4670 } 4671 4672 inode->i_mode = le16_to_cpu(raw_inode->i_mode); 4673 i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 4674 i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 4675 if (ext4_has_feature_project(sb) && 4676 EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE && 4677 EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) 4678 i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid); 4679 else 4680 i_projid = EXT4_DEF_PROJID; 4681 4682 if (!(test_opt(inode->i_sb, NO_UID32))) { 4683 i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; 4684 i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; 4685 } 4686 i_uid_write(inode, i_uid); 4687 i_gid_write(inode, i_gid); 4688 ei->i_projid = make_kprojid(&init_user_ns, i_projid); 4689 set_nlink(inode, le16_to_cpu(raw_inode->i_links_count)); 4690 4691 ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ 4692 ei->i_inline_off = 0; 4693 ei->i_dir_start_lookup = 0; 4694 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); 4695 /* We now have enough fields to check if the inode was active or not. 4696 * This is needed because nfsd might try to access dead inodes 4697 * the test is that same one that e2fsck uses 4698 * NeilBrown 1999oct15 4699 */ 4700 if (inode->i_nlink == 0) { 4701 if ((inode->i_mode == 0 || 4702 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) && 4703 ino != EXT4_BOOT_LOADER_INO) { 4704 /* this inode is deleted */ 4705 ret = -ESTALE; 4706 goto bad_inode; 4707 } 4708 /* The only unlinked inodes we let through here have 4709 * valid i_mode and are being read by the orphan 4710 * recovery code: that's fine, we're about to complete 4711 * the process of deleting those. 4712 * OR it is the EXT4_BOOT_LOADER_INO which is 4713 * not initialized on a new filesystem. */ 4714 } 4715 ei->i_flags = le32_to_cpu(raw_inode->i_flags); 4716 inode->i_blocks = ext4_inode_blocks(raw_inode, ei); 4717 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); 4718 if (ext4_has_feature_64bit(sb)) 4719 ei->i_file_acl |= 4720 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; 4721 inode->i_size = ext4_isize(sb, raw_inode); 4722 if ((size = i_size_read(inode)) < 0) { 4723 EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size); 4724 ret = -EFSCORRUPTED; 4725 goto bad_inode; 4726 } 4727 ei->i_disksize = inode->i_size; 4728 #ifdef CONFIG_QUOTA 4729 ei->i_reserved_quota = 0; 4730 #endif 4731 inode->i_generation = le32_to_cpu(raw_inode->i_generation); 4732 ei->i_block_group = iloc.block_group; 4733 ei->i_last_alloc_group = ~0; 4734 /* 4735 * NOTE! The in-memory inode i_data array is in little-endian order 4736 * even on big-endian machines: we do NOT byteswap the block numbers! 4737 */ 4738 for (block = 0; block < EXT4_N_BLOCKS; block++) 4739 ei->i_data[block] = raw_inode->i_block[block]; 4740 INIT_LIST_HEAD(&ei->i_orphan); 4741 4742 /* 4743 * Set transaction id's of transactions that have to be committed 4744 * to finish f[data]sync. We set them to currently running transaction 4745 * as we cannot be sure that the inode or some of its metadata isn't 4746 * part of the transaction - the inode could have been reclaimed and 4747 * now it is reread from disk. 4748 */ 4749 if (journal) { 4750 transaction_t *transaction; 4751 tid_t tid; 4752 4753 read_lock(&journal->j_state_lock); 4754 if (journal->j_running_transaction) 4755 transaction = journal->j_running_transaction; 4756 else 4757 transaction = journal->j_committing_transaction; 4758 if (transaction) 4759 tid = transaction->t_tid; 4760 else 4761 tid = journal->j_commit_sequence; 4762 read_unlock(&journal->j_state_lock); 4763 ei->i_sync_tid = tid; 4764 ei->i_datasync_tid = tid; 4765 } 4766 4767 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4768 if (ei->i_extra_isize == 0) { 4769 /* The extra space is currently unused. Use it. */ 4770 BUILD_BUG_ON(sizeof(struct ext4_inode) & 3); 4771 ei->i_extra_isize = sizeof(struct ext4_inode) - 4772 EXT4_GOOD_OLD_INODE_SIZE; 4773 } else { 4774 ext4_iget_extra_inode(inode, raw_inode, ei); 4775 } 4776 } 4777 4778 EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); 4779 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); 4780 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); 4781 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); 4782 4783 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) { 4784 inode->i_version = le32_to_cpu(raw_inode->i_disk_version); 4785 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4786 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 4787 inode->i_version |= 4788 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; 4789 } 4790 } 4791 4792 ret = 0; 4793 if (ei->i_file_acl && 4794 !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { 4795 EXT4_ERROR_INODE(inode, "bad extended attribute block %llu", 4796 ei->i_file_acl); 4797 ret = -EFSCORRUPTED; 4798 goto bad_inode; 4799 } else if (!ext4_has_inline_data(inode)) { 4800 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 4801 if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 4802 (S_ISLNK(inode->i_mode) && 4803 !ext4_inode_is_fast_symlink(inode)))) 4804 /* Validate extent which is part of inode */ 4805 ret = ext4_ext_check_inode(inode); 4806 } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 4807 (S_ISLNK(inode->i_mode) && 4808 !ext4_inode_is_fast_symlink(inode))) { 4809 /* Validate block references which are part of inode */ 4810 ret = ext4_ind_check_inode(inode); 4811 } 4812 } 4813 if (ret) 4814 goto bad_inode; 4815 4816 if (S_ISREG(inode->i_mode)) { 4817 inode->i_op = &ext4_file_inode_operations; 4818 inode->i_fop = &ext4_file_operations; 4819 ext4_set_aops(inode); 4820 } else if (S_ISDIR(inode->i_mode)) { 4821 inode->i_op = &ext4_dir_inode_operations; 4822 inode->i_fop = &ext4_dir_operations; 4823 } else if (S_ISLNK(inode->i_mode)) { 4824 if (ext4_encrypted_inode(inode)) { 4825 inode->i_op = &ext4_encrypted_symlink_inode_operations; 4826 ext4_set_aops(inode); 4827 } else if (ext4_inode_is_fast_symlink(inode)) { 4828 inode->i_link = (char *)ei->i_data; 4829 inode->i_op = &ext4_fast_symlink_inode_operations; 4830 nd_terminate_link(ei->i_data, inode->i_size, 4831 sizeof(ei->i_data) - 1); 4832 } else { 4833 inode->i_op = &ext4_symlink_inode_operations; 4834 ext4_set_aops(inode); 4835 } 4836 inode_nohighmem(inode); 4837 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || 4838 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { 4839 inode->i_op = &ext4_special_inode_operations; 4840 if (raw_inode->i_block[0]) 4841 init_special_inode(inode, inode->i_mode, 4842 old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 4843 else 4844 init_special_inode(inode, inode->i_mode, 4845 new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 4846 } else if (ino == EXT4_BOOT_LOADER_INO) { 4847 make_bad_inode(inode); 4848 } else { 4849 ret = -EFSCORRUPTED; 4850 EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode); 4851 goto bad_inode; 4852 } 4853 brelse(iloc.bh); 4854 ext4_set_inode_flags(inode); 4855 4856 if (ei->i_flags & EXT4_EA_INODE_FL) { 4857 ext4_xattr_inode_set_class(inode); 4858 4859 inode_lock(inode); 4860 inode->i_flags |= S_NOQUOTA; 4861 inode_unlock(inode); 4862 } 4863 4864 unlock_new_inode(inode); 4865 return inode; 4866 4867 bad_inode: 4868 brelse(iloc.bh); 4869 iget_failed(inode); 4870 return ERR_PTR(ret); 4871 } 4872 4873 struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino) 4874 { 4875 if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) 4876 return ERR_PTR(-EFSCORRUPTED); 4877 return ext4_iget(sb, ino); 4878 } 4879 4880 static int ext4_inode_blocks_set(handle_t *handle, 4881 struct ext4_inode *raw_inode, 4882 struct ext4_inode_info *ei) 4883 { 4884 struct inode *inode = &(ei->vfs_inode); 4885 u64 i_blocks = inode->i_blocks; 4886 struct super_block *sb = inode->i_sb; 4887 4888 if (i_blocks <= ~0U) { 4889 /* 4890 * i_blocks can be represented in a 32 bit variable 4891 * as multiple of 512 bytes 4892 */ 4893 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 4894 raw_inode->i_blocks_high = 0; 4895 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 4896 return 0; 4897 } 4898 if (!ext4_has_feature_huge_file(sb)) 4899 return -EFBIG; 4900 4901 if (i_blocks <= 0xffffffffffffULL) { 4902 /* 4903 * i_blocks can be represented in a 48 bit variable 4904 * as multiple of 512 bytes 4905 */ 4906 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 4907 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 4908 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 4909 } else { 4910 ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE); 4911 /* i_block is stored in file system block size */ 4912 i_blocks = i_blocks >> (inode->i_blkbits - 9); 4913 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 4914 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 4915 } 4916 return 0; 4917 } 4918 4919 struct other_inode { 4920 unsigned long orig_ino; 4921 struct ext4_inode *raw_inode; 4922 }; 4923 4924 static int other_inode_match(struct inode * inode, unsigned long ino, 4925 void *data) 4926 { 4927 struct other_inode *oi = (struct other_inode *) data; 4928 4929 if ((inode->i_ino != ino) || 4930 (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW | 4931 I_DIRTY_SYNC | I_DIRTY_DATASYNC)) || 4932 ((inode->i_state & I_DIRTY_TIME) == 0)) 4933 return 0; 4934 spin_lock(&inode->i_lock); 4935 if (((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW | 4936 I_DIRTY_SYNC | I_DIRTY_DATASYNC)) == 0) && 4937 (inode->i_state & I_DIRTY_TIME)) { 4938 struct ext4_inode_info *ei = EXT4_I(inode); 4939 4940 inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED); 4941 spin_unlock(&inode->i_lock); 4942 4943 spin_lock(&ei->i_raw_lock); 4944 EXT4_INODE_SET_XTIME(i_ctime, inode, oi->raw_inode); 4945 EXT4_INODE_SET_XTIME(i_mtime, inode, oi->raw_inode); 4946 EXT4_INODE_SET_XTIME(i_atime, inode, oi->raw_inode); 4947 ext4_inode_csum_set(inode, oi->raw_inode, ei); 4948 spin_unlock(&ei->i_raw_lock); 4949 trace_ext4_other_inode_update_time(inode, oi->orig_ino); 4950 return -1; 4951 } 4952 spin_unlock(&inode->i_lock); 4953 return -1; 4954 } 4955 4956 /* 4957 * Opportunistically update the other time fields for other inodes in 4958 * the same inode table block. 4959 */ 4960 static void ext4_update_other_inodes_time(struct super_block *sb, 4961 unsigned long orig_ino, char *buf) 4962 { 4963 struct other_inode oi; 4964 unsigned long ino; 4965 int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; 4966 int inode_size = EXT4_INODE_SIZE(sb); 4967 4968 oi.orig_ino = orig_ino; 4969 /* 4970 * Calculate the first inode in the inode table block. Inode 4971 * numbers are one-based. That is, the first inode in a block 4972 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1). 4973 */ 4974 ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1; 4975 for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) { 4976 if (ino == orig_ino) 4977 continue; 4978 oi.raw_inode = (struct ext4_inode *) buf; 4979 (void) find_inode_nowait(sb, ino, other_inode_match, &oi); 4980 } 4981 } 4982 4983 /* 4984 * Post the struct inode info into an on-disk inode location in the 4985 * buffer-cache. This gobbles the caller's reference to the 4986 * buffer_head in the inode location struct. 4987 * 4988 * The caller must have write access to iloc->bh. 4989 */ 4990 static int ext4_do_update_inode(handle_t *handle, 4991 struct inode *inode, 4992 struct ext4_iloc *iloc) 4993 { 4994 struct ext4_inode *raw_inode = ext4_raw_inode(iloc); 4995 struct ext4_inode_info *ei = EXT4_I(inode); 4996 struct buffer_head *bh = iloc->bh; 4997 struct super_block *sb = inode->i_sb; 4998 int err = 0, rc, block; 4999 int need_datasync = 0, set_large_file = 0; 5000 uid_t i_uid; 5001 gid_t i_gid; 5002 projid_t i_projid; 5003 5004 spin_lock(&ei->i_raw_lock); 5005 5006 /* For fields not tracked in the in-memory inode, 5007 * initialise them to zero for new inodes. */ 5008 if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) 5009 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); 5010 5011 raw_inode->i_mode = cpu_to_le16(inode->i_mode); 5012 i_uid = i_uid_read(inode); 5013 i_gid = i_gid_read(inode); 5014 i_projid = from_kprojid(&init_user_ns, ei->i_projid); 5015 if (!(test_opt(inode->i_sb, NO_UID32))) { 5016 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid)); 5017 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid)); 5018 /* 5019 * Fix up interoperability with old kernels. Otherwise, old inodes get 5020 * re-used with the upper 16 bits of the uid/gid intact 5021 */ 5022 if (ei->i_dtime && list_empty(&ei->i_orphan)) { 5023 raw_inode->i_uid_high = 0; 5024 raw_inode->i_gid_high = 0; 5025 } else { 5026 raw_inode->i_uid_high = 5027 cpu_to_le16(high_16_bits(i_uid)); 5028 raw_inode->i_gid_high = 5029 cpu_to_le16(high_16_bits(i_gid)); 5030 } 5031 } else { 5032 raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid)); 5033 raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid)); 5034 raw_inode->i_uid_high = 0; 5035 raw_inode->i_gid_high = 0; 5036 } 5037 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 5038 5039 EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); 5040 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); 5041 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); 5042 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); 5043 5044 err = ext4_inode_blocks_set(handle, raw_inode, ei); 5045 if (err) { 5046 spin_unlock(&ei->i_raw_lock); 5047 goto out_brelse; 5048 } 5049 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); 5050 raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF); 5051 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) 5052 raw_inode->i_file_acl_high = 5053 cpu_to_le16(ei->i_file_acl >> 32); 5054 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); 5055 if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) { 5056 ext4_isize_set(raw_inode, ei->i_disksize); 5057 need_datasync = 1; 5058 } 5059 if (ei->i_disksize > 0x7fffffffULL) { 5060 if (!ext4_has_feature_large_file(sb) || 5061 EXT4_SB(sb)->s_es->s_rev_level == 5062 cpu_to_le32(EXT4_GOOD_OLD_REV)) 5063 set_large_file = 1; 5064 } 5065 raw_inode->i_generation = cpu_to_le32(inode->i_generation); 5066 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 5067 if (old_valid_dev(inode->i_rdev)) { 5068 raw_inode->i_block[0] = 5069 cpu_to_le32(old_encode_dev(inode->i_rdev)); 5070 raw_inode->i_block[1] = 0; 5071 } else { 5072 raw_inode->i_block[0] = 0; 5073 raw_inode->i_block[1] = 5074 cpu_to_le32(new_encode_dev(inode->i_rdev)); 5075 raw_inode->i_block[2] = 0; 5076 } 5077 } else if (!ext4_has_inline_data(inode)) { 5078 for (block = 0; block < EXT4_N_BLOCKS; block++) 5079 raw_inode->i_block[block] = ei->i_data[block]; 5080 } 5081 5082 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) { 5083 raw_inode->i_disk_version = cpu_to_le32(inode->i_version); 5084 if (ei->i_extra_isize) { 5085 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 5086 raw_inode->i_version_hi = 5087 cpu_to_le32(inode->i_version >> 32); 5088 raw_inode->i_extra_isize = 5089 cpu_to_le16(ei->i_extra_isize); 5090 } 5091 } 5092 5093 BUG_ON(!ext4_has_feature_project(inode->i_sb) && 5094 i_projid != EXT4_DEF_PROJID); 5095 5096 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && 5097 EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) 5098 raw_inode->i_projid = cpu_to_le32(i_projid); 5099 5100 ext4_inode_csum_set(inode, raw_inode, ei); 5101 spin_unlock(&ei->i_raw_lock); 5102 if (inode->i_sb->s_flags & MS_LAZYTIME) 5103 ext4_update_other_inodes_time(inode->i_sb, inode->i_ino, 5104 bh->b_data); 5105 5106 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 5107 rc = ext4_handle_dirty_metadata(handle, NULL, bh); 5108 if (!err) 5109 err = rc; 5110 ext4_clear_inode_state(inode, EXT4_STATE_NEW); 5111 if (set_large_file) { 5112 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access"); 5113 err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh); 5114 if (err) 5115 goto out_brelse; 5116 ext4_update_dynamic_rev(sb); 5117 ext4_set_feature_large_file(sb); 5118 ext4_handle_sync(handle); 5119 err = ext4_handle_dirty_super(handle, sb); 5120 } 5121 ext4_update_inode_fsync_trans(handle, inode, need_datasync); 5122 out_brelse: 5123 brelse(bh); 5124 ext4_std_error(inode->i_sb, err); 5125 return err; 5126 } 5127 5128 /* 5129 * ext4_write_inode() 5130 * 5131 * We are called from a few places: 5132 * 5133 * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files. 5134 * Here, there will be no transaction running. We wait for any running 5135 * transaction to commit. 5136 * 5137 * - Within flush work (sys_sync(), kupdate and such). 5138 * We wait on commit, if told to. 5139 * 5140 * - Within iput_final() -> write_inode_now() 5141 * We wait on commit, if told to. 5142 * 5143 * In all cases it is actually safe for us to return without doing anything, 5144 * because the inode has been copied into a raw inode buffer in 5145 * ext4_mark_inode_dirty(). This is a correctness thing for WB_SYNC_ALL 5146 * writeback. 5147 * 5148 * Note that we are absolutely dependent upon all inode dirtiers doing the 5149 * right thing: they *must* call mark_inode_dirty() after dirtying info in 5150 * which we are interested. 5151 * 5152 * It would be a bug for them to not do this. The code: 5153 * 5154 * mark_inode_dirty(inode) 5155 * stuff(); 5156 * inode->i_size = expr; 5157 * 5158 * is in error because write_inode() could occur while `stuff()' is running, 5159 * and the new i_size will be lost. Plus the inode will no longer be on the 5160 * superblock's dirty inode list. 5161 */ 5162 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) 5163 { 5164 int err; 5165 5166 if (WARN_ON_ONCE(current->flags & PF_MEMALLOC)) 5167 return 0; 5168 5169 if (EXT4_SB(inode->i_sb)->s_journal) { 5170 if (ext4_journal_current_handle()) { 5171 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); 5172 dump_stack(); 5173 return -EIO; 5174 } 5175 5176 /* 5177 * No need to force transaction in WB_SYNC_NONE mode. Also 5178 * ext4_sync_fs() will force the commit after everything is 5179 * written. 5180 */ 5181 if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync) 5182 return 0; 5183 5184 err = ext4_force_commit(inode->i_sb); 5185 } else { 5186 struct ext4_iloc iloc; 5187 5188 err = __ext4_get_inode_loc(inode, &iloc, 0); 5189 if (err) 5190 return err; 5191 /* 5192 * sync(2) will flush the whole buffer cache. No need to do 5193 * it here separately for each inode. 5194 */ 5195 if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) 5196 sync_dirty_buffer(iloc.bh); 5197 if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { 5198 EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr, 5199 "IO error syncing inode"); 5200 err = -EIO; 5201 } 5202 brelse(iloc.bh); 5203 } 5204 return err; 5205 } 5206 5207 /* 5208 * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate 5209 * buffers that are attached to a page stradding i_size and are undergoing 5210 * commit. In that case we have to wait for commit to finish and try again. 5211 */ 5212 static void ext4_wait_for_tail_page_commit(struct inode *inode) 5213 { 5214 struct page *page; 5215 unsigned offset; 5216 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; 5217 tid_t commit_tid = 0; 5218 int ret; 5219 5220 offset = inode->i_size & (PAGE_SIZE - 1); 5221 /* 5222 * All buffers in the last page remain valid? Then there's nothing to 5223 * do. We do the check mainly to optimize the common PAGE_SIZE == 5224 * blocksize case 5225 */ 5226 if (offset > PAGE_SIZE - i_blocksize(inode)) 5227 return; 5228 while (1) { 5229 page = find_lock_page(inode->i_mapping, 5230 inode->i_size >> PAGE_SHIFT); 5231 if (!page) 5232 return; 5233 ret = __ext4_journalled_invalidatepage(page, offset, 5234 PAGE_SIZE - offset); 5235 unlock_page(page); 5236 put_page(page); 5237 if (ret != -EBUSY) 5238 return; 5239 commit_tid = 0; 5240 read_lock(&journal->j_state_lock); 5241 if (journal->j_committing_transaction) 5242 commit_tid = journal->j_committing_transaction->t_tid; 5243 read_unlock(&journal->j_state_lock); 5244 if (commit_tid) 5245 jbd2_log_wait_commit(journal, commit_tid); 5246 } 5247 } 5248 5249 /* 5250 * ext4_setattr() 5251 * 5252 * Called from notify_change. 5253 * 5254 * We want to trap VFS attempts to truncate the file as soon as 5255 * possible. In particular, we want to make sure that when the VFS 5256 * shrinks i_size, we put the inode on the orphan list and modify 5257 * i_disksize immediately, so that during the subsequent flushing of 5258 * dirty pages and freeing of disk blocks, we can guarantee that any 5259 * commit will leave the blocks being flushed in an unused state on 5260 * disk. (On recovery, the inode will get truncated and the blocks will 5261 * be freed, so we have a strong guarantee that no future commit will 5262 * leave these blocks visible to the user.) 5263 * 5264 * Another thing we have to assure is that if we are in ordered mode 5265 * and inode is still attached to the committing transaction, we must 5266 * we start writeout of all the dirty pages which are being truncated. 5267 * This way we are sure that all the data written in the previous 5268 * transaction are already on disk (truncate waits for pages under 5269 * writeback). 5270 * 5271 * Called with inode->i_mutex down. 5272 */ 5273 int ext4_setattr(struct dentry *dentry, struct iattr *attr) 5274 { 5275 struct inode *inode = d_inode(dentry); 5276 int error, rc = 0; 5277 int orphan = 0; 5278 const unsigned int ia_valid = attr->ia_valid; 5279 5280 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) 5281 return -EIO; 5282 5283 error = setattr_prepare(dentry, attr); 5284 if (error) 5285 return error; 5286 5287 if (is_quota_modification(inode, attr)) { 5288 error = dquot_initialize(inode); 5289 if (error) 5290 return error; 5291 } 5292 if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) || 5293 (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) { 5294 handle_t *handle; 5295 5296 /* (user+group)*(old+new) structure, inode write (sb, 5297 * inode block, ? - but truncate inode update has it) */ 5298 handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 5299 (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) + 5300 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3); 5301 if (IS_ERR(handle)) { 5302 error = PTR_ERR(handle); 5303 goto err_out; 5304 } 5305 5306 /* dquot_transfer() calls back ext4_get_inode_usage() which 5307 * counts xattr inode references. 5308 */ 5309 down_read(&EXT4_I(inode)->xattr_sem); 5310 error = dquot_transfer(inode, attr); 5311 up_read(&EXT4_I(inode)->xattr_sem); 5312 5313 if (error) { 5314 ext4_journal_stop(handle); 5315 return error; 5316 } 5317 /* Update corresponding info in inode so that everything is in 5318 * one transaction */ 5319 if (attr->ia_valid & ATTR_UID) 5320 inode->i_uid = attr->ia_uid; 5321 if (attr->ia_valid & ATTR_GID) 5322 inode->i_gid = attr->ia_gid; 5323 error = ext4_mark_inode_dirty(handle, inode); 5324 ext4_journal_stop(handle); 5325 } 5326 5327 if (attr->ia_valid & ATTR_SIZE) { 5328 handle_t *handle; 5329 loff_t oldsize = inode->i_size; 5330 int shrink = (attr->ia_size <= inode->i_size); 5331 5332 if (ext4_encrypted_inode(inode)) { 5333 error = fscrypt_get_encryption_info(inode); 5334 if (error) 5335 return error; 5336 if (!fscrypt_has_encryption_key(inode)) 5337 return -ENOKEY; 5338 } 5339 5340 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 5341 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 5342 5343 if (attr->ia_size > sbi->s_bitmap_maxbytes) 5344 return -EFBIG; 5345 } 5346 if (!S_ISREG(inode->i_mode)) 5347 return -EINVAL; 5348 5349 if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size) 5350 inode_inc_iversion(inode); 5351 5352 if (ext4_should_order_data(inode) && 5353 (attr->ia_size < inode->i_size)) { 5354 error = ext4_begin_ordered_truncate(inode, 5355 attr->ia_size); 5356 if (error) 5357 goto err_out; 5358 } 5359 if (attr->ia_size != inode->i_size) { 5360 handle = ext4_journal_start(inode, EXT4_HT_INODE, 3); 5361 if (IS_ERR(handle)) { 5362 error = PTR_ERR(handle); 5363 goto err_out; 5364 } 5365 if (ext4_handle_valid(handle) && shrink) { 5366 error = ext4_orphan_add(handle, inode); 5367 orphan = 1; 5368 } 5369 /* 5370 * Update c/mtime on truncate up, ext4_truncate() will 5371 * update c/mtime in shrink case below 5372 */ 5373 if (!shrink) { 5374 inode->i_mtime = current_time(inode); 5375 inode->i_ctime = inode->i_mtime; 5376 } 5377 down_write(&EXT4_I(inode)->i_data_sem); 5378 EXT4_I(inode)->i_disksize = attr->ia_size; 5379 rc = ext4_mark_inode_dirty(handle, inode); 5380 if (!error) 5381 error = rc; 5382 /* 5383 * We have to update i_size under i_data_sem together 5384 * with i_disksize to avoid races with writeback code 5385 * running ext4_wb_update_i_disksize(). 5386 */ 5387 if (!error) 5388 i_size_write(inode, attr->ia_size); 5389 up_write(&EXT4_I(inode)->i_data_sem); 5390 ext4_journal_stop(handle); 5391 if (error) { 5392 if (orphan) 5393 ext4_orphan_del(NULL, inode); 5394 goto err_out; 5395 } 5396 } 5397 if (!shrink) 5398 pagecache_isize_extended(inode, oldsize, inode->i_size); 5399 5400 /* 5401 * Blocks are going to be removed from the inode. Wait 5402 * for dio in flight. Temporarily disable 5403 * dioread_nolock to prevent livelock. 5404 */ 5405 if (orphan) { 5406 if (!ext4_should_journal_data(inode)) { 5407 ext4_inode_block_unlocked_dio(inode); 5408 inode_dio_wait(inode); 5409 ext4_inode_resume_unlocked_dio(inode); 5410 } else 5411 ext4_wait_for_tail_page_commit(inode); 5412 } 5413 down_write(&EXT4_I(inode)->i_mmap_sem); 5414 /* 5415 * Truncate pagecache after we've waited for commit 5416 * in data=journal mode to make pages freeable. 5417 */ 5418 truncate_pagecache(inode, inode->i_size); 5419 if (shrink) { 5420 rc = ext4_truncate(inode); 5421 if (rc) 5422 error = rc; 5423 } 5424 up_write(&EXT4_I(inode)->i_mmap_sem); 5425 } 5426 5427 if (!error) { 5428 setattr_copy(inode, attr); 5429 mark_inode_dirty(inode); 5430 } 5431 5432 /* 5433 * If the call to ext4_truncate failed to get a transaction handle at 5434 * all, we need to clean up the in-core orphan list manually. 5435 */ 5436 if (orphan && inode->i_nlink) 5437 ext4_orphan_del(NULL, inode); 5438 5439 if (!error && (ia_valid & ATTR_MODE)) 5440 rc = posix_acl_chmod(inode, inode->i_mode); 5441 5442 err_out: 5443 ext4_std_error(inode->i_sb, error); 5444 if (!error) 5445 error = rc; 5446 return error; 5447 } 5448 5449 int ext4_getattr(const struct path *path, struct kstat *stat, 5450 u32 request_mask, unsigned int query_flags) 5451 { 5452 struct inode *inode = d_inode(path->dentry); 5453 struct ext4_inode *raw_inode; 5454 struct ext4_inode_info *ei = EXT4_I(inode); 5455 unsigned int flags; 5456 5457 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) { 5458 stat->result_mask |= STATX_BTIME; 5459 stat->btime.tv_sec = ei->i_crtime.tv_sec; 5460 stat->btime.tv_nsec = ei->i_crtime.tv_nsec; 5461 } 5462 5463 flags = ei->i_flags & EXT4_FL_USER_VISIBLE; 5464 if (flags & EXT4_APPEND_FL) 5465 stat->attributes |= STATX_ATTR_APPEND; 5466 if (flags & EXT4_COMPR_FL) 5467 stat->attributes |= STATX_ATTR_COMPRESSED; 5468 if (flags & EXT4_ENCRYPT_FL) 5469 stat->attributes |= STATX_ATTR_ENCRYPTED; 5470 if (flags & EXT4_IMMUTABLE_FL) 5471 stat->attributes |= STATX_ATTR_IMMUTABLE; 5472 if (flags & EXT4_NODUMP_FL) 5473 stat->attributes |= STATX_ATTR_NODUMP; 5474 5475 stat->attributes_mask |= (STATX_ATTR_APPEND | 5476 STATX_ATTR_COMPRESSED | 5477 STATX_ATTR_ENCRYPTED | 5478 STATX_ATTR_IMMUTABLE | 5479 STATX_ATTR_NODUMP); 5480 5481 generic_fillattr(inode, stat); 5482 return 0; 5483 } 5484 5485 int ext4_file_getattr(const struct path *path, struct kstat *stat, 5486 u32 request_mask, unsigned int query_flags) 5487 { 5488 struct inode *inode = d_inode(path->dentry); 5489 u64 delalloc_blocks; 5490 5491 ext4_getattr(path, stat, request_mask, query_flags); 5492 5493 /* 5494 * If there is inline data in the inode, the inode will normally not 5495 * have data blocks allocated (it may have an external xattr block). 5496 * Report at least one sector for such files, so tools like tar, rsync, 5497 * others don't incorrectly think the file is completely sparse. 5498 */ 5499 if (unlikely(ext4_has_inline_data(inode))) 5500 stat->blocks += (stat->size + 511) >> 9; 5501 5502 /* 5503 * We can't update i_blocks if the block allocation is delayed 5504 * otherwise in the case of system crash before the real block 5505 * allocation is done, we will have i_blocks inconsistent with 5506 * on-disk file blocks. 5507 * We always keep i_blocks updated together with real 5508 * allocation. But to not confuse with user, stat 5509 * will return the blocks that include the delayed allocation 5510 * blocks for this file. 5511 */ 5512 delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb), 5513 EXT4_I(inode)->i_reserved_data_blocks); 5514 stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9); 5515 return 0; 5516 } 5517 5518 static int ext4_index_trans_blocks(struct inode *inode, int lblocks, 5519 int pextents) 5520 { 5521 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 5522 return ext4_ind_trans_blocks(inode, lblocks); 5523 return ext4_ext_index_trans_blocks(inode, pextents); 5524 } 5525 5526 /* 5527 * Account for index blocks, block groups bitmaps and block group 5528 * descriptor blocks if modify datablocks and index blocks 5529 * worse case, the indexs blocks spread over different block groups 5530 * 5531 * If datablocks are discontiguous, they are possible to spread over 5532 * different block groups too. If they are contiguous, with flexbg, 5533 * they could still across block group boundary. 5534 * 5535 * Also account for superblock, inode, quota and xattr blocks 5536 */ 5537 static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, 5538 int pextents) 5539 { 5540 ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb); 5541 int gdpblocks; 5542 int idxblocks; 5543 int ret = 0; 5544 5545 /* 5546 * How many index blocks need to touch to map @lblocks logical blocks 5547 * to @pextents physical extents? 5548 */ 5549 idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents); 5550 5551 ret = idxblocks; 5552 5553 /* 5554 * Now let's see how many group bitmaps and group descriptors need 5555 * to account 5556 */ 5557 groups = idxblocks + pextents; 5558 gdpblocks = groups; 5559 if (groups > ngroups) 5560 groups = ngroups; 5561 if (groups > EXT4_SB(inode->i_sb)->s_gdb_count) 5562 gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count; 5563 5564 /* bitmaps and block group descriptor blocks */ 5565 ret += groups + gdpblocks; 5566 5567 /* Blocks for super block, inode, quota and xattr blocks */ 5568 ret += EXT4_META_TRANS_BLOCKS(inode->i_sb); 5569 5570 return ret; 5571 } 5572 5573 /* 5574 * Calculate the total number of credits to reserve to fit 5575 * the modification of a single pages into a single transaction, 5576 * which may include multiple chunks of block allocations. 5577 * 5578 * This could be called via ext4_write_begin() 5579 * 5580 * We need to consider the worse case, when 5581 * one new block per extent. 5582 */ 5583 int ext4_writepage_trans_blocks(struct inode *inode) 5584 { 5585 int bpp = ext4_journal_blocks_per_page(inode); 5586 int ret; 5587 5588 ret = ext4_meta_trans_blocks(inode, bpp, bpp); 5589 5590 /* Account for data blocks for journalled mode */ 5591 if (ext4_should_journal_data(inode)) 5592 ret += bpp; 5593 return ret; 5594 } 5595 5596 /* 5597 * Calculate the journal credits for a chunk of data modification. 5598 * 5599 * This is called from DIO, fallocate or whoever calling 5600 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks. 5601 * 5602 * journal buffers for data blocks are not included here, as DIO 5603 * and fallocate do no need to journal data buffers. 5604 */ 5605 int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks) 5606 { 5607 return ext4_meta_trans_blocks(inode, nrblocks, 1); 5608 } 5609 5610 /* 5611 * The caller must have previously called ext4_reserve_inode_write(). 5612 * Give this, we know that the caller already has write access to iloc->bh. 5613 */ 5614 int ext4_mark_iloc_dirty(handle_t *handle, 5615 struct inode *inode, struct ext4_iloc *iloc) 5616 { 5617 int err = 0; 5618 5619 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) 5620 return -EIO; 5621 5622 if (IS_I_VERSION(inode)) 5623 inode_inc_iversion(inode); 5624 5625 /* the do_update_inode consumes one bh->b_count */ 5626 get_bh(iloc->bh); 5627 5628 /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */ 5629 err = ext4_do_update_inode(handle, inode, iloc); 5630 put_bh(iloc->bh); 5631 return err; 5632 } 5633 5634 /* 5635 * On success, We end up with an outstanding reference count against 5636 * iloc->bh. This _must_ be cleaned up later. 5637 */ 5638 5639 int 5640 ext4_reserve_inode_write(handle_t *handle, struct inode *inode, 5641 struct ext4_iloc *iloc) 5642 { 5643 int err; 5644 5645 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) 5646 return -EIO; 5647 5648 err = ext4_get_inode_loc(inode, iloc); 5649 if (!err) { 5650 BUFFER_TRACE(iloc->bh, "get_write_access"); 5651 err = ext4_journal_get_write_access(handle, iloc->bh); 5652 if (err) { 5653 brelse(iloc->bh); 5654 iloc->bh = NULL; 5655 } 5656 } 5657 ext4_std_error(inode->i_sb, err); 5658 return err; 5659 } 5660 5661 /* 5662 * Expand an inode by new_extra_isize bytes. 5663 * Returns 0 on success or negative error number on failure. 5664 */ 5665 static int ext4_expand_extra_isize(struct inode *inode, 5666 unsigned int new_extra_isize, 5667 struct ext4_iloc iloc, 5668 handle_t *handle) 5669 { 5670 struct ext4_inode *raw_inode; 5671 struct ext4_xattr_ibody_header *header; 5672 5673 if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) 5674 return 0; 5675 5676 raw_inode = ext4_raw_inode(&iloc); 5677 5678 header = IHDR(inode, raw_inode); 5679 5680 /* No extended attributes present */ 5681 if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) || 5682 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { 5683 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE + 5684 EXT4_I(inode)->i_extra_isize, 0, 5685 new_extra_isize - EXT4_I(inode)->i_extra_isize); 5686 EXT4_I(inode)->i_extra_isize = new_extra_isize; 5687 return 0; 5688 } 5689 5690 /* try to expand with EAs present */ 5691 return ext4_expand_extra_isize_ea(inode, new_extra_isize, 5692 raw_inode, handle); 5693 } 5694 5695 /* 5696 * What we do here is to mark the in-core inode as clean with respect to inode 5697 * dirtiness (it may still be data-dirty). 5698 * This means that the in-core inode may be reaped by prune_icache 5699 * without having to perform any I/O. This is a very good thing, 5700 * because *any* task may call prune_icache - even ones which 5701 * have a transaction open against a different journal. 5702 * 5703 * Is this cheating? Not really. Sure, we haven't written the 5704 * inode out, but prune_icache isn't a user-visible syncing function. 5705 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) 5706 * we start and wait on commits. 5707 */ 5708 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) 5709 { 5710 struct ext4_iloc iloc; 5711 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 5712 static unsigned int mnt_count; 5713 int err, ret; 5714 5715 might_sleep(); 5716 trace_ext4_mark_inode_dirty(inode, _RET_IP_); 5717 err = ext4_reserve_inode_write(handle, inode, &iloc); 5718 if (err) 5719 return err; 5720 if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && 5721 !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) { 5722 /* 5723 * In nojournal mode, we can immediately attempt to expand 5724 * the inode. When journaled, we first need to obtain extra 5725 * buffer credits since we may write into the EA block 5726 * with this same handle. If journal_extend fails, then it will 5727 * only result in a minor loss of functionality for that inode. 5728 * If this is felt to be critical, then e2fsck should be run to 5729 * force a large enough s_min_extra_isize. 5730 */ 5731 if (!ext4_handle_valid(handle) || 5732 jbd2_journal_extend(handle, 5733 EXT4_DATA_TRANS_BLOCKS(inode->i_sb)) == 0) { 5734 ret = ext4_expand_extra_isize(inode, 5735 sbi->s_want_extra_isize, 5736 iloc, handle); 5737 if (ret) { 5738 if (mnt_count != 5739 le16_to_cpu(sbi->s_es->s_mnt_count)) { 5740 ext4_warning(inode->i_sb, 5741 "Unable to expand inode %lu. Delete" 5742 " some EAs or run e2fsck.", 5743 inode->i_ino); 5744 mnt_count = 5745 le16_to_cpu(sbi->s_es->s_mnt_count); 5746 } 5747 } 5748 } 5749 } 5750 return ext4_mark_iloc_dirty(handle, inode, &iloc); 5751 } 5752 5753 /* 5754 * ext4_dirty_inode() is called from __mark_inode_dirty() 5755 * 5756 * We're really interested in the case where a file is being extended. 5757 * i_size has been changed by generic_commit_write() and we thus need 5758 * to include the updated inode in the current transaction. 5759 * 5760 * Also, dquot_alloc_block() will always dirty the inode when blocks 5761 * are allocated to the file. 5762 * 5763 * If the inode is marked synchronous, we don't honour that here - doing 5764 * so would cause a commit on atime updates, which we don't bother doing. 5765 * We handle synchronous inodes at the highest possible level. 5766 * 5767 * If only the I_DIRTY_TIME flag is set, we can skip everything. If 5768 * I_DIRTY_TIME and I_DIRTY_SYNC is set, the only inode fields we need 5769 * to copy into the on-disk inode structure are the timestamp files. 5770 */ 5771 void ext4_dirty_inode(struct inode *inode, int flags) 5772 { 5773 handle_t *handle; 5774 5775 if (flags == I_DIRTY_TIME) 5776 return; 5777 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); 5778 if (IS_ERR(handle)) 5779 goto out; 5780 5781 ext4_mark_inode_dirty(handle, inode); 5782 5783 ext4_journal_stop(handle); 5784 out: 5785 return; 5786 } 5787 5788 #if 0 5789 /* 5790 * Bind an inode's backing buffer_head into this transaction, to prevent 5791 * it from being flushed to disk early. Unlike 5792 * ext4_reserve_inode_write, this leaves behind no bh reference and 5793 * returns no iloc structure, so the caller needs to repeat the iloc 5794 * lookup to mark the inode dirty later. 5795 */ 5796 static int ext4_pin_inode(handle_t *handle, struct inode *inode) 5797 { 5798 struct ext4_iloc iloc; 5799 5800 int err = 0; 5801 if (handle) { 5802 err = ext4_get_inode_loc(inode, &iloc); 5803 if (!err) { 5804 BUFFER_TRACE(iloc.bh, "get_write_access"); 5805 err = jbd2_journal_get_write_access(handle, iloc.bh); 5806 if (!err) 5807 err = ext4_handle_dirty_metadata(handle, 5808 NULL, 5809 iloc.bh); 5810 brelse(iloc.bh); 5811 } 5812 } 5813 ext4_std_error(inode->i_sb, err); 5814 return err; 5815 } 5816 #endif 5817 5818 int ext4_change_inode_journal_flag(struct inode *inode, int val) 5819 { 5820 journal_t *journal; 5821 handle_t *handle; 5822 int err; 5823 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 5824 5825 /* 5826 * We have to be very careful here: changing a data block's 5827 * journaling status dynamically is dangerous. If we write a 5828 * data block to the journal, change the status and then delete 5829 * that block, we risk forgetting to revoke the old log record 5830 * from the journal and so a subsequent replay can corrupt data. 5831 * So, first we make sure that the journal is empty and that 5832 * nobody is changing anything. 5833 */ 5834 5835 journal = EXT4_JOURNAL(inode); 5836 if (!journal) 5837 return 0; 5838 if (is_journal_aborted(journal)) 5839 return -EROFS; 5840 5841 /* Wait for all existing dio workers */ 5842 ext4_inode_block_unlocked_dio(inode); 5843 inode_dio_wait(inode); 5844 5845 /* 5846 * Before flushing the journal and switching inode's aops, we have 5847 * to flush all dirty data the inode has. There can be outstanding 5848 * delayed allocations, there can be unwritten extents created by 5849 * fallocate or buffered writes in dioread_nolock mode covered by 5850 * dirty data which can be converted only after flushing the dirty 5851 * data (and journalled aops don't know how to handle these cases). 5852 */ 5853 if (val) { 5854 down_write(&EXT4_I(inode)->i_mmap_sem); 5855 err = filemap_write_and_wait(inode->i_mapping); 5856 if (err < 0) { 5857 up_write(&EXT4_I(inode)->i_mmap_sem); 5858 ext4_inode_resume_unlocked_dio(inode); 5859 return err; 5860 } 5861 } 5862 5863 percpu_down_write(&sbi->s_journal_flag_rwsem); 5864 jbd2_journal_lock_updates(journal); 5865 5866 /* 5867 * OK, there are no updates running now, and all cached data is 5868 * synced to disk. We are now in a completely consistent state 5869 * which doesn't have anything in the journal, and we know that 5870 * no filesystem updates are running, so it is safe to modify 5871 * the inode's in-core data-journaling state flag now. 5872 */ 5873 5874 if (val) 5875 ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); 5876 else { 5877 err = jbd2_journal_flush(journal); 5878 if (err < 0) { 5879 jbd2_journal_unlock_updates(journal); 5880 percpu_up_write(&sbi->s_journal_flag_rwsem); 5881 ext4_inode_resume_unlocked_dio(inode); 5882 return err; 5883 } 5884 ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); 5885 } 5886 ext4_set_aops(inode); 5887 /* 5888 * Update inode->i_flags after EXT4_INODE_JOURNAL_DATA was updated. 5889 * E.g. S_DAX may get cleared / set. 5890 */ 5891 ext4_set_inode_flags(inode); 5892 5893 jbd2_journal_unlock_updates(journal); 5894 percpu_up_write(&sbi->s_journal_flag_rwsem); 5895 5896 if (val) 5897 up_write(&EXT4_I(inode)->i_mmap_sem); 5898 ext4_inode_resume_unlocked_dio(inode); 5899 5900 /* Finally we can mark the inode as dirty. */ 5901 5902 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1); 5903 if (IS_ERR(handle)) 5904 return PTR_ERR(handle); 5905 5906 err = ext4_mark_inode_dirty(handle, inode); 5907 ext4_handle_sync(handle); 5908 ext4_journal_stop(handle); 5909 ext4_std_error(inode->i_sb, err); 5910 5911 return err; 5912 } 5913 5914 static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh) 5915 { 5916 return !buffer_mapped(bh); 5917 } 5918 5919 int ext4_page_mkwrite(struct vm_fault *vmf) 5920 { 5921 struct vm_area_struct *vma = vmf->vma; 5922 struct page *page = vmf->page; 5923 loff_t size; 5924 unsigned long len; 5925 int ret; 5926 struct file *file = vma->vm_file; 5927 struct inode *inode = file_inode(file); 5928 struct address_space *mapping = inode->i_mapping; 5929 handle_t *handle; 5930 get_block_t *get_block; 5931 int retries = 0; 5932 5933 sb_start_pagefault(inode->i_sb); 5934 file_update_time(vma->vm_file); 5935 5936 down_read(&EXT4_I(inode)->i_mmap_sem); 5937 5938 ret = ext4_convert_inline_data(inode); 5939 if (ret) 5940 goto out_ret; 5941 5942 /* Delalloc case is easy... */ 5943 if (test_opt(inode->i_sb, DELALLOC) && 5944 !ext4_should_journal_data(inode) && 5945 !ext4_nonda_switch(inode->i_sb)) { 5946 do { 5947 ret = block_page_mkwrite(vma, vmf, 5948 ext4_da_get_block_prep); 5949 } while (ret == -ENOSPC && 5950 ext4_should_retry_alloc(inode->i_sb, &retries)); 5951 goto out_ret; 5952 } 5953 5954 lock_page(page); 5955 size = i_size_read(inode); 5956 /* Page got truncated from under us? */ 5957 if (page->mapping != mapping || page_offset(page) > size) { 5958 unlock_page(page); 5959 ret = VM_FAULT_NOPAGE; 5960 goto out; 5961 } 5962 5963 if (page->index == size >> PAGE_SHIFT) 5964 len = size & ~PAGE_MASK; 5965 else 5966 len = PAGE_SIZE; 5967 /* 5968 * Return if we have all the buffers mapped. This avoids the need to do 5969 * journal_start/journal_stop which can block and take a long time 5970 */ 5971 if (page_has_buffers(page)) { 5972 if (!ext4_walk_page_buffers(NULL, page_buffers(page), 5973 0, len, NULL, 5974 ext4_bh_unmapped)) { 5975 /* Wait so that we don't change page under IO */ 5976 wait_for_stable_page(page); 5977 ret = VM_FAULT_LOCKED; 5978 goto out; 5979 } 5980 } 5981 unlock_page(page); 5982 /* OK, we need to fill the hole... */ 5983 if (ext4_should_dioread_nolock(inode)) 5984 get_block = ext4_get_block_unwritten; 5985 else 5986 get_block = ext4_get_block; 5987 retry_alloc: 5988 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 5989 ext4_writepage_trans_blocks(inode)); 5990 if (IS_ERR(handle)) { 5991 ret = VM_FAULT_SIGBUS; 5992 goto out; 5993 } 5994 ret = block_page_mkwrite(vma, vmf, get_block); 5995 if (!ret && ext4_should_journal_data(inode)) { 5996 if (ext4_walk_page_buffers(handle, page_buffers(page), 0, 5997 PAGE_SIZE, NULL, do_journal_get_write_access)) { 5998 unlock_page(page); 5999 ret = VM_FAULT_SIGBUS; 6000 ext4_journal_stop(handle); 6001 goto out; 6002 } 6003 ext4_set_inode_state(inode, EXT4_STATE_JDATA); 6004 } 6005 ext4_journal_stop(handle); 6006 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 6007 goto retry_alloc; 6008 out_ret: 6009 ret = block_page_mkwrite_return(ret); 6010 out: 6011 up_read(&EXT4_I(inode)->i_mmap_sem); 6012 sb_end_pagefault(inode->i_sb); 6013 return ret; 6014 } 6015 6016 int ext4_filemap_fault(struct vm_fault *vmf) 6017 { 6018 struct inode *inode = file_inode(vmf->vma->vm_file); 6019 int err; 6020 6021 down_read(&EXT4_I(inode)->i_mmap_sem); 6022 err = filemap_fault(vmf); 6023 up_read(&EXT4_I(inode)->i_mmap_sem); 6024 6025 return err; 6026 } 6027 6028 /* 6029 * Find the first extent at or after @lblk in an inode that is not a hole. 6030 * Search for @map_len blocks at most. The extent is returned in @result. 6031 * 6032 * The function returns 1 if we found an extent. The function returns 0 in 6033 * case there is no extent at or after @lblk and in that case also sets 6034 * @result->es_len to 0. In case of error, the error code is returned. 6035 */ 6036 int ext4_get_next_extent(struct inode *inode, ext4_lblk_t lblk, 6037 unsigned int map_len, struct extent_status *result) 6038 { 6039 struct ext4_map_blocks map; 6040 struct extent_status es = {}; 6041 int ret; 6042 6043 map.m_lblk = lblk; 6044 map.m_len = map_len; 6045 6046 /* 6047 * For non-extent based files this loop may iterate several times since 6048 * we do not determine full hole size. 6049 */ 6050 while (map.m_len > 0) { 6051 ret = ext4_map_blocks(NULL, inode, &map, 0); 6052 if (ret < 0) 6053 return ret; 6054 /* There's extent covering m_lblk? Just return it. */ 6055 if (ret > 0) { 6056 int status; 6057 6058 ext4_es_store_pblock(result, map.m_pblk); 6059 result->es_lblk = map.m_lblk; 6060 result->es_len = map.m_len; 6061 if (map.m_flags & EXT4_MAP_UNWRITTEN) 6062 status = EXTENT_STATUS_UNWRITTEN; 6063 else 6064 status = EXTENT_STATUS_WRITTEN; 6065 ext4_es_store_status(result, status); 6066 return 1; 6067 } 6068 ext4_es_find_delayed_extent_range(inode, map.m_lblk, 6069 map.m_lblk + map.m_len - 1, 6070 &es); 6071 /* Is delalloc data before next block in extent tree? */ 6072 if (es.es_len && es.es_lblk < map.m_lblk + map.m_len) { 6073 ext4_lblk_t offset = 0; 6074 6075 if (es.es_lblk < lblk) 6076 offset = lblk - es.es_lblk; 6077 result->es_lblk = es.es_lblk + offset; 6078 ext4_es_store_pblock(result, 6079 ext4_es_pblock(&es) + offset); 6080 result->es_len = es.es_len - offset; 6081 ext4_es_store_status(result, ext4_es_status(&es)); 6082 6083 return 1; 6084 } 6085 /* There's a hole at m_lblk, advance us after it */ 6086 map.m_lblk += map.m_len; 6087 map_len -= map.m_len; 6088 map.m_len = map_len; 6089 cond_resched(); 6090 } 6091 result->es_len = 0; 6092 return 0; 6093 } 6094