// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 * (jj@sunsite.ms.mff.cuni.cz)
 *
 * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/iomap.h>
#include <linux/iversion.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			     struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;
	__u16 dummy_csum = 0;
	int offset = offsetof(struct ext4_inode, i_checksum_lo);
	unsigned int csum_size = sizeof(dummy_csum);

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
	offset += csum_size;
	csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
			   EXT4_GOOD_OLD_INODE_SIZE - offset);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		offset = offsetof(struct ext4_inode, i_checksum_hi);
		csum = ext4_chksum(sbi, csum, (__u8 *)raw +
				   EXT4_GOOD_OLD_INODE_SIZE,
				   offset - EXT4_GOOD_OLD_INODE_SIZE);
		if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
			csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
					   csum_size);
			offset += csum_size;
		}
		csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
				   EXT4_INODE_SIZE(inode->i_sb) - offset);
	}

	return csum;
}

static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}

static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
				struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}
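
/*
 * Illustrative note (not part of the original source): the 32-bit inode
 * checksum is stored split across two 16-bit on-disk fields. A reader
 * reassembles it roughly as:
 *
 *	csum = le16_to_cpu(raw->i_checksum_lo);
 *	if (large inode && i_checksum_hi fits in the inode)
 *		csum |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
 *
 * On old 128-byte inodes only the low 16 bits exist, which is why
 * ext4_inode_csum_verify() masks the calculated value with 0xFFFF in
 * that case.
 */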
static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents);

/*
 * Test whether an inode is a fast symlink.
 * A fast symlink has its symlink data stored in ext4_inode_info->i_data.
 */
int ext4_inode_is_fast_symlink(struct inode *inode)
{
	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
		int ea_blocks = EXT4_I(inode)->i_file_acl ?
				EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;

		if (ext4_has_inline_data(inode))
			return 0;

		return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
	}
	return S_ISLNK(inode->i_mode) && inode->i_size &&
	       (inode->i_size < EXT4_N_BLOCKS * 4);
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;
	/*
	 * Credits for final inode cleanup and freeing:
	 * sb + inode (ext4_orphan_del()), block bitmap, group descriptor
	 * (xattr block freeing), bitmap, group descriptor (inode freeing)
	 */
	int extra_credits = 6;
	struct ext4_xattr_inode_array *ea_inode_array = NULL;

	trace_ext4_evict_inode(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data, dirty buffers are tracked only in
		 * the journal. So although mm thinks everything is clean and
		 * ready for reaping, the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed. Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss. Also even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped and thus a user could see stale data when trying
		 * to read them before the transaction is checkpointed. So be
		 * careful and force everything to disk here... We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use page cache.
		 */
		if (inode->i_ino != EXT4_JOURNAL_INO &&
		    ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
		    inode->i_data.nrpages) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_complete_transaction(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages_final(&inode->i_data);

		goto no_delete;
	}

	if (is_bad_inode(inode))
		goto no_delete;
	dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages_final(&inode->i_data);

	/*
	 * Protect us against freezing - iput() caller didn't have to have any
	 * protection against it
	 */
	sb_start_intwrite(inode->i_sb);

	if (!IS_NOQUOTA(inode))
		extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);

	/*
	 * Block bitmap, group descriptor, and inode are accounted in both
	 * ext4_blocks_for_truncate() and extra_credits. So subtract 3.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
			 ext4_blocks_for_truncate(inode) + extra_credits - 3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	/*
	 * Set inode->i_size to 0 before calling ext4_truncate(). We need
	 * special handling of symlinks here because i_size is used to
	 * determine whether ext4_inode_info->i_data contains symlink data or
	 * block mappings. Setting i_size to 0 will remove its fast symlink
	 * status. Erase i_data so that it becomes a valid empty block map.
	 */
	if (ext4_inode_is_fast_symlink(inode))
		memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks) {
		err = ext4_truncate(inode);
		if (err) {
			ext4_error_err(inode->i_sb, -err,
				       "couldn't truncate inode %lu (err %d)",
				       inode->i_ino, err);
			goto stop_handle;
		}
	}

	/* Remove xattr references. */
	err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
				      extra_credits);
	if (err) {
		ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
stop_handle:
		ext4_journal_stop(handle);
		ext4_orphan_del(NULL, inode);
		sb_end_intwrite(inode->i_sb);
		ext4_xattr_inode_array_free(ea_inode_array);
		goto no_delete;
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = (__u32)ktime_get_real_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	sb_end_intwrite(inode->i_sb);
	ext4_xattr_inode_array_free(ea_inode_array);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    !inode_is_open_for_write(inode))
		ext4_discard_preallocations(inode);
}

static int __check_block_validity(struct inode *inode, const char *func,
				unsigned int line,
				struct ext4_map_blocks *map)
{
	if (ext4_has_feature_journal(inode->i_sb) &&
	    (inode->i_ino ==
	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
		return 0;
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock %llu "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_pblk, map->m_len);
		return -EFSCORRUPTED;
	}
	return 0;
}

int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
		       ext4_lblk_t len)
{
	int ret;

	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
		return fscrypt_zeroout_range(inode, lblk, pblk, len);

	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
				       struct inode *inode,
				       struct ext4_map_blocks *es_map,
				       struct ext4_map_blocks *map,
				       int flags)
{
	int retval;

	map->m_flags = 0;
	/*
	 * There is a race window in which the result is not the same.
	 * e.g. xfstests #223 when dioread_nolock is enabled. The reason
	 * is that we look up a block mapping in the extent status tree
	 * without taking i_data_sem. So at that time the unwritten extent
	 * could already have been converted.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	/*
	 * We don't check m_len because the extent will be collapsed in the
	 * status tree, so the lengths might not be equal.
	 */
	if (es_map->m_lblk != map->m_lblk ||
	    es_map->m_flags != map->m_flags ||
	    es_map->m_pblk != map->m_pblk) {
		printk("ES cache assertion failed for inode: %lu "
		       "es_cached ex [%d/%d/%llu/%x] != "
		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
		       inode->i_ino, es_map->m_lblk, es_map->m_len,
		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
		       map->m_len, map->m_pblk, map->m_flags,
		       retval, flags);
	}
}
#endif /* ES_AGGRESSIVE_TEST */
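
/*
 * Illustrative sketch (not in the original source): a typical lookup-only
 * call to ext4_map_blocks() (documented below) passes a NULL handle and no
 * EXT4_GET_BLOCKS_CREATE flag:
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
 *	int ret = ext4_map_blocks(NULL, inode, &map, 0);
 *
 * ret > 0 means "ret" blocks are mapped (see map.m_pblk and map.m_flags),
 * ret == 0 means a hole of map.m_len blocks, and ret < 0 is an error.
 */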

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head, and marks it
 * mapped.
 *
 * If the file is extent based, it calls ext4_ext_map_blocks(); otherwise
 * it calls ext4_ind_map_blocks() to handle indirect-mapping-based files.
 *
 * On success, it returns the number of blocks being mapped or allocated. If
 * create == 0 and the blocks are pre-allocated and unwritten, the resulting
 * @map is marked as unwritten. If create == 1, it will mark @map as mapped.
 *
 * It returns 0 if a plain lookup failed (blocks have not been allocated); in
 * that case, @map is returned as unmapped but we still fill in map->m_len to
 * indicate the length of a hole starting at map->m_lblk.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	struct extent_status es;
	int retval;
	int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);

	/*
	 * ext4_map_blocks returns an int, and m_len is an unsigned int
	 */
	if (unlikely(map->m_len > INT_MAX))
		map->m_len = INT_MAX;

	/* We can handle the block number less than EXT_MAX_BLOCKS */
	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
		return -EFSCORRUPTED;

	/* Lookup extent status tree firstly */
	if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
			map->m_pblk = ext4_es_pblock(&es) +
					map->m_lblk - es.es_lblk;
			map->m_flags |= ext4_es_is_written(&es) ?
					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
			map->m_pblk = 0;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
			retval = 0;
		} else {
			BUG();
		}
#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(handle, inode, map,
					   &orig_map, flags);
#endif
		goto found;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk,
					    map->m_len, map->m_pblk, status);
		if (ret < 0)
			retval = ret;
	}
	up_read((&EXT4_I(inode)->i_data_sem));

found:
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create == 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		/*
		 * If we need to convert the extent to unwritten
		 * we continue and do the actual work in
		 * ext4_ext_map_blocks()
		 */
		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
			return retval;

	/*
	 * Here we clear m_flags because after allocating a new extent,
	 * it will be set again.
	 */
	map->m_flags &= ~EXT4_MAP_FLAGS;

	/*
	 * Allocating new blocks and/or writing to an unwritten extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_block()
	 * with create == 1 flag.
	 */
	down_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We need to check the inode flag again here because migration
	 * could have changed the inode type in between.
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing. Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now. We don't
		 * support fallocate for non-extent files, so we can update
		 * the reserved space here.
		 */
		if ((retval > 0) &&
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}

	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		/*
		 * We have to zero out blocks before inserting them into the
		 * extent status tree. Otherwise someone could look them up
		 * there and use them before they are really zeroed. We also
		 * have to unmap metadata before zeroing as otherwise writeback
		 * can overwrite zeros with stale data from the block device.
		 */
		if (flags & EXT4_GET_BLOCKS_ZERO &&
		    map->m_flags & EXT4_MAP_MAPPED &&
		    map->m_flags & EXT4_MAP_NEW) {
			ret = ext4_issue_zeroout(inode, map->m_lblk,
						 map->m_pblk, map->m_len);
			if (ret) {
				retval = ret;
				goto out_sem;
			}
		}

		/*
		 * If the extent has been zeroed out, we don't need to update
		 * the extent status tree.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
		    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
			if (ext4_es_is_written(&es))
				goto out_sem;
		}
		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    map->m_pblk, status);
		if (ret < 0) {
			retval = ret;
			goto out_sem;
		}
	}

out_sem:
	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;

		/*
		 * Inodes with freshly allocated blocks where contents will be
		 * visible after transaction commit must be on transaction's
		 * ordered data list.
		 */
		if (map->m_flags & EXT4_MAP_NEW &&
		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
		    !(flags & EXT4_GET_BLOCKS_ZERO) &&
		    !ext4_is_quota_file(inode) &&
		    ext4_should_order_data(inode)) {
			loff_t start_byte =
				(loff_t)map->m_lblk << inode->i_blkbits;
			loff_t length = (loff_t)map->m_len << inode->i_blkbits;

			if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
				ret = ext4_jbd2_inode_add_wait(handle, inode,
						start_byte, length);
			else
				ret = ext4_jbd2_inode_add_write(handle, inode,
						start_byte, length);
			if (ret)
				return ret;
		}
	}
	return retval;
}

/*
 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
 * we have to be careful as someone else may be manipulating b_state as well.
 */
static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
{
	unsigned long old_state;
	unsigned long new_state;

	flags &= EXT4_MAP_FLAGS;

	/* Dummy buffer_head? Set non-atomically. */
	if (!bh->b_page) {
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
		return;
	}
	/*
	 * Someone else may be modifying b_state. Be careful! This is ugly but
	 * once we get rid of using bh as a container for mapping information
	 * to pass to / from get_block functions, this can go away.
	 */
	do {
		old_state = READ_ONCE(bh->b_state);
		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
	} while (unlikely(
		 cmpxchg(&bh->b_state, old_state, new_state) != old_state));
}

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	struct ext4_map_blocks map;
	int ret = 0;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
			      flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		ext4_update_bh_state(bh, map.m_flags);
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	} else if (ret == 0) {
		/* hole case, need to fill in bh->b_size */
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
	}
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}

/*
 * Get block function used when preparing for buffered write if we require
 * creating an unwritten extent if blocks haven't been allocated. The extent
 * will be converted to written after the IO is complete.
 */
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	return _ext4_get_block(inode, iblock, bh_result,
			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int map_flags)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int create = map_flags & EXT4_GET_BLOCKS_CREATE;
	int err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map, map_flags);

	if (err == 0)
		return create ? ERR_PTR(-ENOSPC) : NULL;
	if (err < 0)
		return ERR_PTR(err);

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata. For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (unlikely(err)) {
			unlock_buffer(bh);
			goto errout;
		}
		if (!buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			goto errout;
	} else
		BUFFER_TRACE(bh, "not a new buffer");
	return bh;
errout:
	brelse(bh);
	return ERR_PTR(err);
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int map_flags)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, map_flags);
	if (IS_ERR(bh))
		return bh;
	if (!bh || ext4_buffer_uptodate(bh))
		return bh;
	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	return ERR_PTR(-EIO);
}

/* Read a contiguous batch of blocks. */
int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
		     bool wait, struct buffer_head **bhs)
{
	int i, err;

	for (i = 0; i < bh_count; i++) {
		bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
		if (IS_ERR(bhs[i])) {
			err = PTR_ERR(bhs[i]);
			bh_count = i;
			goto out_brelse;
		}
	}

	for (i = 0; i < bh_count; i++)
		/* Note that NULL bhs[i] is valid because of holes. */
		if (bhs[i] && !ext4_buffer_uptodate(bhs[i]))
			ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1,
				    &bhs[i]);

	if (!wait)
		return 0;

	for (i = 0; i < bh_count; i++)
		if (bhs[i])
			wait_on_buffer(bhs[i]);

	for (i = 0; i < bh_count; i++) {
		if (bhs[i] && !buffer_uptodate(bhs[i])) {
			err = -EIO;
			goto out_brelse;
		}
	}
	return 0;

out_brelse:
	for (i = 0; i < bh_count; i++) {
		brelse(bhs[i]);
		bhs[i] = NULL;
	}
	return err;
}

int ext4_walk_page_buffers(handle_t *handle,
			   struct buffer_head *head,
			   unsigned from,
			   unsigned to,
			   int *partial,
			   int (*fn)(handle_t *handle,
				     struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
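
/*
 * Illustrative sketch (not in the original source): ext4_walk_page_buffers()
 * applies a callback to each buffer_head overlapping [from, to) in a page.
 * A typical call, as done later in ext4_write_begin() for data=journal
 * mode, looks like:
 *
 *	ret = ext4_walk_page_buffers(handle, page_buffers(page),
 *				     from, to, NULL,
 *				     do_journal_get_write_access);
 */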

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction. We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write(). So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage(). In that case, we
 * *know* that ext4_writepage() has generated enough buffer credits to do the
 * whole page. So we won't block on the journal in that case, which is good,
 * because the caller may be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes. If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated. We'll still have enough credits for the tiny quotafile
 * write.
 */
int do_journal_get_write_access(handle_t *handle,
				struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers. Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues. Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	BUFFER_TRACE(bh, "get write access");
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}

#ifdef CONFIG_FS_ENCRYPTION
static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
				  get_block_t *get_block)
{
	unsigned from = pos & (PAGE_SIZE - 1);
	unsigned to = from + len;
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;
	unsigned bbits;
	struct buffer_head *bh, *head, *wait[2];
	int nr_wait = 0;
	int i;

	BUG_ON(!PageLocked(page));
	BUG_ON(from > PAGE_SIZE);
	BUG_ON(to > PAGE_SIZE);
	BUG_ON(from > to);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);
	bbits = ilog2(blocksize);
	block = (sector_t)page->index << (PAGE_SHIFT - bbits);

	for (bh = head, block_start = 0; bh != head || !block_start;
	     block++, block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				break;
			if (buffer_new(bh)) {
				if (PageUptodate(page)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
					continue;
				}
				if (block_end > to || block_start < from)
					zero_user_segments(page, to, block_end,
							   block_start, from);
				continue;
			}
		}
		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		    (block_start < from || block_end > to)) {
			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
			wait[nr_wait++] = bh;
		}
	}
	/*
	 * If we issued read requests, let them complete.
	 */
	for (i = 0; i < nr_wait; i++) {
		wait_on_buffer(wait[i]);
		if (!buffer_uptodate(wait[i]))
			err = -EIO;
	}
	if (unlikely(err)) {
		page_zero_new_buffers(page, from, to);
	} else if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
		for (i = 0; i < nr_wait; i++) {
			int err2;

			err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize,
								bh_offset(wait[i]));
			if (err2) {
				clear_buffer_uptodate(wait[i]);
				err = err2;
			}
		}
	}

	return err;
}
#endif

static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_SHIFT;
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
						    flags, pagep);
		if (ret < 0)
			return ret;
		if (ret == 1)
			return 0;
	}

	/*
	 * grab_cache_page_write_begin() can take a long time if the
	 * system is thrashing due to memory pressure, or if the page
	 * is being written back. So grab it first before we start
	 * the transaction handle. This also allows us to allocate
	 * the page (if needed) without using GFP_NOFS.
	 */
retry_grab:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	unlock_page(page);

retry_journal:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
	if (IS_ERR(handle)) {
		put_page(page);
		return PTR_ERR(handle);
	}

	lock_page(page);
	if (page->mapping != mapping) {
		/* The page got truncated from under us */
		unlock_page(page);
		put_page(page);
		ext4_journal_stop(handle);
		goto retry_grab;
	}
	/* In case writeback began while the page was unlocked */
	wait_for_stable_page(page);

#ifdef CONFIG_FS_ENCRYPTION
	if (ext4_should_dioread_nolock(inode))
		ret = ext4_block_write_begin(page, pos, len,
					     ext4_get_block_unwritten);
	else
		ret = ext4_block_write_begin(page, pos, len,
					     ext4_get_block);
#else
	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len,
					  ext4_get_block_unwritten);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);
#endif
	if (!ret && ext4_should_journal_data(inode)) {
		ret = ext4_walk_page_buffers(handle, page_buffers(page),
					     from, to, NULL,
					     do_journal_get_write_access);
	}

	if (ret) {
		bool extended = (pos + len > inode->i_size) &&
				!ext4_verity_in_progress(inode);

		unlock_page(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size. Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (extended && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (extended) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}

		if (ret == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry_journal;
		put_page(page);
		return ret;
	}
	*pagep = page;
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	int ret;
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	clear_buffer_meta(bh);
	clear_buffer_prio(bh);
	return ret;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list. Metadata
 * buffers are managed internally.
 */
static int ext4_write_end(struct file *file,
			  struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int i_size_changed = 0;
	int inline_data = ext4_has_inline_data(inode);
	bool verity = ext4_verity_in_progress(inode);

	trace_ext4_write_end(inode, pos, len, copied);
	if (inline_data) {
		ret = ext4_write_inline_data_end(inode, pos, len,
						 copied, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			goto errout;
		}
		copied = ret;
	} else
		copied = block_write_end(file, mapping, pos,
					 len, copied, page, fsdata);
	/*
	 * It's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 *
	 * If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree
	 * blocks are being written past EOF, so skip the i_size update.
	 */
	if (!verity)
		i_size_changed = ext4_update_inode_size(inode, pos + copied);
	unlock_page(page);
	put_page(page);

	if (old_size < pos && !verity)
		pagecache_isize_extended(inode, old_size, pos);
	/*
	 * Don't mark the inode dirty under page lock. First, it unnecessarily
	 * makes the holding time of page lock longer. Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed || inline_data)
		ext4_mark_inode_dirty(handle, inode);

	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks than copied, we will have
		 * blocks allocated outside inode->i_size, so truncate them.
		 */
		ext4_orphan_add(handle, inode);
errout:
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size && !verity) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
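
/*
 * Illustrative note (not part of the original source): ext4_write_begin()
 * and ext4_write_end() form the ->write_begin/->write_end pair of the
 * address_space operations used for buffered writes; the VFS calls them
 * around its copy of user data into the page cache, roughly:
 *
 *	a_ops->write_begin(...);	- reserve blocks, lock the page
 *	(generic code copies user data into the page)
 *	a_ops->write_end(...);		- update i_size, dirty the page
 */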

/*
 * This is a private version of page_zero_new_buffers() which doesn't
 * set the buffer to be dirty, since in data=journalled mode we need
 * to call ext4_handle_dirty_metadata() instead.
 */
static void ext4_journalled_zero_new_buffers(handle_t *handle,
					     struct page *page,
					     unsigned from, unsigned to)
{
	unsigned int block_start = 0, block_end;
	struct buffer_head *head, *bh;

	bh = head = page_buffers(page);
	do {
		block_end = block_start + bh->b_size;
		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, size;

					start = max(from, block_start);
					size = min(to, block_end) - start;

					zero_user(page, start, size);
					write_end_fn(handle, bh);
				}
				clear_buffer_new(bh);
			}
		}
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}

static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	int size_changed = 0;
	int inline_data = ext4_has_inline_data(inode);
	bool verity = ext4_verity_in_progress(inode);

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (inline_data) {
		ret = ext4_write_inline_data_end(inode, pos, len,
						 copied, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			goto errout;
		}
		copied = ret;
	} else if (unlikely(copied < len) && !PageUptodate(page)) {
		copied = 0;
		ext4_journalled_zero_new_buffers(handle, page, from, to);
	} else {
		if (unlikely(copied < len))
			ext4_journalled_zero_new_buffers(handle, page,
							 from + copied, to);
		ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
					     from + copied, &partial,
					     write_end_fn);
		if (!partial)
			SetPageUptodate(page);
	}
	if (!verity)
		size_changed = ext4_update_inode_size(inode, pos + copied);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	unlock_page(page);
	put_page(page);

	if (old_size < pos && !verity)
		pagecache_isize_extended(inode, old_size, pos);

	if (size_changed || inline_data) {
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks than copied, we will have
		 * blocks allocated outside inode->i_size, so truncate them.
		 */
		ext4_orphan_add(handle, inode);

errout:
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size && !verity) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * Reserve space for a single cluster
 */
static int ext4_da_reserve_space(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	int ret;

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end. Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
	if (ret)
		return ret;

	spin_lock(&ei->i_block_reservation_lock);
	if (ext4_claim_free_clusters(sbi, 1, 0)) {
		spin_unlock(&ei->i_block_reservation_lock);
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
		return -ENOSPC;
	}
	ei->i_reserved_data_blocks++;
	trace_ext4_da_reserve_space(inode);
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;	/* success */
}

void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * If there aren't enough reserved blocks, then the
		 * counter is messed up somewhere. Since this
		 * function is called from invalidatepage, it's
		 * harmless to return without any action.
		 */
		ext4_warning(inode->i_sb, "ext4_da_release_space: "
			 "ino %lu, to_free %d with only %d reserved "
			 "data blocks", inode->i_ino, to_free,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}

/*
 * Delayed allocation stuff
 */

struct mpage_da_data {
	struct inode *inode;
	struct writeback_control *wbc;

	pgoff_t first_page;	/* The first page to write */
	pgoff_t next_page;	/* Current page to examine */
	pgoff_t last_page;	/* Last page to examine */
	/*
	 * Extent to map - this can be after first_page because that can be
	 * fully mapped. We somewhat abuse m_flags to store whether the extent
	 * is delalloc or unwritten.
	 */
	struct ext4_map_blocks map;
	struct ext4_io_submit io_submit;	/* IO submission data */
	unsigned int do_map:1;
};

static void mpage_release_unused_pages(struct mpage_da_data *mpd,
				       bool invalidate)
{
	int nr_pages, i;
	pgoff_t index, end;
	struct pagevec pvec;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	/* This is necessary when next_page == 0. */
	if (mpd->first_page >= mpd->next_page)
		return;

	index = mpd->first_page;
	end   = mpd->next_page - 1;
	if (invalidate) {
		ext4_lblk_t start, last;
		start = index << (PAGE_SHIFT - inode->i_blkbits);
		last = end << (PAGE_SHIFT - inode->i_blkbits);
		ext4_es_remove_extent(inode, start, last - start + 1);
	}

	pagevec_init(&pvec);
	while (index <= end) {
		nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			if (invalidate) {
				if (page_mapped(page))
					clear_page_dirty_for_io(page);
				block_invalidatepage(page, 0, PAGE_SIZE);
				ClearPageUptodate(page);
			}
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}

static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct super_block *sb = inode->i_sb;
	struct ext4_inode_info *ei = EXT4_I(inode);

	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
	       EXT4_C2B(EXT4_SB(inode->i_sb),
			ext4_count_free_clusters(sb)));
	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
	       (long long) EXT4_C2B(EXT4_SB(sb),
		percpu_counter_sum(&sbi->s_freeclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
	       (long long) EXT4_C2B(EXT4_SB(sb),
		percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "Block reservation details");
	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
		 ei->i_reserved_data_blocks);
	return;
}

static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
}
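
/*
 * Illustrative summary (not part of the original source) of the reservation
 * decision made by ext4_insert_delayed_block() below:
 *
 *	cluster_ratio == 1		-> always reserve a new cluster
 *	bigalloc:
 *	  cluster already holds a delayed block	-> nothing to reserve
 *	  cluster mapped per the es tree	-> record it as allocated
 *	  cluster mapped per the extent tree	-> record it as allocated
 *	  otherwise				-> reserve a new cluster
 */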

/*
 * ext4_insert_delayed_block - adds a delayed block to the extents status
 *                             tree, incrementing the reserved cluster/block
 *                             count or making a pending reservation
 *                             where needed
 *
 * @inode - file containing the newly added block
 * @lblk - logical block to be added
 *
 * Returns 0 on success, negative error code on failure.
 */
static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int ret;
	bool allocated = false;

	/*
	 * If the cluster containing lblk is shared with a delayed,
	 * written, or unwritten extent in a bigalloc file system, it's
	 * already been accounted for and does not need to be reserved.
	 * A pending reservation must be made for the cluster if it's
	 * shared with a written or unwritten extent and doesn't already
	 * have one. Written and unwritten extents can be purged from the
	 * extents status tree if the system is under memory pressure, so
	 * it's necessary to examine the extent tree if a search of the
	 * extents status tree doesn't get a match.
	 */
	if (sbi->s_cluster_ratio == 1) {
		ret = ext4_da_reserve_space(inode);
		if (ret != 0)	/* ENOSPC */
			goto errout;
	} else {	/* bigalloc */
		if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
			if (!ext4_es_scan_clu(inode,
					      &ext4_es_is_mapped, lblk)) {
				ret = ext4_clu_mapped(inode,
						      EXT4_B2C(sbi, lblk));
				if (ret < 0)
					goto errout;
				if (ret == 0) {
					ret = ext4_da_reserve_space(inode);
					if (ret != 0)	/* ENOSPC */
						goto errout;
				} else {
					allocated = true;
				}
			} else {
				allocated = true;
			}
		}
	}

	ret = ext4_es_insert_delayed_block(inode, lblk, allocated);

errout:
	return ret;
}

/*
 * This function grabs code from the very beginning of ext4_map_blocks,
 * but assumes that the caller is from the delayed write path. It looks up
 * the requested blocks and sets the buffer delay bit under the protection
 * of i_data_sem.
 */
static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
			      struct ext4_map_blocks *map,
			      struct buffer_head *bh)
{
	struct extent_status es;
	int retval;
	sector_t invalid_block = ~((sector_t) 0xffff);
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
		invalid_block = ~0;

	map->m_flags = 0;
	ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, map->m_len,
		  (unsigned long) map->m_lblk);

	/* Lookup extent status tree firstly */
	if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
		if (ext4_es_is_hole(&es)) {
			retval = 0;
			down_read(&EXT4_I(inode)->i_data_sem);
			goto add_delayed;
		}

		/*
		 * Delayed extent could be allocated by fallocate.
		 * So we need to check it.
		 */
		if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
			map_bh(bh, inode->i_sb, invalid_block);
			set_buffer_new(bh);
			set_buffer_delay(bh);
			return 0;
		}

		map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
		retval = es.es_len - (iblock - es.es_lblk);
		if (retval > map->m_len)
			retval = map->m_len;
		map->m_len = retval;
		if (ext4_es_is_written(&es))
			map->m_flags |= EXT4_MAP_MAPPED;
		else if (ext4_es_is_unwritten(&es))
			map->m_flags |= EXT4_MAP_UNWRITTEN;
		else
			BUG();

#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
#endif
		return retval;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_has_inline_data(inode))
		retval = 0;
	else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		retval = ext4_ext_map_blocks(NULL, inode, map, 0);
	else
		retval = ext4_ind_map_blocks(NULL, inode, map, 0);

add_delayed:
	if (retval == 0) {
		int ret;

		/*
		 * XXX: __block_prepare_write() unmaps passed block,
		 * is it OK?
		 */

		ret = ext4_insert_delayed_block(inode, map->m_lblk);
		if (ret != 0) {
			retval = ret;
			goto out_unlock;
		}

		map_bh(bh, inode->i_sb, invalid_block);
		set_buffer_new(bh);
		set_buffer_delay(bh);
	} else if (retval > 0) {
		int ret;
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    map->m_pblk, status);
		if (ret != 0)
			retval = ret;
	}

out_unlock:
	up_read((&EXT4_I(inode)->i_data_sem));

	return retval;
}

/*
 * This is a special get_block_t callback which is used by
 * ext4_da_write_begin(). It will either return mapped block or
 * reserve space for a single block.
 *
 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
 * We also have b_blocknr = -1 and b_bdev initialized properly
 *
 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
 * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
 * initialized properly.
 */
int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct ext4_map_blocks map;
	int ret = 0;

	BUG_ON(create == 0);
	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);

	map.m_lblk = iblock;
	map.m_len = 1;

	/*
	 * First, we need to know whether the block is already allocated;
	 * preallocated blocks are unmapped but should be treated the same
	 * as allocated blocks.
	 */
	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
	if (ret <= 0)
		return ret;

	map_bh(bh, inode->i_sb, map.m_pblk);
	ext4_update_bh_state(bh, map.m_flags);

	if (buffer_unwritten(bh)) {
		/*
		 * A delayed write to an unwritten bh should be marked
		 * new and mapped. Mapped ensures that we don't do
		 * get_block multiple times when we write to the same
		 * offset and new ensures that we do proper zero out
		 * for partial write.
		 */
		set_buffer_new(bh);
		set_buffer_mapped(bh);
	}
	return 0;
}
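
/*
 * Illustrative sketch (not in the original source): as the comment above
 * says, ext4_da_get_block_prep() is the get_block_t callback for the
 * delayed-allocation write path, so a write_begin implementation would
 * hand it to the generic helper roughly as:
 *
 *	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
 */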

static int bget_one(handle_t *handle, struct buffer_head *bh)
{
	get_bh(bh);
	return 0;
}

static int bput_one(handle_t *handle, struct buffer_head *bh)
{
	put_bh(bh);
	return 0;
}

static int __ext4_journalled_writepage(struct page *page,
				       unsigned int len)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct buffer_head *page_bufs = NULL;
	handle_t *handle = NULL;
	int ret = 0, err = 0;
	int inline_data = ext4_has_inline_data(inode);
	struct buffer_head *inode_bh = NULL;

	ClearPageChecked(page);

	if (inline_data) {
		BUG_ON(page->index != 0);
		BUG_ON(len > ext4_get_max_inline_size(inode));
		inode_bh = ext4_journalled_write_inline_data(inode, len, page);
		if (inode_bh == NULL)
			goto out;
	} else {
		page_bufs = page_buffers(page);
		if (!page_bufs) {
			BUG();
			goto out;
		}
		ext4_walk_page_buffers(handle, page_bufs, 0, len,
				       NULL, bget_one);
	}
	/*
	 * We need to release the page lock before we start the
	 * journal, so grab a reference so the page won't disappear
	 * out from under us.
	 */
	get_page(page);
	unlock_page(page);

	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
				    ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		put_page(page);
		goto out_no_pagelock;
	}
	BUG_ON(!ext4_handle_valid(handle));

	lock_page(page);
	put_page(page);
	if (page->mapping != mapping) {
		/* The page got truncated from under us */
		ext4_journal_stop(handle);
		ret = 0;
		goto out;
	}

	if (inline_data) {
		ret = ext4_mark_inode_dirty(handle, inode);
	} else {
		ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
					     do_journal_get_write_access);

		err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
					     write_end_fn);
	}
	if (ret == 0)
		ret = err;
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;

	if (!ext4_has_inline_data(inode))
		ext4_walk_page_buffers(NULL, page_bufs, 0, len,
				       NULL, bput_one);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
out:
	unlock_page(page);
out_no_pagelock:
	brelse(inode_bh);
	return ret;
}

/*
 * Note that we don't need to start a transaction unless we're journaling data
 * because we should have holes filled from ext4_page_mkwrite(). We don't even
 * need to add the inode to the transaction's list in ordered mode because if
 * we are writing back data added by write(), the inode is already there and if
 * we are writing back data modified via mmap(), no one guarantees in which
 * transaction the data will hit the disk. In case we are journaling data, we
 * cannot start a transaction directly because transaction start ranks above
 * page lock so we have to do some magic.
 *
 * This function can get called via...
1934 * - ext4_writepages after taking page lock (have journal handle)
1935 * - journal_submit_inode_data_buffers (no journal handle)
1936 * - shrink_page_list via the kswapd/direct reclaim (no journal handle)
1937 * - grab_page_cache when doing write_begin (have journal handle)
1938 *
1939 * We don't do any block allocation in this function. If we have a page with
1940 * multiple blocks we need to write those buffer_heads that are mapped. This
1941 * is important for mmapped writes. So if, with blocksize 1K, we do
1942 * truncate(f, 1024);
1943 * a = mmap(f, 0, 4096);
1944 * a[0] = 'a';
1945 * truncate(f, 4096);
1946 * then the first buffer_head in the page is mapped via the page_mkwrite
1947 * callback, but the other buffer_heads are unmapped yet dirty (dirtied via
1948 * do_wp_page). So writepage should write the first block. If we modify
1949 * the mmap area beyond 1024 we will again get a page fault and the
1950 * page_mkwrite callback will do the block allocation and mark the
1951 * buffer_heads mapped.
1952 *
1953 * We redirty the page if it has any buffer_heads that are either delayed or
1954 * unwritten.
1955 *
1956 * We can get recursively called as shown below:
1957 *
1958 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1959 * ext4_writepage()
1960 *
1961 * But since we don't do any block allocation we should not deadlock.
1962 * The page also has its dirty flag cleared, so we don't get a recursive page_lock.
1963 */
1964 static int ext4_writepage(struct page *page,
1965 struct writeback_control *wbc)
1966 {
1967 int ret = 0;
1968 loff_t size;
1969 unsigned int len;
1970 struct buffer_head *page_bufs = NULL;
1971 struct inode *inode = page->mapping->host;
1972 struct ext4_io_submit io_submit;
1973 bool keep_towrite = false;
1974
1975 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
1976 inode->i_mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
1977 unlock_page(page);
1978 return -EIO;
1979 }
1980
1981 trace_ext4_writepage(page);
1982 size = i_size_read(inode);
1983 if (page->index == size >> PAGE_SHIFT &&
1984 !ext4_verity_in_progress(inode))
1985 len = size & ~PAGE_MASK;
1986 else
1987 len = PAGE_SIZE;
1988
1989 page_bufs = page_buffers(page);
1990 /*
1991 * We cannot do block allocation or other extent handling in this
1992 * function. If there are buffers needing that, we have to redirty
1993 * the page. But we may reach here when we do a journal commit via
1994 * journal_submit_inode_data_buffers() and in that case we must write
1995 * allocated buffers to achieve data=ordered mode guarantees.
1996 *
1997 * Also, if there is only one buffer per page (the fs block
1998 * size == the page size), if one buffer needs block
1999 * allocation or needs to modify the extent tree to clear the
2000 * unwritten flag, we know that the page can't be written at
2001 * all, so we might as well refuse the write immediately.
2002 * Unfortunately if the block size != page size, we can't as
2003 * easily detect this case using ext4_walk_page_buffers(), but
2004 * for the extremely common case, this is an optimization that
2005 * skips a useless round trip through ext4_bio_write_page().
2006 */
2007 if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2008 ext4_bh_delay_or_unwritten)) {
2009 redirty_page_for_writepage(wbc, page);
2010 if ((current->flags & PF_MEMALLOC) ||
2011 (inode->i_sb->s_blocksize == PAGE_SIZE)) {
2012 /*
2013 * For memory cleaning there's no point in writing only
2014 * some buffers. So just bail out.
Warn if we came here 2015 * from direct reclaim. 2016 */ 2017 WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) 2018 == PF_MEMALLOC); 2019 unlock_page(page); 2020 return 0; 2021 } 2022 keep_towrite = true; 2023 } 2024 2025 if (PageChecked(page) && ext4_should_journal_data(inode)) 2026 /* 2027 * It's mmapped pagecache. Add buffers and journal it. There 2028 * doesn't seem much point in redirtying the page here. 2029 */ 2030 return __ext4_journalled_writepage(page, len); 2031 2032 ext4_io_submit_init(&io_submit, wbc); 2033 io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS); 2034 if (!io_submit.io_end) { 2035 redirty_page_for_writepage(wbc, page); 2036 unlock_page(page); 2037 return -ENOMEM; 2038 } 2039 ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite); 2040 ext4_io_submit(&io_submit); 2041 /* Drop io_end reference we got from init */ 2042 ext4_put_io_end_defer(io_submit.io_end); 2043 return ret; 2044 } 2045 2046 static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page) 2047 { 2048 int len; 2049 loff_t size; 2050 int err; 2051 2052 BUG_ON(page->index != mpd->first_page); 2053 clear_page_dirty_for_io(page); 2054 /* 2055 * We have to be very careful here! Nothing protects writeback path 2056 * against i_size changes and the page can be writeably mapped into 2057 * page tables. So an application can be growing i_size and writing 2058 * data through mmap while writeback runs. clear_page_dirty_for_io() 2059 * write-protects our page in page tables and the page cannot get 2060 * written to again until we release page lock. So only after 2061 * clear_page_dirty_for_io() we are safe to sample i_size for 2062 * ext4_bio_write_page() to zero-out tail of the written page. We rely 2063 * on the barrier provided by TestClearPageDirty in 2064 * clear_page_dirty_for_io() to make sure i_size is really sampled only 2065 * after page tables are updated. 2066 */ 2067 size = i_size_read(mpd->inode); 2068 if (page->index == size >> PAGE_SHIFT && 2069 !ext4_verity_in_progress(mpd->inode)) 2070 len = size & ~PAGE_MASK; 2071 else 2072 len = PAGE_SIZE; 2073 err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false); 2074 if (!err) 2075 mpd->wbc->nr_to_write--; 2076 mpd->first_page++; 2077 2078 return err; 2079 } 2080 2081 #define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay)) 2082 2083 /* 2084 * mballoc gives us at most this number of blocks... 2085 * XXX: That seems to be only a limitation of ext4_mb_normalize_request(). 2086 * The rest of mballoc seems to handle chunks up to full group size. 2087 */ 2088 #define MAX_WRITEPAGES_EXTENT_LEN 2048 2089 2090 /* 2091 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map 2092 * 2093 * @mpd - extent of blocks 2094 * @lblk - logical number of the block in the file 2095 * @bh - buffer head we want to add to the extent 2096 * 2097 * The function is used to collect contig. blocks in the same state. If the 2098 * buffer doesn't require mapping for writeback and we haven't started the 2099 * extent of buffers to map yet, the function returns 'true' immediately - the 2100 * caller can write the buffer right away. Otherwise the function returns true 2101 * if the block has been added to the extent, false if the block couldn't be 2102 * added. 2103 */ 2104 static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk, 2105 struct buffer_head *bh) 2106 { 2107 struct ext4_map_blocks *map = &mpd->map; 2108 2109 /* Buffer that doesn't need mapping for writeback? 
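 * Only a buffer that is dirty, mapped and flagged delayed or unwritten
 * (see BH_FLAGS above) still needs mapping before writeback; anything
 * else can be written out right away.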
*/ 2110 if (!buffer_dirty(bh) || !buffer_mapped(bh) || 2111 (!buffer_delay(bh) && !buffer_unwritten(bh))) { 2112 /* So far no extent to map => we write the buffer right away */ 2113 if (map->m_len == 0) 2114 return true; 2115 return false; 2116 } 2117 2118 /* First block in the extent? */ 2119 if (map->m_len == 0) { 2120 /* We cannot map unless handle is started... */ 2121 if (!mpd->do_map) 2122 return false; 2123 map->m_lblk = lblk; 2124 map->m_len = 1; 2125 map->m_flags = bh->b_state & BH_FLAGS; 2126 return true; 2127 } 2128 2129 /* Don't go larger than mballoc is willing to allocate */ 2130 if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN) 2131 return false; 2132 2133 /* Can we merge the block to our big extent? */ 2134 if (lblk == map->m_lblk + map->m_len && 2135 (bh->b_state & BH_FLAGS) == map->m_flags) { 2136 map->m_len++; 2137 return true; 2138 } 2139 return false; 2140 } 2141 2142 /* 2143 * mpage_process_page_bufs - submit page buffers for IO or add them to extent 2144 * 2145 * @mpd - extent of blocks for mapping 2146 * @head - the first buffer in the page 2147 * @bh - buffer we should start processing from 2148 * @lblk - logical number of the block in the file corresponding to @bh 2149 * 2150 * Walk through page buffers from @bh upto @head (exclusive) and either submit 2151 * the page for IO if all buffers in this page were mapped and there's no 2152 * accumulated extent of buffers to map or add buffers in the page to the 2153 * extent of buffers to map. The function returns 1 if the caller can continue 2154 * by processing the next page, 0 if it should stop adding buffers to the 2155 * extent to map because we cannot extend it anymore. It can also return value 2156 * < 0 in case of error during IO submission. 2157 */ 2158 static int mpage_process_page_bufs(struct mpage_da_data *mpd, 2159 struct buffer_head *head, 2160 struct buffer_head *bh, 2161 ext4_lblk_t lblk) 2162 { 2163 struct inode *inode = mpd->inode; 2164 int err; 2165 ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1) 2166 >> inode->i_blkbits; 2167 2168 if (ext4_verity_in_progress(inode)) 2169 blocks = EXT_MAX_BLOCKS; 2170 2171 do { 2172 BUG_ON(buffer_locked(bh)); 2173 2174 if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) { 2175 /* Found extent to map? */ 2176 if (mpd->map.m_len) 2177 return 0; 2178 /* Buffer needs mapping and handle is not started? */ 2179 if (!mpd->do_map) 2180 return 0; 2181 /* Everything mapped so far and we hit EOF */ 2182 break; 2183 } 2184 } while (lblk++, (bh = bh->b_this_page) != head); 2185 /* So far everything mapped? Submit the page for IO. */ 2186 if (mpd->map.m_len == 0) { 2187 err = mpage_submit_page(mpd, head->b_page); 2188 if (err < 0) 2189 return err; 2190 } 2191 return lblk < blocks; 2192 } 2193 2194 /* 2195 * mpage_process_page - update page buffers corresponding to changed extent and 2196 * may submit fully mapped page for IO 2197 * 2198 * @mpd - description of extent to map, on return next extent to map 2199 * @m_lblk - logical block mapping. 2200 * @m_pblk - corresponding physical mapping. 2201 * @map_bh - determines on return whether this page requires any further 2202 * mapping or not. 2203 * Scan given page buffers corresponding to changed extent and update buffer 2204 * state according to new extent state. 2205 * We map delalloc buffers to their physical location, clear unwritten bits. 2206 * If the given page is not fully mapped, we update @map to the next extent in 2207 * the given page that needs mapping & return @map_bh as true. 
2208 */ 2209 static int mpage_process_page(struct mpage_da_data *mpd, struct page *page, 2210 ext4_lblk_t *m_lblk, ext4_fsblk_t *m_pblk, 2211 bool *map_bh) 2212 { 2213 struct buffer_head *head, *bh; 2214 ext4_io_end_t *io_end = mpd->io_submit.io_end; 2215 ext4_lblk_t lblk = *m_lblk; 2216 ext4_fsblk_t pblock = *m_pblk; 2217 int err = 0; 2218 int blkbits = mpd->inode->i_blkbits; 2219 ssize_t io_end_size = 0; 2220 struct ext4_io_end_vec *io_end_vec = ext4_last_io_end_vec(io_end); 2221 2222 bh = head = page_buffers(page); 2223 do { 2224 if (lblk < mpd->map.m_lblk) 2225 continue; 2226 if (lblk >= mpd->map.m_lblk + mpd->map.m_len) { 2227 /* 2228 * Buffer after end of mapped extent. 2229 * Find next buffer in the page to map. 2230 */ 2231 mpd->map.m_len = 0; 2232 mpd->map.m_flags = 0; 2233 io_end_vec->size += io_end_size; 2234 io_end_size = 0; 2235 2236 err = mpage_process_page_bufs(mpd, head, bh, lblk); 2237 if (err > 0) 2238 err = 0; 2239 if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) { 2240 io_end_vec = ext4_alloc_io_end_vec(io_end); 2241 if (IS_ERR(io_end_vec)) { 2242 err = PTR_ERR(io_end_vec); 2243 goto out; 2244 } 2245 io_end_vec->offset = mpd->map.m_lblk << blkbits; 2246 } 2247 *map_bh = true; 2248 goto out; 2249 } 2250 if (buffer_delay(bh)) { 2251 clear_buffer_delay(bh); 2252 bh->b_blocknr = pblock++; 2253 } 2254 clear_buffer_unwritten(bh); 2255 io_end_size += (1 << blkbits); 2256 } while (lblk++, (bh = bh->b_this_page) != head); 2257 2258 io_end_vec->size += io_end_size; 2259 io_end_size = 0; 2260 *map_bh = false; 2261 out: 2262 *m_lblk = lblk; 2263 *m_pblk = pblock; 2264 return err; 2265 } 2266 2267 /* 2268 * mpage_map_buffers - update buffers corresponding to changed extent and 2269 * submit fully mapped pages for IO 2270 * 2271 * @mpd - description of extent to map, on return next extent to map 2272 * 2273 * Scan buffers corresponding to changed extent (we expect corresponding pages 2274 * to be already locked) and update buffer state according to new extent state. 2275 * We map delalloc buffers to their physical location, clear unwritten bits, 2276 * and mark buffers as uninit when we perform writes to unwritten extents 2277 * and do extent conversion after IO is finished. If the last page is not fully 2278 * mapped, we update @map to the next extent in the last page that needs 2279 * mapping. Otherwise we submit the page for IO. 2280 */ 2281 static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd) 2282 { 2283 struct pagevec pvec; 2284 int nr_pages, i; 2285 struct inode *inode = mpd->inode; 2286 int bpp_bits = PAGE_SHIFT - inode->i_blkbits; 2287 pgoff_t start, end; 2288 ext4_lblk_t lblk; 2289 ext4_fsblk_t pblock; 2290 int err; 2291 bool map_bh = false; 2292 2293 start = mpd->map.m_lblk >> bpp_bits; 2294 end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits; 2295 lblk = start << bpp_bits; 2296 pblock = mpd->map.m_pblk; 2297 2298 pagevec_init(&pvec); 2299 while (start <= end) { 2300 nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, 2301 &start, end); 2302 if (nr_pages == 0) 2303 break; 2304 for (i = 0; i < nr_pages; i++) { 2305 struct page *page = pvec.pages[i]; 2306 2307 err = mpage_process_page(mpd, page, &lblk, &pblock, 2308 &map_bh); 2309 /* 2310 * If map_bh is true, means page may require further bh 2311 * mapping, or maybe the page was submitted for IO. 2312 * So we return to call further extent mapping. 2313 */ 2314 if (err < 0 || map_bh == true) 2315 goto out; 2316 /* Page fully mapped - let IO run! 
*/ 2317 err = mpage_submit_page(mpd, page); 2318 if (err < 0) 2319 goto out; 2320 } 2321 pagevec_release(&pvec); 2322 } 2323 /* Extent fully mapped and matches with page boundary. We are done. */ 2324 mpd->map.m_len = 0; 2325 mpd->map.m_flags = 0; 2326 return 0; 2327 out: 2328 pagevec_release(&pvec); 2329 return err; 2330 } 2331 2332 static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd) 2333 { 2334 struct inode *inode = mpd->inode; 2335 struct ext4_map_blocks *map = &mpd->map; 2336 int get_blocks_flags; 2337 int err, dioread_nolock; 2338 2339 trace_ext4_da_write_pages_extent(inode, map); 2340 /* 2341 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or 2342 * to convert an unwritten extent to be initialized (in the case 2343 * where we have written into one or more preallocated blocks). It is 2344 * possible that we're going to need more metadata blocks than 2345 * previously reserved. However we must not fail because we're in 2346 * writeback and there is nothing we can do about it so it might result 2347 * in data loss. So use reserved blocks to allocate metadata if 2348 * possible. 2349 * 2350 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if 2351 * the blocks in question are delalloc blocks. This indicates 2352 * that the blocks and quotas has already been checked when 2353 * the data was copied into the page cache. 2354 */ 2355 get_blocks_flags = EXT4_GET_BLOCKS_CREATE | 2356 EXT4_GET_BLOCKS_METADATA_NOFAIL | 2357 EXT4_GET_BLOCKS_IO_SUBMIT; 2358 dioread_nolock = ext4_should_dioread_nolock(inode); 2359 if (dioread_nolock) 2360 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; 2361 if (map->m_flags & (1 << BH_Delay)) 2362 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; 2363 2364 err = ext4_map_blocks(handle, inode, map, get_blocks_flags); 2365 if (err < 0) 2366 return err; 2367 if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) { 2368 if (!mpd->io_submit.io_end->handle && 2369 ext4_handle_valid(handle)) { 2370 mpd->io_submit.io_end->handle = handle->h_rsv_handle; 2371 handle->h_rsv_handle = NULL; 2372 } 2373 ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end); 2374 } 2375 2376 BUG_ON(map->m_len == 0); 2377 return 0; 2378 } 2379 2380 /* 2381 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length 2382 * mpd->len and submit pages underlying it for IO 2383 * 2384 * @handle - handle for journal operations 2385 * @mpd - extent to map 2386 * @give_up_on_write - we set this to true iff there is a fatal error and there 2387 * is no hope of writing the data. The caller should discard 2388 * dirty pages to avoid infinite loops. 2389 * 2390 * The function maps extent starting at mpd->lblk of length mpd->len. If it is 2391 * delayed, blocks are allocated, if it is unwritten, we may need to convert 2392 * them to initialized or split the described range from larger unwritten 2393 * extent. Note that we need not map all the described range since allocation 2394 * can return less blocks or the range is covered by more unwritten extents. We 2395 * cannot map more because we are limited by reserved transaction credits. On 2396 * the other hand we always make sure that the last touched page is fully 2397 * mapped so that it can be written out (and thus forward progress is 2398 * guaranteed). After mapping we submit all mapped pages for IO. 
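 * If a transient allocation failure occurs after some pages were
 * already submitted, i_disksize is still updated to cover them (see
 * the update_disksize label below) before the error is returned.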
2399 */ 2400 static int mpage_map_and_submit_extent(handle_t *handle, 2401 struct mpage_da_data *mpd, 2402 bool *give_up_on_write) 2403 { 2404 struct inode *inode = mpd->inode; 2405 struct ext4_map_blocks *map = &mpd->map; 2406 int err; 2407 loff_t disksize; 2408 int progress = 0; 2409 ext4_io_end_t *io_end = mpd->io_submit.io_end; 2410 struct ext4_io_end_vec *io_end_vec; 2411 2412 io_end_vec = ext4_alloc_io_end_vec(io_end); 2413 if (IS_ERR(io_end_vec)) 2414 return PTR_ERR(io_end_vec); 2415 io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits; 2416 do { 2417 err = mpage_map_one_extent(handle, mpd); 2418 if (err < 0) { 2419 struct super_block *sb = inode->i_sb; 2420 2421 if (ext4_forced_shutdown(EXT4_SB(sb)) || 2422 EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED) 2423 goto invalidate_dirty_pages; 2424 /* 2425 * Let the uper layers retry transient errors. 2426 * In the case of ENOSPC, if ext4_count_free_blocks() 2427 * is non-zero, a commit should free up blocks. 2428 */ 2429 if ((err == -ENOMEM) || 2430 (err == -ENOSPC && ext4_count_free_clusters(sb))) { 2431 if (progress) 2432 goto update_disksize; 2433 return err; 2434 } 2435 ext4_msg(sb, KERN_CRIT, 2436 "Delayed block allocation failed for " 2437 "inode %lu at logical offset %llu with" 2438 " max blocks %u with error %d", 2439 inode->i_ino, 2440 (unsigned long long)map->m_lblk, 2441 (unsigned)map->m_len, -err); 2442 ext4_msg(sb, KERN_CRIT, 2443 "This should not happen!! Data will " 2444 "be lost\n"); 2445 if (err == -ENOSPC) 2446 ext4_print_free_blocks(inode); 2447 invalidate_dirty_pages: 2448 *give_up_on_write = true; 2449 return err; 2450 } 2451 progress = 1; 2452 /* 2453 * Update buffer state, submit mapped pages, and get us new 2454 * extent to map 2455 */ 2456 err = mpage_map_and_submit_buffers(mpd); 2457 if (err < 0) 2458 goto update_disksize; 2459 } while (map->m_len); 2460 2461 update_disksize: 2462 /* 2463 * Update on-disk size after IO is submitted. Races with 2464 * truncate are avoided by checking i_size under i_data_sem. 2465 */ 2466 disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT; 2467 if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) { 2468 int err2; 2469 loff_t i_size; 2470 2471 down_write(&EXT4_I(inode)->i_data_sem); 2472 i_size = i_size_read(inode); 2473 if (disksize > i_size) 2474 disksize = i_size; 2475 if (disksize > EXT4_I(inode)->i_disksize) 2476 EXT4_I(inode)->i_disksize = disksize; 2477 up_write(&EXT4_I(inode)->i_data_sem); 2478 err2 = ext4_mark_inode_dirty(handle, inode); 2479 if (err2) { 2480 ext4_error_err(inode->i_sb, -err2, 2481 "Failed to mark inode %lu dirty", 2482 inode->i_ino); 2483 } 2484 if (!err) 2485 err = err2; 2486 } 2487 return err; 2488 } 2489 2490 /* 2491 * Calculate the total number of credits to reserve for one writepages 2492 * iteration. This is called from ext4_writepages(). We map an extent of 2493 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping 2494 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN + 2495 * bpp - 1 blocks in bpp different extents. 2496 */ 2497 static int ext4_da_writepages_trans_blocks(struct inode *inode) 2498 { 2499 int bpp = ext4_journal_blocks_per_page(inode); 2500 2501 return ext4_meta_trans_blocks(inode, 2502 MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp); 2503 } 2504 2505 /* 2506 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages 2507 * and underlying extent to map 2508 * 2509 * @mpd - where to look for pages 2510 * 2511 * Walk dirty pages in the mapping. 
If they are fully mapped, submit them for 2512 * IO immediately. When we find a page which isn't mapped we start accumulating 2513 * extent of buffers underlying these pages that needs mapping (formed by 2514 * either delayed or unwritten buffers). We also lock the pages containing 2515 * these buffers. The extent found is returned in @mpd structure (starting at 2516 * mpd->lblk with length mpd->len blocks). 2517 * 2518 * Note that this function can attach bios to one io_end structure which are 2519 * neither logically nor physically contiguous. Although it may seem as an 2520 * unnecessary complication, it is actually inevitable in blocksize < pagesize 2521 * case as we need to track IO to all buffers underlying a page in one io_end. 2522 */ 2523 static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) 2524 { 2525 struct address_space *mapping = mpd->inode->i_mapping; 2526 struct pagevec pvec; 2527 unsigned int nr_pages; 2528 long left = mpd->wbc->nr_to_write; 2529 pgoff_t index = mpd->first_page; 2530 pgoff_t end = mpd->last_page; 2531 xa_mark_t tag; 2532 int i, err = 0; 2533 int blkbits = mpd->inode->i_blkbits; 2534 ext4_lblk_t lblk; 2535 struct buffer_head *head; 2536 2537 if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages) 2538 tag = PAGECACHE_TAG_TOWRITE; 2539 else 2540 tag = PAGECACHE_TAG_DIRTY; 2541 2542 pagevec_init(&pvec); 2543 mpd->map.m_len = 0; 2544 mpd->next_page = index; 2545 while (index <= end) { 2546 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, 2547 tag); 2548 if (nr_pages == 0) 2549 goto out; 2550 2551 for (i = 0; i < nr_pages; i++) { 2552 struct page *page = pvec.pages[i]; 2553 2554 /* 2555 * Accumulated enough dirty pages? This doesn't apply 2556 * to WB_SYNC_ALL mode. For integrity sync we have to 2557 * keep going because someone may be concurrently 2558 * dirtying pages, and we might have synced a lot of 2559 * newly appeared dirty pages, but have not synced all 2560 * of the old dirty pages. 2561 */ 2562 if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0) 2563 goto out; 2564 2565 /* If we can't merge this page, we are done. 
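 * (Pages can be added to the extent only while their indices stay
 * consecutive, i.e. page->index == mpd->next_page.)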
*/ 2566 if (mpd->map.m_len > 0 && mpd->next_page != page->index) 2567 goto out; 2568 2569 lock_page(page); 2570 /* 2571 * If the page is no longer dirty, or its mapping no 2572 * longer corresponds to inode we are writing (which 2573 * means it has been truncated or invalidated), or the 2574 * page is already under writeback and we are not doing 2575 * a data integrity writeback, skip the page 2576 */ 2577 if (!PageDirty(page) || 2578 (PageWriteback(page) && 2579 (mpd->wbc->sync_mode == WB_SYNC_NONE)) || 2580 unlikely(page->mapping != mapping)) { 2581 unlock_page(page); 2582 continue; 2583 } 2584 2585 wait_on_page_writeback(page); 2586 BUG_ON(PageWriteback(page)); 2587 2588 if (mpd->map.m_len == 0) 2589 mpd->first_page = page->index; 2590 mpd->next_page = page->index + 1; 2591 /* Add all dirty buffers to mpd */ 2592 lblk = ((ext4_lblk_t)page->index) << 2593 (PAGE_SHIFT - blkbits); 2594 head = page_buffers(page); 2595 err = mpage_process_page_bufs(mpd, head, head, lblk); 2596 if (err <= 0) 2597 goto out; 2598 err = 0; 2599 left--; 2600 } 2601 pagevec_release(&pvec); 2602 cond_resched(); 2603 } 2604 return 0; 2605 out: 2606 pagevec_release(&pvec); 2607 return err; 2608 } 2609 2610 static int ext4_writepages(struct address_space *mapping, 2611 struct writeback_control *wbc) 2612 { 2613 pgoff_t writeback_index = 0; 2614 long nr_to_write = wbc->nr_to_write; 2615 int range_whole = 0; 2616 int cycled = 1; 2617 handle_t *handle = NULL; 2618 struct mpage_da_data mpd; 2619 struct inode *inode = mapping->host; 2620 int needed_blocks, rsv_blocks = 0, ret = 0; 2621 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); 2622 bool done; 2623 struct blk_plug plug; 2624 bool give_up_on_write = false; 2625 2626 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) 2627 return -EIO; 2628 2629 percpu_down_read(&sbi->s_writepages_rwsem); 2630 trace_ext4_writepages(inode, wbc); 2631 2632 /* 2633 * No pages to write? This is mainly a kludge to avoid starting 2634 * a transaction for special inodes like journal inode on last iput() 2635 * because that could violate lock ordering on umount 2636 */ 2637 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 2638 goto out_writepages; 2639 2640 if (ext4_should_journal_data(inode)) { 2641 ret = generic_writepages(mapping, wbc); 2642 goto out_writepages; 2643 } 2644 2645 /* 2646 * If the filesystem has aborted, it is read-only, so return 2647 * right away instead of dumping stack traces later on that 2648 * will obscure the real source of the problem. We test 2649 * EXT4_MF_FS_ABORTED instead of sb->s_flag's SB_RDONLY because 2650 * the latter could be true if the filesystem is mounted 2651 * read-only, and in that case, ext4_writepages should 2652 * *never* be called, so if that ever happens, we would want 2653 * the stack trace. 2654 */ 2655 if (unlikely(ext4_forced_shutdown(EXT4_SB(mapping->host->i_sb)) || 2656 sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) { 2657 ret = -EROFS; 2658 goto out_writepages; 2659 } 2660 2661 /* 2662 * If we have inline data and arrive here, it means that 2663 * we will soon create the block for the 1st page, so 2664 * we'd better clear the inline data here. 2665 */ 2666 if (ext4_has_inline_data(inode)) { 2667 /* Just inode will be modified... 
*/ 2668 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1); 2669 if (IS_ERR(handle)) { 2670 ret = PTR_ERR(handle); 2671 goto out_writepages; 2672 } 2673 BUG_ON(ext4_test_inode_state(inode, 2674 EXT4_STATE_MAY_INLINE_DATA)); 2675 ext4_destroy_inline_data(handle, inode); 2676 ext4_journal_stop(handle); 2677 } 2678 2679 if (ext4_should_dioread_nolock(inode)) { 2680 /* 2681 * We may need to convert up to one extent per block in 2682 * the page and we may dirty the inode. 2683 */ 2684 rsv_blocks = 1 + ext4_chunk_trans_blocks(inode, 2685 PAGE_SIZE >> inode->i_blkbits); 2686 } 2687 2688 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 2689 range_whole = 1; 2690 2691 if (wbc->range_cyclic) { 2692 writeback_index = mapping->writeback_index; 2693 if (writeback_index) 2694 cycled = 0; 2695 mpd.first_page = writeback_index; 2696 mpd.last_page = -1; 2697 } else { 2698 mpd.first_page = wbc->range_start >> PAGE_SHIFT; 2699 mpd.last_page = wbc->range_end >> PAGE_SHIFT; 2700 } 2701 2702 mpd.inode = inode; 2703 mpd.wbc = wbc; 2704 ext4_io_submit_init(&mpd.io_submit, wbc); 2705 retry: 2706 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 2707 tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page); 2708 done = false; 2709 blk_start_plug(&plug); 2710 2711 /* 2712 * First writeback pages that don't need mapping - we can avoid 2713 * starting a transaction unnecessarily and also avoid being blocked 2714 * in the block layer on device congestion while having transaction 2715 * started. 2716 */ 2717 mpd.do_map = 0; 2718 mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL); 2719 if (!mpd.io_submit.io_end) { 2720 ret = -ENOMEM; 2721 goto unplug; 2722 } 2723 ret = mpage_prepare_extent_to_map(&mpd); 2724 /* Unlock pages we didn't use */ 2725 mpage_release_unused_pages(&mpd, false); 2726 /* Submit prepared bio */ 2727 ext4_io_submit(&mpd.io_submit); 2728 ext4_put_io_end_defer(mpd.io_submit.io_end); 2729 mpd.io_submit.io_end = NULL; 2730 if (ret < 0) 2731 goto unplug; 2732 2733 while (!done && mpd.first_page <= mpd.last_page) { 2734 /* For each extent of pages we use new io_end */ 2735 mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL); 2736 if (!mpd.io_submit.io_end) { 2737 ret = -ENOMEM; 2738 break; 2739 } 2740 2741 /* 2742 * We have two constraints: We find one extent to map and we 2743 * must always write out whole page (makes a difference when 2744 * blocksize < pagesize) so that we don't block on IO when we 2745 * try to write out the rest of the page. Journalled mode is 2746 * not supported by delalloc. 
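 * (With 1K blocks and 4K pages, for instance, a single page may need
 * up to four separate extents mapped before its IO can be started.)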
2747 */ 2748 BUG_ON(ext4_should_journal_data(inode)); 2749 needed_blocks = ext4_da_writepages_trans_blocks(inode); 2750 2751 /* start a new transaction */ 2752 handle = ext4_journal_start_with_reserve(inode, 2753 EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks); 2754 if (IS_ERR(handle)) { 2755 ret = PTR_ERR(handle); 2756 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " 2757 "%ld pages, ino %lu; err %d", __func__, 2758 wbc->nr_to_write, inode->i_ino, ret); 2759 /* Release allocated io_end */ 2760 ext4_put_io_end(mpd.io_submit.io_end); 2761 mpd.io_submit.io_end = NULL; 2762 break; 2763 } 2764 mpd.do_map = 1; 2765 2766 trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc); 2767 ret = mpage_prepare_extent_to_map(&mpd); 2768 if (!ret) { 2769 if (mpd.map.m_len) 2770 ret = mpage_map_and_submit_extent(handle, &mpd, 2771 &give_up_on_write); 2772 else { 2773 /* 2774 * We scanned the whole range (or exhausted 2775 * nr_to_write), submitted what was mapped and 2776 * didn't find anything needing mapping. We are 2777 * done. 2778 */ 2779 done = true; 2780 } 2781 } 2782 /* 2783 * Caution: If the handle is synchronous, 2784 * ext4_journal_stop() can wait for transaction commit 2785 * to finish which may depend on writeback of pages to 2786 * complete or on page lock to be released. In that 2787 * case, we have to wait until after after we have 2788 * submitted all the IO, released page locks we hold, 2789 * and dropped io_end reference (for extent conversion 2790 * to be able to complete) before stopping the handle. 2791 */ 2792 if (!ext4_handle_valid(handle) || handle->h_sync == 0) { 2793 ext4_journal_stop(handle); 2794 handle = NULL; 2795 mpd.do_map = 0; 2796 } 2797 /* Unlock pages we didn't use */ 2798 mpage_release_unused_pages(&mpd, give_up_on_write); 2799 /* Submit prepared bio */ 2800 ext4_io_submit(&mpd.io_submit); 2801 2802 /* 2803 * Drop our io_end reference we got from init. We have 2804 * to be careful and use deferred io_end finishing if 2805 * we are still holding the transaction as we can 2806 * release the last reference to io_end which may end 2807 * up doing unwritten extent conversion. 2808 */ 2809 if (handle) { 2810 ext4_put_io_end_defer(mpd.io_submit.io_end); 2811 ext4_journal_stop(handle); 2812 } else 2813 ext4_put_io_end(mpd.io_submit.io_end); 2814 mpd.io_submit.io_end = NULL; 2815 2816 if (ret == -ENOSPC && sbi->s_journal) { 2817 /* 2818 * Commit the transaction which would 2819 * free blocks released in the transaction 2820 * and try again 2821 */ 2822 jbd2_journal_force_commit_nested(sbi->s_journal); 2823 ret = 0; 2824 continue; 2825 } 2826 /* Fatal error - ENOMEM, EIO... 
*/
2827 if (ret)
2828 break;
2829 }
2830 unplug:
2831 blk_finish_plug(&plug);
2832 if (!ret && !cycled && wbc->nr_to_write > 0) {
2833 cycled = 1;
2834 mpd.last_page = writeback_index - 1;
2835 mpd.first_page = 0;
2836 goto retry;
2837 }
2838
2839 /* Update index */
2840 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2841 /*
2842 * Set the writeback_index so that range_cyclic
2843 * mode will write it back later
2844 */
2845 mapping->writeback_index = mpd.first_page;
2846
2847 out_writepages:
2848 trace_ext4_writepages_result(inode, wbc, ret,
2849 nr_to_write - wbc->nr_to_write);
2850 percpu_up_read(&sbi->s_writepages_rwsem);
2851 return ret;
2852 }
2853
2854 static int ext4_dax_writepages(struct address_space *mapping,
2855 struct writeback_control *wbc)
2856 {
2857 int ret;
2858 long nr_to_write = wbc->nr_to_write;
2859 struct inode *inode = mapping->host;
2860 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2861
2862 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
2863 return -EIO;
2864
2865 percpu_down_read(&sbi->s_writepages_rwsem);
2866 trace_ext4_writepages(inode, wbc);
2867
2868 ret = dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
2869 trace_ext4_writepages_result(inode, wbc, ret,
2870 nr_to_write - wbc->nr_to_write);
2871 percpu_up_read(&sbi->s_writepages_rwsem);
2872 return ret;
2873 }
2874
2875 static int ext4_nonda_switch(struct super_block *sb)
2876 {
2877 s64 free_clusters, dirty_clusters;
2878 struct ext4_sb_info *sbi = EXT4_SB(sb);
2879
2880 /*
2881 * Switch to non-delalloc mode if we are running low
2882 * on free blocks. The free block accounting via percpu
2883 * counters can get slightly wrong, with percpu_counter_batch getting
2884 * accumulated on each CPU without updating the global counters.
2885 * Delalloc needs accurate free block accounting, so switch
2886 * to non-delalloc when we are near the error range.
2887 */
2888 free_clusters =
2889 percpu_counter_read_positive(&sbi->s_freeclusters_counter);
2890 dirty_clusters =
2891 percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2892 /*
2893 * Start pushing delalloc when 1/2 of free blocks are dirty.
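 * For example, with 800 free and 600 dirty clusters we both start
 * writeback (800 < 2 * 600) and return 1 below, since free clusters
 * are under 150% of the dirty ones (2 * 800 < 3 * 600).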
2894 */ 2895 if (dirty_clusters && (free_clusters < 2 * dirty_clusters)) 2896 try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE); 2897 2898 if (2 * free_clusters < 3 * dirty_clusters || 2899 free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) { 2900 /* 2901 * free block count is less than 150% of dirty blocks 2902 * or free blocks is less than watermark 2903 */ 2904 return 1; 2905 } 2906 return 0; 2907 } 2908 2909 /* We always reserve for an inode update; the superblock could be there too */ 2910 static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len) 2911 { 2912 if (likely(ext4_has_feature_large_file(inode->i_sb))) 2913 return 1; 2914 2915 if (pos + len <= 0x7fffffffULL) 2916 return 1; 2917 2918 /* We might need to update the superblock to set LARGE_FILE */ 2919 return 2; 2920 } 2921 2922 static int ext4_da_write_begin(struct file *file, struct address_space *mapping, 2923 loff_t pos, unsigned len, unsigned flags, 2924 struct page **pagep, void **fsdata) 2925 { 2926 int ret, retries = 0; 2927 struct page *page; 2928 pgoff_t index; 2929 struct inode *inode = mapping->host; 2930 handle_t *handle; 2931 2932 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) 2933 return -EIO; 2934 2935 index = pos >> PAGE_SHIFT; 2936 2937 if (ext4_nonda_switch(inode->i_sb) || S_ISLNK(inode->i_mode) || 2938 ext4_verity_in_progress(inode)) { 2939 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; 2940 return ext4_write_begin(file, mapping, pos, 2941 len, flags, pagep, fsdata); 2942 } 2943 *fsdata = (void *)0; 2944 trace_ext4_da_write_begin(inode, pos, len, flags); 2945 2946 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { 2947 ret = ext4_da_write_inline_data_begin(mapping, inode, 2948 pos, len, flags, 2949 pagep, fsdata); 2950 if (ret < 0) 2951 return ret; 2952 if (ret == 1) 2953 return 0; 2954 } 2955 2956 /* 2957 * grab_cache_page_write_begin() can take a long time if the 2958 * system is thrashing due to memory pressure, or if the page 2959 * is being written back. So grab it first before we start 2960 * the transaction handle. This also allows us to allocate 2961 * the page (if needed) without using GFP_NOFS. 2962 */ 2963 retry_grab: 2964 page = grab_cache_page_write_begin(mapping, index, flags); 2965 if (!page) 2966 return -ENOMEM; 2967 unlock_page(page); 2968 2969 /* 2970 * With delayed allocation, we don't log the i_disksize update 2971 * if there is delayed block allocation. But we still need 2972 * to journalling the i_disksize update if writes to the end 2973 * of file which has an already mapped buffer. 2974 */ 2975 retry_journal: 2976 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 2977 ext4_da_write_credits(inode, pos, len)); 2978 if (IS_ERR(handle)) { 2979 put_page(page); 2980 return PTR_ERR(handle); 2981 } 2982 2983 lock_page(page); 2984 if (page->mapping != mapping) { 2985 /* The page got truncated from under us */ 2986 unlock_page(page); 2987 put_page(page); 2988 ext4_journal_stop(handle); 2989 goto retry_grab; 2990 } 2991 /* In case writeback began while the page was unlocked */ 2992 wait_for_stable_page(page); 2993 2994 #ifdef CONFIG_FS_ENCRYPTION 2995 ret = ext4_block_write_begin(page, pos, len, 2996 ext4_da_get_block_prep); 2997 #else 2998 ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep); 2999 #endif 3000 if (ret < 0) { 3001 unlock_page(page); 3002 ext4_journal_stop(handle); 3003 /* 3004 * block_write_begin may have instantiated a few blocks 3005 * outside i_size. Trim these off again. 
Don't need
3006 * i_size_read because we hold i_mutex.
3007 */
3008 if (pos + len > inode->i_size)
3009 ext4_truncate_failed_write(inode);
3010
3011 if (ret == -ENOSPC &&
3012 ext4_should_retry_alloc(inode->i_sb, &retries))
3013 goto retry_journal;
3014
3015 put_page(page);
3016 return ret;
3017 }
3018
3019 *pagep = page;
3020 return ret;
3021 }
3022
3023 /*
3024 * Check if we should update i_disksize
3025 * when writing to the end of file without requiring block allocation
3026 */
3027 static int ext4_da_should_update_i_disksize(struct page *page,
3028 unsigned long offset)
3029 {
3030 struct buffer_head *bh;
3031 struct inode *inode = page->mapping->host;
3032 unsigned int idx;
3033 int i;
3034
3035 bh = page_buffers(page);
3036 idx = offset >> inode->i_blkbits;
3037
3038 for (i = 0; i < idx; i++)
3039 bh = bh->b_this_page;
3040
3041 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
3042 return 0;
3043 return 1;
3044 }
3045
3046 static int ext4_da_write_end(struct file *file,
3047 struct address_space *mapping,
3048 loff_t pos, unsigned len, unsigned copied,
3049 struct page *page, void *fsdata)
3050 {
3051 struct inode *inode = mapping->host;
3052 int ret = 0, ret2;
3053 handle_t *handle = ext4_journal_current_handle();
3054 loff_t new_i_size;
3055 unsigned long start, end;
3056 int write_mode = (int)(unsigned long)fsdata;
3057
3058 if (write_mode == FALL_BACK_TO_NONDELALLOC)
3059 return ext4_write_end(file, mapping, pos,
3060 len, copied, page, fsdata);
3061
3062 trace_ext4_da_write_end(inode, pos, len, copied);
3063 start = pos & (PAGE_SIZE - 1);
3064 end = start + copied - 1;
3065
3066 /*
3067 * generic_write_end() will run mark_inode_dirty() if i_size
3068 * changes. So let's piggyback the i_disksize mark_inode_dirty
3069 * into that.
3070 */
3071 new_i_size = pos + copied;
3072 if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
3073 if (ext4_has_inline_data(inode) ||
3074 ext4_da_should_update_i_disksize(page, end)) {
3075 ext4_update_i_disksize(inode, new_i_size);
3076 /* We need to mark the inode dirty even if
3077 * new_i_size is less than inode->i_size
3078 * but greater than i_disksize (hint: delalloc).
3079 */
3080 ext4_mark_inode_dirty(handle, inode);
3081 }
3082 }
3083
3084 if (write_mode != CONVERT_INLINE_DATA &&
3085 ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
3086 ext4_has_inline_data(inode))
3087 ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
3088 page);
3089 else
3090 ret2 = generic_write_end(file, mapping, pos, len, copied,
3091 page, fsdata);
3092
3093 copied = ret2;
3094 if (ret2 < 0)
3095 ret = ret2;
3096 ret2 = ext4_journal_stop(handle);
3097 if (!ret)
3098 ret = ret2;
3099
3100 return ret ? ret : copied;
3101 }
3102
3103 /*
3104 * Force all delayed allocation blocks to be allocated for a given inode.
3105 */
3106 int ext4_alloc_da_blocks(struct inode *inode)
3107 {
3108 trace_ext4_alloc_da_blocks(inode);
3109
3110 if (!EXT4_I(inode)->i_reserved_data_blocks)
3111 return 0;
3112
3113 /*
3114 * We do something simple for now. The filemap_flush() will
3115 * also start triggering a write of the data blocks, which is
3116 * not strictly speaking necessary (and for users of
3117 * laptop_mode, not even desirable).
However, to do otherwise 3118 * would require replicating code paths in: 3119 * 3120 * ext4_writepages() -> 3121 * write_cache_pages() ---> (via passed in callback function) 3122 * __mpage_da_writepage() --> 3123 * mpage_add_bh_to_extent() 3124 * mpage_da_map_blocks() 3125 * 3126 * The problem is that write_cache_pages(), located in 3127 * mm/page-writeback.c, marks pages clean in preparation for 3128 * doing I/O, which is not desirable if we're not planning on 3129 * doing I/O at all. 3130 * 3131 * We could call write_cache_pages(), and then redirty all of 3132 * the pages by calling redirty_page_for_writepage() but that 3133 * would be ugly in the extreme. So instead we would need to 3134 * replicate parts of the code in the above functions, 3135 * simplifying them because we wouldn't actually intend to 3136 * write out the pages, but rather only collect contiguous 3137 * logical block extents, call the multi-block allocator, and 3138 * then update the buffer heads with the block allocations. 3139 * 3140 * For now, though, we'll cheat by calling filemap_flush(), 3141 * which will map the blocks, and start the I/O, but not 3142 * actually wait for the I/O to complete. 3143 */ 3144 return filemap_flush(inode->i_mapping); 3145 } 3146 3147 /* 3148 * bmap() is special. It gets used by applications such as lilo and by 3149 * the swapper to find the on-disk block of a specific piece of data. 3150 * 3151 * Naturally, this is dangerous if the block concerned is still in the 3152 * journal. If somebody makes a swapfile on an ext4 data-journaling 3153 * filesystem and enables swap, then they may get a nasty shock when the 3154 * data getting swapped to that swapfile suddenly gets overwritten by 3155 * the original zero's written out previously to the journal and 3156 * awaiting writeback in the kernel's buffer cache. 3157 * 3158 * So, if we see any bmap calls here on a modified, data-journaled file, 3159 * take extra steps to flush any blocks which might be in the cache. 3160 */ 3161 static sector_t ext4_bmap(struct address_space *mapping, sector_t block) 3162 { 3163 struct inode *inode = mapping->host; 3164 journal_t *journal; 3165 int err; 3166 3167 /* 3168 * We can get here for an inline file via the FIBMAP ioctl 3169 */ 3170 if (ext4_has_inline_data(inode)) 3171 return 0; 3172 3173 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && 3174 test_opt(inode->i_sb, DELALLOC)) { 3175 /* 3176 * With delalloc we want to sync the file 3177 * so that we can make sure we allocate 3178 * blocks for file 3179 */ 3180 filemap_write_and_wait(mapping); 3181 } 3182 3183 if (EXT4_JOURNAL(inode) && 3184 ext4_test_inode_state(inode, EXT4_STATE_JDATA)) { 3185 /* 3186 * This is a REALLY heavyweight approach, but the use of 3187 * bmap on dirty files is expected to be extremely rare: 3188 * only if we run lilo or swapon on a freshly made file 3189 * do we expect this to happen. 3190 * 3191 * (bmap requires CAP_SYS_RAWIO so this does not 3192 * represent an unprivileged user DOS attack --- we'd be 3193 * in trouble if mortal users could trigger this path at 3194 * will.) 3195 * 3196 * NB. EXT4_STATE_JDATA is not set on files other than 3197 * regular files. If somebody wants to bmap a directory 3198 * or symlink and gets confused because the buffer 3199 * hasn't yet been flushed to disk, they deserve 3200 * everything they get. 
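 * Flushing the journal below forces any journalled data out to its
 * final on-disk location, so the block number that iomap_bmap()
 * then reports should be safe to use.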
3201 */ 3202 3203 ext4_clear_inode_state(inode, EXT4_STATE_JDATA); 3204 journal = EXT4_JOURNAL(inode); 3205 jbd2_journal_lock_updates(journal); 3206 err = jbd2_journal_flush(journal); 3207 jbd2_journal_unlock_updates(journal); 3208 3209 if (err) 3210 return 0; 3211 } 3212 3213 return iomap_bmap(mapping, block, &ext4_iomap_ops); 3214 } 3215 3216 static int ext4_readpage(struct file *file, struct page *page) 3217 { 3218 int ret = -EAGAIN; 3219 struct inode *inode = page->mapping->host; 3220 3221 trace_ext4_readpage(page); 3222 3223 if (ext4_has_inline_data(inode)) 3224 ret = ext4_readpage_inline(inode, page); 3225 3226 if (ret == -EAGAIN) 3227 return ext4_mpage_readpages(inode, NULL, page); 3228 3229 return ret; 3230 } 3231 3232 static void ext4_readahead(struct readahead_control *rac) 3233 { 3234 struct inode *inode = rac->mapping->host; 3235 3236 /* If the file has inline data, no need to do readahead. */ 3237 if (ext4_has_inline_data(inode)) 3238 return; 3239 3240 ext4_mpage_readpages(inode, rac, NULL); 3241 } 3242 3243 static void ext4_invalidatepage(struct page *page, unsigned int offset, 3244 unsigned int length) 3245 { 3246 trace_ext4_invalidatepage(page, offset, length); 3247 3248 /* No journalling happens on data buffers when this function is used */ 3249 WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page))); 3250 3251 block_invalidatepage(page, offset, length); 3252 } 3253 3254 static int __ext4_journalled_invalidatepage(struct page *page, 3255 unsigned int offset, 3256 unsigned int length) 3257 { 3258 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3259 3260 trace_ext4_journalled_invalidatepage(page, offset, length); 3261 3262 /* 3263 * If it's a full truncate we just forget about the pending dirtying 3264 */ 3265 if (offset == 0 && length == PAGE_SIZE) 3266 ClearPageChecked(page); 3267 3268 return jbd2_journal_invalidatepage(journal, page, offset, length); 3269 } 3270 3271 /* Wrapper for aops... */ 3272 static void ext4_journalled_invalidatepage(struct page *page, 3273 unsigned int offset, 3274 unsigned int length) 3275 { 3276 WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0); 3277 } 3278 3279 static int ext4_releasepage(struct page *page, gfp_t wait) 3280 { 3281 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3282 3283 trace_ext4_releasepage(page); 3284 3285 /* Page has dirty journalled data -> cannot release */ 3286 if (PageChecked(page)) 3287 return 0; 3288 if (journal) 3289 return jbd2_journal_try_to_free_buffers(journal, page, wait); 3290 else 3291 return try_to_free_buffers(page); 3292 } 3293 3294 static bool ext4_inode_datasync_dirty(struct inode *inode) 3295 { 3296 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; 3297 3298 if (journal) 3299 return !jbd2_transaction_committed(journal, 3300 EXT4_I(inode)->i_datasync_tid); 3301 /* Any metadata buffers to write? */ 3302 if (!list_empty(&inode->i_mapping->private_list)) 3303 return true; 3304 return inode->i_state & I_DIRTY_DATASYNC; 3305 } 3306 3307 static void ext4_set_iomap(struct inode *inode, struct iomap *iomap, 3308 struct ext4_map_blocks *map, loff_t offset, 3309 loff_t length) 3310 { 3311 u8 blkbits = inode->i_blkbits; 3312 3313 /* 3314 * Writes that span EOF might trigger an I/O size update on completion, 3315 * so consider them to be dirty for the purpose of O_DSYNC, even if 3316 * there is no other metadata changes being made or are pending. 
3317 */ 3318 iomap->flags = 0; 3319 if (ext4_inode_datasync_dirty(inode) || 3320 offset + length > i_size_read(inode)) 3321 iomap->flags |= IOMAP_F_DIRTY; 3322 3323 if (map->m_flags & EXT4_MAP_NEW) 3324 iomap->flags |= IOMAP_F_NEW; 3325 3326 iomap->bdev = inode->i_sb->s_bdev; 3327 iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev; 3328 iomap->offset = (u64) map->m_lblk << blkbits; 3329 iomap->length = (u64) map->m_len << blkbits; 3330 3331 if ((map->m_flags & EXT4_MAP_MAPPED) && 3332 !ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3333 iomap->flags |= IOMAP_F_MERGED; 3334 3335 /* 3336 * Flags passed to ext4_map_blocks() for direct I/O writes can result 3337 * in m_flags having both EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN bits 3338 * set. In order for any allocated unwritten extents to be converted 3339 * into written extents correctly within the ->end_io() handler, we 3340 * need to ensure that the iomap->type is set appropriately. Hence, the 3341 * reason why we need to check whether the EXT4_MAP_UNWRITTEN bit has 3342 * been set first. 3343 */ 3344 if (map->m_flags & EXT4_MAP_UNWRITTEN) { 3345 iomap->type = IOMAP_UNWRITTEN; 3346 iomap->addr = (u64) map->m_pblk << blkbits; 3347 } else if (map->m_flags & EXT4_MAP_MAPPED) { 3348 iomap->type = IOMAP_MAPPED; 3349 iomap->addr = (u64) map->m_pblk << blkbits; 3350 } else { 3351 iomap->type = IOMAP_HOLE; 3352 iomap->addr = IOMAP_NULL_ADDR; 3353 } 3354 } 3355 3356 static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map, 3357 unsigned int flags) 3358 { 3359 handle_t *handle; 3360 u8 blkbits = inode->i_blkbits; 3361 int ret, dio_credits, m_flags = 0, retries = 0; 3362 3363 /* 3364 * Trim the mapping request to the maximum value that we can map at 3365 * once for direct I/O. 3366 */ 3367 if (map->m_len > DIO_MAX_BLOCKS) 3368 map->m_len = DIO_MAX_BLOCKS; 3369 dio_credits = ext4_chunk_trans_blocks(inode, map->m_len); 3370 3371 retry: 3372 /* 3373 * Either we allocate blocks and then don't get an unwritten extent, so 3374 * in that case we have reserved enough credits. Or, the blocks are 3375 * already allocated and unwritten. In that case, the extent conversion 3376 * fits into the credits as well. 3377 */ 3378 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits); 3379 if (IS_ERR(handle)) 3380 return PTR_ERR(handle); 3381 3382 /* 3383 * DAX and direct I/O are the only two operations that are currently 3384 * supported with IOMAP_WRITE. 3385 */ 3386 WARN_ON(!IS_DAX(inode) && !(flags & IOMAP_DIRECT)); 3387 if (IS_DAX(inode)) 3388 m_flags = EXT4_GET_BLOCKS_CREATE_ZERO; 3389 /* 3390 * We use i_size instead of i_disksize here because delalloc writeback 3391 * can complete at any point during the I/O and subsequently push the 3392 * i_disksize out to i_size. This could be beyond where direct I/O is 3393 * happening and thus expose allocated blocks to direct I/O reads. 3394 */ 3395 else if ((map->m_lblk * (1 << blkbits)) >= i_size_read(inode)) 3396 m_flags = EXT4_GET_BLOCKS_CREATE; 3397 else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3398 m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT; 3399 3400 ret = ext4_map_blocks(handle, inode, map, m_flags); 3401 3402 /* 3403 * We cannot fill holes in indirect tree based inodes as that could 3404 * expose stale data in the case of a crash. Use the magic error code 3405 * to fallback to buffered I/O. 
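 * That magic error code is -ENOTBLK (set below); ext4's direct I/O
 * paths treat it as a request to fall back to buffered I/O rather
 * than as a hard failure.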
3406 */ 3407 if (!m_flags && !ret) 3408 ret = -ENOTBLK; 3409 3410 ext4_journal_stop(handle); 3411 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 3412 goto retry; 3413 3414 return ret; 3415 } 3416 3417 3418 static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length, 3419 unsigned flags, struct iomap *iomap, struct iomap *srcmap) 3420 { 3421 int ret; 3422 struct ext4_map_blocks map; 3423 u8 blkbits = inode->i_blkbits; 3424 3425 if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK) 3426 return -EINVAL; 3427 3428 if (WARN_ON_ONCE(ext4_has_inline_data(inode))) 3429 return -ERANGE; 3430 3431 /* 3432 * Calculate the first and last logical blocks respectively. 3433 */ 3434 map.m_lblk = offset >> blkbits; 3435 map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits, 3436 EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1; 3437 3438 if (flags & IOMAP_WRITE) 3439 ret = ext4_iomap_alloc(inode, &map, flags); 3440 else 3441 ret = ext4_map_blocks(NULL, inode, &map, 0); 3442 3443 if (ret < 0) 3444 return ret; 3445 3446 ext4_set_iomap(inode, iomap, &map, offset, length); 3447 3448 return 0; 3449 } 3450 3451 static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset, 3452 loff_t length, unsigned flags, struct iomap *iomap, 3453 struct iomap *srcmap) 3454 { 3455 int ret; 3456 3457 /* 3458 * Even for writes we don't need to allocate blocks, so just pretend 3459 * we are reading to save overhead of starting a transaction. 3460 */ 3461 flags &= ~IOMAP_WRITE; 3462 ret = ext4_iomap_begin(inode, offset, length, flags, iomap, srcmap); 3463 WARN_ON_ONCE(iomap->type != IOMAP_MAPPED); 3464 return ret; 3465 } 3466 3467 static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length, 3468 ssize_t written, unsigned flags, struct iomap *iomap) 3469 { 3470 /* 3471 * Check to see whether an error occurred while writing out the data to 3472 * the allocated blocks. If so, return the magic error code so that we 3473 * fallback to buffered I/O and attempt to complete the remainder of 3474 * the I/O. Any blocks that may have been allocated in preparation for 3475 * the direct I/O will be reused during buffered I/O. 
3476 */ 3477 if (flags & (IOMAP_WRITE | IOMAP_DIRECT) && written == 0) 3478 return -ENOTBLK; 3479 3480 return 0; 3481 } 3482 3483 const struct iomap_ops ext4_iomap_ops = { 3484 .iomap_begin = ext4_iomap_begin, 3485 .iomap_end = ext4_iomap_end, 3486 }; 3487 3488 const struct iomap_ops ext4_iomap_overwrite_ops = { 3489 .iomap_begin = ext4_iomap_overwrite_begin, 3490 .iomap_end = ext4_iomap_end, 3491 }; 3492 3493 static bool ext4_iomap_is_delalloc(struct inode *inode, 3494 struct ext4_map_blocks *map) 3495 { 3496 struct extent_status es; 3497 ext4_lblk_t offset = 0, end = map->m_lblk + map->m_len - 1; 3498 3499 ext4_es_find_extent_range(inode, &ext4_es_is_delayed, 3500 map->m_lblk, end, &es); 3501 3502 if (!es.es_len || es.es_lblk > end) 3503 return false; 3504 3505 if (es.es_lblk > map->m_lblk) { 3506 map->m_len = es.es_lblk - map->m_lblk; 3507 return false; 3508 } 3509 3510 offset = map->m_lblk - es.es_lblk; 3511 map->m_len = es.es_len - offset; 3512 3513 return true; 3514 } 3515 3516 static int ext4_iomap_begin_report(struct inode *inode, loff_t offset, 3517 loff_t length, unsigned int flags, 3518 struct iomap *iomap, struct iomap *srcmap) 3519 { 3520 int ret; 3521 bool delalloc = false; 3522 struct ext4_map_blocks map; 3523 u8 blkbits = inode->i_blkbits; 3524 3525 if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK) 3526 return -EINVAL; 3527 3528 if (ext4_has_inline_data(inode)) { 3529 ret = ext4_inline_data_iomap(inode, iomap); 3530 if (ret != -EAGAIN) { 3531 if (ret == 0 && offset >= iomap->length) 3532 ret = -ENOENT; 3533 return ret; 3534 } 3535 } 3536 3537 /* 3538 * Calculate the first and last logical block respectively. 3539 */ 3540 map.m_lblk = offset >> blkbits; 3541 map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits, 3542 EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1; 3543 3544 /* 3545 * Fiemap callers may call for offset beyond s_bitmap_maxbytes. 3546 * So handle it here itself instead of querying ext4_map_blocks(). 3547 * Since ext4_map_blocks() will warn about it and will return 3548 * -EIO error. 3549 */ 3550 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 3551 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 3552 3553 if (offset >= sbi->s_bitmap_maxbytes) { 3554 map.m_flags = 0; 3555 goto set_iomap; 3556 } 3557 } 3558 3559 ret = ext4_map_blocks(NULL, inode, &map, 0); 3560 if (ret < 0) 3561 return ret; 3562 if (ret == 0) 3563 delalloc = ext4_iomap_is_delalloc(inode, &map); 3564 3565 set_iomap: 3566 ext4_set_iomap(inode, iomap, &map, offset, length); 3567 if (delalloc && iomap->type == IOMAP_HOLE) 3568 iomap->type = IOMAP_DELALLOC; 3569 3570 return 0; 3571 } 3572 3573 const struct iomap_ops ext4_iomap_report_ops = { 3574 .iomap_begin = ext4_iomap_begin_report, 3575 }; 3576 3577 /* 3578 * Pages can be marked dirty completely asynchronously from ext4's journalling 3579 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do 3580 * much here because ->set_page_dirty is called under VFS locks. The page is 3581 * not necessarily locked. 3582 * 3583 * We cannot just dirty the page and leave attached buffers clean, because the 3584 * buffers' dirty state is "definitive". We cannot just set the buffers dirty 3585 * or jbddirty because all the journalling code will explode. 3586 * 3587 * So what we do is to mark the page "pending dirty" and next time writepage 3588 * is called, propagate that into the buffers appropriately. 
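 * The "pending dirty" mark is PageChecked; ext4_writepage() tests it
 * together with ext4_should_journal_data() and routes such pages
 * through __ext4_journalled_writepage().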
3589 */ 3590 static int ext4_journalled_set_page_dirty(struct page *page) 3591 { 3592 SetPageChecked(page); 3593 return __set_page_dirty_nobuffers(page); 3594 } 3595 3596 static int ext4_set_page_dirty(struct page *page) 3597 { 3598 WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page)); 3599 WARN_ON_ONCE(!page_has_buffers(page)); 3600 return __set_page_dirty_buffers(page); 3601 } 3602 3603 static const struct address_space_operations ext4_aops = { 3604 .readpage = ext4_readpage, 3605 .readahead = ext4_readahead, 3606 .writepage = ext4_writepage, 3607 .writepages = ext4_writepages, 3608 .write_begin = ext4_write_begin, 3609 .write_end = ext4_write_end, 3610 .set_page_dirty = ext4_set_page_dirty, 3611 .bmap = ext4_bmap, 3612 .invalidatepage = ext4_invalidatepage, 3613 .releasepage = ext4_releasepage, 3614 .direct_IO = noop_direct_IO, 3615 .migratepage = buffer_migrate_page, 3616 .is_partially_uptodate = block_is_partially_uptodate, 3617 .error_remove_page = generic_error_remove_page, 3618 }; 3619 3620 static const struct address_space_operations ext4_journalled_aops = { 3621 .readpage = ext4_readpage, 3622 .readahead = ext4_readahead, 3623 .writepage = ext4_writepage, 3624 .writepages = ext4_writepages, 3625 .write_begin = ext4_write_begin, 3626 .write_end = ext4_journalled_write_end, 3627 .set_page_dirty = ext4_journalled_set_page_dirty, 3628 .bmap = ext4_bmap, 3629 .invalidatepage = ext4_journalled_invalidatepage, 3630 .releasepage = ext4_releasepage, 3631 .direct_IO = noop_direct_IO, 3632 .is_partially_uptodate = block_is_partially_uptodate, 3633 .error_remove_page = generic_error_remove_page, 3634 }; 3635 3636 static const struct address_space_operations ext4_da_aops = { 3637 .readpage = ext4_readpage, 3638 .readahead = ext4_readahead, 3639 .writepage = ext4_writepage, 3640 .writepages = ext4_writepages, 3641 .write_begin = ext4_da_write_begin, 3642 .write_end = ext4_da_write_end, 3643 .set_page_dirty = ext4_set_page_dirty, 3644 .bmap = ext4_bmap, 3645 .invalidatepage = ext4_invalidatepage, 3646 .releasepage = ext4_releasepage, 3647 .direct_IO = noop_direct_IO, 3648 .migratepage = buffer_migrate_page, 3649 .is_partially_uptodate = block_is_partially_uptodate, 3650 .error_remove_page = generic_error_remove_page, 3651 }; 3652 3653 static const struct address_space_operations ext4_dax_aops = { 3654 .writepages = ext4_dax_writepages, 3655 .direct_IO = noop_direct_IO, 3656 .set_page_dirty = noop_set_page_dirty, 3657 .bmap = ext4_bmap, 3658 .invalidatepage = noop_invalidatepage, 3659 }; 3660 3661 void ext4_set_aops(struct inode *inode) 3662 { 3663 switch (ext4_inode_journal_mode(inode)) { 3664 case EXT4_INODE_ORDERED_DATA_MODE: 3665 case EXT4_INODE_WRITEBACK_DATA_MODE: 3666 break; 3667 case EXT4_INODE_JOURNAL_DATA_MODE: 3668 inode->i_mapping->a_ops = &ext4_journalled_aops; 3669 return; 3670 default: 3671 BUG(); 3672 } 3673 if (IS_DAX(inode)) 3674 inode->i_mapping->a_ops = &ext4_dax_aops; 3675 else if (test_opt(inode->i_sb, DELALLOC)) 3676 inode->i_mapping->a_ops = &ext4_da_aops; 3677 else 3678 inode->i_mapping->a_ops = &ext4_aops; 3679 } 3680 3681 static int __ext4_block_zero_page_range(handle_t *handle, 3682 struct address_space *mapping, loff_t from, loff_t length) 3683 { 3684 ext4_fsblk_t index = from >> PAGE_SHIFT; 3685 unsigned offset = from & (PAGE_SIZE-1); 3686 unsigned blocksize, pos; 3687 ext4_lblk_t iblock; 3688 struct inode *inode = mapping->host; 3689 struct buffer_head *bh; 3690 struct page *page; 3691 int err = 0; 3692 3693 page = find_or_create_page(mapping, from >> PAGE_SHIFT, 
mapping_gfp_constraint(mapping, ~__GFP_FS)); 3695 if (!page) 3696 return -ENOMEM; 3697
3698 blocksize = inode->i_sb->s_blocksize; 3699
3700 iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits); 3701
3702 if (!page_has_buffers(page)) 3703 create_empty_buffers(page, blocksize, 0); 3704
3705 /* Find the buffer that contains "offset" */ 3706 bh = page_buffers(page); 3707 pos = blocksize; 3708 while (offset >= pos) { 3709 bh = bh->b_this_page; 3710 iblock++; 3711 pos += blocksize; 3712 } 3713 if (buffer_freed(bh)) { 3714 BUFFER_TRACE(bh, "freed: skip"); 3715 goto unlock; 3716 } 3717 if (!buffer_mapped(bh)) { 3718 BUFFER_TRACE(bh, "unmapped"); 3719 ext4_get_block(inode, iblock, bh, 0); 3720 /* unmapped? It's a hole - nothing to do */ 3721 if (!buffer_mapped(bh)) { 3722 BUFFER_TRACE(bh, "still unmapped"); 3723 goto unlock; 3724 } 3725 } 3726
3727 /* Ok, it's mapped. Make sure it's up-to-date */ 3728 if (PageUptodate(page)) 3729 set_buffer_uptodate(bh); 3730
3731 if (!buffer_uptodate(bh)) { 3732 err = -EIO; 3733 ll_rw_block(REQ_OP_READ, 0, 1, &bh); 3734 wait_on_buffer(bh); 3735 /* Uhhuh. Read error. Complain and punt. */ 3736 if (!buffer_uptodate(bh)) 3737 goto unlock; 3738 if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) { 3739 /* We expect the key to be set. */ 3740 BUG_ON(!fscrypt_has_encryption_key(inode)); 3741 err = fscrypt_decrypt_pagecache_blocks(page, blocksize, 3742 bh_offset(bh)); 3743 if (err) { 3744 clear_buffer_uptodate(bh); 3745 goto unlock; 3746 } 3747 } 3748 } 3749 if (ext4_should_journal_data(inode)) { 3750 BUFFER_TRACE(bh, "get write access"); 3751 err = ext4_journal_get_write_access(handle, bh); 3752 if (err) 3753 goto unlock; 3754 } 3755 zero_user(page, offset, length); 3756 BUFFER_TRACE(bh, "zeroed end of block"); 3757
3758 if (ext4_should_journal_data(inode)) { 3759 err = ext4_handle_dirty_metadata(handle, inode, bh); 3760 } else { 3761 err = 0; 3762 mark_buffer_dirty(bh); 3763 if (ext4_should_order_data(inode)) 3764 err = ext4_jbd2_inode_add_write(handle, inode, from, 3765 length); 3766 } 3767
3768 unlock: 3769 unlock_page(page); 3770 put_page(page); 3771 return err; 3772 } 3773
3774 /* 3775 * ext4_block_zero_page_range() zeros out a mapping of length 'length' 3776 * starting from file offset 'from'. The range to be zeroed must be 3777 * contained within one block. If the specified range exceeds the end of 3778 * the block, it will be shortened to the end of the block 3779 * that corresponds to 'from'. 3780 */ 3781 static int ext4_block_zero_page_range(handle_t *handle, 3782 struct address_space *mapping, loff_t from, loff_t length) 3783 { 3784 struct inode *inode = mapping->host; 3785 unsigned offset = from & (PAGE_SIZE-1); 3786 unsigned blocksize = inode->i_sb->s_blocksize; 3787 unsigned max = blocksize - (offset & (blocksize - 1)); 3788
3789 /* 3790 * Correct the length if it does not fall between 3791 * 'from' and the end of the block. 3792 */ 3793 if (length > max || length < 0) 3794 length = max; 3795
3796 if (IS_DAX(inode)) { 3797 return iomap_zero_range(inode, from, length, NULL, 3798 &ext4_iomap_ops); 3799 } 3800 return __ext4_block_zero_page_range(handle, mapping, from, length); 3801 } 3802
3803 /* 3804 * ext4_block_truncate_page() zeroes out a mapping from file offset `from' 3805 * up to the end of the block which corresponds to `from'. 3806 * This is required during truncate. We need to physically zero the tail end 3807 * of that block so it doesn't yield old data if the file is later grown.
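 *
 * (Worked example, assuming a 1k block size: from == 1234 gives an
 * in-block offset of 1234 & 1023 == 210, so length == 1024 - 210 == 814
 * and bytes 1234..2047 of the file are zeroed.)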
*/ 3809 static int ext4_block_truncate_page(handle_t *handle, 3810 struct address_space *mapping, loff_t from) 3811 { 3812 unsigned offset = from & (PAGE_SIZE-1); 3813 unsigned length; 3814 unsigned blocksize; 3815 struct inode *inode = mapping->host; 3816
3817 /* If we are processing an encrypted inode whose key is unavailable (e.g. during orphan list handling), there is nothing we can zero */ 3818 if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode)) 3819 return 0; 3820
3821 blocksize = inode->i_sb->s_blocksize; 3822 length = blocksize - (offset & (blocksize - 1)); 3823
3824 return ext4_block_zero_page_range(handle, mapping, from, length); 3825 } 3826
3827 int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode, 3828 loff_t lstart, loff_t length) 3829 { 3830 struct super_block *sb = inode->i_sb; 3831 struct address_space *mapping = inode->i_mapping; 3832 unsigned partial_start, partial_end; 3833 ext4_fsblk_t start, end; 3834 loff_t byte_end = (lstart + length - 1); 3835 int err = 0; 3836
3837 partial_start = lstart & (sb->s_blocksize - 1); 3838 partial_end = byte_end & (sb->s_blocksize - 1); 3839
3840 start = lstart >> sb->s_blocksize_bits; 3841 end = byte_end >> sb->s_blocksize_bits; 3842
3843 /* Handle a partial zero within a single block */ 3844 if (start == end && 3845 (partial_start || (partial_end != sb->s_blocksize - 1))) { 3846 err = ext4_block_zero_page_range(handle, mapping, 3847 lstart, length); 3848 return err; 3849 } 3850 /* Handle a partial zero out at the start of the range */ 3851 if (partial_start) { 3852 err = ext4_block_zero_page_range(handle, mapping, 3853 lstart, sb->s_blocksize); 3854 if (err) 3855 return err; 3856 } 3857 /* Handle a partial zero out at the end of the range */ 3858 if (partial_end != sb->s_blocksize - 1) 3859 err = ext4_block_zero_page_range(handle, mapping, 3860 byte_end - partial_end, 3861 partial_end + 1); 3862 return err; 3863 } 3864
3865 int ext4_can_truncate(struct inode *inode) 3866 { 3867 if (S_ISREG(inode->i_mode)) 3868 return 1; 3869 if (S_ISDIR(inode->i_mode)) 3870 return 1; 3871 if (S_ISLNK(inode->i_mode)) 3872 return !ext4_inode_is_fast_symlink(inode); 3873 return 0; 3874 } 3875
3876 /* 3877 * We have to make sure i_disksize gets properly updated before we truncate 3878 * page cache due to hole punching or zero range. Otherwise the i_disksize 3879 * update can get lost, as it may have been postponed to the submission of 3880 * writeback, which will never happen once we truncate the page cache.
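 *
 * (Illustrative race: a delalloc write extends i_size, but i_disksize is
 * only scheduled to be pushed forward when those pages are written back;
 * if a hole punch discards the dirty pages first, nothing is left to
 * update i_disksize, so it must be synced up to i_size here beforehand.)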
*/ 3882 int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset, 3883 loff_t len) 3884 { 3885 handle_t *handle; 3886 loff_t size = i_size_read(inode); 3887
3888 WARN_ON(!inode_is_locked(inode)); 3889 if (offset > size || offset + len < size) 3890 return 0; 3891
3892 if (EXT4_I(inode)->i_disksize >= size) 3893 return 0; 3894
3895 handle = ext4_journal_start(inode, EXT4_HT_MISC, 1); 3896 if (IS_ERR(handle)) 3897 return PTR_ERR(handle); 3898 ext4_update_i_disksize(inode, size); 3899 ext4_mark_inode_dirty(handle, inode); 3900 ext4_journal_stop(handle); 3901
3902 return 0; 3903 } 3904
3905 static void ext4_wait_dax_page(struct ext4_inode_info *ei) 3906 { 3907 up_write(&ei->i_mmap_sem); 3908 schedule(); 3909 down_write(&ei->i_mmap_sem); 3910 } 3911
3912 int ext4_break_layouts(struct inode *inode) 3913 { 3914 struct ext4_inode_info *ei = EXT4_I(inode); 3915 struct page *page; 3916 int error; 3917
3918 if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem))) 3919 return -EINVAL; 3920
3921 do { 3922 page = dax_layout_busy_page(inode->i_mapping); 3923 if (!page) 3924 return 0; 3925
3926 error = ___wait_var_event(&page->_refcount, 3927 atomic_read(&page->_refcount) == 1, 3928 TASK_INTERRUPTIBLE, 0, 0, 3929 ext4_wait_dax_page(ei)); 3930 } while (error == 0); 3931
3932 return error; 3933 } 3934
3935 /* 3936 * ext4_punch_hole: punches a hole in a file by releasing the blocks 3937 * associated with the given offset and length 3938 * 3939 * @inode: File inode 3940 * @offset: The offset where the hole will begin 3941 * @len: The length of the hole 3942 * 3943 * Returns: 0 on success or negative on failure 3944 */ 3945
3946 int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) 3947 { 3948 struct super_block *sb = inode->i_sb; 3949 ext4_lblk_t first_block, stop_block; 3950 struct address_space *mapping = inode->i_mapping; 3951 loff_t first_block_offset, last_block_offset; 3952 handle_t *handle; 3953 unsigned int credits; 3954 int ret = 0; 3955
3956 trace_ext4_punch_hole(inode, offset, length, 0); 3957
3958 ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); 3959 if (ext4_has_inline_data(inode)) { 3960 down_write(&EXT4_I(inode)->i_mmap_sem); 3961 ret = ext4_convert_inline_data(inode); 3962 up_write(&EXT4_I(inode)->i_mmap_sem); 3963 if (ret) 3964 return ret; 3965 } 3966
3967 /* 3968 * Write out all dirty pages to avoid race conditions, 3969 * then release them. 3970 */ 3971 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { 3972 ret = filemap_write_and_wait_range(mapping, offset, 3973 offset + length - 1); 3974 if (ret) 3975 return ret; 3976 } 3977
3978 inode_lock(inode); 3979
3980 /* No need to punch a hole beyond i_size */ 3981 if (offset >= inode->i_size) 3982 goto out_mutex; 3983
3984 /* 3985 * If the hole extends beyond i_size, set the hole 3986 * to end after the page that contains i_size. 3987 */ 3988 if (offset + length > inode->i_size) { 3989 length = inode->i_size + 3990 PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) - 3991 offset; 3992 } 3993
3994 if (offset & (sb->s_blocksize - 1) || 3995 (offset + length) & (sb->s_blocksize - 1)) { 3996 /* 3997 * Attach jinode to the inode for jbd2 if we do any zeroing of 3998 * a partial block. 3999 */ 4000 ret = ext4_inode_attach_jinode(inode); 4001 if (ret < 0) 4002 goto out_mutex; 4003
4004 } 4005
4006 /* Wait for all existing dio workers; newcomers will block on i_mutex */ 4007 inode_dio_wait(inode); 4008
4009 /* 4010 * Prevent page faults from reinstantiating pages we have released from 4011 * the page cache.
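 *
 * (The fault side takes EXT4_I(inode)->i_mmap_sem for reading, e.g. in
 * ext4_page_mkwrite(), so holding it for writing here keeps new faults
 * out until the hole has been punched.)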
*/ 4013 down_write(&EXT4_I(inode)->i_mmap_sem); 4014
4015 ret = ext4_break_layouts(inode); 4016 if (ret) 4017 goto out_dio; 4018
4019 first_block_offset = round_up(offset, sb->s_blocksize); 4020 last_block_offset = round_down((offset + length), sb->s_blocksize) - 1; 4021
4022 /* Now release the pages and zero the block-aligned part of the pages */ 4023 if (last_block_offset > first_block_offset) { 4024 ret = ext4_update_disksize_before_punch(inode, offset, length); 4025 if (ret) 4026 goto out_dio; 4027 truncate_pagecache_range(inode, first_block_offset, 4028 last_block_offset); 4029 } 4030
4031 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4032 credits = ext4_writepage_trans_blocks(inode); 4033 else 4034 credits = ext4_blocks_for_truncate(inode); 4035 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 4036 if (IS_ERR(handle)) { 4037 ret = PTR_ERR(handle); 4038 ext4_std_error(sb, ret); 4039 goto out_dio; 4040 } 4041
4042 ret = ext4_zero_partial_blocks(handle, inode, offset, 4043 length); 4044 if (ret) 4045 goto out_stop; 4046
4047 first_block = (offset + sb->s_blocksize - 1) >> 4048 EXT4_BLOCK_SIZE_BITS(sb); 4049 stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb); 4050
4051 /* If there are blocks to remove, do it */ 4052 if (stop_block > first_block) { 4053
4054 down_write(&EXT4_I(inode)->i_data_sem); 4055 ext4_discard_preallocations(inode); 4056
4057 ret = ext4_es_remove_extent(inode, first_block, 4058 stop_block - first_block); 4059 if (ret) { 4060 up_write(&EXT4_I(inode)->i_data_sem); 4061 goto out_stop; 4062 } 4063
4064 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4065 ret = ext4_ext_remove_space(inode, first_block, 4066 stop_block - 1); 4067 else 4068 ret = ext4_ind_remove_space(handle, inode, first_block, 4069 stop_block); 4070
4071 up_write(&EXT4_I(inode)->i_data_sem); 4072 } 4073 if (IS_SYNC(inode)) 4074 ext4_handle_sync(handle); 4075
4076 inode->i_mtime = inode->i_ctime = current_time(inode); 4077 ext4_mark_inode_dirty(handle, inode); 4078 if (ret >= 0) 4079 ext4_update_inode_fsync_trans(handle, inode, 1); 4080 out_stop: 4081 ext4_journal_stop(handle); 4082 out_dio: 4083 up_write(&EXT4_I(inode)->i_mmap_sem); 4084 out_mutex: 4085 inode_unlock(inode); 4086 return ret; 4087 } 4088
4089 int ext4_inode_attach_jinode(struct inode *inode) 4090 { 4091 struct ext4_inode_info *ei = EXT4_I(inode); 4092 struct jbd2_inode *jinode; 4093
4094 if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal) 4095 return 0; 4096
4097 jinode = jbd2_alloc_inode(GFP_KERNEL); 4098 spin_lock(&inode->i_lock); 4099 if (!ei->jinode) { 4100 if (!jinode) { 4101 spin_unlock(&inode->i_lock); 4102 return -ENOMEM; 4103 } 4104 ei->jinode = jinode; 4105 jbd2_journal_init_jbd_inode(ei->jinode, inode); 4106 jinode = NULL; 4107 } 4108 spin_unlock(&inode->i_lock); 4109 if (unlikely(jinode != NULL)) 4110 jbd2_free_inode(jinode); 4111 return 0; 4112 } 4113
4114 /* 4115 * ext4_truncate() 4116 * 4117 * We block out ext4_get_block() block instantiations across the entire 4118 * transaction, and VFS/VM ensures that ext4_truncate() cannot run 4119 * simultaneously on behalf of the same inode. 4120 * 4121 * As we work through the truncate and commit bits of it to the journal there 4122 * is one core, guiding principle: the file's tree must always be consistent on 4123 * disk. We must be able to restart the truncate after a crash.
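 *
 * For an indirect-block file this means, for instance, clearing the
 * pointer to a data block no later than the transaction that frees the
 * block itself, and working from the rightmost leaf back towards the
 * inode, so that a replayed journal can never expose a pointer to an
 * already-freed block.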
4124 * 4125 * The file's tree may be transiently inconsistent in memory (although it 4126 * probably isn't), but whenever we close off and commit a journal transaction, 4127 * the contents of (the filesystem + the journal) must be consistent and 4128 * restartable. It's pretty simple, really: bottom up, right to left (although 4129 * left-to-right works OK too). 4130 * 4131 * Note that at recovery time, journal replay occurs *before* the restart of 4132 * truncate against the orphan inode list. 4133 * 4134 * The committed inode has the new, desired i_size (which is the same as 4135 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see 4136 * that this inode's truncate did not complete and it will again call 4137 * ext4_truncate() to have another go. So there will be instantiated blocks 4138 * to the right of the truncation point in a crashed ext4 filesystem. But 4139 * that's fine - as long as they are linked from the inode, the post-crash 4140 * ext4_truncate() run will find them and release them. 4141 */ 4142 int ext4_truncate(struct inode *inode) 4143 { 4144 struct ext4_inode_info *ei = EXT4_I(inode); 4145 unsigned int credits; 4146 int err = 0; 4147 handle_t *handle; 4148 struct address_space *mapping = inode->i_mapping; 4149 4150 /* 4151 * There is a possibility that we're either freeing the inode 4152 * or it's a completely new inode. In those cases we might not 4153 * have i_mutex locked because it's not necessary. 4154 */ 4155 if (!(inode->i_state & (I_NEW|I_FREEING))) 4156 WARN_ON(!inode_is_locked(inode)); 4157 trace_ext4_truncate_enter(inode); 4158 4159 if (!ext4_can_truncate(inode)) 4160 return 0; 4161 4162 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) 4163 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); 4164 4165 if (ext4_has_inline_data(inode)) { 4166 int has_inline = 1; 4167 4168 err = ext4_inline_data_truncate(inode, &has_inline); 4169 if (err) 4170 return err; 4171 if (has_inline) 4172 return 0; 4173 } 4174 4175 /* If we zero-out tail of the page, we have to create jinode for jbd2 */ 4176 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) { 4177 if (ext4_inode_attach_jinode(inode) < 0) 4178 return 0; 4179 } 4180 4181 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4182 credits = ext4_writepage_trans_blocks(inode); 4183 else 4184 credits = ext4_blocks_for_truncate(inode); 4185 4186 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 4187 if (IS_ERR(handle)) 4188 return PTR_ERR(handle); 4189 4190 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) 4191 ext4_block_truncate_page(handle, mapping, inode->i_size); 4192 4193 /* 4194 * We add the inode to the orphan list, so that if this 4195 * truncate spans multiple transactions, and we crash, we will 4196 * resume the truncate when the filesystem recovers. It also 4197 * marks the inode dirty, to catch the new size. 4198 * 4199 * Implication: the file must always be in a sane, consistent 4200 * truncatable state while each transaction commits. 
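 *
 * (On disk the orphan list is, roughly, a singly linked list threaded
 * through the orphaned inodes' i_dtime fields and headed by the
 * superblock's s_last_orphan field; ext4_orphan_cleanup() walks it at
 * mount time to finish interrupted truncates.)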
4201 */ 4202 err = ext4_orphan_add(handle, inode); 4203 if (err) 4204 goto out_stop; 4205 4206 down_write(&EXT4_I(inode)->i_data_sem); 4207 4208 ext4_discard_preallocations(inode); 4209 4210 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4211 err = ext4_ext_truncate(handle, inode); 4212 else 4213 ext4_ind_truncate(handle, inode); 4214 4215 up_write(&ei->i_data_sem); 4216 if (err) 4217 goto out_stop; 4218 4219 if (IS_SYNC(inode)) 4220 ext4_handle_sync(handle); 4221 4222 out_stop: 4223 /* 4224 * If this was a simple ftruncate() and the file will remain alive, 4225 * then we need to clear up the orphan record which we created above. 4226 * However, if this was a real unlink then we were called by 4227 * ext4_evict_inode(), and we allow that function to clean up the 4228 * orphan info for us. 4229 */ 4230 if (inode->i_nlink) 4231 ext4_orphan_del(handle, inode); 4232 4233 inode->i_mtime = inode->i_ctime = current_time(inode); 4234 ext4_mark_inode_dirty(handle, inode); 4235 ext4_journal_stop(handle); 4236 4237 trace_ext4_truncate_exit(inode); 4238 return err; 4239 } 4240 4241 /* 4242 * ext4_get_inode_loc returns with an extra refcount against the inode's 4243 * underlying buffer_head on success. If 'in_mem' is true, we have all 4244 * data in memory that is needed to recreate the on-disk version of this 4245 * inode. 4246 */ 4247 static int __ext4_get_inode_loc(struct inode *inode, 4248 struct ext4_iloc *iloc, int in_mem) 4249 { 4250 struct ext4_group_desc *gdp; 4251 struct buffer_head *bh; 4252 struct super_block *sb = inode->i_sb; 4253 ext4_fsblk_t block; 4254 struct blk_plug plug; 4255 int inodes_per_block, inode_offset; 4256 4257 iloc->bh = NULL; 4258 if (inode->i_ino < EXT4_ROOT_INO || 4259 inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)) 4260 return -EFSCORRUPTED; 4261 4262 iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); 4263 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); 4264 if (!gdp) 4265 return -EIO; 4266 4267 /* 4268 * Figure out the offset within the block group inode table 4269 */ 4270 inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; 4271 inode_offset = ((inode->i_ino - 1) % 4272 EXT4_INODES_PER_GROUP(sb)); 4273 block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); 4274 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); 4275 4276 bh = sb_getblk(sb, block); 4277 if (unlikely(!bh)) 4278 return -ENOMEM; 4279 if (ext4_simulate_fail(sb, EXT4_SIM_INODE_EIO)) 4280 goto simulate_eio; 4281 if (!buffer_uptodate(bh)) { 4282 lock_buffer(bh); 4283 4284 /* 4285 * If the buffer has the write error flag, we have failed 4286 * to write out another inode in the same block. In this 4287 * case, we don't have to read the block because we may 4288 * read the old inode data successfully. 4289 */ 4290 if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) 4291 set_buffer_uptodate(bh); 4292 4293 if (buffer_uptodate(bh)) { 4294 /* someone brought it uptodate while we waited */ 4295 unlock_buffer(bh); 4296 goto has_buffer; 4297 } 4298 4299 /* 4300 * If we have all information of the inode in memory and this 4301 * is the only valid inode in the block, we need not read the 4302 * block. 4303 */ 4304 if (in_mem) { 4305 struct buffer_head *bitmap_bh; 4306 int i, start; 4307 4308 start = inode_offset & ~(inodes_per_block - 1); 4309 4310 /* Is the inode bitmap in cache? 
*/ 4311 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); 4312 if (unlikely(!bitmap_bh)) 4313 goto make_io; 4314 4315 /* 4316 * If the inode bitmap isn't in cache then the 4317 * optimisation may end up performing two reads instead 4318 * of one, so skip it. 4319 */ 4320 if (!buffer_uptodate(bitmap_bh)) { 4321 brelse(bitmap_bh); 4322 goto make_io; 4323 } 4324 for (i = start; i < start + inodes_per_block; i++) { 4325 if (i == inode_offset) 4326 continue; 4327 if (ext4_test_bit(i, bitmap_bh->b_data)) 4328 break; 4329 } 4330 brelse(bitmap_bh); 4331 if (i == start + inodes_per_block) { 4332 /* all other inodes are free, so skip I/O */ 4333 memset(bh->b_data, 0, bh->b_size); 4334 set_buffer_uptodate(bh); 4335 unlock_buffer(bh); 4336 goto has_buffer; 4337 } 4338 } 4339 4340 make_io: 4341 /* 4342 * If we need to do any I/O, try to pre-readahead extra 4343 * blocks from the inode table. 4344 */ 4345 blk_start_plug(&plug); 4346 if (EXT4_SB(sb)->s_inode_readahead_blks) { 4347 ext4_fsblk_t b, end, table; 4348 unsigned num; 4349 __u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks; 4350 4351 table = ext4_inode_table(sb, gdp); 4352 /* s_inode_readahead_blks is always a power of 2 */ 4353 b = block & ~((ext4_fsblk_t) ra_blks - 1); 4354 if (table > b) 4355 b = table; 4356 end = b + ra_blks; 4357 num = EXT4_INODES_PER_GROUP(sb); 4358 if (ext4_has_group_desc_csum(sb)) 4359 num -= ext4_itable_unused_count(sb, gdp); 4360 table += num / inodes_per_block; 4361 if (end > table) 4362 end = table; 4363 while (b <= end) 4364 sb_breadahead_unmovable(sb, b++); 4365 } 4366 4367 /* 4368 * There are other valid inodes in the buffer, this inode 4369 * has in-inode xattrs, or we don't have this inode in memory. 4370 * Read the block from disk. 4371 */ 4372 trace_ext4_load_inode(inode); 4373 get_bh(bh); 4374 bh->b_end_io = end_buffer_read_sync; 4375 submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh); 4376 blk_finish_plug(&plug); 4377 wait_on_buffer(bh); 4378 if (!buffer_uptodate(bh)) { 4379 simulate_eio: 4380 ext4_error_inode_block(inode, block, EIO, 4381 "unable to read itable block"); 4382 brelse(bh); 4383 return -EIO; 4384 } 4385 } 4386 has_buffer: 4387 iloc->bh = bh; 4388 return 0; 4389 } 4390 4391 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) 4392 { 4393 /* We have all inode data except xattrs in memory here. 
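 * If EXT4_STATE_XATTR is set, the in-inode extended attribute area is
 * not mirrored in memory, so in_mem is 0 and the block has to be read.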
*/ 4394 return __ext4_get_inode_loc(inode, iloc, 4395 !ext4_test_inode_state(inode, EXT4_STATE_XATTR)); 4396 } 4397
4398 static bool ext4_should_use_dax(struct inode *inode) 4399 { 4400 if (!test_opt(inode->i_sb, DAX)) 4401 return false; 4402 if (!S_ISREG(inode->i_mode)) 4403 return false; 4404 if (ext4_should_journal_data(inode)) 4405 return false; 4406 if (ext4_has_inline_data(inode)) 4407 return false; 4408 if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT)) 4409 return false; 4410 if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY)) 4411 return false; 4412 return true; 4413 } 4414
4415 void ext4_set_inode_flags(struct inode *inode) 4416 { 4417 unsigned int flags = EXT4_I(inode)->i_flags; 4418 unsigned int new_fl = 0; 4419
4420 if (flags & EXT4_SYNC_FL) 4421 new_fl |= S_SYNC; 4422 if (flags & EXT4_APPEND_FL) 4423 new_fl |= S_APPEND; 4424 if (flags & EXT4_IMMUTABLE_FL) 4425 new_fl |= S_IMMUTABLE; 4426 if (flags & EXT4_NOATIME_FL) 4427 new_fl |= S_NOATIME; 4428 if (flags & EXT4_DIRSYNC_FL) 4429 new_fl |= S_DIRSYNC; 4430 if (ext4_should_use_dax(inode)) 4431 new_fl |= S_DAX; 4432 if (flags & EXT4_ENCRYPT_FL) 4433 new_fl |= S_ENCRYPTED; 4434 if (flags & EXT4_CASEFOLD_FL) 4435 new_fl |= S_CASEFOLD; 4436 if (flags & EXT4_VERITY_FL) 4437 new_fl |= S_VERITY; 4438 inode_set_flags(inode, new_fl, 4439 S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX| 4440 S_ENCRYPTED|S_CASEFOLD|S_VERITY); 4441 } 4442
4443 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, 4444 struct ext4_inode_info *ei) 4445 { 4446 blkcnt_t i_blocks; 4447 struct inode *inode = &(ei->vfs_inode); 4448 struct super_block *sb = inode->i_sb; 4449
4450 if (ext4_has_feature_huge_file(sb)) { 4451 /* we are using combined 48 bit field */ 4452 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | 4453 le32_to_cpu(raw_inode->i_blocks_lo); 4454 if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) { 4455 /* i_blocks is stored in units of file system block size */ 4456 return i_blocks << (inode->i_blkbits - 9); 4457 } else { 4458 return i_blocks; 4459 } 4460 } else { 4461 return le32_to_cpu(raw_inode->i_blocks_lo); 4462 } 4463 } 4464
4465 static inline int ext4_iget_extra_inode(struct inode *inode, 4466 struct ext4_inode *raw_inode, 4467 struct ext4_inode_info *ei) 4468 { 4469 __le32 *magic = (void *)raw_inode + 4470 EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize; 4471
4472 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <= 4473 EXT4_INODE_SIZE(inode->i_sb) && 4474 *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { 4475 ext4_set_inode_state(inode, EXT4_STATE_XATTR); 4476 return ext4_find_inline_data_nolock(inode); 4477 } else 4478 EXT4_I(inode)->i_inline_off = 0; 4479 return 0; 4480 } 4481
4482 int ext4_get_projid(struct inode *inode, kprojid_t *projid) 4483 { 4484 if (!ext4_has_feature_project(inode->i_sb)) 4485 return -EOPNOTSUPP; 4486 *projid = EXT4_I(inode)->i_projid; 4487 return 0; 4488 } 4489
4490 /* 4491 * ext4 has a self-managed i_version for ea inodes; it stores the lower 32 4492 * bits of the refcount in i_version, so use raw values if the inode has the 4493 * EXT4_EA_INODE_FL flag set.
4494 */ 4495 static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val) 4496 { 4497 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) 4498 inode_set_iversion_raw(inode, val); 4499 else 4500 inode_set_iversion_queried(inode, val); 4501 } 4502 static inline u64 ext4_inode_peek_iversion(const struct inode *inode) 4503 { 4504 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) 4505 return inode_peek_iversion_raw(inode); 4506 else 4507 return inode_peek_iversion(inode); 4508 } 4509 4510 struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, 4511 ext4_iget_flags flags, const char *function, 4512 unsigned int line) 4513 { 4514 struct ext4_iloc iloc; 4515 struct ext4_inode *raw_inode; 4516 struct ext4_inode_info *ei; 4517 struct inode *inode; 4518 journal_t *journal = EXT4_SB(sb)->s_journal; 4519 long ret; 4520 loff_t size; 4521 int block; 4522 uid_t i_uid; 4523 gid_t i_gid; 4524 projid_t i_projid; 4525 4526 if ((!(flags & EXT4_IGET_SPECIAL) && 4527 (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) || 4528 (ino < EXT4_ROOT_INO) || 4529 (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) { 4530 if (flags & EXT4_IGET_HANDLE) 4531 return ERR_PTR(-ESTALE); 4532 __ext4_error(sb, function, line, EFSCORRUPTED, 0, 4533 "inode #%lu: comm %s: iget: illegal inode #", 4534 ino, current->comm); 4535 return ERR_PTR(-EFSCORRUPTED); 4536 } 4537 4538 inode = iget_locked(sb, ino); 4539 if (!inode) 4540 return ERR_PTR(-ENOMEM); 4541 if (!(inode->i_state & I_NEW)) 4542 return inode; 4543 4544 ei = EXT4_I(inode); 4545 iloc.bh = NULL; 4546 4547 ret = __ext4_get_inode_loc(inode, &iloc, 0); 4548 if (ret < 0) 4549 goto bad_inode; 4550 raw_inode = ext4_raw_inode(&iloc); 4551 4552 if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) { 4553 ext4_error_inode(inode, function, line, 0, 4554 "iget: root inode unallocated"); 4555 ret = -EFSCORRUPTED; 4556 goto bad_inode; 4557 } 4558 4559 if ((flags & EXT4_IGET_HANDLE) && 4560 (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) { 4561 ret = -ESTALE; 4562 goto bad_inode; 4563 } 4564 4565 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4566 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 4567 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 4568 EXT4_INODE_SIZE(inode->i_sb) || 4569 (ei->i_extra_isize & 3)) { 4570 ext4_error_inode(inode, function, line, 0, 4571 "iget: bad extra_isize %u " 4572 "(inode size %u)", 4573 ei->i_extra_isize, 4574 EXT4_INODE_SIZE(inode->i_sb)); 4575 ret = -EFSCORRUPTED; 4576 goto bad_inode; 4577 } 4578 } else 4579 ei->i_extra_isize = 0; 4580 4581 /* Precompute checksum seed for inode metadata */ 4582 if (ext4_has_metadata_csum(sb)) { 4583 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4584 __u32 csum; 4585 __le32 inum = cpu_to_le32(inode->i_ino); 4586 __le32 gen = raw_inode->i_generation; 4587 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, 4588 sizeof(inum)); 4589 ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, 4590 sizeof(gen)); 4591 } 4592 4593 if (!ext4_inode_csum_verify(inode, raw_inode, ei) || 4594 ext4_simulate_fail(sb, EXT4_SIM_INODE_CRC)) { 4595 ext4_error_inode_err(inode, function, line, 0, EFSBADCRC, 4596 "iget: checksum invalid"); 4597 ret = -EFSBADCRC; 4598 goto bad_inode; 4599 } 4600 4601 inode->i_mode = le16_to_cpu(raw_inode->i_mode); 4602 i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 4603 i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 4604 if (ext4_has_feature_project(sb) && 4605 EXT4_INODE_SIZE(sb) > 
EXT4_GOOD_OLD_INODE_SIZE && 4606 EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) 4607 i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid); 4608 else 4609 i_projid = EXT4_DEF_PROJID; 4610
4611 if (!(test_opt(inode->i_sb, NO_UID32))) { 4612 i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; 4613 i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; 4614 } 4615 i_uid_write(inode, i_uid); 4616 i_gid_write(inode, i_gid); 4617 ei->i_projid = make_kprojid(&init_user_ns, i_projid); 4618 set_nlink(inode, le16_to_cpu(raw_inode->i_links_count)); 4619
4620 ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ 4621 ei->i_inline_off = 0; 4622 ei->i_dir_start_lookup = 0; 4623 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); 4624 /* We now have enough fields to check if the inode was active or not. 4625 * This is needed because nfsd might try to access dead inodes; 4626 * the test is the same one that e2fsck uses. 4627 * NeilBrown 1999oct15 4628 */ 4629 if (inode->i_nlink == 0) { 4630 if ((inode->i_mode == 0 || 4631 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) && 4632 ino != EXT4_BOOT_LOADER_INO) { 4633 /* this inode is deleted */ 4634 ret = -ESTALE; 4635 goto bad_inode; 4636 } 4637 /* The only unlinked inodes we let through here have 4638 * valid i_mode and are being read by the orphan 4639 * recovery code: that's fine, we're about to complete 4640 * the process of deleting those. 4641 * OR it is the EXT4_BOOT_LOADER_INO which is 4642 * not initialized on a new filesystem. */ 4643 } 4644 ei->i_flags = le32_to_cpu(raw_inode->i_flags); 4645 ext4_set_inode_flags(inode); 4646 inode->i_blocks = ext4_inode_blocks(raw_inode, ei); 4647 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); 4648 if (ext4_has_feature_64bit(sb)) 4649 ei->i_file_acl |= 4650 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; 4651 inode->i_size = ext4_isize(sb, raw_inode); 4652 if ((size = i_size_read(inode)) < 0) { 4653 ext4_error_inode(inode, function, line, 0, 4654 "iget: bad i_size value: %lld", size); 4655 ret = -EFSCORRUPTED; 4656 goto bad_inode; 4657 } 4658 /* 4659 * If dir_index is not enabled but there's a dir with the INDEX flag 4660 * set, we'd normally treat the htree data as empty space. But with 4661 * metadata checksumming that would corrupt the checksums, so forbid it. 4662 */ 4663 if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) && 4664 ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) { 4665 ext4_error_inode(inode, function, line, 0, 4666 "iget: Dir with htree data on filesystem without dir_index feature."); 4667 ret = -EFSCORRUPTED; 4668 goto bad_inode; 4669 } 4670 ei->i_disksize = inode->i_size; 4671 #ifdef CONFIG_QUOTA 4672 ei->i_reserved_quota = 0; 4673 #endif 4674 inode->i_generation = le32_to_cpu(raw_inode->i_generation); 4675 ei->i_block_group = iloc.block_group; 4676 ei->i_last_alloc_group = ~0; 4677 /* 4678 * NOTE! The in-memory inode i_data array is in little-endian order 4679 * even on big-endian machines: we do NOT byteswap the block numbers! 4680 */ 4681 for (block = 0; block < EXT4_N_BLOCKS; block++) 4682 ei->i_data[block] = raw_inode->i_block[block]; 4683 INIT_LIST_HEAD(&ei->i_orphan); 4684
4685 /* 4686 * Set the transaction ids of transactions that have to be committed 4687 * to finish f[data]sync. We set them to the currently running 4688 * transaction as we cannot be sure that the inode or some of its 4689 * metadata isn't part of the transaction - the inode could have been 4690 * reclaimed and now it is reread from disk.
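 *
 * (These tids are consumed by ext4_sync_file(): roughly, fsync() hands
 * i_sync_tid to jbd2_complete_transaction(), while fdatasync() can get
 * away with the usually older i_datasync_tid.)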
4691 */ 4692 if (journal) { 4693 transaction_t *transaction; 4694 tid_t tid; 4695 4696 read_lock(&journal->j_state_lock); 4697 if (journal->j_running_transaction) 4698 transaction = journal->j_running_transaction; 4699 else 4700 transaction = journal->j_committing_transaction; 4701 if (transaction) 4702 tid = transaction->t_tid; 4703 else 4704 tid = journal->j_commit_sequence; 4705 read_unlock(&journal->j_state_lock); 4706 ei->i_sync_tid = tid; 4707 ei->i_datasync_tid = tid; 4708 } 4709 4710 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4711 if (ei->i_extra_isize == 0) { 4712 /* The extra space is currently unused. Use it. */ 4713 BUILD_BUG_ON(sizeof(struct ext4_inode) & 3); 4714 ei->i_extra_isize = sizeof(struct ext4_inode) - 4715 EXT4_GOOD_OLD_INODE_SIZE; 4716 } else { 4717 ret = ext4_iget_extra_inode(inode, raw_inode, ei); 4718 if (ret) 4719 goto bad_inode; 4720 } 4721 } 4722 4723 EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); 4724 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); 4725 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); 4726 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); 4727 4728 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) { 4729 u64 ivers = le32_to_cpu(raw_inode->i_disk_version); 4730 4731 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4732 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 4733 ivers |= 4734 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; 4735 } 4736 ext4_inode_set_iversion_queried(inode, ivers); 4737 } 4738 4739 ret = 0; 4740 if (ei->i_file_acl && 4741 !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { 4742 ext4_error_inode(inode, function, line, 0, 4743 "iget: bad extended attribute block %llu", 4744 ei->i_file_acl); 4745 ret = -EFSCORRUPTED; 4746 goto bad_inode; 4747 } else if (!ext4_has_inline_data(inode)) { 4748 /* validate the block references in the inode */ 4749 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 4750 (S_ISLNK(inode->i_mode) && 4751 !ext4_inode_is_fast_symlink(inode))) { 4752 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4753 ret = ext4_ext_check_inode(inode); 4754 else 4755 ret = ext4_ind_check_inode(inode); 4756 } 4757 } 4758 if (ret) 4759 goto bad_inode; 4760 4761 if (S_ISREG(inode->i_mode)) { 4762 inode->i_op = &ext4_file_inode_operations; 4763 inode->i_fop = &ext4_file_operations; 4764 ext4_set_aops(inode); 4765 } else if (S_ISDIR(inode->i_mode)) { 4766 inode->i_op = &ext4_dir_inode_operations; 4767 inode->i_fop = &ext4_dir_operations; 4768 } else if (S_ISLNK(inode->i_mode)) { 4769 /* VFS does not allow setting these so must be corruption */ 4770 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) { 4771 ext4_error_inode(inode, function, line, 0, 4772 "iget: immutable or append flags " 4773 "not allowed on symlinks"); 4774 ret = -EFSCORRUPTED; 4775 goto bad_inode; 4776 } 4777 if (IS_ENCRYPTED(inode)) { 4778 inode->i_op = &ext4_encrypted_symlink_inode_operations; 4779 ext4_set_aops(inode); 4780 } else if (ext4_inode_is_fast_symlink(inode)) { 4781 inode->i_link = (char *)ei->i_data; 4782 inode->i_op = &ext4_fast_symlink_inode_operations; 4783 nd_terminate_link(ei->i_data, inode->i_size, 4784 sizeof(ei->i_data) - 1); 4785 } else { 4786 inode->i_op = &ext4_symlink_inode_operations; 4787 ext4_set_aops(inode); 4788 } 4789 inode_nohighmem(inode); 4790 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || 4791 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { 4792 inode->i_op = &ext4_special_inode_operations; 4793 if (raw_inode->i_block[0]) 4794 
init_special_inode(inode, inode->i_mode, 4795 old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 4796 else 4797 init_special_inode(inode, inode->i_mode, 4798 new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 4799 } else if (ino == EXT4_BOOT_LOADER_INO) { 4800 make_bad_inode(inode); 4801 } else { 4802 ret = -EFSCORRUPTED; 4803 ext4_error_inode(inode, function, line, 0, 4804 "iget: bogus i_mode (%o)", inode->i_mode); 4805 goto bad_inode; 4806 } 4807 if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb)) 4808 ext4_error_inode(inode, function, line, 0, 4809 "casefold flag without casefold feature"); 4810 brelse(iloc.bh); 4811 4812 unlock_new_inode(inode); 4813 return inode; 4814 4815 bad_inode: 4816 brelse(iloc.bh); 4817 iget_failed(inode); 4818 return ERR_PTR(ret); 4819 } 4820 4821 static int ext4_inode_blocks_set(handle_t *handle, 4822 struct ext4_inode *raw_inode, 4823 struct ext4_inode_info *ei) 4824 { 4825 struct inode *inode = &(ei->vfs_inode); 4826 u64 i_blocks = READ_ONCE(inode->i_blocks); 4827 struct super_block *sb = inode->i_sb; 4828 4829 if (i_blocks <= ~0U) { 4830 /* 4831 * i_blocks can be represented in a 32 bit variable 4832 * as multiple of 512 bytes 4833 */ 4834 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 4835 raw_inode->i_blocks_high = 0; 4836 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 4837 return 0; 4838 } 4839 if (!ext4_has_feature_huge_file(sb)) 4840 return -EFBIG; 4841 4842 if (i_blocks <= 0xffffffffffffULL) { 4843 /* 4844 * i_blocks can be represented in a 48 bit variable 4845 * as multiple of 512 bytes 4846 */ 4847 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 4848 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 4849 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 4850 } else { 4851 ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE); 4852 /* i_block is stored in file system block size */ 4853 i_blocks = i_blocks >> (inode->i_blkbits - 9); 4854 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 4855 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 4856 } 4857 return 0; 4858 } 4859 4860 struct other_inode { 4861 unsigned long orig_ino; 4862 struct ext4_inode *raw_inode; 4863 }; 4864 4865 static int other_inode_match(struct inode * inode, unsigned long ino, 4866 void *data) 4867 { 4868 struct other_inode *oi = (struct other_inode *) data; 4869 4870 if ((inode->i_ino != ino) || 4871 (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW | 4872 I_DIRTY_INODE)) || 4873 ((inode->i_state & I_DIRTY_TIME) == 0)) 4874 return 0; 4875 spin_lock(&inode->i_lock); 4876 if (((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW | 4877 I_DIRTY_INODE)) == 0) && 4878 (inode->i_state & I_DIRTY_TIME)) { 4879 struct ext4_inode_info *ei = EXT4_I(inode); 4880 4881 inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED); 4882 spin_unlock(&inode->i_lock); 4883 4884 spin_lock(&ei->i_raw_lock); 4885 EXT4_INODE_SET_XTIME(i_ctime, inode, oi->raw_inode); 4886 EXT4_INODE_SET_XTIME(i_mtime, inode, oi->raw_inode); 4887 EXT4_INODE_SET_XTIME(i_atime, inode, oi->raw_inode); 4888 ext4_inode_csum_set(inode, oi->raw_inode, ei); 4889 spin_unlock(&ei->i_raw_lock); 4890 trace_ext4_other_inode_update_time(inode, oi->orig_ino); 4891 return -1; 4892 } 4893 spin_unlock(&inode->i_lock); 4894 return -1; 4895 } 4896 4897 /* 4898 * Opportunistically update the other time fields for other inodes in 4899 * the same inode table block. 
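 *
 * (E.g. with 4k blocks and 256-byte inodes there are 16 inodes per table
 * block, so when inode 34 is written the calculation below yields ino ==
 * 33 and the loop gives the pending timestamp updates of inodes 33..48 a
 * free ride into the same buffer.)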
4900 */ 4901 static void ext4_update_other_inodes_time(struct super_block *sb, 4902 unsigned long orig_ino, char *buf) 4903 { 4904 struct other_inode oi; 4905 unsigned long ino; 4906 int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; 4907 int inode_size = EXT4_INODE_SIZE(sb); 4908 4909 oi.orig_ino = orig_ino; 4910 /* 4911 * Calculate the first inode in the inode table block. Inode 4912 * numbers are one-based. That is, the first inode in a block 4913 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1). 4914 */ 4915 ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1; 4916 for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) { 4917 if (ino == orig_ino) 4918 continue; 4919 oi.raw_inode = (struct ext4_inode *) buf; 4920 (void) find_inode_nowait(sb, ino, other_inode_match, &oi); 4921 } 4922 } 4923 4924 /* 4925 * Post the struct inode info into an on-disk inode location in the 4926 * buffer-cache. This gobbles the caller's reference to the 4927 * buffer_head in the inode location struct. 4928 * 4929 * The caller must have write access to iloc->bh. 4930 */ 4931 static int ext4_do_update_inode(handle_t *handle, 4932 struct inode *inode, 4933 struct ext4_iloc *iloc) 4934 { 4935 struct ext4_inode *raw_inode = ext4_raw_inode(iloc); 4936 struct ext4_inode_info *ei = EXT4_I(inode); 4937 struct buffer_head *bh = iloc->bh; 4938 struct super_block *sb = inode->i_sb; 4939 int err = 0, rc, block; 4940 int need_datasync = 0, set_large_file = 0; 4941 uid_t i_uid; 4942 gid_t i_gid; 4943 projid_t i_projid; 4944 4945 spin_lock(&ei->i_raw_lock); 4946 4947 /* For fields not tracked in the in-memory inode, 4948 * initialise them to zero for new inodes. */ 4949 if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) 4950 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); 4951 4952 raw_inode->i_mode = cpu_to_le16(inode->i_mode); 4953 i_uid = i_uid_read(inode); 4954 i_gid = i_gid_read(inode); 4955 i_projid = from_kprojid(&init_user_ns, ei->i_projid); 4956 if (!(test_opt(inode->i_sb, NO_UID32))) { 4957 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid)); 4958 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid)); 4959 /* 4960 * Fix up interoperability with old kernels. 
Otherwise, old inodes get 4961 * re-used with the upper 16 bits of the uid/gid intact 4962 */ 4963 if (ei->i_dtime && list_empty(&ei->i_orphan)) { 4964 raw_inode->i_uid_high = 0; 4965 raw_inode->i_gid_high = 0; 4966 } else { 4967 raw_inode->i_uid_high = 4968 cpu_to_le16(high_16_bits(i_uid)); 4969 raw_inode->i_gid_high = 4970 cpu_to_le16(high_16_bits(i_gid)); 4971 } 4972 } else { 4973 raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid)); 4974 raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid)); 4975 raw_inode->i_uid_high = 0; 4976 raw_inode->i_gid_high = 0; 4977 } 4978 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 4979 4980 EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); 4981 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); 4982 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); 4983 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); 4984 4985 err = ext4_inode_blocks_set(handle, raw_inode, ei); 4986 if (err) { 4987 spin_unlock(&ei->i_raw_lock); 4988 goto out_brelse; 4989 } 4990 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); 4991 raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF); 4992 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) 4993 raw_inode->i_file_acl_high = 4994 cpu_to_le16(ei->i_file_acl >> 32); 4995 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); 4996 if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode)) { 4997 ext4_isize_set(raw_inode, ei->i_disksize); 4998 need_datasync = 1; 4999 } 5000 if (ei->i_disksize > 0x7fffffffULL) { 5001 if (!ext4_has_feature_large_file(sb) || 5002 EXT4_SB(sb)->s_es->s_rev_level == 5003 cpu_to_le32(EXT4_GOOD_OLD_REV)) 5004 set_large_file = 1; 5005 } 5006 raw_inode->i_generation = cpu_to_le32(inode->i_generation); 5007 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 5008 if (old_valid_dev(inode->i_rdev)) { 5009 raw_inode->i_block[0] = 5010 cpu_to_le32(old_encode_dev(inode->i_rdev)); 5011 raw_inode->i_block[1] = 0; 5012 } else { 5013 raw_inode->i_block[0] = 0; 5014 raw_inode->i_block[1] = 5015 cpu_to_le32(new_encode_dev(inode->i_rdev)); 5016 raw_inode->i_block[2] = 0; 5017 } 5018 } else if (!ext4_has_inline_data(inode)) { 5019 for (block = 0; block < EXT4_N_BLOCKS; block++) 5020 raw_inode->i_block[block] = ei->i_data[block]; 5021 } 5022 5023 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) { 5024 u64 ivers = ext4_inode_peek_iversion(inode); 5025 5026 raw_inode->i_disk_version = cpu_to_le32(ivers); 5027 if (ei->i_extra_isize) { 5028 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 5029 raw_inode->i_version_hi = 5030 cpu_to_le32(ivers >> 32); 5031 raw_inode->i_extra_isize = 5032 cpu_to_le16(ei->i_extra_isize); 5033 } 5034 } 5035 5036 BUG_ON(!ext4_has_feature_project(inode->i_sb) && 5037 i_projid != EXT4_DEF_PROJID); 5038 5039 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && 5040 EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) 5041 raw_inode->i_projid = cpu_to_le32(i_projid); 5042 5043 ext4_inode_csum_set(inode, raw_inode, ei); 5044 spin_unlock(&ei->i_raw_lock); 5045 if (inode->i_sb->s_flags & SB_LAZYTIME) 5046 ext4_update_other_inodes_time(inode->i_sb, inode->i_ino, 5047 bh->b_data); 5048 5049 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 5050 rc = ext4_handle_dirty_metadata(handle, NULL, bh); 5051 if (!err) 5052 err = rc; 5053 ext4_clear_inode_state(inode, EXT4_STATE_NEW); 5054 if (set_large_file) { 5055 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access"); 5056 err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh); 5057 if (err) 5058 goto out_brelse; 5059 
ext4_set_feature_large_file(sb); 5060 ext4_handle_sync(handle); 5061 err = ext4_handle_dirty_super(handle, sb); 5062 } 5063 ext4_update_inode_fsync_trans(handle, inode, need_datasync); 5064 out_brelse: 5065 brelse(bh); 5066 ext4_std_error(inode->i_sb, err); 5067 return err; 5068 } 5069
5070 /* 5071 * ext4_write_inode() 5072 * 5073 * We are called from a few places: 5074 * 5075 * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files. 5076 * Here, there will be no transaction running. We wait for any running 5077 * transaction to commit. 5078 * 5079 * - Within flush work (sys_sync(), kupdate and such). 5080 * We wait on commit, if told to. 5081 * 5082 * - Within iput_final() -> write_inode_now() 5083 * We wait on commit, if told to. 5084 * 5085 * In all cases it is actually safe for us to return without doing anything, 5086 * because the inode has been copied into a raw inode buffer in 5087 * ext4_mark_inode_dirty(). This is a correctness thing for WB_SYNC_ALL 5088 * writeback. 5089 * 5090 * Note that we are absolutely dependent upon all inode dirtiers doing the 5091 * right thing: they *must* call mark_inode_dirty() after dirtying info in 5092 * which we are interested. 5093 * 5094 * It would be a bug for them to not do this. The code: 5095 * 5096 * mark_inode_dirty(inode) 5097 * stuff(); 5098 * inode->i_size = expr; 5099 * 5100 * is in error because write_inode() could occur while `stuff()' is running, 5101 * and the new i_size will be lost. Plus the inode will no longer be on the 5102 * superblock's dirty inode list. 5103 */ 5104 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) 5105 { 5106 int err; 5107
5108 if (WARN_ON_ONCE(current->flags & PF_MEMALLOC) || 5109 sb_rdonly(inode->i_sb)) 5110 return 0; 5111
5112 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) 5113 return -EIO; 5114
5115 if (EXT4_SB(inode->i_sb)->s_journal) { 5116 if (ext4_journal_current_handle()) { 5117 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); 5118 dump_stack(); 5119 return -EIO; 5120 } 5121
5122 /* 5123 * No need to force transaction in WB_SYNC_NONE mode. Also 5124 * ext4_sync_fs() will force the commit after everything is 5125 * written. 5126 */ 5127 if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync) 5128 return 0; 5129
5130 err = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal, 5131 EXT4_I(inode)->i_sync_tid); 5132 } else { 5133 struct ext4_iloc iloc; 5134
5135 err = __ext4_get_inode_loc(inode, &iloc, 0); 5136 if (err) 5137 return err; 5138 /* 5139 * sync(2) will flush the whole buffer cache. No need to do 5140 * it here separately for each inode. 5141 */ 5142 if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) 5143 sync_dirty_buffer(iloc.bh); 5144 if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { 5145 ext4_error_inode_block(inode, iloc.bh->b_blocknr, EIO, 5146 "IO error syncing inode"); 5147 err = -EIO; 5148 } 5149 brelse(iloc.bh); 5150 } 5151 return err; 5152 } 5153
5154 /* 5155 * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate 5156 * buffers that are attached to a page straddling i_size and are undergoing 5157 * commit. In that case we have to wait for the commit to finish and try again.
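 *
 * (Note that the common blocksize == PAGE_SIZE case never gets that far:
 * the tail page then has a single buffer which always remains valid, so
 * the early-return check in ext4_wait_for_tail_page_commit() suffices.)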
*/ 5159 static void ext4_wait_for_tail_page_commit(struct inode *inode) 5160 { 5161 struct page *page; 5162 unsigned offset; 5163 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; 5164 tid_t commit_tid = 0; 5165 int ret; 5166
5167 offset = inode->i_size & (PAGE_SIZE - 1); 5168 /* 5169 * If the page is fully truncated, we don't need to wait for any commit 5170 * (and we even should not, as __ext4_journalled_invalidatepage() may 5171 * strip all buffers from the page but keep the page dirty, which can 5172 * then confuse e.g. a concurrent ext4_writepage() seeing a dirty page 5173 * without buffers). Also we don't need to wait for any commit if all 5174 * buffers in the page remain valid. This is most beneficial for the 5175 * common case of blocksize == PAGESIZE. 5176 */ 5177 if (!offset || offset > (PAGE_SIZE - i_blocksize(inode))) 5178 return; 5179 while (1) { 5180 page = find_lock_page(inode->i_mapping, 5181 inode->i_size >> PAGE_SHIFT); 5182 if (!page) 5183 return; 5184 ret = __ext4_journalled_invalidatepage(page, offset, 5185 PAGE_SIZE - offset); 5186 unlock_page(page); 5187 put_page(page); 5188 if (ret != -EBUSY) 5189 return; 5190 commit_tid = 0; 5191 read_lock(&journal->j_state_lock); 5192 if (journal->j_committing_transaction) 5193 commit_tid = journal->j_committing_transaction->t_tid; 5194 read_unlock(&journal->j_state_lock); 5195 if (commit_tid) 5196 jbd2_log_wait_commit(journal, commit_tid); 5197 } 5198 } 5199
5200 /* 5201 * ext4_setattr() 5202 * 5203 * Called from notify_change. 5204 * 5205 * We want to trap VFS attempts to truncate the file as soon as 5206 * possible. In particular, we want to make sure that when the VFS 5207 * shrinks i_size, we put the inode on the orphan list and modify 5208 * i_disksize immediately, so that during the subsequent flushing of 5209 * dirty pages and freeing of disk blocks, we can guarantee that any 5210 * commit will leave the blocks being flushed in an unused state on 5211 * disk. (On recovery, the inode will get truncated and the blocks will 5212 * be freed, so we have a strong guarantee that no future commit will 5213 * leave these blocks visible to the user.) 5214 * 5215 * Another thing we have to assure is that if we are in ordered mode 5216 * and the inode is still attached to the committing transaction, we 5217 * must start writeout of all the dirty pages which are being truncated. 5218 * This way we are sure that all the data written in the previous 5219 * transaction are already on disk (truncate waits for pages under 5220 * writeback). 5221 * 5222 * Called with inode->i_mutex down.
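 *
 * (The shrink path below therefore runs, in rough order:
 *
 *	ext4_orphan_add(handle, inode);
 *	EXT4_I(inode)->i_disksize = attr->ia_size;
 *	i_size_write(inode, attr->ia_size);
 *	truncate_pagecache(inode, inode->i_size);
 *	ext4_truncate(inode);
 *
 * so a crash at any point leaves either the old size or an orphaned
 * inode whose truncate will be finished by recovery.)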
5223 */ 5224 int ext4_setattr(struct dentry *dentry, struct iattr *attr) 5225 { 5226 struct inode *inode = d_inode(dentry); 5227 int error, rc = 0; 5228 int orphan = 0; 5229 const unsigned int ia_valid = attr->ia_valid; 5230 5231 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) 5232 return -EIO; 5233 5234 if (unlikely(IS_IMMUTABLE(inode))) 5235 return -EPERM; 5236 5237 if (unlikely(IS_APPEND(inode) && 5238 (ia_valid & (ATTR_MODE | ATTR_UID | 5239 ATTR_GID | ATTR_TIMES_SET)))) 5240 return -EPERM; 5241 5242 error = setattr_prepare(dentry, attr); 5243 if (error) 5244 return error; 5245 5246 error = fscrypt_prepare_setattr(dentry, attr); 5247 if (error) 5248 return error; 5249 5250 error = fsverity_prepare_setattr(dentry, attr); 5251 if (error) 5252 return error; 5253 5254 if (is_quota_modification(inode, attr)) { 5255 error = dquot_initialize(inode); 5256 if (error) 5257 return error; 5258 } 5259 if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) || 5260 (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) { 5261 handle_t *handle; 5262 5263 /* (user+group)*(old+new) structure, inode write (sb, 5264 * inode block, ? - but truncate inode update has it) */ 5265 handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 5266 (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) + 5267 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3); 5268 if (IS_ERR(handle)) { 5269 error = PTR_ERR(handle); 5270 goto err_out; 5271 } 5272 5273 /* dquot_transfer() calls back ext4_get_inode_usage() which 5274 * counts xattr inode references. 5275 */ 5276 down_read(&EXT4_I(inode)->xattr_sem); 5277 error = dquot_transfer(inode, attr); 5278 up_read(&EXT4_I(inode)->xattr_sem); 5279 5280 if (error) { 5281 ext4_journal_stop(handle); 5282 return error; 5283 } 5284 /* Update corresponding info in inode so that everything is in 5285 * one transaction */ 5286 if (attr->ia_valid & ATTR_UID) 5287 inode->i_uid = attr->ia_uid; 5288 if (attr->ia_valid & ATTR_GID) 5289 inode->i_gid = attr->ia_gid; 5290 error = ext4_mark_inode_dirty(handle, inode); 5291 ext4_journal_stop(handle); 5292 } 5293 5294 if (attr->ia_valid & ATTR_SIZE) { 5295 handle_t *handle; 5296 loff_t oldsize = inode->i_size; 5297 int shrink = (attr->ia_size < inode->i_size); 5298 5299 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 5300 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 5301 5302 if (attr->ia_size > sbi->s_bitmap_maxbytes) 5303 return -EFBIG; 5304 } 5305 if (!S_ISREG(inode->i_mode)) 5306 return -EINVAL; 5307 5308 if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size) 5309 inode_inc_iversion(inode); 5310 5311 if (shrink) { 5312 if (ext4_should_order_data(inode)) { 5313 error = ext4_begin_ordered_truncate(inode, 5314 attr->ia_size); 5315 if (error) 5316 goto err_out; 5317 } 5318 /* 5319 * Blocks are going to be removed from the inode. Wait 5320 * for dio in flight. 
5321 */ 5322 inode_dio_wait(inode); 5323 } 5324 5325 down_write(&EXT4_I(inode)->i_mmap_sem); 5326 5327 rc = ext4_break_layouts(inode); 5328 if (rc) { 5329 up_write(&EXT4_I(inode)->i_mmap_sem); 5330 return rc; 5331 } 5332 5333 if (attr->ia_size != inode->i_size) { 5334 handle = ext4_journal_start(inode, EXT4_HT_INODE, 3); 5335 if (IS_ERR(handle)) { 5336 error = PTR_ERR(handle); 5337 goto out_mmap_sem; 5338 } 5339 if (ext4_handle_valid(handle) && shrink) { 5340 error = ext4_orphan_add(handle, inode); 5341 orphan = 1; 5342 } 5343 /* 5344 * Update c/mtime on truncate up, ext4_truncate() will 5345 * update c/mtime in shrink case below 5346 */ 5347 if (!shrink) { 5348 inode->i_mtime = current_time(inode); 5349 inode->i_ctime = inode->i_mtime; 5350 } 5351 down_write(&EXT4_I(inode)->i_data_sem); 5352 EXT4_I(inode)->i_disksize = attr->ia_size; 5353 rc = ext4_mark_inode_dirty(handle, inode); 5354 if (!error) 5355 error = rc; 5356 /* 5357 * We have to update i_size under i_data_sem together 5358 * with i_disksize to avoid races with writeback code 5359 * running ext4_wb_update_i_disksize(). 5360 */ 5361 if (!error) 5362 i_size_write(inode, attr->ia_size); 5363 up_write(&EXT4_I(inode)->i_data_sem); 5364 ext4_journal_stop(handle); 5365 if (error) 5366 goto out_mmap_sem; 5367 if (!shrink) { 5368 pagecache_isize_extended(inode, oldsize, 5369 inode->i_size); 5370 } else if (ext4_should_journal_data(inode)) { 5371 ext4_wait_for_tail_page_commit(inode); 5372 } 5373 } 5374 5375 /* 5376 * Truncate pagecache after we've waited for commit 5377 * in data=journal mode to make pages freeable. 5378 */ 5379 truncate_pagecache(inode, inode->i_size); 5380 /* 5381 * Call ext4_truncate() even if i_size didn't change to 5382 * truncate possible preallocated blocks. 5383 */ 5384 if (attr->ia_size <= oldsize) { 5385 rc = ext4_truncate(inode); 5386 if (rc) 5387 error = rc; 5388 } 5389 out_mmap_sem: 5390 up_write(&EXT4_I(inode)->i_mmap_sem); 5391 } 5392 5393 if (!error) { 5394 setattr_copy(inode, attr); 5395 mark_inode_dirty(inode); 5396 } 5397 5398 /* 5399 * If the call to ext4_truncate failed to get a transaction handle at 5400 * all, we need to clean up the in-core orphan list manually. 
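 * That is why the ext4_orphan_del() call below may pass a NULL handle:
 * without a handle it only unhooks the inode from the in-core orphan
 * list, leaving the on-disk orphan chain alone.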
*/ 5402 if (orphan && inode->i_nlink) 5403 ext4_orphan_del(NULL, inode); 5404
5405 if (!error && (ia_valid & ATTR_MODE)) 5406 rc = posix_acl_chmod(inode, inode->i_mode); 5407
5408 err_out: 5409 ext4_std_error(inode->i_sb, error); 5410 if (!error) 5411 error = rc; 5412 return error; 5413 } 5414
5415 int ext4_getattr(const struct path *path, struct kstat *stat, 5416 u32 request_mask, unsigned int query_flags) 5417 { 5418 struct inode *inode = d_inode(path->dentry); 5419 struct ext4_inode *raw_inode; 5420 struct ext4_inode_info *ei = EXT4_I(inode); 5421 unsigned int flags; 5422
5423 if ((request_mask & STATX_BTIME) && 5424 EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) { 5425 stat->result_mask |= STATX_BTIME; 5426 stat->btime.tv_sec = ei->i_crtime.tv_sec; 5427 stat->btime.tv_nsec = ei->i_crtime.tv_nsec; 5428 } 5429
5430 flags = ei->i_flags & EXT4_FL_USER_VISIBLE; 5431 if (flags & EXT4_APPEND_FL) 5432 stat->attributes |= STATX_ATTR_APPEND; 5433 if (flags & EXT4_COMPR_FL) 5434 stat->attributes |= STATX_ATTR_COMPRESSED; 5435 if (flags & EXT4_ENCRYPT_FL) 5436 stat->attributes |= STATX_ATTR_ENCRYPTED; 5437 if (flags & EXT4_IMMUTABLE_FL) 5438 stat->attributes |= STATX_ATTR_IMMUTABLE; 5439 if (flags & EXT4_NODUMP_FL) 5440 stat->attributes |= STATX_ATTR_NODUMP; 5441 if (flags & EXT4_VERITY_FL) 5442 stat->attributes |= STATX_ATTR_VERITY; 5443
5444 stat->attributes_mask |= (STATX_ATTR_APPEND | 5445 STATX_ATTR_COMPRESSED | 5446 STATX_ATTR_ENCRYPTED | 5447 STATX_ATTR_IMMUTABLE | 5448 STATX_ATTR_NODUMP | 5449 STATX_ATTR_VERITY); 5450
5451 generic_fillattr(inode, stat); 5452 return 0; 5453 } 5454
5455 int ext4_file_getattr(const struct path *path, struct kstat *stat, 5456 u32 request_mask, unsigned int query_flags) 5457 { 5458 struct inode *inode = d_inode(path->dentry); 5459 u64 delalloc_blocks; 5460
5461 ext4_getattr(path, stat, request_mask, query_flags); 5462
5463 /* 5464 * If there is inline data in the inode, the inode will normally not 5465 * have data blocks allocated (it may have an external xattr block). 5466 * Report at least one sector for such files, so tools like tar, rsync, 5467 * and others don't incorrectly think the file is completely sparse. 5468 */ 5469 if (unlikely(ext4_has_inline_data(inode))) 5470 stat->blocks += (stat->size + 511) >> 9; 5471
5472 /* 5473 * We can't update i_blocks if the block allocation is delayed; 5474 * otherwise, in the case of a system crash before the real block 5475 * allocation is done, we would have i_blocks inconsistent with the 5476 * on-disk file blocks. 5477 * We always keep i_blocks updated together with the real 5478 * allocation. But to avoid confusing the user, stat 5479 * will return the blocks that include the delayed allocation 5480 * blocks for this file. 5481 */ 5482 delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb), 5483 EXT4_I(inode)->i_reserved_data_blocks); 5484 stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9); 5485 return 0; 5486 } 5487
5488 static int ext4_index_trans_blocks(struct inode *inode, int lblocks, 5489 int pextents) 5490 { 5491 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 5492 return ext4_ind_trans_blocks(inode, lblocks); 5493 return ext4_ext_index_trans_blocks(inode, pextents); 5494 } 5495
5496 /* 5497 * Account for index blocks, block group bitmaps and block group 5498 * descriptor blocks if we modify data blocks and index blocks. In the 5499 * worst case, the index blocks are spread over different block groups. 5500 * 5501 * If the data blocks are discontiguous, they may be spread over 5502 * different block groups too.

int ext4_file_getattr(const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	u64 delalloc_blocks;

	ext4_getattr(path, stat, request_mask, query_flags);

	/*
	 * If there is inline data in the inode, the inode will normally not
	 * have data blocks allocated (it may have an external xattr block).
	 * Report at least one sector for such files, so tools like tar and
	 * rsync don't incorrectly think the file is completely sparse.
	 */
	if (unlikely(ext4_has_inline_data(inode)))
		stat->blocks += (stat->size + 511) >> 9;

	/*
	 * We can't update i_blocks while the block allocation is delayed:
	 * if the system crashed before the real blocks were allocated,
	 * i_blocks would be inconsistent with the on-disk file blocks. So
	 * we always keep i_blocks updated together with the real
	 * allocation, but to avoid confusing userspace, stat reports
	 * blocks that include the delayed allocation blocks for this file.
	 */
	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
				   EXT4_I(inode)->i_reserved_data_blocks);
	stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
	return 0;
}

static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
				   int pextents)
{
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return ext4_ind_trans_blocks(inode, lblocks);
	return ext4_ext_index_trans_blocks(inode, pextents);
}

/*
 * Account for index blocks, block group bitmaps and block group
 * descriptor blocks if we modify both data blocks and index blocks.
 * In the worst case, the index blocks are spread over different block
 * groups.
 *
 * If the data blocks are discontiguous, they may spread over different
 * block groups too. Even if they are contiguous, with flexbg they could
 * still cross a block group boundary.
 *
 * Also account for superblock, inode, quota and xattr blocks.
 */
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents)
{
	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
	int gdpblocks;
	int idxblocks;
	int ret = 0;

	/*
	 * How many index blocks do we need to touch to map @lblocks logical
	 * blocks to @pextents physical extents?
	 */
	idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);

	ret = idxblocks;

	/*
	 * Now let's see how many block group bitmaps and group descriptor
	 * blocks we need to account for.
	 */
	groups = idxblocks + pextents;
	gdpblocks = groups;
	if (groups > ngroups)
		groups = ngroups;
	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;

	/* bitmaps and block group descriptor blocks */
	ret += groups + gdpblocks;

	/* Blocks for super block, inode, quota and xattr blocks */
	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);

	return ret;
}
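
/*
 * A worked example of the accounting above, with hypothetical numbers:
 * say ext4_index_trans_blocks() returns idxblocks = 5 for a mapping into
 * pextents = 4 physical extents, on a filesystem with ngroups = 16 and
 * s_gdb_count = 1. Then
 *
 *	groups    = 5 + 4 = 9		(9 <= ngroups, so not clamped)
 *	gdpblocks = min(9, s_gdb_count) = 1
 *	ret       = 5 + 9 + 1 + EXT4_META_TRANS_BLOCKS(sb)
 *
 * i.e. one credit per index block, per touched block bitmap and per
 * touched group descriptor block, plus the fixed superblock/inode/
 * quota/xattr overhead.
 */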

/*
 * Calculate the total number of credits to reserve so that the
 * modification of a single page can fit into a single transaction,
 * which may include multiple chunks of block allocations.
 *
 * This could be called via ext4_write_begin().
 *
 * We need to consider the worst case, when we allocate one new block
 * per extent.
 */
int ext4_writepage_trans_blocks(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);
	int ret;

	ret = ext4_meta_trans_blocks(inode, bpp, bpp);

	/* Account for data blocks for journalled mode */
	if (ext4_should_journal_data(inode))
		ret += bpp;
	return ret;
}

/*
 * Calculate the journal credits for a chunk of data modification.
 *
 * This is called from DIO, fallocate or whoever else calls
 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
 *
 * Journal buffers for data blocks are not included here, as DIO and
 * fallocate do not need to journal data buffers.
 */
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
	return ext4_meta_trans_blocks(inode, nrblocks, 1);
}

/*
 * The caller must have previously called ext4_reserve_inode_write().
 * Given this, we know that the caller already has write access to iloc->bh.
 */
int ext4_mark_iloc_dirty(handle_t *handle,
			 struct inode *inode, struct ext4_iloc *iloc)
{
	int err = 0;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
		put_bh(iloc->bh);
		return -EIO;
	}
	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);

	/* ext4_do_update_inode() consumes one bh reference (bh->b_count) */
	get_bh(iloc->bh);

	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
	err = ext4_do_update_inode(handle, inode, iloc);
	put_bh(iloc->bh);
	return err;
}

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh. This _must_ be cleaned up later.
 */

int
ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			 struct ext4_iloc *iloc)
{
	int err;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	err = ext4_get_inode_loc(inode, iloc);
	if (!err) {
		BUFFER_TRACE(iloc->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, iloc->bh);
		if (err) {
			brelse(iloc->bh);
			iloc->bh = NULL;
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
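
/*
 * These two helpers are normally used as a pair: reserve write access to
 * the inode's buffer, update the in-core inode, then copy it out and
 * dirty the buffer. A minimal sketch of a hypothetical caller that
 * already owns a running handle:
 *
 *	struct ext4_iloc iloc;
 *	int err;
 *
 *	err = ext4_reserve_inode_write(handle, inode, &iloc);
 *	if (err)
 *		return err;
 *	... update the in-core inode here ...
 *	return ext4_mark_iloc_dirty(handle, inode, &iloc);
 */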

static int __ext4_expand_extra_isize(struct inode *inode,
				     unsigned int new_extra_isize,
				     struct ext4_iloc *iloc,
				     handle_t *handle, int *no_expand)
{
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;
	unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	int error;

	/* this was checked at iget time, but double check for good measure */
	if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) ||
	    (ei->i_extra_isize & 3)) {
		EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)",
				 ei->i_extra_isize,
				 EXT4_INODE_SIZE(inode->i_sb));
		return -EFSCORRUPTED;
	}
	if ((new_extra_isize < ei->i_extra_isize) ||
	    (new_extra_isize < 4) ||
	    (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE))
		return -EINVAL;	/* Should never happen */

	raw_inode = ext4_raw_inode(iloc);

	header = IHDR(inode, raw_inode);

	/* No extended attributes present */
	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
		       EXT4_I(inode)->i_extra_isize, 0,
		       new_extra_isize - EXT4_I(inode)->i_extra_isize);
		EXT4_I(inode)->i_extra_isize = new_extra_isize;
		return 0;
	}

	/* try to expand with EAs present */
	error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
					   raw_inode, handle);
	if (error) {
		/*
		 * Inode size expansion failed; don't try again
		 */
		*no_expand = 1;
	}

	return error;
}

/*
 * Expand an inode by new_extra_isize bytes.
 * Returns 0 on success or negative error number on failure.
 */
static int ext4_try_to_expand_extra_isize(struct inode *inode,
					  unsigned int new_extra_isize,
					  struct ext4_iloc iloc,
					  handle_t *handle)
{
	int no_expand;
	int error;

	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND))
		return -EOVERFLOW;

	/*
	 * In nojournal mode, we can immediately attempt to expand
	 * the inode. When journaled, we first need to obtain extra
	 * buffer credits since we may write into the EA block
	 * with this same handle. If journal_extend fails, then it will
	 * only result in a minor loss of functionality for that inode.
	 * If this is felt to be critical, then e2fsck should be run to
	 * force a large enough s_min_extra_isize.
	 */
	if (ext4_journal_extend(handle,
				EXT4_DATA_TRANS_BLOCKS(inode->i_sb), 0) != 0)
		return -ENOSPC;

	if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
		return -EBUSY;

	error = __ext4_expand_extra_isize(inode, new_extra_isize, &iloc,
					  handle, &no_expand);
	ext4_write_unlock_xattr(inode, &no_expand);

	return error;
}
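
/*
 * The size checks in __ext4_expand_extra_isize() come down to simple
 * layout arithmetic. As a hypothetical example, on a filesystem with
 * 256-byte on-disk inodes:
 *
 *	EXT4_GOOD_OLD_INODE_SIZE		= 128
 *	room for extra fields + in-inode xattrs	= 256 - 128 = 128 bytes
 *
 * so new_extra_isize may grow up to 128; anything below the current
 * i_extra_isize or above that limit is rejected with -EINVAL.
 */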

int ext4_expand_extra_isize(struct inode *inode,
			    unsigned int new_extra_isize,
			    struct ext4_iloc *iloc)
{
	handle_t *handle;
	int no_expand;
	int error, rc;

	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
		brelse(iloc->bh);
		return -EOVERFLOW;
	}

	handle = ext4_journal_start(inode, EXT4_HT_INODE,
				    EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
	if (IS_ERR(handle)) {
		error = PTR_ERR(handle);
		brelse(iloc->bh);
		return error;
	}

	ext4_write_lock_xattr(inode, &no_expand);

	BUFFER_TRACE(iloc->bh, "get_write_access");
	error = ext4_journal_get_write_access(handle, iloc->bh);
	if (error) {
		brelse(iloc->bh);
		goto out_unlock;
	}

	error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
					  handle, &no_expand);

	rc = ext4_mark_iloc_dirty(handle, inode, iloc);
	if (!error)
		error = rc;

out_unlock:
	ext4_write_unlock_xattr(inode, &no_expand);
	ext4_journal_stop(handle);
	return error;
}

/*
 * What we do here is to mark the in-core inode as clean with respect to
 * inode dirtiness (it may still be data-dirty). This means that the
 * in-core inode may be reaped by prune_icache without having to perform
 * any I/O. This is a very good thing, because *any* task may call
 * prune_icache - even ones which have a transaction open against a
 * different journal.
 *
 * Is this cheating? Not really. Sure, we haven't written the
 * inode out, but prune_icache isn't a user-visible syncing function.
 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
 * we start and wait on commits.
 */
int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int err;

	might_sleep();
	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		return err;

	if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize)
		ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize,
					       iloc, handle);

	return ext4_mark_iloc_dirty(handle, inode, &iloc);
}

/*
 * ext4_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Also, dquot_alloc_block() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
 * so would cause a commit on atime updates, which we don't bother doing.
 * We handle synchronous inodes at the highest possible level.
 *
 * If only the I_DIRTY_TIME flag is set, we can skip everything. If
 * I_DIRTY_TIME and I_DIRTY_SYNC are set, the only inode fields we need
 * to copy into the on-disk inode structure are the timestamp fields.
 */
void ext4_dirty_inode(struct inode *inode, int flags)
{
	handle_t *handle;

	if (flags == I_DIRTY_TIME)
		return;
	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (IS_ERR(handle))
		goto out;

	ext4_mark_inode_dirty(handle, inode);

	ext4_journal_stop(handle);
out:
	return;
}

int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	/*
	 * We have to be very careful here: changing a data block's
	 * journaling status dynamically is dangerous. If we write a
	 * data block to the journal, change the status and then delete
	 * that block, we risk forgetting to revoke the old log record
	 * from the journal and so a subsequent replay can corrupt data.
	 * So, first we make sure that the journal is empty and that
	 * nobody is changing anything.
	 */

	journal = EXT4_JOURNAL(inode);
	if (!journal)
		return 0;
	if (is_journal_aborted(journal))
		return -EROFS;

	/* Wait for all existing dio workers */
	inode_dio_wait(inode);

	/*
	 * Before flushing the journal and switching inode's aops, we have
	 * to flush all dirty data the inode has. There can be outstanding
	 * delayed allocations, there can be unwritten extents created by
	 * fallocate or buffered writes in dioread_nolock mode covered by
	 * dirty data which can be converted only after flushing the dirty
	 * data (and journalled aops don't know how to handle these cases).
	 */
	if (val) {
		down_write(&EXT4_I(inode)->i_mmap_sem);
		err = filemap_write_and_wait(inode->i_mapping);
		if (err < 0) {
			up_write(&EXT4_I(inode)->i_mmap_sem);
			return err;
		}
	}

	percpu_down_write(&sbi->s_writepages_rwsem);
	jbd2_journal_lock_updates(journal);

	/*
	 * OK, there are no updates running now, and all cached data is
	 * synced to disk. We are now in a completely consistent state
	 * which doesn't have anything in the journal, and we know that
	 * no filesystem updates are running, so it is safe to modify
	 * the inode's in-core data-journaling state flag now.
	 */

	if (val)
		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	else {
		err = jbd2_journal_flush(journal);
		if (err < 0) {
			jbd2_journal_unlock_updates(journal);
			percpu_up_write(&sbi->s_writepages_rwsem);
			return err;
		}
		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	}
	ext4_set_aops(inode);

	jbd2_journal_unlock_updates(journal);
	percpu_up_write(&sbi->s_writepages_rwsem);

	if (val)
		up_write(&EXT4_I(inode)->i_mmap_sem);

	/* Finally we can mark the inode as dirty. */

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_mark_inode_dirty(handle, inode);
	ext4_handle_sync(handle);
	ext4_journal_stop(handle);
	ext4_std_error(inode->i_sb, err);

	return err;
}
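
/*
 * The usual way to reach ext4_change_inode_journal_flag() is chattr(1)
 * toggling the per-inode data-journaling flag through the FS_IOC_SETFLAGS
 * ioctl; a minimal userspace sketch, error handling omitted:
 *
 *	int flags;
 *
 *	ioctl(fd, FS_IOC_GETFLAGS, &flags);
 *	flags |= FS_JOURNAL_DATA_FL;	(i.e. chattr +j)
 *	ioctl(fd, FS_IOC_SETFLAGS, &flags);
 */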

static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
{
	return !buffer_mapped(bh);
}

vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct page *page = vmf->page;
	loff_t size;
	unsigned long len;
	int err;
	vm_fault_t ret;
	struct file *file = vma->vm_file;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	handle_t *handle;
	get_block_t *get_block;
	int retries = 0;

	if (unlikely(IS_IMMUTABLE(inode)))
		return VM_FAULT_SIGBUS;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	down_read(&EXT4_I(inode)->i_mmap_sem);

	err = ext4_convert_inline_data(inode);
	if (err)
		goto out_ret;

	/* Delalloc case is easy... */
	if (test_opt(inode->i_sb, DELALLOC) &&
	    !ext4_should_journal_data(inode) &&
	    !ext4_nonda_switch(inode->i_sb)) {
		do {
			err = block_page_mkwrite(vma, vmf,
						 ext4_da_get_block_prep);
		} while (err == -ENOSPC &&
			 ext4_should_retry_alloc(inode->i_sb, &retries));
		goto out_ret;
	}

	lock_page(page);
	size = i_size_read(inode);
	/* Page got truncated from under us? */
	if (page->mapping != mapping || page_offset(page) > size) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (page->index == size >> PAGE_SHIFT)
		len = size & ~PAGE_MASK;
	else
		len = PAGE_SIZE;
	/*
	 * Return if we have all the buffers mapped. This avoids the need
	 * to do journal_start/journal_stop, which can block and take a
	 * long time.
	 */
	if (page_has_buffers(page)) {
		if (!ext4_walk_page_buffers(NULL, page_buffers(page),
					    0, len, NULL,
					    ext4_bh_unmapped)) {
			/* Wait so that we don't change page under IO */
			wait_for_stable_page(page);
			ret = VM_FAULT_LOCKED;
			goto out;
		}
	}
	unlock_page(page);
	/* OK, we need to fill the hole... */
	if (ext4_should_dioread_nolock(inode))
		get_block = ext4_get_block_unwritten;
	else
		get_block = ext4_get_block;
retry_alloc:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
				    ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}
	err = block_page_mkwrite(vma, vmf, get_block);
	if (!err && ext4_should_journal_data(inode)) {
		if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
					   PAGE_SIZE, NULL,
					   do_journal_get_write_access)) {
			unlock_page(page);
			ret = VM_FAULT_SIGBUS;
			ext4_journal_stop(handle);
			goto out;
		}
		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	}
	ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry_alloc;
out_ret:
	ret = block_page_mkwrite_return(err);
out:
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(inode->i_sb);
	return ret;
}

vm_fault_t ext4_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&EXT4_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&EXT4_I(inode)->i_mmap_sem);

	return ret;
}
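
/*
 * Both fault handlers above are wired into ext4's vm_operations_struct
 * in fs/ext4/file.c; roughly (the exact set of ops may vary with kernel
 * version and DAX support):
 *
 *	static const struct vm_operations_struct ext4_file_vm_ops = {
 *		.fault		= ext4_filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= ext4_page_mkwrite,
 *	};
 */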