// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/iomap.h>
#include <linux/iversion.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			     struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;
	__u16 dummy_csum = 0;
	int offset = offsetof(struct ext4_inode, i_checksum_lo);
	unsigned int csum_size = sizeof(dummy_csum);

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
	offset += csum_size;
	csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
			   EXT4_GOOD_OLD_INODE_SIZE - offset);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		offset = offsetof(struct ext4_inode, i_checksum_hi);
		csum = ext4_chksum(sbi, csum, (__u8 *)raw +
				   EXT4_GOOD_OLD_INODE_SIZE,
				   offset - EXT4_GOOD_OLD_INODE_SIZE);
		if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
			csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
					   csum_size);
			offset += csum_size;
		}
		csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
				   EXT4_INODE_SIZE(inode->i_sb) - offset);
	}

	return csum;
}

static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}

void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
			 struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}
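
/*
 * Summary note (added for readability; it restates the helpers above rather
 * than defining anything new): the 32-bit crc32c result is stored split,
 * with i_checksum_lo holding the low 16 bits and, only when the large inode
 * actually carries the field, i_checksum_hi holding the high 16 bits. That
 * is why ext4_inode_csum_verify() masks the calculated value down to 16 bits
 * whenever i_checksum_hi is absent.
 */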

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents);

/*
 * Test whether an inode is a fast symlink.
 * A fast symlink has its symlink data stored in ext4_inode_info->i_data.
 */
int ext4_inode_is_fast_symlink(struct inode *inode)
{
	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
		int ea_blocks = EXT4_I(inode)->i_file_acl ?
				EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;

		if (ext4_has_inline_data(inode))
			return 0;

		return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
	}
	return S_ISLNK(inode->i_mode) && inode->i_size &&
	       (inode->i_size < EXT4_N_BLOCKS * 4);
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;
	/*
	 * Credits for final inode cleanup and freeing:
	 * sb + inode (ext4_orphan_del()), block bitmap, group descriptor
	 * (xattr block freeing), bitmap, group descriptor (inode freeing)
	 */
	int extra_credits = 6;
	struct ext4_xattr_inode_array *ea_inode_array = NULL;
	bool freeze_protected = false;

	trace_ext4_evict_inode(inode);

	if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)
		ext4_evict_ea_inode(inode);
	if (inode->i_nlink) {
		truncate_inode_pages_final(&inode->i_data);

		goto no_delete;
	}

	if (is_bad_inode(inode))
		goto no_delete;
	dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages_final(&inode->i_data);

	/*
	 * For inodes with journalled data, transaction commit could have
	 * dirtied the inode. And for inodes with dioread_nolock, the
	 * unwritten extent conversion worker could merge extents and also
	 * dirty the inode. The flush worker ignores it because of the
	 * I_FREEING flag, but we still need to remove the inode from the
	 * writeback lists.
	 */
	if (!list_empty_careful(&inode->i_io_list))
		inode_io_list_del(inode);

	/*
	 * Protect us against freezing - iput() caller didn't have to have any
	 * protection against it. When we are in a running transaction though,
	 * we are already protected against freezing and we cannot grab further
	 * protection due to lock ordering constraints.
	 */
	if (!ext4_journal_current_handle()) {
		sb_start_intwrite(inode->i_sb);
		freeze_protected = true;
	}

	if (!IS_NOQUOTA(inode))
		extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);

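	/*
	 * Worked example (illustrative only, derived from the credit comments
	 * in this function): with quotas enabled the handle below is started
	 * with ext4_blocks_for_truncate(inode) + 6 +
	 * EXT4_MAXQUOTAS_DEL_BLOCKS(sb) - 3 credits; the "- 3" drops the
	 * block bitmap, group descriptor and inode buffers that both
	 * ext4_blocks_for_truncate() and extra_credits account for.
	 */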
	/*
	 * Block bitmap, group descriptor, and inode are accounted in both
	 * ext4_blocks_for_truncate() and extra_credits. So subtract 3.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
			 ext4_blocks_for_truncate(inode) + extra_credits - 3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		if (freeze_protected)
			sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	/*
	 * Set inode->i_size to 0 before calling ext4_truncate(). We need
	 * special handling of symlinks here because i_size is used to
	 * determine whether ext4_inode_info->i_data contains symlink data or
	 * block mappings. Setting i_size to 0 will remove its fast symlink
	 * status. Erase i_data so that it becomes a valid empty block map.
	 */
	if (ext4_inode_is_fast_symlink(inode))
		memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks) {
		err = ext4_truncate(inode);
		if (err) {
			ext4_error_err(inode->i_sb, -err,
				       "couldn't truncate inode %lu (err %d)",
				       inode->i_ino, err);
			goto stop_handle;
		}
	}

	/* Remove xattr references. */
	err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
				      extra_credits);
	if (err) {
		ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
stop_handle:
		ext4_journal_stop(handle);
		ext4_orphan_del(NULL, inode);
		if (freeze_protected)
			sb_end_intwrite(inode->i_sb);
		ext4_xattr_inode_array_free(ea_inode_array);
		goto no_delete;
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = (__u32)ktime_get_real_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	if (freeze_protected)
		sb_end_intwrite(inode->i_sb);
	ext4_xattr_inode_array_free(ea_inode_array);
	return;
no_delete:
	/*
	 * Check whether someone else accidentally dirtied the evicting inode;
	 * that could cause inode use-after-free issues later.
	 */
	WARN_ON_ONCE(!list_empty_careful(&inode->i_io_list));

	if (!list_empty(&EXT4_I(inode)->i_fc_list))
		ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL);
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
				  int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);

	spin_unlock(&ei->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    !inode_is_open_for_write(inode))
		ext4_discard_preallocations(inode, 0);
}

static int __check_block_validity(struct inode *inode, const char *func,
				unsigned int line,
				struct ext4_map_blocks *map)
{
	if (ext4_has_feature_journal(inode->i_sb) &&
	    (inode->i_ino ==
	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
		return 0;
	if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock %llu "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_pblk, map->m_len);
		return -EFSCORRUPTED;
	}
	return 0;
}

int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
		       ext4_lblk_t len)
{
	int ret;

	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
		return fscrypt_zeroout_range(inode, lblk, pblk, len);

	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
				       struct inode *inode,
				       struct ext4_map_blocks *es_map,
				       struct ext4_map_blocks *map,
				       int flags)
{
	int retval;

	map->m_flags = 0;
	/*
	 * There is a race window in which the result is not the same,
	 * e.g. xfstests #223 when dioread_nolock is enabled. The reason
	 * is that we look up a block mapping in the extent status tree
	 * without taking i_data_sem, so the unwritten extent may have
	 * been converted in the meantime.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	/*
	 * We don't check m_len because the extent may have been collapsed
	 * in the status tree, so the lengths might not be equal.
	 */
	if (es_map->m_lblk != map->m_lblk ||
	    es_map->m_flags != map->m_flags ||
	    es_map->m_pblk != map->m_pblk) {
		printk("ES cache assertion failed for inode: %lu "
		       "es_cached ex [%d/%d/%llu/%x] != "
		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
		       inode->i_ino, es_map->m_lblk, es_map->m_len,
		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
		       map->m_len, map->m_pblk, map->m_flags,
		       retval, flags);
	}
}
#endif /* ES_AGGRESSIVE_TEST */

static int ext4_map_query_blocks(handle_t *handle, struct inode *inode,
				 struct ext4_map_blocks *map)
{
	unsigned int status;
	int retval;

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	else
		retval = ext4_ind_map_blocks(handle, inode, map, 0);

	if (retval <= 0)
		return retval;

	if (unlikely(retval != map->m_len)) {
		ext4_warning(inode->i_sb,
			     "ES len assertion failed for inode "
			     "%lu: retval %d != map->m_len %d",
			     inode->i_ino, retval, map->m_len);
		WARN_ON(1);
	}

	status = map->m_flags & EXT4_MAP_UNWRITTEN ?
			EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
	ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
			      map->m_pblk, status);
	return retval;
}
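
/*
 * Caller-side sketch (illustrative only, not a caller in this file): a
 * lookup-only use of ext4_map_blocks(), documented below, would be
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = len };
 *	int ret = ext4_map_blocks(NULL, inode, &map, 0);
 *
 * ret > 0 means map.m_pblk/m_len now describe a written or unwritten
 * extent, ret == 0 means a hole at lblk (with map.m_len hinting at its
 * length), and ret < 0 is an error.
 */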

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns them if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extent based, ext4_ext_map_blocks() is called;
 * otherwise ext4_ind_map_blocks() handles indirect-mapped files.
 *
 * On success, it returns the number of blocks mapped or allocated. If
 * create == 0 and the blocks are pre-allocated and unwritten, the resulting
 * @map is marked as unwritten. If create == 1, it will mark @map as mapped.
 *
 * It returns 0 if a plain lookup failed (blocks have not been allocated);
 * in that case @map is returned as unmapped but we still fill in map->m_len
 * to indicate the length of the hole starting at map->m_lblk.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	struct extent_status es;
	int retval;
	int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	map->m_flags = 0;
	ext_debug(inode, "flag 0x%x, max_blocks %u, logical block %lu\n",
		  flags, map->m_len, (unsigned long) map->m_lblk);

	/*
	 * ext4_map_blocks returns an int, and m_len is an unsigned int
	 */
	if (unlikely(map->m_len > INT_MAX))
		map->m_len = INT_MAX;

	/* We can only handle block numbers below EXT_MAX_BLOCKS */
	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
		return -EFSCORRUPTED;

	/* Look up the extent status tree first */
	if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) &&
	    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
			map->m_pblk = ext4_es_pblock(&es) +
					map->m_lblk - es.es_lblk;
			map->m_flags |= ext4_es_is_written(&es) ?
					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
			map->m_pblk = 0;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
			retval = 0;
		} else {
			BUG();
		}

		if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
			return retval;
#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(handle, inode, map,
					   &orig_map, flags);
#endif
		goto found;
	}
	/*
	 * In no-wait cached-query mode there is nothing more we can do if
	 * the extent is not in the cache.
	 */
	if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
		return 0;

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
				      map->m_pblk, status);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

found:
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If this is only a block lookup */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks are already allocated.
	 *
	 * Note that for preallocated blocks ext4_ext_map_blocks() returns
	 * as if create == 0, with the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		/*
		 * If we need to convert the extent to unwritten
		 * we continue and do the actual work in
		 * ext4_ext_map_blocks()
		 */
		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
			return retval;

	/*
	 * Here we clear m_flags because after allocating a new extent,
	 * it will be set again.
	 */
	map->m_flags &= ~EXT4_MAP_FLAGS;

	/*
	 * Allocating new blocks and/or writing to an unwritten extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_block()
	 * with create == 1 flag.
	 */
	down_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing. Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}
	}

	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		/*
		 * We have to zeroout blocks before inserting them into extent
		 * status tree. Otherwise someone could look them up there and
		 * use them before they are really zeroed. We also have to
		 * unmap metadata before zeroing as otherwise writeback can
		 * overwrite zeros with stale data from block device.
		 */
		if (flags & EXT4_GET_BLOCKS_ZERO &&
		    map->m_flags & EXT4_MAP_MAPPED &&
		    map->m_flags & EXT4_MAP_NEW) {
			ret = ext4_issue_zeroout(inode, map->m_lblk,
						 map->m_pblk, map->m_len);
			if (ret) {
				retval = ret;
				goto out_sem;
			}
		}

		/*
		 * If the extent has been zeroed out, we don't need to update
		 * extent status tree.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
		    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
			if (ext4_es_is_written(&es))
				goto out_sem;
		}
		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
				      map->m_pblk, status);
	}

out_sem:
	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;

		/*
		 * Inodes with freshly allocated blocks where contents will be
		 * visible after transaction commit must be on transaction's
		 * ordered data list.
		 */
		if (map->m_flags & EXT4_MAP_NEW &&
		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
		    !(flags & EXT4_GET_BLOCKS_ZERO) &&
		    !ext4_is_quota_file(inode) &&
		    ext4_should_order_data(inode)) {
			loff_t start_byte =
				(loff_t)map->m_lblk << inode->i_blkbits;
			loff_t length = (loff_t)map->m_len << inode->i_blkbits;

			if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
				ret = ext4_jbd2_inode_add_wait(handle, inode,
						start_byte, length);
			else
				ret = ext4_jbd2_inode_add_write(handle, inode,
						start_byte, length);
			if (ret)
				return ret;
		}
	}
	if (retval > 0 && (map->m_flags & EXT4_MAP_UNWRITTEN ||
				map->m_flags & EXT4_MAP_MAPPED))
		ext4_fc_track_range(handle, inode, map->m_lblk,
					map->m_lblk + map->m_len - 1);
	if (retval < 0)
		ext_debug(inode, "failed with err %d\n", retval);
	return retval;
}

/*
 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
 * we have to be careful as someone else may be manipulating b_state as well.
 */
static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
{
	unsigned long old_state;
	unsigned long new_state;

	flags &= EXT4_MAP_FLAGS;

	/* Dummy buffer_head? Set non-atomically. */
	if (!bh->b_page) {
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
		return;
	}
	/*
	 * Someone else may be modifying b_state. Be careful! This is ugly but
	 * once we get rid of using bh as a container for mapping information
	 * to pass to / from get_block functions, this can go away.
	 */
	old_state = READ_ONCE(bh->b_state);
	do {
		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
	} while (unlikely(!try_cmpxchg(&bh->b_state, &old_state, new_state)));
}

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	struct ext4_map_blocks map;
	int ret = 0;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
			      flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		ext4_update_bh_state(bh, map.m_flags);
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	} else if (ret == 0) {
		/* hole case, need to fill in bh->b_size */
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
	}
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}
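
/*
 * Illustrative caller view (an assumed example, not a caller in this file):
 *
 *	struct buffer_head bh = { .b_size = i_blocksize(inode) };
 *	err = ext4_get_block(inode, iblock, &bh, 0);
 *
 * leaves bh mapped (or unmapped for a hole) with bh.b_size trimmed to the
 * contiguous range found, which is the shape the generic buffered-IO
 * helpers expect from a get_block_t callback.
 */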

/*
 * Get block function used when preparing for a buffered write; it creates
 * an unwritten extent if blocks haven't been allocated. The extent will be
 * converted to written after the IO is complete.
 */
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	int ret = 0;

	ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	ret = _ext4_get_block(inode, iblock, bh_result,
			      EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);

	/*
	 * If the buffer is marked unwritten, mark it as new to make sure it is
	 * zeroed out correctly in case of partial writes. Otherwise, there is
	 * a chance of stale data getting exposed.
	 */
	if (ret == 0 && buffer_unwritten(bh_result))
		set_buffer_new(bh_result);

	return ret;
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int map_flags)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int create = map_flags & EXT4_GET_BLOCKS_CREATE;
	bool nowait = map_flags & EXT4_GET_BLOCKS_CACHED_NOWAIT;
	int err;

	ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		    || handle != NULL || create == 0);
	ASSERT(create == 0 || !nowait);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map, map_flags);

	if (err == 0)
		return create ? ERR_PTR(-ENOSPC) : NULL;
	if (err < 0)
		return ERR_PTR(err);

	if (nowait)
		return sb_find_get_block(inode->i_sb, map.m_pblk);

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	if (map.m_flags & EXT4_MAP_NEW) {
		ASSERT(create != 0);
		ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
			    || (handle != NULL));

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata. For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
						     EXT4_JTR_NONE);
		if (unlikely(err)) {
			unlock_buffer(bh);
			goto errout;
		}
		if (!buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			goto errout;
	} else
		BUFFER_TRACE(bh, "not a new buffer");
	return bh;
errout:
	brelse(bh);
	return ERR_PTR(err);
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int map_flags)
{
	struct buffer_head *bh;
	int ret;

	bh = ext4_getblk(handle, inode, block, map_flags);
	if (IS_ERR(bh))
		return bh;
	if (!bh || ext4_buffer_uptodate(bh))
		return bh;

	ret = ext4_read_bh_lock(bh, REQ_META | REQ_PRIO, true);
	if (ret) {
		put_bh(bh);
		return ERR_PTR(ret);
	}
	return bh;
}
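
/*
 * Illustrative use (an assumed caller, mirroring how directory code
 * typically consumes this helper): both the hole and the error case must
 * be handled,
 *
 *	bh = ext4_bread(handle, inode, block, 0);
 *	if (IS_ERR(bh))
 *		return PTR_ERR(bh);	(lookup or read error)
 *	if (!bh)
 *		...			(hole, nothing mapped here)
 */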

/* Read a contiguous batch of blocks. */
int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
		     bool wait, struct buffer_head **bhs)
{
	int i, err;

	for (i = 0; i < bh_count; i++) {
		bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
		if (IS_ERR(bhs[i])) {
			err = PTR_ERR(bhs[i]);
			bh_count = i;
			goto out_brelse;
		}
	}

	for (i = 0; i < bh_count; i++)
		/* Note that NULL bhs[i] is valid because of holes. */
		if (bhs[i] && !ext4_buffer_uptodate(bhs[i]))
			ext4_read_bh_lock(bhs[i], REQ_META | REQ_PRIO, false);

	if (!wait)
		return 0;

	for (i = 0; i < bh_count; i++)
		if (bhs[i])
			wait_on_buffer(bhs[i]);

	for (i = 0; i < bh_count; i++) {
		if (bhs[i] && !buffer_uptodate(bhs[i])) {
			err = -EIO;
			goto out_brelse;
		}
	}
	return 0;

out_brelse:
	for (i = 0; i < bh_count; i++) {
		brelse(bhs[i]);
		bhs[i] = NULL;
	}
	return err;
}

int ext4_walk_page_buffers(handle_t *handle, struct inode *inode,
			   struct buffer_head *head,
			   unsigned from,
			   unsigned to,
			   int *partial,
			   int (*fn)(handle_t *handle, struct inode *inode,
				     struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, inode, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
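
/*
 * Note (summary added for readability): ext4_walk_page_buffers() reports
 * through *partial whether any buffer outside [from, to) is not uptodate;
 * callers such as ext4_journalled_write_end() use that to decide whether
 * the whole folio may be marked uptodate after a partial copy.
 */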

/*
 * Helper for handling dirtying of journalled data. We also mark the folio
 * as dirty so that the writeback code knows that this page (and inode)
 * contains dirty data. ext4_writepages() then commits the appropriate
 * transaction to make the data stable.
 */
static int ext4_dirty_journalled_data(handle_t *handle, struct buffer_head *bh)
{
	folio_mark_dirty(bh->b_folio);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

int do_journal_get_write_access(handle_t *handle, struct inode *inode,
				struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers. Clean
	 * the dirty bit as jbd2_journal_get_write_access() could otherwise
	 * complain about fs integrity issues. Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	BUFFER_TRACE(bh, "get write access");
	ret = ext4_journal_get_write_access(handle, inode->i_sb, bh,
					    EXT4_JTR_NONE);
	if (!ret && dirty)
		ret = ext4_dirty_journalled_data(handle, bh);
	return ret;
}

#ifdef CONFIG_FS_ENCRYPTION
static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
				  get_block_t *get_block)
{
	unsigned from = pos & (PAGE_SIZE - 1);
	unsigned to = from + len;
	struct inode *inode = folio->mapping->host;
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;
	unsigned bbits;
	struct buffer_head *bh, *head, *wait[2];
	int nr_wait = 0;
	int i;

	BUG_ON(!folio_test_locked(folio));
	BUG_ON(from > PAGE_SIZE);
	BUG_ON(to > PAGE_SIZE);
	BUG_ON(from > to);

	head = folio_buffers(folio);
	if (!head) {
		create_empty_buffers(&folio->page, blocksize, 0);
		head = folio_buffers(folio);
	}
	bbits = ilog2(blocksize);
	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);

	for (bh = head, block_start = 0; bh != head || !block_start;
	     block++, block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (folio_test_uptodate(folio)) {
				set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				break;
			if (buffer_new(bh)) {
				if (folio_test_uptodate(folio)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
					continue;
				}
				if (block_end > to || block_start < from)
					folio_zero_segments(folio, to,
							    block_end,
							    block_start, from);
				continue;
			}
		}
		if (folio_test_uptodate(folio)) {
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		    (block_start < from || block_end > to)) {
			ext4_read_bh_lock(bh, 0, false);
			wait[nr_wait++] = bh;
		}
	}
	/*
	 * If we issued read requests, let them complete.
	 */
	for (i = 0; i < nr_wait; i++) {
		wait_on_buffer(wait[i]);
		if (!buffer_uptodate(wait[i]))
			err = -EIO;
	}
	if (unlikely(err)) {
		folio_zero_new_buffers(folio, from, to);
	} else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
		for (i = 0; i < nr_wait; i++) {
			int err2;

			err2 = fscrypt_decrypt_pagecache_blocks(folio,
						blocksize, bh_offset(wait[i]));
			if (err2) {
				clear_buffer_uptodate(wait[i]);
				err = err2;
			}
		}
	}

	return err;
}
#endif
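
/*
 * Summary comment (added here, not upstream wording): the
 * CONFIG_FS_ENCRYPTION variant above duplicates __block_write_begin() only
 * so that blocks read in from disk can be decrypted in the page cache when
 * the inode uses fs-layer crypto; for inline-crypto or unencrypted inodes
 * the decryption step is simply skipped.
 */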

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction. We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the ext4_write_end(). So doing the jbd2_journal_start at the start of
 * ext4_write_begin() is the right place.
 */
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct folio *folio;
	pgoff_t index;
	unsigned from, to;

	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
		return -EIO;

	trace_ext4_write_begin(inode, pos, len);
	/*
	 * Reserve one more block for addition to the orphan list in case
	 * we allocate blocks but the write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_SHIFT;
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
						    pagep);
		if (ret < 0)
			return ret;
		if (ret == 1)
			return 0;
	}

	/*
	 * __filemap_get_folio() can take a long time if the
	 * system is thrashing due to memory pressure, or if the folio
	 * is being written back. So grab it first before we start
	 * the transaction handle. This also allows us to allocate
	 * the folio (if needed) without using GFP_NOFS.
	 */
retry_grab:
	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
					mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	/*
	 * As with the folio allocation, preallocate the buffer heads
	 * before starting the handle.
	 */
	if (!folio_buffers(folio))
		create_empty_buffers(&folio->page, inode->i_sb->s_blocksize, 0);

	folio_unlock(folio);

retry_journal:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
	if (IS_ERR(handle)) {
		folio_put(folio);
		return PTR_ERR(handle);
	}

	folio_lock(folio);
	if (folio->mapping != mapping) {
		/* The folio got truncated from under us */
		folio_unlock(folio);
		folio_put(folio);
		ext4_journal_stop(handle);
		goto retry_grab;
	}
	/* In case writeback began while the folio was unlocked */
	folio_wait_stable(folio);

#ifdef CONFIG_FS_ENCRYPTION
	if (ext4_should_dioread_nolock(inode))
		ret = ext4_block_write_begin(folio, pos, len,
					     ext4_get_block_unwritten);
	else
		ret = ext4_block_write_begin(folio, pos, len, ext4_get_block);
#else
	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(&folio->page, pos, len,
					  ext4_get_block_unwritten);
	else
		ret = __block_write_begin(&folio->page, pos, len, ext4_get_block);
#endif
	if (!ret && ext4_should_journal_data(inode)) {
		ret = ext4_walk_page_buffers(handle, inode,
					     folio_buffers(folio), from, to,
					     NULL, do_journal_get_write_access);
	}

	if (ret) {
		bool extended = (pos + len > inode->i_size) &&
				!ext4_verity_in_progress(inode);

		folio_unlock(folio);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size. Trim these off again. Don't need
		 * i_size_read because we hold i_rwsem.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (extended && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (extended) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}

		if (ret == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry_journal;
		folio_put(folio);
		return ret;
	}
	*pagep = &folio->page;
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct inode *inode,
			struct buffer_head *bh)
{
	int ret;
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	ret = ext4_dirty_journalled_data(handle, bh);
	clear_buffer_meta(bh);
	clear_buffer_prio(bh);
	return ret;
}
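
/*
 * Pairing note (a summary of the VFS contract, added for readability):
 * generic_perform_write() calls ->write_begin(), copies the user data into
 * the returned page, then calls ->write_end() with the number of bytes
 * actually copied, which may be short; the write_end implementations below
 * must cope with such short copies.
 */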

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list; metadata
 * buffers are managed internally.
 */
static int ext4_write_end(struct file *file,
			  struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct folio *folio = page_folio(page);
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int i_size_changed = 0;
	bool verity = ext4_verity_in_progress(inode);

	trace_ext4_write_end(inode, pos, len, copied);

	if (ext4_has_inline_data(inode) &&
	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
		return ext4_write_inline_data_end(inode, pos, len, copied,
						  folio);

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
	/*
	 * it's important to update i_size while still holding folio lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 *
	 * If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree
	 * blocks are being written past EOF, so skip the i_size update.
	 */
	if (!verity)
		i_size_changed = ext4_update_inode_size(inode, pos + copied);
	folio_unlock(folio);
	folio_put(folio);

	if (old_size < pos && !verity)
		pagecache_isize_extended(inode, old_size, pos);
	/*
	 * Don't mark the inode dirty under folio lock. First, it unnecessarily
	 * makes the holding time of folio lock longer. Second, it forces lock
	 * ordering of folio lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ret = ext4_mark_inode_dirty(handle, inode);

	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks and copied less, we will
		 * have blocks allocated outside inode->i_size, so truncate
		 * them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size && !verity) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * This is a private version of folio_zero_new_buffers() which doesn't
 * set the buffer to be dirty, since in data=journalled mode we need
 * to call ext4_dirty_journalled_data() instead.
 */
static void ext4_journalled_zero_new_buffers(handle_t *handle,
					     struct inode *inode,
					     struct folio *folio,
					     unsigned from, unsigned to)
{
	unsigned int block_start = 0, block_end;
	struct buffer_head *head, *bh;

	bh = head = folio_buffers(folio);
	do {
		block_end = block_start + bh->b_size;
		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!folio_test_uptodate(folio)) {
					unsigned start, size;

					start = max(from, block_start);
					size = min(to, block_end) - start;

					folio_zero_range(folio, start, size);
					write_end_fn(handle, inode, bh);
				}
				clear_buffer_new(bh);
			}
		}
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}

static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	struct folio *folio = page_folio(page);
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	int size_changed = 0;
	bool verity = ext4_verity_in_progress(inode);

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (ext4_has_inline_data(inode))
		return ext4_write_inline_data_end(inode, pos, len, copied,
						  folio);

	if (unlikely(copied < len) && !folio_test_uptodate(folio)) {
		copied = 0;
		ext4_journalled_zero_new_buffers(handle, inode, folio,
						 from, to);
	} else {
		if (unlikely(copied < len))
			ext4_journalled_zero_new_buffers(handle, inode, folio,
							 from + copied, to);
		ret = ext4_walk_page_buffers(handle, inode,
					     folio_buffers(folio),
					     from, from + copied, &partial,
					     write_end_fn);
		if (!partial)
			folio_mark_uptodate(folio);
	}
	if (!verity)
		size_changed = ext4_update_inode_size(inode, pos + copied);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	folio_unlock(folio);
	folio_put(folio);

	if (old_size < pos && !verity)
		pagecache_isize_extended(inode, old_size, pos);

	if (size_changed) {
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks and copied less, we will
		 * have blocks allocated outside inode->i_size, so truncate
		 * them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size && !verity) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * Reserve space for a single cluster
 */
static int ext4_da_reserve_space(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	int ret;

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end. Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
	if (ret)
		return ret;

	spin_lock(&ei->i_block_reservation_lock);
	if (ext4_claim_free_clusters(sbi, 1, 0)) {
		spin_unlock(&ei->i_block_reservation_lock);
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
		return -ENOSPC;
	}
	ei->i_reserved_data_blocks++;
	trace_ext4_da_reserve_space(inode);
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;	/* success */
}

void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * If there aren't enough reserved blocks, then the
		 * counter is messed up somewhere. Since this
		 * function is called from the invalidate path, it's
		 * harmless to return without any action.
		 */
		ext4_warning(inode->i_sb, "ext4_da_release_space: "
			 "ino %lu, to_free %d with only %d reserved "
			 "data blocks", inode->i_ino, to_free,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}
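
/*
 * Summary (restating the two helpers above): ext4_da_reserve_space()
 * reserves quota for one cluster and accounts it as dirty via
 * ext4_claim_free_clusters(); ext4_da_release_space() is its inverse and
 * runs from the invalidate path when delayed clusters are dropped without
 * ever being allocated.
 */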

/*
 * Delayed allocation stuff
 */

struct mpage_da_data {
	/* These are input fields for ext4_do_writepages() */
	struct inode *inode;
	struct writeback_control *wbc;
	unsigned int can_map:1;	/* Can writepages call map blocks? */

	/* These are internal state of ext4_do_writepages() */
	pgoff_t first_page;	/* The first page to write */
	pgoff_t next_page;	/* Current page to examine */
	pgoff_t last_page;	/* Last page to examine */
	/*
	 * Extent to map - this can be after first_page because that can be
	 * fully mapped. We somewhat abuse m_flags to store whether the
	 * extent is delalloc or unwritten.
	 */
	struct ext4_map_blocks map;
	struct ext4_io_submit io_submit;	/* IO submission data */
	unsigned int do_map:1;
	unsigned int scanned_until_end:1;
	unsigned int journalled_more_data:1;
};

static void mpage_release_unused_pages(struct mpage_da_data *mpd,
				       bool invalidate)
{
	unsigned nr, i;
	pgoff_t index, end;
	struct folio_batch fbatch;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	/* This is necessary when next_page == 0. */
	if (mpd->first_page >= mpd->next_page)
		return;

	mpd->scanned_until_end = 0;
	index = mpd->first_page;
	end = mpd->next_page - 1;
	if (invalidate) {
		ext4_lblk_t start, last;
		start = index << (PAGE_SHIFT - inode->i_blkbits);
		last = end << (PAGE_SHIFT - inode->i_blkbits);

		/*
		 * avoid racing with extent status tree scans made by
		 * ext4_insert_delayed_block()
		 */
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_es_remove_extent(inode, start, last - start + 1);
		up_write(&EXT4_I(inode)->i_data_sem);
	}

	folio_batch_init(&fbatch);
	while (index <= end) {
		nr = filemap_get_folios(mapping, &index, end, &fbatch);
		if (nr == 0)
			break;
		for (i = 0; i < nr; i++) {
			struct folio *folio = fbatch.folios[i];

			if (folio->index < mpd->first_page)
				continue;
			if (folio_next_index(folio) - 1 > end)
				continue;
			BUG_ON(!folio_test_locked(folio));
			BUG_ON(folio_test_writeback(folio));
			if (invalidate) {
				if (folio_mapped(folio))
					folio_clear_dirty_for_io(folio);
				block_invalidate_folio(folio, 0,
						folio_size(folio));
				folio_clear_uptodate(folio);
			}
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
	}
}

static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct super_block *sb = inode->i_sb;
	struct ext4_inode_info *ei = EXT4_I(inode);

	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
		 EXT4_C2B(EXT4_SB(inode->i_sb),
			  ext4_count_free_clusters(sb)));
	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(sb),
			percpu_counter_sum(&sbi->s_freeclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(sb),
			percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "Block reservation details");
	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
		 ei->i_reserved_data_blocks);
	return;
}

/*
 * ext4_insert_delayed_block - adds a delayed block to the extents status
 *			       tree, incrementing the reserved cluster/block
 *			       count or making a pending reservation
 *			       where needed
 *
 * @inode - file containing the newly added block
 * @lblk - logical block to be added
 *
 * Returns 0 on success, negative error code on failure.
 */
static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int ret;
	bool allocated = false;

	/*
	 * If the cluster containing lblk is shared with a delayed,
	 * written, or unwritten extent in a bigalloc file system, it's
	 * already been accounted for and does not need to be reserved.
	 * A pending reservation must be made for the cluster if it's
	 * shared with a written or unwritten extent and doesn't already
	 * have one. Written and unwritten extents can be purged from the
	 * extents status tree if the system is under memory pressure, so
	 * it's necessary to examine the extent tree if a search of the
	 * extents status tree doesn't get a match.
	 */
	if (sbi->s_cluster_ratio == 1) {
		ret = ext4_da_reserve_space(inode);
		if (ret != 0)   /* ENOSPC */
			return ret;
	} else {   /* bigalloc */
		if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
			if (!ext4_es_scan_clu(inode,
					      &ext4_es_is_mapped, lblk)) {
				ret = ext4_clu_mapped(inode,
						      EXT4_B2C(sbi, lblk));
				if (ret < 0)
					return ret;
				if (ret == 0) {
					ret = ext4_da_reserve_space(inode);
					if (ret != 0)   /* ENOSPC */
						return ret;
				} else {
					allocated = true;
				}
			} else {
				allocated = true;
			}
		}
	}

	ext4_es_insert_delayed_block(inode, lblk, allocated);
	return 0;
}
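
/*
 * Decision recap for the bigalloc branch above (restating the logic, not
 * adding to it):
 *	delonly extent already in the cluster	-> nothing to do
 *	mapped extent cached for the cluster	-> allocated = true
 *	ext4_clu_mapped() > 0			-> allocated = true
 *	otherwise				-> reserve a new cluster
 */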

/*
 * This function duplicates code from the very beginning of
 * ext4_map_blocks(), but assumes that the caller is coming from the
 * delayed write path. It looks up the requested blocks and sets the
 * buffer delay bit under the protection of i_data_sem.
 */
static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
			      struct ext4_map_blocks *map,
			      struct buffer_head *bh)
{
	struct extent_status es;
	int retval;
	sector_t invalid_block = ~((sector_t) 0xffff);
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
		invalid_block = ~0;

	map->m_flags = 0;
	ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
		  (unsigned long) map->m_lblk);

	/* Look up the extent status tree first */
	if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
		if (ext4_es_is_hole(&es))
			goto add_delayed;

found:
		/*
		 * Delayed extent could be allocated by fallocate.
		 * So we need to check it.
		 */
		if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
			map_bh(bh, inode->i_sb, invalid_block);
			set_buffer_new(bh);
			set_buffer_delay(bh);
			return 0;
		}

		map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
		retval = es.es_len - (iblock - es.es_lblk);
		if (retval > map->m_len)
			retval = map->m_len;
		map->m_len = retval;
		if (ext4_es_is_written(&es))
			map->m_flags |= EXT4_MAP_MAPPED;
		else if (ext4_es_is_unwritten(&es))
			map->m_flags |= EXT4_MAP_UNWRITTEN;
		else
			BUG();

#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
#endif
		return retval;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_has_inline_data(inode))
		retval = 0;
	else
		retval = ext4_map_query_blocks(NULL, inode, map);
	up_read(&EXT4_I(inode)->i_data_sem);
	if (retval)
		return retval;

add_delayed:
	down_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * Page fault path (ext4_page_mkwrite does not take i_rwsem)
	 * and fallocate path (no folio lock) can race. Make sure we
	 * look up the extent status tree here again while i_data_sem
	 * is held in write mode, before inserting a new da entry in
	 * the extent status tree.
	 */
	if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
		if (!ext4_es_is_hole(&es)) {
			up_write(&EXT4_I(inode)->i_data_sem);
			goto found;
		}
	} else if (!ext4_has_inline_data(inode)) {
		retval = ext4_map_query_blocks(NULL, inode, map);
		if (retval) {
			up_write(&EXT4_I(inode)->i_data_sem);
			return retval;
		}
	}

	retval = ext4_insert_delayed_block(inode, map->m_lblk);
	up_write(&EXT4_I(inode)->i_data_sem);
	if (retval)
		return retval;

	map_bh(bh, inode->i_sb, invalid_block);
	set_buffer_new(bh);
	set_buffer_delay(bh);
	return retval;
}

/*
 * This is a special get_block_t callback which is used by
 * ext4_da_write_begin(). It will either return mapped block or
 * reserve space for a single block.
 *
 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
 * We also have b_blocknr = -1 and b_bdev initialized properly
 *
 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
 * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
 * initialized properly.
 */
int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct ext4_map_blocks map;
	int ret = 0;

	BUG_ON(create == 0);
	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);

	map.m_lblk = iblock;
	map.m_len = 1;

	/*
	 * First, we need to know whether the block is already allocated;
	 * preallocated blocks are unmapped but should be treated the same
	 * as allocated blocks.
	 */
	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
	if (ret <= 0)
		return ret;

	map_bh(bh, inode->i_sb, map.m_pblk);
	ext4_update_bh_state(bh, map.m_flags);

	if (buffer_unwritten(bh)) {
		/*
		 * A delayed write to an unwritten bh should be marked new
		 * and mapped. Mapped ensures that we don't do get_block
		 * multiple times when we write to the same offset, and new
		 * ensures that we do the proper zero-out for partial writes.
		 */
		set_buffer_new(bh);
		set_buffer_mapped(bh);
	}
	return 0;
}

static void mpage_folio_done(struct mpage_da_data *mpd, struct folio *folio)
{
	mpd->first_page += folio_nr_pages(folio);
	folio_unlock(folio);
}

static int mpage_submit_folio(struct mpage_da_data *mpd, struct folio *folio)
{
	size_t len;
	loff_t size;
	int err;

	BUG_ON(folio->index != mpd->first_page);
	folio_clear_dirty_for_io(folio);
	/*
	 * We have to be very careful here! Nothing protects writeback path
	 * against i_size changes and the page can be writeably mapped into
	 * page tables. So an application can be growing i_size and writing
	 * data through mmap while writeback runs. folio_clear_dirty_for_io()
	 * write-protects our page in page tables and the page cannot get
	 * written to again until we release folio lock. So only after
	 * folio_clear_dirty_for_io() we are safe to sample i_size for
	 * ext4_bio_write_folio() to zero-out tail of the written page. We rely
We rely 1888 * on the barrier provided by folio_test_clear_dirty() in 1889 * folio_clear_dirty_for_io() to make sure i_size is really sampled only 1890 * after page tables are updated. 1891 */ 1892 size = i_size_read(mpd->inode); 1893 len = folio_size(folio); 1894 if (folio_pos(folio) + len > size && 1895 !ext4_verity_in_progress(mpd->inode)) 1896 len = size & ~PAGE_MASK; 1897 err = ext4_bio_write_folio(&mpd->io_submit, folio, len); 1898 if (!err) 1899 mpd->wbc->nr_to_write--; 1900 1901 return err; 1902 } 1903 1904 #define BH_FLAGS (BIT(BH_Unwritten) | BIT(BH_Delay)) 1905 1906 /* 1907 * mballoc gives us at most this number of blocks... 1908 * XXX: That seems to be only a limitation of ext4_mb_normalize_request(). 1909 * The rest of mballoc seems to handle chunks up to full group size. 1910 */ 1911 #define MAX_WRITEPAGES_EXTENT_LEN 2048 1912 1913 /* 1914 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map 1915 * 1916 * @mpd - extent of blocks 1917 * @lblk - logical number of the block in the file 1918 * @bh - buffer head we want to add to the extent 1919 * 1920 * The function is used to collect contiguous blocks in the same state. If the 1921 * buffer doesn't require mapping for writeback and we haven't started the 1922 * extent of buffers to map yet, the function returns 'true' immediately - the 1923 * caller can write the buffer right away. Otherwise the function returns true 1924 * if the block has been added to the extent, false if the block couldn't be 1925 * added. 1926 */ 1927 static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk, 1928 struct buffer_head *bh) 1929 { 1930 struct ext4_map_blocks *map = &mpd->map; 1931 1932 /* Buffer that doesn't need mapping for writeback? */ 1933 if (!buffer_dirty(bh) || !buffer_mapped(bh) || 1934 (!buffer_delay(bh) && !buffer_unwritten(bh))) { 1935 /* So far no extent to map => we write the buffer right away */ 1936 if (map->m_len == 0) 1937 return true; 1938 return false; 1939 } 1940 1941 /* First block in the extent? */ 1942 if (map->m_len == 0) { 1943 /* We cannot map unless handle is started... */ 1944 if (!mpd->do_map) 1945 return false; 1946 map->m_lblk = lblk; 1947 map->m_len = 1; 1948 map->m_flags = bh->b_state & BH_FLAGS; 1949 return true; 1950 } 1951 1952 /* Don't go larger than mballoc is willing to allocate */ 1953 if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN) 1954 return false; 1955 1956 /* Can we merge the block to our big extent? */ 1957 if (lblk == map->m_lblk + map->m_len && 1958 (bh->b_state & BH_FLAGS) == map->m_flags) { 1959 map->m_len++; 1960 return true; 1961 } 1962 return false; 1963 } 1964 1965 /* 1966 * mpage_process_page_bufs - submit page buffers for IO or add them to extent 1967 * 1968 * @mpd - extent of blocks for mapping 1969 * @head - the first buffer in the page 1970 * @bh - buffer we should start processing from 1971 * @lblk - logical number of the block in the file corresponding to @bh 1972 * 1973 * Walk through page buffers from @bh up to @head (exclusive) and either submit 1974 * the page for IO if all buffers in this page were mapped and there's no 1975 * accumulated extent of buffers to map or add buffers in the page to the 1976 * extent of buffers to map. The function returns 1 if the caller can continue 1977 * by processing the next page, 0 if it should stop adding buffers to the 1978 * extent to map because we cannot extend it anymore. It can also return a value 1979 * < 0 in case of error during IO submission.
1980 */ 1981 static int mpage_process_page_bufs(struct mpage_da_data *mpd, 1982 struct buffer_head *head, 1983 struct buffer_head *bh, 1984 ext4_lblk_t lblk) 1985 { 1986 struct inode *inode = mpd->inode; 1987 int err; 1988 ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1) 1989 >> inode->i_blkbits; 1990 1991 if (ext4_verity_in_progress(inode)) 1992 blocks = EXT_MAX_BLOCKS; 1993 1994 do { 1995 BUG_ON(buffer_locked(bh)); 1996 1997 if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) { 1998 /* Found extent to map? */ 1999 if (mpd->map.m_len) 2000 return 0; 2001 /* Buffer needs mapping and handle is not started? */ 2002 if (!mpd->do_map) 2003 return 0; 2004 /* Everything mapped so far and we hit EOF */ 2005 break; 2006 } 2007 } while (lblk++, (bh = bh->b_this_page) != head); 2008 /* So far everything mapped? Submit the page for IO. */ 2009 if (mpd->map.m_len == 0) { 2010 err = mpage_submit_folio(mpd, head->b_folio); 2011 if (err < 0) 2012 return err; 2013 mpage_folio_done(mpd, head->b_folio); 2014 } 2015 if (lblk >= blocks) { 2016 mpd->scanned_until_end = 1; 2017 return 0; 2018 } 2019 return 1; 2020 } 2021 2022 /* 2023 * mpage_process_folio - update folio buffers corresponding to changed extent 2024 * and may submit fully mapped page for IO 2025 * @mpd: description of extent to map, on return next extent to map 2026 * @folio: Contains these buffers. 2027 * @m_lblk: logical block mapping. 2028 * @m_pblk: corresponding physical mapping. 2029 * @map_bh: determines on return whether this page requires any further 2030 * mapping or not. 2031 * 2032 * Scan given folio buffers corresponding to changed extent and update buffer 2033 * state according to new extent state. 2034 * We map delalloc buffers to their physical location, clear unwritten bits. 2035 * If the given folio is not fully mapped, we update @mpd to the next extent in 2036 * the given folio that needs mapping & return @map_bh as true. 2037 */ 2038 static int mpage_process_folio(struct mpage_da_data *mpd, struct folio *folio, 2039 ext4_lblk_t *m_lblk, ext4_fsblk_t *m_pblk, 2040 bool *map_bh) 2041 { 2042 struct buffer_head *head, *bh; 2043 ext4_io_end_t *io_end = mpd->io_submit.io_end; 2044 ext4_lblk_t lblk = *m_lblk; 2045 ext4_fsblk_t pblock = *m_pblk; 2046 int err = 0; 2047 int blkbits = mpd->inode->i_blkbits; 2048 ssize_t io_end_size = 0; 2049 struct ext4_io_end_vec *io_end_vec = ext4_last_io_end_vec(io_end); 2050 2051 bh = head = folio_buffers(folio); 2052 do { 2053 if (lblk < mpd->map.m_lblk) 2054 continue; 2055 if (lblk >= mpd->map.m_lblk + mpd->map.m_len) { 2056 /* 2057 * Buffer after end of mapped extent. 2058 * Find next buffer in the folio to map. 
2059 */ 2060 mpd->map.m_len = 0; 2061 mpd->map.m_flags = 0; 2062 io_end_vec->size += io_end_size; 2063 2064 err = mpage_process_page_bufs(mpd, head, bh, lblk); 2065 if (err > 0) 2066 err = 0; 2067 if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) { 2068 io_end_vec = ext4_alloc_io_end_vec(io_end); 2069 if (IS_ERR(io_end_vec)) { 2070 err = PTR_ERR(io_end_vec); 2071 goto out; 2072 } 2073 io_end_vec->offset = (loff_t)mpd->map.m_lblk << blkbits; 2074 } 2075 *map_bh = true; 2076 goto out; 2077 } 2078 if (buffer_delay(bh)) { 2079 clear_buffer_delay(bh); 2080 bh->b_blocknr = pblock++; 2081 } 2082 clear_buffer_unwritten(bh); 2083 io_end_size += (1 << blkbits); 2084 } while (lblk++, (bh = bh->b_this_page) != head); 2085 2086 io_end_vec->size += io_end_size; 2087 *map_bh = false; 2088 out: 2089 *m_lblk = lblk; 2090 *m_pblk = pblock; 2091 return err; 2092 } 2093 2094 /* 2095 * mpage_map_and_submit_buffers - update buffers corresponding to changed 2096 * extent and submit fully mapped pages for IO 2097 * 2098 * @mpd - description of extent to map, on return next extent to map 2099 * 2100 * Scan buffers corresponding to changed extent (we expect corresponding pages 2101 * to be already locked) and update buffer state according to new extent state. 2102 * We map delalloc buffers to their physical location, clear unwritten bits, 2103 * and mark buffers as uninit when we perform writes to unwritten extents 2104 * and do extent conversion after IO is finished. If the last page is not fully 2105 * mapped, we update @mpd to the next extent in the last page that needs 2106 * mapping. Otherwise we submit the page for IO. 2107 */ 2108 static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd) 2109 { 2110 struct folio_batch fbatch; 2111 unsigned nr, i; 2112 struct inode *inode = mpd->inode; 2113 int bpp_bits = PAGE_SHIFT - inode->i_blkbits; 2114 pgoff_t start, end; 2115 ext4_lblk_t lblk; 2116 ext4_fsblk_t pblock; 2117 int err; 2118 bool map_bh = false; 2119 2120 start = mpd->map.m_lblk >> bpp_bits; 2121 end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits; 2122 lblk = start << bpp_bits; 2123 pblock = mpd->map.m_pblk; 2124 2125 folio_batch_init(&fbatch); 2126 while (start <= end) { 2127 nr = filemap_get_folios(inode->i_mapping, &start, end, &fbatch); 2128 if (nr == 0) 2129 break; 2130 for (i = 0; i < nr; i++) { 2131 struct folio *folio = fbatch.folios[i]; 2132 2133 err = mpage_process_folio(mpd, folio, &lblk, &pblock, 2134 &map_bh); 2135 /* 2136 * If map_bh is true, the page may require further bh 2137 * mapping, or the page may have been submitted for IO, 2138 * so return to let the caller map the next extent. 2139 */ 2140 if (err < 0 || map_bh) 2141 goto out; 2142 /* Page fully mapped - let IO run! */ 2143 err = mpage_submit_folio(mpd, folio); 2144 if (err < 0) 2145 goto out; 2146 mpage_folio_done(mpd, folio); 2147 } 2148 folio_batch_release(&fbatch); 2149 } 2150 /* Extent fully mapped and matches the page boundary. We are done.
*/ 2151 mpd->map.m_len = 0; 2152 mpd->map.m_flags = 0; 2153 return 0; 2154 out: 2155 folio_batch_release(&fbatch); 2156 return err; 2157 } 2158 2159 static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd) 2160 { 2161 struct inode *inode = mpd->inode; 2162 struct ext4_map_blocks *map = &mpd->map; 2163 int get_blocks_flags; 2164 int err, dioread_nolock; 2165 2166 trace_ext4_da_write_pages_extent(inode, map); 2167 /* 2168 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or 2169 * to convert an unwritten extent to be initialized (in the case 2170 * where we have written into one or more preallocated blocks). It is 2171 * possible that we're going to need more metadata blocks than 2172 * previously reserved. However, we must not fail because we're in 2173 * writeback and there is nothing we can do about it so it might result 2174 * in data loss. So use reserved blocks to allocate metadata if 2175 * possible. 2176 * 2177 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if 2178 * the blocks in question are delalloc blocks. This indicates 2179 * that the blocks and quotas have already been checked when 2180 * the data was copied into the page cache. 2181 */ 2182 get_blocks_flags = EXT4_GET_BLOCKS_CREATE | 2183 EXT4_GET_BLOCKS_METADATA_NOFAIL | 2184 EXT4_GET_BLOCKS_IO_SUBMIT; 2185 dioread_nolock = ext4_should_dioread_nolock(inode); 2186 if (dioread_nolock) 2187 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; 2188 if (map->m_flags & BIT(BH_Delay)) 2189 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; 2190 2191 err = ext4_map_blocks(handle, inode, map, get_blocks_flags); 2192 if (err < 0) 2193 return err; 2194 if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) { 2195 if (!mpd->io_submit.io_end->handle && 2196 ext4_handle_valid(handle)) { 2197 mpd->io_submit.io_end->handle = handle->h_rsv_handle; 2198 handle->h_rsv_handle = NULL; 2199 } 2200 ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end); 2201 } 2202 2203 BUG_ON(map->m_len == 0); 2204 return 0; 2205 } 2206 2207 /* 2208 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length 2209 * mpd->len and submit pages underlying it for IO 2210 * 2211 * @handle - handle for journal operations 2212 * @mpd - extent to map 2213 * @give_up_on_write - we set this to true iff there is a fatal error and there 2214 * is no hope of writing the data. The caller should discard 2215 * dirty pages to avoid infinite loops. 2216 * 2217 * The function maps the extent starting at mpd->lblk of length mpd->len. If it 2218 * is delayed, blocks are allocated; if it is unwritten, we may need to convert 2219 * them to initialized or split the described range from a larger unwritten 2220 * extent. Note that we need not map all of the described range since allocation 2221 * can return fewer blocks or the range is covered by more unwritten extents. We 2222 * cannot map more because we are limited by reserved transaction credits. On 2223 * the other hand we always make sure that the last touched page is fully 2224 * mapped so that it can be written out (and thus forward progress is 2225 * guaranteed). After mapping we submit all mapped pages for IO.
2226 */ 2227 static int mpage_map_and_submit_extent(handle_t *handle, 2228 struct mpage_da_data *mpd, 2229 bool *give_up_on_write) 2230 { 2231 struct inode *inode = mpd->inode; 2232 struct ext4_map_blocks *map = &mpd->map; 2233 int err; 2234 loff_t disksize; 2235 int progress = 0; 2236 ext4_io_end_t *io_end = mpd->io_submit.io_end; 2237 struct ext4_io_end_vec *io_end_vec; 2238 2239 io_end_vec = ext4_alloc_io_end_vec(io_end); 2240 if (IS_ERR(io_end_vec)) 2241 return PTR_ERR(io_end_vec); 2242 io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits; 2243 do { 2244 err = mpage_map_one_extent(handle, mpd); 2245 if (err < 0) { 2246 struct super_block *sb = inode->i_sb; 2247 2248 if (ext4_forced_shutdown(sb)) 2249 goto invalidate_dirty_pages; 2250 /* 2251 * Let the upper layers retry transient errors. 2252 * In the case of ENOSPC, if ext4_count_free_clusters() 2253 * is non-zero, a commit should free up blocks. 2254 */ 2255 if ((err == -ENOMEM) || 2256 (err == -ENOSPC && ext4_count_free_clusters(sb))) { 2257 if (progress) 2258 goto update_disksize; 2259 return err; 2260 } 2261 ext4_msg(sb, KERN_CRIT, 2262 "Delayed block allocation failed for " 2263 "inode %lu at logical offset %llu with" 2264 " max blocks %u with error %d", 2265 inode->i_ino, 2266 (unsigned long long)map->m_lblk, 2267 (unsigned)map->m_len, -err); 2268 ext4_msg(sb, KERN_CRIT, 2269 "This should not happen!! Data will " 2270 "be lost\n"); 2271 if (err == -ENOSPC) 2272 ext4_print_free_blocks(inode); 2273 invalidate_dirty_pages: 2274 *give_up_on_write = true; 2275 return err; 2276 } 2277 progress = 1; 2278 /* 2279 * Update buffer state, submit mapped pages, and get us new 2280 * extent to map 2281 */ 2282 err = mpage_map_and_submit_buffers(mpd); 2283 if (err < 0) 2284 goto update_disksize; 2285 } while (map->m_len); 2286 2287 update_disksize: 2288 /* 2289 * Update on-disk size after IO is submitted. Races with 2290 * truncate are avoided by checking i_size under i_data_sem. 2291 */ 2292 disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT; 2293 if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) { 2294 int err2; 2295 loff_t i_size; 2296 2297 down_write(&EXT4_I(inode)->i_data_sem); 2298 i_size = i_size_read(inode); 2299 if (disksize > i_size) 2300 disksize = i_size; 2301 if (disksize > EXT4_I(inode)->i_disksize) 2302 EXT4_I(inode)->i_disksize = disksize; 2303 up_write(&EXT4_I(inode)->i_data_sem); 2304 err2 = ext4_mark_inode_dirty(handle, inode); 2305 if (err2) { 2306 ext4_error_err(inode->i_sb, -err2, 2307 "Failed to mark inode %lu dirty", 2308 inode->i_ino); 2309 } 2310 if (!err) 2311 err = err2; 2312 } 2313 return err; 2314 } 2315 2316 /* 2317 * Calculate the total number of credits to reserve for one writepages 2318 * iteration. This is called from ext4_writepages(). We map an extent of 2319 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping 2320 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN + 2321 * bpp - 1 blocks in bpp different extents.
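* (With 1k blocks on 4k pages, for instance, bpp = 4 and a single iteration can map up to 2048 + 3 blocks spread over 4 extents.)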
2322 */ 2323 static int ext4_da_writepages_trans_blocks(struct inode *inode) 2324 { 2325 int bpp = ext4_journal_blocks_per_page(inode); 2326 2327 return ext4_meta_trans_blocks(inode, 2328 MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp); 2329 } 2330 2331 static int ext4_journal_folio_buffers(handle_t *handle, struct folio *folio, 2332 size_t len) 2333 { 2334 struct buffer_head *page_bufs = folio_buffers(folio); 2335 struct inode *inode = folio->mapping->host; 2336 int ret, err; 2337 2338 ret = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len, 2339 NULL, do_journal_get_write_access); 2340 err = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len, 2341 NULL, write_end_fn); 2342 if (ret == 0) 2343 ret = err; 2344 err = ext4_jbd2_inode_add_write(handle, inode, folio_pos(folio), len); 2345 if (ret == 0) 2346 ret = err; 2347 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; 2348 2349 return ret; 2350 } 2351 2352 static int mpage_journal_page_buffers(handle_t *handle, 2353 struct mpage_da_data *mpd, 2354 struct folio *folio) 2355 { 2356 struct inode *inode = mpd->inode; 2357 loff_t size = i_size_read(inode); 2358 size_t len = folio_size(folio); 2359 2360 folio_clear_checked(folio); 2361 mpd->wbc->nr_to_write--; 2362 2363 if (folio_pos(folio) + len > size && 2364 !ext4_verity_in_progress(inode)) 2365 len = size & (len - 1); 2366 2367 return ext4_journal_folio_buffers(handle, folio, len); 2368 } 2369 2370 /* 2371 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages 2372 * needing mapping, submit mapped pages 2373 * 2374 * @mpd - where to look for pages 2375 * 2376 * Walk dirty pages in the mapping. If they are fully mapped, submit them for 2377 * IO immediately. If we cannot map blocks, we submit just the already mapped 2378 * buffers in the page for IO and keep the page dirty. When we can map blocks 2379 * and we find a page which isn't mapped we start accumulating extent of buffers 2380 * underlying these pages that needs mapping (formed by either delayed or 2381 * unwritten buffers). We also lock the pages containing these buffers. The 2382 * extent found is returned in @mpd structure (starting at mpd->lblk with 2383 * length mpd->len blocks). 2384 * 2385 * Note that this function can attach bios to one io_end structure which are 2386 * neither logically nor physically contiguous. Although it may seem like an 2387 * unnecessary complication, it is actually inevitable in blocksize < pagesize 2388 * case as we need to track IO to all buffers underlying a page in one io_end.
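* (With 1k blocks and 4k pages, for instance, one page spans four buffers that may belong to different extents, yet all four must be tracked by the same io_end so the page's writeback completes as a unit.)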
2389 */ 2390 static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) 2391 { 2392 struct address_space *mapping = mpd->inode->i_mapping; 2393 struct folio_batch fbatch; 2394 unsigned int nr_folios; 2395 pgoff_t index = mpd->first_page; 2396 pgoff_t end = mpd->last_page; 2397 xa_mark_t tag; 2398 int i, err = 0; 2399 int blkbits = mpd->inode->i_blkbits; 2400 ext4_lblk_t lblk; 2401 struct buffer_head *head; 2402 handle_t *handle = NULL; 2403 int bpp = ext4_journal_blocks_per_page(mpd->inode); 2404 2405 if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages) 2406 tag = PAGECACHE_TAG_TOWRITE; 2407 else 2408 tag = PAGECACHE_TAG_DIRTY; 2409 2410 mpd->map.m_len = 0; 2411 mpd->next_page = index; 2412 if (ext4_should_journal_data(mpd->inode)) { 2413 handle = ext4_journal_start(mpd->inode, EXT4_HT_WRITE_PAGE, 2414 bpp); 2415 if (IS_ERR(handle)) 2416 return PTR_ERR(handle); 2417 } 2418 folio_batch_init(&fbatch); 2419 while (index <= end) { 2420 nr_folios = filemap_get_folios_tag(mapping, &index, end, 2421 tag, &fbatch); 2422 if (nr_folios == 0) 2423 break; 2424 2425 for (i = 0; i < nr_folios; i++) { 2426 struct folio *folio = fbatch.folios[i]; 2427 2428 /* 2429 * Accumulated enough dirty pages? This doesn't apply 2430 * to WB_SYNC_ALL mode. For integrity sync we have to 2431 * keep going because someone may be concurrently 2432 * dirtying pages, and we might have synced a lot of 2433 * newly appeared dirty pages, but have not synced all 2434 * of the old dirty pages. 2435 */ 2436 if (mpd->wbc->sync_mode == WB_SYNC_NONE && 2437 mpd->wbc->nr_to_write <= 2438 mpd->map.m_len >> (PAGE_SHIFT - blkbits)) 2439 goto out; 2440 2441 /* If we can't merge this page, we are done. */ 2442 if (mpd->map.m_len > 0 && mpd->next_page != folio->index) 2443 goto out; 2444 2445 if (handle) { 2446 err = ext4_journal_ensure_credits(handle, bpp, 2447 0); 2448 if (err < 0) 2449 goto out; 2450 } 2451 2452 folio_lock(folio); 2453 /* 2454 * If the page is no longer dirty, or its mapping no 2455 * longer corresponds to inode we are writing (which 2456 * means it has been truncated or invalidated), or the 2457 * page is already under writeback and we are not doing 2458 * a data integrity writeback, skip the page 2459 */ 2460 if (!folio_test_dirty(folio) || 2461 (folio_test_writeback(folio) && 2462 (mpd->wbc->sync_mode == WB_SYNC_NONE)) || 2463 unlikely(folio->mapping != mapping)) { 2464 folio_unlock(folio); 2465 continue; 2466 } 2467 2468 folio_wait_writeback(folio); 2469 BUG_ON(folio_test_writeback(folio)); 2470 2471 /* 2472 * Should never happen but for buggy code in 2473 * other subsystems that call 2474 * set_page_dirty() without properly warning 2475 * the file system first. See [1] for more 2476 * information. 2477 * 2478 * [1] https://lore.kernel.org/linux-mm/20180103100430.GE4911@quack2.suse.cz 2479 */ 2480 if (!folio_buffers(folio)) { 2481 ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", folio->index); 2482 folio_clear_dirty(folio); 2483 folio_unlock(folio); 2484 continue; 2485 } 2486 2487 if (mpd->map.m_len == 0) 2488 mpd->first_page = folio->index; 2489 mpd->next_page = folio_next_index(folio); 2490 /* 2491 * Writeout when we cannot modify metadata is simple. 2492 * Just submit the page. For data=journal mode we 2493 * first handle writeout of the page for checkpoint and 2494 * only after that handle delayed page dirtying. 
This 2495 * makes sure current data is checkpointed to the final 2496 * location before possibly journalling it again which 2497 * is desirable when the page is frequently dirtied 2498 * through a pin. 2499 */ 2500 if (!mpd->can_map) { 2501 err = mpage_submit_folio(mpd, folio); 2502 if (err < 0) 2503 goto out; 2504 /* Pending dirtying of journalled data? */ 2505 if (folio_test_checked(folio)) { 2506 err = mpage_journal_page_buffers(handle, 2507 mpd, folio); 2508 if (err < 0) 2509 goto out; 2510 mpd->journalled_more_data = 1; 2511 } 2512 mpage_folio_done(mpd, folio); 2513 } else { 2514 /* Add all dirty buffers to mpd */ 2515 lblk = ((ext4_lblk_t)folio->index) << 2516 (PAGE_SHIFT - blkbits); 2517 head = folio_buffers(folio); 2518 err = mpage_process_page_bufs(mpd, head, head, 2519 lblk); 2520 if (err <= 0) 2521 goto out; 2522 err = 0; 2523 } 2524 } 2525 folio_batch_release(&fbatch); 2526 cond_resched(); 2527 } 2528 mpd->scanned_until_end = 1; 2529 if (handle) 2530 ext4_journal_stop(handle); 2531 return 0; 2532 out: 2533 folio_batch_release(&fbatch); 2534 if (handle) 2535 ext4_journal_stop(handle); 2536 return err; 2537 } 2538 2539 static int ext4_do_writepages(struct mpage_da_data *mpd) 2540 { 2541 struct writeback_control *wbc = mpd->wbc; 2542 pgoff_t writeback_index = 0; 2543 long nr_to_write = wbc->nr_to_write; 2544 int range_whole = 0; 2545 int cycled = 1; 2546 handle_t *handle = NULL; 2547 struct inode *inode = mpd->inode; 2548 struct address_space *mapping = inode->i_mapping; 2549 int needed_blocks, rsv_blocks = 0, ret = 0; 2550 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); 2551 struct blk_plug plug; 2552 bool give_up_on_write = false; 2553 2554 trace_ext4_writepages(inode, wbc); 2555 2556 /* 2557 * No pages to write? This is mainly a kludge to avoid starting 2558 * a transaction for special inodes like journal inode on last iput() 2559 * because that could violate lock ordering on umount 2560 */ 2561 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 2562 goto out_writepages; 2563 2564 /* 2565 * If the filesystem has aborted, it is read-only, so return 2566 * right away instead of dumping stack traces later on that 2567 * will obscure the real source of the problem. We test 2568 * fs shutdown state instead of sb->s_flag's SB_RDONLY because 2569 * the latter could be true if the filesystem is mounted 2570 * read-only, and in that case, ext4_writepages should 2571 * *never* be called, so if that ever happens, we would want 2572 * the stack trace. 2573 */ 2574 if (unlikely(ext4_forced_shutdown(mapping->host->i_sb))) { 2575 ret = -EROFS; 2576 goto out_writepages; 2577 } 2578 2579 /* 2580 * If we have inline data and arrive here, it means that 2581 * we will soon create the block for the 1st page, so 2582 * we'd better clear the inline data here. 2583 */ 2584 if (ext4_has_inline_data(inode)) { 2585 /* Just inode will be modified... */ 2586 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1); 2587 if (IS_ERR(handle)) { 2588 ret = PTR_ERR(handle); 2589 goto out_writepages; 2590 } 2591 BUG_ON(ext4_test_inode_state(inode, 2592 EXT4_STATE_MAY_INLINE_DATA)); 2593 ext4_destroy_inline_data(handle, inode); 2594 ext4_journal_stop(handle); 2595 } 2596 2597 /* 2598 * data=journal mode does not do delalloc so we just need to writeout / 2599 * journal already mapped buffers. On the other hand we need to commit 2600 * transaction to make data stable. 
We expect all the data to be 2601 * already in the journal (the only exception being DMA pinned pages 2602 * dirtied behind our back) so we commit the transaction here and run the 2603 * writeback loop to checkpoint them. The checkpointing is not actually 2604 * necessary to make data persistent *but* quite a few places (extent 2605 * shifting operations, fsverity, ...) depend on being able to drop 2606 * pagecache pages after calling filemap_write_and_wait() and for that 2607 * checkpointing needs to happen. 2608 */ 2609 if (ext4_should_journal_data(inode)) { 2610 mpd->can_map = 0; 2611 if (wbc->sync_mode == WB_SYNC_ALL) 2612 ext4_fc_commit(sbi->s_journal, 2613 EXT4_I(inode)->i_datasync_tid); 2614 } 2615 mpd->journalled_more_data = 0; 2616 2617 if (ext4_should_dioread_nolock(inode)) { 2618 /* 2619 * We may need to convert up to one extent per block in 2620 * the page and we may dirty the inode. 2621 */ 2622 rsv_blocks = 1 + ext4_chunk_trans_blocks(inode, 2623 PAGE_SIZE >> inode->i_blkbits); 2624 } 2625 2626 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 2627 range_whole = 1; 2628 2629 if (wbc->range_cyclic) { 2630 writeback_index = mapping->writeback_index; 2631 if (writeback_index) 2632 cycled = 0; 2633 mpd->first_page = writeback_index; 2634 mpd->last_page = -1; 2635 } else { 2636 mpd->first_page = wbc->range_start >> PAGE_SHIFT; 2637 mpd->last_page = wbc->range_end >> PAGE_SHIFT; 2638 } 2639 2640 ext4_io_submit_init(&mpd->io_submit, wbc); 2641 retry: 2642 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 2643 tag_pages_for_writeback(mapping, mpd->first_page, 2644 mpd->last_page); 2645 blk_start_plug(&plug); 2646 2647 /* 2648 * First writeback pages that don't need mapping - we can avoid 2649 * starting a transaction unnecessarily and also avoid being blocked 2650 * in the block layer on device congestion while having transaction 2651 * started. 2652 */ 2653 mpd->do_map = 0; 2654 mpd->scanned_until_end = 0; 2655 mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL); 2656 if (!mpd->io_submit.io_end) { 2657 ret = -ENOMEM; 2658 goto unplug; 2659 } 2660 ret = mpage_prepare_extent_to_map(mpd); 2661 /* Unlock pages we didn't use */ 2662 mpage_release_unused_pages(mpd, false); 2663 /* Submit prepared bio */ 2664 ext4_io_submit(&mpd->io_submit); 2665 ext4_put_io_end_defer(mpd->io_submit.io_end); 2666 mpd->io_submit.io_end = NULL; 2667 if (ret < 0) 2668 goto unplug; 2669 2670 while (!mpd->scanned_until_end && wbc->nr_to_write > 0) { 2671 /* For each extent of pages we use new io_end */ 2672 mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL); 2673 if (!mpd->io_submit.io_end) { 2674 ret = -ENOMEM; 2675 break; 2676 } 2677 2678 WARN_ON_ONCE(!mpd->can_map); 2679 /* 2680 * We have two constraints: We find one extent to map and we 2681 * must always write out the whole page (makes a difference when 2682 * blocksize < pagesize) so that we don't block on IO when we 2683 * try to write out the rest of the page. Journalled mode is 2684 * not supported by delalloc.
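* (The BUG_ON below asserts this: in data=journal mode can_map is 0 and the first, non-mapping pass already scans to the end, so this loop is never entered.)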
2685 */ 2686 BUG_ON(ext4_should_journal_data(inode)); 2687 needed_blocks = ext4_da_writepages_trans_blocks(inode); 2688 2689 /* start a new transaction */ 2690 handle = ext4_journal_start_with_reserve(inode, 2691 EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks); 2692 if (IS_ERR(handle)) { 2693 ret = PTR_ERR(handle); 2694 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " 2695 "%ld pages, ino %lu; err %d", __func__, 2696 wbc->nr_to_write, inode->i_ino, ret); 2697 /* Release allocated io_end */ 2698 ext4_put_io_end(mpd->io_submit.io_end); 2699 mpd->io_submit.io_end = NULL; 2700 break; 2701 } 2702 mpd->do_map = 1; 2703 2704 trace_ext4_da_write_pages(inode, mpd->first_page, wbc); 2705 ret = mpage_prepare_extent_to_map(mpd); 2706 if (!ret && mpd->map.m_len) 2707 ret = mpage_map_and_submit_extent(handle, mpd, 2708 &give_up_on_write); 2709 /* 2710 * Caution: If the handle is synchronous, 2711 * ext4_journal_stop() can wait for transaction commit 2712 * to finish which may depend on writeback of pages to 2713 * complete or on page lock to be released. In that 2714 * case, we have to wait until after we have 2715 * submitted all the IO, released page locks we hold, 2716 * and dropped io_end reference (for extent conversion 2717 * to be able to complete) before stopping the handle. 2718 */ 2719 if (!ext4_handle_valid(handle) || handle->h_sync == 0) { 2720 ext4_journal_stop(handle); 2721 handle = NULL; 2722 mpd->do_map = 0; 2723 } 2724 /* Unlock pages we didn't use */ 2725 mpage_release_unused_pages(mpd, give_up_on_write); 2726 /* Submit prepared bio */ 2727 ext4_io_submit(&mpd->io_submit); 2728 2729 /* 2730 * Drop our io_end reference we got from init. We have 2731 * to be careful and use deferred io_end finishing if 2732 * we are still holding the transaction as we can 2733 * release the last reference to io_end which may end 2734 * up doing unwritten extent conversion. 2735 */ 2736 if (handle) { 2737 ext4_put_io_end_defer(mpd->io_submit.io_end); 2738 ext4_journal_stop(handle); 2739 } else 2740 ext4_put_io_end(mpd->io_submit.io_end); 2741 mpd->io_submit.io_end = NULL; 2742 2743 if (ret == -ENOSPC && sbi->s_journal) { 2744 /* 2745 * Commit the transaction which would 2746 * free blocks released in the transaction 2747 * and try again 2748 */ 2749 jbd2_journal_force_commit_nested(sbi->s_journal); 2750 ret = 0; 2751 continue; 2752 } 2753 /* Fatal error - ENOMEM, EIO... 
*/ 2754 if (ret) 2755 break; 2756 } 2757 unplug: 2758 blk_finish_plug(&plug); 2759 if (!ret && !cycled && wbc->nr_to_write > 0) { 2760 cycled = 1; 2761 mpd->last_page = writeback_index - 1; 2762 mpd->first_page = 0; 2763 goto retry; 2764 } 2765 2766 /* Update index */ 2767 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 2768 /* 2769 * Set the writeback_index so that range_cyclic 2770 * mode will write it back later 2771 */ 2772 mapping->writeback_index = mpd->first_page; 2773 2774 out_writepages: 2775 trace_ext4_writepages_result(inode, wbc, ret, 2776 nr_to_write - wbc->nr_to_write); 2777 return ret; 2778 } 2779 2780 static int ext4_writepages(struct address_space *mapping, 2781 struct writeback_control *wbc) 2782 { 2783 struct super_block *sb = mapping->host->i_sb; 2784 struct mpage_da_data mpd = { 2785 .inode = mapping->host, 2786 .wbc = wbc, 2787 .can_map = 1, 2788 }; 2789 int ret; 2790 int alloc_ctx; 2791 2792 if (unlikely(ext4_forced_shutdown(sb))) 2793 return -EIO; 2794 2795 alloc_ctx = ext4_writepages_down_read(sb); 2796 ret = ext4_do_writepages(&mpd); 2797 /* 2798 * For data=journal writeback we could have come across pages marked 2799 * for delayed dirtying (PageChecked) which were just added to the 2800 * running transaction. Try once more to get them to stable storage. 2801 */ 2802 if (!ret && mpd.journalled_more_data) 2803 ret = ext4_do_writepages(&mpd); 2804 ext4_writepages_up_read(sb, alloc_ctx); 2805 2806 return ret; 2807 } 2808 2809 int ext4_normal_submit_inode_data_buffers(struct jbd2_inode *jinode) 2810 { 2811 struct writeback_control wbc = { 2812 .sync_mode = WB_SYNC_ALL, 2813 .nr_to_write = LONG_MAX, 2814 .range_start = jinode->i_dirty_start, 2815 .range_end = jinode->i_dirty_end, 2816 }; 2817 struct mpage_da_data mpd = { 2818 .inode = jinode->i_vfs_inode, 2819 .wbc = &wbc, 2820 .can_map = 0, 2821 }; 2822 return ext4_do_writepages(&mpd); 2823 } 2824 2825 static int ext4_dax_writepages(struct address_space *mapping, 2826 struct writeback_control *wbc) 2827 { 2828 int ret; 2829 long nr_to_write = wbc->nr_to_write; 2830 struct inode *inode = mapping->host; 2831 int alloc_ctx; 2832 2833 if (unlikely(ext4_forced_shutdown(inode->i_sb))) 2834 return -EIO; 2835 2836 alloc_ctx = ext4_writepages_down_read(inode->i_sb); 2837 trace_ext4_writepages(inode, wbc); 2838 2839 ret = dax_writeback_mapping_range(mapping, 2840 EXT4_SB(inode->i_sb)->s_daxdev, wbc); 2841 trace_ext4_writepages_result(inode, wbc, ret, 2842 nr_to_write - wbc->nr_to_write); 2843 ext4_writepages_up_read(inode->i_sb, alloc_ctx); 2844 return ret; 2845 } 2846 2847 static int ext4_nonda_switch(struct super_block *sb) 2848 { 2849 s64 free_clusters, dirty_clusters; 2850 struct ext4_sb_info *sbi = EXT4_SB(sb); 2851 2852 /* 2853 * Switch to non-delalloc mode if we are running low 2854 * on free blocks. The free block accounting via percpu 2855 * counters can get slightly wrong with percpu_counter_batch getting 2856 * accumulated on each CPU without updating global counters. 2857 * Delalloc needs accurate free block accounting. So switch 2858 * to non-delalloc when we are near the error range. 2859 */ 2860 free_clusters = 2861 percpu_counter_read_positive(&sbi->s_freeclusters_counter); 2862 dirty_clusters = 2863 percpu_counter_read_positive(&sbi->s_dirtyclusters_counter); 2864 /* 2865 * Start pushing delalloc when 1/2 of free blocks are dirty.
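* (The checks below then force the switch once free clusters drop under 150% of dirty clusters, or under dirty clusters plus EXT4_FREECLUSTERS_WATERMARK.)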
2866 */ 2867 if (dirty_clusters && (free_clusters < 2 * dirty_clusters)) 2868 try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE); 2869 2870 if (2 * free_clusters < 3 * dirty_clusters || 2871 free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) { 2872 /* 2873 * free block count is less than 150% of dirty blocks 2874 * or free blocks are less than the watermark 2875 */ 2876 return 1; 2877 } 2878 return 0; 2879 } 2880 2881 static int ext4_da_write_begin(struct file *file, struct address_space *mapping, 2882 loff_t pos, unsigned len, 2883 struct page **pagep, void **fsdata) 2884 { 2885 int ret, retries = 0; 2886 struct folio *folio; 2887 pgoff_t index; 2888 struct inode *inode = mapping->host; 2889 2890 if (unlikely(ext4_forced_shutdown(inode->i_sb))) 2891 return -EIO; 2892 2893 index = pos >> PAGE_SHIFT; 2894 2895 if (ext4_nonda_switch(inode->i_sb) || ext4_verity_in_progress(inode)) { 2896 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; 2897 return ext4_write_begin(file, mapping, pos, 2898 len, pagep, fsdata); 2899 } 2900 *fsdata = (void *)0; 2901 trace_ext4_da_write_begin(inode, pos, len); 2902 2903 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { 2904 ret = ext4_da_write_inline_data_begin(mapping, inode, pos, len, 2905 pagep, fsdata); 2906 if (ret < 0) 2907 return ret; 2908 if (ret == 1) 2909 return 0; 2910 } 2911 2912 retry: 2913 folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN, 2914 mapping_gfp_mask(mapping)); 2915 if (IS_ERR(folio)) 2916 return PTR_ERR(folio); 2917 2918 #ifdef CONFIG_FS_ENCRYPTION 2919 ret = ext4_block_write_begin(folio, pos, len, ext4_da_get_block_prep); 2920 #else 2921 ret = __block_write_begin(&folio->page, pos, len, ext4_da_get_block_prep); 2922 #endif 2923 if (ret < 0) { 2924 folio_unlock(folio); 2925 folio_put(folio); 2926 /* 2927 * block_write_begin may have instantiated a few blocks 2928 * outside i_size. Trim these off again. Don't need 2929 * i_size_read because we hold inode lock. 2930 */ 2931 if (pos + len > inode->i_size) 2932 ext4_truncate_failed_write(inode); 2933 2934 if (ret == -ENOSPC && 2935 ext4_should_retry_alloc(inode->i_sb, &retries)) 2936 goto retry; 2937 return ret; 2938 } 2939 2940 *pagep = &folio->page; 2941 return ret; 2942 } 2943 2944 /* 2945 * Check if we should update i_disksize 2946 * when a write to the end of file does not require block allocation 2947 */ 2948 static int ext4_da_should_update_i_disksize(struct folio *folio, 2949 unsigned long offset) 2950 { 2951 struct buffer_head *bh; 2952 struct inode *inode = folio->mapping->host; 2953 unsigned int idx; 2954 int i; 2955 2956 bh = folio_buffers(folio); 2957 idx = offset >> inode->i_blkbits; 2958 2959 for (i = 0; i < idx; i++) 2960 bh = bh->b_this_page; 2961 2962 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh)) 2963 return 0; 2964 return 1; 2965 } 2966 2967 static int ext4_da_do_write_end(struct address_space *mapping, 2968 loff_t pos, unsigned len, unsigned copied, 2969 struct page *page) 2970 { 2971 struct inode *inode = mapping->host; 2972 loff_t old_size = inode->i_size; 2973 bool disksize_changed = false; 2974 loff_t new_i_size; 2975 2976 /* 2977 * block_write_end() will mark the inode as dirty with the I_DIRTY_PAGES 2978 * flag, which is all that's needed to trigger page writeback.
2979 */ 2980 copied = block_write_end(NULL, mapping, pos, len, copied, page, NULL); 2981 new_i_size = pos + copied; 2982 2983 /* 2984 * It's important to update i_size while still holding page lock, 2985 * because page writeout could otherwise come in and zero beyond 2986 * i_size. 2987 * 2988 * Since we are holding inode lock, we are sure i_disksize <= 2989 * i_size. We also know that if i_disksize < i_size, there are 2990 * delalloc writes pending in the range up to i_size. If the end of 2991 * the current write is <= i_size, there's no need to touch 2992 * i_disksize since writeback will push i_disksize up to i_size 2993 * eventually. If the end of the current write is > i_size and 2994 * inside an allocated block which ext4_da_should_update_i_disksize() 2995 * checked, we need to update i_disksize here as certain 2996 * ext4_writepages() paths not allocating blocks and update i_disksize. 2997 */ 2998 if (new_i_size > inode->i_size) { 2999 unsigned long end; 3000 3001 i_size_write(inode, new_i_size); 3002 end = (new_i_size - 1) & (PAGE_SIZE - 1); 3003 if (copied && ext4_da_should_update_i_disksize(page_folio(page), end)) { 3004 ext4_update_i_disksize(inode, new_i_size); 3005 disksize_changed = true; 3006 } 3007 } 3008 3009 unlock_page(page); 3010 put_page(page); 3011 3012 if (old_size < pos) 3013 pagecache_isize_extended(inode, old_size, pos); 3014 3015 if (disksize_changed) { 3016 handle_t *handle; 3017 3018 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); 3019 if (IS_ERR(handle)) 3020 return PTR_ERR(handle); 3021 ext4_mark_inode_dirty(handle, inode); 3022 ext4_journal_stop(handle); 3023 } 3024 3025 return copied; 3026 } 3027 3028 static int ext4_da_write_end(struct file *file, 3029 struct address_space *mapping, 3030 loff_t pos, unsigned len, unsigned copied, 3031 struct page *page, void *fsdata) 3032 { 3033 struct inode *inode = mapping->host; 3034 int write_mode = (int)(unsigned long)fsdata; 3035 struct folio *folio = page_folio(page); 3036 3037 if (write_mode == FALL_BACK_TO_NONDELALLOC) 3038 return ext4_write_end(file, mapping, pos, 3039 len, copied, &folio->page, fsdata); 3040 3041 trace_ext4_da_write_end(inode, pos, len, copied); 3042 3043 if (write_mode != CONVERT_INLINE_DATA && 3044 ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) && 3045 ext4_has_inline_data(inode)) 3046 return ext4_write_inline_data_end(inode, pos, len, copied, 3047 folio); 3048 3049 if (unlikely(copied < len) && !PageUptodate(page)) 3050 copied = 0; 3051 3052 return ext4_da_do_write_end(mapping, pos, len, copied, &folio->page); 3053 } 3054 3055 /* 3056 * Force all delayed allocation blocks to be allocated for a given inode. 3057 */ 3058 int ext4_alloc_da_blocks(struct inode *inode) 3059 { 3060 trace_ext4_alloc_da_blocks(inode); 3061 3062 if (!EXT4_I(inode)->i_reserved_data_blocks) 3063 return 0; 3064 3065 /* 3066 * We do something simple for now. The filemap_flush() will 3067 * also start triggering a write of the data blocks, which is 3068 * not strictly speaking necessary (and for users of 3069 * laptop_mode, not even desirable). 
However, to do otherwise 3070 * would require replicating code paths in: 3071 * 3072 * ext4_writepages() -> 3073 * write_cache_pages() ---> (via passed in callback function) 3074 * __mpage_da_writepage() --> 3075 * mpage_add_bh_to_extent() 3076 * mpage_da_map_blocks() 3077 * 3078 * The problem is that write_cache_pages(), located in 3079 * mm/page-writeback.c, marks pages clean in preparation for 3080 * doing I/O, which is not desirable if we're not planning on 3081 * doing I/O at all. 3082 * 3083 * We could call write_cache_pages(), and then redirty all of 3084 * the pages by calling redirty_page_for_writepage() but that 3085 * would be ugly in the extreme. So instead we would need to 3086 * replicate parts of the code in the above functions, 3087 * simplifying them because we wouldn't actually intend to 3088 * write out the pages, but rather only collect contiguous 3089 * logical block extents, call the multi-block allocator, and 3090 * then update the buffer heads with the block allocations. 3091 * 3092 * For now, though, we'll cheat by calling filemap_flush(), 3093 * which will map the blocks, and start the I/O, but not 3094 * actually wait for the I/O to complete. 3095 */ 3096 return filemap_flush(inode->i_mapping); 3097 } 3098 3099 /* 3100 * bmap() is special. It gets used by applications such as lilo and by 3101 * the swapper to find the on-disk block of a specific piece of data. 3102 * 3103 * Naturally, this is dangerous if the block concerned is still in the 3104 * journal. If somebody makes a swapfile on an ext4 data-journaling 3105 * filesystem and enables swap, then they may get a nasty shock when the 3106 * data getting swapped to that swapfile suddenly gets overwritten by 3107 * the original zeros written out previously to the journal and 3108 * awaiting writeback in the kernel's buffer cache. 3109 * 3110 * So, if we see any bmap calls here on a modified, data-journaled file, 3111 * take extra steps to flush any blocks which might be in the cache. 3112 */ 3113 static sector_t ext4_bmap(struct address_space *mapping, sector_t block) 3114 { 3115 struct inode *inode = mapping->host; 3116 sector_t ret = 0; 3117 3118 inode_lock_shared(inode); 3119 /* 3120 * We can get here for an inline file via the FIBMAP ioctl 3121 */ 3122 if (ext4_has_inline_data(inode)) 3123 goto out; 3124 3125 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && 3126 (test_opt(inode->i_sb, DELALLOC) || 3127 ext4_should_journal_data(inode))) { 3128 /* 3129 * With delalloc or journalled data we want to sync the file so 3130 * that we can make sure we allocate blocks for the file and the 3131 * data is in place for the user to see it 3132 */ 3133 filemap_write_and_wait(mapping); 3134 } 3135 3136 ret = iomap_bmap(mapping, block, &ext4_iomap_ops); 3137 3138 out: 3139 inode_unlock_shared(inode); 3140 return ret; 3141 } 3142 3143 static int ext4_read_folio(struct file *file, struct folio *folio) 3144 { 3145 int ret = -EAGAIN; 3146 struct inode *inode = folio->mapping->host; 3147 3148 trace_ext4_read_folio(inode, folio); 3149 3150 if (ext4_has_inline_data(inode)) 3151 ret = ext4_readpage_inline(inode, folio); 3152 3153 if (ret == -EAGAIN) 3154 return ext4_mpage_readpages(inode, NULL, folio); 3155 3156 return ret; 3157 } 3158 3159 static void ext4_readahead(struct readahead_control *rac) 3160 { 3161 struct inode *inode = rac->mapping->host; 3162 3163 /* If the file has inline data, no need to do readahead.
*/ 3164 if (ext4_has_inline_data(inode)) 3165 return; 3166 3167 ext4_mpage_readpages(inode, rac, NULL); 3168 } 3169 3170 static void ext4_invalidate_folio(struct folio *folio, size_t offset, 3171 size_t length) 3172 { 3173 trace_ext4_invalidate_folio(folio, offset, length); 3174 3175 /* No journalling happens on data buffers when this function is used */ 3176 WARN_ON(folio_buffers(folio) && buffer_jbd(folio_buffers(folio))); 3177 3178 block_invalidate_folio(folio, offset, length); 3179 } 3180 3181 static int __ext4_journalled_invalidate_folio(struct folio *folio, 3182 size_t offset, size_t length) 3183 { 3184 journal_t *journal = EXT4_JOURNAL(folio->mapping->host); 3185 3186 trace_ext4_journalled_invalidate_folio(folio, offset, length); 3187 3188 /* 3189 * If it's a full truncate we just forget about the pending dirtying 3190 */ 3191 if (offset == 0 && length == folio_size(folio)) 3192 folio_clear_checked(folio); 3193 3194 return jbd2_journal_invalidate_folio(journal, folio, offset, length); 3195 } 3196 3197 /* Wrapper for aops... */ 3198 static void ext4_journalled_invalidate_folio(struct folio *folio, 3199 size_t offset, 3200 size_t length) 3201 { 3202 WARN_ON(__ext4_journalled_invalidate_folio(folio, offset, length) < 0); 3203 } 3204 3205 static bool ext4_release_folio(struct folio *folio, gfp_t wait) 3206 { 3207 struct inode *inode = folio->mapping->host; 3208 journal_t *journal = EXT4_JOURNAL(inode); 3209 3210 trace_ext4_release_folio(inode, folio); 3211 3212 /* Page has dirty journalled data -> cannot release */ 3213 if (folio_test_checked(folio)) 3214 return false; 3215 if (journal) 3216 return jbd2_journal_try_to_free_buffers(journal, folio); 3217 else 3218 return try_to_free_buffers(folio); 3219 } 3220 3221 static bool ext4_inode_datasync_dirty(struct inode *inode) 3222 { 3223 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; 3224 3225 if (journal) { 3226 if (jbd2_transaction_committed(journal, 3227 EXT4_I(inode)->i_datasync_tid)) 3228 return false; 3229 if (test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT)) 3230 return !list_empty(&EXT4_I(inode)->i_fc_list); 3231 return true; 3232 } 3233 3234 /* Any metadata buffers to write? */ 3235 if (!list_empty(&inode->i_mapping->private_list)) 3236 return true; 3237 return inode->i_state & I_DIRTY_DATASYNC; 3238 } 3239 3240 static void ext4_set_iomap(struct inode *inode, struct iomap *iomap, 3241 struct ext4_map_blocks *map, loff_t offset, 3242 loff_t length, unsigned int flags) 3243 { 3244 u8 blkbits = inode->i_blkbits; 3245 3246 /* 3247 * Writes that span EOF might trigger an I/O size update on completion, 3248 * so consider them to be dirty for the purpose of O_DSYNC, even if 3249 * there are no other metadata changes being made or pending. 3250 */ 3251 iomap->flags = 0; 3252 if (ext4_inode_datasync_dirty(inode) || 3253 offset + length > i_size_read(inode)) 3254 iomap->flags |= IOMAP_F_DIRTY; 3255 3256 if (map->m_flags & EXT4_MAP_NEW) 3257 iomap->flags |= IOMAP_F_NEW; 3258 3259 if (flags & IOMAP_DAX) 3260 iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev; 3261 else 3262 iomap->bdev = inode->i_sb->s_bdev; 3263 iomap->offset = (u64) map->m_lblk << blkbits; 3264 iomap->length = (u64) map->m_len << blkbits; 3265 3266 if ((map->m_flags & EXT4_MAP_MAPPED) && 3267 !ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3268 iomap->flags |= IOMAP_F_MERGED; 3269 3270 /* 3271 * Flags passed to ext4_map_blocks() for direct I/O writes can result 3272 * in m_flags having both EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN bits 3273 * set.
In order for any allocated unwritten extents to be converted 3274 * into written extents correctly within the ->end_io() handler, we 3275 * need to ensure that the iomap->type is set appropriately. Hence, the 3276 * reason why we need to check whether the EXT4_MAP_UNWRITTEN bit has 3277 * been set first. 3278 */ 3279 if (map->m_flags & EXT4_MAP_UNWRITTEN) { 3280 iomap->type = IOMAP_UNWRITTEN; 3281 iomap->addr = (u64) map->m_pblk << blkbits; 3282 if (flags & IOMAP_DAX) 3283 iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off; 3284 } else if (map->m_flags & EXT4_MAP_MAPPED) { 3285 iomap->type = IOMAP_MAPPED; 3286 iomap->addr = (u64) map->m_pblk << blkbits; 3287 if (flags & IOMAP_DAX) 3288 iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off; 3289 } else { 3290 iomap->type = IOMAP_HOLE; 3291 iomap->addr = IOMAP_NULL_ADDR; 3292 } 3293 } 3294 3295 static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map, 3296 unsigned int flags) 3297 { 3298 handle_t *handle; 3299 u8 blkbits = inode->i_blkbits; 3300 int ret, dio_credits, m_flags = 0, retries = 0; 3301 3302 /* 3303 * Trim the mapping request to the maximum value that we can map at 3304 * once for direct I/O. 3305 */ 3306 if (map->m_len > DIO_MAX_BLOCKS) 3307 map->m_len = DIO_MAX_BLOCKS; 3308 dio_credits = ext4_chunk_trans_blocks(inode, map->m_len); 3309 3310 retry: 3311 /* 3312 * Either we allocate blocks and then don't get an unwritten extent, so 3313 * in that case we have reserved enough credits. Or, the blocks are 3314 * already allocated and unwritten. In that case, the extent conversion 3315 * fits into the credits as well. 3316 */ 3317 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits); 3318 if (IS_ERR(handle)) 3319 return PTR_ERR(handle); 3320 3321 /* 3322 * DAX and direct I/O are the only two operations that are currently 3323 * supported with IOMAP_WRITE. 3324 */ 3325 WARN_ON(!(flags & (IOMAP_DAX | IOMAP_DIRECT))); 3326 if (flags & IOMAP_DAX) 3327 m_flags = EXT4_GET_BLOCKS_CREATE_ZERO; 3328 /* 3329 * We use i_size instead of i_disksize here because delalloc writeback 3330 * can complete at any point during the I/O and subsequently push the 3331 * i_disksize out to i_size. This could be beyond where direct I/O is 3332 * happening and thus expose allocated blocks to direct I/O reads. 3333 */ 3334 else if (((loff_t)map->m_lblk << blkbits) >= i_size_read(inode)) 3335 m_flags = EXT4_GET_BLOCKS_CREATE; 3336 else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3337 m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT; 3338 3339 ret = ext4_map_blocks(handle, inode, map, m_flags); 3340 3341 /* 3342 * We cannot fill holes in indirect tree based inodes as that could 3343 * expose stale data in the case of a crash. Use the magic error code 3344 * to fallback to buffered I/O. 3345 */ 3346 if (!m_flags && !ret) 3347 ret = -ENOTBLK; 3348 3349 ext4_journal_stop(handle); 3350 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 3351 goto retry; 3352 3353 return ret; 3354 } 3355 3356 3357 static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length, 3358 unsigned flags, struct iomap *iomap, struct iomap *srcmap) 3359 { 3360 int ret; 3361 struct ext4_map_blocks map; 3362 u8 blkbits = inode->i_blkbits; 3363 3364 if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK) 3365 return -EINVAL; 3366 3367 if (WARN_ON_ONCE(ext4_has_inline_data(inode))) 3368 return -ERANGE; 3369 3370 /* 3371 * Calculate the first and last logical blocks respectively. 
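* (With 4k blocks, for example, offset = 5000 and length = 10000 yield map.m_lblk = 1 and map.m_len = 3.)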
3372 */ 3373 map.m_lblk = offset >> blkbits; 3374 map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits, 3375 EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1; 3376 3377 if (flags & IOMAP_WRITE) { 3378 /* 3379 * If the blocks are already allocated, we don't need to start 3380 * a journal txn and we can directly return the mapping 3381 * information. This can boost performance, 3382 * especially for multi-threaded overwrite requests. 3383 */ 3384 if (offset + length <= i_size_read(inode)) { 3385 ret = ext4_map_blocks(NULL, inode, &map, 0); 3386 if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED)) 3387 goto out; 3388 } 3389 ret = ext4_iomap_alloc(inode, &map, flags); 3390 } else { 3391 ret = ext4_map_blocks(NULL, inode, &map, 0); 3392 } 3393 3394 if (ret < 0) 3395 return ret; 3396 out: 3397 /* 3398 * When inline encryption is enabled, sometimes I/O to an encrypted file 3399 * has to be broken up to guarantee DUN contiguity. Handle this by 3400 * limiting the length of the mapping returned. 3401 */ 3402 map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len); 3403 3404 ext4_set_iomap(inode, iomap, &map, offset, length, flags); 3405 3406 return 0; 3407 } 3408 3409 static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset, 3410 loff_t length, unsigned flags, struct iomap *iomap, 3411 struct iomap *srcmap) 3412 { 3413 int ret; 3414 3415 /* 3416 * Even for writes we don't need to allocate blocks, so just pretend 3417 * we are reading to save overhead of starting a transaction. 3418 */ 3419 flags &= ~IOMAP_WRITE; 3420 ret = ext4_iomap_begin(inode, offset, length, flags, iomap, srcmap); 3421 WARN_ON_ONCE(!ret && iomap->type != IOMAP_MAPPED); 3422 return ret; 3423 } 3424 3425 static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length, 3426 ssize_t written, unsigned flags, struct iomap *iomap) 3427 { 3428 /* 3429 * Check to see whether an error occurred while writing out the data to 3430 * the allocated blocks. If so, return the magic error code so that we 3431 * fall back to buffered I/O and attempt to complete the remainder of 3432 * the I/O. Any blocks that may have been allocated in preparation for 3433 * the direct I/O will be reused during buffered I/O.
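* (Here 'written' reflects how much of this mapping the direct I/O completed; only a completely failed write, written == 0, triggers the -ENOTBLK fallback below.)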
3434 */ 3435 if (flags & (IOMAP_WRITE | IOMAP_DIRECT) && written == 0) 3436 return -ENOTBLK; 3437 3438 return 0; 3439 } 3440 3441 const struct iomap_ops ext4_iomap_ops = { 3442 .iomap_begin = ext4_iomap_begin, 3443 .iomap_end = ext4_iomap_end, 3444 }; 3445 3446 const struct iomap_ops ext4_iomap_overwrite_ops = { 3447 .iomap_begin = ext4_iomap_overwrite_begin, 3448 .iomap_end = ext4_iomap_end, 3449 }; 3450 3451 static bool ext4_iomap_is_delalloc(struct inode *inode, 3452 struct ext4_map_blocks *map) 3453 { 3454 struct extent_status es; 3455 ext4_lblk_t offset = 0, end = map->m_lblk + map->m_len - 1; 3456 3457 ext4_es_find_extent_range(inode, &ext4_es_is_delayed, 3458 map->m_lblk, end, &es); 3459 3460 if (!es.es_len || es.es_lblk > end) 3461 return false; 3462 3463 if (es.es_lblk > map->m_lblk) { 3464 map->m_len = es.es_lblk - map->m_lblk; 3465 return false; 3466 } 3467 3468 offset = map->m_lblk - es.es_lblk; 3469 map->m_len = es.es_len - offset; 3470 3471 return true; 3472 } 3473 3474 static int ext4_iomap_begin_report(struct inode *inode, loff_t offset, 3475 loff_t length, unsigned int flags, 3476 struct iomap *iomap, struct iomap *srcmap) 3477 { 3478 int ret; 3479 bool delalloc = false; 3480 struct ext4_map_blocks map; 3481 u8 blkbits = inode->i_blkbits; 3482 3483 if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK) 3484 return -EINVAL; 3485 3486 if (ext4_has_inline_data(inode)) { 3487 ret = ext4_inline_data_iomap(inode, iomap); 3488 if (ret != -EAGAIN) { 3489 if (ret == 0 && offset >= iomap->length) 3490 ret = -ENOENT; 3491 return ret; 3492 } 3493 } 3494 3495 /* 3496 * Calculate the first and last logical block respectively. 3497 */ 3498 map.m_lblk = offset >> blkbits; 3499 map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits, 3500 EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1; 3501 3502 /* 3503 * Fiemap callers may call for offset beyond s_bitmap_maxbytes. 3504 * So handle it here itself instead of querying ext4_map_blocks(). 3505 * Since ext4_map_blocks() will warn about it and will return 3506 * -EIO error. 3507 */ 3508 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 3509 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 3510 3511 if (offset >= sbi->s_bitmap_maxbytes) { 3512 map.m_flags = 0; 3513 goto set_iomap; 3514 } 3515 } 3516 3517 ret = ext4_map_blocks(NULL, inode, &map, 0); 3518 if (ret < 0) 3519 return ret; 3520 if (ret == 0) 3521 delalloc = ext4_iomap_is_delalloc(inode, &map); 3522 3523 set_iomap: 3524 ext4_set_iomap(inode, iomap, &map, offset, length, flags); 3525 if (delalloc && iomap->type == IOMAP_HOLE) 3526 iomap->type = IOMAP_DELALLOC; 3527 3528 return 0; 3529 } 3530 3531 const struct iomap_ops ext4_iomap_report_ops = { 3532 .iomap_begin = ext4_iomap_begin_report, 3533 }; 3534 3535 /* 3536 * For data=journal mode, folio should be marked dirty only when it was 3537 * writeably mapped. When that happens, it was already attached to the 3538 * transaction and marked as jbddirty (we take care of this in 3539 * ext4_page_mkwrite()). On transaction commit, we writeprotect page mappings 3540 * so we should have nothing to do here, except for the case when someone 3541 * had the page pinned and dirtied the page through this pin (e.g. by doing 3542 * direct IO to it). In that case we'd need to attach buffers here to the 3543 * transaction but we cannot due to lock ordering. We cannot just dirty the 3544 * folio and leave attached buffers clean, because the buffers' dirty state is 3545 * "definitive". 
We cannot just set the buffers dirty or jbddirty because all 3546 * the journalling code will explode. So what we do is to mark the folio 3547 * "pending dirty" and next time ext4_writepages() is called, attach buffers 3548 * to the transaction appropriately. 3549 */ 3550 static bool ext4_journalled_dirty_folio(struct address_space *mapping, 3551 struct folio *folio) 3552 { 3553 WARN_ON_ONCE(!folio_buffers(folio)); 3554 if (folio_maybe_dma_pinned(folio)) 3555 folio_set_checked(folio); 3556 return filemap_dirty_folio(mapping, folio); 3557 } 3558 3559 static bool ext4_dirty_folio(struct address_space *mapping, struct folio *folio) 3560 { 3561 WARN_ON_ONCE(!folio_test_locked(folio) && !folio_test_dirty(folio)); 3562 WARN_ON_ONCE(!folio_buffers(folio)); 3563 return block_dirty_folio(mapping, folio); 3564 } 3565 3566 static int ext4_iomap_swap_activate(struct swap_info_struct *sis, 3567 struct file *file, sector_t *span) 3568 { 3569 return iomap_swapfile_activate(sis, file, span, 3570 &ext4_iomap_report_ops); 3571 } 3572 3573 static const struct address_space_operations ext4_aops = { 3574 .read_folio = ext4_read_folio, 3575 .readahead = ext4_readahead, 3576 .writepages = ext4_writepages, 3577 .write_begin = ext4_write_begin, 3578 .write_end = ext4_write_end, 3579 .dirty_folio = ext4_dirty_folio, 3580 .bmap = ext4_bmap, 3581 .invalidate_folio = ext4_invalidate_folio, 3582 .release_folio = ext4_release_folio, 3583 .direct_IO = noop_direct_IO, 3584 .migrate_folio = buffer_migrate_folio, 3585 .is_partially_uptodate = block_is_partially_uptodate, 3586 .error_remove_page = generic_error_remove_page, 3587 .swap_activate = ext4_iomap_swap_activate, 3588 }; 3589 3590 static const struct address_space_operations ext4_journalled_aops = { 3591 .read_folio = ext4_read_folio, 3592 .readahead = ext4_readahead, 3593 .writepages = ext4_writepages, 3594 .write_begin = ext4_write_begin, 3595 .write_end = ext4_journalled_write_end, 3596 .dirty_folio = ext4_journalled_dirty_folio, 3597 .bmap = ext4_bmap, 3598 .invalidate_folio = ext4_journalled_invalidate_folio, 3599 .release_folio = ext4_release_folio, 3600 .direct_IO = noop_direct_IO, 3601 .migrate_folio = buffer_migrate_folio_norefs, 3602 .is_partially_uptodate = block_is_partially_uptodate, 3603 .error_remove_page = generic_error_remove_page, 3604 .swap_activate = ext4_iomap_swap_activate, 3605 }; 3606 3607 static const struct address_space_operations ext4_da_aops = { 3608 .read_folio = ext4_read_folio, 3609 .readahead = ext4_readahead, 3610 .writepages = ext4_writepages, 3611 .write_begin = ext4_da_write_begin, 3612 .write_end = ext4_da_write_end, 3613 .dirty_folio = ext4_dirty_folio, 3614 .bmap = ext4_bmap, 3615 .invalidate_folio = ext4_invalidate_folio, 3616 .release_folio = ext4_release_folio, 3617 .direct_IO = noop_direct_IO, 3618 .migrate_folio = buffer_migrate_folio, 3619 .is_partially_uptodate = block_is_partially_uptodate, 3620 .error_remove_page = generic_error_remove_page, 3621 .swap_activate = ext4_iomap_swap_activate, 3622 }; 3623 3624 static const struct address_space_operations ext4_dax_aops = { 3625 .writepages = ext4_dax_writepages, 3626 .direct_IO = noop_direct_IO, 3627 .dirty_folio = noop_dirty_folio, 3628 .bmap = ext4_bmap, 3629 .swap_activate = ext4_iomap_swap_activate, 3630 }; 3631 3632 void ext4_set_aops(struct inode *inode) 3633 { 3634 switch (ext4_inode_journal_mode(inode)) { 3635 case EXT4_INODE_ORDERED_DATA_MODE: 3636 case EXT4_INODE_WRITEBACK_DATA_MODE: 3637 break; 3638 case EXT4_INODE_JOURNAL_DATA_MODE: 3639 inode->i_mapping->a_ops 
= &ext4_journalled_aops; 3640 return; 3641 default: 3642 BUG(); 3643 } 3644 if (IS_DAX(inode)) 3645 inode->i_mapping->a_ops = &ext4_dax_aops; 3646 else if (test_opt(inode->i_sb, DELALLOC)) 3647 inode->i_mapping->a_ops = &ext4_da_aops; 3648 else 3649 inode->i_mapping->a_ops = &ext4_aops; 3650 } 3651 3652 static int __ext4_block_zero_page_range(handle_t *handle, 3653 struct address_space *mapping, loff_t from, loff_t length) 3654 { 3655 ext4_fsblk_t index = from >> PAGE_SHIFT; 3656 unsigned offset = from & (PAGE_SIZE-1); 3657 unsigned blocksize, pos; 3658 ext4_lblk_t iblock; 3659 struct inode *inode = mapping->host; 3660 struct buffer_head *bh; 3661 struct folio *folio; 3662 int err = 0; 3663 3664 folio = __filemap_get_folio(mapping, from >> PAGE_SHIFT, 3665 FGP_LOCK | FGP_ACCESSED | FGP_CREAT, 3666 mapping_gfp_constraint(mapping, ~__GFP_FS)); 3667 if (IS_ERR(folio)) 3668 return PTR_ERR(folio); 3669 3670 blocksize = inode->i_sb->s_blocksize; 3671 3672 iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits); 3673 3674 bh = folio_buffers(folio); 3675 if (!bh) { 3676 create_empty_buffers(&folio->page, blocksize, 0); 3677 bh = folio_buffers(folio); 3678 } 3679 3680 /* Find the buffer that contains "offset" */ 3681 pos = blocksize; 3682 while (offset >= pos) { 3683 bh = bh->b_this_page; 3684 iblock++; 3685 pos += blocksize; 3686 } 3687 if (buffer_freed(bh)) { 3688 BUFFER_TRACE(bh, "freed: skip"); 3689 goto unlock; 3690 } 3691 if (!buffer_mapped(bh)) { 3692 BUFFER_TRACE(bh, "unmapped"); 3693 ext4_get_block(inode, iblock, bh, 0); 3694 /* unmapped? It's a hole - nothing to do */ 3695 if (!buffer_mapped(bh)) { 3696 BUFFER_TRACE(bh, "still unmapped"); 3697 goto unlock; 3698 } 3699 } 3700 3701 /* Ok, it's mapped. Make sure it's up-to-date */ 3702 if (folio_test_uptodate(folio)) 3703 set_buffer_uptodate(bh); 3704 3705 if (!buffer_uptodate(bh)) { 3706 err = ext4_read_bh_lock(bh, 0, true); 3707 if (err) 3708 goto unlock; 3709 if (fscrypt_inode_uses_fs_layer_crypto(inode)) { 3710 /* We expect the key to be set. */ 3711 BUG_ON(!fscrypt_has_encryption_key(inode)); 3712 err = fscrypt_decrypt_pagecache_blocks(folio, 3713 blocksize, 3714 bh_offset(bh)); 3715 if (err) { 3716 clear_buffer_uptodate(bh); 3717 goto unlock; 3718 } 3719 } 3720 } 3721 if (ext4_should_journal_data(inode)) { 3722 BUFFER_TRACE(bh, "get write access"); 3723 err = ext4_journal_get_write_access(handle, inode->i_sb, bh, 3724 EXT4_JTR_NONE); 3725 if (err) 3726 goto unlock; 3727 } 3728 folio_zero_range(folio, offset, length); 3729 BUFFER_TRACE(bh, "zeroed end of block"); 3730 3731 if (ext4_should_journal_data(inode)) { 3732 err = ext4_dirty_journalled_data(handle, bh); 3733 } else { 3734 err = 0; 3735 mark_buffer_dirty(bh); 3736 if (ext4_should_order_data(inode)) 3737 err = ext4_jbd2_inode_add_write(handle, inode, from, 3738 length); 3739 } 3740 3741 unlock: 3742 folio_unlock(folio); 3743 folio_put(folio); 3744 return err; 3745 } 3746 3747 /* 3748 * ext4_block_zero_page_range() zeros out a mapping of length 'length' 3749 * starting from file offset 'from'. The range to be zeroed must 3750 * be contained within one block. If the specified range exceeds 3751 * the end of the block, it will be shortened to the end of the block 3752 * that corresponds to 'from'.
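 *
 * For example (illustrative numbers, not from the original sources): with
 * a 1024-byte block size,
 *
 *	err = ext4_block_zero_page_range(handle, mapping, 3000, 5000);
 *
 * only touches the block spanning bytes 2048..3071; the length is clamped
 * to max = blocksize - (offset & (blocksize - 1)) = 72, so exactly bytes
 * 3000..3071 are zeroed.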
3753 */ 3754 static int ext4_block_zero_page_range(handle_t *handle, 3755 struct address_space *mapping, loff_t from, loff_t length) 3756 { 3757 struct inode *inode = mapping->host; 3758 unsigned offset = from & (PAGE_SIZE-1); 3759 unsigned blocksize = inode->i_sb->s_blocksize; 3760 unsigned max = blocksize - (offset & (blocksize - 1)); 3761 3762 /* 3763 * Correct the length if it does not fall between 3764 * 'from' and the end of the block. 3765 */ 3766 if (length > max || length < 0) 3767 length = max; 3768 3769 if (IS_DAX(inode)) { 3770 return dax_zero_range(inode, from, length, NULL, 3771 &ext4_iomap_ops); 3772 } 3773 return __ext4_block_zero_page_range(handle, mapping, from, length); 3774 } 3775 3776 /* 3777 * ext4_block_truncate_page() zeroes out a mapping from file offset `from' 3778 * up to the end of the block which corresponds to `from'. 3779 * This is required during truncate. We need to physically zero the tail end 3780 * of that block so it doesn't yield old data if the file is later grown. 3781 */ 3782 static int ext4_block_truncate_page(handle_t *handle, 3783 struct address_space *mapping, loff_t from) 3784 { 3785 unsigned offset = from & (PAGE_SIZE-1); 3786 unsigned length; 3787 unsigned blocksize; 3788 struct inode *inode = mapping->host; 3789 3790 /* If we are processing an encrypted inode during orphan list handling */ 3791 if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode)) 3792 return 0; 3793 3794 blocksize = inode->i_sb->s_blocksize; 3795 length = blocksize - (offset & (blocksize - 1)); 3796 3797 return ext4_block_zero_page_range(handle, mapping, from, length); 3798 } 3799 3800 int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode, 3801 loff_t lstart, loff_t length) 3802 { 3803 struct super_block *sb = inode->i_sb; 3804 struct address_space *mapping = inode->i_mapping; 3805 unsigned partial_start, partial_end; 3806 ext4_fsblk_t start, end; 3807 loff_t byte_end = (lstart + length - 1); 3808 int err = 0; 3809 3810 partial_start = lstart & (sb->s_blocksize - 1); 3811 partial_end = byte_end & (sb->s_blocksize - 1); 3812 3813 start = lstart >> sb->s_blocksize_bits; 3814 end = byte_end >> sb->s_blocksize_bits; 3815 3816 /* Handle partial zero within the single block */ 3817 if (start == end && 3818 (partial_start || (partial_end != sb->s_blocksize - 1))) { 3819 err = ext4_block_zero_page_range(handle, mapping, 3820 lstart, length); 3821 return err; 3822 } 3823 /* Handle partial zero out on the start of the range */ 3824 if (partial_start) { 3825 err = ext4_block_zero_page_range(handle, mapping, 3826 lstart, sb->s_blocksize); 3827 if (err) 3828 return err; 3829 } 3830 /* Handle partial zero out on the end of the range */ 3831 if (partial_end != sb->s_blocksize - 1) 3832 err = ext4_block_zero_page_range(handle, mapping, 3833 byte_end - partial_end, 3834 partial_end + 1); 3835 return err; 3836 } 3837 3838 int ext4_can_truncate(struct inode *inode) 3839 { 3840 if (S_ISREG(inode->i_mode)) 3841 return 1; 3842 if (S_ISDIR(inode->i_mode)) 3843 return 1; 3844 if (S_ISLNK(inode->i_mode)) 3845 return !ext4_inode_is_fast_symlink(inode); 3846 return 0; 3847 } 3848 3849 /* 3850 * We have to make sure i_disksize gets properly updated before we truncate 3851 * the page cache due to hole punching or zero range. Otherwise the i_disksize 3852 * update can get lost as it may have been postponed to submission of writeback, 3853 * but that will never happen after we truncate the page cache.
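 *
 * A sketch of the ordering this implies for hole punching (illustrative;
 * it mirrors what ext4_punch_hole() further below actually does):
 *
 *	err = ext4_update_disksize_before_punch(inode, offset, len);
 *	if (err)
 *		goto out;
 *	truncate_pagecache_range(inode, first_block_offset,
 *				 last_block_offset);
 *
 * i.e. the on-disk size is pushed out first, and only then are the pages
 * dropped, so the postponed i_disksize update cannot be lost.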
3854 */ 3855 int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset, 3856 loff_t len) 3857 { 3858 handle_t *handle; 3859 int ret; 3860 3861 loff_t size = i_size_read(inode); 3862 3863 WARN_ON(!inode_is_locked(inode)); 3864 if (offset > size || offset + len < size) 3865 return 0; 3866 3867 if (EXT4_I(inode)->i_disksize >= size) 3868 return 0; 3869 3870 handle = ext4_journal_start(inode, EXT4_HT_MISC, 1); 3871 if (IS_ERR(handle)) 3872 return PTR_ERR(handle); 3873 ext4_update_i_disksize(inode, size); 3874 ret = ext4_mark_inode_dirty(handle, inode); 3875 ext4_journal_stop(handle); 3876 3877 return ret; 3878 } 3879 3880 static void ext4_wait_dax_page(struct inode *inode) 3881 { 3882 filemap_invalidate_unlock(inode->i_mapping); 3883 schedule(); 3884 filemap_invalidate_lock(inode->i_mapping); 3885 } 3886 3887 int ext4_break_layouts(struct inode *inode) 3888 { 3889 struct page *page; 3890 int error; 3891 3892 if (WARN_ON_ONCE(!rwsem_is_locked(&inode->i_mapping->invalidate_lock))) 3893 return -EINVAL; 3894 3895 do { 3896 page = dax_layout_busy_page(inode->i_mapping); 3897 if (!page) 3898 return 0; 3899 3900 error = ___wait_var_event(&page->_refcount, 3901 atomic_read(&page->_refcount) == 1, 3902 TASK_INTERRUPTIBLE, 0, 0, 3903 ext4_wait_dax_page(inode)); 3904 } while (error == 0); 3905 3906 return error; 3907 } 3908 3909 /* 3910 * ext4_punch_hole: punches a hole in a file by releasing the blocks 3911 * associated with the given offset and length 3912 * 3913 * @file: The file within which the hole will be punched 3914 * @offset: The offset where the hole will begin 3915 * @length: The length of the hole 3916 * 3917 * Returns: 0 on success or negative on failure 3918 */ 3919 3920 int ext4_punch_hole(struct file *file, loff_t offset, loff_t length) 3921 { 3922 struct inode *inode = file_inode(file); 3923 struct super_block *sb = inode->i_sb; 3924 ext4_lblk_t first_block, stop_block; 3925 struct address_space *mapping = inode->i_mapping; 3926 loff_t first_block_offset, last_block_offset, max_length; 3927 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 3928 handle_t *handle; 3929 unsigned int credits; 3930 int ret = 0, ret2 = 0; 3931 3932 trace_ext4_punch_hole(inode, offset, length, 0); 3933 3934 /* 3935 * Write out all dirty pages to avoid race conditions, 3936 * then release them. 3937 */ 3938 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { 3939 ret = filemap_write_and_wait_range(mapping, offset, 3940 offset + length - 1); 3941 if (ret) 3942 return ret; 3943 } 3944 3945 inode_lock(inode); 3946 3947 /* No need to punch hole beyond i_size */ 3948 if (offset >= inode->i_size) 3949 goto out_mutex; 3950 3951 /* 3952 * If the hole extends beyond i_size, set the hole 3953 * to end after the page that contains i_size 3954 */ 3955 if (offset + length > inode->i_size) { 3956 length = inode->i_size + 3957 PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) - 3958 offset; 3959 } 3960 3961 /* 3962 * For hole punching, offset + length must stay at least one block short 3963 * of s_bitmap_maxbytes. Trim the length if it goes beyond that limit.
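 *
 * A worked example with assumed numbers: on a filesystem whose
 * s_bitmap_maxbytes happens to be 2^42, with 4K blocks,
 *
 *	max_length = (1ULL << 42) - 4096;
 *
 * and a request with offset + length > max_length is trimmed to
 * length = max_length - offset before any blocks are removed.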
3964 */ 3965 max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize; 3966 if (offset + length > max_length) 3967 length = max_length - offset; 3968 3969 if (offset & (sb->s_blocksize - 1) || 3970 (offset + length) & (sb->s_blocksize - 1)) { 3971 /* 3972 * Attach jinode to inode for jbd2 if we do any zeroing of 3973 * partial block 3974 */ 3975 ret = ext4_inode_attach_jinode(inode); 3976 if (ret < 0) 3977 goto out_mutex; 3978 3979 } 3980 3981 /* Wait all existing dio workers, newcomers will block on i_rwsem */ 3982 inode_dio_wait(inode); 3983 3984 ret = file_modified(file); 3985 if (ret) 3986 goto out_mutex; 3987 3988 /* 3989 * Prevent page faults from reinstantiating pages we have released from 3990 * page cache. 3991 */ 3992 filemap_invalidate_lock(mapping); 3993 3994 ret = ext4_break_layouts(inode); 3995 if (ret) 3996 goto out_dio; 3997 3998 first_block_offset = round_up(offset, sb->s_blocksize); 3999 last_block_offset = round_down((offset + length), sb->s_blocksize) - 1; 4000 4001 /* Now release the pages and zero block aligned part of pages*/ 4002 if (last_block_offset > first_block_offset) { 4003 ret = ext4_update_disksize_before_punch(inode, offset, length); 4004 if (ret) 4005 goto out_dio; 4006 truncate_pagecache_range(inode, first_block_offset, 4007 last_block_offset); 4008 } 4009 4010 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4011 credits = ext4_writepage_trans_blocks(inode); 4012 else 4013 credits = ext4_blocks_for_truncate(inode); 4014 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 4015 if (IS_ERR(handle)) { 4016 ret = PTR_ERR(handle); 4017 ext4_std_error(sb, ret); 4018 goto out_dio; 4019 } 4020 4021 ret = ext4_zero_partial_blocks(handle, inode, offset, 4022 length); 4023 if (ret) 4024 goto out_stop; 4025 4026 first_block = (offset + sb->s_blocksize - 1) >> 4027 EXT4_BLOCK_SIZE_BITS(sb); 4028 stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb); 4029 4030 /* If there are blocks to remove, do it */ 4031 if (stop_block > first_block) { 4032 4033 down_write(&EXT4_I(inode)->i_data_sem); 4034 ext4_discard_preallocations(inode, 0); 4035 4036 ext4_es_remove_extent(inode, first_block, 4037 stop_block - first_block); 4038 4039 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4040 ret = ext4_ext_remove_space(inode, first_block, 4041 stop_block - 1); 4042 else 4043 ret = ext4_ind_remove_space(handle, inode, first_block, 4044 stop_block); 4045 4046 up_write(&EXT4_I(inode)->i_data_sem); 4047 } 4048 ext4_fc_track_range(handle, inode, first_block, stop_block); 4049 if (IS_SYNC(inode)) 4050 ext4_handle_sync(handle); 4051 4052 inode->i_mtime = inode_set_ctime_current(inode); 4053 ret2 = ext4_mark_inode_dirty(handle, inode); 4054 if (unlikely(ret2)) 4055 ret = ret2; 4056 if (ret >= 0) 4057 ext4_update_inode_fsync_trans(handle, inode, 1); 4058 out_stop: 4059 ext4_journal_stop(handle); 4060 out_dio: 4061 filemap_invalidate_unlock(mapping); 4062 out_mutex: 4063 inode_unlock(inode); 4064 return ret; 4065 } 4066 4067 int ext4_inode_attach_jinode(struct inode *inode) 4068 { 4069 struct ext4_inode_info *ei = EXT4_I(inode); 4070 struct jbd2_inode *jinode; 4071 4072 if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal) 4073 return 0; 4074 4075 jinode = jbd2_alloc_inode(GFP_KERNEL); 4076 spin_lock(&inode->i_lock); 4077 if (!ei->jinode) { 4078 if (!jinode) { 4079 spin_unlock(&inode->i_lock); 4080 return -ENOMEM; 4081 } 4082 ei->jinode = jinode; 4083 jbd2_journal_init_jbd_inode(ei->jinode, inode); 4084 jinode = NULL; 4085 } 4086 spin_unlock(&inode->i_lock); 4087 if 
(unlikely(jinode != NULL)) 4088 jbd2_free_inode(jinode); 4089 return 0; 4090 } 4091 4092 /* 4093 * ext4_truncate() 4094 * 4095 * We block out ext4_get_block() block instantiations across the entire 4096 * transaction, and VFS/VM ensures that ext4_truncate() cannot run 4097 * simultaneously on behalf of the same inode. 4098 * 4099 * As we work through the truncate and commit bits of it to the journal there 4100 * is one core, guiding principle: the file's tree must always be consistent on 4101 * disk. We must be able to restart the truncate after a crash. 4102 * 4103 * The file's tree may be transiently inconsistent in memory (although it 4104 * probably isn't), but whenever we close off and commit a journal transaction, 4105 * the contents of (the filesystem + the journal) must be consistent and 4106 * restartable. It's pretty simple, really: bottom up, right to left (although 4107 * left-to-right works OK too). 4108 * 4109 * Note that at recovery time, journal replay occurs *before* the restart of 4110 * truncate against the orphan inode list. 4111 * 4112 * The committed inode has the new, desired i_size (which is the same as 4113 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see 4114 * that this inode's truncate did not complete and it will again call 4115 * ext4_truncate() to have another go. So there will be instantiated blocks 4116 * to the right of the truncation point in a crashed ext4 filesystem. But 4117 * that's fine - as long as they are linked from the inode, the post-crash 4118 * ext4_truncate() run will find them and release them. 4119 */ 4120 int ext4_truncate(struct inode *inode) 4121 { 4122 struct ext4_inode_info *ei = EXT4_I(inode); 4123 unsigned int credits; 4124 int err = 0, err2; 4125 handle_t *handle; 4126 struct address_space *mapping = inode->i_mapping; 4127 4128 /* 4129 * There is a possibility that we're either freeing the inode 4130 * or it's a completely new inode. In those cases we might not 4131 * have i_rwsem locked because it's not necessary. 4132 */ 4133 if (!(inode->i_state & (I_NEW|I_FREEING))) 4134 WARN_ON(!inode_is_locked(inode)); 4135 trace_ext4_truncate_enter(inode); 4136 4137 if (!ext4_can_truncate(inode)) 4138 goto out_trace; 4139 4140 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) 4141 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); 4142 4143 if (ext4_has_inline_data(inode)) { 4144 int has_inline = 1; 4145 4146 err = ext4_inline_data_truncate(inode, &has_inline); 4147 if (err || has_inline) 4148 goto out_trace; 4149 } 4150 4151 /* If we zero-out tail of the page, we have to create jinode for jbd2 */ 4152 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) { 4153 err = ext4_inode_attach_jinode(inode); 4154 if (err) 4155 goto out_trace; 4156 } 4157 4158 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4159 credits = ext4_writepage_trans_blocks(inode); 4160 else 4161 credits = ext4_blocks_for_truncate(inode); 4162 4163 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 4164 if (IS_ERR(handle)) { 4165 err = PTR_ERR(handle); 4166 goto out_trace; 4167 } 4168 4169 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) 4170 ext4_block_truncate_page(handle, mapping, inode->i_size); 4171 4172 /* 4173 * We add the inode to the orphan list, so that if this 4174 * truncate spans multiple transactions, and we crash, we will 4175 * resume the truncate when the filesystem recovers. It also 4176 * marks the inode dirty, to catch the new size. 
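 *
 * The resulting pattern is roughly (a sketch of the code below, error
 * handling elided):
 *
 *	err = ext4_orphan_add(handle, inode);
 *	... free blocks, possibly over several transactions ...
 *	if (inode->i_nlink)
 *		ext4_orphan_del(handle, inode);
 *
 * If we crash in between, ext4_orphan_cleanup() finds the inode on the
 * orphan list at recovery time and finishes the job for us.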
4177 * 4178 * Implication: the file must always be in a sane, consistent 4179 * truncatable state while each transaction commits. 4180 */ 4181 err = ext4_orphan_add(handle, inode); 4182 if (err) 4183 goto out_stop; 4184 4185 down_write(&EXT4_I(inode)->i_data_sem); 4186 4187 ext4_discard_preallocations(inode, 0); 4188 4189 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4190 err = ext4_ext_truncate(handle, inode); 4191 else 4192 ext4_ind_truncate(handle, inode); 4193 4194 up_write(&ei->i_data_sem); 4195 if (err) 4196 goto out_stop; 4197 4198 if (IS_SYNC(inode)) 4199 ext4_handle_sync(handle); 4200 4201 out_stop: 4202 /* 4203 * If this was a simple ftruncate() and the file will remain alive, 4204 * then we need to clear up the orphan record which we created above. 4205 * However, if this was a real unlink then we were called by 4206 * ext4_evict_inode(), and we allow that function to clean up the 4207 * orphan info for us. 4208 */ 4209 if (inode->i_nlink) 4210 ext4_orphan_del(handle, inode); 4211 4212 inode->i_mtime = inode_set_ctime_current(inode); 4213 err2 = ext4_mark_inode_dirty(handle, inode); 4214 if (unlikely(err2 && !err)) 4215 err = err2; 4216 ext4_journal_stop(handle); 4217 4218 out_trace: 4219 trace_ext4_truncate_exit(inode); 4220 return err; 4221 } 4222 4223 static inline u64 ext4_inode_peek_iversion(const struct inode *inode) 4224 { 4225 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) 4226 return inode_peek_iversion_raw(inode); 4227 else 4228 return inode_peek_iversion(inode); 4229 } 4230 4231 static int ext4_inode_blocks_set(struct ext4_inode *raw_inode, 4232 struct ext4_inode_info *ei) 4233 { 4234 struct inode *inode = &(ei->vfs_inode); 4235 u64 i_blocks = READ_ONCE(inode->i_blocks); 4236 struct super_block *sb = inode->i_sb; 4237 4238 if (i_blocks <= ~0U) { 4239 /* 4240 * i_blocks can be represented in a 32 bit variable 4241 * as a multiple of 512 bytes 4242 */ 4243 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 4244 raw_inode->i_blocks_high = 0; 4245 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 4246 return 0; 4247 } 4248 4249 /* 4250 * This should never happen since sb->s_maxbytes should not have 4251 * allowed this; sb->s_maxbytes was set according to the huge_file 4252 * feature in ext4_fill_super().
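 *
 * For reference, the encoding implemented below (upper bounds are on
 * i_blocks, which is counted in 512-byte units unless noted):
 *
 *	i_blocks <= 0xffffffff:      i_blocks_lo only, HUGE_FILE cleared
 *	i_blocks <= 0xffffffffffff:  i_blocks_lo + i_blocks_high, 48 bits
 *	larger:                      48 bits in filesystem-block units,
 *	                             with HUGE_FILE set on the inode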
4253 */ 4254 if (!ext4_has_feature_huge_file(sb)) 4255 return -EFSCORRUPTED; 4256 4257 if (i_blocks <= 0xffffffffffffULL) { 4258 /* 4259 * i_blocks can be represented in a 48 bit variable 4260 * as a multiple of 512 bytes 4261 */ 4262 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 4263 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 4264 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 4265 } else { 4266 ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE); 4267 /* i_blocks is stored in units of the file system block size */ 4268 i_blocks = i_blocks >> (inode->i_blkbits - 9); 4269 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 4270 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 4271 } 4272 return 0; 4273 } 4274 4275 static int ext4_fill_raw_inode(struct inode *inode, struct ext4_inode *raw_inode) 4276 { 4277 struct ext4_inode_info *ei = EXT4_I(inode); 4278 uid_t i_uid; 4279 gid_t i_gid; 4280 projid_t i_projid; 4281 int block; 4282 int err; 4283 4284 err = ext4_inode_blocks_set(raw_inode, ei); 4285 4286 raw_inode->i_mode = cpu_to_le16(inode->i_mode); 4287 i_uid = i_uid_read(inode); 4288 i_gid = i_gid_read(inode); 4289 i_projid = from_kprojid(&init_user_ns, ei->i_projid); 4290 if (!(test_opt(inode->i_sb, NO_UID32))) { 4291 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid)); 4292 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid)); 4293 /* 4294 * Fix up interoperability with old kernels. Otherwise, 4295 * old inodes get re-used with the upper 16 bits of the 4296 * uid/gid intact. 4297 */ 4298 if (ei->i_dtime && list_empty(&ei->i_orphan)) { 4299 raw_inode->i_uid_high = 0; 4300 raw_inode->i_gid_high = 0; 4301 } else { 4302 raw_inode->i_uid_high = 4303 cpu_to_le16(high_16_bits(i_uid)); 4304 raw_inode->i_gid_high = 4305 cpu_to_le16(high_16_bits(i_gid)); 4306 } 4307 } else { 4308 raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid)); 4309 raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid)); 4310 raw_inode->i_uid_high = 0; 4311 raw_inode->i_gid_high = 0; 4312 } 4313 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 4314 4315 EXT4_INODE_SET_CTIME(inode, raw_inode); 4316 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); 4317 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); 4318 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); 4319 4320 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); 4321 raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF); 4322 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) 4323 raw_inode->i_file_acl_high = 4324 cpu_to_le16(ei->i_file_acl >> 32); 4325 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); 4326 ext4_isize_set(raw_inode, ei->i_disksize); 4327 4328 raw_inode->i_generation = cpu_to_le32(inode->i_generation); 4329 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 4330 if (old_valid_dev(inode->i_rdev)) { 4331 raw_inode->i_block[0] = 4332 cpu_to_le32(old_encode_dev(inode->i_rdev)); 4333 raw_inode->i_block[1] = 0; 4334 } else { 4335 raw_inode->i_block[0] = 0; 4336 raw_inode->i_block[1] = 4337 cpu_to_le32(new_encode_dev(inode->i_rdev)); 4338 raw_inode->i_block[2] = 0; 4339 } 4340 } else if (!ext4_has_inline_data(inode)) { 4341 for (block = 0; block < EXT4_N_BLOCKS; block++) 4342 raw_inode->i_block[block] = ei->i_data[block]; 4343 } 4344 4345 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) { 4346 u64 ivers = ext4_inode_peek_iversion(inode); 4347 4348 raw_inode->i_disk_version = cpu_to_le32(ivers); 4349 if (ei->i_extra_isize) { 4350 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 4351 raw_inode->i_version_hi = 4352 cpu_to_le32(ivers
>> 32); 4353 raw_inode->i_extra_isize = 4354 cpu_to_le16(ei->i_extra_isize); 4355 } 4356 } 4357 4358 if (i_projid != EXT4_DEF_PROJID && 4359 !ext4_has_feature_project(inode->i_sb)) 4360 err = err ?: -EFSCORRUPTED; 4361 4362 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && 4363 EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) 4364 raw_inode->i_projid = cpu_to_le32(i_projid); 4365 4366 ext4_inode_csum_set(inode, raw_inode, ei); 4367 return err; 4368 } 4369 4370 /* 4371 * ext4_get_inode_loc returns with an extra refcount against the inode's 4372 * underlying buffer_head on success. If we pass 'inode' and it does not 4373 * have in-inode xattr, we have all inode data in memory that is needed 4374 * to recreate the on-disk version of this inode. 4375 */ 4376 static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino, 4377 struct inode *inode, struct ext4_iloc *iloc, 4378 ext4_fsblk_t *ret_block) 4379 { 4380 struct ext4_group_desc *gdp; 4381 struct buffer_head *bh; 4382 ext4_fsblk_t block; 4383 struct blk_plug plug; 4384 int inodes_per_block, inode_offset; 4385 4386 iloc->bh = NULL; 4387 if (ino < EXT4_ROOT_INO || 4388 ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)) 4389 return -EFSCORRUPTED; 4390 4391 iloc->block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); 4392 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); 4393 if (!gdp) 4394 return -EIO; 4395 4396 /* 4397 * Figure out the offset within the block group inode table 4398 */ 4399 inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; 4400 inode_offset = ((ino - 1) % 4401 EXT4_INODES_PER_GROUP(sb)); 4402 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); 4403 4404 block = ext4_inode_table(sb, gdp); 4405 if ((block <= le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) || 4406 (block >= ext4_blocks_count(EXT4_SB(sb)->s_es))) { 4407 ext4_error(sb, "Invalid inode table block %llu in " 4408 "block_group %u", block, iloc->block_group); 4409 return -EFSCORRUPTED; 4410 } 4411 block += (inode_offset / inodes_per_block); 4412 4413 bh = sb_getblk(sb, block); 4414 if (unlikely(!bh)) 4415 return -ENOMEM; 4416 if (ext4_buffer_uptodate(bh)) 4417 goto has_buffer; 4418 4419 lock_buffer(bh); 4420 if (ext4_buffer_uptodate(bh)) { 4421 /* Someone brought it uptodate while we waited */ 4422 unlock_buffer(bh); 4423 goto has_buffer; 4424 } 4425 4426 /* 4427 * If we have all information of the inode in memory and this 4428 * is the only valid inode in the block, we need not read the 4429 * block. 4430 */ 4431 if (inode && !ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { 4432 struct buffer_head *bitmap_bh; 4433 int i, start; 4434 4435 start = inode_offset & ~(inodes_per_block - 1); 4436 4437 /* Is the inode bitmap in cache? */ 4438 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); 4439 if (unlikely(!bitmap_bh)) 4440 goto make_io; 4441 4442 /* 4443 * If the inode bitmap isn't in cache then the 4444 * optimisation may end up performing two reads instead 4445 * of one, so skip it. 
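 *
 * For intuition (illustrative geometry): with 4K blocks and 256-byte
 * inodes, inodes_per_block is 16, so
 *
 *	start = inode_offset & ~(inodes_per_block - 1);
 *
 * above picks the first of the 16 inodes sharing our inode table block,
 * and the loop below tests the other 15 bits in the inode bitmap.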
4446 */ 4447 if (!buffer_uptodate(bitmap_bh)) { 4448 brelse(bitmap_bh); 4449 goto make_io; 4450 } 4451 for (i = start; i < start + inodes_per_block; i++) { 4452 if (i == inode_offset) 4453 continue; 4454 if (ext4_test_bit(i, bitmap_bh->b_data)) 4455 break; 4456 } 4457 brelse(bitmap_bh); 4458 if (i == start + inodes_per_block) { 4459 struct ext4_inode *raw_inode = 4460 (struct ext4_inode *) (bh->b_data + iloc->offset); 4461 4462 /* all other inodes are free, so skip I/O */ 4463 memset(bh->b_data, 0, bh->b_size); 4464 if (!ext4_test_inode_state(inode, EXT4_STATE_NEW)) 4465 ext4_fill_raw_inode(inode, raw_inode); 4466 set_buffer_uptodate(bh); 4467 unlock_buffer(bh); 4468 goto has_buffer; 4469 } 4470 } 4471 4472 make_io: 4473 /* 4474 * If we need to do any I/O, try to pre-readahead extra 4475 * blocks from the inode table. 4476 */ 4477 blk_start_plug(&plug); 4478 if (EXT4_SB(sb)->s_inode_readahead_blks) { 4479 ext4_fsblk_t b, end, table; 4480 unsigned num; 4481 __u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks; 4482 4483 table = ext4_inode_table(sb, gdp); 4484 /* s_inode_readahead_blks is always a power of 2 */ 4485 b = block & ~((ext4_fsblk_t) ra_blks - 1); 4486 if (table > b) 4487 b = table; 4488 end = b + ra_blks; 4489 num = EXT4_INODES_PER_GROUP(sb); 4490 if (ext4_has_group_desc_csum(sb)) 4491 num -= ext4_itable_unused_count(sb, gdp); 4492 table += num / inodes_per_block; 4493 if (end > table) 4494 end = table; 4495 while (b <= end) 4496 ext4_sb_breadahead_unmovable(sb, b++); 4497 } 4498 4499 /* 4500 * There are other valid inodes in the buffer, this inode 4501 * has in-inode xattrs, or we don't have this inode in memory. 4502 * Read the block from disk. 4503 */ 4504 trace_ext4_load_inode(sb, ino); 4505 ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO, NULL); 4506 blk_finish_plug(&plug); 4507 wait_on_buffer(bh); 4508 ext4_simulate_fail_bh(sb, bh, EXT4_SIM_INODE_EIO); 4509 if (!buffer_uptodate(bh)) { 4510 if (ret_block) 4511 *ret_block = block; 4512 brelse(bh); 4513 return -EIO; 4514 } 4515 has_buffer: 4516 iloc->bh = bh; 4517 return 0; 4518 } 4519 4520 static int __ext4_get_inode_loc_noinmem(struct inode *inode, 4521 struct ext4_iloc *iloc) 4522 { 4523 ext4_fsblk_t err_blk = 0; 4524 int ret; 4525 4526 ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, NULL, iloc, 4527 &err_blk); 4528 4529 if (ret == -EIO) 4530 ext4_error_inode_block(inode, err_blk, EIO, 4531 "unable to read itable block"); 4532 4533 return ret; 4534 } 4535 4536 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) 4537 { 4538 ext4_fsblk_t err_blk = 0; 4539 int ret; 4540 4541 ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, inode, iloc, 4542 &err_blk); 4543 4544 if (ret == -EIO) 4545 ext4_error_inode_block(inode, err_blk, EIO, 4546 "unable to read itable block"); 4547 4548 return ret; 4549 } 4550 4551 4552 int ext4_get_fc_inode_loc(struct super_block *sb, unsigned long ino, 4553 struct ext4_iloc *iloc) 4554 { 4555 return __ext4_get_inode_loc(sb, ino, NULL, iloc, NULL); 4556 } 4557 4558 static bool ext4_should_enable_dax(struct inode *inode) 4559 { 4560 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4561 4562 if (test_opt2(inode->i_sb, DAX_NEVER)) 4563 return false; 4564 if (!S_ISREG(inode->i_mode)) 4565 return false; 4566 if (ext4_should_journal_data(inode)) 4567 return false; 4568 if (ext4_has_inline_data(inode)) 4569 return false; 4570 if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT)) 4571 return false; 4572 if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY)) 4573 return false; 4574 if 
(!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags)) 4575 return false; 4576 if (test_opt(inode->i_sb, DAX_ALWAYS)) 4577 return true; 4578 4579 return ext4_test_inode_flag(inode, EXT4_INODE_DAX); 4580 } 4581 4582 void ext4_set_inode_flags(struct inode *inode, bool init) 4583 { 4584 unsigned int flags = EXT4_I(inode)->i_flags; 4585 unsigned int new_fl = 0; 4586 4587 WARN_ON_ONCE(IS_DAX(inode) && init); 4588 4589 if (flags & EXT4_SYNC_FL) 4590 new_fl |= S_SYNC; 4591 if (flags & EXT4_APPEND_FL) 4592 new_fl |= S_APPEND; 4593 if (flags & EXT4_IMMUTABLE_FL) 4594 new_fl |= S_IMMUTABLE; 4595 if (flags & EXT4_NOATIME_FL) 4596 new_fl |= S_NOATIME; 4597 if (flags & EXT4_DIRSYNC_FL) 4598 new_fl |= S_DIRSYNC; 4599 4600 /* Because of the way inode_set_flags() works we must preserve S_DAX 4601 * here if already set. */ 4602 new_fl |= (inode->i_flags & S_DAX); 4603 if (init && ext4_should_enable_dax(inode)) 4604 new_fl |= S_DAX; 4605 4606 if (flags & EXT4_ENCRYPT_FL) 4607 new_fl |= S_ENCRYPTED; 4608 if (flags & EXT4_CASEFOLD_FL) 4609 new_fl |= S_CASEFOLD; 4610 if (flags & EXT4_VERITY_FL) 4611 new_fl |= S_VERITY; 4612 inode_set_flags(inode, new_fl, 4613 S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX| 4614 S_ENCRYPTED|S_CASEFOLD|S_VERITY); 4615 } 4616 4617 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, 4618 struct ext4_inode_info *ei) 4619 { 4620 blkcnt_t i_blocks; 4621 struct inode *inode = &(ei->vfs_inode); 4622 struct super_block *sb = inode->i_sb; 4623 4624 if (ext4_has_feature_huge_file(sb)) { 4625 /* we are using combined 48 bit field */ 4626 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | 4627 le32_to_cpu(raw_inode->i_blocks_lo); 4628 if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) { 4629 /* i_blocks is in units of the file system block size */ 4630 return i_blocks << (inode->i_blkbits - 9); 4631 } else { 4632 return i_blocks; 4633 } 4634 } else { 4635 return le32_to_cpu(raw_inode->i_blocks_lo); 4636 } 4637 } 4638 4639 static inline int ext4_iget_extra_inode(struct inode *inode, 4640 struct ext4_inode *raw_inode, 4641 struct ext4_inode_info *ei) 4642 { 4643 __le32 *magic = (void *)raw_inode + 4644 EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize; 4645 4646 if (EXT4_INODE_HAS_XATTR_SPACE(inode) && 4647 *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { 4648 int err; 4649 4650 ext4_set_inode_state(inode, EXT4_STATE_XATTR); 4651 err = ext4_find_inline_data_nolock(inode); 4652 if (!err && ext4_has_inline_data(inode)) 4653 ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); 4654 return err; 4655 } else 4656 EXT4_I(inode)->i_inline_off = 0; 4657 return 0; 4658 } 4659 4660 int ext4_get_projid(struct inode *inode, kprojid_t *projid) 4661 { 4662 if (!ext4_has_feature_project(inode->i_sb)) 4663 return -EOPNOTSUPP; 4664 *projid = EXT4_I(inode)->i_projid; 4665 return 0; 4666 } 4667 4668 /* 4669 * ext4 has self-managed i_version for ea inodes; it stores the lower 32 bits 4670 * of the refcount in i_version, so use raw values if the inode has the 4671 * EXT4_EA_INODE_FL flag set.
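 *
 * (Roughly, ext4_xattr_inode_get_ref() in fs/ext4/xattr.c reassembles the
 * reference count as
 *
 *	ref = ((u64)inode_get_ctime(ea_inode).tv_sec << 32) |
 *	      (u32)inode_peek_iversion_raw(ea_inode);
 *
 * so letting the VFS bump i_version on such inodes would corrupt the
 * stored refcount.)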
4672 */ 4673 static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val) 4674 { 4675 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) 4676 inode_set_iversion_raw(inode, val); 4677 else 4678 inode_set_iversion_queried(inode, val); 4679 } 4680 4681 static const char *check_igot_inode(struct inode *inode, ext4_iget_flags flags) 4682 4683 { 4684 if (flags & EXT4_IGET_EA_INODE) { 4685 if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) 4686 return "missing EA_INODE flag"; 4687 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR) || 4688 EXT4_I(inode)->i_file_acl) 4689 return "ea_inode with extended attributes"; 4690 } else { 4691 if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) 4692 return "unexpected EA_INODE flag"; 4693 } 4694 if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) 4695 return "unexpected bad inode w/o EXT4_IGET_BAD"; 4696 return NULL; 4697 } 4698 4699 struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, 4700 ext4_iget_flags flags, const char *function, 4701 unsigned int line) 4702 { 4703 struct ext4_iloc iloc; 4704 struct ext4_inode *raw_inode; 4705 struct ext4_inode_info *ei; 4706 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 4707 struct inode *inode; 4708 const char *err_str; 4709 journal_t *journal = EXT4_SB(sb)->s_journal; 4710 long ret; 4711 loff_t size; 4712 int block; 4713 uid_t i_uid; 4714 gid_t i_gid; 4715 projid_t i_projid; 4716 4717 if ((!(flags & EXT4_IGET_SPECIAL) && 4718 ((ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) || 4719 ino == le32_to_cpu(es->s_usr_quota_inum) || 4720 ino == le32_to_cpu(es->s_grp_quota_inum) || 4721 ino == le32_to_cpu(es->s_prj_quota_inum) || 4722 ino == le32_to_cpu(es->s_orphan_file_inum))) || 4723 (ino < EXT4_ROOT_INO) || 4724 (ino > le32_to_cpu(es->s_inodes_count))) { 4725 if (flags & EXT4_IGET_HANDLE) 4726 return ERR_PTR(-ESTALE); 4727 __ext4_error(sb, function, line, false, EFSCORRUPTED, 0, 4728 "inode #%lu: comm %s: iget: illegal inode #", 4729 ino, current->comm); 4730 return ERR_PTR(-EFSCORRUPTED); 4731 } 4732 4733 inode = iget_locked(sb, ino); 4734 if (!inode) 4735 return ERR_PTR(-ENOMEM); 4736 if (!(inode->i_state & I_NEW)) { 4737 if ((err_str = check_igot_inode(inode, flags)) != NULL) { 4738 ext4_error_inode(inode, function, line, 0, err_str); 4739 iput(inode); 4740 return ERR_PTR(-EFSCORRUPTED); 4741 } 4742 return inode; 4743 } 4744 4745 ei = EXT4_I(inode); 4746 iloc.bh = NULL; 4747 4748 ret = __ext4_get_inode_loc_noinmem(inode, &iloc); 4749 if (ret < 0) 4750 goto bad_inode; 4751 raw_inode = ext4_raw_inode(&iloc); 4752 4753 if ((flags & EXT4_IGET_HANDLE) && 4754 (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) { 4755 ret = -ESTALE; 4756 goto bad_inode; 4757 } 4758 4759 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4760 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 4761 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 4762 EXT4_INODE_SIZE(inode->i_sb) || 4763 (ei->i_extra_isize & 3)) { 4764 ext4_error_inode(inode, function, line, 0, 4765 "iget: bad extra_isize %u " 4766 "(inode size %u)", 4767 ei->i_extra_isize, 4768 EXT4_INODE_SIZE(inode->i_sb)); 4769 ret = -EFSCORRUPTED; 4770 goto bad_inode; 4771 } 4772 } else 4773 ei->i_extra_isize = 0; 4774 4775 /* Precompute checksum seed for inode metadata */ 4776 if (ext4_has_metadata_csum(sb)) { 4777 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4778 __u32 csum; 4779 __le32 inum = cpu_to_le32(inode->i_ino); 4780 __le32 gen = raw_inode->i_generation; 4781 csum = ext4_chksum(sbi, sbi->s_csum_seed, 
(__u8 *)&inum, 4782 sizeof(inum)); 4783 ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, 4784 sizeof(gen)); 4785 } 4786 4787 if ((!ext4_inode_csum_verify(inode, raw_inode, ei) || 4788 ext4_simulate_fail(sb, EXT4_SIM_INODE_CRC)) && 4789 (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))) { 4790 ext4_error_inode_err(inode, function, line, 0, 4791 EFSBADCRC, "iget: checksum invalid"); 4792 ret = -EFSBADCRC; 4793 goto bad_inode; 4794 } 4795 4796 inode->i_mode = le16_to_cpu(raw_inode->i_mode); 4797 i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 4798 i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 4799 if (ext4_has_feature_project(sb) && 4800 EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE && 4801 EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) 4802 i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid); 4803 else 4804 i_projid = EXT4_DEF_PROJID; 4805 4806 if (!(test_opt(inode->i_sb, NO_UID32))) { 4807 i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; 4808 i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; 4809 } 4810 i_uid_write(inode, i_uid); 4811 i_gid_write(inode, i_gid); 4812 ei->i_projid = make_kprojid(&init_user_ns, i_projid); 4813 set_nlink(inode, le16_to_cpu(raw_inode->i_links_count)); 4814 4815 ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ 4816 ei->i_inline_off = 0; 4817 ei->i_dir_start_lookup = 0; 4818 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); 4819 /* We now have enough fields to check if the inode was active or not. 4820 * This is needed because nfsd might try to access dead inodes; 4821 * the test is the same one that e2fsck uses. 4822 * NeilBrown 1999oct15 4823 */ 4824 if (inode->i_nlink == 0) { 4825 if ((inode->i_mode == 0 || flags & EXT4_IGET_SPECIAL || 4826 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) && 4827 ino != EXT4_BOOT_LOADER_INO) { 4828 /* this inode is deleted or unallocated */ 4829 if (flags & EXT4_IGET_SPECIAL) { 4830 ext4_error_inode(inode, function, line, 0, 4831 "iget: special inode unallocated"); 4832 ret = -EFSCORRUPTED; 4833 } else 4834 ret = -ESTALE; 4835 goto bad_inode; 4836 } 4837 /* The only unlinked inodes we let through here have 4838 * valid i_mode and are being read by the orphan 4839 * recovery code: that's fine, we're about to complete 4840 * the process of deleting those. 4841 * OR it is the EXT4_BOOT_LOADER_INO which is 4842 * not initialized on a new filesystem. */ 4843 } 4844 ei->i_flags = le32_to_cpu(raw_inode->i_flags); 4845 ext4_set_inode_flags(inode, true); 4846 inode->i_blocks = ext4_inode_blocks(raw_inode, ei); 4847 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); 4848 if (ext4_has_feature_64bit(sb)) 4849 ei->i_file_acl |= 4850 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; 4851 inode->i_size = ext4_isize(sb, raw_inode); 4852 if ((size = i_size_read(inode)) < 0) { 4853 ext4_error_inode(inode, function, line, 0, 4854 "iget: bad i_size value: %lld", size); 4855 ret = -EFSCORRUPTED; 4856 goto bad_inode; 4857 } 4858 /* 4859 * If dir_index is not enabled but there's a dir with the INDEX flag set, 4860 * we'd normally treat htree data as empty space. But with metadata 4861 * checksumming enabled, that stale data corrupts the checksum, so forbid it.
4862 */ 4863 if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) && 4864 ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) { 4865 ext4_error_inode(inode, function, line, 0, 4866 "iget: Dir with htree data on filesystem without dir_index feature."); 4867 ret = -EFSCORRUPTED; 4868 goto bad_inode; 4869 } 4870 ei->i_disksize = inode->i_size; 4871 #ifdef CONFIG_QUOTA 4872 ei->i_reserved_quota = 0; 4873 #endif 4874 inode->i_generation = le32_to_cpu(raw_inode->i_generation); 4875 ei->i_block_group = iloc.block_group; 4876 ei->i_last_alloc_group = ~0; 4877 /* 4878 * NOTE! The in-memory inode i_data array is in little-endian order 4879 * even on big-endian machines: we do NOT byteswap the block numbers! 4880 */ 4881 for (block = 0; block < EXT4_N_BLOCKS; block++) 4882 ei->i_data[block] = raw_inode->i_block[block]; 4883 INIT_LIST_HEAD(&ei->i_orphan); 4884 ext4_fc_init_inode(&ei->vfs_inode); 4885 4886 /* 4887 * Set transaction id's of transactions that have to be committed 4888 * to finish f[data]sync. We set them to currently running transaction 4889 * as we cannot be sure that the inode or some of its metadata isn't 4890 * part of the transaction - the inode could have been reclaimed and 4891 * now it is reread from disk. 4892 */ 4893 if (journal) { 4894 transaction_t *transaction; 4895 tid_t tid; 4896 4897 read_lock(&journal->j_state_lock); 4898 if (journal->j_running_transaction) 4899 transaction = journal->j_running_transaction; 4900 else 4901 transaction = journal->j_committing_transaction; 4902 if (transaction) 4903 tid = transaction->t_tid; 4904 else 4905 tid = journal->j_commit_sequence; 4906 read_unlock(&journal->j_state_lock); 4907 ei->i_sync_tid = tid; 4908 ei->i_datasync_tid = tid; 4909 } 4910 4911 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4912 if (ei->i_extra_isize == 0) { 4913 /* The extra space is currently unused. Use it. 
*/ 4914 BUILD_BUG_ON(sizeof(struct ext4_inode) & 3); 4915 ei->i_extra_isize = sizeof(struct ext4_inode) - 4916 EXT4_GOOD_OLD_INODE_SIZE; 4917 } else { 4918 ret = ext4_iget_extra_inode(inode, raw_inode, ei); 4919 if (ret) 4920 goto bad_inode; 4921 } 4922 } 4923 4924 EXT4_INODE_GET_CTIME(inode, raw_inode); 4925 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); 4926 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); 4927 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); 4928 4929 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) { 4930 u64 ivers = le32_to_cpu(raw_inode->i_disk_version); 4931 4932 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4933 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 4934 ivers |= 4935 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; 4936 } 4937 ext4_inode_set_iversion_queried(inode, ivers); 4938 } 4939 4940 ret = 0; 4941 if (ei->i_file_acl && 4942 !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) { 4943 ext4_error_inode(inode, function, line, 0, 4944 "iget: bad extended attribute block %llu", 4945 ei->i_file_acl); 4946 ret = -EFSCORRUPTED; 4947 goto bad_inode; 4948 } else if (!ext4_has_inline_data(inode)) { 4949 /* validate the block references in the inode */ 4950 if (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) && 4951 (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 4952 (S_ISLNK(inode->i_mode) && 4953 !ext4_inode_is_fast_symlink(inode)))) { 4954 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4955 ret = ext4_ext_check_inode(inode); 4956 else 4957 ret = ext4_ind_check_inode(inode); 4958 } 4959 } 4960 if (ret) 4961 goto bad_inode; 4962 4963 if (S_ISREG(inode->i_mode)) { 4964 inode->i_op = &ext4_file_inode_operations; 4965 inode->i_fop = &ext4_file_operations; 4966 ext4_set_aops(inode); 4967 } else if (S_ISDIR(inode->i_mode)) { 4968 inode->i_op = &ext4_dir_inode_operations; 4969 inode->i_fop = &ext4_dir_operations; 4970 } else if (S_ISLNK(inode->i_mode)) { 4971 /* VFS does not allow setting these so must be corruption */ 4972 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) { 4973 ext4_error_inode(inode, function, line, 0, 4974 "iget: immutable or append flags " 4975 "not allowed on symlinks"); 4976 ret = -EFSCORRUPTED; 4977 goto bad_inode; 4978 } 4979 if (IS_ENCRYPTED(inode)) { 4980 inode->i_op = &ext4_encrypted_symlink_inode_operations; 4981 } else if (ext4_inode_is_fast_symlink(inode)) { 4982 inode->i_link = (char *)ei->i_data; 4983 inode->i_op = &ext4_fast_symlink_inode_operations; 4984 nd_terminate_link(ei->i_data, inode->i_size, 4985 sizeof(ei->i_data) - 1); 4986 } else { 4987 inode->i_op = &ext4_symlink_inode_operations; 4988 } 4989 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || 4990 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { 4991 inode->i_op = &ext4_special_inode_operations; 4992 if (raw_inode->i_block[0]) 4993 init_special_inode(inode, inode->i_mode, 4994 old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 4995 else 4996 init_special_inode(inode, inode->i_mode, 4997 new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 4998 } else if (ino == EXT4_BOOT_LOADER_INO) { 4999 make_bad_inode(inode); 5000 } else { 5001 ret = -EFSCORRUPTED; 5002 ext4_error_inode(inode, function, line, 0, 5003 "iget: bogus i_mode (%o)", inode->i_mode); 5004 goto bad_inode; 5005 } 5006 if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb)) { 5007 ext4_error_inode(inode, function, line, 0, 5008 "casefold flag without casefold feature"); 5009 ret = -EFSCORRUPTED; 5010 goto bad_inode; 5011 } 5012 if ((err_str = 
check_igot_inode(inode, flags)) != NULL) { 5013 ext4_error_inode(inode, function, line, 0, err_str); 5014 ret = -EFSCORRUPTED; 5015 goto bad_inode; 5016 } 5017 5018 brelse(iloc.bh); 5019 unlock_new_inode(inode); 5020 return inode; 5021 5022 bad_inode: 5023 brelse(iloc.bh); 5024 iget_failed(inode); 5025 return ERR_PTR(ret); 5026 } 5027 5028 static void __ext4_update_other_inode_time(struct super_block *sb, 5029 unsigned long orig_ino, 5030 unsigned long ino, 5031 struct ext4_inode *raw_inode) 5032 { 5033 struct inode *inode; 5034 5035 inode = find_inode_by_ino_rcu(sb, ino); 5036 if (!inode) 5037 return; 5038 5039 if (!inode_is_dirtytime_only(inode)) 5040 return; 5041 5042 spin_lock(&inode->i_lock); 5043 if (inode_is_dirtytime_only(inode)) { 5044 struct ext4_inode_info *ei = EXT4_I(inode); 5045 5046 inode->i_state &= ~I_DIRTY_TIME; 5047 spin_unlock(&inode->i_lock); 5048 5049 spin_lock(&ei->i_raw_lock); 5050 EXT4_INODE_SET_CTIME(inode, raw_inode); 5051 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); 5052 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); 5053 ext4_inode_csum_set(inode, raw_inode, ei); 5054 spin_unlock(&ei->i_raw_lock); 5055 trace_ext4_other_inode_update_time(inode, orig_ino); 5056 return; 5057 } 5058 spin_unlock(&inode->i_lock); 5059 } 5060 5061 /* 5062 * Opportunistically update the other time fields for other inodes in 5063 * the same inode table block. 5064 */ 5065 static void ext4_update_other_inodes_time(struct super_block *sb, 5066 unsigned long orig_ino, char *buf) 5067 { 5068 unsigned long ino; 5069 int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; 5070 int inode_size = EXT4_INODE_SIZE(sb); 5071 5072 /* 5073 * Calculate the first inode in the inode table block. Inode 5074 * numbers are one-based. That is, the first inode in a block 5075 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1). 5076 */ 5077 ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1; 5078 rcu_read_lock(); 5079 for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) { 5080 if (ino == orig_ino) 5081 continue; 5082 __ext4_update_other_inode_time(sb, orig_ino, ino, 5083 (struct ext4_inode *)buf); 5084 } 5085 rcu_read_unlock(); 5086 } 5087 5088 /* 5089 * Post the struct inode info into an on-disk inode location in the 5090 * buffer-cache. This gobbles the caller's reference to the 5091 * buffer_head in the inode location struct. 5092 * 5093 * The caller must have write access to iloc->bh. 5094 */ 5095 static int ext4_do_update_inode(handle_t *handle, 5096 struct inode *inode, 5097 struct ext4_iloc *iloc) 5098 { 5099 struct ext4_inode *raw_inode = ext4_raw_inode(iloc); 5100 struct ext4_inode_info *ei = EXT4_I(inode); 5101 struct buffer_head *bh = iloc->bh; 5102 struct super_block *sb = inode->i_sb; 5103 int err; 5104 int need_datasync = 0, set_large_file = 0; 5105 5106 spin_lock(&ei->i_raw_lock); 5107 5108 /* 5109 * For fields not tracked in the in-memory inode, initialise them 5110 * to zero for new inodes. 
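 *
 * E.g. a freshly allocated inode arrives here with EXT4_STATE_NEW set by
 * the ext4_new_inode() path, takes the memset below, and has the flag
 * cleared again once the buffer has been handed to the journal, so the
 * zero-fill happens exactly once per on-disk (re)initialisation.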
5111 */ 5112 if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) 5113 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); 5114 5115 if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode)) 5116 need_datasync = 1; 5117 if (ei->i_disksize > 0x7fffffffULL) { 5118 if (!ext4_has_feature_large_file(sb) || 5119 EXT4_SB(sb)->s_es->s_rev_level == cpu_to_le32(EXT4_GOOD_OLD_REV)) 5120 set_large_file = 1; 5121 } 5122 5123 err = ext4_fill_raw_inode(inode, raw_inode); 5124 spin_unlock(&ei->i_raw_lock); 5125 if (err) { 5126 EXT4_ERROR_INODE(inode, "corrupted inode contents"); 5127 goto out_brelse; 5128 } 5129 5130 if (inode->i_sb->s_flags & SB_LAZYTIME) 5131 ext4_update_other_inodes_time(inode->i_sb, inode->i_ino, 5132 bh->b_data); 5133 5134 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 5135 err = ext4_handle_dirty_metadata(handle, NULL, bh); 5136 if (err) 5137 goto out_error; 5138 ext4_clear_inode_state(inode, EXT4_STATE_NEW); 5139 if (set_large_file) { 5140 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access"); 5141 err = ext4_journal_get_write_access(handle, sb, 5142 EXT4_SB(sb)->s_sbh, 5143 EXT4_JTR_NONE); 5144 if (err) 5145 goto out_error; 5146 lock_buffer(EXT4_SB(sb)->s_sbh); 5147 ext4_set_feature_large_file(sb); 5148 ext4_superblock_csum_set(sb); 5149 unlock_buffer(EXT4_SB(sb)->s_sbh); 5150 ext4_handle_sync(handle); 5151 err = ext4_handle_dirty_metadata(handle, NULL, 5152 EXT4_SB(sb)->s_sbh); 5153 } 5154 ext4_update_inode_fsync_trans(handle, inode, need_datasync); 5155 out_error: 5156 ext4_std_error(inode->i_sb, err); 5157 out_brelse: 5158 brelse(bh); 5159 return err; 5160 } 5161 5162 /* 5163 * ext4_write_inode() 5164 * 5165 * We are called from a few places: 5166 * 5167 * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files. 5168 * Here, there will be no transaction running. We wait for any running 5169 * transaction to commit. 5170 * 5171 * - Within flush work (sys_sync(), kupdate and such). 5172 * We wait on commit, if told to. 5173 * 5174 * - Within iput_final() -> write_inode_now() 5175 * We wait on commit, if told to. 5176 * 5177 * In all cases it is actually safe for us to return without doing anything, 5178 * because the inode has been copied into a raw inode buffer in 5179 * ext4_mark_inode_dirty(). This is a correctness thing for WB_SYNC_ALL 5180 * writeback. 5181 * 5182 * Note that we are absolutely dependent upon all inode dirtiers doing the 5183 * right thing: they *must* call mark_inode_dirty() after dirtying info in 5184 * which we are interested. 5185 * 5186 * It would be a bug for them to not do this. The code: 5187 * 5188 * mark_inode_dirty(inode) 5189 * stuff(); 5190 * inode->i_size = expr; 5191 * 5192 * is in error because write_inode() could occur while `stuff()' is running, 5193 * and the new i_size will be lost. Plus the inode will no longer be on the 5194 * superblock's dirty inode list. 5195 */ 5196 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) 5197 { 5198 int err; 5199 5200 if (WARN_ON_ONCE(current->flags & PF_MEMALLOC)) 5201 return 0; 5202 5203 if (unlikely(ext4_forced_shutdown(inode->i_sb))) 5204 return -EIO; 5205 5206 if (EXT4_SB(inode->i_sb)->s_journal) { 5207 if (ext4_journal_current_handle()) { 5208 ext4_debug("called recursively, non-PF_MEMALLOC!\n"); 5209 dump_stack(); 5210 return -EIO; 5211 } 5212 5213 /* 5214 * No need to force transaction in WB_SYNC_NONE mode. Also 5215 * ext4_sync_fs() will force the commit after everything is 5216 * written. 
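 *
 * Illustrative caller's view (a sketch): a WB_SYNC_ALL writeback that is
 * not part of sync(2), e.g. write_inode_now() from iput_final(), reaches
 * the fast-commit wait below via
 *
 *	struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL };
 *	err = ext4_write_inode(inode, &wbc);
 *
 * while WB_SYNC_NONE writeback and sync(2) (wbc->for_sync set) return
 * early here.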
5217 */ 5218 if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync) 5219 return 0; 5220 5221 err = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal, 5222 EXT4_I(inode)->i_sync_tid); 5223 } else { 5224 struct ext4_iloc iloc; 5225 5226 err = __ext4_get_inode_loc_noinmem(inode, &iloc); 5227 if (err) 5228 return err; 5229 /* 5230 * sync(2) will flush the whole buffer cache. No need to do 5231 * it here separately for each inode. 5232 */ 5233 if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) 5234 sync_dirty_buffer(iloc.bh); 5235 if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { 5236 ext4_error_inode_block(inode, iloc.bh->b_blocknr, EIO, 5237 "IO error syncing inode"); 5238 err = -EIO; 5239 } 5240 brelse(iloc.bh); 5241 } 5242 return err; 5243 } 5244 5245 /* 5246 * In data=journal mode ext4_journalled_invalidate_folio() may fail to invalidate 5247 * buffers that are attached to a folio straddling i_size and are undergoing 5248 * commit. In that case we have to wait for commit to finish and try again. 5249 */ 5250 static void ext4_wait_for_tail_page_commit(struct inode *inode) 5251 { 5252 unsigned offset; 5253 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; 5254 tid_t commit_tid = 0; 5255 int ret; 5256 5257 offset = inode->i_size & (PAGE_SIZE - 1); 5258 /* 5259 * If the folio is fully truncated, we don't need to wait for any commit 5260 * (and we even should not as __ext4_journalled_invalidate_folio() may 5261 * strip all buffers from the folio but keep the folio dirty which can then 5262 * confuse e.g. concurrent ext4_writepages() seeing dirty folio without 5263 * buffers). Also we don't need to wait for any commit if all buffers in 5264 * the folio remain valid. This is most beneficial for the common case of 5265 * blocksize == PAGESIZE. 5266 */ 5267 if (!offset || offset > (PAGE_SIZE - i_blocksize(inode))) 5268 return; 5269 while (1) { 5270 struct folio *folio = filemap_lock_folio(inode->i_mapping, 5271 inode->i_size >> PAGE_SHIFT); 5272 if (IS_ERR(folio)) 5273 return; 5274 ret = __ext4_journalled_invalidate_folio(folio, offset, 5275 folio_size(folio) - offset); 5276 folio_unlock(folio); 5277 folio_put(folio); 5278 if (ret != -EBUSY) 5279 return; 5280 commit_tid = 0; 5281 read_lock(&journal->j_state_lock); 5282 if (journal->j_committing_transaction) 5283 commit_tid = journal->j_committing_transaction->t_tid; 5284 read_unlock(&journal->j_state_lock); 5285 if (commit_tid) 5286 jbd2_log_wait_commit(journal, commit_tid); 5287 } 5288 } 5289 5290 /* 5291 * ext4_setattr() 5292 * 5293 * Called from notify_change. 5294 * 5295 * We want to trap VFS attempts to truncate the file as soon as 5296 * possible. In particular, we want to make sure that when the VFS 5297 * shrinks i_size, we put the inode on the orphan list and modify 5298 * i_disksize immediately, so that during the subsequent flushing of 5299 * dirty pages and freeing of disk blocks, we can guarantee that any 5300 * commit will leave the blocks being flushed in an unused state on 5301 * disk. (On recovery, the inode will get truncated and the blocks will 5302 * be freed, so we have a strong guarantee that no future commit will 5303 * leave these blocks visible to the user.) 5304 * 5305 * Another thing we have to assure is that if we are in ordered mode 5306 * and the inode is still attached to the committing transaction, we must 5307 * start writeout of all the dirty pages which are being truncated.
5308 * This way we are sure that all the data written in the previous 5309 * transaction are already on disk (truncate waits for pages under 5310 * writeback). 5311 * 5312 * Called with inode->i_rwsem down. 5313 */ 5314 int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry, 5315 struct iattr *attr) 5316 { 5317 struct inode *inode = d_inode(dentry); 5318 int error, rc = 0; 5319 int orphan = 0; 5320 const unsigned int ia_valid = attr->ia_valid; 5321 bool inc_ivers = true; 5322 5323 if (unlikely(ext4_forced_shutdown(inode->i_sb))) 5324 return -EIO; 5325 5326 if (unlikely(IS_IMMUTABLE(inode))) 5327 return -EPERM; 5328 5329 if (unlikely(IS_APPEND(inode) && 5330 (ia_valid & (ATTR_MODE | ATTR_UID | 5331 ATTR_GID | ATTR_TIMES_SET)))) 5332 return -EPERM; 5333 5334 error = setattr_prepare(idmap, dentry, attr); 5335 if (error) 5336 return error; 5337 5338 error = fscrypt_prepare_setattr(dentry, attr); 5339 if (error) 5340 return error; 5341 5342 error = fsverity_prepare_setattr(dentry, attr); 5343 if (error) 5344 return error; 5345 5346 if (is_quota_modification(idmap, inode, attr)) { 5347 error = dquot_initialize(inode); 5348 if (error) 5349 return error; 5350 } 5351 5352 if (i_uid_needs_update(idmap, attr, inode) || 5353 i_gid_needs_update(idmap, attr, inode)) { 5354 handle_t *handle; 5355 5356 /* (user+group)*(old+new) structure, inode write (sb, 5357 * inode block, ? - but truncate inode update has it) */ 5358 handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 5359 (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) + 5360 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3); 5361 if (IS_ERR(handle)) { 5362 error = PTR_ERR(handle); 5363 goto err_out; 5364 } 5365 5366 /* dquot_transfer() calls back ext4_get_inode_usage() which 5367 * counts xattr inode references. 5368 */ 5369 down_read(&EXT4_I(inode)->xattr_sem); 5370 error = dquot_transfer(idmap, inode, attr); 5371 up_read(&EXT4_I(inode)->xattr_sem); 5372 5373 if (error) { 5374 ext4_journal_stop(handle); 5375 return error; 5376 } 5377 /* Update corresponding info in inode so that everything is in 5378 * one transaction */ 5379 i_uid_update(idmap, attr, inode); 5380 i_gid_update(idmap, attr, inode); 5381 error = ext4_mark_inode_dirty(handle, inode); 5382 ext4_journal_stop(handle); 5383 if (unlikely(error)) { 5384 return error; 5385 } 5386 } 5387 5388 if (attr->ia_valid & ATTR_SIZE) { 5389 handle_t *handle; 5390 loff_t oldsize = inode->i_size; 5391 loff_t old_disksize; 5392 int shrink = (attr->ia_size < inode->i_size); 5393 5394 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 5395 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 5396 5397 if (attr->ia_size > sbi->s_bitmap_maxbytes) { 5398 return -EFBIG; 5399 } 5400 } 5401 if (!S_ISREG(inode->i_mode)) { 5402 return -EINVAL; 5403 } 5404 5405 if (attr->ia_size == inode->i_size) 5406 inc_ivers = false; 5407 5408 if (shrink) { 5409 if (ext4_should_order_data(inode)) { 5410 error = ext4_begin_ordered_truncate(inode, 5411 attr->ia_size); 5412 if (error) 5413 goto err_out; 5414 } 5415 /* 5416 * Blocks are going to be removed from the inode. Wait 5417 * for dio in flight. 
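 *
 * (Ordering note for this shrink path: ext4_begin_ordered_truncate()
 * above has already arranged writeout of the pages being cut off, and
 * inode_dio_wait() below drains direct I/O already in flight; new direct
 * I/O cannot start because we hold inode->i_rwsem.)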
5418 */
5419 inode_dio_wait(inode);
5420 }
5421
5422 filemap_invalidate_lock(inode->i_mapping);
5423
5424 rc = ext4_break_layouts(inode);
5425 if (rc) {
5426 filemap_invalidate_unlock(inode->i_mapping);
5427 goto err_out;
5428 }
5429
5430 if (attr->ia_size != inode->i_size) {
5431 handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
5432 if (IS_ERR(handle)) {
5433 error = PTR_ERR(handle);
5434 goto out_mmap_sem;
5435 }
5436 if (ext4_handle_valid(handle) && shrink) {
5437 error = ext4_orphan_add(handle, inode);
5438 orphan = 1;
5439 }
5440 /*
5441 * Update c/mtime on truncate up, ext4_truncate() will
5442 * update c/mtime in shrink case below
5443 */
5444 if (!shrink)
5445 inode->i_mtime = inode_set_ctime_current(inode);
5446
5447 if (shrink)
5448 ext4_fc_track_range(handle, inode,
5449 (attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
5450 inode->i_sb->s_blocksize_bits,
5451 EXT_MAX_BLOCKS - 1);
5452 else
5453 ext4_fc_track_range(
5454 handle, inode,
5455 (oldsize > 0 ? oldsize - 1 : oldsize) >>
5456 inode->i_sb->s_blocksize_bits,
5457 (attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
5458 inode->i_sb->s_blocksize_bits);
5459
5460 down_write(&EXT4_I(inode)->i_data_sem);
5461 old_disksize = EXT4_I(inode)->i_disksize;
5462 EXT4_I(inode)->i_disksize = attr->ia_size;
5463 rc = ext4_mark_inode_dirty(handle, inode);
5464 if (!error)
5465 error = rc;
5466 /*
5467 * We have to update i_size under i_data_sem together
5468 * with i_disksize to avoid races with writeback code
5469 * running ext4_wb_update_i_disksize().
5470 */
5471 if (!error)
5472 i_size_write(inode, attr->ia_size);
5473 else
5474 EXT4_I(inode)->i_disksize = old_disksize;
5475 up_write(&EXT4_I(inode)->i_data_sem);
5476 ext4_journal_stop(handle);
5477 if (error)
5478 goto out_mmap_sem;
5479 if (!shrink) {
5480 pagecache_isize_extended(inode, oldsize,
5481 inode->i_size);
5482 } else if (ext4_should_journal_data(inode)) {
5483 ext4_wait_for_tail_page_commit(inode);
5484 }
5485 }
5486
5487 /*
5488 * Truncate pagecache after we've waited for commit
5489 * in data=journal mode to make pages freeable.
5490 */
5491 truncate_pagecache(inode, inode->i_size);
5492 /*
5493 * Call ext4_truncate() even if i_size didn't change to
5494 * truncate possible preallocated blocks.
5495 */
5496 if (attr->ia_size <= oldsize) {
5497 rc = ext4_truncate(inode);
5498 if (rc)
5499 error = rc;
5500 }
5501 out_mmap_sem:
5502 filemap_invalidate_unlock(inode->i_mapping);
5503 }
5504
5505 if (!error) {
5506 if (inc_ivers)
5507 inode_inc_iversion(inode);
5508 setattr_copy(idmap, inode, attr);
5509 mark_inode_dirty(inode);
5510 }
5511
5512 /*
5513 * If the call to ext4_truncate failed to get a transaction handle at
5514 * all, we need to clean up the in-core orphan list manually.
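*
* (Editorial note, not part of the original file: our reading of the
* call below is that the NULL handle restricts ext4_orphan_del() to
* dropping the in-core orphan list entry; without a transaction handle
* the on-disk entry is left for a later orphan scan or journal
* recovery to clean up.)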
5515 */
5516 if (orphan && inode->i_nlink)
5517 ext4_orphan_del(NULL, inode);
5518
5519 if (!error && (ia_valid & ATTR_MODE))
5520 rc = posix_acl_chmod(idmap, dentry, inode->i_mode);
5521
5522 err_out:
5523 if (error)
5524 ext4_std_error(inode->i_sb, error);
5525 if (!error)
5526 error = rc;
5527 return error;
5528 }
5529
5530 u32 ext4_dio_alignment(struct inode *inode)
5531 {
5532 if (fsverity_active(inode))
5533 return 0;
5534 if (ext4_should_journal_data(inode))
5535 return 0;
5536 if (ext4_has_inline_data(inode))
5537 return 0;
5538 if (IS_ENCRYPTED(inode)) {
5539 if (!fscrypt_dio_supported(inode))
5540 return 0;
5541 return i_blocksize(inode);
5542 }
5543 return 1; /* use the iomap defaults */
5544 }
5545
5546 int ext4_getattr(struct mnt_idmap *idmap, const struct path *path,
5547 struct kstat *stat, u32 request_mask, unsigned int query_flags)
5548 {
5549 struct inode *inode = d_inode(path->dentry);
5550 struct ext4_inode *raw_inode;
5551 struct ext4_inode_info *ei = EXT4_I(inode);
5552 unsigned int flags;
5553
5554 if ((request_mask & STATX_BTIME) &&
5555 EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
5556 stat->result_mask |= STATX_BTIME;
5557 stat->btime.tv_sec = ei->i_crtime.tv_sec;
5558 stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
5559 }
5560
5561 /*
5562 * Return the DIO alignment restrictions if requested. We only return
5563 * this information when requested, since on encrypted files it might
5564 * take a fair bit of work to get if the file wasn't opened recently.
5565 */
5566 if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
5567 u32 dio_align = ext4_dio_alignment(inode);
5568
5569 stat->result_mask |= STATX_DIOALIGN;
5570 if (dio_align == 1) {
5571 struct block_device *bdev = inode->i_sb->s_bdev;
5572
5573 /* iomap defaults */
5574 stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
5575 stat->dio_offset_align = bdev_logical_block_size(bdev);
5576 } else {
5577 stat->dio_mem_align = dio_align;
5578 stat->dio_offset_align = dio_align;
5579 }
5580 }
5581
5582 flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
5583 if (flags & EXT4_APPEND_FL)
5584 stat->attributes |= STATX_ATTR_APPEND;
5585 if (flags & EXT4_COMPR_FL)
5586 stat->attributes |= STATX_ATTR_COMPRESSED;
5587 if (flags & EXT4_ENCRYPT_FL)
5588 stat->attributes |= STATX_ATTR_ENCRYPTED;
5589 if (flags & EXT4_IMMUTABLE_FL)
5590 stat->attributes |= STATX_ATTR_IMMUTABLE;
5591 if (flags & EXT4_NODUMP_FL)
5592 stat->attributes |= STATX_ATTR_NODUMP;
5593 if (flags & EXT4_VERITY_FL)
5594 stat->attributes |= STATX_ATTR_VERITY;
5595
5596 stat->attributes_mask |= (STATX_ATTR_APPEND |
5597 STATX_ATTR_COMPRESSED |
5598 STATX_ATTR_ENCRYPTED |
5599 STATX_ATTR_IMMUTABLE |
5600 STATX_ATTR_NODUMP |
5601 STATX_ATTR_VERITY);
5602
5603 generic_fillattr(idmap, request_mask, inode, stat);
5604 return 0;
5605 }
5606
5607 int ext4_file_getattr(struct mnt_idmap *idmap,
5608 const struct path *path, struct kstat *stat,
5609 u32 request_mask, unsigned int query_flags)
5610 {
5611 struct inode *inode = d_inode(path->dentry);
5612 u64 delalloc_blocks;
5613
5614 ext4_getattr(idmap, path, stat, request_mask, query_flags);
5615
5616 /*
5617 * If there is inline data in the inode, the inode will normally not
5618 * have data blocks allocated (it may have an external xattr block).
5619 * Report at least one sector for such files, so tools like tar, rsync,
5620 * and others don't incorrectly think the file is completely sparse.
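*
* (Editorial example, not part of the original file: for a hypothetical
* 300-byte inline-data file, the adjustment below adds
* (300 + 511) >> 9 == 1 sector, so stat reports one block instead of a
* completely sparse file.)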
5621 */
5622 if (unlikely(ext4_has_inline_data(inode)))
5623 stat->blocks += (stat->size + 511) >> 9;
5624
5625 /*
5626 * We can't update i_blocks if the block allocation is delayed;
5627 * otherwise, in the case of a system crash before the real block
5628 * allocation is done, we would have i_blocks inconsistent with
5629 * the on-disk file blocks.
5630 * We always keep i_blocks updated together with the real
5631 * allocation. But to avoid confusing users, stat
5632 * will return the blocks that include the delayed allocation
5633 * blocks for this file.
5634 */
5635 delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
5636 EXT4_I(inode)->i_reserved_data_blocks);
5637 stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
5638 return 0;
5639 }
5640
5641 static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
5642 int pextents)
5643 {
5644 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5645 return ext4_ind_trans_blocks(inode, lblocks);
5646 return ext4_ext_index_trans_blocks(inode, pextents);
5647 }
5648
5649 /*
5650 * Account for index blocks, block group bitmaps and block group
5651 * descriptor blocks if both data blocks and index blocks are modified.
5652 * In the worst case, the index blocks spread over different block groups.
5653 *
5654 * If data blocks are discontiguous, they may spread over
5655 * different block groups too. If they are contiguous, with flexbg,
5656 * they could still cross a block group boundary.
5657 *
5658 * Also account for superblock, inode, quota and xattr blocks.
5659 */
5660 static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
5661 int pextents)
5662 {
5663 ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
5664 int gdpblocks;
5665 int idxblocks;
5666 int ret;
5667
5668 /*
5669 * How many index blocks do we need to touch to map @lblocks logical
5670 * blocks to @pextents physical extents?
5671 */
5672 idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
5673
5674 ret = idxblocks;
5675
5676 /*
5677 * Now let's see how many group bitmaps and group descriptors need
5678 * to be accounted.
5679 */
5680 groups = idxblocks + pextents;
5681 gdpblocks = groups;
5682 if (groups > ngroups)
5683 groups = ngroups;
5684 if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
5685 gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
5686
5687 /* bitmaps and block group descriptor blocks */
5688 ret += groups + gdpblocks;
5689
5690 /* Blocks for super block, inode, quota and xattr blocks */
5691 ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
5692
5693 return ret;
5694 }
5695
5696 /*
5697 * Calculate the total number of credits to reserve to fit
5698 * the modification of a single page into a single transaction,
5699 * which may include multiple chunks of block allocations.
5700 *
5701 * This could be called via ext4_write_begin().
5702 *
5703 * We need to consider the worst case, when
5704 * one new block is allocated per extent.
5705 */
5706 int ext4_writepage_trans_blocks(struct inode *inode)
5707 {
5708 int bpp = ext4_journal_blocks_per_page(inode);
5709 int ret;
5710
5711 ret = ext4_meta_trans_blocks(inode, bpp, bpp);
5712
5713 /* Account for data blocks for journalled mode */
5714 if (ext4_should_journal_data(inode))
5715 ret += bpp;
5716 return ret;
5717 }
5718
5719 /*
5720 * Calculate the journal credits for a chunk of data modification.
5721 *
5722 * This is called from DIO, fallocate or whoever calls
5723 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
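*
* (Editorial example, not part of the original file, numbers
* hypothetical: mapping a 16-block contiguous chunk as one extent,
* ext4_chunk_trans_blocks(inode, 16) expands to
* ext4_meta_trans_blocks(inode, 16, 1): the index blocks needed for 16
* logical blocks, plus bitmap and group descriptor blocks for each
* group touched, plus EXT4_META_TRANS_BLOCKS() for the superblock,
* inode, quota and xattr blocks.)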
5724 *
5725 * journal buffers for data blocks are not included here, as DIO
5726 * and fallocate do not need to journal data buffers.
5727 */
5728 int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
5729 {
5730 return ext4_meta_trans_blocks(inode, nrblocks, 1);
5731 }
5732
5733 /*
5734 * The caller must have previously called ext4_reserve_inode_write().
5735 * Given this, we know that the caller already has write access to iloc->bh.
5736 */
5737 int ext4_mark_iloc_dirty(handle_t *handle,
5738 struct inode *inode, struct ext4_iloc *iloc)
5739 {
5740 int err = 0;
5741
5742 if (unlikely(ext4_forced_shutdown(inode->i_sb))) {
5743 put_bh(iloc->bh);
5744 return -EIO;
5745 }
5746 ext4_fc_track_inode(handle, inode);
5747
5748 /* ext4_do_update_inode() consumes one bh->b_count */
5749 get_bh(iloc->bh);
5750
5751 /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
5752 err = ext4_do_update_inode(handle, inode, iloc);
5753 put_bh(iloc->bh);
5754 return err;
5755 }
5756
5757 /*
5758 * On success, we end up with an outstanding reference count against
5759 * iloc->bh. This _must_ be cleaned up later.
5760 */
5761
5762 int
5763 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
5764 struct ext4_iloc *iloc)
5765 {
5766 int err;
5767
5768 if (unlikely(ext4_forced_shutdown(inode->i_sb)))
5769 return -EIO;
5770
5771 err = ext4_get_inode_loc(inode, iloc);
5772 if (!err) {
5773 BUFFER_TRACE(iloc->bh, "get_write_access");
5774 err = ext4_journal_get_write_access(handle, inode->i_sb,
5775 iloc->bh, EXT4_JTR_NONE);
5776 if (err) {
5777 brelse(iloc->bh);
5778 iloc->bh = NULL;
5779 }
5780 }
5781 ext4_std_error(inode->i_sb, err);
5782 return err;
5783 }
5784
5785 static int __ext4_expand_extra_isize(struct inode *inode,
5786 unsigned int new_extra_isize,
5787 struct ext4_iloc *iloc,
5788 handle_t *handle, int *no_expand)
5789 {
5790 struct ext4_inode *raw_inode;
5791 struct ext4_xattr_ibody_header *header;
5792 unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb);
5793 struct ext4_inode_info *ei = EXT4_I(inode);
5794 int error;
5795
5796 /* this was checked at iget time, but double check for good measure */
5797 if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) ||
5798 (ei->i_extra_isize & 3)) {
5799 EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)",
5800 ei->i_extra_isize,
5801 EXT4_INODE_SIZE(inode->i_sb));
5802 return -EFSCORRUPTED;
5803 }
5804 if ((new_extra_isize < ei->i_extra_isize) ||
5805 (new_extra_isize < 4) ||
5806 (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE))
5807 return -EINVAL; /* Should never happen */
5808
5809 raw_inode = ext4_raw_inode(iloc);
5810
5811 header = IHDR(inode, raw_inode);
5812
5813 /* No extended attributes present */
5814 if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
5815 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
5816 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
5817 EXT4_I(inode)->i_extra_isize, 0,
5818 new_extra_isize - EXT4_I(inode)->i_extra_isize);
5819 EXT4_I(inode)->i_extra_isize = new_extra_isize;
5820 return 0;
5821 }
5822
5823 /*
5824 * We may need to allocate an external xattr block, so we need quotas
5825 * initialized. Here we can be called with various locks held, so we
5826 * cannot afford to initialize quotas ourselves. So just bail.
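*
* (Editorial note, not part of the original file: the -EAGAIN below
* propagates out through ext4_try_to_expand_extra_isize(), whose
* return value __ext4_mark_inode_dirty() ignores, so the expansion is
* simply retried on a later inode update once quotas are initialized.)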
5827 */
5828 if (dquot_initialize_needed(inode))
5829 return -EAGAIN;
5830
5831 /* try to expand with EAs present */
5832 error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
5833 raw_inode, handle);
5834 if (error) {
5835 /*
5836 * Inode size expansion failed; don't try again
5837 */
5838 *no_expand = 1;
5839 }
5840
5841 return error;
5842 }
5843
5844 /*
5845 * Expand an inode by new_extra_isize bytes.
5846 * Returns 0 on success or negative error number on failure.
5847 */
5848 static int ext4_try_to_expand_extra_isize(struct inode *inode,
5849 unsigned int new_extra_isize,
5850 struct ext4_iloc iloc,
5851 handle_t *handle)
5852 {
5853 int no_expand;
5854 int error;
5855
5856 if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND))
5857 return -EOVERFLOW;
5858
5859 /*
5860 * In nojournal mode, we can immediately attempt to expand
5861 * the inode. When journaled, we first need to obtain extra
5862 * buffer credits since we may write into the EA block
5863 * with this same handle. If journal_extend fails, then it will
5864 * only result in a minor loss of functionality for that inode.
5865 * If this is felt to be critical, then e2fsck should be run to
5866 * force a large enough s_min_extra_isize.
5867 */
5868 if (ext4_journal_extend(handle,
5869 EXT4_DATA_TRANS_BLOCKS(inode->i_sb), 0) != 0)
5870 return -ENOSPC;
5871
5872 if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
5873 return -EBUSY;
5874
5875 error = __ext4_expand_extra_isize(inode, new_extra_isize, &iloc,
5876 handle, &no_expand);
5877 ext4_write_unlock_xattr(inode, &no_expand);
5878
5879 return error;
5880 }
5881
5882 int ext4_expand_extra_isize(struct inode *inode,
5883 unsigned int new_extra_isize,
5884 struct ext4_iloc *iloc)
5885 {
5886 handle_t *handle;
5887 int no_expand;
5888 int error, rc;
5889
5890 if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
5891 brelse(iloc->bh);
5892 return -EOVERFLOW;
5893 }
5894
5895 handle = ext4_journal_start(inode, EXT4_HT_INODE,
5896 EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
5897 if (IS_ERR(handle)) {
5898 error = PTR_ERR(handle);
5899 brelse(iloc->bh);
5900 return error;
5901 }
5902
5903 ext4_write_lock_xattr(inode, &no_expand);
5904
5905 BUFFER_TRACE(iloc->bh, "get_write_access");
5906 error = ext4_journal_get_write_access(handle, inode->i_sb, iloc->bh,
5907 EXT4_JTR_NONE);
5908 if (error) {
5909 brelse(iloc->bh);
5910 goto out_unlock;
5911 }
5912
5913 error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
5914 handle, &no_expand);
5915
5916 rc = ext4_mark_iloc_dirty(handle, inode, iloc);
5917 if (!error)
5918 error = rc;
5919
5920 out_unlock:
5921 ext4_write_unlock_xattr(inode, &no_expand);
5922 ext4_journal_stop(handle);
5923 return error;
5924 }
5925
5926 /*
5927 * What we do here is to mark the in-core inode as clean with respect to inode
5928 * dirtiness (it may still be data-dirty).
5929 * This means that the in-core inode may be reaped by prune_icache
5930 * without having to perform any I/O. This is a very good thing,
5931 * because *any* task may call prune_icache - even ones which
5932 * have a transaction open against a different journal.
5933 *
5934 * Is this cheating? Not really. Sure, we haven't written the
5935 * inode out, but prune_icache isn't a user-visible syncing function.
5936 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
5937 * we start and wait on commits.
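*
* (Editorial sketch, not part of the original file: the usual calling
* pattern, mirroring ext4_dirty_inode() below, is
*
*	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
*	if (!IS_ERR(handle)) {
*		ext4_mark_inode_dirty(handle, inode);
*		ext4_journal_stop(handle);
*	}
*
* with the handle's credit count sized for the update at hand.)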
5938 */
5939 int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode,
5940 const char *func, unsigned int line)
5941 {
5942 struct ext4_iloc iloc;
5943 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5944 int err;
5945
5946 might_sleep();
5947 trace_ext4_mark_inode_dirty(inode, _RET_IP_);
5948 err = ext4_reserve_inode_write(handle, inode, &iloc);
5949 if (err)
5950 goto out;
5951
5952 if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize)
5953 ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize,
5954 iloc, handle);
5955
5956 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
5957 out:
5958 if (unlikely(err))
5959 ext4_error_inode_err(inode, func, line, 0, err,
5960 "mark_inode_dirty error");
5961 return err;
5962 }
5963
5964 /*
5965 * ext4_dirty_inode() is called from __mark_inode_dirty()
5966 *
5967 * We're really interested in the case where a file is being extended.
5968 * i_size has been changed by generic_commit_write() and we thus need
5969 * to include the updated inode in the current transaction.
5970 *
5971 * Also, dquot_alloc_block() will always dirty the inode when blocks
5972 * are allocated to the file.
5973 *
5974 * If the inode is marked synchronous, we don't honour that here - doing
5975 * so would cause a commit on atime updates, which we don't bother doing.
5976 * We handle synchronous inodes at the highest possible level.
5977 */
5978 void ext4_dirty_inode(struct inode *inode, int flags)
5979 {
5980 handle_t *handle;
5981
5982 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
5983 if (IS_ERR(handle))
5984 return;
5985 ext4_mark_inode_dirty(handle, inode);
5986 ext4_journal_stop(handle);
5987 }
5988
5989 int ext4_change_inode_journal_flag(struct inode *inode, int val)
5990 {
5991 journal_t *journal;
5992 handle_t *handle;
5993 int err;
5994 int alloc_ctx;
5995
5996 /*
5997 * We have to be very careful here: changing a data block's
5998 * journaling status dynamically is dangerous. If we write a
5999 * data block to the journal, change the status and then delete
6000 * that block, we risk forgetting to revoke the old log record
6001 * from the journal and so a subsequent replay can corrupt data.
6002 * So, first we make sure that the journal is empty and that
6003 * nobody is changing anything.
6004 */
6005
6006 journal = EXT4_JOURNAL(inode);
6007 if (!journal)
6008 return 0;
6009 if (is_journal_aborted(journal))
6010 return -EROFS;
6011
6012 /* Wait for all existing dio workers */
6013 inode_dio_wait(inode);
6014
6015 /*
6016 * Before flushing the journal and switching inode's aops, we have
6017 * to flush all dirty data the inode has. There can be outstanding
6018 * delayed allocations, there can be unwritten extents created by
6019 * fallocate or buffered writes in dioread_nolock mode covered by
6020 * dirty data which can be converted only after flushing the dirty
6021 * data (and journalled aops don't know how to handle these cases).
6022 */
6023 if (val) {
6024 filemap_invalidate_lock(inode->i_mapping);
6025 err = filemap_write_and_wait(inode->i_mapping);
6026 if (err < 0) {
6027 filemap_invalidate_unlock(inode->i_mapping);
6028 return err;
6029 }
6030 }
6031
6032 alloc_ctx = ext4_writepages_down_write(inode->i_sb);
6033 jbd2_journal_lock_updates(journal);
6034
6035 /*
6036 * OK, there are no updates running now, and all cached data is
6037 * synced to disk. We are now in a completely consistent state
6038 * which doesn't have anything in the journal, and we know that
6039 * no filesystem updates are running, so it is safe to modify
6040 * the inode's in-core data-journaling state flag now.
6041 */
6042
6043 if (val)
6044 ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6045 else {
6046 err = jbd2_journal_flush(journal, 0);
6047 if (err < 0) {
6048 jbd2_journal_unlock_updates(journal);
6049 ext4_writepages_up_write(inode->i_sb, alloc_ctx);
6050 return err;
6051 }
6052 ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6053 }
6054 ext4_set_aops(inode);
6055
6056 jbd2_journal_unlock_updates(journal);
6057 ext4_writepages_up_write(inode->i_sb, alloc_ctx);
6058
6059 if (val)
6060 filemap_invalidate_unlock(inode->i_mapping);
6061
6062 /* Finally we can mark the inode as dirty. */
6063
6064 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
6065 if (IS_ERR(handle))
6066 return PTR_ERR(handle);
6067
6068 ext4_fc_mark_ineligible(inode->i_sb,
6069 EXT4_FC_REASON_JOURNAL_FLAG_CHANGE, handle);
6070 err = ext4_mark_inode_dirty(handle, inode);
6071 ext4_handle_sync(handle);
6072 ext4_journal_stop(handle);
6073 ext4_std_error(inode->i_sb, err);
6074
6075 return err;
6076 }
6077
6078 static int ext4_bh_unmapped(handle_t *handle, struct inode *inode,
6079 struct buffer_head *bh)
6080 {
6081 return !buffer_mapped(bh);
6082 }
6083
6084 vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
6085 {
6086 struct vm_area_struct *vma = vmf->vma;
6087 struct folio *folio = page_folio(vmf->page);
6088 loff_t size;
6089 unsigned long len;
6090 int err;
6091 vm_fault_t ret;
6092 struct file *file = vma->vm_file;
6093 struct inode *inode = file_inode(file);
6094 struct address_space *mapping = inode->i_mapping;
6095 handle_t *handle;
6096 get_block_t *get_block;
6097 int retries = 0;
6098
6099 if (unlikely(IS_IMMUTABLE(inode)))
6100 return VM_FAULT_SIGBUS;
6101
6102 sb_start_pagefault(inode->i_sb);
6103 file_update_time(vma->vm_file);
6104
6105 filemap_invalidate_lock_shared(mapping);
6106
6107 err = ext4_convert_inline_data(inode);
6108 if (err)
6109 goto out_ret;
6110
6111 /*
6112 * On data journalling we skip straight to the transaction handle:
6113 * there's no delalloc; page truncation will be checked later; the
6114 * early return w/ all buffers mapped (calculates size/len) can't
6115 * be used; and there's no dioread_nolock, so only ext4_get_block.
6116 */
6117 if (ext4_should_journal_data(inode))
6118 goto retry_alloc;
6119
6120 /* Delalloc case is easy... */
6121 if (test_opt(inode->i_sb, DELALLOC) &&
6122 !ext4_nonda_switch(inode->i_sb)) {
6123 do {
6124 err = block_page_mkwrite(vma, vmf,
6125 ext4_da_get_block_prep);
6126 } while (err == -ENOSPC &&
6127 ext4_should_retry_alloc(inode->i_sb, &retries));
6128 goto out_ret;
6129 }
6130
6131 folio_lock(folio);
6132 size = i_size_read(inode);
6133 /* Page got truncated from under us? */
6134 if (folio->mapping != mapping || folio_pos(folio) > size) {
6135 folio_unlock(folio);
6136 ret = VM_FAULT_NOPAGE;
6137 goto out;
6138 }
6139
6140 len = folio_size(folio);
6141 if (folio_pos(folio) + len > size)
6142 len = size - folio_pos(folio);
6143 /*
6144 * Return if we have all the buffers mapped. This avoids the need to do
6145 * journal_start/journal_stop, which can block and take a long time.
6146 *
6147 * This cannot be done for data journalling, as we have to add the
6148 * inode to the transaction's list to writeprotect pages on commit.
6149 */
6150 if (folio_buffers(folio)) {
6151 if (!ext4_walk_page_buffers(NULL, inode, folio_buffers(folio),
6152 0, len, NULL,
6153 ext4_bh_unmapped)) {
6154 /* Wait so that we don't change page under IO */
6155 folio_wait_stable(folio);
6156 ret = VM_FAULT_LOCKED;
6157 goto out;
6158 }
6159 }
6160 folio_unlock(folio);
6161 /* OK, we need to fill the hole... */
6162 if (ext4_should_dioread_nolock(inode))
6163 get_block = ext4_get_block_unwritten;
6164 else
6165 get_block = ext4_get_block;
6166 retry_alloc:
6167 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
6168 ext4_writepage_trans_blocks(inode));
6169 if (IS_ERR(handle)) {
6170 ret = VM_FAULT_SIGBUS;
6171 goto out;
6172 }
6173 /*
6174 * Data journalling can't use block_page_mkwrite() because it
6175 * will set_buffer_dirty() before do_journal_get_write_access(),
6176 * and thus might hit warning messages for dirty metadata buffers.
6177 */
6178 if (!ext4_should_journal_data(inode)) {
6179 err = block_page_mkwrite(vma, vmf, get_block);
6180 } else {
6181 folio_lock(folio);
6182 size = i_size_read(inode);
6183 /* Page got truncated from under us? */
6184 if (folio->mapping != mapping || folio_pos(folio) > size) {
6185 ret = VM_FAULT_NOPAGE;
6186 goto out_error;
6187 }
6188
6189 len = folio_size(folio);
6190 if (folio_pos(folio) + len > size)
6191 len = size - folio_pos(folio);
6192
6193 err = __block_write_begin(&folio->page, 0, len, ext4_get_block);
6194 if (!err) {
6195 ret = VM_FAULT_SIGBUS;
6196 if (ext4_journal_folio_buffers(handle, folio, len))
6197 goto out_error;
6198 } else {
6199 folio_unlock(folio);
6200 }
6201 }
6202 ext4_journal_stop(handle);
6203 if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
6204 goto retry_alloc;
6205 out_ret:
6206 ret = vmf_fs_error(err);
6207 out:
6208 filemap_invalidate_unlock_shared(mapping);
6209 sb_end_pagefault(inode->i_sb);
6210 return ret;
6211 out_error:
6212 folio_unlock(folio);
6213 ext4_journal_stop(handle);
6214 goto out;
6215 }
6216
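/*
 * Editorial note, not part of the original file: ext4_page_mkwrite() is
 * wired into the fault path through ext4's vm_operations_struct in
 * fs/ext4/file.c, roughly (from memory, exact fields may differ by
 * kernel version):
 *
 *	static const struct vm_operations_struct ext4_file_vm_ops = {
 *		.fault		= ext4_filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= ext4_page_mkwrite,
 *	};
 */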