// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/iomap.h>
#include <linux/iversion.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			     struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;
	__u16 dummy_csum = 0;
	int offset = offsetof(struct ext4_inode, i_checksum_lo);
	unsigned int csum_size = sizeof(dummy_csum);

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
	offset += csum_size;
	csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
			   EXT4_GOOD_OLD_INODE_SIZE - offset);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		offset = offsetof(struct ext4_inode, i_checksum_hi);
		csum = ext4_chksum(sbi, csum, (__u8 *)raw +
				   EXT4_GOOD_OLD_INODE_SIZE,
				   offset - EXT4_GOOD_OLD_INODE_SIZE);
		if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
			csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
					   csum_size);
			offset += csum_size;
		}
		csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
				   EXT4_INODE_SIZE(inode->i_sb) - offset);
	}

	return csum;
}

static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}
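
/*
 * Illustration (not part of the original source): the full 32-bit checksum
 * is split across two 16-bit on-disk fields, so verification reassembles
 * it as
 *
 *	provided = i_checksum_lo | (i_checksum_hi << 16);
 *
 * On small (128-byte) inodes there is no i_checksum_hi field, which is why
 * the calculated value is masked down to its low 16 bits before comparing.
 */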

static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
				struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents);

/*
 * Test whether an inode is a fast symlink.
 * A fast symlink has its symlink data stored in ext4_inode_info->i_data.
 */
int ext4_inode_is_fast_symlink(struct inode *inode)
{
	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
		int ea_blocks = EXT4_I(inode)->i_file_acl ?
				EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;

		if (ext4_has_inline_data(inode))
			return 0;

		return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
	}
	return S_ISLNK(inode->i_mode) && inode->i_size &&
	       (inode->i_size < EXT4_N_BLOCKS * 4);
}
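
/*
 * Worked example (illustrative, not part of the original source): with
 * EXT4_N_BLOCKS == 15 the i_data array provides 15 * 4 == 60 bytes, so a
 * symlink target shorter than 60 bytes is stored directly in the inode and
 * never consumes a data block, while a longer target falls back to a
 * regular (slow) symlink with a block mapping.
 */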

/*
 * Restart the transaction associated with *handle. This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex. So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}
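
/*
 * Typical caller pattern (a sketch, not code from this file): a
 * long-running truncate checks whether its handle still has enough credits
 * and restarts it when they run low, e.g.
 *
 *	if (!ext4_handle_has_enough_credits(handle, needed))
 *		err = ext4_truncate_restart_trans(handle, inode,
 *					ext4_blocks_for_truncate(inode));
 *
 * Everything dirtied so far must already be consistent, because the
 * restart commits the running transaction.
 */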

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;
	int extra_credits = 3;
	struct ext4_xattr_inode_array *ea_inode_array = NULL;

	trace_ext4_evict_inode(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data dirty buffers are tracked only in the
		 * journal. So although mm thinks everything is clean and
		 * ready for reaping the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed. Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss. Also even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped and thus users could see stale data if they try
		 * to read them before the transaction is checkpointed. So be
		 * careful and force everything to disk here... We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use page cache.
		 */
		if (inode->i_ino != EXT4_JOURNAL_INO &&
		    ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
		    inode->i_data.nrpages) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_complete_transaction(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages_final(&inode->i_data);

		goto no_delete;
	}

	if (is_bad_inode(inode))
		goto no_delete;
	dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages_final(&inode->i_data);

	/*
	 * Protect us against freezing - iput() caller didn't have to have any
	 * protection against it
	 */
	sb_start_intwrite(inode->i_sb);

	if (!IS_NOQUOTA(inode))
		extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);

	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
				    ext4_blocks_for_truncate(inode)+extra_credits);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	/*
	 * Set inode->i_size to 0 before calling ext4_truncate(). We need
	 * special handling of symlinks here because i_size is used to
	 * determine whether ext4_inode_info->i_data contains symlink data or
	 * block mappings. Setting i_size to 0 will remove its fast symlink
	 * status. Erase i_data so that it becomes a valid empty block map.
	 */
	if (ext4_inode_is_fast_symlink(inode))
		memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks) {
		err = ext4_truncate(inode);
		if (err) {
			ext4_error(inode->i_sb,
				   "couldn't truncate inode %lu (err %d)",
				   inode->i_ino, err);
			goto stop_handle;
		}
	}

	/* Remove xattr references. */
	err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
				      extra_credits);
	if (err) {
		ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
stop_handle:
		ext4_journal_stop(handle);
		ext4_orphan_del(NULL, inode);
		sb_end_intwrite(inode->i_sb);
		ext4_xattr_inode_array_free(ea_inode_array);
		goto no_delete;
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = (__u32)ktime_get_real_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	sb_end_intwrite(inode->i_sb);
	ext4_xattr_inode_array_free(ea_inode_array);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
				  int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
			     "with only %d reserved data blocks",
			     __func__, inode->i_ino, used,
			     ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}
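
/*
 * Accounting sketch (illustrative assumption, not from the original
 * source): on a 4KiB-block filesystem without bigalloc
 * (s_cluster_ratio == 1), a delalloc write that finally maps 8 blocks calls
 *
 *	ext4_da_update_reserve_space(inode, 8, 1);
 *
 * which drops i_reserved_data_blocks and s_dirtyclusters_counter by 8 and
 * converts the quota reservation into a claim of EXT4_C2B(sbi, 8) == 8
 * blocks. With quota_claim == 0 (e.g. writeback over an already fallocated
 * range) the reservation is released instead of claimed.
 */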

static int __check_block_validity(struct inode *inode, const char *func,
				  unsigned int line,
				  struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock %llu "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_pblk, map->m_len);
		return -EFSCORRUPTED;
	}
	return 0;
}

int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
		       ext4_lblk_t len)
{
	int ret;

	if (ext4_encrypted_inode(inode))
		return fscrypt_zeroout_range(inode, lblk, pblk, len);

	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
				       struct inode *inode,
				       struct ext4_map_blocks *es_map,
				       struct ext4_map_blocks *map,
				       int flags)
{
	int retval;

	map->m_flags = 0;
	/*
	 * There is a race window in which the result is not the same.
	 * e.g. xfstests #223 when dioread_nolock is enabled. The reason
	 * is that we look up a block mapping in the extent status tree
	 * without taking i_data_sem, so in the meantime the unwritten
	 * extent could be converted.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	/*
	 * We don't check m_len because the extent will be collapsed in the
	 * status tree, so the lengths might not be equal.
	 */
	if (es_map->m_lblk != map->m_lblk ||
	    es_map->m_flags != map->m_flags ||
	    es_map->m_pblk != map->m_pblk) {
		printk("ES cache assertion failed for inode: %lu "
		       "es_cached ex [%d/%d/%llu/%x] != "
		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
		       inode->i_ino, es_map->m_lblk, es_map->m_len,
		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
		       map->m_len, map->m_pblk, map->m_flags,
		       retval, flags);
	}
}
#endif /* ES_AGGRESSIVE_TEST */

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extent based, it calls ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapping
 * based files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create==0 and the blocks are pre-allocated and unwritten, the
 * resulting @map is marked as unwritten. If create==1, it will mark @map
 * as mapped.
 *
 * It returns 0 if a plain lookup failed (blocks have not been allocated);
 * in that case, @map is returned as unmapped but we still fill map->m_len
 * to indicate the length of a hole starting at map->m_lblk.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	struct extent_status es;
	int retval;
	int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);

	/*
	 * ext4_map_blocks returns an int, and m_len is an unsigned int
	 */
	if (unlikely(map->m_len > INT_MAX))
		map->m_len = INT_MAX;

	/* We can handle the block number less than EXT_MAX_BLOCKS */
	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
		return -EFSCORRUPTED;

	/* Lookup extent status tree firstly */
	if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
			map->m_pblk = ext4_es_pblock(&es) +
				      map->m_lblk - es.es_lblk;
			map->m_flags |= ext4_es_is_written(&es) ?
					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
			map->m_pblk = 0;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
			retval = 0;
		} else {
			BUG_ON(1);
		}
#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(handle, inode, map,
					   &orig_map, flags);
#endif
		goto found;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk,
					    map->m_len, map->m_pblk, status);
		if (ret < 0)
			retval = ret;
	}
	up_read((&EXT4_I(inode)->i_data_sem));

found:
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create == 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		/*
		 * If we need to convert extent to unwritten
		 * we continue and do the actual work in
		 * ext4_ext_map_blocks()
		 */
		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
			return retval;

	/*
	 * Here we clear m_flags because after allocating a new extent,
	 * it will be set again.
	 */
	map->m_flags &= ~EXT4_MAP_FLAGS;

	/*
	 * New block allocation and/or writing to an unwritten extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_block()
	 * with create == 1 flag.
	 */
	down_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing. Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now. We don't
		 * support fallocate for non extent files. So we can update
		 * reserve space here.
		 */
		if ((retval > 0) &&
		    (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}

	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		/*
		 * We have to zeroout blocks before inserting them into extent
		 * status tree. Otherwise someone could look them up there and
		 * use them before they are really zeroed. We also have to
		 * unmap metadata before zeroing as otherwise writeback can
		 * overwrite zeros with stale data from block device.
		 */
		if (flags & EXT4_GET_BLOCKS_ZERO &&
		    map->m_flags & EXT4_MAP_MAPPED &&
		    map->m_flags & EXT4_MAP_NEW) {
			clean_bdev_aliases(inode->i_sb->s_bdev, map->m_pblk,
					   map->m_len);
			ret = ext4_issue_zeroout(inode, map->m_lblk,
						 map->m_pblk, map->m_len);
			if (ret) {
				retval = ret;
				goto out_sem;
			}
		}

		/*
		 * If the extent has been zeroed out, we don't need to update
		 * extent status tree.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
		    ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
			if (ext4_es_is_written(&es))
				goto out_sem;
		}
		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    map->m_pblk, status);
		if (ret < 0) {
			retval = ret;
			goto out_sem;
		}
	}

out_sem:
	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;

		/*
		 * Inodes with freshly allocated blocks where contents will be
		 * visible after transaction commit must be on transaction's
		 * ordered data list.
		 */
		if (map->m_flags & EXT4_MAP_NEW &&
		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
		    !(flags & EXT4_GET_BLOCKS_ZERO) &&
		    !ext4_is_quota_file(inode) &&
		    ext4_should_order_data(inode)) {
			if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
				ret = ext4_jbd2_inode_add_wait(handle, inode);
			else
				ret = ext4_jbd2_inode_add_write(handle, inode);
			if (ret)
				return ret;
		}
	}
	return retval;
}
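
/*
 * Usage sketch (illustrative, not from the original source): a read-only
 * lookup of one block at logical block 100 would look like
 *
 *	struct ext4_map_blocks map = { .m_lblk = 100, .m_len = 1 };
 *	int ret = ext4_map_blocks(NULL, inode, &map, 0);
 *
 * ret > 0 means map.m_pblk now holds the physical block and
 * EXT4_MAP_MAPPED (or EXT4_MAP_UNWRITTEN) is set in map.m_flags;
 * ret == 0 means a hole, with map.m_len giving the hole's length;
 * ret < 0 is an error.
 */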

/*
 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
 * we have to be careful as someone else may be manipulating b_state as well.
 */
static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
{
	unsigned long old_state;
	unsigned long new_state;

	flags &= EXT4_MAP_FLAGS;

	/* Dummy buffer_head? Set non-atomically. */
	if (!bh->b_page) {
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
		return;
	}
	/*
	 * Someone else may be modifying b_state. Be careful! This is ugly but
	 * once we get rid of using bh as a container for mapping information
	 * to pass to / from get_block functions, this can go away.
	 */
	do {
		old_state = READ_ONCE(bh->b_state);
		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
	} while (unlikely(
		 cmpxchg(&bh->b_state, old_state, new_state) != old_state));
}
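
/*
 * Why the loop (an explanatory sketch, not from the original source): a
 * plain read-modify-write such as
 *
 *	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
 *
 * could lose a concurrent update to some other b_state bit made between
 * the load and the store by another path touching buffer state. The
 * cmpxchg() retries until the word is swapped in unchanged, so only the
 * EXT4_MAP_FLAGS bits are ever replaced.
 */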

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	struct ext4_map_blocks map;
	int ret = 0;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
			      flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		ext4_update_bh_state(bh, map.m_flags);
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	} else if (ret == 0) {
		/* hole case, need to fill in bh->b_size */
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
	}
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}

/*
 * Get block function used when preparing for buffered write if we require
 * creating an unwritten extent if blocks haven't been allocated. The extent
 * will be converted to written after the IO is complete.
 */
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	return _ext4_get_block(inode, iblock, bh_result,
			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

/*
 * Get blocks function for the cases that need to start a transaction -
 * generally different cases of direct IO and DAX IO. It also handles retries
 * in case of ENOSPC.
 */
static int ext4_get_block_trans(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int flags)
{
	int dio_credits;
	handle_t *handle;
	int retries = 0;
	int ret;

	/* Trim mapping request to maximum we can map at once for DIO */
	if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS)
		bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits;
	dio_credits = ext4_chunk_trans_blocks(inode,
				      bh_result->b_size >> inode->i_blkbits);
retry:
	handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	ret = _ext4_get_block(inode, iblock, bh_result, flags);
	ext4_journal_stop(handle);

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
	return ret;
}
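
/*
 * Size arithmetic (illustrative, not from the original source): with 4KiB
 * blocks (i_blkbits == 12), DIO_MAX_BLOCKS caps a single mapping request
 * at 4096 << 12 == 16MiB, so a larger direct IO is simply split into
 * several get_block calls, each running under its own transaction with
 * credits sized by ext4_chunk_trans_blocks().
 */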

/* Get block function for DIO reads and writes to inodes without extents */
int ext4_dio_get_block(struct inode *inode, sector_t iblock,
		       struct buffer_head *bh, int create)
{
	/* We don't expect handle for direct IO */
	WARN_ON_ONCE(ext4_journal_current_handle());

	if (!create)
		return _ext4_get_block(inode, iblock, bh, 0);
	return ext4_get_block_trans(inode, iblock, bh, EXT4_GET_BLOCKS_CREATE);
}

/*
 * Get block function for AIO DIO writes when we create an unwritten extent
 * if blocks are not yet allocated. The extent will be converted to written
 * after IO is complete.
 */
static int ext4_dio_get_block_unwritten_async(struct inode *inode,
		sector_t iblock, struct buffer_head *bh_result, int create)
{
	int ret;

	/* We don't expect handle for direct IO */
	WARN_ON_ONCE(ext4_journal_current_handle());

	ret = ext4_get_block_trans(inode, iblock, bh_result,
				   EXT4_GET_BLOCKS_IO_CREATE_EXT);

	/*
	 * When doing DIO using unwritten extents, we need io_end to convert
	 * unwritten extents to written on IO completion. We allocate io_end
	 * once we spot unwritten extent and store it in b_private. Generic
	 * DIO code keeps b_private set and furthermore passes the value to
	 * our completion callback in 'private' argument.
	 */
	if (!ret && buffer_unwritten(bh_result)) {
		if (!bh_result->b_private) {
			ext4_io_end_t *io_end;

			io_end = ext4_init_io_end(inode, GFP_KERNEL);
			if (!io_end)
				return -ENOMEM;
			bh_result->b_private = io_end;
			ext4_set_io_unwritten_flag(inode, io_end);
		}
		set_buffer_defer_completion(bh_result);
	}

	return ret;
}

/*
 * Get block function for non-AIO DIO writes when we create an unwritten
 * extent if blocks are not yet allocated. The extent will be converted to
 * written after IO is complete by ext4_direct_IO_write().
 */
static int ext4_dio_get_block_unwritten_sync(struct inode *inode,
		sector_t iblock, struct buffer_head *bh_result, int create)
{
	int ret;

	/* We don't expect handle for direct IO */
	WARN_ON_ONCE(ext4_journal_current_handle());

	ret = ext4_get_block_trans(inode, iblock, bh_result,
				   EXT4_GET_BLOCKS_IO_CREATE_EXT);

	/*
	 * Mark inode as having pending DIO writes to unwritten extents.
	 * ext4_direct_IO_write() checks this flag and converts extents to
	 * written.
	 */
	if (!ret && buffer_unwritten(bh_result))
		ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);

	return ret;
}

static int ext4_dio_get_block_overwrite(struct inode *inode, sector_t iblock,
		struct buffer_head *bh_result, int create)
{
	int ret;

	ext4_debug("ext4_dio_get_block_overwrite: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	/* We don't expect handle for direct IO */
	WARN_ON_ONCE(ext4_journal_current_handle());

	ret = _ext4_get_block(inode, iblock, bh_result, 0);
	/*
	 * Blocks should have been preallocated! ext4_file_write_iter() checks
	 * that.
	 */
	WARN_ON_ONCE(!buffer_mapped(bh_result) || buffer_unwritten(bh_result));

	return ret;
}


/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int map_flags)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int create = map_flags & EXT4_GET_BLOCKS_CREATE;
	int err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map, map_flags);

	if (err == 0)
		return create ? ERR_PTR(-ENOSPC) : NULL;
	if (err < 0)
		return ERR_PTR(err);

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata. For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (unlikely(err)) {
			unlock_buffer(bh);
			goto errout;
		}
		if (!buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			goto errout;
	} else
		BUFFER_TRACE(bh, "not a new buffer");
	return bh;
errout:
	brelse(bh);
	return ERR_PTR(err);
}
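
/*
 * Return-value convention (illustrative summary, not from the original
 * source): callers of ext4_getblk() have to distinguish three outcomes:
 *
 *	bh = ext4_getblk(handle, inode, block, 0);
 *	if (IS_ERR(bh))		// hard error, e.g. -EIO or -ENOMEM
 *		return PTR_ERR(bh);
 *	if (!bh)		// hole: no block mapped and create == 0
 *		...;
 *	// otherwise a valid buffer_head, released with brelse()
 *
 * With EXT4_GET_BLOCKS_CREATE a hole is impossible; an allocation failure
 * shows up as ERR_PTR(-ENOSPC) instead of NULL.
 */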

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int map_flags)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, map_flags);
	if (IS_ERR(bh))
		return bh;
	if (!bh || buffer_uptodate(bh))
		return bh;
	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	return ERR_PTR(-EIO);
}

/* Read a contiguous batch of blocks. */
int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
		     bool wait, struct buffer_head **bhs)
{
	int i, err;

	for (i = 0; i < bh_count; i++) {
		bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
		if (IS_ERR(bhs[i])) {
			err = PTR_ERR(bhs[i]);
			bh_count = i;
			goto out_brelse;
		}
	}

	for (i = 0; i < bh_count; i++)
		/* Note that NULL bhs[i] is valid because of holes. */
		if (bhs[i] && !buffer_uptodate(bhs[i]))
			ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1,
				    &bhs[i]);

	if (!wait)
		return 0;

	for (i = 0; i < bh_count; i++)
		if (bhs[i])
			wait_on_buffer(bhs[i]);

	for (i = 0; i < bh_count; i++) {
		if (bhs[i] && !buffer_uptodate(bhs[i])) {
			err = -EIO;
			goto out_brelse;
		}
	}
	return 0;

out_brelse:
	for (i = 0; i < bh_count; i++) {
		brelse(bhs[i]);
		bhs[i] = NULL;
	}
	return err;
}
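
/*
 * Usage sketch (illustrative, not from the original source): to read four
 * adjacent blocks and wait for all of them,
 *
 *	struct buffer_head *bhs[4];
 *	int err = ext4_bread_batch(inode, first_lblk, 4, true, bhs);
 *
 * On success each bhs[i] is either an uptodate buffer_head or NULL for a
 * hole; the caller releases the non-NULL ones with brelse(). Submitting
 * all reads before waiting lets the requests be merged instead of being
 * issued one at a time as ext4_bread() would.
 */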

int ext4_walk_page_buffers(handle_t *handle,
			   struct buffer_head *head,
			   unsigned from,
			   unsigned to,
			   int *partial,
			   int (*fn)(handle_t *handle,
				     struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
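
/*
 * Usage sketch (illustrative, not from the original source): callers in
 * this file apply a callback to every buffer covering bytes [from, to) of
 * a page, e.g.
 *
 *	ret = ext4_walk_page_buffers(handle, page_buffers(page),
 *				     from, to, NULL,
 *				     do_journal_get_write_access);
 *
 * Buffers outside the byte range are skipped; if any skipped buffer is not
 * uptodate and @partial is non-NULL, *partial is set so the caller knows
 * it must not mark the whole page uptodate.
 */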

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction. We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write(). So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage(). In that case, we
 * *know* that ext4_writepage() has generated enough buffer credits to do the
 * whole page. So we won't block on the journal in that case, which is good,
 * because the caller may be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes. If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated. We'll still have enough credits for the tiny quotafile
 * write.
 */
int do_journal_get_write_access(handle_t *handle,
				struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers. Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues. Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	BUFFER_TRACE(bh, "get write access");
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}

#ifdef CONFIG_EXT4_FS_ENCRYPTION
static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
				  get_block_t *get_block)
{
	unsigned from = pos & (PAGE_SIZE - 1);
	unsigned to = from + len;
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;
	unsigned bbits;
	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
	bool decrypt = false;

	BUG_ON(!PageLocked(page));
	BUG_ON(from > PAGE_SIZE);
	BUG_ON(to > PAGE_SIZE);
	BUG_ON(from > to);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);
	bbits = ilog2(blocksize);
	block = (sector_t)page->index << (PAGE_SHIFT - bbits);

	for (bh = head, block_start = 0; bh != head || !block_start;
	     block++, block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				break;
			if (buffer_new(bh)) {
				clean_bdev_bh_alias(bh);
				if (PageUptodate(page)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
					continue;
				}
				if (block_end > to || block_start < from)
					zero_user_segments(page, to, block_end,
							   block_start, from);
				continue;
			}
		}
		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		    (block_start < from || block_end > to)) {
			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
			*wait_bh++ = bh;
			decrypt = ext4_encrypted_inode(inode) &&
				  S_ISREG(inode->i_mode);
		}
	}
	/*
	 * If we issued read requests, let them complete.
	 */
	while (wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			err = -EIO;
	}
	if (unlikely(err))
		page_zero_new_buffers(page, from, to);
	else if (decrypt)
		err = fscrypt_decrypt_page(page->mapping->host, page,
					   PAGE_SIZE, 0, page->index);
	return err;
}
#endif

static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_SHIFT;
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
						    flags, pagep);
		if (ret < 0)
			return ret;
		if (ret == 1)
			return 0;
	}

	/*
	 * grab_cache_page_write_begin() can take a long time if the
	 * system is thrashing due to memory pressure, or if the page
	 * is being written back. So grab it first before we start
	 * the transaction handle. This also allows us to allocate
	 * the page (if needed) without using GFP_NOFS.
	 */
retry_grab:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	unlock_page(page);

retry_journal:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
	if (IS_ERR(handle)) {
		put_page(page);
		return PTR_ERR(handle);
	}

	lock_page(page);
	if (page->mapping != mapping) {
		/* The page got truncated from under us */
		unlock_page(page);
		put_page(page);
		ext4_journal_stop(handle);
		goto retry_grab;
	}
	/* In case writeback began while the page was unlocked */
	wait_for_stable_page(page);

#ifdef CONFIG_EXT4_FS_ENCRYPTION
	if (ext4_should_dioread_nolock(inode))
		ret = ext4_block_write_begin(page, pos, len,
					     ext4_get_block_unwritten);
	else
		ret = ext4_block_write_begin(page, pos, len,
					     ext4_get_block);
#else
	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len,
					  ext4_get_block_unwritten);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);
#endif
	if (!ret && ext4_should_journal_data(inode)) {
		ret = ext4_walk_page_buffers(handle, page_buffers(page),
					     from, to, NULL,
					     do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size. Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}

		if (ret == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry_journal;
		put_page(page);
		return ret;
	}
	*pagep = page;
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	int ret;
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	clear_buffer_meta(bh);
	clear_buffer_prio(bh);
	return ret;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list. metadata
 * buffers are managed internally.
 */
static int ext4_write_end(struct file *file,
			  struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int i_size_changed = 0;
	int inline_data = ext4_has_inline_data(inode);

	trace_ext4_write_end(inode, pos, len, copied);
	if (inline_data) {
		ret = ext4_write_inline_data_end(inode, pos, len,
						 copied, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			goto errout;
		}
		copied = ret;
	} else
		copied = block_write_end(file, mapping, pos,
					 len, copied, page, fsdata);
	/*
	 * it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	i_size_changed = ext4_update_inode_size(inode, pos + copied);
	unlock_page(page);
	put_page(page);

	if (old_size < pos)
		pagecache_isize_extended(inode, old_size, pos);
	/*
	 * Don't mark the inode dirty under page lock. First, it unnecessarily
	 * makes the holding time of page lock longer. Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed || inline_data)
		ext4_mark_inode_dirty(handle, inode);

	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks than copied, we will have
		 * blocks allocated outside inode->i_size, so truncate them.
		 */
		ext4_orphan_add(handle, inode);
errout:
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * This is a private version of page_zero_new_buffers() which doesn't
 * set the buffer to be dirty, since in data=journal mode we need
 * to call ext4_handle_dirty_metadata() instead.
 */
static void ext4_journalled_zero_new_buffers(handle_t *handle,
					     struct page *page,
					     unsigned from, unsigned to)
{
	unsigned int block_start = 0, block_end;
	struct buffer_head *head, *bh;

	bh = head = page_buffers(page);
	do {
		block_end = block_start + bh->b_size;
		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, size;

					start = max(from, block_start);
					size = min(to, block_end) - start;

					zero_user(page, start, size);
					write_end_fn(handle, bh);
				}
				clear_buffer_new(bh);
			}
		}
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}

static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	int size_changed = 0;
	int inline_data = ext4_has_inline_data(inode);

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (inline_data) {
		ret = ext4_write_inline_data_end(inode, pos, len,
						 copied, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			goto errout;
		}
		copied = ret;
	} else if (unlikely(copied < len) && !PageUptodate(page)) {
		copied = 0;
		ext4_journalled_zero_new_buffers(handle, page, from, to);
	} else {
		if (unlikely(copied < len))
			ext4_journalled_zero_new_buffers(handle, page,
							 from + copied, to);
		ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
					     from + copied, &partial,
					     write_end_fn);
		if (!partial)
			SetPageUptodate(page);
	}
	size_changed = ext4_update_inode_size(inode, pos + copied);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	unlock_page(page);
	put_page(page);

	if (old_size < pos)
		pagecache_isize_extended(inode, old_size, pos);

	if (size_changed || inline_data) {
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks than copied, we will have
		 * blocks allocated outside inode->i_size, so truncate them.
		 */
		ext4_orphan_add(handle, inode);

errout:
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * Reserve space for a single cluster
 */
static int ext4_da_reserve_space(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	int ret;

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end. Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
	if (ret)
		return ret;

	spin_lock(&ei->i_block_reservation_lock);
	if (ext4_claim_free_clusters(sbi, 1, 0)) {
		spin_unlock(&ei->i_block_reservation_lock);
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
		return -ENOSPC;
	}
	ei->i_reserved_data_blocks++;
	trace_ext4_da_reserve_space(inode);
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;	/* success */
}
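
/*
 * Unit note (illustrative, not from the original source): the reservation
 * is made in clusters but quota is charged in blocks, so on a bigalloc
 * filesystem with s_cluster_ratio == 16 and 4KiB blocks a single cluster
 * reservation charges EXT4_C2B(sbi, 1) == 16 blocks (64KiB) of quota,
 * while i_reserved_data_blocks and s_dirtyclusters_counter each move by
 * one cluster unit.
 */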

void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * if there aren't enough reserved blocks, then the
		 * counter is messed up somewhere. Since this
		 * function is called from invalidatepage, it's
		 * harmless to return without any action.
		 */
		ext4_warning(inode->i_sb, "ext4_da_release_space: "
			     "ino %lu, to_free %d with only %d reserved "
			     "data blocks", inode->i_ino, to_free,
			     ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}

static void ext4_da_page_release_reservation(struct page *page,
					     unsigned int offset,
					     unsigned int length)
{
	int contiguous_blks = 0;
	struct buffer_head *head, *bh;
	unsigned int curr_off = 0;
	struct inode *inode = page->mapping->host;
	unsigned int stop = offset + length;
	ext4_fsblk_t lblk;

	BUG_ON(stop > PAGE_SIZE || stop < length);

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;

		if (next_off > stop)
			break;

		if ((offset <= curr_off) && (buffer_delay(bh))) {
			contiguous_blks++;
			clear_buffer_delay(bh);
		} else if (contiguous_blks) {
			lblk = page->index <<
			       (PAGE_SHIFT - inode->i_blkbits);
			lblk += (curr_off >> inode->i_blkbits) -
				contiguous_blks;
			ext4_es_remove_blks(inode, lblk, contiguous_blks);
			contiguous_blks = 0;
		}
		curr_off = next_off;
	} while ((bh = bh->b_this_page) != head);

	if (contiguous_blks) {
		lblk = page->index << (PAGE_SHIFT - inode->i_blkbits);
		lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
		ext4_es_remove_blks(inode, lblk, contiguous_blks);
	}
}

/*
 * Delayed allocation stuff
 */

struct mpage_da_data {
	struct inode *inode;
	struct writeback_control *wbc;

	pgoff_t first_page;	/* The first page to write */
	pgoff_t next_page;	/* Current page to examine */
	pgoff_t last_page;	/* Last page to examine */
	/*
	 * Extent to map - this can be after first_page because that can be
	 * fully mapped. We somewhat abuse m_flags to store whether the extent
	 * is delalloc or unwritten.
	 */
	struct ext4_map_blocks map;
	struct ext4_io_submit io_submit;	/* IO submission data */
	unsigned int do_map:1;
};

static void mpage_release_unused_pages(struct mpage_da_data *mpd,
				       bool invalidate)
{
	int nr_pages, i;
	pgoff_t index, end;
	struct pagevec pvec;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	/* This is necessary when next_page == 0. */
	if (mpd->first_page >= mpd->next_page)
		return;

	index = mpd->first_page;
	end = mpd->next_page - 1;
	if (invalidate) {
		ext4_lblk_t start, last;
		start = index << (PAGE_SHIFT - inode->i_blkbits);
		last = end << (PAGE_SHIFT - inode->i_blkbits);
		ext4_es_remove_extent(inode, start, last - start + 1);
	}

	pagevec_init(&pvec);
	while (index <= end) {
		nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			if (invalidate) {
				if (page_mapped(page))
					clear_page_dirty_for_io(page);
				block_invalidatepage(page, 0, PAGE_SIZE);
				ClearPageUptodate(page);
			}
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}

static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct super_block *sb = inode->i_sb;
	struct ext4_inode_info *ei = EXT4_I(inode);

	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
		 EXT4_C2B(EXT4_SB(inode->i_sb),
			  ext4_count_free_clusters(sb)));
	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(sb),
		 percpu_counter_sum(&sbi->s_freeclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(sb),
		 percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "Block reservation details");
	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
		 ei->i_reserved_data_blocks);
	return;
}

static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
}

/*
 * ext4_insert_delayed_block - adds a delayed block to the extents status
 *                             tree, incrementing the reserved cluster/block
 *                             count or making a pending reservation
 *                             where needed
 *
 * @inode - file containing the newly added block
 * @lblk - logical block to be added
 *
 * Returns 0 on success, negative error code on failure.
 */
static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int ret;
	bool allocated = false;

	/*
	 * If the cluster containing lblk is shared with a delayed,
	 * written, or unwritten extent in a bigalloc file system, it's
	 * already been accounted for and does not need to be reserved.
	 * A pending reservation must be made for the cluster if it's
	 * shared with a written or unwritten extent and doesn't already
	 * have one. Written and unwritten extents can be purged from the
	 * extents status tree if the system is under memory pressure, so
	 * it's necessary to examine the extent tree if a search of the
	 * extents status tree doesn't get a match.
	 */
	if (sbi->s_cluster_ratio == 1) {
		ret = ext4_da_reserve_space(inode);
		if (ret != 0)	/* ENOSPC */
			goto errout;
	} else {	/* bigalloc */
		if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
			if (!ext4_es_scan_clu(inode,
					      &ext4_es_is_mapped, lblk)) {
				ret = ext4_clu_mapped(inode,
						      EXT4_B2C(sbi, lblk));
				if (ret < 0)
					goto errout;
				if (ret == 0) {
					ret = ext4_da_reserve_space(inode);
					if (ret != 0)	/* ENOSPC */
						goto errout;
				} else {
					allocated = true;
				}
			} else {
				allocated = true;
			}
		}
	}

	ret = ext4_es_insert_delayed_block(inode, lblk, allocated);

errout:
	return ret;
}
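
/*
 * Decision summary for the bigalloc branch above (an illustrative recap,
 * not from the original source), checked in order for the block's cluster:
 *
 *	cluster already has a delayed-only extent  -> nothing to reserve
 *	cluster mapped in the extent status tree   -> allocated = true
 *	cluster mapped on disk (ext4_clu_mapped)   -> allocated = true
 *	otherwise                                  -> reserve a new cluster
 *
 * "allocated" then tells ext4_es_insert_delayed_block() to record a
 * pending reservation for the already-allocated cluster instead of
 * charging a fresh one.
 */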
1865 */ 1866 if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) { 1867 map_bh(bh, inode->i_sb, invalid_block); 1868 set_buffer_new(bh); 1869 set_buffer_delay(bh); 1870 return 0; 1871 } 1872 1873 map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk; 1874 retval = es.es_len - (iblock - es.es_lblk); 1875 if (retval > map->m_len) 1876 retval = map->m_len; 1877 map->m_len = retval; 1878 if (ext4_es_is_written(&es)) 1879 map->m_flags |= EXT4_MAP_MAPPED; 1880 else if (ext4_es_is_unwritten(&es)) 1881 map->m_flags |= EXT4_MAP_UNWRITTEN; 1882 else 1883 BUG_ON(1); 1884 1885 #ifdef ES_AGGRESSIVE_TEST 1886 ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0); 1887 #endif 1888 return retval; 1889 } 1890 1891 /* 1892 * Try to see if we can get the block without requesting a new 1893 * file system block. 1894 */ 1895 down_read(&EXT4_I(inode)->i_data_sem); 1896 if (ext4_has_inline_data(inode)) 1897 retval = 0; 1898 else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 1899 retval = ext4_ext_map_blocks(NULL, inode, map, 0); 1900 else 1901 retval = ext4_ind_map_blocks(NULL, inode, map, 0); 1902 1903 add_delayed: 1904 if (retval == 0) { 1905 int ret; 1906 1907 /* 1908 * XXX: __block_prepare_write() unmaps passed block, 1909 * is it OK? 1910 */ 1911 1912 ret = ext4_insert_delayed_block(inode, map->m_lblk); 1913 if (ret != 0) { 1914 retval = ret; 1915 goto out_unlock; 1916 } 1917 1918 map_bh(bh, inode->i_sb, invalid_block); 1919 set_buffer_new(bh); 1920 set_buffer_delay(bh); 1921 } else if (retval > 0) { 1922 int ret; 1923 unsigned int status; 1924 1925 if (unlikely(retval != map->m_len)) { 1926 ext4_warning(inode->i_sb, 1927 "ES len assertion failed for inode " 1928 "%lu: retval %d != map->m_len %d", 1929 inode->i_ino, retval, map->m_len); 1930 WARN_ON(1); 1931 } 1932 1933 status = map->m_flags & EXT4_MAP_UNWRITTEN ? 1934 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; 1935 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, 1936 map->m_pblk, status); 1937 if (ret != 0) 1938 retval = ret; 1939 } 1940 1941 out_unlock: 1942 up_read((&EXT4_I(inode)->i_data_sem)); 1943 1944 return retval; 1945 } 1946 1947 /* 1948 * This is a special get_block_t callback which is used by 1949 * ext4_da_write_begin(). It will either return a mapped block or 1950 * reserve space for a single block. 1951 * 1952 * For a delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set. 1953 * We also have b_blocknr = -1 and b_bdev initialized properly. 1954 * 1955 * For an unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set. 1956 * We also have b_blocknr = the physical block mapping the unwritten extent 1957 * and b_bdev initialized properly. 1958 */ 1959 int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, 1960 struct buffer_head *bh, int create) 1961 { 1962 struct ext4_map_blocks map; 1963 int ret = 0; 1964 1965 BUG_ON(create == 0); 1966 BUG_ON(bh->b_size != inode->i_sb->s_blocksize); 1967 1968 map.m_lblk = iblock; 1969 map.m_len = 1; 1970 1971 /* 1972 * First, we need to know whether the block is already allocated; 1973 * preallocated blocks are unmapped but should be treated 1974 * the same as allocated blocks. 1975 */ 1976 ret = ext4_da_map_blocks(inode, iblock, &map, bh); 1977 if (ret <= 0) 1978 return ret; 1979 1980 map_bh(bh, inode->i_sb, map.m_pblk); 1981 ext4_update_bh_state(bh, map.m_flags); 1982 1983 if (buffer_unwritten(bh)) { 1984 /* A delayed write to unwritten bh should be marked 1985 * new and mapped.
Mapped ensures that we don't do 1986 * get_block multiple times when we write to the same 1987 * offset and new ensures that we do proper zero out 1988 * for partial write. 1989 */ 1990 set_buffer_new(bh); 1991 set_buffer_mapped(bh); 1992 } 1993 return 0; 1994 } 1995 1996 static int bget_one(handle_t *handle, struct buffer_head *bh) 1997 { 1998 get_bh(bh); 1999 return 0; 2000 } 2001 2002 static int bput_one(handle_t *handle, struct buffer_head *bh) 2003 { 2004 put_bh(bh); 2005 return 0; 2006 } 2007 2008 static int __ext4_journalled_writepage(struct page *page, 2009 unsigned int len) 2010 { 2011 struct address_space *mapping = page->mapping; 2012 struct inode *inode = mapping->host; 2013 struct buffer_head *page_bufs = NULL; 2014 handle_t *handle = NULL; 2015 int ret = 0, err = 0; 2016 int inline_data = ext4_has_inline_data(inode); 2017 struct buffer_head *inode_bh = NULL; 2018 2019 ClearPageChecked(page); 2020 2021 if (inline_data) { 2022 BUG_ON(page->index != 0); 2023 BUG_ON(len > ext4_get_max_inline_size(inode)); 2024 inode_bh = ext4_journalled_write_inline_data(inode, len, page); 2025 if (inode_bh == NULL) 2026 goto out; 2027 } else { 2028 page_bufs = page_buffers(page); 2029 if (!page_bufs) { 2030 BUG(); 2031 goto out; 2032 } 2033 ext4_walk_page_buffers(handle, page_bufs, 0, len, 2034 NULL, bget_one); 2035 } 2036 /* 2037 * We need to release the page lock before we start the 2038 * journal, so grab a reference so the page won't disappear 2039 * out from under us. 2040 */ 2041 get_page(page); 2042 unlock_page(page); 2043 2044 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 2045 ext4_writepage_trans_blocks(inode)); 2046 if (IS_ERR(handle)) { 2047 ret = PTR_ERR(handle); 2048 put_page(page); 2049 goto out_no_pagelock; 2050 } 2051 BUG_ON(!ext4_handle_valid(handle)); 2052 2053 lock_page(page); 2054 put_page(page); 2055 if (page->mapping != mapping) { 2056 /* The page got truncated from under us */ 2057 ext4_journal_stop(handle); 2058 ret = 0; 2059 goto out; 2060 } 2061 2062 if (inline_data) { 2063 ret = ext4_mark_inode_dirty(handle, inode); 2064 } else { 2065 ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, 2066 do_journal_get_write_access); 2067 2068 err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, 2069 write_end_fn); 2070 } 2071 if (ret == 0) 2072 ret = err; 2073 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; 2074 err = ext4_journal_stop(handle); 2075 if (!ret) 2076 ret = err; 2077 2078 if (!ext4_has_inline_data(inode)) 2079 ext4_walk_page_buffers(NULL, page_bufs, 0, len, 2080 NULL, bput_one); 2081 ext4_set_inode_state(inode, EXT4_STATE_JDATA); 2082 out: 2083 unlock_page(page); 2084 out_no_pagelock: 2085 brelse(inode_bh); 2086 return ret; 2087 } 2088 2089 /* 2090 * Note that we don't need to start a transaction unless we're journaling data 2091 * because we should have holes filled from ext4_page_mkwrite(). We even don't 2092 * need to file the inode to the transaction's list in ordered mode because if 2093 * we are writing back data added by write(), the inode is already there and if 2094 * we are writing back data modified via mmap(), no one guarantees in which 2095 * transaction the data will hit the disk. In case we are journaling data, we 2096 * cannot start transaction directly because transaction start ranks above page 2097 * lock so we have to do some magic. 2098 * 2099 * This function can get called via... 
2100 * - ext4_writepages after taking page lock (have journal handle) 2101 * - journal_submit_inode_data_buffers (no journal handle) 2102 * - shrink_page_list via the kswapd/direct reclaim (no journal handle) 2103 * - grab_page_cache when doing write_begin (have journal handle) 2104 * 2105 * We don't do any block allocation in this function. If we have a page with 2106 * multiple blocks we need to write those buffer_heads that are mapped. This 2107 * is important for mmap-based writes. So if, with blocksize 1K, we do 2108 * truncate(f, 1024); 2109 * a = mmap(f, 0, 4096); 2110 * a[0] = 'a'; 2111 * truncate(f, 4096); 2112 * then we have the first buffer_head in the page mapped via the page_mkwrite 2113 * callback, but the other buffer_heads would be unmapped but dirty (dirtied 2114 * via do_wp_page). So writepage should write the first block. If we modify 2115 * the mmap area beyond 1024 we will again get a page_fault and the 2116 * page_mkwrite callback will do the block allocation and mark the 2117 * buffer_heads mapped. 2118 * 2119 * We redirty the page if we have any buffer_heads that are either delayed or 2120 * unwritten in the page. 2121 * 2122 * We can get recursively called as shown below. 2123 * 2124 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() -> 2125 * ext4_writepage() 2126 * 2127 * But since we don't do any block allocation we should not deadlock. 2128 * The page also has the dirty flag cleared, so we don't get a recursive page_lock. 2129 */ 2130 static int ext4_writepage(struct page *page, 2131 struct writeback_control *wbc) 2132 { 2133 int ret = 0; 2134 loff_t size; 2135 unsigned int len; 2136 struct buffer_head *page_bufs = NULL; 2137 struct inode *inode = page->mapping->host; 2138 struct ext4_io_submit io_submit; 2139 bool keep_towrite = false; 2140 2141 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) { 2142 ext4_invalidatepage(page, 0, PAGE_SIZE); 2143 unlock_page(page); 2144 return -EIO; 2145 } 2146 2147 trace_ext4_writepage(page); 2148 size = i_size_read(inode); 2149 if (page->index == size >> PAGE_SHIFT) 2150 len = size & ~PAGE_MASK; 2151 else 2152 len = PAGE_SIZE; 2153 2154 page_bufs = page_buffers(page); 2155 /* 2156 * We cannot do block allocation or other extent handling in this 2157 * function. If there are buffers needing that, we have to redirty 2158 * the page. But we may reach here when we do a journal commit via 2159 * journal_submit_inode_data_buffers() and in that case we must write 2160 * allocated buffers to achieve data=ordered mode guarantees. 2161 * 2162 * Also, if there is only one buffer per page (the fs block 2163 * size == the page size), if one buffer needs block 2164 * allocation or needs to modify the extent tree to clear the 2165 * unwritten flag, we know that the page can't be written at 2166 * all, so we might as well refuse the write immediately. 2167 * Unfortunately if the block size != page size, we can't as 2168 * easily detect this case using ext4_walk_page_buffers(), but 2169 * for the extremely common case, this is an optimization that 2170 * skips a useless round trip through ext4_bio_write_page(). 2171 */ 2172 if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL, 2173 ext4_bh_delay_or_unwritten)) { 2174 redirty_page_for_writepage(wbc, page); 2175 if ((current->flags & PF_MEMALLOC) || 2176 (inode->i_sb->s_blocksize == PAGE_SIZE)) { 2177 /* 2178 * For memory cleaning there's no point in writing only 2179 * some buffers. So just bail out. Warn if we came here 2180 * from direct reclaim.
2181 */ 2182 WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) 2183 == PF_MEMALLOC); 2184 unlock_page(page); 2185 return 0; 2186 } 2187 keep_towrite = true; 2188 } 2189 2190 if (PageChecked(page) && ext4_should_journal_data(inode)) 2191 /* 2192 * It's mmapped pagecache. Add buffers and journal it. There 2193 * doesn't seem much point in redirtying the page here. 2194 */ 2195 return __ext4_journalled_writepage(page, len); 2196 2197 ext4_io_submit_init(&io_submit, wbc); 2198 io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS); 2199 if (!io_submit.io_end) { 2200 redirty_page_for_writepage(wbc, page); 2201 unlock_page(page); 2202 return -ENOMEM; 2203 } 2204 ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite); 2205 ext4_io_submit(&io_submit); 2206 /* Drop io_end reference we got from init */ 2207 ext4_put_io_end_defer(io_submit.io_end); 2208 return ret; 2209 } 2210 2211 static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page) 2212 { 2213 int len; 2214 loff_t size; 2215 int err; 2216 2217 BUG_ON(page->index != mpd->first_page); 2218 clear_page_dirty_for_io(page); 2219 /* 2220 * We have to be very careful here! Nothing protects writeback path 2221 * against i_size changes and the page can be writeably mapped into 2222 * page tables. So an application can be growing i_size and writing 2223 * data through mmap while writeback runs. clear_page_dirty_for_io() 2224 * write-protects our page in page tables and the page cannot get 2225 * written to again until we release page lock. So only after 2226 * clear_page_dirty_for_io() we are safe to sample i_size for 2227 * ext4_bio_write_page() to zero-out tail of the written page. We rely 2228 * on the barrier provided by TestClearPageDirty in 2229 * clear_page_dirty_for_io() to make sure i_size is really sampled only 2230 * after page tables are updated. 2231 */ 2232 size = i_size_read(mpd->inode); 2233 if (page->index == size >> PAGE_SHIFT) 2234 len = size & ~PAGE_MASK; 2235 else 2236 len = PAGE_SIZE; 2237 err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false); 2238 if (!err) 2239 mpd->wbc->nr_to_write--; 2240 mpd->first_page++; 2241 2242 return err; 2243 } 2244 2245 #define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay)) 2246 2247 /* 2248 * mballoc gives us at most this number of blocks... 2249 * XXX: That seems to be only a limitation of ext4_mb_normalize_request(). 2250 * The rest of mballoc seems to handle chunks up to full group size. 2251 */ 2252 #define MAX_WRITEPAGES_EXTENT_LEN 2048 2253 2254 /* 2255 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map 2256 * 2257 * @mpd - extent of blocks 2258 * @lblk - logical number of the block in the file 2259 * @bh - buffer head we want to add to the extent 2260 * 2261 * The function is used to collect contig. blocks in the same state. If the 2262 * buffer doesn't require mapping for writeback and we haven't started the 2263 * extent of buffers to map yet, the function returns 'true' immediately - the 2264 * caller can write the buffer right away. Otherwise the function returns true 2265 * if the block has been added to the extent, false if the block couldn't be 2266 * added. 2267 */ 2268 static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk, 2269 struct buffer_head *bh) 2270 { 2271 struct ext4_map_blocks *map = &mpd->map; 2272 2273 /* Buffer that doesn't need mapping for writeback? 
*/ 2274 if (!buffer_dirty(bh) || !buffer_mapped(bh) || 2275 (!buffer_delay(bh) && !buffer_unwritten(bh))) { 2276 /* So far no extent to map => we write the buffer right away */ 2277 if (map->m_len == 0) 2278 return true; 2279 return false; 2280 } 2281 2282 /* First block in the extent? */ 2283 if (map->m_len == 0) { 2284 /* We cannot map unless handle is started... */ 2285 if (!mpd->do_map) 2286 return false; 2287 map->m_lblk = lblk; 2288 map->m_len = 1; 2289 map->m_flags = bh->b_state & BH_FLAGS; 2290 return true; 2291 } 2292 2293 /* Don't go larger than mballoc is willing to allocate */ 2294 if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN) 2295 return false; 2296 2297 /* Can we merge the block to our big extent? */ 2298 if (lblk == map->m_lblk + map->m_len && 2299 (bh->b_state & BH_FLAGS) == map->m_flags) { 2300 map->m_len++; 2301 return true; 2302 } 2303 return false; 2304 } 2305 2306 /* 2307 * mpage_process_page_bufs - submit page buffers for IO or add them to extent 2308 * 2309 * @mpd - extent of blocks for mapping 2310 * @head - the first buffer in the page 2311 * @bh - buffer we should start processing from 2312 * @lblk - logical number of the block in the file corresponding to @bh 2313 * 2314 * Walk through page buffers from @bh up to @head (exclusive) and either submit 2315 * the page for IO if all buffers in this page were mapped and there's no 2316 * accumulated extent of buffers to map or add buffers in the page to the 2317 * extent of buffers to map. The function returns 1 if the caller can continue 2318 * by processing the next page, 0 if it should stop adding buffers to the 2319 * extent to map because we cannot extend it anymore. It can also return value 2320 * < 0 in case of error during IO submission. 2321 */ 2322 static int mpage_process_page_bufs(struct mpage_da_data *mpd, 2323 struct buffer_head *head, 2324 struct buffer_head *bh, 2325 ext4_lblk_t lblk) 2326 { 2327 struct inode *inode = mpd->inode; 2328 int err; 2329 ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1) 2330 >> inode->i_blkbits; 2331 2332 do { 2333 BUG_ON(buffer_locked(bh)); 2334 2335 if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) { 2336 /* Found extent to map? */ 2337 if (mpd->map.m_len) 2338 return 0; 2339 /* Buffer needs mapping and handle is not started? */ 2340 if (!mpd->do_map) 2341 return 0; 2342 /* Everything mapped so far and we hit EOF */ 2343 break; 2344 } 2345 } while (lblk++, (bh = bh->b_this_page) != head); 2346 /* So far everything mapped? Submit the page for IO. */ 2347 if (mpd->map.m_len == 0) { 2348 err = mpage_submit_page(mpd, head->b_page); 2349 if (err < 0) 2350 return err; 2351 } 2352 return lblk < blocks; 2353 } 2354 2355 /* 2356 * mpage_map_and_submit_buffers - update buffers corresponding to changed 2357 * extent and submit fully mapped pages for IO 2358 * 2359 * @mpd - description of extent to map, on return next extent to map 2360 * 2361 * Scan buffers corresponding to changed extent (we expect corresponding pages 2362 * to be already locked) and update buffer state according to new extent state. 2363 * We map delalloc buffers to their physical location, clear unwritten bits, 2364 * and mark buffers as uninit when we perform writes to unwritten extents 2365 * and do extent conversion after IO is finished. If the last page is not fully 2366 * mapped, we update @map to the next extent in the last page that needs 2367 * mapping. Otherwise we submit the page for IO.
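 *
 * A sketch of the blocksize < pagesize case (illustrative numbers, not
 * from the original comment): with 1K blocks and 4K pages, bpp_bits == 2,
 * so an extent with m_lblk == 0 and m_len == 6 covers page 0 fully but
 * only the first half of page 1; page 0 is submitted here, while @map is
 * reset and mpage_process_page_bufs() picks the next extent to map out
 * of page 1.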
2368 */ 2369 static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd) 2370 { 2371 struct pagevec pvec; 2372 int nr_pages, i; 2373 struct inode *inode = mpd->inode; 2374 struct buffer_head *head, *bh; 2375 int bpp_bits = PAGE_SHIFT - inode->i_blkbits; 2376 pgoff_t start, end; 2377 ext4_lblk_t lblk; 2378 sector_t pblock; 2379 int err; 2380 2381 start = mpd->map.m_lblk >> bpp_bits; 2382 end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits; 2383 lblk = start << bpp_bits; 2384 pblock = mpd->map.m_pblk; 2385 2386 pagevec_init(&pvec); 2387 while (start <= end) { 2388 nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, 2389 &start, end); 2390 if (nr_pages == 0) 2391 break; 2392 for (i = 0; i < nr_pages; i++) { 2393 struct page *page = pvec.pages[i]; 2394 2395 bh = head = page_buffers(page); 2396 do { 2397 if (lblk < mpd->map.m_lblk) 2398 continue; 2399 if (lblk >= mpd->map.m_lblk + mpd->map.m_len) { 2400 /* 2401 * Buffer after end of mapped extent. 2402 * Find next buffer in the page to map. 2403 */ 2404 mpd->map.m_len = 0; 2405 mpd->map.m_flags = 0; 2406 /* 2407 * FIXME: If dioread_nolock supports 2408 * blocksize < pagesize, we need to make 2409 * sure we add size mapped so far to 2410 * io_end->size as the following call 2411 * can submit the page for IO. 2412 */ 2413 err = mpage_process_page_bufs(mpd, head, 2414 bh, lblk); 2415 pagevec_release(&pvec); 2416 if (err > 0) 2417 err = 0; 2418 return err; 2419 } 2420 if (buffer_delay(bh)) { 2421 clear_buffer_delay(bh); 2422 bh->b_blocknr = pblock++; 2423 } 2424 clear_buffer_unwritten(bh); 2425 } while (lblk++, (bh = bh->b_this_page) != head); 2426 2427 /* 2428 * FIXME: This is going to break if dioread_nolock 2429 * supports blocksize < pagesize as we will try to 2430 * convert potentially unmapped parts of inode. 2431 */ 2432 mpd->io_submit.io_end->size += PAGE_SIZE; 2433 /* Page fully mapped - let IO run! */ 2434 err = mpage_submit_page(mpd, page); 2435 if (err < 0) { 2436 pagevec_release(&pvec); 2437 return err; 2438 } 2439 } 2440 pagevec_release(&pvec); 2441 } 2442 /* Extent fully mapped and matches with page boundary. We are done. */ 2443 mpd->map.m_len = 0; 2444 mpd->map.m_flags = 0; 2445 return 0; 2446 } 2447 2448 static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd) 2449 { 2450 struct inode *inode = mpd->inode; 2451 struct ext4_map_blocks *map = &mpd->map; 2452 int get_blocks_flags; 2453 int err, dioread_nolock; 2454 2455 trace_ext4_da_write_pages_extent(inode, map); 2456 /* 2457 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or 2458 * to convert an unwritten extent to be initialized (in the case 2459 * where we have written into one or more preallocated blocks). It is 2460 * possible that we're going to need more metadata blocks than 2461 * previously reserved. However we must not fail because we're in 2462 * writeback and there is nothing we can do about it so it might result 2463 * in data loss. So use reserved blocks to allocate metadata if 2464 * possible. 2465 * 2466 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if 2467 * the blocks in question are delalloc blocks. This indicates 2468 * that the blocks and quotas have already been checked when 2469 * the data was copied into the page cache.
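 *
 * (Illustrative note, not in the original comment: for a delalloc
 * extent on a dioread_nolock mount the flags computed below come out
 * as EXT4_GET_BLOCKS_CREATE | EXT4_GET_BLOCKS_METADATA_NOFAIL |
 * EXT4_GET_BLOCKS_IO_SUBMIT | EXT4_GET_BLOCKS_IO_CREATE_EXT |
 * EXT4_GET_BLOCKS_DELALLOC_RESERVE.)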
2470 */ 2471 get_blocks_flags = EXT4_GET_BLOCKS_CREATE | 2472 EXT4_GET_BLOCKS_METADATA_NOFAIL | 2473 EXT4_GET_BLOCKS_IO_SUBMIT; 2474 dioread_nolock = ext4_should_dioread_nolock(inode); 2475 if (dioread_nolock) 2476 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; 2477 if (map->m_flags & (1 << BH_Delay)) 2478 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; 2479 2480 err = ext4_map_blocks(handle, inode, map, get_blocks_flags); 2481 if (err < 0) 2482 return err; 2483 if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) { 2484 if (!mpd->io_submit.io_end->handle && 2485 ext4_handle_valid(handle)) { 2486 mpd->io_submit.io_end->handle = handle->h_rsv_handle; 2487 handle->h_rsv_handle = NULL; 2488 } 2489 ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end); 2490 } 2491 2492 BUG_ON(map->m_len == 0); 2493 if (map->m_flags & EXT4_MAP_NEW) { 2494 clean_bdev_aliases(inode->i_sb->s_bdev, map->m_pblk, 2495 map->m_len); 2496 } 2497 return 0; 2498 } 2499 2500 /* 2501 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length 2502 * mpd->len and submit pages underlying it for IO 2503 * 2504 * @handle - handle for journal operations 2505 * @mpd - extent to map 2506 * @give_up_on_write - we set this to true iff there is a fatal error and there 2507 * is no hope of writing the data. The caller should discard 2508 * dirty pages to avoid infinite loops. 2509 * 2510 * The function maps extent starting at mpd->lblk of length mpd->len. If it is 2511 * delayed, blocks are allocated, if it is unwritten, we may need to convert 2512 * them to initialized or split the described range from larger unwritten 2513 * extent. Note that we need not map all the described range since allocation 2514 * can return fewer blocks or the range is covered by more unwritten extents. We 2515 * cannot map more because we are limited by reserved transaction credits. On 2516 * the other hand we always make sure that the last touched page is fully 2517 * mapped so that it can be written out (and thus forward progress is 2518 * guaranteed). After mapping we submit all mapped pages for IO. 2519 */ 2520 static int mpage_map_and_submit_extent(handle_t *handle, 2521 struct mpage_da_data *mpd, 2522 bool *give_up_on_write) 2523 { 2524 struct inode *inode = mpd->inode; 2525 struct ext4_map_blocks *map = &mpd->map; 2526 int err; 2527 loff_t disksize; 2528 int progress = 0; 2529 2530 mpd->io_submit.io_end->offset = 2531 ((loff_t)map->m_lblk) << inode->i_blkbits; 2532 do { 2533 err = mpage_map_one_extent(handle, mpd); 2534 if (err < 0) { 2535 struct super_block *sb = inode->i_sb; 2536 2537 if (ext4_forced_shutdown(EXT4_SB(sb)) || 2538 EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED) 2539 goto invalidate_dirty_pages; 2540 /* 2541 * Let the upper layers retry transient errors. 2542 * In the case of ENOSPC, if ext4_count_free_clusters() 2543 * is non-zero, a commit should free up blocks. 2544 */ 2545 if ((err == -ENOMEM) || 2546 (err == -ENOSPC && ext4_count_free_clusters(sb))) { 2547 if (progress) 2548 goto update_disksize; 2549 return err; 2550 } 2551 ext4_msg(sb, KERN_CRIT, 2552 "Delayed block allocation failed for " 2553 "inode %lu at logical offset %llu with" 2554 " max blocks %u with error %d", 2555 inode->i_ino, 2556 (unsigned long long)map->m_lblk, 2557 (unsigned)map->m_len, -err); 2558 ext4_msg(sb, KERN_CRIT, 2559 "This should not happen!!
Data will " 2560 "be lost\n"); 2561 if (err == -ENOSPC) 2562 ext4_print_free_blocks(inode); 2563 invalidate_dirty_pages: 2564 *give_up_on_write = true; 2565 return err; 2566 } 2567 progress = 1; 2568 /* 2569 * Update buffer state, submit mapped pages, and get us new 2570 * extent to map 2571 */ 2572 err = mpage_map_and_submit_buffers(mpd); 2573 if (err < 0) 2574 goto update_disksize; 2575 } while (map->m_len); 2576 2577 update_disksize: 2578 /* 2579 * Update on-disk size after IO is submitted. Races with 2580 * truncate are avoided by checking i_size under i_data_sem. 2581 */ 2582 disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT; 2583 if (disksize > EXT4_I(inode)->i_disksize) { 2584 int err2; 2585 loff_t i_size; 2586 2587 down_write(&EXT4_I(inode)->i_data_sem); 2588 i_size = i_size_read(inode); 2589 if (disksize > i_size) 2590 disksize = i_size; 2591 if (disksize > EXT4_I(inode)->i_disksize) 2592 EXT4_I(inode)->i_disksize = disksize; 2593 up_write(&EXT4_I(inode)->i_data_sem); 2594 err2 = ext4_mark_inode_dirty(handle, inode); 2595 if (err2) 2596 ext4_error(inode->i_sb, 2597 "Failed to mark inode %lu dirty", 2598 inode->i_ino); 2599 if (!err) 2600 err = err2; 2601 } 2602 return err; 2603 } 2604 2605 /* 2606 * Calculate the total number of credits to reserve for one writepages 2607 * iteration. This is called from ext4_writepages(). We map an extent of 2608 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping 2609 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN + 2610 * bpp - 1 blocks in bpp different extents. 2611 */ 2612 static int ext4_da_writepages_trans_blocks(struct inode *inode) 2613 { 2614 int bpp = ext4_journal_blocks_per_page(inode); 2615 2616 return ext4_meta_trans_blocks(inode, 2617 MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp); 2618 } 2619 2620 /* 2621 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages 2622 * and underlying extent to map 2623 * 2624 * @mpd - where to look for pages 2625 * 2626 * Walk dirty pages in the mapping. If they are fully mapped, submit them for 2627 * IO immediately. When we find a page which isn't mapped we start accumulating 2628 * extent of buffers underlying these pages that needs mapping (formed by 2629 * either delayed or unwritten buffers). We also lock the pages containing 2630 * these buffers. The extent found is returned in @mpd structure (starting at 2631 * mpd->lblk with length mpd->len blocks). 2632 * 2633 * Note that this function can attach bios to one io_end structure which are 2634 * neither logically nor physically contiguous. Although it may seem as an 2635 * unnecessary complication, it is actually inevitable in blocksize < pagesize 2636 * case as we need to track IO to all buffers underlying a page in one io_end. 
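 *
 * (Illustrative case, assuming 1K blocks and 4K pages: if the first two
 * buffers of a page are mapped in one batch and the remaining two only
 * in a later one, both resulting bios must still be attached to the one
 * io_end tracking that page.)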
2637 */ 2638 static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) 2639 { 2640 struct address_space *mapping = mpd->inode->i_mapping; 2641 struct pagevec pvec; 2642 unsigned int nr_pages; 2643 long left = mpd->wbc->nr_to_write; 2644 pgoff_t index = mpd->first_page; 2645 pgoff_t end = mpd->last_page; 2646 xa_mark_t tag; 2647 int i, err = 0; 2648 int blkbits = mpd->inode->i_blkbits; 2649 ext4_lblk_t lblk; 2650 struct buffer_head *head; 2651 2652 if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages) 2653 tag = PAGECACHE_TAG_TOWRITE; 2654 else 2655 tag = PAGECACHE_TAG_DIRTY; 2656 2657 pagevec_init(&pvec); 2658 mpd->map.m_len = 0; 2659 mpd->next_page = index; 2660 while (index <= end) { 2661 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, 2662 tag); 2663 if (nr_pages == 0) 2664 goto out; 2665 2666 for (i = 0; i < nr_pages; i++) { 2667 struct page *page = pvec.pages[i]; 2668 2669 /* 2670 * Accumulated enough dirty pages? This doesn't apply 2671 * to WB_SYNC_ALL mode. For integrity sync we have to 2672 * keep going because someone may be concurrently 2673 * dirtying pages, and we might have synced a lot of 2674 * newly appeared dirty pages, but have not synced all 2675 * of the old dirty pages. 2676 */ 2677 if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0) 2678 goto out; 2679 2680 /* If we can't merge this page, we are done. */ 2681 if (mpd->map.m_len > 0 && mpd->next_page != page->index) 2682 goto out; 2683 2684 lock_page(page); 2685 /* 2686 * If the page is no longer dirty, or its mapping no 2687 * longer corresponds to inode we are writing (which 2688 * means it has been truncated or invalidated), or the 2689 * page is already under writeback and we are not doing 2690 * a data integrity writeback, skip the page 2691 */ 2692 if (!PageDirty(page) || 2693 (PageWriteback(page) && 2694 (mpd->wbc->sync_mode == WB_SYNC_NONE)) || 2695 unlikely(page->mapping != mapping)) { 2696 unlock_page(page); 2697 continue; 2698 } 2699 2700 wait_on_page_writeback(page); 2701 BUG_ON(PageWriteback(page)); 2702 2703 if (mpd->map.m_len == 0) 2704 mpd->first_page = page->index; 2705 mpd->next_page = page->index + 1; 2706 /* Add all dirty buffers to mpd */ 2707 lblk = ((ext4_lblk_t)page->index) << 2708 (PAGE_SHIFT - blkbits); 2709 head = page_buffers(page); 2710 err = mpage_process_page_bufs(mpd, head, head, lblk); 2711 if (err <= 0) 2712 goto out; 2713 err = 0; 2714 left--; 2715 } 2716 pagevec_release(&pvec); 2717 cond_resched(); 2718 } 2719 return 0; 2720 out: 2721 pagevec_release(&pvec); 2722 return err; 2723 } 2724 2725 static int ext4_writepages(struct address_space *mapping, 2726 struct writeback_control *wbc) 2727 { 2728 pgoff_t writeback_index = 0; 2729 long nr_to_write = wbc->nr_to_write; 2730 int range_whole = 0; 2731 int cycled = 1; 2732 handle_t *handle = NULL; 2733 struct mpage_da_data mpd; 2734 struct inode *inode = mapping->host; 2735 int needed_blocks, rsv_blocks = 0, ret = 0; 2736 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); 2737 bool done; 2738 struct blk_plug plug; 2739 bool give_up_on_write = false; 2740 2741 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) 2742 return -EIO; 2743 2744 percpu_down_read(&sbi->s_journal_flag_rwsem); 2745 trace_ext4_writepages(inode, wbc); 2746 2747 /* 2748 * No pages to write? 
This is mainly a kludge to avoid starting 2749 * a transaction for special inodes like journal inode on last iput() 2750 * because that could violate lock ordering on umount 2751 */ 2752 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 2753 goto out_writepages; 2754 2755 if (ext4_should_journal_data(inode)) { 2756 ret = generic_writepages(mapping, wbc); 2757 goto out_writepages; 2758 } 2759 2760 /* 2761 * If the filesystem has aborted, it is read-only, so return 2762 * right away instead of dumping stack traces later on that 2763 * will obscure the real source of the problem. We test 2764 * EXT4_MF_FS_ABORTED instead of sb->s_flag's SB_RDONLY because 2765 * the latter could be true if the filesystem is mounted 2766 * read-only, and in that case, ext4_writepages should 2767 * *never* be called, so if that ever happens, we would want 2768 * the stack trace. 2769 */ 2770 if (unlikely(ext4_forced_shutdown(EXT4_SB(mapping->host->i_sb)) || 2771 sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) { 2772 ret = -EROFS; 2773 goto out_writepages; 2774 } 2775 2776 if (ext4_should_dioread_nolock(inode)) { 2777 /* 2778 * We may need to convert up to one extent per block in 2779 * the page and we may dirty the inode. 2780 */ 2781 rsv_blocks = 1 + ext4_chunk_trans_blocks(inode, 2782 PAGE_SIZE >> inode->i_blkbits); 2783 } 2784 2785 /* 2786 * If we have inline data and arrive here, it means that 2787 * we will soon create the block for the 1st page, so 2788 * we'd better clear the inline data here. 2789 */ 2790 if (ext4_has_inline_data(inode)) { 2791 /* Just inode will be modified... */ 2792 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1); 2793 if (IS_ERR(handle)) { 2794 ret = PTR_ERR(handle); 2795 goto out_writepages; 2796 } 2797 BUG_ON(ext4_test_inode_state(inode, 2798 EXT4_STATE_MAY_INLINE_DATA)); 2799 ext4_destroy_inline_data(handle, inode); 2800 ext4_journal_stop(handle); 2801 } 2802 2803 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 2804 range_whole = 1; 2805 2806 if (wbc->range_cyclic) { 2807 writeback_index = mapping->writeback_index; 2808 if (writeback_index) 2809 cycled = 0; 2810 mpd.first_page = writeback_index; 2811 mpd.last_page = -1; 2812 } else { 2813 mpd.first_page = wbc->range_start >> PAGE_SHIFT; 2814 mpd.last_page = wbc->range_end >> PAGE_SHIFT; 2815 } 2816 2817 mpd.inode = inode; 2818 mpd.wbc = wbc; 2819 ext4_io_submit_init(&mpd.io_submit, wbc); 2820 retry: 2821 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 2822 tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page); 2823 done = false; 2824 blk_start_plug(&plug); 2825 2826 /* 2827 * First writeback pages that don't need mapping - we can avoid 2828 * starting a transaction unnecessarily and also avoid being blocked 2829 * in the block layer on device congestion while having transaction 2830 * started. 
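 * (Sketch of the resulting two-pass scheme, paraphrasing the code: the
 * do_map == 0 pass below submits fully mapped pages and stops at the
 * first page needing block allocation; the do_map == 1 loop that
 * follows then handles those pages with a transaction handle held.)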
2831 */ 2832 mpd.do_map = 0; 2833 mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL); 2834 if (!mpd.io_submit.io_end) { 2835 ret = -ENOMEM; 2836 goto unplug; 2837 } 2838 ret = mpage_prepare_extent_to_map(&mpd); 2839 /* Submit prepared bio */ 2840 ext4_io_submit(&mpd.io_submit); 2841 ext4_put_io_end_defer(mpd.io_submit.io_end); 2842 mpd.io_submit.io_end = NULL; 2843 /* Unlock pages we didn't use */ 2844 mpage_release_unused_pages(&mpd, false); 2845 if (ret < 0) 2846 goto unplug; 2847 2848 while (!done && mpd.first_page <= mpd.last_page) { 2849 /* For each extent of pages we use new io_end */ 2850 mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL); 2851 if (!mpd.io_submit.io_end) { 2852 ret = -ENOMEM; 2853 break; 2854 } 2855 2856 /* 2857 * We have two constraints: We find one extent to map and we 2858 * must always write out the whole page (makes a difference when 2859 * blocksize < pagesize) so that we don't block on IO when we 2860 * try to write out the rest of the page. Journalled mode is 2861 * not supported by delalloc. 2862 */ 2863 BUG_ON(ext4_should_journal_data(inode)); 2864 needed_blocks = ext4_da_writepages_trans_blocks(inode); 2865 2866 /* start a new transaction */ 2867 handle = ext4_journal_start_with_reserve(inode, 2868 EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks); 2869 if (IS_ERR(handle)) { 2870 ret = PTR_ERR(handle); 2871 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " 2872 "%ld pages, ino %lu; err %d", __func__, 2873 wbc->nr_to_write, inode->i_ino, ret); 2874 /* Release allocated io_end */ 2875 ext4_put_io_end(mpd.io_submit.io_end); 2876 mpd.io_submit.io_end = NULL; 2877 break; 2878 } 2879 mpd.do_map = 1; 2880 2881 trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc); 2882 ret = mpage_prepare_extent_to_map(&mpd); 2883 if (!ret) { 2884 if (mpd.map.m_len) 2885 ret = mpage_map_and_submit_extent(handle, &mpd, 2886 &give_up_on_write); 2887 else { 2888 /* 2889 * We scanned the whole range (or exhausted 2890 * nr_to_write), submitted what was mapped and 2891 * didn't find anything needing mapping. We are 2892 * done. 2893 */ 2894 done = true; 2895 } 2896 } 2897 /* 2898 * Caution: If the handle is synchronous, 2899 * ext4_journal_stop() can wait for transaction commit 2900 * to finish which may depend on writeback of pages to 2901 * complete or on page lock to be released. In that 2902 * case, we have to wait until after we have 2903 * submitted all the IO, released page locks we hold, 2904 * and dropped io_end reference (for extent conversion 2905 * to be able to complete) before stopping the handle. 2906 */ 2907 if (!ext4_handle_valid(handle) || handle->h_sync == 0) { 2908 ext4_journal_stop(handle); 2909 handle = NULL; 2910 mpd.do_map = 0; 2911 } 2912 /* Submit prepared bio */ 2913 ext4_io_submit(&mpd.io_submit); 2914 /* Unlock pages we didn't use */ 2915 mpage_release_unused_pages(&mpd, give_up_on_write); 2916 /* 2917 * Drop our io_end reference we got from init. We have 2918 * to be careful and use deferred io_end finishing if 2919 * we are still holding the transaction as we can 2920 * release the last reference to io_end which may end 2921 * up doing unwritten extent conversion.
2922 */ 2923 if (handle) { 2924 ext4_put_io_end_defer(mpd.io_submit.io_end); 2925 ext4_journal_stop(handle); 2926 } else 2927 ext4_put_io_end(mpd.io_submit.io_end); 2928 mpd.io_submit.io_end = NULL; 2929 2930 if (ret == -ENOSPC && sbi->s_journal) { 2931 /* 2932 * Commit the transaction which would 2933 * free blocks released in the transaction 2934 * and try again 2935 */ 2936 jbd2_journal_force_commit_nested(sbi->s_journal); 2937 ret = 0; 2938 continue; 2939 } 2940 /* Fatal error - ENOMEM, EIO... */ 2941 if (ret) 2942 break; 2943 } 2944 unplug: 2945 blk_finish_plug(&plug); 2946 if (!ret && !cycled && wbc->nr_to_write > 0) { 2947 cycled = 1; 2948 mpd.last_page = writeback_index - 1; 2949 mpd.first_page = 0; 2950 goto retry; 2951 } 2952 2953 /* Update index */ 2954 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 2955 /* 2956 * Set the writeback_index so that range_cyclic 2957 * mode will write it back later 2958 */ 2959 mapping->writeback_index = mpd.first_page; 2960 2961 out_writepages: 2962 trace_ext4_writepages_result(inode, wbc, ret, 2963 nr_to_write - wbc->nr_to_write); 2964 percpu_up_read(&sbi->s_journal_flag_rwsem); 2965 return ret; 2966 } 2967 2968 static int ext4_dax_writepages(struct address_space *mapping, 2969 struct writeback_control *wbc) 2970 { 2971 int ret; 2972 long nr_to_write = wbc->nr_to_write; 2973 struct inode *inode = mapping->host; 2974 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); 2975 2976 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) 2977 return -EIO; 2978 2979 percpu_down_read(&sbi->s_journal_flag_rwsem); 2980 trace_ext4_writepages(inode, wbc); 2981 2982 ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, wbc); 2983 trace_ext4_writepages_result(inode, wbc, ret, 2984 nr_to_write - wbc->nr_to_write); 2985 percpu_up_read(&sbi->s_journal_flag_rwsem); 2986 return ret; 2987 } 2988 2989 static int ext4_nonda_switch(struct super_block *sb) 2990 { 2991 s64 free_clusters, dirty_clusters; 2992 struct ext4_sb_info *sbi = EXT4_SB(sb); 2993 2994 /* 2995 * switch to non delalloc mode if we are running low 2996 * on free blocks. The free block accounting via percpu 2997 * counters can get slightly wrong with percpu_counter_batch getting 2998 * accumulated on each CPU without updating global counters. 2999 * Delalloc needs an accurate free block accounting. So switch 3000 * to non delalloc when we are near the error range. 3001 */ 3002 free_clusters = 3003 percpu_counter_read_positive(&sbi->s_freeclusters_counter); 3004 dirty_clusters = 3005 percpu_counter_read_positive(&sbi->s_dirtyclusters_counter); 3006 /* 3007 * Start pushing delalloc when 1/2 of free blocks are dirty.
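 * Worked example (illustrative numbers, not part of the original
 * comment): with free_clusters == 1000, the writeback nudge below kicks
 * in once dirty_clusters > 500, and the switch to non-delalloc triggers
 * when 2 * 1000 < 3 * dirty_clusters, i.e. dirty_clusters > 666, or
 * when free_clusters drops below dirty_clusters +
 * EXT4_FREECLUSTERS_WATERMARK.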
3008 */ 3009 if (dirty_clusters && (free_clusters < 2 * dirty_clusters)) 3010 try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE); 3011 3012 if (2 * free_clusters < 3 * dirty_clusters || 3013 free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) { 3014 /* 3015 * free block count is less than 150% of dirty blocks 3016 * or free blocks are less than the watermark 3017 */ 3018 return 1; 3019 } 3020 return 0; 3021 } 3022 3023 /* We always reserve for an inode update; the superblock could be there too */ 3024 static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len) 3025 { 3026 if (likely(ext4_has_feature_large_file(inode->i_sb))) 3027 return 1; 3028 3029 if (pos + len <= 0x7fffffffULL) 3030 return 1; 3031 3032 /* We might need to update the superblock to set LARGE_FILE */ 3033 return 2; 3034 } 3035 3036 static int ext4_da_write_begin(struct file *file, struct address_space *mapping, 3037 loff_t pos, unsigned len, unsigned flags, 3038 struct page **pagep, void **fsdata) 3039 { 3040 int ret, retries = 0; 3041 struct page *page; 3042 pgoff_t index; 3043 struct inode *inode = mapping->host; 3044 handle_t *handle; 3045 3046 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) 3047 return -EIO; 3048 3049 index = pos >> PAGE_SHIFT; 3050 3051 if (ext4_nonda_switch(inode->i_sb) || 3052 S_ISLNK(inode->i_mode)) { 3053 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; 3054 return ext4_write_begin(file, mapping, pos, 3055 len, flags, pagep, fsdata); 3056 } 3057 *fsdata = (void *)0; 3058 trace_ext4_da_write_begin(inode, pos, len, flags); 3059 3060 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { 3061 ret = ext4_da_write_inline_data_begin(mapping, inode, 3062 pos, len, flags, 3063 pagep, fsdata); 3064 if (ret < 0) 3065 return ret; 3066 if (ret == 1) 3067 return 0; 3068 } 3069 3070 /* 3071 * grab_cache_page_write_begin() can take a long time if the 3072 * system is thrashing due to memory pressure, or if the page 3073 * is being written back. So grab it first before we start 3074 * the transaction handle. This also allows us to allocate 3075 * the page (if needed) without using GFP_NOFS. 3076 */ 3077 retry_grab: 3078 page = grab_cache_page_write_begin(mapping, index, flags); 3079 if (!page) 3080 return -ENOMEM; 3081 unlock_page(page); 3082 3083 /* 3084 * With delayed allocation, we don't log the i_disksize update 3085 * if there is delayed block allocation. But we still need 3086 * to journal the i_disksize update if we write to the end 3087 * of a file which has an already mapped buffer. 3088 */ 3089 retry_journal: 3090 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 3091 ext4_da_write_credits(inode, pos, len)); 3092 if (IS_ERR(handle)) { 3093 put_page(page); 3094 return PTR_ERR(handle); 3095 } 3096 3097 lock_page(page); 3098 if (page->mapping != mapping) { 3099 /* The page got truncated from under us */ 3100 unlock_page(page); 3101 put_page(page); 3102 ext4_journal_stop(handle); 3103 goto retry_grab; 3104 } 3105 /* In case writeback began while the page was unlocked */ 3106 wait_for_stable_page(page); 3107 3108 #ifdef CONFIG_EXT4_FS_ENCRYPTION 3109 ret = ext4_block_write_begin(page, pos, len, 3110 ext4_da_get_block_prep); 3111 #else 3112 ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep); 3113 #endif 3114 if (ret < 0) { 3115 unlock_page(page); 3116 ext4_journal_stop(handle); 3117 /* 3118 * block_write_begin may have instantiated a few blocks 3119 * outside i_size. Trim these off again. Don't need 3120 * i_size_read because we hold i_mutex.
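 * (Example with assumed numbers: with 4K blocks, i_size == 100 and a
 * failed write at pos == 5000, block 1 was instantiated entirely past
 * EOF and ext4_truncate_failed_write() below trims it off again.)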
3121 */ 3122 if (pos + len > inode->i_size) 3123 ext4_truncate_failed_write(inode); 3124 3125 if (ret == -ENOSPC && 3126 ext4_should_retry_alloc(inode->i_sb, &retries)) 3127 goto retry_journal; 3128 3129 put_page(page); 3130 return ret; 3131 } 3132 3133 *pagep = page; 3134 return ret; 3135 } 3136 3137 /* 3138 * Check whether we should update i_disksize 3139 * when writing to the end of the file without requiring block allocation 3140 */ 3141 static int ext4_da_should_update_i_disksize(struct page *page, 3142 unsigned long offset) 3143 { 3144 struct buffer_head *bh; 3145 struct inode *inode = page->mapping->host; 3146 unsigned int idx; 3147 int i; 3148 3149 bh = page_buffers(page); 3150 idx = offset >> inode->i_blkbits; 3151 3152 for (i = 0; i < idx; i++) 3153 bh = bh->b_this_page; 3154 3155 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh)) 3156 return 0; 3157 return 1; 3158 } 3159 3160 static int ext4_da_write_end(struct file *file, 3161 struct address_space *mapping, 3162 loff_t pos, unsigned len, unsigned copied, 3163 struct page *page, void *fsdata) 3164 { 3165 struct inode *inode = mapping->host; 3166 int ret = 0, ret2; 3167 handle_t *handle = ext4_journal_current_handle(); 3168 loff_t new_i_size; 3169 unsigned long start, end; 3170 int write_mode = (int)(unsigned long)fsdata; 3171 3172 if (write_mode == FALL_BACK_TO_NONDELALLOC) 3173 return ext4_write_end(file, mapping, pos, 3174 len, copied, page, fsdata); 3175 3176 trace_ext4_da_write_end(inode, pos, len, copied); 3177 start = pos & (PAGE_SIZE - 1); 3178 end = start + copied - 1; 3179 3180 /* 3181 * generic_write_end() will run mark_inode_dirty() if i_size 3182 * changes. So let's piggyback the i_disksize mark_inode_dirty 3183 * into that. 3184 */ 3185 new_i_size = pos + copied; 3186 if (copied && new_i_size > EXT4_I(inode)->i_disksize) { 3187 if (ext4_has_inline_data(inode) || 3188 ext4_da_should_update_i_disksize(page, end)) { 3189 ext4_update_i_disksize(inode, new_i_size); 3190 /* We need to mark the inode dirty even if 3191 * new_i_size is less than inode->i_size 3192 * but greater than i_disksize. (hint: delalloc) 3193 */ 3194 ext4_mark_inode_dirty(handle, inode); 3195 } 3196 } 3197 3198 if (write_mode != CONVERT_INLINE_DATA && 3199 ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) && 3200 ext4_has_inline_data(inode)) 3201 ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied, 3202 page); 3203 else 3204 ret2 = generic_write_end(file, mapping, pos, len, copied, 3205 page, fsdata); 3206 3207 copied = ret2; 3208 if (ret2 < 0) 3209 ret = ret2; 3210 ret2 = ext4_journal_stop(handle); 3211 if (!ret) 3212 ret = ret2; 3213 3214 return ret ? ret : copied; 3215 } 3216 3217 static void ext4_da_invalidatepage(struct page *page, unsigned int offset, 3218 unsigned int length) 3219 { 3220 /* 3221 * Drop reserved blocks 3222 */ 3223 BUG_ON(!PageLocked(page)); 3224 if (!page_has_buffers(page)) 3225 goto out; 3226 3227 ext4_da_page_release_reservation(page, offset, length); 3228 3229 out: 3230 ext4_invalidatepage(page, offset, length); 3231 3232 return; 3233 } 3234 3235 /* 3236 * Force all delayed allocation blocks to be allocated for a given inode. 3237 */ 3238 int ext4_alloc_da_blocks(struct inode *inode) 3239 { 3240 trace_ext4_alloc_da_blocks(inode); 3241 3242 if (!EXT4_I(inode)->i_reserved_data_blocks) 3243 return 0; 3244 3245 /* 3246 * We do something simple for now.
The filemap_flush() will 3247 * also start triggering a write of the data blocks, which is 3248 * not strictly speaking necessary (and for users of 3249 * laptop_mode, not even desirable). However, to do otherwise 3250 * would require replicating code paths in: 3251 * 3252 * ext4_writepages() -> 3253 * write_cache_pages() ---> (via passed in callback function) 3254 * __mpage_da_writepage() --> 3255 * mpage_add_bh_to_extent() 3256 * mpage_da_map_blocks() 3257 * 3258 * The problem is that write_cache_pages(), located in 3259 * mm/page-writeback.c, marks pages clean in preparation for 3260 * doing I/O, which is not desirable if we're not planning on 3261 * doing I/O at all. 3262 * 3263 * We could call write_cache_pages(), and then redirty all of 3264 * the pages by calling redirty_page_for_writepage() but that 3265 * would be ugly in the extreme. So instead we would need to 3266 * replicate parts of the code in the above functions, 3267 * simplifying them because we wouldn't actually intend to 3268 * write out the pages, but rather only collect contiguous 3269 * logical block extents, call the multi-block allocator, and 3270 * then update the buffer heads with the block allocations. 3271 * 3272 * For now, though, we'll cheat by calling filemap_flush(), 3273 * which will map the blocks, and start the I/O, but not 3274 * actually wait for the I/O to complete. 3275 */ 3276 return filemap_flush(inode->i_mapping); 3277 } 3278 3279 /* 3280 * bmap() is special. It gets used by applications such as lilo and by 3281 * the swapper to find the on-disk block of a specific piece of data. 3282 * 3283 * Naturally, this is dangerous if the block concerned is still in the 3284 * journal. If somebody makes a swapfile on an ext4 data-journaling 3285 * filesystem and enables swap, then they may get a nasty shock when the 3286 * data getting swapped to that swapfile suddenly gets overwritten by 3287 * the original zeros written out previously to the journal and 3288 * awaiting writeback in the kernel's buffer cache. 3289 * 3290 * So, if we see any bmap calls here on a modified, data-journaled file, 3291 * take extra steps to flush any blocks which might be in the cache. 3292 */ 3293 static sector_t ext4_bmap(struct address_space *mapping, sector_t block) 3294 { 3295 struct inode *inode = mapping->host; 3296 journal_t *journal; 3297 int err; 3298 3299 /* 3300 * We can get here for an inline file via the FIBMAP ioctl 3301 */ 3302 if (ext4_has_inline_data(inode)) 3303 return 0; 3304 3305 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && 3306 test_opt(inode->i_sb, DELALLOC)) { 3307 /* 3308 * With delalloc we want to sync the file 3309 * so that we can make sure we allocate 3310 * blocks for the file 3311 */ 3312 filemap_write_and_wait(mapping); 3313 } 3314 3315 if (EXT4_JOURNAL(inode) && 3316 ext4_test_inode_state(inode, EXT4_STATE_JDATA)) { 3317 /* 3318 * This is a REALLY heavyweight approach, but the use of 3319 * bmap on dirty files is expected to be extremely rare: 3320 * only if we run lilo or swapon on a freshly made file 3321 * do we expect this to happen. 3322 * 3323 * (bmap requires CAP_SYS_RAWIO so this does not 3324 * represent an unprivileged user DOS attack --- we'd be 3325 * in trouble if mortal users could trigger this path at 3326 * will.) 3327 * 3328 * NB. EXT4_STATE_JDATA is not set on files other than 3329 * regular files. If somebody wants to bmap a directory 3330 * or symlink and gets confused because the buffer 3331 * hasn't yet been flushed to disk, they deserve 3332 * everything they get.
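 *
 * A user-space sketch of the kind of caller this guards against
 * (illustrative only, error handling omitted):
 *
 *	int fd = open("/boot/vmlinuz", O_RDONLY);
 *	int blk = 0;	(logical block in, physical block out)
 *	ioctl(fd, FIBMAP, &blk);
 *
 * If journalled data for the file is still sitting in the journal, the
 * flush below is what makes the number returned to such a caller valid
 * on disk.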
3333 */ 3334 3335 ext4_clear_inode_state(inode, EXT4_STATE_JDATA); 3336 journal = EXT4_JOURNAL(inode); 3337 jbd2_journal_lock_updates(journal); 3338 err = jbd2_journal_flush(journal); 3339 jbd2_journal_unlock_updates(journal); 3340 3341 if (err) 3342 return 0; 3343 } 3344 3345 return generic_block_bmap(mapping, block, ext4_get_block); 3346 } 3347 3348 static int ext4_readpage(struct file *file, struct page *page) 3349 { 3350 int ret = -EAGAIN; 3351 struct inode *inode = page->mapping->host; 3352 3353 trace_ext4_readpage(page); 3354 3355 if (ext4_has_inline_data(inode)) 3356 ret = ext4_readpage_inline(inode, page); 3357 3358 if (ret == -EAGAIN) 3359 return ext4_mpage_readpages(page->mapping, NULL, page, 1, 3360 false); 3361 3362 return ret; 3363 } 3364 3365 static int 3366 ext4_readpages(struct file *file, struct address_space *mapping, 3367 struct list_head *pages, unsigned nr_pages) 3368 { 3369 struct inode *inode = mapping->host; 3370 3371 /* If the file has inline data, no need to do readpages. */ 3372 if (ext4_has_inline_data(inode)) 3373 return 0; 3374 3375 return ext4_mpage_readpages(mapping, pages, NULL, nr_pages, true); 3376 } 3377 3378 static void ext4_invalidatepage(struct page *page, unsigned int offset, 3379 unsigned int length) 3380 { 3381 trace_ext4_invalidatepage(page, offset, length); 3382 3383 /* No journalling happens on data buffers when this function is used */ 3384 WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page))); 3385 3386 block_invalidatepage(page, offset, length); 3387 } 3388 3389 static int __ext4_journalled_invalidatepage(struct page *page, 3390 unsigned int offset, 3391 unsigned int length) 3392 { 3393 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3394 3395 trace_ext4_journalled_invalidatepage(page, offset, length); 3396 3397 /* 3398 * If it's a full truncate we just forget about the pending dirtying 3399 */ 3400 if (offset == 0 && length == PAGE_SIZE) 3401 ClearPageChecked(page); 3402 3403 return jbd2_journal_invalidatepage(journal, page, offset, length); 3404 } 3405 3406 /* Wrapper for aops... */ 3407 static void ext4_journalled_invalidatepage(struct page *page, 3408 unsigned int offset, 3409 unsigned int length) 3410 { 3411 WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0); 3412 } 3413 3414 static int ext4_releasepage(struct page *page, gfp_t wait) 3415 { 3416 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3417 3418 trace_ext4_releasepage(page); 3419 3420 /* Page has dirty journalled data -> cannot release */ 3421 if (PageChecked(page)) 3422 return 0; 3423 if (journal) 3424 return jbd2_journal_try_to_free_buffers(journal, page, wait); 3425 else 3426 return try_to_free_buffers(page); 3427 } 3428 3429 static bool ext4_inode_datasync_dirty(struct inode *inode) 3430 { 3431 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; 3432 3433 if (journal) 3434 return !jbd2_transaction_committed(journal, 3435 EXT4_I(inode)->i_datasync_tid); 3436 /* Any metadata buffers to write? 
*/ 3437 if (!list_empty(&inode->i_mapping->private_list)) 3438 return true; 3439 return inode->i_state & I_DIRTY_DATASYNC; 3440 } 3441 3442 static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length, 3443 unsigned flags, struct iomap *iomap) 3444 { 3445 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 3446 unsigned int blkbits = inode->i_blkbits; 3447 unsigned long first_block, last_block; 3448 struct ext4_map_blocks map; 3449 bool delalloc = false; 3450 int ret; 3451 3452 if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK) 3453 return -EINVAL; 3454 first_block = offset >> blkbits; 3455 last_block = min_t(loff_t, (offset + length - 1) >> blkbits, 3456 EXT4_MAX_LOGICAL_BLOCK); 3457 3458 if (flags & IOMAP_REPORT) { 3459 if (ext4_has_inline_data(inode)) { 3460 ret = ext4_inline_data_iomap(inode, iomap); 3461 if (ret != -EAGAIN) { 3462 if (ret == 0 && offset >= iomap->length) 3463 ret = -ENOENT; 3464 return ret; 3465 } 3466 } 3467 } else { 3468 if (WARN_ON_ONCE(ext4_has_inline_data(inode))) 3469 return -ERANGE; 3470 } 3471 3472 map.m_lblk = first_block; 3473 map.m_len = last_block - first_block + 1; 3474 3475 if (flags & IOMAP_REPORT) { 3476 ret = ext4_map_blocks(NULL, inode, &map, 0); 3477 if (ret < 0) 3478 return ret; 3479 3480 if (ret == 0) { 3481 ext4_lblk_t end = map.m_lblk + map.m_len - 1; 3482 struct extent_status es; 3483 3484 ext4_es_find_extent_range(inode, &ext4_es_is_delayed, 3485 map.m_lblk, end, &es); 3486 3487 if (!es.es_len || es.es_lblk > end) { 3488 /* entire range is a hole */ 3489 } else if (es.es_lblk > map.m_lblk) { 3490 /* range starts with a hole */ 3491 map.m_len = es.es_lblk - map.m_lblk; 3492 } else { 3493 ext4_lblk_t offs = 0; 3494 3495 if (es.es_lblk < map.m_lblk) 3496 offs = map.m_lblk - es.es_lblk; 3497 map.m_lblk = es.es_lblk + offs; 3498 map.m_len = es.es_len - offs; 3499 delalloc = true; 3500 } 3501 } 3502 } else if (flags & IOMAP_WRITE) { 3503 int dio_credits; 3504 handle_t *handle; 3505 int retries = 0; 3506 3507 /* Trim mapping request to maximum we can map at once for DIO */ 3508 if (map.m_len > DIO_MAX_BLOCKS) 3509 map.m_len = DIO_MAX_BLOCKS; 3510 dio_credits = ext4_chunk_trans_blocks(inode, map.m_len); 3511 retry: 3512 /* 3513 * Either we allocate blocks and then we don't get unwritten 3514 * extent so we have reserved enough credits, or the blocks 3515 * are already allocated and unwritten and in that case 3516 * extent conversion fits in the credits as well. 3517 */ 3518 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 3519 dio_credits); 3520 if (IS_ERR(handle)) 3521 return PTR_ERR(handle); 3522 3523 ret = ext4_map_blocks(handle, inode, &map, 3524 EXT4_GET_BLOCKS_CREATE_ZERO); 3525 if (ret < 0) { 3526 ext4_journal_stop(handle); 3527 if (ret == -ENOSPC && 3528 ext4_should_retry_alloc(inode->i_sb, &retries)) 3529 goto retry; 3530 return ret; 3531 } 3532 3533 /* 3534 * If we added blocks beyond i_size, we need to make sure they 3535 * will get truncated if we crash before updating i_size in 3536 * ext4_iomap_end(). For faults we don't need to do that (and 3537 * even cannot because for orphan list operations inode_lock is 3538 * required) - if we happen to instantiate block beyond i_size, 3539 * it is because we race with truncate which has already added 3540 * the inode to the orphan list. 
3541 */ 3542 if (!(flags & IOMAP_FAULT) && first_block + map.m_len > 3543 (i_size_read(inode) + (1 << blkbits) - 1) >> blkbits) { 3544 int err; 3545 3546 err = ext4_orphan_add(handle, inode); 3547 if (err < 0) { 3548 ext4_journal_stop(handle); 3549 return err; 3550 } 3551 } 3552 ext4_journal_stop(handle); 3553 } else { 3554 ret = ext4_map_blocks(NULL, inode, &map, 0); 3555 if (ret < 0) 3556 return ret; 3557 } 3558 3559 iomap->flags = 0; 3560 if (ext4_inode_datasync_dirty(inode)) 3561 iomap->flags |= IOMAP_F_DIRTY; 3562 iomap->bdev = inode->i_sb->s_bdev; 3563 iomap->dax_dev = sbi->s_daxdev; 3564 iomap->offset = (u64)first_block << blkbits; 3565 iomap->length = (u64)map.m_len << blkbits; 3566 3567 if (ret == 0) { 3568 iomap->type = delalloc ? IOMAP_DELALLOC : IOMAP_HOLE; 3569 iomap->addr = IOMAP_NULL_ADDR; 3570 } else { 3571 if (map.m_flags & EXT4_MAP_MAPPED) { 3572 iomap->type = IOMAP_MAPPED; 3573 } else if (map.m_flags & EXT4_MAP_UNWRITTEN) { 3574 iomap->type = IOMAP_UNWRITTEN; 3575 } else { 3576 WARN_ON_ONCE(1); 3577 return -EIO; 3578 } 3579 iomap->addr = (u64)map.m_pblk << blkbits; 3580 } 3581 3582 if (map.m_flags & EXT4_MAP_NEW) 3583 iomap->flags |= IOMAP_F_NEW; 3584 3585 return 0; 3586 } 3587 3588 static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length, 3589 ssize_t written, unsigned flags, struct iomap *iomap) 3590 { 3591 int ret = 0; 3592 handle_t *handle; 3593 int blkbits = inode->i_blkbits; 3594 bool truncate = false; 3595 3596 if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT)) 3597 return 0; 3598 3599 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); 3600 if (IS_ERR(handle)) { 3601 ret = PTR_ERR(handle); 3602 goto orphan_del; 3603 } 3604 if (ext4_update_inode_size(inode, offset + written)) 3605 ext4_mark_inode_dirty(handle, inode); 3606 /* 3607 * We may need to truncate allocated but not written blocks beyond EOF. 3608 */ 3609 if (iomap->offset + iomap->length > 3610 ALIGN(inode->i_size, 1 << blkbits)) { 3611 ext4_lblk_t written_blk, end_blk; 3612 3613 written_blk = (offset + written) >> blkbits; 3614 end_blk = (offset + length) >> blkbits; 3615 if (written_blk < end_blk && ext4_can_truncate(inode)) 3616 truncate = true; 3617 } 3618 /* 3619 * Remove the inode from the orphan list if we were extending an inode 3620 * and everything went fine. 3621 */ 3622 if (!truncate && inode->i_nlink && 3623 !list_empty(&EXT4_I(inode)->i_orphan)) 3624 ext4_orphan_del(handle, inode); 3625 ext4_journal_stop(handle); 3626 if (truncate) { 3627 ext4_truncate_failed_write(inode); 3628 orphan_del: 3629 /* 3630 * If truncate failed early the inode might still be on the 3631 * orphan list; we need to make sure the inode is removed from 3632 * the orphan list in that case. 3633 */ 3634 if (inode->i_nlink) 3635 ext4_orphan_del(NULL, inode); 3636 } 3637 return ret; 3638 } 3639 3640 const struct iomap_ops ext4_iomap_ops = { 3641 .iomap_begin = ext4_iomap_begin, 3642 .iomap_end = ext4_iomap_end, 3643 }; 3644 3645 static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset, 3646 ssize_t size, void *private) 3647 { 3648 ext4_io_end_t *io_end = private; 3649 3650 /* if not async direct IO just return */ 3651 if (!io_end) 3652 return 0; 3653 3654 ext_debug("ext4_end_io_dio(): io_end 0x%p " 3655 "for inode %lu, iocb 0x%p, offset %llu, size %zd\n", 3656 io_end, io_end->inode->i_ino, iocb, offset, size); 3657 3658 /* 3659 * Error during AIO DIO. We cannot convert unwritten extents as the 3660 * data was not written. Just clear the unwritten flag and drop io_end.
3661 */
3662 if (size <= 0) {
3663 ext4_clear_io_unwritten_flag(io_end);
3664 size = 0;
3665 }
3666 io_end->offset = offset;
3667 io_end->size = size;
3668 ext4_put_io_end(io_end);
3669
3670 return 0;
3671 }
3672
3673 /*
3674 * Handling of direct IO writes.
3675 *
3676 * For ext4 extent files, ext4 will do direct-io writes even to holes,
3677 * preallocated extents, and writes that extend the file; there is no
3678 * need to fall back to buffered IO.
3679 *
3680 * For holes, we allocate those blocks and mark them as unwritten.
3681 * If the blocks were preallocated, we make sure they are split, but
3682 * still keep the range to write as unwritten.
3683 *
3684 * The unwritten extents will be converted to written when DIO is completed.
3685 * For async direct IO, since the IO may still be pending when we return,
3686 * we set up an end_io callback function, which will do the conversion
3687 * when the async direct IO is completed.
3688 *
3689 * If the O_DIRECT write will extend the file, add this inode to the
3690 * orphan list, so that recovery will truncate it back to the original
3691 * size if the machine crashes during the write.
3692 *
3693 */
3694 static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
3695 {
3696 struct file *file = iocb->ki_filp;
3697 struct inode *inode = file->f_mapping->host;
3698 struct ext4_inode_info *ei = EXT4_I(inode);
3699 ssize_t ret;
3700 loff_t offset = iocb->ki_pos;
3701 size_t count = iov_iter_count(iter);
3702 int overwrite = 0;
3703 get_block_t *get_block_func = NULL;
3704 int dio_flags = 0;
3705 loff_t final_size = offset + count;
3706 int orphan = 0;
3707 handle_t *handle;
3708
3709 if (final_size > inode->i_size || final_size > ei->i_disksize) {
3710 /* Credits for sb + inode write */
3711 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
3712 if (IS_ERR(handle)) {
3713 ret = PTR_ERR(handle);
3714 goto out;
3715 }
3716 ret = ext4_orphan_add(handle, inode);
3717 if (ret) {
3718 ext4_journal_stop(handle);
3719 goto out;
3720 }
3721 orphan = 1;
3722 ext4_update_i_disksize(inode, inode->i_size);
3723 ext4_journal_stop(handle);
3724 }
3725
3726 BUG_ON(iocb->private == NULL);
3727
3728 /*
3729 * Make all waiters for direct IO properly wait also for extent
3730 * conversion. This also prevents a race between truncate() and
3731 * overwrite DIO, as i_dio_count needs to be incremented under i_mutex.
3732 */
3733 inode_dio_begin(inode);
3734
3735 /* If we do an overwrite dio, i_mutex locking can be released */
3736 overwrite = *((int *)iocb->private);
3737
3738 if (overwrite)
3739 inode_unlock(inode);
3740
3741 /*
3742 * For extent-mapped files we can direct-write into holes and fallocated extents.
3743 *
3744 * Allocated blocks to fill the hole are marked as unwritten to prevent
3745 * a parallel buffered read from exposing stale data before the DIO
3746 * completes the data IO.
3747 *
3748 * As for previously fallocated extents, ext4 get_block will simply
3749 * mark the buffer mapped but still keep the extents unwritten.
3750 *
3751 * In the non-AIO case, we will convert those unwritten extents to written
3752 * after returning from blockdev_direct_IO. That way we avoid
3753 * allocating an io_end structure and also the overhead of offloading
3754 * the extent conversion to a workqueue.
3755 *
3756 * For async DIO, the conversion needs to be deferred until the
3757 * IO is completed. The ext4 end_io callback function will be
3758 * called to take care of the conversion work. So in the async
3759 * case, we allocate an io_end structure to hook to the iocb.
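 *
 * To summarize the dispatch below (a condensed restatement of the code
 * that follows, not an independent specification):
 *
 *   overwrite of already-allocated blocks -> ext4_dio_get_block_overwrite
 *   indirect-mapped file, or a write whose
 *   aligned start is at/past i_size       -> ext4_dio_get_block
 *                                            (DIO_LOCKING | DIO_SKIP_HOLES)
 *   synchronous kiocb                     -> ext4_dio_get_block_unwritten_sync
 *   asynchronous kiocb                    -> ext4_dio_get_block_unwritten_async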
3760 */
3761 iocb->private = NULL;
3762 if (overwrite)
3763 get_block_func = ext4_dio_get_block_overwrite;
3764 else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
3765 round_down(offset, i_blocksize(inode)) >= inode->i_size) {
3766 get_block_func = ext4_dio_get_block;
3767 dio_flags = DIO_LOCKING | DIO_SKIP_HOLES;
3768 } else if (is_sync_kiocb(iocb)) {
3769 get_block_func = ext4_dio_get_block_unwritten_sync;
3770 dio_flags = DIO_LOCKING;
3771 } else {
3772 get_block_func = ext4_dio_get_block_unwritten_async;
3773 dio_flags = DIO_LOCKING;
3774 }
3775 ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
3776 get_block_func, ext4_end_io_dio, NULL,
3777 dio_flags);
3778
3779 if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
3780 EXT4_STATE_DIO_UNWRITTEN)) {
3781 int err;
3782 /*
3783 * in the non-AIO case, since the IO is already
3784 * completed, we can do the conversion right here
3785 */
3786 err = ext4_convert_unwritten_extents(NULL, inode,
3787 offset, ret);
3788 if (err < 0)
3789 ret = err;
3790 ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3791 }
3792
3793 inode_dio_end(inode);
3794 /* take i_mutex locking again if we did an overwrite dio */
3795 if (overwrite)
3796 inode_lock(inode);
3797
3798 if (ret < 0 && final_size > inode->i_size)
3799 ext4_truncate_failed_write(inode);
3800
3801 /* Handle extending of i_size after direct IO write */
3802 if (orphan) {
3803 int err;
3804
3805 /* Credits for sb + inode write */
3806 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
3807 if (IS_ERR(handle)) {
3808 /*
3809 * We wrote the data but cannot extend
3810 * i_size. Bail out. In the async IO case, we do
3811 * not return an error here because we have
3812 * already submitted the corresponding
3813 * bio. Returning an error here would make the caller
3814 * think that this IO is done and failed,
3815 * resulting in a race with the bio's completion
3816 * handler.
3817 */
3818 if (!ret)
3819 ret = PTR_ERR(handle);
3820 if (inode->i_nlink)
3821 ext4_orphan_del(NULL, inode);
3822
3823 goto out;
3824 }
3825 if (inode->i_nlink)
3826 ext4_orphan_del(handle, inode);
3827 if (ret > 0) {
3828 loff_t end = offset + ret;
3829 if (end > inode->i_size || end > ei->i_disksize) {
3830 ext4_update_i_disksize(inode, end);
3831 if (end > inode->i_size)
3832 i_size_write(inode, end);
3833 /*
3834 * We're going to return a positive `ret'
3835 * here due to non-zero-length I/O, so there's
3836 * no way of reporting error returns from
3837 * ext4_mark_inode_dirty() to userspace. So
3838 * ignore it.
3839 */
3840 ext4_mark_inode_dirty(handle, inode);
3841 }
3842 }
3843 err = ext4_journal_stop(handle);
3844 if (ret == 0)
3845 ret = err;
3846 }
3847 out:
3848 return ret;
3849 }
3850
3851 static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
3852 {
3853 struct address_space *mapping = iocb->ki_filp->f_mapping;
3854 struct inode *inode = mapping->host;
3855 size_t count = iov_iter_count(iter);
3856 ssize_t ret;
3857
3858 /*
3859 * Shared inode_lock is enough for us - it protects against concurrent
3860 * writes & truncates and since we take care of writing back page cache,
3861 * we are protected against page writeback as well.
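 *
 * Rough flow, mirroring the body of this function (shown only as a
 * sketch):
 *
 *   inode_lock_shared(inode);
 *   filemap_write_and_wait_range(...);   <- push dirty page cache to disk
 *   __blockdev_direct_IO(...);           <- read the blocks directly
 *   inode_unlock_shared(inode);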
3862 */ 3863 inode_lock_shared(inode); 3864 ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, 3865 iocb->ki_pos + count - 1); 3866 if (ret) 3867 goto out_unlock; 3868 ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, 3869 iter, ext4_dio_get_block, NULL, NULL, 0); 3870 out_unlock: 3871 inode_unlock_shared(inode); 3872 return ret; 3873 } 3874 3875 static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter) 3876 { 3877 struct file *file = iocb->ki_filp; 3878 struct inode *inode = file->f_mapping->host; 3879 size_t count = iov_iter_count(iter); 3880 loff_t offset = iocb->ki_pos; 3881 ssize_t ret; 3882 3883 #ifdef CONFIG_EXT4_FS_ENCRYPTION 3884 if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) 3885 return 0; 3886 #endif 3887 3888 /* 3889 * If we are doing data journalling we don't support O_DIRECT 3890 */ 3891 if (ext4_should_journal_data(inode)) 3892 return 0; 3893 3894 /* Let buffer I/O handle the inline data case. */ 3895 if (ext4_has_inline_data(inode)) 3896 return 0; 3897 3898 trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter)); 3899 if (iov_iter_rw(iter) == READ) 3900 ret = ext4_direct_IO_read(iocb, iter); 3901 else 3902 ret = ext4_direct_IO_write(iocb, iter); 3903 trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret); 3904 return ret; 3905 } 3906 3907 /* 3908 * Pages can be marked dirty completely asynchronously from ext4's journalling 3909 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do 3910 * much here because ->set_page_dirty is called under VFS locks. The page is 3911 * not necessarily locked. 3912 * 3913 * We cannot just dirty the page and leave attached buffers clean, because the 3914 * buffers' dirty state is "definitive". We cannot just set the buffers dirty 3915 * or jbddirty because all the journalling code will explode. 3916 * 3917 * So what we do is to mark the page "pending dirty" and next time writepage 3918 * is called, propagate that into the buffers appropriately. 
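 *
 * As a sketch of that flow (the details live in ext4_writepage()):
 *
 *   ext4_journalled_set_page_dirty()
 *     SetPageChecked(page);              <- the "pending dirty" mark
 *     __set_page_dirty_nobuffers();
 *   ...
 *   writepage() sees PageChecked() and calls __ext4_journalled_writepage()
 *   to dirty the buffers under a proper journal handle.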
3919 */ 3920 static int ext4_journalled_set_page_dirty(struct page *page) 3921 { 3922 SetPageChecked(page); 3923 return __set_page_dirty_nobuffers(page); 3924 } 3925 3926 static int ext4_set_page_dirty(struct page *page) 3927 { 3928 WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page)); 3929 WARN_ON_ONCE(!page_has_buffers(page)); 3930 return __set_page_dirty_buffers(page); 3931 } 3932 3933 static const struct address_space_operations ext4_aops = { 3934 .readpage = ext4_readpage, 3935 .readpages = ext4_readpages, 3936 .writepage = ext4_writepage, 3937 .writepages = ext4_writepages, 3938 .write_begin = ext4_write_begin, 3939 .write_end = ext4_write_end, 3940 .set_page_dirty = ext4_set_page_dirty, 3941 .bmap = ext4_bmap, 3942 .invalidatepage = ext4_invalidatepage, 3943 .releasepage = ext4_releasepage, 3944 .direct_IO = ext4_direct_IO, 3945 .migratepage = buffer_migrate_page, 3946 .is_partially_uptodate = block_is_partially_uptodate, 3947 .error_remove_page = generic_error_remove_page, 3948 }; 3949 3950 static const struct address_space_operations ext4_journalled_aops = { 3951 .readpage = ext4_readpage, 3952 .readpages = ext4_readpages, 3953 .writepage = ext4_writepage, 3954 .writepages = ext4_writepages, 3955 .write_begin = ext4_write_begin, 3956 .write_end = ext4_journalled_write_end, 3957 .set_page_dirty = ext4_journalled_set_page_dirty, 3958 .bmap = ext4_bmap, 3959 .invalidatepage = ext4_journalled_invalidatepage, 3960 .releasepage = ext4_releasepage, 3961 .direct_IO = ext4_direct_IO, 3962 .is_partially_uptodate = block_is_partially_uptodate, 3963 .error_remove_page = generic_error_remove_page, 3964 }; 3965 3966 static const struct address_space_operations ext4_da_aops = { 3967 .readpage = ext4_readpage, 3968 .readpages = ext4_readpages, 3969 .writepage = ext4_writepage, 3970 .writepages = ext4_writepages, 3971 .write_begin = ext4_da_write_begin, 3972 .write_end = ext4_da_write_end, 3973 .set_page_dirty = ext4_set_page_dirty, 3974 .bmap = ext4_bmap, 3975 .invalidatepage = ext4_da_invalidatepage, 3976 .releasepage = ext4_releasepage, 3977 .direct_IO = ext4_direct_IO, 3978 .migratepage = buffer_migrate_page, 3979 .is_partially_uptodate = block_is_partially_uptodate, 3980 .error_remove_page = generic_error_remove_page, 3981 }; 3982 3983 static const struct address_space_operations ext4_dax_aops = { 3984 .writepages = ext4_dax_writepages, 3985 .direct_IO = noop_direct_IO, 3986 .set_page_dirty = noop_set_page_dirty, 3987 .bmap = ext4_bmap, 3988 .invalidatepage = noop_invalidatepage, 3989 }; 3990 3991 void ext4_set_aops(struct inode *inode) 3992 { 3993 switch (ext4_inode_journal_mode(inode)) { 3994 case EXT4_INODE_ORDERED_DATA_MODE: 3995 case EXT4_INODE_WRITEBACK_DATA_MODE: 3996 break; 3997 case EXT4_INODE_JOURNAL_DATA_MODE: 3998 inode->i_mapping->a_ops = &ext4_journalled_aops; 3999 return; 4000 default: 4001 BUG(); 4002 } 4003 if (IS_DAX(inode)) 4004 inode->i_mapping->a_ops = &ext4_dax_aops; 4005 else if (test_opt(inode->i_sb, DELALLOC)) 4006 inode->i_mapping->a_ops = &ext4_da_aops; 4007 else 4008 inode->i_mapping->a_ops = &ext4_aops; 4009 } 4010 4011 static int __ext4_block_zero_page_range(handle_t *handle, 4012 struct address_space *mapping, loff_t from, loff_t length) 4013 { 4014 ext4_fsblk_t index = from >> PAGE_SHIFT; 4015 unsigned offset = from & (PAGE_SIZE-1); 4016 unsigned blocksize, pos; 4017 ext4_lblk_t iblock; 4018 struct inode *inode = mapping->host; 4019 struct buffer_head *bh; 4020 struct page *page; 4021 int err = 0; 4022 4023 page = find_or_create_page(mapping, from >> PAGE_SHIFT, 
4024 mapping_gfp_constraint(mapping, ~__GFP_FS));
4025 if (!page)
4026 return -ENOMEM;
4027
4028 blocksize = inode->i_sb->s_blocksize;
4029
4030 iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
4031
4032 if (!page_has_buffers(page))
4033 create_empty_buffers(page, blocksize, 0);
4034
4035 /* Find the buffer that contains "offset" */
4036 bh = page_buffers(page);
4037 pos = blocksize;
4038 while (offset >= pos) {
4039 bh = bh->b_this_page;
4040 iblock++;
4041 pos += blocksize;
4042 }
4043 if (buffer_freed(bh)) {
4044 BUFFER_TRACE(bh, "freed: skip");
4045 goto unlock;
4046 }
4047 if (!buffer_mapped(bh)) {
4048 BUFFER_TRACE(bh, "unmapped");
4049 ext4_get_block(inode, iblock, bh, 0);
4050 /* unmapped? It's a hole - nothing to do */
4051 if (!buffer_mapped(bh)) {
4052 BUFFER_TRACE(bh, "still unmapped");
4053 goto unlock;
4054 }
4055 }
4056
4057 /* Ok, it's mapped. Make sure it's up-to-date */
4058 if (PageUptodate(page))
4059 set_buffer_uptodate(bh);
4060
4061 if (!buffer_uptodate(bh)) {
4062 err = -EIO;
4063 ll_rw_block(REQ_OP_READ, 0, 1, &bh);
4064 wait_on_buffer(bh);
4065 /* Uhhuh. Read error. Complain and punt. */
4066 if (!buffer_uptodate(bh))
4067 goto unlock;
4068 if (S_ISREG(inode->i_mode) &&
4069 ext4_encrypted_inode(inode)) {
4070 /* We expect the key to be set. */
4071 BUG_ON(!fscrypt_has_encryption_key(inode));
4072 BUG_ON(blocksize != PAGE_SIZE);
4073 WARN_ON_ONCE(fscrypt_decrypt_page(page->mapping->host,
4074 page, PAGE_SIZE, 0, page->index));
4075 }
4076 }
4077 if (ext4_should_journal_data(inode)) {
4078 BUFFER_TRACE(bh, "get write access");
4079 err = ext4_journal_get_write_access(handle, bh);
4080 if (err)
4081 goto unlock;
4082 }
4083 zero_user(page, offset, length);
4084 BUFFER_TRACE(bh, "zeroed end of block");
4085
4086 if (ext4_should_journal_data(inode)) {
4087 err = ext4_handle_dirty_metadata(handle, inode, bh);
4088 } else {
4089 err = 0;
4090 mark_buffer_dirty(bh);
4091 if (ext4_should_order_data(inode))
4092 err = ext4_jbd2_inode_add_write(handle, inode);
4093 }
4094
4095 unlock:
4096 unlock_page(page);
4097 put_page(page);
4098 return err;
4099 }
4100
4101 /*
4102 * ext4_block_zero_page_range() zeros out a mapping of length 'length'
4103 * starting from file offset 'from'. The range to be zeroed must
4104 * be contained within one block. If the specified range exceeds
4105 * the end of the block, it will be shortened to the end of the block
4106 * that corresponds to 'from'.
4107 */
4108 static int ext4_block_zero_page_range(handle_t *handle,
4109 struct address_space *mapping, loff_t from, loff_t length)
4110 {
4111 struct inode *inode = mapping->host;
4112 unsigned offset = from & (PAGE_SIZE-1);
4113 unsigned blocksize = inode->i_sb->s_blocksize;
4114 unsigned max = blocksize - (offset & (blocksize - 1));
4115
4116 /*
4117 * Correct the length if it does not fall between
4118 * 'from' and the end of the block.
4119 */
4120 if (length > max || length < 0)
4121 length = max;
4122
4123 if (IS_DAX(inode)) {
4124 return iomap_zero_range(inode, from, length, NULL,
4125 &ext4_iomap_ops);
4126 }
4127 return __ext4_block_zero_page_range(handle, mapping, from, length);
4128 }
4129
4130 /*
4131 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
4132 * up to the end of the block which corresponds to `from'.
4133 * This is required during truncate. We need to physically zero the tail end
4134 * of that block so it doesn't yield old data if the file is later grown.
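 *
 * For example (4k blocks, numbers purely illustrative): after truncating
 * a file to i_size = 5000, byte 5000 sits 904 bytes into the second
 * block, so the remaining 3192 bytes (file bytes 5000..8191) get zeroed.
 * If the file is later grown past 8k, reads of that region then return
 * zeroes rather than the block's stale contents.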
4135 */
4136 static int ext4_block_truncate_page(handle_t *handle,
4137 struct address_space *mapping, loff_t from)
4138 {
4139 unsigned offset = from & (PAGE_SIZE-1);
4140 unsigned length;
4141 unsigned blocksize;
4142 struct inode *inode = mapping->host;
4143
4144 /* During orphan list handling of an encrypted inode the key may be unavailable; we cannot zero the block without it, so skip it */
4145 if (ext4_encrypted_inode(inode) && !fscrypt_has_encryption_key(inode))
4146 return 0;
4147
4148 blocksize = inode->i_sb->s_blocksize;
4149 length = blocksize - (offset & (blocksize - 1));
4150
4151 return ext4_block_zero_page_range(handle, mapping, from, length);
4152 }
4153
4154 int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
4155 loff_t lstart, loff_t length)
4156 {
4157 struct super_block *sb = inode->i_sb;
4158 struct address_space *mapping = inode->i_mapping;
4159 unsigned partial_start, partial_end;
4160 ext4_fsblk_t start, end;
4161 loff_t byte_end = (lstart + length - 1);
4162 int err = 0;
4163
4164 partial_start = lstart & (sb->s_blocksize - 1);
4165 partial_end = byte_end & (sb->s_blocksize - 1);
4166
4167 start = lstart >> sb->s_blocksize_bits;
4168 end = byte_end >> sb->s_blocksize_bits;
4169
4170 /* Handle partial zero within the single block */
4171 if (start == end &&
4172 (partial_start || (partial_end != sb->s_blocksize - 1))) {
4173 err = ext4_block_zero_page_range(handle, mapping,
4174 lstart, length);
4175 return err;
4176 }
4177 /* Handle partial zero out on the start of the range */
4178 if (partial_start) {
4179 err = ext4_block_zero_page_range(handle, mapping,
4180 lstart, sb->s_blocksize);
4181 if (err)
4182 return err;
4183 }
4184 /* Handle partial zero out on the end of the range */
4185 if (partial_end != sb->s_blocksize - 1)
4186 err = ext4_block_zero_page_range(handle, mapping,
4187 byte_end - partial_end,
4188 partial_end + 1);
4189 return err;
4190 }
4191
4192 int ext4_can_truncate(struct inode *inode)
4193 {
4194 if (S_ISREG(inode->i_mode))
4195 return 1;
4196 if (S_ISDIR(inode->i_mode))
4197 return 1;
4198 if (S_ISLNK(inode->i_mode))
4199 return !ext4_inode_is_fast_symlink(inode);
4200 return 0;
4201 }
4202
4203 /*
4204 * We have to make sure i_disksize gets properly updated before we truncate
4205 * page cache due to hole punching or zero range. Otherwise the i_disksize
4206 * update can get lost, as it may have been postponed until submission of
4207 * writeback, which will never happen once we truncate the page cache.
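 *
 * An illustrative sequence this guards against (a sketch, not a trace of
 * a particular workload):
 *   1. a delalloc buffered write extends the file; the i_disksize update
 *      is deferred to writeback submission
 *   2. a hole punch / zero range truncates those pages from page cache
 *   3. writeback for them is never submitted, so i_disksize never
 *      catches up with i_size
 * Updating i_disksize before step 2 closes that window.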
4208 */
4209 int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
4210 loff_t len)
4211 {
4212 handle_t *handle;
4213 loff_t size = i_size_read(inode);
4214
4215 WARN_ON(!inode_is_locked(inode));
4216 if (offset > size || offset + len < size)
4217 return 0;
4218
4219 if (EXT4_I(inode)->i_disksize >= size)
4220 return 0;
4221
4222 handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
4223 if (IS_ERR(handle))
4224 return PTR_ERR(handle);
4225 ext4_update_i_disksize(inode, size);
4226 ext4_mark_inode_dirty(handle, inode);
4227 ext4_journal_stop(handle);
4228
4229 return 0;
4230 }
4231
4232 static void ext4_wait_dax_page(struct ext4_inode_info *ei)
4233 {
4234 up_write(&ei->i_mmap_sem);
4235 schedule();
4236 down_write(&ei->i_mmap_sem);
4237 }
4238
4239 int ext4_break_layouts(struct inode *inode)
4240 {
4241 struct ext4_inode_info *ei = EXT4_I(inode);
4242 struct page *page;
4243 int error;
4244
4245 if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem)))
4246 return -EINVAL;
4247
4248 do {
4249 page = dax_layout_busy_page(inode->i_mapping);
4250 if (!page)
4251 return 0;
4252
4253 error = ___wait_var_event(&page->_refcount,
4254 atomic_read(&page->_refcount) == 1,
4255 TASK_INTERRUPTIBLE, 0, 0,
4256 ext4_wait_dax_page(ei));
4257 } while (error == 0);
4258
4259 return error;
4260 }
4261
4262 /*
4263 * ext4_punch_hole: punches a hole in a file by releasing the blocks
4264 * associated with the given offset and length
4265 *
4266 * @inode: File inode
4267 * @offset: The offset where the hole will begin
4268 * @len: The length of the hole
4269 *
4270 * Returns: 0 on success or negative on failure
4271 */
4272
4273 int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
4274 {
4275 struct super_block *sb = inode->i_sb;
4276 ext4_lblk_t first_block, stop_block;
4277 struct address_space *mapping = inode->i_mapping;
4278 loff_t first_block_offset, last_block_offset;
4279 handle_t *handle;
4280 unsigned int credits;
4281 int ret = 0;
4282
4283 if (!S_ISREG(inode->i_mode))
4284 return -EOPNOTSUPP;
4285
4286 trace_ext4_punch_hole(inode, offset, length, 0);
4287
4288 /*
4289 * Write out all dirty pages to avoid race conditions,
4290 * then release them.
4291 */
4292 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
4293 ret = filemap_write_and_wait_range(mapping, offset,
4294 offset + length - 1);
4295 if (ret)
4296 return ret;
4297 }
4298
4299 inode_lock(inode);
4300
4301 /* No need to punch hole beyond i_size */
4302 if (offset >= inode->i_size)
4303 goto out_mutex;
4304
4305 /*
4306 * If the hole extends beyond i_size, set the hole
4307 * to end after the page that contains i_size
4308 */
4309 if (offset + length > inode->i_size) {
4310 length = inode->i_size +
4311 PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
4312 offset;
4313 }
4314
4315 if (offset & (sb->s_blocksize - 1) ||
4316 (offset + length) & (sb->s_blocksize - 1)) {
4317 /*
4318 * Attach jinode to inode for jbd2 if we do any zeroing of a
4319 * partial block
4320 */
4321 ret = ext4_inode_attach_jinode(inode);
4322 if (ret < 0)
4323 goto out_mutex;
4324
4325 }
4326
4327 /* Wait for all existing dio workers; newcomers will block on i_mutex */
4328 inode_dio_wait(inode);
4329
4330 /*
4331 * Prevent page faults from reinstantiating pages we have released from
4332 * page cache.
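 *
 * (The fault paths take i_mmap_sem for reading, so holding it for
 * writing across truncate_pagecache_range() and the block removal below
 * keeps a racing fault from re-creating a page over the punched range.)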
4333 */
4334 down_write(&EXT4_I(inode)->i_mmap_sem);
4335
4336 ret = ext4_break_layouts(inode);
4337 if (ret)
4338 goto out_dio;
4339
4340 first_block_offset = round_up(offset, sb->s_blocksize);
4341 last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
4342
4343 /* Now release the pages and zero the block-aligned part of the range */
4344 if (last_block_offset > first_block_offset) {
4345 ret = ext4_update_disksize_before_punch(inode, offset, length);
4346 if (ret)
4347 goto out_dio;
4348 truncate_pagecache_range(inode, first_block_offset,
4349 last_block_offset);
4350 }
4351
4352 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4353 credits = ext4_writepage_trans_blocks(inode);
4354 else
4355 credits = ext4_blocks_for_truncate(inode);
4356 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4357 if (IS_ERR(handle)) {
4358 ret = PTR_ERR(handle);
4359 ext4_std_error(sb, ret);
4360 goto out_dio;
4361 }
4362
4363 ret = ext4_zero_partial_blocks(handle, inode, offset,
4364 length);
4365 if (ret)
4366 goto out_stop;
4367
4368 first_block = (offset + sb->s_blocksize - 1) >>
4369 EXT4_BLOCK_SIZE_BITS(sb);
4370 stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
4371
4372 /* If there are blocks to remove, do it */
4373 if (stop_block > first_block) {
4374
4375 down_write(&EXT4_I(inode)->i_data_sem);
4376 ext4_discard_preallocations(inode);
4377
4378 ret = ext4_es_remove_extent(inode, first_block,
4379 stop_block - first_block);
4380 if (ret) {
4381 up_write(&EXT4_I(inode)->i_data_sem);
4382 goto out_stop;
4383 }
4384
4385 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4386 ret = ext4_ext_remove_space(inode, first_block,
4387 stop_block - 1);
4388 else
4389 ret = ext4_ind_remove_space(handle, inode, first_block,
4390 stop_block);
4391
4392 up_write(&EXT4_I(inode)->i_data_sem);
4393 }
4394 if (IS_SYNC(inode))
4395 ext4_handle_sync(handle);
4396
4397 inode->i_mtime = inode->i_ctime = current_time(inode);
4398 ext4_mark_inode_dirty(handle, inode);
4399 if (ret >= 0)
4400 ext4_update_inode_fsync_trans(handle, inode, 1);
4401 out_stop:
4402 ext4_journal_stop(handle);
4403 out_dio:
4404 up_write(&EXT4_I(inode)->i_mmap_sem);
4405 out_mutex:
4406 inode_unlock(inode);
4407 return ret;
4408 }
4409
4410 int ext4_inode_attach_jinode(struct inode *inode)
4411 {
4412 struct ext4_inode_info *ei = EXT4_I(inode);
4413 struct jbd2_inode *jinode;
4414
4415 if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
4416 return 0;
4417
4418 jinode = jbd2_alloc_inode(GFP_KERNEL);
4419 spin_lock(&inode->i_lock);
4420 if (!ei->jinode) {
4421 if (!jinode) {
4422 spin_unlock(&inode->i_lock);
4423 return -ENOMEM;
4424 }
4425 ei->jinode = jinode;
4426 jbd2_journal_init_jbd_inode(ei->jinode, inode);
4427 jinode = NULL;
4428 }
4429 spin_unlock(&inode->i_lock);
4430 if (unlikely(jinode != NULL))
4431 jbd2_free_inode(jinode);
4432 return 0;
4433 }
4434
4435 /*
4436 * ext4_truncate()
4437 *
4438 * We block out ext4_get_block() block instantiations across the entire
4439 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
4440 * simultaneously on behalf of the same inode.
4441 *
4442 * As we work through the truncate and commit bits of it to the journal there
4443 * is one core, guiding principle: the file's tree must always be consistent on
4444 * disk. We must be able to restart the truncate after a crash.
4445 * 4446 * The file's tree may be transiently inconsistent in memory (although it 4447 * probably isn't), but whenever we close off and commit a journal transaction, 4448 * the contents of (the filesystem + the journal) must be consistent and 4449 * restartable. It's pretty simple, really: bottom up, right to left (although 4450 * left-to-right works OK too). 4451 * 4452 * Note that at recovery time, journal replay occurs *before* the restart of 4453 * truncate against the orphan inode list. 4454 * 4455 * The committed inode has the new, desired i_size (which is the same as 4456 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see 4457 * that this inode's truncate did not complete and it will again call 4458 * ext4_truncate() to have another go. So there will be instantiated blocks 4459 * to the right of the truncation point in a crashed ext4 filesystem. But 4460 * that's fine - as long as they are linked from the inode, the post-crash 4461 * ext4_truncate() run will find them and release them. 4462 */ 4463 int ext4_truncate(struct inode *inode) 4464 { 4465 struct ext4_inode_info *ei = EXT4_I(inode); 4466 unsigned int credits; 4467 int err = 0; 4468 handle_t *handle; 4469 struct address_space *mapping = inode->i_mapping; 4470 4471 /* 4472 * There is a possibility that we're either freeing the inode 4473 * or it's a completely new inode. In those cases we might not 4474 * have i_mutex locked because it's not necessary. 4475 */ 4476 if (!(inode->i_state & (I_NEW|I_FREEING))) 4477 WARN_ON(!inode_is_locked(inode)); 4478 trace_ext4_truncate_enter(inode); 4479 4480 if (!ext4_can_truncate(inode)) 4481 return 0; 4482 4483 ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 4484 4485 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) 4486 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); 4487 4488 if (ext4_has_inline_data(inode)) { 4489 int has_inline = 1; 4490 4491 err = ext4_inline_data_truncate(inode, &has_inline); 4492 if (err) 4493 return err; 4494 if (has_inline) 4495 return 0; 4496 } 4497 4498 /* If we zero-out tail of the page, we have to create jinode for jbd2 */ 4499 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) { 4500 if (ext4_inode_attach_jinode(inode) < 0) 4501 return 0; 4502 } 4503 4504 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4505 credits = ext4_writepage_trans_blocks(inode); 4506 else 4507 credits = ext4_blocks_for_truncate(inode); 4508 4509 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 4510 if (IS_ERR(handle)) 4511 return PTR_ERR(handle); 4512 4513 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) 4514 ext4_block_truncate_page(handle, mapping, inode->i_size); 4515 4516 /* 4517 * We add the inode to the orphan list, so that if this 4518 * truncate spans multiple transactions, and we crash, we will 4519 * resume the truncate when the filesystem recovers. It also 4520 * marks the inode dirty, to catch the new size. 4521 * 4522 * Implication: the file must always be in a sane, consistent 4523 * truncatable state while each transaction commits. 
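 *
 * For example: if a large truncate spans three transactions and the
 * machine dies after the second commit, journal replay restores a
 * consistent tree, recovery finds this inode on the orphan list, and
 * ext4_truncate() is simply run again; the blocks freed so far stay
 * freed and the retry finishes the remainder.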
4524 */ 4525 err = ext4_orphan_add(handle, inode); 4526 if (err) 4527 goto out_stop; 4528 4529 down_write(&EXT4_I(inode)->i_data_sem); 4530 4531 ext4_discard_preallocations(inode); 4532 4533 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4534 err = ext4_ext_truncate(handle, inode); 4535 else 4536 ext4_ind_truncate(handle, inode); 4537 4538 up_write(&ei->i_data_sem); 4539 if (err) 4540 goto out_stop; 4541 4542 if (IS_SYNC(inode)) 4543 ext4_handle_sync(handle); 4544 4545 out_stop: 4546 /* 4547 * If this was a simple ftruncate() and the file will remain alive, 4548 * then we need to clear up the orphan record which we created above. 4549 * However, if this was a real unlink then we were called by 4550 * ext4_evict_inode(), and we allow that function to clean up the 4551 * orphan info for us. 4552 */ 4553 if (inode->i_nlink) 4554 ext4_orphan_del(handle, inode); 4555 4556 inode->i_mtime = inode->i_ctime = current_time(inode); 4557 ext4_mark_inode_dirty(handle, inode); 4558 ext4_journal_stop(handle); 4559 4560 trace_ext4_truncate_exit(inode); 4561 return err; 4562 } 4563 4564 /* 4565 * ext4_get_inode_loc returns with an extra refcount against the inode's 4566 * underlying buffer_head on success. If 'in_mem' is true, we have all 4567 * data in memory that is needed to recreate the on-disk version of this 4568 * inode. 4569 */ 4570 static int __ext4_get_inode_loc(struct inode *inode, 4571 struct ext4_iloc *iloc, int in_mem) 4572 { 4573 struct ext4_group_desc *gdp; 4574 struct buffer_head *bh; 4575 struct super_block *sb = inode->i_sb; 4576 ext4_fsblk_t block; 4577 int inodes_per_block, inode_offset; 4578 4579 iloc->bh = NULL; 4580 if (inode->i_ino < EXT4_ROOT_INO || 4581 inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)) 4582 return -EFSCORRUPTED; 4583 4584 iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); 4585 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); 4586 if (!gdp) 4587 return -EIO; 4588 4589 /* 4590 * Figure out the offset within the block group inode table 4591 */ 4592 inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; 4593 inode_offset = ((inode->i_ino - 1) % 4594 EXT4_INODES_PER_GROUP(sb)); 4595 block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); 4596 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); 4597 4598 bh = sb_getblk(sb, block); 4599 if (unlikely(!bh)) 4600 return -ENOMEM; 4601 if (!buffer_uptodate(bh)) { 4602 lock_buffer(bh); 4603 4604 /* 4605 * If the buffer has the write error flag, we have failed 4606 * to write out another inode in the same block. In this 4607 * case, we don't have to read the block because we may 4608 * read the old inode data successfully. 4609 */ 4610 if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) 4611 set_buffer_uptodate(bh); 4612 4613 if (buffer_uptodate(bh)) { 4614 /* someone brought it uptodate while we waited */ 4615 unlock_buffer(bh); 4616 goto has_buffer; 4617 } 4618 4619 /* 4620 * If we have all information of the inode in memory and this 4621 * is the only valid inode in the block, we need not read the 4622 * block. 4623 */ 4624 if (in_mem) { 4625 struct buffer_head *bitmap_bh; 4626 int i, start; 4627 4628 start = inode_offset & ~(inodes_per_block - 1); 4629 4630 /* Is the inode bitmap in cache? 
*/ 4631 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); 4632 if (unlikely(!bitmap_bh)) 4633 goto make_io; 4634 4635 /* 4636 * If the inode bitmap isn't in cache then the 4637 * optimisation may end up performing two reads instead 4638 * of one, so skip it. 4639 */ 4640 if (!buffer_uptodate(bitmap_bh)) { 4641 brelse(bitmap_bh); 4642 goto make_io; 4643 } 4644 for (i = start; i < start + inodes_per_block; i++) { 4645 if (i == inode_offset) 4646 continue; 4647 if (ext4_test_bit(i, bitmap_bh->b_data)) 4648 break; 4649 } 4650 brelse(bitmap_bh); 4651 if (i == start + inodes_per_block) { 4652 /* all other inodes are free, so skip I/O */ 4653 memset(bh->b_data, 0, bh->b_size); 4654 set_buffer_uptodate(bh); 4655 unlock_buffer(bh); 4656 goto has_buffer; 4657 } 4658 } 4659 4660 make_io: 4661 /* 4662 * If we need to do any I/O, try to pre-readahead extra 4663 * blocks from the inode table. 4664 */ 4665 if (EXT4_SB(sb)->s_inode_readahead_blks) { 4666 ext4_fsblk_t b, end, table; 4667 unsigned num; 4668 __u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks; 4669 4670 table = ext4_inode_table(sb, gdp); 4671 /* s_inode_readahead_blks is always a power of 2 */ 4672 b = block & ~((ext4_fsblk_t) ra_blks - 1); 4673 if (table > b) 4674 b = table; 4675 end = b + ra_blks; 4676 num = EXT4_INODES_PER_GROUP(sb); 4677 if (ext4_has_group_desc_csum(sb)) 4678 num -= ext4_itable_unused_count(sb, gdp); 4679 table += num / inodes_per_block; 4680 if (end > table) 4681 end = table; 4682 while (b <= end) 4683 sb_breadahead(sb, b++); 4684 } 4685 4686 /* 4687 * There are other valid inodes in the buffer, this inode 4688 * has in-inode xattrs, or we don't have this inode in memory. 4689 * Read the block from disk. 4690 */ 4691 trace_ext4_load_inode(inode); 4692 get_bh(bh); 4693 bh->b_end_io = end_buffer_read_sync; 4694 submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh); 4695 wait_on_buffer(bh); 4696 if (!buffer_uptodate(bh)) { 4697 EXT4_ERROR_INODE_BLOCK(inode, block, 4698 "unable to read itable block"); 4699 brelse(bh); 4700 return -EIO; 4701 } 4702 } 4703 has_buffer: 4704 iloc->bh = bh; 4705 return 0; 4706 } 4707 4708 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) 4709 { 4710 /* We have all inode data except xattrs in memory here. 
*/ 4711 return __ext4_get_inode_loc(inode, iloc, 4712 !ext4_test_inode_state(inode, EXT4_STATE_XATTR)); 4713 } 4714 4715 static bool ext4_should_use_dax(struct inode *inode) 4716 { 4717 if (!test_opt(inode->i_sb, DAX)) 4718 return false; 4719 if (!S_ISREG(inode->i_mode)) 4720 return false; 4721 if (ext4_should_journal_data(inode)) 4722 return false; 4723 if (ext4_has_inline_data(inode)) 4724 return false; 4725 if (ext4_encrypted_inode(inode)) 4726 return false; 4727 return true; 4728 } 4729 4730 void ext4_set_inode_flags(struct inode *inode) 4731 { 4732 unsigned int flags = EXT4_I(inode)->i_flags; 4733 unsigned int new_fl = 0; 4734 4735 if (flags & EXT4_SYNC_FL) 4736 new_fl |= S_SYNC; 4737 if (flags & EXT4_APPEND_FL) 4738 new_fl |= S_APPEND; 4739 if (flags & EXT4_IMMUTABLE_FL) 4740 new_fl |= S_IMMUTABLE; 4741 if (flags & EXT4_NOATIME_FL) 4742 new_fl |= S_NOATIME; 4743 if (flags & EXT4_DIRSYNC_FL) 4744 new_fl |= S_DIRSYNC; 4745 if (ext4_should_use_dax(inode)) 4746 new_fl |= S_DAX; 4747 if (flags & EXT4_ENCRYPT_FL) 4748 new_fl |= S_ENCRYPTED; 4749 inode_set_flags(inode, new_fl, 4750 S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX| 4751 S_ENCRYPTED); 4752 } 4753 4754 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, 4755 struct ext4_inode_info *ei) 4756 { 4757 blkcnt_t i_blocks ; 4758 struct inode *inode = &(ei->vfs_inode); 4759 struct super_block *sb = inode->i_sb; 4760 4761 if (ext4_has_feature_huge_file(sb)) { 4762 /* we are using combined 48 bit field */ 4763 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | 4764 le32_to_cpu(raw_inode->i_blocks_lo); 4765 if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) { 4766 /* i_blocks represent file system block size */ 4767 return i_blocks << (inode->i_blkbits - 9); 4768 } else { 4769 return i_blocks; 4770 } 4771 } else { 4772 return le32_to_cpu(raw_inode->i_blocks_lo); 4773 } 4774 } 4775 4776 static inline int ext4_iget_extra_inode(struct inode *inode, 4777 struct ext4_inode *raw_inode, 4778 struct ext4_inode_info *ei) 4779 { 4780 __le32 *magic = (void *)raw_inode + 4781 EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize; 4782 4783 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <= 4784 EXT4_INODE_SIZE(inode->i_sb) && 4785 *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { 4786 ext4_set_inode_state(inode, EXT4_STATE_XATTR); 4787 return ext4_find_inline_data_nolock(inode); 4788 } else 4789 EXT4_I(inode)->i_inline_off = 0; 4790 return 0; 4791 } 4792 4793 int ext4_get_projid(struct inode *inode, kprojid_t *projid) 4794 { 4795 if (!ext4_has_feature_project(inode->i_sb)) 4796 return -EOPNOTSUPP; 4797 *projid = EXT4_I(inode)->i_projid; 4798 return 0; 4799 } 4800 4801 /* 4802 * ext4 has self-managed i_version for ea inodes, it stores the lower 32bit of 4803 * refcount in i_version, so use raw values if inode has EXT4_EA_INODE_FL flag 4804 * set. 
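 *
 * In other words (summarizing the two helpers that follow): EA inodes go
 * through inode_set_iversion_raw()/inode_peek_iversion_raw(), since the
 * stored value is a refcount rather than a change counter; all other
 * inodes use the normal queried variants so the VFS i_version machinery
 * behaves as usual.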
4805 */ 4806 static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val) 4807 { 4808 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) 4809 inode_set_iversion_raw(inode, val); 4810 else 4811 inode_set_iversion_queried(inode, val); 4812 } 4813 static inline u64 ext4_inode_peek_iversion(const struct inode *inode) 4814 { 4815 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) 4816 return inode_peek_iversion_raw(inode); 4817 else 4818 return inode_peek_iversion(inode); 4819 } 4820 4821 struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, 4822 ext4_iget_flags flags, const char *function, 4823 unsigned int line) 4824 { 4825 struct ext4_iloc iloc; 4826 struct ext4_inode *raw_inode; 4827 struct ext4_inode_info *ei; 4828 struct inode *inode; 4829 journal_t *journal = EXT4_SB(sb)->s_journal; 4830 long ret; 4831 loff_t size; 4832 int block; 4833 uid_t i_uid; 4834 gid_t i_gid; 4835 projid_t i_projid; 4836 4837 if ((!(flags & EXT4_IGET_SPECIAL) && 4838 (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) || 4839 (ino < EXT4_ROOT_INO) || 4840 (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) { 4841 if (flags & EXT4_IGET_HANDLE) 4842 return ERR_PTR(-ESTALE); 4843 __ext4_error(sb, function, line, 4844 "inode #%lu: comm %s: iget: illegal inode #", 4845 ino, current->comm); 4846 return ERR_PTR(-EFSCORRUPTED); 4847 } 4848 4849 inode = iget_locked(sb, ino); 4850 if (!inode) 4851 return ERR_PTR(-ENOMEM); 4852 if (!(inode->i_state & I_NEW)) 4853 return inode; 4854 4855 ei = EXT4_I(inode); 4856 iloc.bh = NULL; 4857 4858 ret = __ext4_get_inode_loc(inode, &iloc, 0); 4859 if (ret < 0) 4860 goto bad_inode; 4861 raw_inode = ext4_raw_inode(&iloc); 4862 4863 if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) { 4864 ext4_error_inode(inode, function, line, 0, 4865 "iget: root inode unallocated"); 4866 ret = -EFSCORRUPTED; 4867 goto bad_inode; 4868 } 4869 4870 if ((flags & EXT4_IGET_HANDLE) && 4871 (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) { 4872 ret = -ESTALE; 4873 goto bad_inode; 4874 } 4875 4876 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4877 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 4878 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 4879 EXT4_INODE_SIZE(inode->i_sb) || 4880 (ei->i_extra_isize & 3)) { 4881 ext4_error_inode(inode, function, line, 0, 4882 "iget: bad extra_isize %u " 4883 "(inode size %u)", 4884 ei->i_extra_isize, 4885 EXT4_INODE_SIZE(inode->i_sb)); 4886 ret = -EFSCORRUPTED; 4887 goto bad_inode; 4888 } 4889 } else 4890 ei->i_extra_isize = 0; 4891 4892 /* Precompute checksum seed for inode metadata */ 4893 if (ext4_has_metadata_csum(sb)) { 4894 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4895 __u32 csum; 4896 __le32 inum = cpu_to_le32(inode->i_ino); 4897 __le32 gen = raw_inode->i_generation; 4898 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, 4899 sizeof(inum)); 4900 ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, 4901 sizeof(gen)); 4902 } 4903 4904 if (!ext4_inode_csum_verify(inode, raw_inode, ei)) { 4905 ext4_error_inode(inode, function, line, 0, 4906 "iget: checksum invalid"); 4907 ret = -EFSBADCRC; 4908 goto bad_inode; 4909 } 4910 4911 inode->i_mode = le16_to_cpu(raw_inode->i_mode); 4912 i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 4913 i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 4914 if (ext4_has_feature_project(sb) && 4915 EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE && 4916 EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) 4917 i_projid = 
(projid_t)le32_to_cpu(raw_inode->i_projid);
4918 else
4919 i_projid = EXT4_DEF_PROJID;
4920
4921 if (!(test_opt(inode->i_sb, NO_UID32))) {
4922 i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
4923 i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4924 }
4925 i_uid_write(inode, i_uid);
4926 i_gid_write(inode, i_gid);
4927 ei->i_projid = make_kprojid(&init_user_ns, i_projid);
4928 set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
4929
4930 ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
4931 ei->i_inline_off = 0;
4932 ei->i_dir_start_lookup = 0;
4933 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4934 /* We now have enough fields to check if the inode was active or not.
4935 * This is needed because nfsd might try to access dead inodes;
4936 * the test is the same one that e2fsck uses.
4937 * NeilBrown 1999oct15
4938 */
4939 if (inode->i_nlink == 0) {
4940 if ((inode->i_mode == 0 ||
4941 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
4942 ino != EXT4_BOOT_LOADER_INO) {
4943 /* this inode is deleted */
4944 ret = -ESTALE;
4945 goto bad_inode;
4946 }
4947 /* The only unlinked inodes we let through here have
4948 * valid i_mode and are being read by the orphan
4949 * recovery code: that's fine, we're about to complete
4950 * the process of deleting those.
4951 * OR it is the EXT4_BOOT_LOADER_INO which is
4952 * not initialized on a new filesystem. */
4953 }
4954 ei->i_flags = le32_to_cpu(raw_inode->i_flags);
4955 ext4_set_inode_flags(inode);
4956 inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4957 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4958 if (ext4_has_feature_64bit(sb))
4959 ei->i_file_acl |=
4960 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4961 inode->i_size = ext4_isize(sb, raw_inode);
4962 if ((size = i_size_read(inode)) < 0) {
4963 ext4_error_inode(inode, function, line, 0,
4964 "iget: bad i_size value: %lld", size);
4965 ret = -EFSCORRUPTED;
4966 goto bad_inode;
4967 }
4968 ei->i_disksize = inode->i_size;
4969 #ifdef CONFIG_QUOTA
4970 ei->i_reserved_quota = 0;
4971 #endif
4972 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4973 ei->i_block_group = iloc.block_group;
4974 ei->i_last_alloc_group = ~0;
4975 /*
4976 * NOTE! The in-memory inode i_data array is in little-endian order
4977 * even on big-endian machines: we do NOT byteswap the block numbers!
4978 */
4979 for (block = 0; block < EXT4_N_BLOCKS; block++)
4980 ei->i_data[block] = raw_inode->i_block[block];
4981 INIT_LIST_HEAD(&ei->i_orphan);
4982
4983 /*
4984 * Set transaction id's of transactions that have to be committed
4985 * to finish f[data]sync. We set them to the currently running
4986 * transaction as we cannot be sure that the inode or some of its
4987 * metadata isn't part of the transaction - the inode could have
4988 * been reclaimed and now it is reread from disk.
4989 */
4990 if (journal) {
4991 transaction_t *transaction;
4992 tid_t tid;
4993
4994 read_lock(&journal->j_state_lock);
4995 if (journal->j_running_transaction)
4996 transaction = journal->j_running_transaction;
4997 else
4998 transaction = journal->j_committing_transaction;
4999 if (transaction)
5000 tid = transaction->t_tid;
5001 else
5002 tid = journal->j_commit_sequence;
5003 read_unlock(&journal->j_state_lock);
5004 ei->i_sync_tid = tid;
5005 ei->i_datasync_tid = tid;
5006 }
5007
5008 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
5009 if (ei->i_extra_isize == 0) {
5010 /* The extra space is currently unused. Use it.
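 * (On-disk layout, sketched with a 256-byte inode as an example: bytes
 * 0..127 are the original ext2-era inode, i_extra_isize counts the bytes
 * past 128 used by the fixed extra fields, and whatever remains up to
 * byte 255 is space for in-inode xattrs.)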
*/ 5011 BUILD_BUG_ON(sizeof(struct ext4_inode) & 3); 5012 ei->i_extra_isize = sizeof(struct ext4_inode) - 5013 EXT4_GOOD_OLD_INODE_SIZE; 5014 } else { 5015 ret = ext4_iget_extra_inode(inode, raw_inode, ei); 5016 if (ret) 5017 goto bad_inode; 5018 } 5019 } 5020 5021 EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); 5022 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); 5023 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); 5024 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); 5025 5026 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) { 5027 u64 ivers = le32_to_cpu(raw_inode->i_disk_version); 5028 5029 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 5030 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 5031 ivers |= 5032 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; 5033 } 5034 ext4_inode_set_iversion_queried(inode, ivers); 5035 } 5036 5037 ret = 0; 5038 if (ei->i_file_acl && 5039 !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { 5040 ext4_error_inode(inode, function, line, 0, 5041 "iget: bad extended attribute block %llu", 5042 ei->i_file_acl); 5043 ret = -EFSCORRUPTED; 5044 goto bad_inode; 5045 } else if (!ext4_has_inline_data(inode)) { 5046 /* validate the block references in the inode */ 5047 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 5048 (S_ISLNK(inode->i_mode) && 5049 !ext4_inode_is_fast_symlink(inode))) { 5050 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 5051 ret = ext4_ext_check_inode(inode); 5052 else 5053 ret = ext4_ind_check_inode(inode); 5054 } 5055 } 5056 if (ret) 5057 goto bad_inode; 5058 5059 if (S_ISREG(inode->i_mode)) { 5060 inode->i_op = &ext4_file_inode_operations; 5061 inode->i_fop = &ext4_file_operations; 5062 ext4_set_aops(inode); 5063 } else if (S_ISDIR(inode->i_mode)) { 5064 inode->i_op = &ext4_dir_inode_operations; 5065 inode->i_fop = &ext4_dir_operations; 5066 } else if (S_ISLNK(inode->i_mode)) { 5067 /* VFS does not allow setting these so must be corruption */ 5068 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) { 5069 ext4_error_inode(inode, function, line, 0, 5070 "iget: immutable or append flags " 5071 "not allowed on symlinks"); 5072 ret = -EFSCORRUPTED; 5073 goto bad_inode; 5074 } 5075 if (ext4_encrypted_inode(inode)) { 5076 inode->i_op = &ext4_encrypted_symlink_inode_operations; 5077 ext4_set_aops(inode); 5078 } else if (ext4_inode_is_fast_symlink(inode)) { 5079 inode->i_link = (char *)ei->i_data; 5080 inode->i_op = &ext4_fast_symlink_inode_operations; 5081 nd_terminate_link(ei->i_data, inode->i_size, 5082 sizeof(ei->i_data) - 1); 5083 } else { 5084 inode->i_op = &ext4_symlink_inode_operations; 5085 ext4_set_aops(inode); 5086 } 5087 inode_nohighmem(inode); 5088 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || 5089 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { 5090 inode->i_op = &ext4_special_inode_operations; 5091 if (raw_inode->i_block[0]) 5092 init_special_inode(inode, inode->i_mode, 5093 old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 5094 else 5095 init_special_inode(inode, inode->i_mode, 5096 new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 5097 } else if (ino == EXT4_BOOT_LOADER_INO) { 5098 make_bad_inode(inode); 5099 } else { 5100 ret = -EFSCORRUPTED; 5101 ext4_error_inode(inode, function, line, 0, 5102 "iget: bogus i_mode (%o)", inode->i_mode); 5103 goto bad_inode; 5104 } 5105 brelse(iloc.bh); 5106 5107 unlock_new_inode(inode); 5108 return inode; 5109 5110 bad_inode: 5111 brelse(iloc.bh); 5112 iget_failed(inode); 5113 return ERR_PTR(ret); 5114 } 5115 5116 static int 
ext4_inode_blocks_set(handle_t *handle, 5117 struct ext4_inode *raw_inode, 5118 struct ext4_inode_info *ei) 5119 { 5120 struct inode *inode = &(ei->vfs_inode); 5121 u64 i_blocks = inode->i_blocks; 5122 struct super_block *sb = inode->i_sb; 5123 5124 if (i_blocks <= ~0U) { 5125 /* 5126 * i_blocks can be represented in a 32 bit variable 5127 * as multiple of 512 bytes 5128 */ 5129 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 5130 raw_inode->i_blocks_high = 0; 5131 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 5132 return 0; 5133 } 5134 if (!ext4_has_feature_huge_file(sb)) 5135 return -EFBIG; 5136 5137 if (i_blocks <= 0xffffffffffffULL) { 5138 /* 5139 * i_blocks can be represented in a 48 bit variable 5140 * as multiple of 512 bytes 5141 */ 5142 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 5143 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 5144 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 5145 } else { 5146 ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE); 5147 /* i_block is stored in file system block size */ 5148 i_blocks = i_blocks >> (inode->i_blkbits - 9); 5149 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 5150 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 5151 } 5152 return 0; 5153 } 5154 5155 struct other_inode { 5156 unsigned long orig_ino; 5157 struct ext4_inode *raw_inode; 5158 }; 5159 5160 static int other_inode_match(struct inode * inode, unsigned long ino, 5161 void *data) 5162 { 5163 struct other_inode *oi = (struct other_inode *) data; 5164 5165 if ((inode->i_ino != ino) || 5166 (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW | 5167 I_DIRTY_INODE)) || 5168 ((inode->i_state & I_DIRTY_TIME) == 0)) 5169 return 0; 5170 spin_lock(&inode->i_lock); 5171 if (((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW | 5172 I_DIRTY_INODE)) == 0) && 5173 (inode->i_state & I_DIRTY_TIME)) { 5174 struct ext4_inode_info *ei = EXT4_I(inode); 5175 5176 inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED); 5177 spin_unlock(&inode->i_lock); 5178 5179 spin_lock(&ei->i_raw_lock); 5180 EXT4_INODE_SET_XTIME(i_ctime, inode, oi->raw_inode); 5181 EXT4_INODE_SET_XTIME(i_mtime, inode, oi->raw_inode); 5182 EXT4_INODE_SET_XTIME(i_atime, inode, oi->raw_inode); 5183 ext4_inode_csum_set(inode, oi->raw_inode, ei); 5184 spin_unlock(&ei->i_raw_lock); 5185 trace_ext4_other_inode_update_time(inode, oi->orig_ino); 5186 return -1; 5187 } 5188 spin_unlock(&inode->i_lock); 5189 return -1; 5190 } 5191 5192 /* 5193 * Opportunistically update the other time fields for other inodes in 5194 * the same inode table block. 5195 */ 5196 static void ext4_update_other_inodes_time(struct super_block *sb, 5197 unsigned long orig_ino, char *buf) 5198 { 5199 struct other_inode oi; 5200 unsigned long ino; 5201 int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; 5202 int inode_size = EXT4_INODE_SIZE(sb); 5203 5204 oi.orig_ino = orig_ino; 5205 /* 5206 * Calculate the first inode in the inode table block. Inode 5207 * numbers are one-based. That is, the first inode in a block 5208 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1). 5209 */ 5210 ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1; 5211 for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) { 5212 if (ino == orig_ino) 5213 continue; 5214 oi.raw_inode = (struct ext4_inode *) buf; 5215 (void) find_inode_nowait(sb, ino, other_inode_match, &oi); 5216 } 5217 } 5218 5219 /* 5220 * Post the struct inode info into an on-disk inode location in the 5221 * buffer-cache. 
This gobbles the caller's reference to the 5222 * buffer_head in the inode location struct. 5223 * 5224 * The caller must have write access to iloc->bh. 5225 */ 5226 static int ext4_do_update_inode(handle_t *handle, 5227 struct inode *inode, 5228 struct ext4_iloc *iloc) 5229 { 5230 struct ext4_inode *raw_inode = ext4_raw_inode(iloc); 5231 struct ext4_inode_info *ei = EXT4_I(inode); 5232 struct buffer_head *bh = iloc->bh; 5233 struct super_block *sb = inode->i_sb; 5234 int err = 0, rc, block; 5235 int need_datasync = 0, set_large_file = 0; 5236 uid_t i_uid; 5237 gid_t i_gid; 5238 projid_t i_projid; 5239 5240 spin_lock(&ei->i_raw_lock); 5241 5242 /* For fields not tracked in the in-memory inode, 5243 * initialise them to zero for new inodes. */ 5244 if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) 5245 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); 5246 5247 raw_inode->i_mode = cpu_to_le16(inode->i_mode); 5248 i_uid = i_uid_read(inode); 5249 i_gid = i_gid_read(inode); 5250 i_projid = from_kprojid(&init_user_ns, ei->i_projid); 5251 if (!(test_opt(inode->i_sb, NO_UID32))) { 5252 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid)); 5253 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid)); 5254 /* 5255 * Fix up interoperability with old kernels. Otherwise, old inodes get 5256 * re-used with the upper 16 bits of the uid/gid intact 5257 */ 5258 if (ei->i_dtime && list_empty(&ei->i_orphan)) { 5259 raw_inode->i_uid_high = 0; 5260 raw_inode->i_gid_high = 0; 5261 } else { 5262 raw_inode->i_uid_high = 5263 cpu_to_le16(high_16_bits(i_uid)); 5264 raw_inode->i_gid_high = 5265 cpu_to_le16(high_16_bits(i_gid)); 5266 } 5267 } else { 5268 raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid)); 5269 raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid)); 5270 raw_inode->i_uid_high = 0; 5271 raw_inode->i_gid_high = 0; 5272 } 5273 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 5274 5275 EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); 5276 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); 5277 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); 5278 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); 5279 5280 err = ext4_inode_blocks_set(handle, raw_inode, ei); 5281 if (err) { 5282 spin_unlock(&ei->i_raw_lock); 5283 goto out_brelse; 5284 } 5285 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); 5286 raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF); 5287 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) 5288 raw_inode->i_file_acl_high = 5289 cpu_to_le16(ei->i_file_acl >> 32); 5290 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); 5291 if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) { 5292 ext4_isize_set(raw_inode, ei->i_disksize); 5293 need_datasync = 1; 5294 } 5295 if (ei->i_disksize > 0x7fffffffULL) { 5296 if (!ext4_has_feature_large_file(sb) || 5297 EXT4_SB(sb)->s_es->s_rev_level == 5298 cpu_to_le32(EXT4_GOOD_OLD_REV)) 5299 set_large_file = 1; 5300 } 5301 raw_inode->i_generation = cpu_to_le32(inode->i_generation); 5302 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 5303 if (old_valid_dev(inode->i_rdev)) { 5304 raw_inode->i_block[0] = 5305 cpu_to_le32(old_encode_dev(inode->i_rdev)); 5306 raw_inode->i_block[1] = 0; 5307 } else { 5308 raw_inode->i_block[0] = 0; 5309 raw_inode->i_block[1] = 5310 cpu_to_le32(new_encode_dev(inode->i_rdev)); 5311 raw_inode->i_block[2] = 0; 5312 } 5313 } else if (!ext4_has_inline_data(inode)) { 5314 for (block = 0; block < EXT4_N_BLOCKS; block++) 5315 raw_inode->i_block[block] = ei->i_data[block]; 5316 } 5317 5318 if 
(likely(!test_opt2(inode->i_sb, HURD_COMPAT))) { 5319 u64 ivers = ext4_inode_peek_iversion(inode); 5320 5321 raw_inode->i_disk_version = cpu_to_le32(ivers); 5322 if (ei->i_extra_isize) { 5323 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 5324 raw_inode->i_version_hi = 5325 cpu_to_le32(ivers >> 32); 5326 raw_inode->i_extra_isize = 5327 cpu_to_le16(ei->i_extra_isize); 5328 } 5329 } 5330 5331 BUG_ON(!ext4_has_feature_project(inode->i_sb) && 5332 i_projid != EXT4_DEF_PROJID); 5333 5334 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && 5335 EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) 5336 raw_inode->i_projid = cpu_to_le32(i_projid); 5337 5338 ext4_inode_csum_set(inode, raw_inode, ei); 5339 spin_unlock(&ei->i_raw_lock); 5340 if (inode->i_sb->s_flags & SB_LAZYTIME) 5341 ext4_update_other_inodes_time(inode->i_sb, inode->i_ino, 5342 bh->b_data); 5343 5344 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 5345 rc = ext4_handle_dirty_metadata(handle, NULL, bh); 5346 if (!err) 5347 err = rc; 5348 ext4_clear_inode_state(inode, EXT4_STATE_NEW); 5349 if (set_large_file) { 5350 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access"); 5351 err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh); 5352 if (err) 5353 goto out_brelse; 5354 ext4_update_dynamic_rev(sb); 5355 ext4_set_feature_large_file(sb); 5356 ext4_handle_sync(handle); 5357 err = ext4_handle_dirty_super(handle, sb); 5358 } 5359 ext4_update_inode_fsync_trans(handle, inode, need_datasync); 5360 out_brelse: 5361 brelse(bh); 5362 ext4_std_error(inode->i_sb, err); 5363 return err; 5364 } 5365 5366 /* 5367 * ext4_write_inode() 5368 * 5369 * We are called from a few places: 5370 * 5371 * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files. 5372 * Here, there will be no transaction running. We wait for any running 5373 * transaction to commit. 5374 * 5375 * - Within flush work (sys_sync(), kupdate and such). 5376 * We wait on commit, if told to. 5377 * 5378 * - Within iput_final() -> write_inode_now() 5379 * We wait on commit, if told to. 5380 * 5381 * In all cases it is actually safe for us to return without doing anything, 5382 * because the inode has been copied into a raw inode buffer in 5383 * ext4_mark_inode_dirty(). This is a correctness thing for WB_SYNC_ALL 5384 * writeback. 5385 * 5386 * Note that we are absolutely dependent upon all inode dirtiers doing the 5387 * right thing: they *must* call mark_inode_dirty() after dirtying info in 5388 * which we are interested. 5389 * 5390 * It would be a bug for them to not do this. The code: 5391 * 5392 * mark_inode_dirty(inode) 5393 * stuff(); 5394 * inode->i_size = expr; 5395 * 5396 * is in error because write_inode() could occur while `stuff()' is running, 5397 * and the new i_size will be lost. Plus the inode will no longer be on the 5398 * superblock's dirty inode list. 5399 */ 5400 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) 5401 { 5402 int err; 5403 5404 if (WARN_ON_ONCE(current->flags & PF_MEMALLOC) || 5405 sb_rdonly(inode->i_sb)) 5406 return 0; 5407 5408 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) 5409 return -EIO; 5410 5411 if (EXT4_SB(inode->i_sb)->s_journal) { 5412 if (ext4_journal_current_handle()) { 5413 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); 5414 dump_stack(); 5415 return -EIO; 5416 } 5417 5418 /* 5419 * No need to force transaction in WB_SYNC_NONE mode. Also 5420 * ext4_sync_fs() will force the commit after everything is 5421 * written. 
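 *
 * So the check below returns early for WB_SYNC_NONE and for_sync
 * writeback; only a plain WB_SYNC_ALL sync waits, via
 * jbd2_complete_transaction(), for the transaction recorded in
 * i_sync_tid to commit.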
		 */
		if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
			return 0;

		err = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
						EXT4_I(inode)->i_sync_tid);
	} else {
		struct ext4_iloc iloc;

		err = __ext4_get_inode_loc(inode, &iloc, 0);
		if (err)
			return err;
		/*
		 * sync(2) will flush the whole buffer cache. No need to do
		 * it here separately for each inode.
		 */
		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
			sync_dirty_buffer(iloc.bh);
		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
			EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
					       "IO error syncing inode");
			err = -EIO;
		}
		brelse(iloc.bh);
	}
	return err;
}

/*
 * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
 * buffers that are attached to a page straddling i_size and are undergoing
 * commit. In that case we have to wait for commit to finish and try again.
 */
static void ext4_wait_for_tail_page_commit(struct inode *inode)
{
	struct page *page;
	unsigned offset;
	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
	tid_t commit_tid = 0;
	int ret;

	offset = inode->i_size & (PAGE_SIZE - 1);
	/*
	 * All buffers in the last page remain valid? Then there's nothing to
	 * do. We do the check mainly to optimize the common PAGE_SIZE ==
	 * blocksize case.
	 */
	if (offset > PAGE_SIZE - i_blocksize(inode))
		return;
	while (1) {
		page = find_lock_page(inode->i_mapping,
				      inode->i_size >> PAGE_SHIFT);
		if (!page)
			return;
		ret = __ext4_journalled_invalidatepage(page, offset,
						       PAGE_SIZE - offset);
		unlock_page(page);
		put_page(page);
		if (ret != -EBUSY)
			return;
		commit_tid = 0;
		read_lock(&journal->j_state_lock);
		if (journal->j_committing_transaction)
			commit_tid = journal->j_committing_transaction->t_tid;
		read_unlock(&journal->j_state_lock);
		if (commit_tid)
			jbd2_log_wait_commit(journal, commit_tid);
	}
}

/*
 * ext4_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible. In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk. (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Another thing we have to assure is that if we are in ordered mode
 * and the inode is still attached to the committing transaction, we
 * must start writeout of all the dirty pages which are being truncated.
 * This way we are sure that all the data written in the previous
 * transaction are already on disk (truncate waits for pages under
 * writeback).
 *
 * Called with inode->i_mutex down.
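 * (On current kernels i_mutex is implemented by i_rwsem, which is held
 * for write here.)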
 */
int ext4_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int error, rc = 0;
	int orphan = 0;
	const unsigned int ia_valid = attr->ia_valid;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	error = fscrypt_prepare_setattr(dentry, attr);
	if (error)
		return error;

	if (is_quota_modification(inode, attr)) {
		error = dquot_initialize(inode);
		if (error)
			return error;
	}
	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
		handle_t *handle;

		/* (user+group)*(old+new) structure, inode write (sb,
		 * inode block, ? - but truncate inode update has it) */
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
			(EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
			 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}

		/* dquot_transfer() calls back ext4_get_inode_usage() which
		 * counts xattr inode references.
		 */
		down_read(&EXT4_I(inode)->xattr_sem);
		error = dquot_transfer(inode, attr);
		up_read(&EXT4_I(inode)->xattr_sem);

		if (error) {
			ext4_journal_stop(handle);
			return error;
		}
		/* Update corresponding info in inode so that everything is in
		 * one transaction */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		error = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	}

	if (attr->ia_valid & ATTR_SIZE) {
		handle_t *handle;
		loff_t oldsize = inode->i_size;
		int shrink = (attr->ia_size <= inode->i_size);

		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

			if (attr->ia_size > sbi->s_bitmap_maxbytes)
				return -EFBIG;
		}
		if (!S_ISREG(inode->i_mode))
			return -EINVAL;

		if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
			inode_inc_iversion(inode);

		if (ext4_should_order_data(inode) &&
		    (attr->ia_size < inode->i_size)) {
			error = ext4_begin_ordered_truncate(inode,
							    attr->ia_size);
			if (error)
				goto err_out;
		}
		if (attr->ia_size != inode->i_size) {
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
			if (IS_ERR(handle)) {
				error = PTR_ERR(handle);
				goto err_out;
			}
			if (ext4_handle_valid(handle) && shrink) {
				error = ext4_orphan_add(handle, inode);
				orphan = 1;
			}
			/*
			 * Update c/mtime on truncate up, ext4_truncate() will
			 * update c/mtime in shrink case below.
			 */
			if (!shrink) {
				inode->i_mtime = current_time(inode);
				inode->i_ctime = inode->i_mtime;
			}
			down_write(&EXT4_I(inode)->i_data_sem);
			EXT4_I(inode)->i_disksize = attr->ia_size;
			rc = ext4_mark_inode_dirty(handle, inode);
			if (!error)
				error = rc;
			/*
			 * We have to update i_size under i_data_sem together
			 * with i_disksize to avoid races with writeback code
			 * running ext4_wb_update_i_disksize().
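			 * Writeback can push i_disksize up towards i_size
			 * while mapping delalloc blocks, so both fields must
			 * change within one i_data_sem critical section.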
			 */
			if (!error)
				i_size_write(inode, attr->ia_size);
			up_write(&EXT4_I(inode)->i_data_sem);
			ext4_journal_stop(handle);
			if (error) {
				if (orphan)
					ext4_orphan_del(NULL, inode);
				goto err_out;
			}
		}
		if (!shrink)
			pagecache_isize_extended(inode, oldsize, inode->i_size);

		/*
		 * Blocks are going to be removed from the inode. Wait
		 * for dio in flight. Temporarily disable
		 * dioread_nolock to prevent livelock.
		 */
		if (orphan) {
			if (!ext4_should_journal_data(inode)) {
				inode_dio_wait(inode);
			} else
				ext4_wait_for_tail_page_commit(inode);
		}
		down_write(&EXT4_I(inode)->i_mmap_sem);

		rc = ext4_break_layouts(inode);
		if (rc) {
			up_write(&EXT4_I(inode)->i_mmap_sem);
			error = rc;
			goto err_out;
		}

		/*
		 * Truncate pagecache after we've waited for commit
		 * in data=journal mode to make pages freeable.
		 */
		truncate_pagecache(inode, inode->i_size);
		if (shrink) {
			rc = ext4_truncate(inode);
			if (rc)
				error = rc;
		}
		up_write(&EXT4_I(inode)->i_mmap_sem);
	}

	if (!error) {
		setattr_copy(inode, attr);
		mark_inode_dirty(inode);
	}

	/*
	 * If the call to ext4_truncate failed to get a transaction handle at
	 * all, we need to clean up the in-core orphan list manually.
	 */
	if (orphan && inode->i_nlink)
		ext4_orphan_del(NULL, inode);

	if (!error && (ia_valid & ATTR_MODE))
		rc = posix_acl_chmod(inode, inode->i_mode);

err_out:
	ext4_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	return error;
}

int ext4_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ext4_inode *raw_inode;
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int flags;

	if (EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = ei->i_crtime.tv_sec;
		stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
	}

	flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
	if (flags & EXT4_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & EXT4_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & EXT4_ENCRYPT_FL)
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & EXT4_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & EXT4_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(inode, stat);
	return 0;
}

int ext4_file_getattr(const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	u64 delalloc_blocks;

	ext4_getattr(path, stat, request_mask, query_flags);

	/*
	 * If there is inline data in the inode, the inode will normally not
	 * have data blocks allocated (it may have an external xattr block).
	 * Report at least one sector for such files, so tools like tar, rsync,
	 * others don't incorrectly think the file is completely sparse.
	 */
	if (unlikely(ext4_has_inline_data(inode)))
		stat->blocks += (stat->size + 511) >> 9;

	/*
	 * We can't update i_blocks if the block allocation is delayed;
	 * otherwise, in the case of a system crash before the real block
	 * allocation is done, we would have i_blocks inconsistent with
	 * the on-disk file blocks.
	 * We always keep i_blocks updated together with the real
	 * allocation. But to not confuse the user, stat
	 * will return the blocks that include the delayed allocation
	 * blocks for this file.
	 */
	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
				   EXT4_I(inode)->i_reserved_data_blocks);
	stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
	return 0;
}

static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
				   int pextents)
{
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return ext4_ind_trans_blocks(inode, lblocks);
	return ext4_ext_index_trans_blocks(inode, pextents);
}

/*
 * Account for index blocks, block group bitmaps and block group
 * descriptor blocks if we modify data blocks and index blocks.
 * In the worst case, the index blocks spread over different block groups.
 *
 * If data blocks are discontiguous, they can spread over different
 * block groups too. Even if they are contiguous, with flexbg they
 * could still cross a block group boundary.
 *
 * Also account for superblock, inode, quota and xattr blocks.
 */
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents)
{
	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
	int gdpblocks;
	int idxblocks;
	int ret = 0;

	/*
	 * How many index blocks do we need to touch to map @lblocks logical
	 * blocks to @pextents physical extents?
	 */
	idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);

	ret = idxblocks;

	/*
	 * Now let's see how many group bitmaps and group descriptors we
	 * need to account for.
	 */
	groups = idxblocks + pextents;
	gdpblocks = groups;
	if (groups > ngroups)
		groups = ngroups;
	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;

	/* bitmaps and block group descriptor blocks */
	ret += groups + gdpblocks;

	/* Blocks for super block, inode, quota and xattr blocks */
	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);

	return ret;
}

/*
 * Calculate the total number of credits to reserve to fit
 * the modification of a single page into a single transaction,
 * which may include multiple chunks of block allocations.
 *
 * This could be called via ext4_write_begin().
 *
 * We need to consider the worst case, when
 * one new block is allocated per extent.
 */
int ext4_writepage_trans_blocks(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);
	int ret;

	ret = ext4_meta_trans_blocks(inode, bpp, bpp);

	/* Account for data blocks for journalled mode */
	if (ext4_should_journal_data(inode))
		ret += bpp;
	return ret;
}

/*
 * Calculate the journal credits for a chunk of data modification.
 *
 * This is called from DIO, fallocate or whoever is calling
 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
 *
 * Journal buffers for data blocks are not included here, as DIO
 * and fallocate do not need to journal data buffers.
 */
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
	return ext4_meta_trans_blocks(inode, nrblocks, 1);
}

/*
 * The caller must have previously called ext4_reserve_inode_write().
 * Given this, we know that the caller already has write access to iloc->bh.
 */
int ext4_mark_iloc_dirty(handle_t *handle,
			 struct inode *inode, struct ext4_iloc *iloc)
{
	int err = 0;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
		put_bh(iloc->bh);
		return -EIO;
	}
	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);

	/* ext4_do_update_inode() consumes one bh->b_count */
	get_bh(iloc->bh);

	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
	err = ext4_do_update_inode(handle, inode, iloc);
	put_bh(iloc->bh);
	return err;
}

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh. This _must_ be cleaned up later.
 */
int
ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			 struct ext4_iloc *iloc)
{
	int err;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	err = ext4_get_inode_loc(inode, iloc);
	if (!err) {
		BUFFER_TRACE(iloc->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, iloc->bh);
		if (err) {
			brelse(iloc->bh);
			iloc->bh = NULL;
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}

static int __ext4_expand_extra_isize(struct inode *inode,
				     unsigned int new_extra_isize,
				     struct ext4_iloc *iloc,
				     handle_t *handle, int *no_expand)
{
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;
	int error;

	raw_inode = ext4_raw_inode(iloc);

	header = IHDR(inode, raw_inode);

	/* No extended attributes present */
	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
		       EXT4_I(inode)->i_extra_isize, 0,
		       new_extra_isize - EXT4_I(inode)->i_extra_isize);
		EXT4_I(inode)->i_extra_isize = new_extra_isize;
		return 0;
	}

	/* try to expand with EAs present */
	error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
					   raw_inode, handle);
	if (error) {
		/* Inode size expansion failed; don't try again. */
		*no_expand = 1;
	}

	return error;
}

/*
 * Expand an inode by new_extra_isize bytes.
 * Returns 0 on success or negative error number on failure.
 */
static int ext4_try_to_expand_extra_isize(struct inode *inode,
					  unsigned int new_extra_isize,
					  struct ext4_iloc iloc,
					  handle_t *handle)
{
	int no_expand;
	int error;

	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND))
		return -EOVERFLOW;

	/*
	 * In nojournal mode, we can immediately attempt to expand
	 * the inode. When journaled, we first need to obtain extra
	 * buffer credits since we may write into the EA block
	 * with this same handle. If journal_extend fails, then it will
	 * only result in a minor loss of functionality for that inode.
	 * If this is felt to be critical, then e2fsck should be run to
	 * force a large enough s_min_extra_isize.
	 */
	if (ext4_handle_valid(handle) &&
	    jbd2_journal_extend(handle,
				EXT4_DATA_TRANS_BLOCKS(inode->i_sb)) != 0)
		return -ENOSPC;

	if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
		return -EBUSY;

	error = __ext4_expand_extra_isize(inode, new_extra_isize, &iloc,
					  handle, &no_expand);
	ext4_write_unlock_xattr(inode, &no_expand);

	return error;
}

int ext4_expand_extra_isize(struct inode *inode,
			    unsigned int new_extra_isize,
			    struct ext4_iloc *iloc)
{
	handle_t *handle;
	int no_expand;
	int error, rc;

	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
		brelse(iloc->bh);
		return -EOVERFLOW;
	}

	handle = ext4_journal_start(inode, EXT4_HT_INODE,
				    EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
	if (IS_ERR(handle)) {
		error = PTR_ERR(handle);
		brelse(iloc->bh);
		return error;
	}

	ext4_write_lock_xattr(inode, &no_expand);

	BUFFER_TRACE(iloc->bh, "get_write_access");
	error = ext4_journal_get_write_access(handle, iloc->bh);
	if (error) {
		brelse(iloc->bh);
		goto out_stop;
	}

	error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
					  handle, &no_expand);

	rc = ext4_mark_iloc_dirty(handle, inode, iloc);
	if (!error)
		error = rc;

	ext4_write_unlock_xattr(inode, &no_expand);
out_stop:
	ext4_journal_stop(handle);
	return error;
}

/*
 * What we do here is to mark the in-core inode as clean with respect to inode
 * dirtiness (it may still be data-dirty).
 * This means that the in-core inode may be reaped by prune_icache
 * without having to perform any I/O. This is a very good thing,
 * because *any* task may call prune_icache - even ones which
 * have a transaction open against a different journal.
 *
 * Is this cheating? Not really. Sure, we haven't written the
 * inode out, but prune_icache isn't a user-visible syncing function.
 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
 * we start and wait on commits.
 */
int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int err;

	might_sleep();
	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		return err;

	if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize)
		ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize,
					       iloc, handle);

	return ext4_mark_iloc_dirty(handle, inode, &iloc);
}

/*
 * ext4_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Also, dquot_alloc_block() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
 * so would cause a commit on atime updates, which we don't bother doing.
 * We handle synchronous inodes at the highest possible level.
 *
 * If only the I_DIRTY_TIME flag is set, we can skip everything. If
 * I_DIRTY_TIME and I_DIRTY_SYNC is set, the only inode fields we need
 * to copy into the on-disk inode structure are the timestamp fields.
 */
void ext4_dirty_inode(struct inode *inode, int flags)
{
	handle_t *handle;

	if (flags == I_DIRTY_TIME)
		return;
	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (IS_ERR(handle))
		goto out;

	ext4_mark_inode_dirty(handle, inode);

	ext4_journal_stop(handle);
out:
	return;
}

#if 0
/*
 * Bind an inode's backing buffer_head into this transaction, to prevent
 * it from being flushed to disk early. Unlike
 * ext4_reserve_inode_write, this leaves behind no bh reference and
 * returns no iloc structure, so the caller needs to repeat the iloc
 * lookup to mark the inode dirty later.
 */
static int ext4_pin_inode(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	int err = 0;

	if (handle) {
		err = ext4_get_inode_loc(inode, &iloc);
		if (!err) {
			BUFFER_TRACE(iloc.bh, "get_write_access");
			err = jbd2_journal_get_write_access(handle, iloc.bh);
			if (!err)
				err = ext4_handle_dirty_metadata(handle,
								 NULL,
								 iloc.bh);
			brelse(iloc.bh);
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
#endif

int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	/*
	 * We have to be very careful here: changing a data block's
	 * journaling status dynamically is dangerous. If we write a
	 * data block to the journal, change the status and then delete
	 * that block, we risk forgetting to revoke the old log record
	 * from the journal and so a subsequent replay can corrupt data.
	 * So, first we make sure that the journal is empty and that
	 * nobody is changing anything.
	 */

	journal = EXT4_JOURNAL(inode);
	if (!journal)
		return 0;
	if (is_journal_aborted(journal))
		return -EROFS;

	/* Wait for all existing dio workers */
	inode_dio_wait(inode);

	/*
	 * Before flushing the journal and switching inode's aops, we have
	 * to flush all dirty data the inode has. There can be outstanding
	 * delayed allocations, there can be unwritten extents created by
	 * fallocate or buffered writes in dioread_nolock mode covered by
	 * dirty data which can be converted only after flushing the dirty
	 * data (and journalled aops don't know how to handle these cases).
	 */
	if (val) {
		down_write(&EXT4_I(inode)->i_mmap_sem);
		err = filemap_write_and_wait(inode->i_mapping);
		if (err < 0) {
			up_write(&EXT4_I(inode)->i_mmap_sem);
			return err;
		}
	}

	percpu_down_write(&sbi->s_journal_flag_rwsem);
	jbd2_journal_lock_updates(journal);

	/*
	 * OK, there are no updates running now, and all cached data is
	 * synced to disk. We are now in a completely consistent state
	 * which doesn't have anything in the journal, and we know that
	 * no filesystem updates are running, so it is safe to modify
	 * the inode's in-core data-journaling state flag now.
	 */

	if (val)
		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	else {
		err = jbd2_journal_flush(journal);
		if (err < 0) {
			jbd2_journal_unlock_updates(journal);
			percpu_up_write(&sbi->s_journal_flag_rwsem);
			return err;
		}
		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	}
	ext4_set_aops(inode);

	jbd2_journal_unlock_updates(journal);
	percpu_up_write(&sbi->s_journal_flag_rwsem);

	if (val)
		up_write(&EXT4_I(inode)->i_mmap_sem);

	/* Finally we can mark the inode as dirty. */

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_mark_inode_dirty(handle, inode);
	ext4_handle_sync(handle);
	ext4_journal_stop(handle);
	ext4_std_error(inode->i_sb, err);

	return err;
}

static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
{
	return !buffer_mapped(bh);
}

vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct page *page = vmf->page;
	loff_t size;
	unsigned long len;
	int err;
	vm_fault_t ret;
	struct file *file = vma->vm_file;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	handle_t *handle;
	get_block_t *get_block;
	int retries = 0;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	down_read(&EXT4_I(inode)->i_mmap_sem);

	err = ext4_convert_inline_data(inode);
	if (err)
		goto out_ret;

	/* Delalloc case is easy... */
	if (test_opt(inode->i_sb, DELALLOC) &&
	    !ext4_should_journal_data(inode) &&
	    !ext4_nonda_switch(inode->i_sb)) {
		do {
			err = block_page_mkwrite(vma, vmf,
						 ext4_da_get_block_prep);
		} while (err == -ENOSPC &&
			 ext4_should_retry_alloc(inode->i_sb, &retries));
		goto out_ret;
	}

	lock_page(page);
	size = i_size_read(inode);
	/* Page got truncated from under us? */
	if (page->mapping != mapping || page_offset(page) > size) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (page->index == size >> PAGE_SHIFT)
		len = size & ~PAGE_MASK;
	else
		len = PAGE_SIZE;
	/*
	 * Return if we have all the buffers mapped. This avoids the need to
	 * do journal_start/journal_stop, which can block and take a long time.
	 */
	if (page_has_buffers(page)) {
		if (!ext4_walk_page_buffers(NULL, page_buffers(page),
					    0, len, NULL,
					    ext4_bh_unmapped)) {
			/* Wait so that we don't change page under IO */
			wait_for_stable_page(page);
			ret = VM_FAULT_LOCKED;
			goto out;
		}
	}
	unlock_page(page);
	/*
	 * OK, we need to fill the hole...
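	 * Allocate the blocks under a transaction handle; in data=journal
	 * mode the buffers we dirty must also be added to the handle via
	 * do_journal_get_write_access().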
	 */
	if (ext4_should_dioread_nolock(inode))
		get_block = ext4_get_block_unwritten;
	else
		get_block = ext4_get_block;
retry_alloc:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
				    ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}
	err = block_page_mkwrite(vma, vmf, get_block);
	if (!err && ext4_should_journal_data(inode)) {
		if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
			  PAGE_SIZE, NULL, do_journal_get_write_access)) {
			unlock_page(page);
			ret = VM_FAULT_SIGBUS;
			ext4_journal_stop(handle);
			goto out;
		}
		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	}
	ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry_alloc;
out_ret:
	ret = block_page_mkwrite_return(err);
out:
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(inode->i_sb);
	return ret;
}

vm_fault_t ext4_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&EXT4_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&EXT4_I(inode)->i_mmap_sem);

	return ret;
}