// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/iomap.h>
#include <linux/iversion.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			     struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;
	__u16 dummy_csum = 0;
	int offset = offsetof(struct ext4_inode, i_checksum_lo);
	unsigned int csum_size = sizeof(dummy_csum);

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
	offset += csum_size;
	csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
			   EXT4_GOOD_OLD_INODE_SIZE - offset);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		offset = offsetof(struct ext4_inode, i_checksum_hi);
		csum = ext4_chksum(sbi, csum, (__u8 *)raw +
				   EXT4_GOOD_OLD_INODE_SIZE,
				   offset - EXT4_GOOD_OLD_INODE_SIZE);
		if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
			csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
					   csum_size);
			offset += csum_size;
		}
		csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
				   EXT4_INODE_SIZE(inode->i_sb) - offset);
	}

	return csum;
}
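/*
 * Editor's layout note (illustrative, not driver logic): the checksum
 * above covers the whole on-disk inode, but a zero placeholder is hashed
 * in where i_checksum_lo (and, on big inodes, i_checksum_hi) live, so
 * the stored checksum never covers itself.
 */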
static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}

static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
				struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents);

/*
 * Test whether an inode is a fast symlink.
 * A fast symlink has its symlink data stored in ext4_inode_info->i_data.
 */
int ext4_inode_is_fast_symlink(struct inode *inode)
{
	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
		int ea_blocks = EXT4_I(inode)->i_file_acl ?
				EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;

		if (ext4_has_inline_data(inode))
			return 0;

		return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
	}
	return S_ISLNK(inode->i_mode) && inode->i_size &&
	       (inode->i_size < EXT4_N_BLOCKS * 4);
}
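/*
 * Editor's note on the bound above (illustrative): i_data has room for
 * EXT4_N_BLOCKS (15) 32-bit block slots, i.e. 60 bytes, so only symlink
 * targets shorter than 60 bytes can be stored inline as fast symlinks.
 */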
/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;
	int extra_credits = 3;
	struct ext4_xattr_inode_array *ea_inode_array = NULL;

	trace_ext4_evict_inode(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data, dirty buffers are tracked only in the
		 * journal. So although mm thinks everything is clean and
		 * ready for reaping, the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed. Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss. Also even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped and thus the user could see stale data if they try
		 * to read them before the transaction is checkpointed. So be
		 * careful and force everything to disk here... We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use page cache.
		 */
		if (inode->i_ino != EXT4_JOURNAL_INO &&
		    ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
		    inode->i_data.nrpages) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_complete_transaction(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages_final(&inode->i_data);

		goto no_delete;
	}

	if (is_bad_inode(inode))
		goto no_delete;
	dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages_final(&inode->i_data);

	/*
	 * Protect us against freezing - iput() caller didn't have to have any
	 * protection against it
	 */
	sb_start_intwrite(inode->i_sb);

	if (!IS_NOQUOTA(inode))
		extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);

	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
				    ext4_blocks_for_truncate(inode)+extra_credits);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	/*
	 * Set inode->i_size to 0 before calling ext4_truncate(). We need
	 * special handling of symlinks here because i_size is used to
	 * determine whether ext4_inode_info->i_data contains symlink data or
	 * block mappings. Setting i_size to 0 will remove its fast symlink
	 * status. Erase i_data so that it becomes a valid empty block map.
	 */
	if (ext4_inode_is_fast_symlink(inode))
		memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks) {
		err = ext4_truncate(inode);
		if (err) {
			ext4_error(inode->i_sb,
				   "couldn't truncate inode %lu (err %d)",
				   inode->i_ino, err);
			goto stop_handle;
		}
	}

	/* Remove xattr references. */
	err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
				      extra_credits);
	if (err) {
		ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
stop_handle:
		ext4_journal_stop(handle);
		ext4_orphan_del(NULL, inode);
		sb_end_intwrite(inode->i_sb);
		ext4_xattr_inode_array_free(ea_inode_array);
		goto no_delete;
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = (__u32)ktime_get_real_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	sb_end_intwrite(inode->i_sb);
	ext4_xattr_inode_array_free(ea_inode_array);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
				  int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
			     "with only %d reserved data blocks",
			     __func__, inode->i_ino, used,
			     ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}

static int __check_block_validity(struct inode *inode, const char *func,
				  unsigned int line,
				  struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock %llu "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_pblk, map->m_len);
		return -EFSCORRUPTED;
	}
	return 0;
}

int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
		       ext4_lblk_t len)
{
	int ret;

	if (ext4_encrypted_inode(inode))
		return fscrypt_zeroout_range(inode, lblk, pblk, len);

	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
				       struct inode *inode,
				       struct ext4_map_blocks *es_map,
				       struct ext4_map_blocks *map,
				       int flags)
{
	int retval;

	map->m_flags = 0;
	/*
	 * There is a race window in which the result is not the same.
	 * e.g. xfstests #223 when dioread_nolock is enabled.  The reason
	 * is that we look up a block mapping in the extent status tree
	 * without taking i_data_sem.  So at the time the unwritten extent
	 * could be converted.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	/*
	 * We don't check m_len because the extent will be collapsed in the
	 * status tree.  So the m_len might not be equal.
	 */
	if (es_map->m_lblk != map->m_lblk ||
	    es_map->m_flags != map->m_flags ||
	    es_map->m_pblk != map->m_pblk) {
		printk("ES cache assertion failed for inode: %lu "
		       "es_cached ex [%d/%d/%llu/%x] != "
		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
		       inode->i_ino, es_map->m_lblk, es_map->m_len,
		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
		       map->m_len, map->m_pblk, map->m_flags,
		       retval, flags);
	}
}
#endif /* ES_AGGRESSIVE_TEST */

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extent based, it calls ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapping
 * based files.
 *
 * On success, it returns the number of blocks being mapped or allocated.  If
 * create == 0 and the blocks are pre-allocated and unwritten, the resulting
 * @map is marked as unwritten.  If create == 1, it will mark @map as mapped.
 *
 * It returns 0 if plain look up failed (blocks have not been allocated); in
 * that case, @map is returned as unmapped but we still do fill map->m_len to
 * indicate the length of a hole starting at map->m_lblk.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	struct extent_status es;
	int retval;
	int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);

	/*
	 * ext4_map_blocks returns an int, and m_len is an unsigned int
	 */
	if (unlikely(map->m_len > INT_MAX))
		map->m_len = INT_MAX;

	/* We can handle only block numbers less than EXT_MAX_BLOCKS */
	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
		return -EFSCORRUPTED;

	/* Lookup extent status tree firstly */
	if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
			map->m_pblk = ext4_es_pblock(&es) +
				      map->m_lblk - es.es_lblk;
			map->m_flags |= ext4_es_is_written(&es) ?
					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
			map->m_pblk = 0;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
			retval = 0;
		} else {
			BUG_ON(1);
		}
#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(handle, inode, map,
					   &orig_map, flags);
#endif
		goto found;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_find_delalloc_range(inode, map->m_lblk,
					     map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk,
					    map->m_len, map->m_pblk, status);
		if (ret < 0)
			retval = ret;
	}
	up_read((&EXT4_I(inode)->i_data_sem));

found:
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create = 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		/*
		 * If we need to convert the extent to unwritten
		 * we continue and do the actual work in
		 * ext4_ext_map_blocks()
		 */
		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
			return retval;

	/*
	 * Here we clear m_flags because after allocating a new extent,
	 * it will be set again.
	 */
	map->m_flags &= ~EXT4_MAP_FLAGS;

	/*
	 * New block allocation and/or writing to an unwritten extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_block()
	 * with create == 1 flag.
	 */
	down_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now.  We don't
		 * support fallocate for non extent files.  So we can update
		 * reserve space here.
		 */
		if ((retval > 0) &&
		    (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}

	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		/*
		 * We have to zeroout blocks before inserting them into extent
		 * status tree.  Otherwise someone could look them up there and
		 * use them before they are really zeroed.  We also have to
		 * unmap metadata before zeroing as otherwise writeback can
		 * overwrite zeros with stale data from block device.
		 */
		if (flags & EXT4_GET_BLOCKS_ZERO &&
		    map->m_flags & EXT4_MAP_MAPPED &&
		    map->m_flags & EXT4_MAP_NEW) {
			clean_bdev_aliases(inode->i_sb->s_bdev, map->m_pblk,
					   map->m_len);
			ret = ext4_issue_zeroout(inode, map->m_lblk,
						 map->m_pblk, map->m_len);
			if (ret) {
				retval = ret;
				goto out_sem;
			}
		}

		/*
		 * If the extent has been zeroed out, we don't need to update
		 * extent status tree.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
		    ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
			if (ext4_es_is_written(&es))
				goto out_sem;
		}
		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_find_delalloc_range(inode, map->m_lblk,
					     map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    map->m_pblk, status);
		if (ret < 0) {
			retval = ret;
			goto out_sem;
		}
	}

out_sem:
	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;

		/*
		 * Inodes with freshly allocated blocks where contents will be
		 * visible after transaction commit must be on transaction's
		 * ordered data list.
		 */
		if (map->m_flags & EXT4_MAP_NEW &&
		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
		    !(flags & EXT4_GET_BLOCKS_ZERO) &&
		    !ext4_is_quota_file(inode) &&
		    ext4_should_order_data(inode)) {
			if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
				ret = ext4_jbd2_inode_add_wait(handle, inode);
			else
				ret = ext4_jbd2_inode_add_write(handle, inode);
			if (ret)
				return ret;
		}
	}
	return retval;
}
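/*
 * Editor's usage sketch (illustrative, not a call site in this file):
 * a plain one-block lookup with no allocation could look like
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
 *	int ret = ext4_map_blocks(NULL, inode, &map, 0);
 *
 * ret > 0: map.m_pblk/m_flags describe an existing mapping;
 * ret == 0: a hole, with map.m_len set to the hole length;
 * ret < 0: error.
 */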
/*
 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
 * we have to be careful as someone else may be manipulating b_state as well.
 */
static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
{
	unsigned long old_state;
	unsigned long new_state;

	flags &= EXT4_MAP_FLAGS;

	/* Dummy buffer_head? Set non-atomically. */
	if (!bh->b_page) {
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
		return;
	}
	/*
	 * Someone else may be modifying b_state. Be careful! This is ugly but
	 * once we get rid of using bh as a container for mapping information
	 * to pass to / from get_block functions, this can go away.
	 */
	do {
		old_state = READ_ONCE(bh->b_state);
		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
	} while (unlikely(
		 cmpxchg(&bh->b_state, old_state, new_state) != old_state));
}

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	struct ext4_map_blocks map;
	int ret = 0;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
			      flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		ext4_update_bh_state(bh, map.m_flags);
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	} else if (ret == 0) {
		/* hole case, need to fill in bh->b_size */
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
	}
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}

/*
 * Get block function used when preparing for buffered write if we require
 * creating an unwritten extent if blocks haven't been allocated.  The extent
 * will be converted to written after the IO is complete.
 */
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	return _ext4_get_block(inode, iblock, bh_result,
			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

/*
 * Get blocks function for the cases that need to start a transaction -
 * generally different cases of direct IO and DAX IO. It also handles retries
 * in case of ENOSPC.
 */
static int ext4_get_block_trans(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int flags)
{
	int dio_credits;
	handle_t *handle;
	int retries = 0;
	int ret;

	/* Trim mapping request to maximum we can map at once for DIO */
	if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS)
		bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits;
	dio_credits = ext4_chunk_trans_blocks(inode,
				      bh_result->b_size >> inode->i_blkbits);
retry:
	handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	ret = _ext4_get_block(inode, iblock, bh_result, flags);
	ext4_journal_stop(handle);

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
	return ret;
}
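/*
 * Editor's worked example of the trimming above (illustrative): with 4k
 * blocks (i_blkbits == 12), DIO_MAX_BLOCKS << 12 is 16MB, so a larger
 * mapping request is cut down and one transaction never has to carry
 * credits for more than 4096 newly mapped blocks.
 */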
/* Get block function for DIO reads and writes to inodes without extents */
int ext4_dio_get_block(struct inode *inode, sector_t iblock,
		       struct buffer_head *bh, int create)
{
	/* We don't expect handle for direct IO */
	WARN_ON_ONCE(ext4_journal_current_handle());

	if (!create)
		return _ext4_get_block(inode, iblock, bh, 0);
	return ext4_get_block_trans(inode, iblock, bh, EXT4_GET_BLOCKS_CREATE);
}

/*
 * Get block function for AIO DIO writes where we create an unwritten extent
 * if blocks are not allocated yet. The extent will be converted to written
 * after IO is complete.
 */
static int ext4_dio_get_block_unwritten_async(struct inode *inode,
		sector_t iblock, struct buffer_head *bh_result, int create)
{
	int ret;

	/* We don't expect handle for direct IO */
	WARN_ON_ONCE(ext4_journal_current_handle());

	ret = ext4_get_block_trans(inode, iblock, bh_result,
				   EXT4_GET_BLOCKS_IO_CREATE_EXT);

	/*
	 * When doing DIO using unwritten extents, we need io_end to convert
	 * unwritten extents to written on IO completion. We allocate io_end
	 * once we spot an unwritten extent and store it in b_private. Generic
	 * DIO code keeps b_private set and furthermore passes the value to
	 * our completion callback in 'private' argument.
	 */
	if (!ret && buffer_unwritten(bh_result)) {
		if (!bh_result->b_private) {
			ext4_io_end_t *io_end;

			io_end = ext4_init_io_end(inode, GFP_KERNEL);
			if (!io_end)
				return -ENOMEM;
			bh_result->b_private = io_end;
			ext4_set_io_unwritten_flag(inode, io_end);
		}
		set_buffer_defer_completion(bh_result);
	}

	return ret;
}

/*
 * Get block function for non-AIO DIO writes where we create an unwritten
 * extent if blocks are not allocated yet. The extent will be converted to
 * written after IO is complete by ext4_direct_IO_write().
 */
static int ext4_dio_get_block_unwritten_sync(struct inode *inode,
		sector_t iblock, struct buffer_head *bh_result, int create)
{
	int ret;

	/* We don't expect handle for direct IO */
	WARN_ON_ONCE(ext4_journal_current_handle());

	ret = ext4_get_block_trans(inode, iblock, bh_result,
				   EXT4_GET_BLOCKS_IO_CREATE_EXT);

	/*
	 * Mark inode as having pending DIO writes to unwritten extents.
	 * ext4_direct_IO_write() checks this flag and converts extents to
	 * written.
	 */
	if (!ret && buffer_unwritten(bh_result))
		ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);

	return ret;
}

static int ext4_dio_get_block_overwrite(struct inode *inode, sector_t iblock,
		struct buffer_head *bh_result, int create)
{
	int ret;

	ext4_debug("ext4_dio_get_block_overwrite: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	/* We don't expect handle for direct IO */
	WARN_ON_ONCE(ext4_journal_current_handle());

	ret = _ext4_get_block(inode, iblock, bh_result, 0);
	/*
	 * Blocks should have been preallocated! ext4_file_write_iter() checks
	 * that.
	 */
	WARN_ON_ONCE(!buffer_mapped(bh_result) || buffer_unwritten(bh_result));

	return ret;
}


/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int map_flags)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int create = map_flags & EXT4_GET_BLOCKS_CREATE;
	int err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map, map_flags);

	if (err == 0)
		return create ? ERR_PTR(-ENOSPC) : NULL;
	if (err < 0)
		return ERR_PTR(err);

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (unlikely(err)) {
			unlock_buffer(bh);
			goto errout;
		}
		if (!buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			goto errout;
	} else
		BUFFER_TRACE(bh, "not a new buffer");
	return bh;
errout:
	brelse(bh);
	return ERR_PTR(err);
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int map_flags)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, map_flags);
	if (IS_ERR(bh))
		return bh;
	if (!bh || buffer_uptodate(bh))
		return bh;
	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	return ERR_PTR(-EIO);
}
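/*
 * Editor's usage sketch (illustrative, not a call site in this file):
 * reading one metadata block under an existing handle might look like
 *
 *	bh = ext4_bread(handle, inode, blk, 0);
 *	if (IS_ERR(bh))
 *		return PTR_ERR(bh);
 *	if (!bh)
 *		...block is a hole...
 *
 * A NULL return is not an error: without EXT4_GET_BLOCKS_CREATE,
 * ext4_getblk() returns NULL for an unallocated block.
 */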
/* Read a contiguous batch of blocks. */
int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
		     bool wait, struct buffer_head **bhs)
{
	int i, err;

	for (i = 0; i < bh_count; i++) {
		bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
		if (IS_ERR(bhs[i])) {
			err = PTR_ERR(bhs[i]);
			bh_count = i;
			goto out_brelse;
		}
	}

	for (i = 0; i < bh_count; i++)
		/* Note that NULL bhs[i] is valid because of holes. */
		if (bhs[i] && !buffer_uptodate(bhs[i]))
			ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1,
				    &bhs[i]);

	if (!wait)
		return 0;

	for (i = 0; i < bh_count; i++)
		if (bhs[i])
			wait_on_buffer(bhs[i]);

	for (i = 0; i < bh_count; i++) {
		if (bhs[i] && !buffer_uptodate(bhs[i])) {
			err = -EIO;
			goto out_brelse;
		}
	}
	return 0;

out_brelse:
	for (i = 0; i < bh_count; i++) {
		brelse(bhs[i]);
		bhs[i] = NULL;
	}
	return err;
}

int ext4_walk_page_buffers(handle_t *handle,
			   struct buffer_head *head,
			   unsigned from,
			   unsigned to,
			   int *partial,
			   int (*fn)(handle_t *handle,
				     struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
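/*
 * Editor's note (illustrative): ext4_walk_page_buffers() is the generic
 * driver for the per-buffer callbacks below; e.g. ext4_write_begin()
 * uses it as
 *
 *	ret = ext4_walk_page_buffers(handle, page_buffers(page),
 *				     from, to, NULL,
 *				     do_journal_get_write_access);
 */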
/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage().  In that case, we
 * *know* that ext4_writepage() has generated enough buffer credits to do the
 * whole page.  So we won't block on the journal in that case, which is good,
 * because the caller may be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
int do_journal_get_write_access(handle_t *handle,
				struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers.  Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues.  Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	BUFFER_TRACE(bh, "get write access");
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}

#ifdef CONFIG_EXT4_FS_ENCRYPTION
static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
				  get_block_t *get_block)
{
	unsigned from = pos & (PAGE_SIZE - 1);
	unsigned to = from + len;
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;
	unsigned bbits;
	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
	bool decrypt = false;

	BUG_ON(!PageLocked(page));
	BUG_ON(from > PAGE_SIZE);
	BUG_ON(to > PAGE_SIZE);
	BUG_ON(from > to);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);
	bbits = ilog2(blocksize);
	block = (sector_t)page->index << (PAGE_SHIFT - bbits);

	for (bh = head, block_start = 0; bh != head || !block_start;
	     block++, block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				break;
			if (buffer_new(bh)) {
				clean_bdev_bh_alias(bh);
				if (PageUptodate(page)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
					continue;
				}
				if (block_end > to || block_start < from)
					zero_user_segments(page, to, block_end,
							   block_start, from);
				continue;
			}
		}
		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		    (block_start < from || block_end > to)) {
			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
			*wait_bh++ = bh;
			decrypt = ext4_encrypted_inode(inode) &&
				S_ISREG(inode->i_mode);
		}
	}
	/*
	 * If we issued read requests, let them complete.
	 */
	while (wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			err = -EIO;
	}
	if (unlikely(err))
		page_zero_new_buffers(page, from, to);
	else if (decrypt)
		err = fscrypt_decrypt_page(page->mapping->host, page,
					   PAGE_SIZE, 0, page->index);
	return err;
}
#endif

static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_SHIFT;
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
						    flags, pagep);
		if (ret < 0)
			return ret;
		if (ret == 1)
			return 0;
	}

	/*
	 * grab_cache_page_write_begin() can take a long time if the
	 * system is thrashing due to memory pressure, or if the page
	 * is being written back.  So grab it first before we start
	 * the transaction handle.  This also allows us to allocate
	 * the page (if needed) without using GFP_NOFS.
	 */
retry_grab:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	unlock_page(page);

retry_journal:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
	if (IS_ERR(handle)) {
		put_page(page);
		return PTR_ERR(handle);
	}

	lock_page(page);
	if (page->mapping != mapping) {
		/* The page got truncated from under us */
		unlock_page(page);
		put_page(page);
		ext4_journal_stop(handle);
		goto retry_grab;
	}
	/* In case writeback began while the page was unlocked */
	wait_for_stable_page(page);

#ifdef CONFIG_EXT4_FS_ENCRYPTION
	if (ext4_should_dioread_nolock(inode))
		ret = ext4_block_write_begin(page, pos, len,
					     ext4_get_block_unwritten);
	else
		ret = ext4_block_write_begin(page, pos, len,
					     ext4_get_block);
#else
	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len,
					  ext4_get_block_unwritten);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);
#endif
	if (!ret && ext4_should_journal_data(inode)) {
		ret = ext4_walk_page_buffers(handle, page_buffers(page),
					     from, to, NULL,
					     do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}

		if (ret == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry_journal;
		put_page(page);
		return ret;
	}
	*pagep = page;
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	int ret;
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	clear_buffer_meta(bh);
	clear_buffer_prio(bh);
	return ret;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext4_write_end(struct file *file,
			  struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int i_size_changed = 0;
	int inline_data = ext4_has_inline_data(inode);

	trace_ext4_write_end(inode, pos, len, copied);
	if (inline_data) {
		ret = ext4_write_inline_data_end(inode, pos, len,
						 copied, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			goto errout;
		}
		copied = ret;
	} else
		copied = block_write_end(file, mapping, pos,
					 len, copied, page, fsdata);
	/*
	 * it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	i_size_changed = ext4_update_inode_size(inode, pos + copied);
	unlock_page(page);
	put_page(page);

	if (old_size < pos)
		pagecache_isize_extended(inode, old_size, pos);
	/*
	 * Don't mark the inode dirty under page lock.  First, it unnecessarily
	 * makes the holding time of page lock longer.  Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed || inline_data)
		ext4_mark_inode_dirty(handle, inode);

	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks than copied, we will have
		 * blocks allocated outside inode->i_size, so truncate them.
		 */
		ext4_orphan_add(handle, inode);
errout:
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * This is a private version of page_zero_new_buffers() which doesn't
 * set the buffer to be dirty, since in data=journalled mode we need
 * to call ext4_handle_dirty_metadata() instead.
 */
static void ext4_journalled_zero_new_buffers(handle_t *handle,
					     struct page *page,
					     unsigned from, unsigned to)
{
	unsigned int block_start = 0, block_end;
	struct buffer_head *head, *bh;

	bh = head = page_buffers(page);
	do {
		block_end = block_start + bh->b_size;
		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, size;

					start = max(from, block_start);
					size = min(to, block_end) - start;

					zero_user(page, start, size);
					write_end_fn(handle, bh);
				}
				clear_buffer_new(bh);
			}
		}
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}

static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	int size_changed = 0;
	int inline_data = ext4_has_inline_data(inode);

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (inline_data) {
		ret = ext4_write_inline_data_end(inode, pos, len,
						 copied, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			goto errout;
		}
		copied = ret;
	} else if (unlikely(copied < len) && !PageUptodate(page)) {
		copied = 0;
		ext4_journalled_zero_new_buffers(handle, page, from, to);
	} else {
		if (unlikely(copied < len))
			ext4_journalled_zero_new_buffers(handle, page,
							 from + copied, to);
		ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
					     from + copied, &partial,
					     write_end_fn);
		if (!partial)
			SetPageUptodate(page);
	}
	size_changed = ext4_update_inode_size(inode, pos + copied);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	unlock_page(page);
	put_page(page);

	if (old_size < pos)
		pagecache_isize_extended(inode, old_size, pos);

	if (size_changed || inline_data) {
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks than copied, we will have
		 * blocks allocated outside inode->i_size, so truncate them.
		 */
		ext4_orphan_add(handle, inode);

errout:
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
/*
 * Reserve space for a single cluster
 */
static int ext4_da_reserve_space(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	int ret;

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end.  Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
	if (ret)
		return ret;

	spin_lock(&ei->i_block_reservation_lock);
	if (ext4_claim_free_clusters(sbi, 1, 0)) {
		spin_unlock(&ei->i_block_reservation_lock);
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
		return -ENOSPC;
	}
	ei->i_reserved_data_blocks++;
	trace_ext4_da_reserve_space(inode);
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;       /* success */
}

static void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * if there aren't enough reserved blocks, then the
		 * counter is messed up somewhere.  Since this
		 * function is called from invalidate page, it's
		 * harmless to return without any action.
		 */
		ext4_warning(inode->i_sb, "ext4_da_release_space: "
			     "ino %lu, to_free %d with only %d reserved "
			     "data blocks", inode->i_ino, to_free,
			     ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}
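/*
 * Editor's note on the pairing above (illustrative): a delalloc write
 * takes one reservation per cluster via ext4_da_reserve_space(); if the
 * page is invalidated before writeback, the function below walks the
 * page's buffers and hands those reservations back through
 * ext4_da_release_space().
 */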
static void ext4_da_page_release_reservation(struct page *page,
					     unsigned int offset,
					     unsigned int length)
{
	int to_release = 0, contiguous_blks = 0;
	struct buffer_head *head, *bh;
	unsigned int curr_off = 0;
	struct inode *inode = page->mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	unsigned int stop = offset + length;
	int num_clusters;
	ext4_fsblk_t lblk;

	BUG_ON(stop > PAGE_SIZE || stop < length);

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;

		if (next_off > stop)
			break;

		if ((offset <= curr_off) && (buffer_delay(bh))) {
			to_release++;
			contiguous_blks++;
			clear_buffer_delay(bh);
		} else if (contiguous_blks) {
			lblk = page->index <<
			       (PAGE_SHIFT - inode->i_blkbits);
			lblk += (curr_off >> inode->i_blkbits) -
				contiguous_blks;
			ext4_es_remove_extent(inode, lblk, contiguous_blks);
			contiguous_blks = 0;
		}
		curr_off = next_off;
	} while ((bh = bh->b_this_page) != head);

	if (contiguous_blks) {
		lblk = page->index << (PAGE_SHIFT - inode->i_blkbits);
		lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
		ext4_es_remove_extent(inode, lblk, contiguous_blks);
	}

	/* If we have released all the blocks belonging to a cluster, then we
	 * need to release the reserved space for that cluster. */
	num_clusters = EXT4_NUM_B2C(sbi, to_release);
	while (num_clusters > 0) {
		lblk = (page->index << (PAGE_SHIFT - inode->i_blkbits)) +
			((num_clusters - 1) << sbi->s_cluster_bits);
		if (sbi->s_cluster_ratio == 1 ||
		    !ext4_find_delalloc_cluster(inode, lblk))
			ext4_da_release_space(inode, 1);

		num_clusters--;
	}
}

/*
 * Delayed allocation stuff
 */

struct mpage_da_data {
	struct inode *inode;
	struct writeback_control *wbc;

	pgoff_t first_page;	/* The first page to write */
	pgoff_t next_page;	/* Current page to examine */
	pgoff_t last_page;	/* Last page to examine */
	/*
	 * Extent to map - this can be after first_page because that can be
	 * fully mapped. We somewhat abuse m_flags to store whether the
	 * extent is delalloc or unwritten.
	 */
	struct ext4_map_blocks map;
	struct ext4_io_submit io_submit;	/* IO submission data */
	unsigned int do_map:1;
};

static void mpage_release_unused_pages(struct mpage_da_data *mpd,
				       bool invalidate)
{
	int nr_pages, i;
	pgoff_t index, end;
	struct pagevec pvec;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	/* This is necessary when next_page == 0. */
	if (mpd->first_page >= mpd->next_page)
		return;

	index = mpd->first_page;
	end   = mpd->next_page - 1;
	if (invalidate) {
		ext4_lblk_t start, last;
		start = index << (PAGE_SHIFT - inode->i_blkbits);
		last = end << (PAGE_SHIFT - inode->i_blkbits);
		ext4_es_remove_extent(inode, start, last - start + 1);
	}

	pagevec_init(&pvec);
	while (index <= end) {
		nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			if (invalidate) {
				if (page_mapped(page))
					clear_page_dirty_for_io(page);
				block_invalidatepage(page, 0, PAGE_SIZE);
				ClearPageUptodate(page);
			}
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}

static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct super_block *sb = inode->i_sb;
	struct ext4_inode_info *ei = EXT4_I(inode);

	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
		 EXT4_C2B(EXT4_SB(inode->i_sb),
			  ext4_count_free_clusters(sb)));
	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(sb),
			percpu_counter_sum(&sbi->s_freeclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(sb),
			percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "Block reservation details");
	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
		 ei->i_reserved_data_blocks);
	return;
}

static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
}
write 1786 * time. This function looks up the requested blocks and sets the 1787 * buffer delay bit under the protection of i_data_sem. 1788 */ 1789 static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, 1790 struct ext4_map_blocks *map, 1791 struct buffer_head *bh) 1792 { 1793 struct extent_status es; 1794 int retval; 1795 sector_t invalid_block = ~((sector_t) 0xffff); 1796 #ifdef ES_AGGRESSIVE_TEST 1797 struct ext4_map_blocks orig_map; 1798 1799 memcpy(&orig_map, map, sizeof(*map)); 1800 #endif 1801 1802 if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es)) 1803 invalid_block = ~0; 1804 1805 map->m_flags = 0; 1806 ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u," 1807 "logical block %lu\n", inode->i_ino, map->m_len, 1808 (unsigned long) map->m_lblk); 1809 1810 /* Lookup extent status tree firstly */ 1811 if (ext4_es_lookup_extent(inode, iblock, &es)) { 1812 if (ext4_es_is_hole(&es)) { 1813 retval = 0; 1814 down_read(&EXT4_I(inode)->i_data_sem); 1815 goto add_delayed; 1816 } 1817 1818 /* 1819 * Delayed extent could be allocated by fallocate. 1820 * So we need to check it. 1821 */ 1822 if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) { 1823 map_bh(bh, inode->i_sb, invalid_block); 1824 set_buffer_new(bh); 1825 set_buffer_delay(bh); 1826 return 0; 1827 } 1828 1829 map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk; 1830 retval = es.es_len - (iblock - es.es_lblk); 1831 if (retval > map->m_len) 1832 retval = map->m_len; 1833 map->m_len = retval; 1834 if (ext4_es_is_written(&es)) 1835 map->m_flags |= EXT4_MAP_MAPPED; 1836 else if (ext4_es_is_unwritten(&es)) 1837 map->m_flags |= EXT4_MAP_UNWRITTEN; 1838 else 1839 BUG_ON(1); 1840 1841 #ifdef ES_AGGRESSIVE_TEST 1842 ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0); 1843 #endif 1844 return retval; 1845 } 1846 1847 /* 1848 * Try to see if we can get the block without requesting a new 1849 * file system block. 1850 */ 1851 down_read(&EXT4_I(inode)->i_data_sem); 1852 if (ext4_has_inline_data(inode)) 1853 retval = 0; 1854 else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 1855 retval = ext4_ext_map_blocks(NULL, inode, map, 0); 1856 else 1857 retval = ext4_ind_map_blocks(NULL, inode, map, 0); 1858 1859 add_delayed: 1860 if (retval == 0) { 1861 int ret; 1862 /* 1863 * XXX: __block_prepare_write() unmaps passed block, 1864 * is it OK? 1865 */ 1866 /* 1867 * If the block was allocated from previously allocated cluster, 1868 * then we don't need to reserve it again. However we still need 1869 * to reserve metadata for every block we're going to write. 1870 */ 1871 if (EXT4_SB(inode->i_sb)->s_cluster_ratio == 1 || 1872 !ext4_find_delalloc_cluster(inode, map->m_lblk)) { 1873 ret = ext4_da_reserve_space(inode); 1874 if (ret) { 1875 /* not enough space to reserve */ 1876 retval = ret; 1877 goto out_unlock; 1878 } 1879 } 1880 1881 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, 1882 ~0, EXTENT_STATUS_DELAYED); 1883 if (ret) { 1884 retval = ret; 1885 goto out_unlock; 1886 } 1887 1888 map_bh(bh, inode->i_sb, invalid_block); 1889 set_buffer_new(bh); 1890 set_buffer_delay(bh); 1891 } else if (retval > 0) { 1892 int ret; 1893 unsigned int status; 1894 1895 if (unlikely(retval != map->m_len)) { 1896 ext4_warning(inode->i_sb, 1897 "ES len assertion failed for inode " 1898 "%lu: retval %d != map->m_len %d", 1899 inode->i_ino, retval, map->m_len); 1900 WARN_ON(1); 1901 } 1902 1903 status = map->m_flags & EXT4_MAP_UNWRITTEN ? 
1904 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
1905 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1906 map->m_pblk, status);
1907 if (ret != 0)
1908 retval = ret;
1909 }
1910
1911 out_unlock:
1912 up_read(&EXT4_I(inode)->i_data_sem);
1913
1914 return retval;
1915 }
1916
1917 /*
1918 * This is a special get_block_t callback which is used by
1919 * ext4_da_write_begin(). It will either return a mapped block or
1920 * reserve space for a single block.
1921 *
1922 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
1923 * We also have b_blocknr = -1 and b_bdev initialized properly
1924 *
1925 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
1926 * We also have b_blocknr = the physical block mapping the unwritten extent
1927 * and b_bdev initialized properly.
1928 */
1929 int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1930 struct buffer_head *bh, int create)
1931 {
1932 struct ext4_map_blocks map;
1933 int ret = 0;
1934
1935 BUG_ON(create == 0);
1936 BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
1937
1938 map.m_lblk = iblock;
1939 map.m_len = 1;
1940
1941 /*
1942 * First, we need to know whether the block is already allocated;
1943 * preallocated blocks are unmapped but should be treated
1944 * the same as allocated blocks.
1945 */
1946 ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1947 if (ret <= 0)
1948 return ret;
1949
1950 map_bh(bh, inode->i_sb, map.m_pblk);
1951 ext4_update_bh_state(bh, map.m_flags);
1952
1953 if (buffer_unwritten(bh)) {
1954 /* A delayed write to unwritten bh should be marked
1955 * new and mapped. Mapped ensures that we don't do
1956 * get_block multiple times when we write to the same
1957 * offset and new ensures that we do proper zero out
1958 * for partial write.
1959 */
1960 set_buffer_new(bh);
1961 set_buffer_mapped(bh);
1962 }
1963 return 0;
1964 }
1965
1966 static int bget_one(handle_t *handle, struct buffer_head *bh)
1967 {
1968 get_bh(bh);
1969 return 0;
1970 }
1971
1972 static int bput_one(handle_t *handle, struct buffer_head *bh)
1973 {
1974 put_bh(bh);
1975 return 0;
1976 }
1977
1978 static int __ext4_journalled_writepage(struct page *page,
1979 unsigned int len)
1980 {
1981 struct address_space *mapping = page->mapping;
1982 struct inode *inode = mapping->host;
1983 struct buffer_head *page_bufs = NULL;
1984 handle_t *handle = NULL;
1985 int ret = 0, err = 0;
1986 int inline_data = ext4_has_inline_data(inode);
1987 struct buffer_head *inode_bh = NULL;
1988
1989 ClearPageChecked(page);
1990
1991 if (inline_data) {
1992 BUG_ON(page->index != 0);
1993 BUG_ON(len > ext4_get_max_inline_size(inode));
1994 inode_bh = ext4_journalled_write_inline_data(inode, len, page);
1995 if (inode_bh == NULL)
1996 goto out;
1997 } else {
1998 page_bufs = page_buffers(page);
1999 if (!page_bufs) {
2000 BUG();
2001 goto out;
2002 }
2003 ext4_walk_page_buffers(handle, page_bufs, 0, len,
2004 NULL, bget_one);
2005 }
2006 /*
2007 * We need to release the page lock before we start the
2008 * journal, so grab a reference so the page won't disappear
2009 * out from under us.
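 * We re-take the page lock below and re-check page->mapping, since the
 * page may have been truncated while it was unlocked.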
2010 */
2011 get_page(page);
2012 unlock_page(page);
2013
2014 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
2015 ext4_writepage_trans_blocks(inode));
2016 if (IS_ERR(handle)) {
2017 ret = PTR_ERR(handle);
2018 put_page(page);
2019 goto out_no_pagelock;
2020 }
2021 BUG_ON(!ext4_handle_valid(handle));
2022
2023 lock_page(page);
2024 put_page(page);
2025 if (page->mapping != mapping) {
2026 /* The page got truncated from under us */
2027 ext4_journal_stop(handle);
2028 ret = 0;
2029 goto out;
2030 }
2031
2032 if (inline_data) {
2033 ret = ext4_mark_inode_dirty(handle, inode);
2034 } else {
2035 ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
2036 do_journal_get_write_access);
2037
2038 err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
2039 write_end_fn);
2040 }
2041 if (ret == 0)
2042 ret = err;
2043 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
2044 err = ext4_journal_stop(handle);
2045 if (!ret)
2046 ret = err;
2047
2048 if (!ext4_has_inline_data(inode))
2049 ext4_walk_page_buffers(NULL, page_bufs, 0, len,
2050 NULL, bput_one);
2051 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
2052 out:
2053 unlock_page(page);
2054 out_no_pagelock:
2055 brelse(inode_bh);
2056 return ret;
2057 }
2058
2059 /*
2060 * Note that we don't need to start a transaction unless we're journaling data
2061 * because we should have holes filled from ext4_page_mkwrite(). We don't even
2062 * need to file the inode to the transaction's list in ordered mode because if
2063 * we are writing back data added by write(), the inode is already there and if
2064 * we are writing back data modified via mmap(), no one guarantees in which
2065 * transaction the data will hit the disk. In case we are journaling data, we
2066 * cannot start a transaction directly because a transaction start ranks above
2067 * the page lock, so we have to do some magic.
2068 *
2069 * This function can get called via...
2070 * - ext4_writepages after taking page lock (have journal handle)
2071 * - journal_submit_inode_data_buffers (no journal handle)
2072 * - shrink_page_list via the kswapd/direct reclaim (no journal handle)
2073 * - grab_page_cache when doing write_begin (have journal handle)
2074 *
2075 * We don't do any block allocation in this function. If we have a page with
2076 * multiple blocks we need to write those buffer_heads that are mapped. This
2077 * is important for mmap-based writes. So if, with a 1K block size, we do
2078 * truncate(f, 1024);
2079 * a = mmap(f, 0, 4096);
2080 * a[0] = 'a';
2081 * truncate(f, 4096);
2082 * we have in the page the first buffer_head mapped via the page_mkwrite
2083 * callback, but the other buffer_heads would be unmapped but dirty (dirtied
2084 * by do_wp_page). So writepage should write the first block. If we modify
2085 * the mmap area beyond 1024 we will again get a page_fault and the
2086 * page_mkwrite callback will do the block allocation and mark the
2087 * buffer_heads mapped.
2088 *
2089 * We redirty the page if we have any buffer_heads that are either delayed or
2090 * unwritten in the page.
2091 *
2092 * We can get recursively called as shown below.
2093 *
2094 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
2095 * ext4_writepage()
2096 *
2097 * But since we don't do any block allocation we should not deadlock.
2098 * The page also has the dirty flag cleared, so we don't get a recursive
2099 * page_lock.
2099 */ 2100 static int ext4_writepage(struct page *page, 2101 struct writeback_control *wbc) 2102 { 2103 int ret = 0; 2104 loff_t size; 2105 unsigned int len; 2106 struct buffer_head *page_bufs = NULL; 2107 struct inode *inode = page->mapping->host; 2108 struct ext4_io_submit io_submit; 2109 bool keep_towrite = false; 2110 2111 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) { 2112 ext4_invalidatepage(page, 0, PAGE_SIZE); 2113 unlock_page(page); 2114 return -EIO; 2115 } 2116 2117 trace_ext4_writepage(page); 2118 size = i_size_read(inode); 2119 if (page->index == size >> PAGE_SHIFT) 2120 len = size & ~PAGE_MASK; 2121 else 2122 len = PAGE_SIZE; 2123 2124 page_bufs = page_buffers(page); 2125 /* 2126 * We cannot do block allocation or other extent handling in this 2127 * function. If there are buffers needing that, we have to redirty 2128 * the page. But we may reach here when we do a journal commit via 2129 * journal_submit_inode_data_buffers() and in that case we must write 2130 * allocated buffers to achieve data=ordered mode guarantees. 2131 * 2132 * Also, if there is only one buffer per page (the fs block 2133 * size == the page size), if one buffer needs block 2134 * allocation or needs to modify the extent tree to clear the 2135 * unwritten flag, we know that the page can't be written at 2136 * all, so we might as well refuse the write immediately. 2137 * Unfortunately if the block size != page size, we can't as 2138 * easily detect this case using ext4_walk_page_buffers(), but 2139 * for the extremely common case, this is an optimization that 2140 * skips a useless round trip through ext4_bio_write_page(). 2141 */ 2142 if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL, 2143 ext4_bh_delay_or_unwritten)) { 2144 redirty_page_for_writepage(wbc, page); 2145 if ((current->flags & PF_MEMALLOC) || 2146 (inode->i_sb->s_blocksize == PAGE_SIZE)) { 2147 /* 2148 * For memory cleaning there's no point in writing only 2149 * some buffers. So just bail out. Warn if we came here 2150 * from direct reclaim. 2151 */ 2152 WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) 2153 == PF_MEMALLOC); 2154 unlock_page(page); 2155 return 0; 2156 } 2157 keep_towrite = true; 2158 } 2159 2160 if (PageChecked(page) && ext4_should_journal_data(inode)) 2161 /* 2162 * It's mmapped pagecache. Add buffers and journal it. There 2163 * doesn't seem much point in redirtying the page here. 2164 */ 2165 return __ext4_journalled_writepage(page, len); 2166 2167 ext4_io_submit_init(&io_submit, wbc); 2168 io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS); 2169 if (!io_submit.io_end) { 2170 redirty_page_for_writepage(wbc, page); 2171 unlock_page(page); 2172 return -ENOMEM; 2173 } 2174 ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite); 2175 ext4_io_submit(&io_submit); 2176 /* Drop io_end reference we got from init */ 2177 ext4_put_io_end_defer(io_submit.io_end); 2178 return ret; 2179 } 2180 2181 static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page) 2182 { 2183 int len; 2184 loff_t size; 2185 int err; 2186 2187 BUG_ON(page->index != mpd->first_page); 2188 clear_page_dirty_for_io(page); 2189 /* 2190 * We have to be very careful here! Nothing protects writeback path 2191 * against i_size changes and the page can be writeably mapped into 2192 * page tables. So an application can be growing i_size and writing 2193 * data through mmap while writeback runs. 
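 * (For example, a store through a writeable mmap can race with the i_size
 * check below and extend the file while we are writing the page out.)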
clear_page_dirty_for_io() 2194 * write-protects our page in page tables and the page cannot get 2195 * written to again until we release page lock. So only after 2196 * clear_page_dirty_for_io() we are safe to sample i_size for 2197 * ext4_bio_write_page() to zero-out tail of the written page. We rely 2198 * on the barrier provided by TestClearPageDirty in 2199 * clear_page_dirty_for_io() to make sure i_size is really sampled only 2200 * after page tables are updated. 2201 */ 2202 size = i_size_read(mpd->inode); 2203 if (page->index == size >> PAGE_SHIFT) 2204 len = size & ~PAGE_MASK; 2205 else 2206 len = PAGE_SIZE; 2207 err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false); 2208 if (!err) 2209 mpd->wbc->nr_to_write--; 2210 mpd->first_page++; 2211 2212 return err; 2213 } 2214 2215 #define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay)) 2216 2217 /* 2218 * mballoc gives us at most this number of blocks... 2219 * XXX: That seems to be only a limitation of ext4_mb_normalize_request(). 2220 * The rest of mballoc seems to handle chunks up to full group size. 2221 */ 2222 #define MAX_WRITEPAGES_EXTENT_LEN 2048 2223 2224 /* 2225 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map 2226 * 2227 * @mpd - extent of blocks 2228 * @lblk - logical number of the block in the file 2229 * @bh - buffer head we want to add to the extent 2230 * 2231 * The function is used to collect contig. blocks in the same state. If the 2232 * buffer doesn't require mapping for writeback and we haven't started the 2233 * extent of buffers to map yet, the function returns 'true' immediately - the 2234 * caller can write the buffer right away. Otherwise the function returns true 2235 * if the block has been added to the extent, false if the block couldn't be 2236 * added. 2237 */ 2238 static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk, 2239 struct buffer_head *bh) 2240 { 2241 struct ext4_map_blocks *map = &mpd->map; 2242 2243 /* Buffer that doesn't need mapping for writeback? */ 2244 if (!buffer_dirty(bh) || !buffer_mapped(bh) || 2245 (!buffer_delay(bh) && !buffer_unwritten(bh))) { 2246 /* So far no extent to map => we write the buffer right away */ 2247 if (map->m_len == 0) 2248 return true; 2249 return false; 2250 } 2251 2252 /* First block in the extent? */ 2253 if (map->m_len == 0) { 2254 /* We cannot map unless handle is started... */ 2255 if (!mpd->do_map) 2256 return false; 2257 map->m_lblk = lblk; 2258 map->m_len = 1; 2259 map->m_flags = bh->b_state & BH_FLAGS; 2260 return true; 2261 } 2262 2263 /* Don't go larger than mballoc is willing to allocate */ 2264 if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN) 2265 return false; 2266 2267 /* Can we merge the block to our big extent? */ 2268 if (lblk == map->m_lblk + map->m_len && 2269 (bh->b_state & BH_FLAGS) == map->m_flags) { 2270 map->m_len++; 2271 return true; 2272 } 2273 return false; 2274 } 2275 2276 /* 2277 * mpage_process_page_bufs - submit page buffers for IO or add them to extent 2278 * 2279 * @mpd - extent of blocks for mapping 2280 * @head - the first buffer in the page 2281 * @bh - buffer we should start processing from 2282 * @lblk - logical number of the block in the file corresponding to @bh 2283 * 2284 * Walk through page buffers from @bh upto @head (exclusive) and either submit 2285 * the page for IO if all buffers in this page were mapped and there's no 2286 * accumulated extent of buffers to map or add buffers in the page to the 2287 * extent of buffers to map. 
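 * Buffers are walked in file order, so the accumulated extent can only
 * grow forward.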
The function returns 1 if the caller can continue 2288 * by processing the next page, 0 if it should stop adding buffers to the 2289 * extent to map because we cannot extend it anymore. It can also return value 2290 * < 0 in case of error during IO submission. 2291 */ 2292 static int mpage_process_page_bufs(struct mpage_da_data *mpd, 2293 struct buffer_head *head, 2294 struct buffer_head *bh, 2295 ext4_lblk_t lblk) 2296 { 2297 struct inode *inode = mpd->inode; 2298 int err; 2299 ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1) 2300 >> inode->i_blkbits; 2301 2302 do { 2303 BUG_ON(buffer_locked(bh)); 2304 2305 if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) { 2306 /* Found extent to map? */ 2307 if (mpd->map.m_len) 2308 return 0; 2309 /* Buffer needs mapping and handle is not started? */ 2310 if (!mpd->do_map) 2311 return 0; 2312 /* Everything mapped so far and we hit EOF */ 2313 break; 2314 } 2315 } while (lblk++, (bh = bh->b_this_page) != head); 2316 /* So far everything mapped? Submit the page for IO. */ 2317 if (mpd->map.m_len == 0) { 2318 err = mpage_submit_page(mpd, head->b_page); 2319 if (err < 0) 2320 return err; 2321 } 2322 return lblk < blocks; 2323 } 2324 2325 /* 2326 * mpage_map_buffers - update buffers corresponding to changed extent and 2327 * submit fully mapped pages for IO 2328 * 2329 * @mpd - description of extent to map, on return next extent to map 2330 * 2331 * Scan buffers corresponding to changed extent (we expect corresponding pages 2332 * to be already locked) and update buffer state according to new extent state. 2333 * We map delalloc buffers to their physical location, clear unwritten bits, 2334 * and mark buffers as uninit when we perform writes to unwritten extents 2335 * and do extent conversion after IO is finished. If the last page is not fully 2336 * mapped, we update @map to the next extent in the last page that needs 2337 * mapping. Otherwise we submit the page for IO. 2338 */ 2339 static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd) 2340 { 2341 struct pagevec pvec; 2342 int nr_pages, i; 2343 struct inode *inode = mpd->inode; 2344 struct buffer_head *head, *bh; 2345 int bpp_bits = PAGE_SHIFT - inode->i_blkbits; 2346 pgoff_t start, end; 2347 ext4_lblk_t lblk; 2348 sector_t pblock; 2349 int err; 2350 2351 start = mpd->map.m_lblk >> bpp_bits; 2352 end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits; 2353 lblk = start << bpp_bits; 2354 pblock = mpd->map.m_pblk; 2355 2356 pagevec_init(&pvec); 2357 while (start <= end) { 2358 nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, 2359 &start, end); 2360 if (nr_pages == 0) 2361 break; 2362 for (i = 0; i < nr_pages; i++) { 2363 struct page *page = pvec.pages[i]; 2364 2365 bh = head = page_buffers(page); 2366 do { 2367 if (lblk < mpd->map.m_lblk) 2368 continue; 2369 if (lblk >= mpd->map.m_lblk + mpd->map.m_len) { 2370 /* 2371 * Buffer after end of mapped extent. 2372 * Find next buffer in the page to map. 2373 */ 2374 mpd->map.m_len = 0; 2375 mpd->map.m_flags = 0; 2376 /* 2377 * FIXME: If dioread_nolock supports 2378 * blocksize < pagesize, we need to make 2379 * sure we add size mapped so far to 2380 * io_end->size as the following call 2381 * can submit the page for IO. 
2382 */ 2383 err = mpage_process_page_bufs(mpd, head, 2384 bh, lblk); 2385 pagevec_release(&pvec); 2386 if (err > 0) 2387 err = 0; 2388 return err; 2389 } 2390 if (buffer_delay(bh)) { 2391 clear_buffer_delay(bh); 2392 bh->b_blocknr = pblock++; 2393 } 2394 clear_buffer_unwritten(bh); 2395 } while (lblk++, (bh = bh->b_this_page) != head); 2396 2397 /* 2398 * FIXME: This is going to break if dioread_nolock 2399 * supports blocksize < pagesize as we will try to 2400 * convert potentially unmapped parts of inode. 2401 */ 2402 mpd->io_submit.io_end->size += PAGE_SIZE; 2403 /* Page fully mapped - let IO run! */ 2404 err = mpage_submit_page(mpd, page); 2405 if (err < 0) { 2406 pagevec_release(&pvec); 2407 return err; 2408 } 2409 } 2410 pagevec_release(&pvec); 2411 } 2412 /* Extent fully mapped and matches with page boundary. We are done. */ 2413 mpd->map.m_len = 0; 2414 mpd->map.m_flags = 0; 2415 return 0; 2416 } 2417 2418 static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd) 2419 { 2420 struct inode *inode = mpd->inode; 2421 struct ext4_map_blocks *map = &mpd->map; 2422 int get_blocks_flags; 2423 int err, dioread_nolock; 2424 2425 trace_ext4_da_write_pages_extent(inode, map); 2426 /* 2427 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or 2428 * to convert an unwritten extent to be initialized (in the case 2429 * where we have written into one or more preallocated blocks). It is 2430 * possible that we're going to need more metadata blocks than 2431 * previously reserved. However we must not fail because we're in 2432 * writeback and there is nothing we can do about it so it might result 2433 * in data loss. So use reserved blocks to allocate metadata if 2434 * possible. 2435 * 2436 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if 2437 * the blocks in question are delalloc blocks. This indicates 2438 * that the blocks and quotas has already been checked when 2439 * the data was copied into the page cache. 2440 */ 2441 get_blocks_flags = EXT4_GET_BLOCKS_CREATE | 2442 EXT4_GET_BLOCKS_METADATA_NOFAIL | 2443 EXT4_GET_BLOCKS_IO_SUBMIT; 2444 dioread_nolock = ext4_should_dioread_nolock(inode); 2445 if (dioread_nolock) 2446 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; 2447 if (map->m_flags & (1 << BH_Delay)) 2448 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; 2449 2450 err = ext4_map_blocks(handle, inode, map, get_blocks_flags); 2451 if (err < 0) 2452 return err; 2453 if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) { 2454 if (!mpd->io_submit.io_end->handle && 2455 ext4_handle_valid(handle)) { 2456 mpd->io_submit.io_end->handle = handle->h_rsv_handle; 2457 handle->h_rsv_handle = NULL; 2458 } 2459 ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end); 2460 } 2461 2462 BUG_ON(map->m_len == 0); 2463 if (map->m_flags & EXT4_MAP_NEW) { 2464 clean_bdev_aliases(inode->i_sb->s_bdev, map->m_pblk, 2465 map->m_len); 2466 } 2467 return 0; 2468 } 2469 2470 /* 2471 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length 2472 * mpd->len and submit pages underlying it for IO 2473 * 2474 * @handle - handle for journal operations 2475 * @mpd - extent to map 2476 * @give_up_on_write - we set this to true iff there is a fatal error and there 2477 * is no hope of writing the data. The caller should discard 2478 * dirty pages to avoid infinite loops. 2479 * 2480 * The function maps extent starting at mpd->lblk of length mpd->len. 
If it is
2481 * delayed, blocks are allocated; if it is unwritten, we may need to convert
2482 * them to initialized or split the described range from a larger unwritten
2483 * extent. Note that we need not map all the described range since allocation
2484 * can return fewer blocks or the range may be covered by more unwritten
2485 * extents. We cannot map more because we are limited by reserved transaction
2486 * credits. On the other hand we always make sure that the last touched page
2487 * is fully mapped so that it can be written out (and thus forward progress is
2488 * guaranteed). After mapping we submit all mapped pages for IO.
2489 */
2490 static int mpage_map_and_submit_extent(handle_t *handle,
2491 struct mpage_da_data *mpd,
2492 bool *give_up_on_write)
2493 {
2494 struct inode *inode = mpd->inode;
2495 struct ext4_map_blocks *map = &mpd->map;
2496 int err;
2497 loff_t disksize;
2498 int progress = 0;
2499
2500 mpd->io_submit.io_end->offset =
2501 ((loff_t)map->m_lblk) << inode->i_blkbits;
2502 do {
2503 err = mpage_map_one_extent(handle, mpd);
2504 if (err < 0) {
2505 struct super_block *sb = inode->i_sb;
2506
2507 if (ext4_forced_shutdown(EXT4_SB(sb)) ||
2508 EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
2509 goto invalidate_dirty_pages;
2510 /*
2511 * Let the upper layers retry transient errors.
2512 * In the case of ENOSPC, if ext4_count_free_clusters()
2513 * is non-zero, a commit should free up blocks.
2514 */
2515 if ((err == -ENOMEM) ||
2516 (err == -ENOSPC && ext4_count_free_clusters(sb))) {
2517 if (progress)
2518 goto update_disksize;
2519 return err;
2520 }
2521 ext4_msg(sb, KERN_CRIT,
2522 "Delayed block allocation failed for "
2523 "inode %lu at logical offset %llu with"
2524 " max blocks %u with error %d",
2525 inode->i_ino,
2526 (unsigned long long)map->m_lblk,
2527 (unsigned)map->m_len, -err);
2528 ext4_msg(sb, KERN_CRIT,
2529 "This should not happen!! Data will "
2530 "be lost\n");
2531 if (err == -ENOSPC)
2532 ext4_print_free_blocks(inode);
2533 invalidate_dirty_pages:
2534 *give_up_on_write = true;
2535 return err;
2536 }
2537 progress = 1;
2538 /*
2539 * Update buffer state, submit mapped pages, and get us new
2540 * extent to map
2541 */
2542 err = mpage_map_and_submit_buffers(mpd);
2543 if (err < 0)
2544 goto update_disksize;
2545 } while (map->m_len);
2546
2547 update_disksize:
2548 /*
2549 * Update on-disk size after IO is submitted. Races with
2550 * truncate are avoided by checking i_size under i_data_sem.
2551 */
2552 disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
2553 if (disksize > EXT4_I(inode)->i_disksize) {
2554 int err2;
2555 loff_t i_size;
2556
2557 down_write(&EXT4_I(inode)->i_data_sem);
2558 i_size = i_size_read(inode);
2559 if (disksize > i_size)
2560 disksize = i_size;
2561 if (disksize > EXT4_I(inode)->i_disksize)
2562 EXT4_I(inode)->i_disksize = disksize;
2563 up_write(&EXT4_I(inode)->i_data_sem);
2564 err2 = ext4_mark_inode_dirty(handle, inode);
2565 if (err2)
2566 ext4_error(inode->i_sb,
2567 "Failed to mark inode %lu dirty",
2568 inode->i_ino);
2569 if (!err)
2570 err = err2;
2571 }
2572 return err;
2573 }
2574
2575 /*
2576 * Calculate the total number of credits to reserve for one writepages
2577 * iteration. This is called from ext4_writepages(). We map an extent of
2578 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
2579 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2580 * bpp - 1 blocks in bpp different extents.
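 *
 * For example (illustrative numbers): with 4k pages and 1k blocks we have
 * bpp = 4, so one iteration is sized for 2048 + 3 = 2051 blocks spread
 * over at most 4 discontiguous extents.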
2581 */ 2582 static int ext4_da_writepages_trans_blocks(struct inode *inode) 2583 { 2584 int bpp = ext4_journal_blocks_per_page(inode); 2585 2586 return ext4_meta_trans_blocks(inode, 2587 MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp); 2588 } 2589 2590 /* 2591 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages 2592 * and underlying extent to map 2593 * 2594 * @mpd - where to look for pages 2595 * 2596 * Walk dirty pages in the mapping. If they are fully mapped, submit them for 2597 * IO immediately. When we find a page which isn't mapped we start accumulating 2598 * extent of buffers underlying these pages that needs mapping (formed by 2599 * either delayed or unwritten buffers). We also lock the pages containing 2600 * these buffers. The extent found is returned in @mpd structure (starting at 2601 * mpd->lblk with length mpd->len blocks). 2602 * 2603 * Note that this function can attach bios to one io_end structure which are 2604 * neither logically nor physically contiguous. Although it may seem as an 2605 * unnecessary complication, it is actually inevitable in blocksize < pagesize 2606 * case as we need to track IO to all buffers underlying a page in one io_end. 2607 */ 2608 static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) 2609 { 2610 struct address_space *mapping = mpd->inode->i_mapping; 2611 struct pagevec pvec; 2612 unsigned int nr_pages; 2613 long left = mpd->wbc->nr_to_write; 2614 pgoff_t index = mpd->first_page; 2615 pgoff_t end = mpd->last_page; 2616 int tag; 2617 int i, err = 0; 2618 int blkbits = mpd->inode->i_blkbits; 2619 ext4_lblk_t lblk; 2620 struct buffer_head *head; 2621 2622 if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages) 2623 tag = PAGECACHE_TAG_TOWRITE; 2624 else 2625 tag = PAGECACHE_TAG_DIRTY; 2626 2627 pagevec_init(&pvec); 2628 mpd->map.m_len = 0; 2629 mpd->next_page = index; 2630 while (index <= end) { 2631 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, 2632 tag); 2633 if (nr_pages == 0) 2634 goto out; 2635 2636 for (i = 0; i < nr_pages; i++) { 2637 struct page *page = pvec.pages[i]; 2638 2639 /* 2640 * Accumulated enough dirty pages? This doesn't apply 2641 * to WB_SYNC_ALL mode. For integrity sync we have to 2642 * keep going because someone may be concurrently 2643 * dirtying pages, and we might have synced a lot of 2644 * newly appeared dirty pages, but have not synced all 2645 * of the old dirty pages. 2646 */ 2647 if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0) 2648 goto out; 2649 2650 /* If we can't merge this page, we are done. 
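 * (Pages in one extent must be consecutive in the file.)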
*/ 2651 if (mpd->map.m_len > 0 && mpd->next_page != page->index) 2652 goto out; 2653 2654 lock_page(page); 2655 /* 2656 * If the page is no longer dirty, or its mapping no 2657 * longer corresponds to inode we are writing (which 2658 * means it has been truncated or invalidated), or the 2659 * page is already under writeback and we are not doing 2660 * a data integrity writeback, skip the page 2661 */ 2662 if (!PageDirty(page) || 2663 (PageWriteback(page) && 2664 (mpd->wbc->sync_mode == WB_SYNC_NONE)) || 2665 unlikely(page->mapping != mapping)) { 2666 unlock_page(page); 2667 continue; 2668 } 2669 2670 wait_on_page_writeback(page); 2671 BUG_ON(PageWriteback(page)); 2672 2673 if (mpd->map.m_len == 0) 2674 mpd->first_page = page->index; 2675 mpd->next_page = page->index + 1; 2676 /* Add all dirty buffers to mpd */ 2677 lblk = ((ext4_lblk_t)page->index) << 2678 (PAGE_SHIFT - blkbits); 2679 head = page_buffers(page); 2680 err = mpage_process_page_bufs(mpd, head, head, lblk); 2681 if (err <= 0) 2682 goto out; 2683 err = 0; 2684 left--; 2685 } 2686 pagevec_release(&pvec); 2687 cond_resched(); 2688 } 2689 return 0; 2690 out: 2691 pagevec_release(&pvec); 2692 return err; 2693 } 2694 2695 static int ext4_writepages(struct address_space *mapping, 2696 struct writeback_control *wbc) 2697 { 2698 pgoff_t writeback_index = 0; 2699 long nr_to_write = wbc->nr_to_write; 2700 int range_whole = 0; 2701 int cycled = 1; 2702 handle_t *handle = NULL; 2703 struct mpage_da_data mpd; 2704 struct inode *inode = mapping->host; 2705 int needed_blocks, rsv_blocks = 0, ret = 0; 2706 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); 2707 bool done; 2708 struct blk_plug plug; 2709 bool give_up_on_write = false; 2710 2711 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) 2712 return -EIO; 2713 2714 percpu_down_read(&sbi->s_journal_flag_rwsem); 2715 trace_ext4_writepages(inode, wbc); 2716 2717 /* 2718 * No pages to write? This is mainly a kludge to avoid starting 2719 * a transaction for special inodes like journal inode on last iput() 2720 * because that could violate lock ordering on umount 2721 */ 2722 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 2723 goto out_writepages; 2724 2725 if (ext4_should_journal_data(inode)) { 2726 ret = generic_writepages(mapping, wbc); 2727 goto out_writepages; 2728 } 2729 2730 /* 2731 * If the filesystem has aborted, it is read-only, so return 2732 * right away instead of dumping stack traces later on that 2733 * will obscure the real source of the problem. We test 2734 * EXT4_MF_FS_ABORTED instead of sb->s_flag's SB_RDONLY because 2735 * the latter could be true if the filesystem is mounted 2736 * read-only, and in that case, ext4_writepages should 2737 * *never* be called, so if that ever happens, we would want 2738 * the stack trace. 2739 */ 2740 if (unlikely(ext4_forced_shutdown(EXT4_SB(mapping->host->i_sb)) || 2741 sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) { 2742 ret = -EROFS; 2743 goto out_writepages; 2744 } 2745 2746 if (ext4_should_dioread_nolock(inode)) { 2747 /* 2748 * We may need to convert up to one extent per block in 2749 * the page and we may dirty the inode. 2750 */ 2751 rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits); 2752 } 2753 2754 /* 2755 * If we have inline data and arrive here, it means that 2756 * we will soon create the block for the 1st page, so 2757 * we'd better clear the inline data here. 2758 */ 2759 if (ext4_has_inline_data(inode)) { 2760 /* Just inode will be modified... 
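 * so a single transaction credit is enough.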
*/ 2761 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1); 2762 if (IS_ERR(handle)) { 2763 ret = PTR_ERR(handle); 2764 goto out_writepages; 2765 } 2766 BUG_ON(ext4_test_inode_state(inode, 2767 EXT4_STATE_MAY_INLINE_DATA)); 2768 ext4_destroy_inline_data(handle, inode); 2769 ext4_journal_stop(handle); 2770 } 2771 2772 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 2773 range_whole = 1; 2774 2775 if (wbc->range_cyclic) { 2776 writeback_index = mapping->writeback_index; 2777 if (writeback_index) 2778 cycled = 0; 2779 mpd.first_page = writeback_index; 2780 mpd.last_page = -1; 2781 } else { 2782 mpd.first_page = wbc->range_start >> PAGE_SHIFT; 2783 mpd.last_page = wbc->range_end >> PAGE_SHIFT; 2784 } 2785 2786 mpd.inode = inode; 2787 mpd.wbc = wbc; 2788 ext4_io_submit_init(&mpd.io_submit, wbc); 2789 retry: 2790 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 2791 tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page); 2792 done = false; 2793 blk_start_plug(&plug); 2794 2795 /* 2796 * First writeback pages that don't need mapping - we can avoid 2797 * starting a transaction unnecessarily and also avoid being blocked 2798 * in the block layer on device congestion while having transaction 2799 * started. 2800 */ 2801 mpd.do_map = 0; 2802 mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL); 2803 if (!mpd.io_submit.io_end) { 2804 ret = -ENOMEM; 2805 goto unplug; 2806 } 2807 ret = mpage_prepare_extent_to_map(&mpd); 2808 /* Submit prepared bio */ 2809 ext4_io_submit(&mpd.io_submit); 2810 ext4_put_io_end_defer(mpd.io_submit.io_end); 2811 mpd.io_submit.io_end = NULL; 2812 /* Unlock pages we didn't use */ 2813 mpage_release_unused_pages(&mpd, false); 2814 if (ret < 0) 2815 goto unplug; 2816 2817 while (!done && mpd.first_page <= mpd.last_page) { 2818 /* For each extent of pages we use new io_end */ 2819 mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL); 2820 if (!mpd.io_submit.io_end) { 2821 ret = -ENOMEM; 2822 break; 2823 } 2824 2825 /* 2826 * We have two constraints: We find one extent to map and we 2827 * must always write out whole page (makes a difference when 2828 * blocksize < pagesize) so that we don't block on IO when we 2829 * try to write out the rest of the page. Journalled mode is 2830 * not supported by delalloc. 2831 */ 2832 BUG_ON(ext4_should_journal_data(inode)); 2833 needed_blocks = ext4_da_writepages_trans_blocks(inode); 2834 2835 /* start a new transaction */ 2836 handle = ext4_journal_start_with_reserve(inode, 2837 EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks); 2838 if (IS_ERR(handle)) { 2839 ret = PTR_ERR(handle); 2840 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " 2841 "%ld pages, ino %lu; err %d", __func__, 2842 wbc->nr_to_write, inode->i_ino, ret); 2843 /* Release allocated io_end */ 2844 ext4_put_io_end(mpd.io_submit.io_end); 2845 mpd.io_submit.io_end = NULL; 2846 break; 2847 } 2848 mpd.do_map = 1; 2849 2850 trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc); 2851 ret = mpage_prepare_extent_to_map(&mpd); 2852 if (!ret) { 2853 if (mpd.map.m_len) 2854 ret = mpage_map_and_submit_extent(handle, &mpd, 2855 &give_up_on_write); 2856 else { 2857 /* 2858 * We scanned the whole range (or exhausted 2859 * nr_to_write), submitted what was mapped and 2860 * didn't find anything needing mapping. We are 2861 * done. 
2862 */
2863 done = true;
2864 }
2865 }
2866 /*
2867 * Caution: If the handle is synchronous,
2868 * ext4_journal_stop() can wait for transaction commit
2869 * to finish which may depend on writeback of pages to
2870 * complete or on page lock to be released. In that
2871 * case, we have to wait until after we have
2872 * submitted all the IO, released page locks we hold,
2873 * and dropped io_end reference (for extent conversion
2874 * to be able to complete) before stopping the handle.
2875 */
2876 if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
2877 ext4_journal_stop(handle);
2878 handle = NULL;
2879 mpd.do_map = 0;
2880 }
2881 /* Submit prepared bio */
2882 ext4_io_submit(&mpd.io_submit);
2883 /* Unlock pages we didn't use */
2884 mpage_release_unused_pages(&mpd, give_up_on_write);
2885 /*
2886 * Drop our io_end reference we got from init. We have
2887 * to be careful and use deferred io_end finishing if
2888 * we are still holding the transaction as we can
2889 * release the last reference to io_end which may end
2890 * up doing unwritten extent conversion.
2891 */
2892 if (handle) {
2893 ext4_put_io_end_defer(mpd.io_submit.io_end);
2894 ext4_journal_stop(handle);
2895 } else
2896 ext4_put_io_end(mpd.io_submit.io_end);
2897 mpd.io_submit.io_end = NULL;
2898
2899 if (ret == -ENOSPC && sbi->s_journal) {
2900 /*
2901 * Commit the transaction which would
2902 * free blocks released in the transaction
2903 * and try again
2904 */
2905 jbd2_journal_force_commit_nested(sbi->s_journal);
2906 ret = 0;
2907 continue;
2908 }
2909 /* Fatal error - ENOMEM, EIO... */
2910 if (ret)
2911 break;
2912 }
2913 unplug:
2914 blk_finish_plug(&plug);
2915 if (!ret && !cycled && wbc->nr_to_write > 0) {
2916 cycled = 1;
2917 mpd.last_page = writeback_index - 1;
2918 mpd.first_page = 0;
2919 goto retry;
2920 }
2921
2922 /* Update index */
2923 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2924 /*
2925 * Set the writeback_index so that range_cyclic
2926 * mode will write it back later
2927 */
2928 mapping->writeback_index = mpd.first_page;
2929
2930 out_writepages:
2931 trace_ext4_writepages_result(inode, wbc, ret,
2932 nr_to_write - wbc->nr_to_write);
2933 percpu_up_read(&sbi->s_journal_flag_rwsem);
2934 return ret;
2935 }
2936
2937 static int ext4_dax_writepages(struct address_space *mapping,
2938 struct writeback_control *wbc)
2939 {
2940 int ret;
2941 long nr_to_write = wbc->nr_to_write;
2942 struct inode *inode = mapping->host;
2943 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2944
2945 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
2946 return -EIO;
2947
2948 percpu_down_read(&sbi->s_journal_flag_rwsem);
2949 trace_ext4_writepages(inode, wbc);
2950
2951 ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, wbc);
2952 trace_ext4_writepages_result(inode, wbc, ret,
2953 nr_to_write - wbc->nr_to_write);
2954 percpu_up_read(&sbi->s_journal_flag_rwsem);
2955 return ret;
2956 }
2957
2958 static int ext4_nonda_switch(struct super_block *sb)
2959 {
2960 s64 free_clusters, dirty_clusters;
2961 struct ext4_sb_info *sbi = EXT4_SB(sb);
2962
2963 /*
2964 * Switch to non-delalloc mode if we are running low
2965 * on free blocks. The free-block accounting via percpu
2966 * counters can get slightly wrong because percpu_counter_batch
2967 * amounts accumulate on each CPU without updating the global counters.
2968 * Delalloc needs accurate free-block accounting, so switch
2969 * to non-delalloc when we are near the error range.
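 *
 * Illustration (numbers assumed): with free_clusters = 120 and
 * dirty_clusters = 100, the first check below sees 2 * 120 < 3 * 100 and
 * we switch; with free_clusters = 300 we stay in delalloc mode unless the
 * EXT4_FREECLUSTERS_WATERMARK test fires instead.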
2970 */
2971 free_clusters =
2972 percpu_counter_read_positive(&sbi->s_freeclusters_counter);
2973 dirty_clusters =
2974 percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2975 /*
2976 * Start pushing delalloc when 1/2 of free blocks are dirty.
2977 */
2978 if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
2979 try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
2980
2981 if (2 * free_clusters < 3 * dirty_clusters ||
2982 free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
2983 /*
2984 * The free cluster count is less than 150% of the dirty
2985 * clusters, or the free clusters are below the watermark.
2986 */
2987 return 1;
2988 }
2989 return 0;
2990 }
2991
2992 /* We always reserve for an inode update; the superblock could be there too */
2993 static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
2994 {
2995 if (likely(ext4_has_feature_large_file(inode->i_sb)))
2996 return 1;
2997
2998 if (pos + len <= 0x7fffffffULL)
2999 return 1;
3000
3001 /* We might need to update the superblock to set LARGE_FILE */
3002 return 2;
3003 }
3004
3005 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
3006 loff_t pos, unsigned len, unsigned flags,
3007 struct page **pagep, void **fsdata)
3008 {
3009 int ret, retries = 0;
3010 struct page *page;
3011 pgoff_t index;
3012 struct inode *inode = mapping->host;
3013 handle_t *handle;
3014
3015 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
3016 return -EIO;
3017
3018 index = pos >> PAGE_SHIFT;
3019
3020 if (ext4_nonda_switch(inode->i_sb) ||
3021 S_ISLNK(inode->i_mode)) {
3022 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
3023 return ext4_write_begin(file, mapping, pos,
3024 len, flags, pagep, fsdata);
3025 }
3026 *fsdata = (void *)0;
3027 trace_ext4_da_write_begin(inode, pos, len, flags);
3028
3029 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
3030 ret = ext4_da_write_inline_data_begin(mapping, inode,
3031 pos, len, flags,
3032 pagep, fsdata);
3033 if (ret < 0)
3034 return ret;
3035 if (ret == 1)
3036 return 0;
3037 }
3038
3039 /*
3040 * grab_cache_page_write_begin() can take a long time if the
3041 * system is thrashing due to memory pressure, or if the page
3042 * is being written back. So grab it first before we start
3043 * the transaction handle. This also allows us to allocate
3044 * the page (if needed) without using GFP_NOFS.
3045 */
3046 retry_grab:
3047 page = grab_cache_page_write_begin(mapping, index, flags);
3048 if (!page)
3049 return -ENOMEM;
3050 unlock_page(page);
3051
3052 /*
3053 * With delayed allocation, we don't log the i_disksize update
3054 * if there is delayed block allocation. But we still need
3055 * to journal the i_disksize update if we write to the end
3056 * of the file through an already mapped buffer.
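 * (For example, an append that lands in an already allocated block maps
 * no new blocks, yet i_disksize still has to be pushed forward under the
 * handle.)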
3057 */ 3058 retry_journal: 3059 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 3060 ext4_da_write_credits(inode, pos, len)); 3061 if (IS_ERR(handle)) { 3062 put_page(page); 3063 return PTR_ERR(handle); 3064 } 3065 3066 lock_page(page); 3067 if (page->mapping != mapping) { 3068 /* The page got truncated from under us */ 3069 unlock_page(page); 3070 put_page(page); 3071 ext4_journal_stop(handle); 3072 goto retry_grab; 3073 } 3074 /* In case writeback began while the page was unlocked */ 3075 wait_for_stable_page(page); 3076 3077 #ifdef CONFIG_EXT4_FS_ENCRYPTION 3078 ret = ext4_block_write_begin(page, pos, len, 3079 ext4_da_get_block_prep); 3080 #else 3081 ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep); 3082 #endif 3083 if (ret < 0) { 3084 unlock_page(page); 3085 ext4_journal_stop(handle); 3086 /* 3087 * block_write_begin may have instantiated a few blocks 3088 * outside i_size. Trim these off again. Don't need 3089 * i_size_read because we hold i_mutex. 3090 */ 3091 if (pos + len > inode->i_size) 3092 ext4_truncate_failed_write(inode); 3093 3094 if (ret == -ENOSPC && 3095 ext4_should_retry_alloc(inode->i_sb, &retries)) 3096 goto retry_journal; 3097 3098 put_page(page); 3099 return ret; 3100 } 3101 3102 *pagep = page; 3103 return ret; 3104 } 3105 3106 /* 3107 * Check if we should update i_disksize 3108 * when write to the end of file but not require block allocation 3109 */ 3110 static int ext4_da_should_update_i_disksize(struct page *page, 3111 unsigned long offset) 3112 { 3113 struct buffer_head *bh; 3114 struct inode *inode = page->mapping->host; 3115 unsigned int idx; 3116 int i; 3117 3118 bh = page_buffers(page); 3119 idx = offset >> inode->i_blkbits; 3120 3121 for (i = 0; i < idx; i++) 3122 bh = bh->b_this_page; 3123 3124 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh)) 3125 return 0; 3126 return 1; 3127 } 3128 3129 static int ext4_da_write_end(struct file *file, 3130 struct address_space *mapping, 3131 loff_t pos, unsigned len, unsigned copied, 3132 struct page *page, void *fsdata) 3133 { 3134 struct inode *inode = mapping->host; 3135 int ret = 0, ret2; 3136 handle_t *handle = ext4_journal_current_handle(); 3137 loff_t new_i_size; 3138 unsigned long start, end; 3139 int write_mode = (int)(unsigned long)fsdata; 3140 3141 if (write_mode == FALL_BACK_TO_NONDELALLOC) 3142 return ext4_write_end(file, mapping, pos, 3143 len, copied, page, fsdata); 3144 3145 trace_ext4_da_write_end(inode, pos, len, copied); 3146 start = pos & (PAGE_SIZE - 1); 3147 end = start + copied - 1; 3148 3149 /* 3150 * generic_write_end() will run mark_inode_dirty() if i_size 3151 * changes. So let's piggyback the i_disksize mark_inode_dirty 3152 * into that. 
3153 */
3154 new_i_size = pos + copied;
3155 if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
3156 if (ext4_has_inline_data(inode) ||
3157 ext4_da_should_update_i_disksize(page, end)) {
3158 ext4_update_i_disksize(inode, new_i_size);
3159 /* We need to mark the inode dirty even if
3160 * new_i_size is less than inode->i_size
3161 * but greater than i_disksize. (hint: delalloc)
3162 */
3163 ext4_mark_inode_dirty(handle, inode);
3164 }
3165 }
3166
3167 if (write_mode != CONVERT_INLINE_DATA &&
3168 ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
3169 ext4_has_inline_data(inode))
3170 ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
3171 page);
3172 else
3173 ret2 = generic_write_end(file, mapping, pos, len, copied,
3174 page, fsdata);
3175
3176 copied = ret2;
3177 if (ret2 < 0)
3178 ret = ret2;
3179 ret2 = ext4_journal_stop(handle);
3180 if (!ret)
3181 ret = ret2;
3182
3183 return ret ? ret : copied;
3184 }
3185
3186 static void ext4_da_invalidatepage(struct page *page, unsigned int offset,
3187 unsigned int length)
3188 {
3189 /*
3190 * Drop reserved blocks
3191 */
3192 BUG_ON(!PageLocked(page));
3193 if (!page_has_buffers(page))
3194 goto out;
3195
3196 ext4_da_page_release_reservation(page, offset, length);
3197
3198 out:
3199 ext4_invalidatepage(page, offset, length);
3200
3201 return;
3202 }
3203
3204 /*
3205 * Force all delayed allocation blocks to be allocated for a given inode.
3206 */
3207 int ext4_alloc_da_blocks(struct inode *inode)
3208 {
3209 trace_ext4_alloc_da_blocks(inode);
3210
3211 if (!EXT4_I(inode)->i_reserved_data_blocks)
3212 return 0;
3213
3214 /*
3215 * We do something simple for now. The filemap_flush() will
3216 * also start triggering a write of the data blocks, which is
3217 * not strictly speaking necessary (and for users of
3218 * laptop_mode, not even desirable). However, to do otherwise
3219 * would require replicating code paths in:
3220 *
3221 * ext4_writepages() ->
3222 * write_cache_pages() ---> (via passed in callback function)
3223 * __mpage_da_writepage() -->
3224 * mpage_add_bh_to_extent()
3225 * mpage_da_map_blocks()
3226 *
3227 * The problem is that write_cache_pages(), located in
3228 * mm/page-writeback.c, marks pages clean in preparation for
3229 * doing I/O, which is not desirable if we're not planning on
3230 * doing I/O at all.
3231 *
3232 * We could call write_cache_pages(), and then redirty all of
3233 * the pages by calling redirty_page_for_writepage() but that
3234 * would be ugly in the extreme. So instead we would need to
3235 * replicate parts of the code in the above functions,
3236 * simplifying them because we wouldn't actually intend to
3237 * write out the pages, but rather only collect contiguous
3238 * logical block extents, call the multi-block allocator, and
3239 * then update the buffer heads with the block allocations.
3240 *
3241 * For now, though, we'll cheat by calling filemap_flush(),
3242 * which will map the blocks, and start the I/O, but not
3243 * actually wait for the I/O to complete.
3244 */
3245 return filemap_flush(inode->i_mapping);
3246 }
3247
3248 /*
3249 * bmap() is special. It gets used by applications such as lilo and by
3250 * the swapper to find the on-disk block of a specific piece of data.
3251 *
3252 * Naturally, this is dangerous if the block concerned is still in the
3253 * journal.
If somebody makes a swapfile on an ext4 data-journaling
3254 * filesystem and enables swap, then they may get a nasty shock when the
3255 * data being swapped to that swapfile suddenly gets overwritten by
3256 * the original zeros written out previously to the journal and
3257 * awaiting writeback in the kernel's buffer cache.
3258 *
3259 * So, if we see any bmap calls here on a modified, data-journaled file,
3260 * take extra steps to flush any blocks which might be in the cache.
3261 */
3262 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
3263 {
3264 struct inode *inode = mapping->host;
3265 journal_t *journal;
3266 int err;
3267
3268 /*
3269 * We can get here for an inline file via the FIBMAP ioctl
3270 */
3271 if (ext4_has_inline_data(inode))
3272 return 0;
3273
3274 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
3275 test_opt(inode->i_sb, DELALLOC)) {
3276 /*
3277 * With delalloc we want to sync the file
3278 * so that we can make sure we allocate
3279 * blocks for the file
3280 */
3281 filemap_write_and_wait(mapping);
3282 }
3283
3284 if (EXT4_JOURNAL(inode) &&
3285 ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
3286 /*
3287 * This is a REALLY heavyweight approach, but the use of
3288 * bmap on dirty files is expected to be extremely rare:
3289 * only if we run lilo or swapon on a freshly made file
3290 * do we expect this to happen.
3291 *
3292 * (bmap requires CAP_SYS_RAWIO so this does not
3293 * represent an unprivileged user DOS attack --- we'd be
3294 * in trouble if mortal users could trigger this path at
3295 * will.)
3296 *
3297 * NB. EXT4_STATE_JDATA is not set on files other than
3298 * regular files. If somebody wants to bmap a directory
3299 * or symlink and gets confused because the buffer
3300 * hasn't yet been flushed to disk, they deserve
3301 * everything they get.
3302 */
3303
3304 ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
3305 journal = EXT4_JOURNAL(inode);
3306 jbd2_journal_lock_updates(journal);
3307 err = jbd2_journal_flush(journal);
3308 jbd2_journal_unlock_updates(journal);
3309
3310 if (err)
3311 return 0;
3312 }
3313
3314 return generic_block_bmap(mapping, block, ext4_get_block);
3315 }
3316
3317 static int ext4_readpage(struct file *file, struct page *page)
3318 {
3319 int ret = -EAGAIN;
3320 struct inode *inode = page->mapping->host;
3321
3322 trace_ext4_readpage(page);
3323
3324 if (ext4_has_inline_data(inode))
3325 ret = ext4_readpage_inline(inode, page);
3326
3327 if (ret == -EAGAIN)
3328 return ext4_mpage_readpages(page->mapping, NULL, page, 1,
3329 false);
3330
3331 return ret;
3332 }
3333
3334 static int
3335 ext4_readpages(struct file *file, struct address_space *mapping,
3336 struct list_head *pages, unsigned nr_pages)
3337 {
3338 struct inode *inode = mapping->host;
3339
3340 /* If the file has inline data, no need to do readpages.
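 * The data lives in the inode body rather than in separate data blocks.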
*/ 3341 if (ext4_has_inline_data(inode)) 3342 return 0; 3343 3344 return ext4_mpage_readpages(mapping, pages, NULL, nr_pages, true); 3345 } 3346 3347 static void ext4_invalidatepage(struct page *page, unsigned int offset, 3348 unsigned int length) 3349 { 3350 trace_ext4_invalidatepage(page, offset, length); 3351 3352 /* No journalling happens on data buffers when this function is used */ 3353 WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page))); 3354 3355 block_invalidatepage(page, offset, length); 3356 } 3357 3358 static int __ext4_journalled_invalidatepage(struct page *page, 3359 unsigned int offset, 3360 unsigned int length) 3361 { 3362 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3363 3364 trace_ext4_journalled_invalidatepage(page, offset, length); 3365 3366 /* 3367 * If it's a full truncate we just forget about the pending dirtying 3368 */ 3369 if (offset == 0 && length == PAGE_SIZE) 3370 ClearPageChecked(page); 3371 3372 return jbd2_journal_invalidatepage(journal, page, offset, length); 3373 } 3374 3375 /* Wrapper for aops... */ 3376 static void ext4_journalled_invalidatepage(struct page *page, 3377 unsigned int offset, 3378 unsigned int length) 3379 { 3380 WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0); 3381 } 3382 3383 static int ext4_releasepage(struct page *page, gfp_t wait) 3384 { 3385 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3386 3387 trace_ext4_releasepage(page); 3388 3389 /* Page has dirty journalled data -> cannot release */ 3390 if (PageChecked(page)) 3391 return 0; 3392 if (journal) 3393 return jbd2_journal_try_to_free_buffers(journal, page, wait); 3394 else 3395 return try_to_free_buffers(page); 3396 } 3397 3398 static bool ext4_inode_datasync_dirty(struct inode *inode) 3399 { 3400 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; 3401 3402 if (journal) 3403 return !jbd2_transaction_committed(journal, 3404 EXT4_I(inode)->i_datasync_tid); 3405 /* Any metadata buffers to write? 
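 * (inode->i_mapping->private_list tracks metadata buffers associated
 * with this inode.)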
*/ 3406 if (!list_empty(&inode->i_mapping->private_list)) 3407 return true; 3408 return inode->i_state & I_DIRTY_DATASYNC; 3409 } 3410 3411 static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length, 3412 unsigned flags, struct iomap *iomap) 3413 { 3414 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 3415 unsigned int blkbits = inode->i_blkbits; 3416 unsigned long first_block = offset >> blkbits; 3417 unsigned long last_block = (offset + length - 1) >> blkbits; 3418 struct ext4_map_blocks map; 3419 bool delalloc = false; 3420 int ret; 3421 3422 3423 if (flags & IOMAP_REPORT) { 3424 if (ext4_has_inline_data(inode)) { 3425 ret = ext4_inline_data_iomap(inode, iomap); 3426 if (ret != -EAGAIN) { 3427 if (ret == 0 && offset >= iomap->length) 3428 ret = -ENOENT; 3429 return ret; 3430 } 3431 } 3432 } else { 3433 if (WARN_ON_ONCE(ext4_has_inline_data(inode))) 3434 return -ERANGE; 3435 } 3436 3437 map.m_lblk = first_block; 3438 map.m_len = last_block - first_block + 1; 3439 3440 if (flags & IOMAP_REPORT) { 3441 ret = ext4_map_blocks(NULL, inode, &map, 0); 3442 if (ret < 0) 3443 return ret; 3444 3445 if (ret == 0) { 3446 ext4_lblk_t end = map.m_lblk + map.m_len - 1; 3447 struct extent_status es; 3448 3449 ext4_es_find_delayed_extent_range(inode, map.m_lblk, end, &es); 3450 3451 if (!es.es_len || es.es_lblk > end) { 3452 /* entire range is a hole */ 3453 } else if (es.es_lblk > map.m_lblk) { 3454 /* range starts with a hole */ 3455 map.m_len = es.es_lblk - map.m_lblk; 3456 } else { 3457 ext4_lblk_t offs = 0; 3458 3459 if (es.es_lblk < map.m_lblk) 3460 offs = map.m_lblk - es.es_lblk; 3461 map.m_lblk = es.es_lblk + offs; 3462 map.m_len = es.es_len - offs; 3463 delalloc = true; 3464 } 3465 } 3466 } else if (flags & IOMAP_WRITE) { 3467 int dio_credits; 3468 handle_t *handle; 3469 int retries = 0; 3470 3471 /* Trim mapping request to maximum we can map at once for DIO */ 3472 if (map.m_len > DIO_MAX_BLOCKS) 3473 map.m_len = DIO_MAX_BLOCKS; 3474 dio_credits = ext4_chunk_trans_blocks(inode, map.m_len); 3475 retry: 3476 /* 3477 * Either we allocate blocks and then we don't get unwritten 3478 * extent so we have reserved enough credits, or the blocks 3479 * are already allocated and unwritten and in that case 3480 * extent conversion fits in the credits as well. 3481 */ 3482 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 3483 dio_credits); 3484 if (IS_ERR(handle)) 3485 return PTR_ERR(handle); 3486 3487 ret = ext4_map_blocks(handle, inode, &map, 3488 EXT4_GET_BLOCKS_CREATE_ZERO); 3489 if (ret < 0) { 3490 ext4_journal_stop(handle); 3491 if (ret == -ENOSPC && 3492 ext4_should_retry_alloc(inode->i_sb, &retries)) 3493 goto retry; 3494 return ret; 3495 } 3496 3497 /* 3498 * If we added blocks beyond i_size, we need to make sure they 3499 * will get truncated if we crash before updating i_size in 3500 * ext4_iomap_end(). For faults we don't need to do that (and 3501 * even cannot because for orphan list operations inode_lock is 3502 * required) - if we happen to instantiate block beyond i_size, 3503 * it is because we race with truncate which has already added 3504 * the inode to the orphan list. 
3505 */ 3506 if (!(flags & IOMAP_FAULT) && first_block + map.m_len > 3507 (i_size_read(inode) + (1 << blkbits) - 1) >> blkbits) { 3508 int err; 3509 3510 err = ext4_orphan_add(handle, inode); 3511 if (err < 0) { 3512 ext4_journal_stop(handle); 3513 return err; 3514 } 3515 } 3516 ext4_journal_stop(handle); 3517 } else { 3518 ret = ext4_map_blocks(NULL, inode, &map, 0); 3519 if (ret < 0) 3520 return ret; 3521 } 3522 3523 iomap->flags = 0; 3524 if (ext4_inode_datasync_dirty(inode)) 3525 iomap->flags |= IOMAP_F_DIRTY; 3526 iomap->bdev = inode->i_sb->s_bdev; 3527 iomap->dax_dev = sbi->s_daxdev; 3528 iomap->offset = (u64)first_block << blkbits; 3529 iomap->length = (u64)map.m_len << blkbits; 3530 3531 if (ret == 0) { 3532 iomap->type = delalloc ? IOMAP_DELALLOC : IOMAP_HOLE; 3533 iomap->addr = IOMAP_NULL_ADDR; 3534 } else { 3535 if (map.m_flags & EXT4_MAP_MAPPED) { 3536 iomap->type = IOMAP_MAPPED; 3537 } else if (map.m_flags & EXT4_MAP_UNWRITTEN) { 3538 iomap->type = IOMAP_UNWRITTEN; 3539 } else { 3540 WARN_ON_ONCE(1); 3541 return -EIO; 3542 } 3543 iomap->addr = (u64)map.m_pblk << blkbits; 3544 } 3545 3546 if (map.m_flags & EXT4_MAP_NEW) 3547 iomap->flags |= IOMAP_F_NEW; 3548 3549 return 0; 3550 } 3551 3552 static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length, 3553 ssize_t written, unsigned flags, struct iomap *iomap) 3554 { 3555 int ret = 0; 3556 handle_t *handle; 3557 int blkbits = inode->i_blkbits; 3558 bool truncate = false; 3559 3560 if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT)) 3561 return 0; 3562 3563 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); 3564 if (IS_ERR(handle)) { 3565 ret = PTR_ERR(handle); 3566 goto orphan_del; 3567 } 3568 if (ext4_update_inode_size(inode, offset + written)) 3569 ext4_mark_inode_dirty(handle, inode); 3570 /* 3571 * We may need to truncate allocated but not written blocks beyond EOF. 3572 */ 3573 if (iomap->offset + iomap->length > 3574 ALIGN(inode->i_size, 1 << blkbits)) { 3575 ext4_lblk_t written_blk, end_blk; 3576 3577 written_blk = (offset + written) >> blkbits; 3578 end_blk = (offset + length) >> blkbits; 3579 if (written_blk < end_blk && ext4_can_truncate(inode)) 3580 truncate = true; 3581 } 3582 /* 3583 * Remove inode from orphan list if we were extending a inode and 3584 * everything went fine. 3585 */ 3586 if (!truncate && inode->i_nlink && 3587 !list_empty(&EXT4_I(inode)->i_orphan)) 3588 ext4_orphan_del(handle, inode); 3589 ext4_journal_stop(handle); 3590 if (truncate) { 3591 ext4_truncate_failed_write(inode); 3592 orphan_del: 3593 /* 3594 * If truncate failed early the inode might still be on the 3595 * orphan list; we need to make sure the inode is removed from 3596 * the orphan list in that case. 3597 */ 3598 if (inode->i_nlink) 3599 ext4_orphan_del(NULL, inode); 3600 } 3601 return ret; 3602 } 3603 3604 const struct iomap_ops ext4_iomap_ops = { 3605 .iomap_begin = ext4_iomap_begin, 3606 .iomap_end = ext4_iomap_end, 3607 }; 3608 3609 static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset, 3610 ssize_t size, void *private) 3611 { 3612 ext4_io_end_t *io_end = private; 3613 3614 /* if not async direct IO just return */ 3615 if (!io_end) 3616 return 0; 3617 3618 ext_debug("ext4_end_io_dio(): io_end 0x%p " 3619 "for inode %lu, iocb 0x%p, offset %llu, size %zd\n", 3620 io_end, io_end->inode->i_ino, iocb, offset, size); 3621 3622 /* 3623 * Error during AIO DIO. We cannot convert unwritten extents as the 3624 * data was not written. Just clear the unwritten flag and drop io_end. 
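 * (The error case shows up here as size <= 0.)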
3625 */ 3626 if (size <= 0) { 3627 ext4_clear_io_unwritten_flag(io_end); 3628 size = 0; 3629 } 3630 io_end->offset = offset; 3631 io_end->size = size; 3632 ext4_put_io_end(io_end); 3633 3634 return 0; 3635 } 3636 3637 /* 3638 * Handling of direct IO writes. 3639 * 3640 * For ext4 extent files, ext4 will do direct-io writes even to holes, 3641 * preallocated extents, and writes that extend the file, with no need to 3642 * fall back to buffered IO. 3643 * 3644 * For holes, we fallocate those blocks and mark them as unwritten. 3645 * If those blocks were preallocated, we make sure they are split, but 3646 * still keep the range to write as unwritten. 3647 * 3648 * The unwritten extents will be converted to written when DIO is completed. 3649 * For async direct IO, since the IO may still be pending when we return, we 3650 * set up an end_io callback function, which will do the conversion 3651 * when the async direct IO has completed. 3652 * 3653 * If the O_DIRECT write will extend the file then add this inode to the 3654 * orphan list. So recovery will truncate it back to the original size 3655 * if the machine crashes during the write. 3656 * 3657 */ 3658 static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter) 3659 { 3660 struct file *file = iocb->ki_filp; 3661 struct inode *inode = file->f_mapping->host; 3662 struct ext4_inode_info *ei = EXT4_I(inode); 3663 ssize_t ret; 3664 loff_t offset = iocb->ki_pos; 3665 size_t count = iov_iter_count(iter); 3666 int overwrite = 0; 3667 get_block_t *get_block_func = NULL; 3668 int dio_flags = 0; 3669 loff_t final_size = offset + count; 3670 int orphan = 0; 3671 handle_t *handle; 3672 3673 if (final_size > inode->i_size || final_size > ei->i_disksize) { 3674 /* Credits for sb + inode write */ 3675 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); 3676 if (IS_ERR(handle)) { 3677 ret = PTR_ERR(handle); 3678 goto out; 3679 } 3680 ret = ext4_orphan_add(handle, inode); 3681 if (ret) { 3682 ext4_journal_stop(handle); 3683 goto out; 3684 } 3685 orphan = 1; 3686 ext4_update_i_disksize(inode, inode->i_size); 3687 ext4_journal_stop(handle); 3688 } 3689 3690 BUG_ON(iocb->private == NULL); 3691 3692 /* 3693 * Make all waiters for direct IO properly wait also for extent 3694 * conversion. This also disallows race between truncate() and 3695 * overwrite DIO as i_dio_count needs to be incremented under i_mutex. 3696 */ 3697 inode_dio_begin(inode); 3698 3699 /* If we do an overwrite dio, i_mutex locking can be released */ 3700 overwrite = *((int *)iocb->private); 3701 3702 if (overwrite) 3703 inode_unlock(inode); 3704 3705 /* 3706 * For extent mapped files we could direct write to holes and fallocate. 3707 * 3708 * Allocated blocks to fill the hole are marked as unwritten to prevent 3709 * a parallel buffered read from exposing stale data before DIO completes 3710 * the data IO. 3711 * 3712 * As for previously fallocated extents, ext4 get_block will simply 3713 * mark the buffer mapped but still keep the extents unwritten. 3714 * 3715 * For the non-AIO case, we will convert those unwritten extents to written 3716 * after returning from blockdev_direct_IO. That saves us from 3717 * allocating an io_end structure and also the overhead of offloading 3718 * the extent conversion to a workqueue. 3719 * 3720 * For async DIO, the conversion needs to be deferred until the 3721 * IO is completed. The ext4 end_io callback function will be 3722 * called to take care of the conversion work. Here, for the async 3723 * case, we allocate an io_end structure to hook to the iocb.
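 *
 * To summarize the get_block choices made below: an overwrite uses
 * ext4_dio_get_block_overwrite; a non-extent file, or a write whose
 * block-aligned start is at or past i_size, uses ext4_dio_get_block
 * with DIO_SKIP_HOLES; otherwise a sync kiocb gets
 * ext4_dio_get_block_unwritten_sync and an async one gets
 * ext4_dio_get_block_unwritten_async.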
3724 */ 3725 iocb->private = NULL; 3726 if (overwrite) 3727 get_block_func = ext4_dio_get_block_overwrite; 3728 else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) || 3729 round_down(offset, i_blocksize(inode)) >= inode->i_size) { 3730 get_block_func = ext4_dio_get_block; 3731 dio_flags = DIO_LOCKING | DIO_SKIP_HOLES; 3732 } else if (is_sync_kiocb(iocb)) { 3733 get_block_func = ext4_dio_get_block_unwritten_sync; 3734 dio_flags = DIO_LOCKING; 3735 } else { 3736 get_block_func = ext4_dio_get_block_unwritten_async; 3737 dio_flags = DIO_LOCKING; 3738 } 3739 ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, 3740 get_block_func, ext4_end_io_dio, NULL, 3741 dio_flags); 3742 3743 if (ret > 0 && !overwrite && ext4_test_inode_state(inode, 3744 EXT4_STATE_DIO_UNWRITTEN)) { 3745 int err; 3746 /* 3747 * For the non-AIO case, since the IO is already 3748 * completed, we can do the conversion right here 3749 */ 3750 err = ext4_convert_unwritten_extents(NULL, inode, 3751 offset, ret); 3752 if (err < 0) 3753 ret = err; 3754 ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); 3755 } 3756 3757 inode_dio_end(inode); 3758 /* take i_mutex locking again if we do an overwrite dio */ 3759 if (overwrite) 3760 inode_lock(inode); 3761 3762 if (ret < 0 && final_size > inode->i_size) 3763 ext4_truncate_failed_write(inode); 3764 3765 /* Handle extending of i_size after direct IO write */ 3766 if (orphan) { 3767 int err; 3768 3769 /* Credits for sb + inode write */ 3770 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); 3771 if (IS_ERR(handle)) { 3772 /* 3773 * We wrote the data but cannot extend 3774 * i_size. Bail out. In the async IO case, we do 3775 * not return error here because we have 3776 * already submitted the corresponding 3777 * bio. Returning error here makes the caller 3778 * think that this IO is done and failed, 3779 * resulting in a race with the bio's completion 3780 * handler. 3781 */ 3782 if (!ret) 3783 ret = PTR_ERR(handle); 3784 if (inode->i_nlink) 3785 ext4_orphan_del(NULL, inode); 3786 3787 goto out; 3788 } 3789 if (inode->i_nlink) 3790 ext4_orphan_del(handle, inode); 3791 if (ret > 0) { 3792 loff_t end = offset + ret; 3793 if (end > inode->i_size || end > ei->i_disksize) { 3794 ext4_update_i_disksize(inode, end); 3795 if (end > inode->i_size) 3796 i_size_write(inode, end); 3797 /* 3798 * We're going to return a positive `ret' 3799 * here due to non-zero-length I/O, so there's 3800 * no way of reporting error returns from 3801 * ext4_mark_inode_dirty() to userspace. So 3802 * ignore it. 3803 */ 3804 ext4_mark_inode_dirty(handle, inode); 3805 } 3806 } 3807 err = ext4_journal_stop(handle); 3808 if (ret == 0) 3809 ret = err; 3810 } 3811 out: 3812 return ret; 3813 } 3814 3815 static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter) 3816 { 3817 struct address_space *mapping = iocb->ki_filp->f_mapping; 3818 struct inode *inode = mapping->host; 3819 size_t count = iov_iter_count(iter); 3820 ssize_t ret; 3821 3822 /* 3823 * Shared inode_lock is enough for us - it protects against concurrent 3824 * writes & truncates and since we take care of writing back page cache, 3825 * we are protected against page writeback as well.
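 *
 * For example, a racing truncate would need the exclusive lock, so it
 * cannot shrink i_size while we hold the shared lock, and the
 * filemap_write_and_wait_range() call below pushes any dirty cached
 * pages over the read range out to the device first.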
3826 */ 3827 inode_lock_shared(inode); 3828 ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, 3829 iocb->ki_pos + count - 1); 3830 if (ret) 3831 goto out_unlock; 3832 ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, 3833 iter, ext4_dio_get_block, NULL, NULL, 0); 3834 out_unlock: 3835 inode_unlock_shared(inode); 3836 return ret; 3837 } 3838 3839 static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter) 3840 { 3841 struct file *file = iocb->ki_filp; 3842 struct inode *inode = file->f_mapping->host; 3843 size_t count = iov_iter_count(iter); 3844 loff_t offset = iocb->ki_pos; 3845 ssize_t ret; 3846 3847 #ifdef CONFIG_EXT4_FS_ENCRYPTION 3848 if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) 3849 return 0; 3850 #endif 3851 3852 /* 3853 * If we are doing data journalling we don't support O_DIRECT 3854 */ 3855 if (ext4_should_journal_data(inode)) 3856 return 0; 3857 3858 /* Let buffer I/O handle the inline data case. */ 3859 if (ext4_has_inline_data(inode)) 3860 return 0; 3861 3862 trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter)); 3863 if (iov_iter_rw(iter) == READ) 3864 ret = ext4_direct_IO_read(iocb, iter); 3865 else 3866 ret = ext4_direct_IO_write(iocb, iter); 3867 trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret); 3868 return ret; 3869 } 3870 3871 /* 3872 * Pages can be marked dirty completely asynchronously from ext4's journalling 3873 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do 3874 * much here because ->set_page_dirty is called under VFS locks. The page is 3875 * not necessarily locked. 3876 * 3877 * We cannot just dirty the page and leave attached buffers clean, because the 3878 * buffers' dirty state is "definitive". We cannot just set the buffers dirty 3879 * or jbddirty because all the journalling code will explode. 3880 * 3881 * So what we do is to mark the page "pending dirty" and next time writepage 3882 * is called, propagate that into the buffers appropriately. 
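 *
 * Concretely, ext4_journalled_set_page_dirty() below records the
 * pending state with SetPageChecked() and dirties the page without
 * touching its buffers; the Checked flag is what writepage later keys
 * on to dirty the buffers under journal control.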
3883 */ 3884 static int ext4_journalled_set_page_dirty(struct page *page) 3885 { 3886 SetPageChecked(page); 3887 return __set_page_dirty_nobuffers(page); 3888 } 3889 3890 static int ext4_set_page_dirty(struct page *page) 3891 { 3892 WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page)); 3893 WARN_ON_ONCE(!page_has_buffers(page)); 3894 return __set_page_dirty_buffers(page); 3895 } 3896 3897 static const struct address_space_operations ext4_aops = { 3898 .readpage = ext4_readpage, 3899 .readpages = ext4_readpages, 3900 .writepage = ext4_writepage, 3901 .writepages = ext4_writepages, 3902 .write_begin = ext4_write_begin, 3903 .write_end = ext4_write_end, 3904 .set_page_dirty = ext4_set_page_dirty, 3905 .bmap = ext4_bmap, 3906 .invalidatepage = ext4_invalidatepage, 3907 .releasepage = ext4_releasepage, 3908 .direct_IO = ext4_direct_IO, 3909 .migratepage = buffer_migrate_page, 3910 .is_partially_uptodate = block_is_partially_uptodate, 3911 .error_remove_page = generic_error_remove_page, 3912 }; 3913 3914 static const struct address_space_operations ext4_journalled_aops = { 3915 .readpage = ext4_readpage, 3916 .readpages = ext4_readpages, 3917 .writepage = ext4_writepage, 3918 .writepages = ext4_writepages, 3919 .write_begin = ext4_write_begin, 3920 .write_end = ext4_journalled_write_end, 3921 .set_page_dirty = ext4_journalled_set_page_dirty, 3922 .bmap = ext4_bmap, 3923 .invalidatepage = ext4_journalled_invalidatepage, 3924 .releasepage = ext4_releasepage, 3925 .direct_IO = ext4_direct_IO, 3926 .is_partially_uptodate = block_is_partially_uptodate, 3927 .error_remove_page = generic_error_remove_page, 3928 }; 3929 3930 static const struct address_space_operations ext4_da_aops = { 3931 .readpage = ext4_readpage, 3932 .readpages = ext4_readpages, 3933 .writepage = ext4_writepage, 3934 .writepages = ext4_writepages, 3935 .write_begin = ext4_da_write_begin, 3936 .write_end = ext4_da_write_end, 3937 .set_page_dirty = ext4_set_page_dirty, 3938 .bmap = ext4_bmap, 3939 .invalidatepage = ext4_da_invalidatepage, 3940 .releasepage = ext4_releasepage, 3941 .direct_IO = ext4_direct_IO, 3942 .migratepage = buffer_migrate_page, 3943 .is_partially_uptodate = block_is_partially_uptodate, 3944 .error_remove_page = generic_error_remove_page, 3945 }; 3946 3947 static const struct address_space_operations ext4_dax_aops = { 3948 .writepages = ext4_dax_writepages, 3949 .direct_IO = noop_direct_IO, 3950 .set_page_dirty = noop_set_page_dirty, 3951 .invalidatepage = noop_invalidatepage, 3952 }; 3953 3954 void ext4_set_aops(struct inode *inode) 3955 { 3956 switch (ext4_inode_journal_mode(inode)) { 3957 case EXT4_INODE_ORDERED_DATA_MODE: 3958 case EXT4_INODE_WRITEBACK_DATA_MODE: 3959 break; 3960 case EXT4_INODE_JOURNAL_DATA_MODE: 3961 inode->i_mapping->a_ops = &ext4_journalled_aops; 3962 return; 3963 default: 3964 BUG(); 3965 } 3966 if (IS_DAX(inode)) 3967 inode->i_mapping->a_ops = &ext4_dax_aops; 3968 else if (test_opt(inode->i_sb, DELALLOC)) 3969 inode->i_mapping->a_ops = &ext4_da_aops; 3970 else 3971 inode->i_mapping->a_ops = &ext4_aops; 3972 } 3973 3974 static int __ext4_block_zero_page_range(handle_t *handle, 3975 struct address_space *mapping, loff_t from, loff_t length) 3976 { 3977 ext4_fsblk_t index = from >> PAGE_SHIFT; 3978 unsigned offset = from & (PAGE_SIZE-1); 3979 unsigned blocksize, pos; 3980 ext4_lblk_t iblock; 3981 struct inode *inode = mapping->host; 3982 struct buffer_head *bh; 3983 struct page *page; 3984 int err = 0; 3985 3986 page = find_or_create_page(mapping, from >> PAGE_SHIFT, 3987 
mapping_gfp_constraint(mapping, ~__GFP_FS)); 3988 if (!page) 3989 return -ENOMEM; 3990 3991 blocksize = inode->i_sb->s_blocksize; 3992 3993 iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits); 3994 3995 if (!page_has_buffers(page)) 3996 create_empty_buffers(page, blocksize, 0); 3997 3998 /* Find the buffer that contains "offset" */ 3999 bh = page_buffers(page); 4000 pos = blocksize; 4001 while (offset >= pos) { 4002 bh = bh->b_this_page; 4003 iblock++; 4004 pos += blocksize; 4005 } 4006 if (buffer_freed(bh)) { 4007 BUFFER_TRACE(bh, "freed: skip"); 4008 goto unlock; 4009 } 4010 if (!buffer_mapped(bh)) { 4011 BUFFER_TRACE(bh, "unmapped"); 4012 ext4_get_block(inode, iblock, bh, 0); 4013 /* unmapped? It's a hole - nothing to do */ 4014 if (!buffer_mapped(bh)) { 4015 BUFFER_TRACE(bh, "still unmapped"); 4016 goto unlock; 4017 } 4018 } 4019 4020 /* Ok, it's mapped. Make sure it's up-to-date */ 4021 if (PageUptodate(page)) 4022 set_buffer_uptodate(bh); 4023 4024 if (!buffer_uptodate(bh)) { 4025 err = -EIO; 4026 ll_rw_block(REQ_OP_READ, 0, 1, &bh); 4027 wait_on_buffer(bh); 4028 /* Uhhuh. Read error. Complain and punt. */ 4029 if (!buffer_uptodate(bh)) 4030 goto unlock; 4031 if (S_ISREG(inode->i_mode) && 4032 ext4_encrypted_inode(inode)) { 4033 /* We expect the key to be set. */ 4034 BUG_ON(!fscrypt_has_encryption_key(inode)); 4035 BUG_ON(blocksize != PAGE_SIZE); 4036 WARN_ON_ONCE(fscrypt_decrypt_page(page->mapping->host, 4037 page, PAGE_SIZE, 0, page->index)); 4038 } 4039 } 4040 if (ext4_should_journal_data(inode)) { 4041 BUFFER_TRACE(bh, "get write access"); 4042 err = ext4_journal_get_write_access(handle, bh); 4043 if (err) 4044 goto unlock; 4045 } 4046 zero_user(page, offset, length); 4047 BUFFER_TRACE(bh, "zeroed end of block"); 4048 4049 if (ext4_should_journal_data(inode)) { 4050 err = ext4_handle_dirty_metadata(handle, inode, bh); 4051 } else { 4052 err = 0; 4053 mark_buffer_dirty(bh); 4054 if (ext4_should_order_data(inode)) 4055 err = ext4_jbd2_inode_add_write(handle, inode); 4056 } 4057 4058 unlock: 4059 unlock_page(page); 4060 put_page(page); 4061 return err; 4062 } 4063 4064 /* 4065 * ext4_block_zero_page_range() zeros out a mapping of length 'length' 4066 * starting from file offset 'from'. The range to be zeroed must 4067 * be contained within one block. If the specified range exceeds 4068 * the end of the block, it will be shortened to the end of the block 4069 * that corresponds to 'from'. 4070 */ 4071 static int ext4_block_zero_page_range(handle_t *handle, 4072 struct address_space *mapping, loff_t from, loff_t length) 4073 { 4074 struct inode *inode = mapping->host; 4075 unsigned offset = from & (PAGE_SIZE-1); 4076 unsigned blocksize = inode->i_sb->s_blocksize; 4077 unsigned max = blocksize - (offset & (blocksize - 1)); 4078 4079 /* 4080 * correct length if it does not fall between 4081 * 'from' and the end of the block 4082 */ 4083 if (length > max || length < 0) 4084 length = max; 4085 4086 if (IS_DAX(inode)) { 4087 return iomap_zero_range(inode, from, length, NULL, 4088 &ext4_iomap_ops); 4089 } 4090 return __ext4_block_zero_page_range(handle, mapping, from, length); 4091 } 4092 4093 /* 4094 * ext4_block_truncate_page() zeroes out a mapping from file offset `from' 4095 * up to the end of the block which corresponds to `from'. 4096 * This is required during truncate. We need to physically zero the tail end 4097 * of that block so it doesn't yield old data if the file is later grown.
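 *
 * A worked example (illustrative, assuming a 4k blocksize): truncating
 * to i_size == 5000 calls this with from == 5000, so length becomes
 * 4096 - (5000 & 4095) = 3192 and bytes 5000..8191 are zeroed, i.e.
 * the tail of the block containing offset 5000.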
4098 */ 4099 static int ext4_block_truncate_page(handle_t *handle, 4100 struct address_space *mapping, loff_t from) 4101 { 4102 unsigned offset = from & (PAGE_SIZE-1); 4103 unsigned length; 4104 unsigned blocksize; 4105 struct inode *inode = mapping->host; 4106 4107 /* If we are processing an encrypted inode during orphan list handling */ 4108 if (ext4_encrypted_inode(inode) && !fscrypt_has_encryption_key(inode)) 4109 return 0; 4110 4111 blocksize = inode->i_sb->s_blocksize; 4112 length = blocksize - (offset & (blocksize - 1)); 4113 4114 return ext4_block_zero_page_range(handle, mapping, from, length); 4115 } 4116 4117 int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode, 4118 loff_t lstart, loff_t length) 4119 { 4120 struct super_block *sb = inode->i_sb; 4121 struct address_space *mapping = inode->i_mapping; 4122 unsigned partial_start, partial_end; 4123 ext4_fsblk_t start, end; 4124 loff_t byte_end = (lstart + length - 1); 4125 int err = 0; 4126 4127 partial_start = lstart & (sb->s_blocksize - 1); 4128 partial_end = byte_end & (sb->s_blocksize - 1); 4129 4130 start = lstart >> sb->s_blocksize_bits; 4131 end = byte_end >> sb->s_blocksize_bits; 4132 4133 /* Handle partial zero within the single block */ 4134 if (start == end && 4135 (partial_start || (partial_end != sb->s_blocksize - 1))) { 4136 err = ext4_block_zero_page_range(handle, mapping, 4137 lstart, length); 4138 return err; 4139 } 4140 /* Handle partial zero out on the start of the range */ 4141 if (partial_start) { 4142 err = ext4_block_zero_page_range(handle, mapping, 4143 lstart, sb->s_blocksize); 4144 if (err) 4145 return err; 4146 } 4147 /* Handle partial zero out on the end of the range */ 4148 if (partial_end != sb->s_blocksize - 1) 4149 err = ext4_block_zero_page_range(handle, mapping, 4150 byte_end - partial_end, 4151 partial_end + 1); 4152 return err; 4153 } 4154 4155 int ext4_can_truncate(struct inode *inode) 4156 { 4157 if (S_ISREG(inode->i_mode)) 4158 return 1; 4159 if (S_ISDIR(inode->i_mode)) 4160 return 1; 4161 if (S_ISLNK(inode->i_mode)) 4162 return !ext4_inode_is_fast_symlink(inode); 4163 return 0; 4164 } 4165 4166 /* 4167 * We have to make sure i_disksize gets properly updated before we truncate 4168 * page cache due to hole punching or zero range. Otherwise i_disksize update 4169 * can get lost as it may have been postponed to submission of writeback but 4170 * that will never happen after we truncate page cache. 
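 *
 * For instance (an illustrative delalloc scenario): an extending write
 * may leave i_size == 8192 while i_disksize is still 4096, the update
 * being deferred until the last page is written back. Punching that
 * page out of the cache would discard the writeback that was going to
 * perform the update, which is why we do it here first.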
4171 */ 4172 int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset, 4173 loff_t len) 4174 { 4175 handle_t *handle; 4176 loff_t size = i_size_read(inode); 4177 4178 WARN_ON(!inode_is_locked(inode)); 4179 if (offset > size || offset + len < size) 4180 return 0; 4181 4182 if (EXT4_I(inode)->i_disksize >= size) 4183 return 0; 4184 4185 handle = ext4_journal_start(inode, EXT4_HT_MISC, 1); 4186 if (IS_ERR(handle)) 4187 return PTR_ERR(handle); 4188 ext4_update_i_disksize(inode, size); 4189 ext4_mark_inode_dirty(handle, inode); 4190 ext4_journal_stop(handle); 4191 4192 return 0; 4193 } 4194 4195 static void ext4_wait_dax_page(struct ext4_inode_info *ei, bool *did_unlock) 4196 { 4197 *did_unlock = true; 4198 up_write(&ei->i_mmap_sem); 4199 schedule(); 4200 down_write(&ei->i_mmap_sem); 4201 } 4202 4203 int ext4_break_layouts(struct inode *inode) 4204 { 4205 struct ext4_inode_info *ei = EXT4_I(inode); 4206 struct page *page; 4207 bool retry; 4208 int error; 4209 4210 if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem))) 4211 return -EINVAL; 4212 4213 do { 4214 retry = false; 4215 page = dax_layout_busy_page(inode->i_mapping); 4216 if (!page) 4217 return 0; 4218 4219 error = ___wait_var_event(&page->_refcount, 4220 atomic_read(&page->_refcount) == 1, 4221 TASK_INTERRUPTIBLE, 0, 0, 4222 ext4_wait_dax_page(ei, &retry)); 4223 } while (error == 0 && retry); 4224 4225 return error; 4226 } 4227 4228 /* 4229 * ext4_punch_hole: punches a hole in a file by releasing the blocks 4230 * associated with the given offset and length 4231 * 4232 * @inode: File inode 4233 * @offset: The offset where the hole will begin 4234 * @len: The length of the hole 4235 * 4236 * Returns: 0 on success or negative on failure 4237 */ 4238 4239 int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) 4240 { 4241 struct super_block *sb = inode->i_sb; 4242 ext4_lblk_t first_block, stop_block; 4243 struct address_space *mapping = inode->i_mapping; 4244 loff_t first_block_offset, last_block_offset; 4245 handle_t *handle; 4246 unsigned int credits; 4247 int ret = 0; 4248 4249 if (!S_ISREG(inode->i_mode)) 4250 return -EOPNOTSUPP; 4251 4252 trace_ext4_punch_hole(inode, offset, length, 0); 4253 4254 /* 4255 * Write out all dirty pages to avoid race conditions, 4256 * then release them. 4257 */ 4258 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { 4259 ret = filemap_write_and_wait_range(mapping, offset, 4260 offset + length - 1); 4261 if (ret) 4262 return ret; 4263 } 4264 4265 inode_lock(inode); 4266 4267 /* No need to punch hole beyond i_size */ 4268 if (offset >= inode->i_size) 4269 goto out_mutex; 4270 4271 /* 4272 * If the hole extends beyond i_size, set the hole 4273 * to end after the page that contains i_size 4274 */ 4275 if (offset + length > inode->i_size) { 4276 length = inode->i_size + 4277 PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) - 4278 offset; 4279 } 4280 4281 if (offset & (sb->s_blocksize - 1) || 4282 (offset + length) & (sb->s_blocksize - 1)) { 4283 /* 4284 * Attach jinode to inode for jbd2 if we do any zeroing of a 4285 * partial block 4286 */ 4287 ret = ext4_inode_attach_jinode(inode); 4288 if (ret < 0) 4289 goto out_mutex; 4290 4291 } 4292 4293 /* Wait for all existing dio workers; newcomers will block on i_mutex */ 4294 inode_dio_wait(inode); 4295 4296 /* 4297 * Prevent page faults from reinstantiating pages we have released from 4298 * page cache.
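 *
 * Taking i_mmap_sem for writing below is what achieves this: the fault
 * paths take it for reading, so they wait until the hole punch is done
 * rather than faulting the old pages back in.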
4299 */ 4300 down_write(&EXT4_I(inode)->i_mmap_sem); 4301 4302 ret = ext4_break_layouts(inode); 4303 if (ret) 4304 goto out_dio; 4305 4306 first_block_offset = round_up(offset, sb->s_blocksize); 4307 last_block_offset = round_down((offset + length), sb->s_blocksize) - 1; 4308 4309 /* Now release the pages and zero block aligned part of pages*/ 4310 if (last_block_offset > first_block_offset) { 4311 ret = ext4_update_disksize_before_punch(inode, offset, length); 4312 if (ret) 4313 goto out_dio; 4314 truncate_pagecache_range(inode, first_block_offset, 4315 last_block_offset); 4316 } 4317 4318 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4319 credits = ext4_writepage_trans_blocks(inode); 4320 else 4321 credits = ext4_blocks_for_truncate(inode); 4322 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 4323 if (IS_ERR(handle)) { 4324 ret = PTR_ERR(handle); 4325 ext4_std_error(sb, ret); 4326 goto out_dio; 4327 } 4328 4329 ret = ext4_zero_partial_blocks(handle, inode, offset, 4330 length); 4331 if (ret) 4332 goto out_stop; 4333 4334 first_block = (offset + sb->s_blocksize - 1) >> 4335 EXT4_BLOCK_SIZE_BITS(sb); 4336 stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb); 4337 4338 /* If there are blocks to remove, do it */ 4339 if (stop_block > first_block) { 4340 4341 down_write(&EXT4_I(inode)->i_data_sem); 4342 ext4_discard_preallocations(inode); 4343 4344 ret = ext4_es_remove_extent(inode, first_block, 4345 stop_block - first_block); 4346 if (ret) { 4347 up_write(&EXT4_I(inode)->i_data_sem); 4348 goto out_stop; 4349 } 4350 4351 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4352 ret = ext4_ext_remove_space(inode, first_block, 4353 stop_block - 1); 4354 else 4355 ret = ext4_ind_remove_space(handle, inode, first_block, 4356 stop_block); 4357 4358 up_write(&EXT4_I(inode)->i_data_sem); 4359 } 4360 if (IS_SYNC(inode)) 4361 ext4_handle_sync(handle); 4362 4363 inode->i_mtime = inode->i_ctime = current_time(inode); 4364 ext4_mark_inode_dirty(handle, inode); 4365 if (ret >= 0) 4366 ext4_update_inode_fsync_trans(handle, inode, 1); 4367 out_stop: 4368 ext4_journal_stop(handle); 4369 out_dio: 4370 up_write(&EXT4_I(inode)->i_mmap_sem); 4371 out_mutex: 4372 inode_unlock(inode); 4373 return ret; 4374 } 4375 4376 int ext4_inode_attach_jinode(struct inode *inode) 4377 { 4378 struct ext4_inode_info *ei = EXT4_I(inode); 4379 struct jbd2_inode *jinode; 4380 4381 if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal) 4382 return 0; 4383 4384 jinode = jbd2_alloc_inode(GFP_KERNEL); 4385 spin_lock(&inode->i_lock); 4386 if (!ei->jinode) { 4387 if (!jinode) { 4388 spin_unlock(&inode->i_lock); 4389 return -ENOMEM; 4390 } 4391 ei->jinode = jinode; 4392 jbd2_journal_init_jbd_inode(ei->jinode, inode); 4393 jinode = NULL; 4394 } 4395 spin_unlock(&inode->i_lock); 4396 if (unlikely(jinode != NULL)) 4397 jbd2_free_inode(jinode); 4398 return 0; 4399 } 4400 4401 /* 4402 * ext4_truncate() 4403 * 4404 * We block out ext4_get_block() block instantiations across the entire 4405 * transaction, and VFS/VM ensures that ext4_truncate() cannot run 4406 * simultaneously on behalf of the same inode. 4407 * 4408 * As we work through the truncate and commit bits of it to the journal there 4409 * is one core, guiding principle: the file's tree must always be consistent on 4410 * disk. We must be able to restart the truncate after a crash. 
4411 * 4412 * The file's tree may be transiently inconsistent in memory (although it 4413 * probably isn't), but whenever we close off and commit a journal transaction, 4414 * the contents of (the filesystem + the journal) must be consistent and 4415 * restartable. It's pretty simple, really: bottom up, right to left (although 4416 * left-to-right works OK too). 4417 * 4418 * Note that at recovery time, journal replay occurs *before* the restart of 4419 * truncate against the orphan inode list. 4420 * 4421 * The committed inode has the new, desired i_size (which is the same as 4422 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see 4423 * that this inode's truncate did not complete and it will again call 4424 * ext4_truncate() to have another go. So there will be instantiated blocks 4425 * to the right of the truncation point in a crashed ext4 filesystem. But 4426 * that's fine - as long as they are linked from the inode, the post-crash 4427 * ext4_truncate() run will find them and release them. 4428 */ 4429 int ext4_truncate(struct inode *inode) 4430 { 4431 struct ext4_inode_info *ei = EXT4_I(inode); 4432 unsigned int credits; 4433 int err = 0; 4434 handle_t *handle; 4435 struct address_space *mapping = inode->i_mapping; 4436 4437 /* 4438 * There is a possibility that we're either freeing the inode 4439 * or it's a completely new inode. In those cases we might not 4440 * have i_mutex locked because it's not necessary. 4441 */ 4442 if (!(inode->i_state & (I_NEW|I_FREEING))) 4443 WARN_ON(!inode_is_locked(inode)); 4444 trace_ext4_truncate_enter(inode); 4445 4446 if (!ext4_can_truncate(inode)) 4447 return 0; 4448 4449 ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 4450 4451 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) 4452 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); 4453 4454 if (ext4_has_inline_data(inode)) { 4455 int has_inline = 1; 4456 4457 err = ext4_inline_data_truncate(inode, &has_inline); 4458 if (err) 4459 return err; 4460 if (has_inline) 4461 return 0; 4462 } 4463 4464 /* If we zero-out tail of the page, we have to create jinode for jbd2 */ 4465 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) { 4466 if (ext4_inode_attach_jinode(inode) < 0) 4467 return 0; 4468 } 4469 4470 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4471 credits = ext4_writepage_trans_blocks(inode); 4472 else 4473 credits = ext4_blocks_for_truncate(inode); 4474 4475 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 4476 if (IS_ERR(handle)) 4477 return PTR_ERR(handle); 4478 4479 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) 4480 ext4_block_truncate_page(handle, mapping, inode->i_size); 4481 4482 /* 4483 * We add the inode to the orphan list, so that if this 4484 * truncate spans multiple transactions, and we crash, we will 4485 * resume the truncate when the filesystem recovers. It also 4486 * marks the inode dirty, to catch the new size. 4487 * 4488 * Implication: the file must always be in a sane, consistent 4489 * truncatable state while each transaction commits. 
4490 */ 4491 err = ext4_orphan_add(handle, inode); 4492 if (err) 4493 goto out_stop; 4494 4495 down_write(&EXT4_I(inode)->i_data_sem); 4496 4497 ext4_discard_preallocations(inode); 4498 4499 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4500 err = ext4_ext_truncate(handle, inode); 4501 else 4502 ext4_ind_truncate(handle, inode); 4503 4504 up_write(&ei->i_data_sem); 4505 if (err) 4506 goto out_stop; 4507 4508 if (IS_SYNC(inode)) 4509 ext4_handle_sync(handle); 4510 4511 out_stop: 4512 /* 4513 * If this was a simple ftruncate() and the file will remain alive, 4514 * then we need to clear up the orphan record which we created above. 4515 * However, if this was a real unlink then we were called by 4516 * ext4_evict_inode(), and we allow that function to clean up the 4517 * orphan info for us. 4518 */ 4519 if (inode->i_nlink) 4520 ext4_orphan_del(handle, inode); 4521 4522 inode->i_mtime = inode->i_ctime = current_time(inode); 4523 ext4_mark_inode_dirty(handle, inode); 4524 ext4_journal_stop(handle); 4525 4526 trace_ext4_truncate_exit(inode); 4527 return err; 4528 } 4529 4530 /* 4531 * ext4_get_inode_loc returns with an extra refcount against the inode's 4532 * underlying buffer_head on success. If 'in_mem' is true, we have all 4533 * data in memory that is needed to recreate the on-disk version of this 4534 * inode. 4535 */ 4536 static int __ext4_get_inode_loc(struct inode *inode, 4537 struct ext4_iloc *iloc, int in_mem) 4538 { 4539 struct ext4_group_desc *gdp; 4540 struct buffer_head *bh; 4541 struct super_block *sb = inode->i_sb; 4542 ext4_fsblk_t block; 4543 int inodes_per_block, inode_offset; 4544 4545 iloc->bh = NULL; 4546 if (inode->i_ino < EXT4_ROOT_INO || 4547 inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)) 4548 return -EFSCORRUPTED; 4549 4550 iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); 4551 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); 4552 if (!gdp) 4553 return -EIO; 4554 4555 /* 4556 * Figure out the offset within the block group inode table 4557 */ 4558 inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; 4559 inode_offset = ((inode->i_ino - 1) % 4560 EXT4_INODES_PER_GROUP(sb)); 4561 block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); 4562 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); 4563 4564 bh = sb_getblk(sb, block); 4565 if (unlikely(!bh)) 4566 return -ENOMEM; 4567 if (!buffer_uptodate(bh)) { 4568 lock_buffer(bh); 4569 4570 /* 4571 * If the buffer has the write error flag, we have failed 4572 * to write out another inode in the same block. In this 4573 * case, we don't have to read the block because we may 4574 * read the old inode data successfully. 4575 */ 4576 if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) 4577 set_buffer_uptodate(bh); 4578 4579 if (buffer_uptodate(bh)) { 4580 /* someone brought it uptodate while we waited */ 4581 unlock_buffer(bh); 4582 goto has_buffer; 4583 } 4584 4585 /* 4586 * If we have all information of the inode in memory and this 4587 * is the only valid inode in the block, we need not read the 4588 * block. 4589 */ 4590 if (in_mem) { 4591 struct buffer_head *bitmap_bh; 4592 int i, start; 4593 4594 start = inode_offset & ~(inodes_per_block - 1); 4595 4596 /* Is the inode bitmap in cache? 
*/ 4597 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); 4598 if (unlikely(!bitmap_bh)) 4599 goto make_io; 4600 4601 /* 4602 * If the inode bitmap isn't in cache then the 4603 * optimisation may end up performing two reads instead 4604 * of one, so skip it. 4605 */ 4606 if (!buffer_uptodate(bitmap_bh)) { 4607 brelse(bitmap_bh); 4608 goto make_io; 4609 } 4610 for (i = start; i < start + inodes_per_block; i++) { 4611 if (i == inode_offset) 4612 continue; 4613 if (ext4_test_bit(i, bitmap_bh->b_data)) 4614 break; 4615 } 4616 brelse(bitmap_bh); 4617 if (i == start + inodes_per_block) { 4618 /* all other inodes are free, so skip I/O */ 4619 memset(bh->b_data, 0, bh->b_size); 4620 set_buffer_uptodate(bh); 4621 unlock_buffer(bh); 4622 goto has_buffer; 4623 } 4624 } 4625 4626 make_io: 4627 /* 4628 * If we need to do any I/O, try to pre-readahead extra 4629 * blocks from the inode table. 4630 */ 4631 if (EXT4_SB(sb)->s_inode_readahead_blks) { 4632 ext4_fsblk_t b, end, table; 4633 unsigned num; 4634 __u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks; 4635 4636 table = ext4_inode_table(sb, gdp); 4637 /* s_inode_readahead_blks is always a power of 2 */ 4638 b = block & ~((ext4_fsblk_t) ra_blks - 1); 4639 if (table > b) 4640 b = table; 4641 end = b + ra_blks; 4642 num = EXT4_INODES_PER_GROUP(sb); 4643 if (ext4_has_group_desc_csum(sb)) 4644 num -= ext4_itable_unused_count(sb, gdp); 4645 table += num / inodes_per_block; 4646 if (end > table) 4647 end = table; 4648 while (b <= end) 4649 sb_breadahead(sb, b++); 4650 } 4651 4652 /* 4653 * There are other valid inodes in the buffer, this inode 4654 * has in-inode xattrs, or we don't have this inode in memory. 4655 * Read the block from disk. 4656 */ 4657 trace_ext4_load_inode(inode); 4658 get_bh(bh); 4659 bh->b_end_io = end_buffer_read_sync; 4660 submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh); 4661 wait_on_buffer(bh); 4662 if (!buffer_uptodate(bh)) { 4663 EXT4_ERROR_INODE_BLOCK(inode, block, 4664 "unable to read itable block"); 4665 brelse(bh); 4666 return -EIO; 4667 } 4668 } 4669 has_buffer: 4670 iloc->bh = bh; 4671 return 0; 4672 } 4673 4674 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) 4675 { 4676 /* We have all inode data except xattrs in memory here. 
*/ 4677 return __ext4_get_inode_loc(inode, iloc, 4678 !ext4_test_inode_state(inode, EXT4_STATE_XATTR)); 4679 } 4680 4681 static bool ext4_should_use_dax(struct inode *inode) 4682 { 4683 if (!test_opt(inode->i_sb, DAX)) 4684 return false; 4685 if (!S_ISREG(inode->i_mode)) 4686 return false; 4687 if (ext4_should_journal_data(inode)) 4688 return false; 4689 if (ext4_has_inline_data(inode)) 4690 return false; 4691 if (ext4_encrypted_inode(inode)) 4692 return false; 4693 return true; 4694 } 4695 4696 void ext4_set_inode_flags(struct inode *inode) 4697 { 4698 unsigned int flags = EXT4_I(inode)->i_flags; 4699 unsigned int new_fl = 0; 4700 4701 if (flags & EXT4_SYNC_FL) 4702 new_fl |= S_SYNC; 4703 if (flags & EXT4_APPEND_FL) 4704 new_fl |= S_APPEND; 4705 if (flags & EXT4_IMMUTABLE_FL) 4706 new_fl |= S_IMMUTABLE; 4707 if (flags & EXT4_NOATIME_FL) 4708 new_fl |= S_NOATIME; 4709 if (flags & EXT4_DIRSYNC_FL) 4710 new_fl |= S_DIRSYNC; 4711 if (ext4_should_use_dax(inode)) 4712 new_fl |= S_DAX; 4713 if (flags & EXT4_ENCRYPT_FL) 4714 new_fl |= S_ENCRYPTED; 4715 inode_set_flags(inode, new_fl, 4716 S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX| 4717 S_ENCRYPTED); 4718 } 4719 4720 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, 4721 struct ext4_inode_info *ei) 4722 { 4723 blkcnt_t i_blocks ; 4724 struct inode *inode = &(ei->vfs_inode); 4725 struct super_block *sb = inode->i_sb; 4726 4727 if (ext4_has_feature_huge_file(sb)) { 4728 /* we are using combined 48 bit field */ 4729 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | 4730 le32_to_cpu(raw_inode->i_blocks_lo); 4731 if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) { 4732 /* i_blocks represent file system block size */ 4733 return i_blocks << (inode->i_blkbits - 9); 4734 } else { 4735 return i_blocks; 4736 } 4737 } else { 4738 return le32_to_cpu(raw_inode->i_blocks_lo); 4739 } 4740 } 4741 4742 static inline int ext4_iget_extra_inode(struct inode *inode, 4743 struct ext4_inode *raw_inode, 4744 struct ext4_inode_info *ei) 4745 { 4746 __le32 *magic = (void *)raw_inode + 4747 EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize; 4748 4749 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <= 4750 EXT4_INODE_SIZE(inode->i_sb) && 4751 *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { 4752 ext4_set_inode_state(inode, EXT4_STATE_XATTR); 4753 return ext4_find_inline_data_nolock(inode); 4754 } else 4755 EXT4_I(inode)->i_inline_off = 0; 4756 return 0; 4757 } 4758 4759 int ext4_get_projid(struct inode *inode, kprojid_t *projid) 4760 { 4761 if (!ext4_has_feature_project(inode->i_sb)) 4762 return -EOPNOTSUPP; 4763 *projid = EXT4_I(inode)->i_projid; 4764 return 0; 4765 } 4766 4767 /* 4768 * ext4 has self-managed i_version for ea inodes, it stores the lower 32bit of 4769 * refcount in i_version, so use raw values if inode has EXT4_EA_INODE_FL flag 4770 * set. 
4771 */ 4772 static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val) 4773 { 4774 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) 4775 inode_set_iversion_raw(inode, val); 4776 else 4777 inode_set_iversion_queried(inode, val); 4778 } 4779 static inline u64 ext4_inode_peek_iversion(const struct inode *inode) 4780 { 4781 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) 4782 return inode_peek_iversion_raw(inode); 4783 else 4784 return inode_peek_iversion(inode); 4785 } 4786 4787 struct inode *ext4_iget(struct super_block *sb, unsigned long ino) 4788 { 4789 struct ext4_iloc iloc; 4790 struct ext4_inode *raw_inode; 4791 struct ext4_inode_info *ei; 4792 struct inode *inode; 4793 journal_t *journal = EXT4_SB(sb)->s_journal; 4794 long ret; 4795 loff_t size; 4796 int block; 4797 uid_t i_uid; 4798 gid_t i_gid; 4799 projid_t i_projid; 4800 4801 inode = iget_locked(sb, ino); 4802 if (!inode) 4803 return ERR_PTR(-ENOMEM); 4804 if (!(inode->i_state & I_NEW)) 4805 return inode; 4806 4807 ei = EXT4_I(inode); 4808 iloc.bh = NULL; 4809 4810 ret = __ext4_get_inode_loc(inode, &iloc, 0); 4811 if (ret < 0) 4812 goto bad_inode; 4813 raw_inode = ext4_raw_inode(&iloc); 4814 4815 if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) { 4816 EXT4_ERROR_INODE(inode, "root inode unallocated"); 4817 ret = -EFSCORRUPTED; 4818 goto bad_inode; 4819 } 4820 4821 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4822 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 4823 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 4824 EXT4_INODE_SIZE(inode->i_sb) || 4825 (ei->i_extra_isize & 3)) { 4826 EXT4_ERROR_INODE(inode, 4827 "bad extra_isize %u (inode size %u)", 4828 ei->i_extra_isize, 4829 EXT4_INODE_SIZE(inode->i_sb)); 4830 ret = -EFSCORRUPTED; 4831 goto bad_inode; 4832 } 4833 } else 4834 ei->i_extra_isize = 0; 4835 4836 /* Precompute checksum seed for inode metadata */ 4837 if (ext4_has_metadata_csum(sb)) { 4838 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4839 __u32 csum; 4840 __le32 inum = cpu_to_le32(inode->i_ino); 4841 __le32 gen = raw_inode->i_generation; 4842 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, 4843 sizeof(inum)); 4844 ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, 4845 sizeof(gen)); 4846 } 4847 4848 if (!ext4_inode_csum_verify(inode, raw_inode, ei)) { 4849 EXT4_ERROR_INODE(inode, "checksum invalid"); 4850 ret = -EFSBADCRC; 4851 goto bad_inode; 4852 } 4853 4854 inode->i_mode = le16_to_cpu(raw_inode->i_mode); 4855 i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 4856 i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 4857 if (ext4_has_feature_project(sb) && 4858 EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE && 4859 EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) 4860 i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid); 4861 else 4862 i_projid = EXT4_DEF_PROJID; 4863 4864 if (!(test_opt(inode->i_sb, NO_UID32))) { 4865 i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; 4866 i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; 4867 } 4868 i_uid_write(inode, i_uid); 4869 i_gid_write(inode, i_gid); 4870 ei->i_projid = make_kprojid(&init_user_ns, i_projid); 4871 set_nlink(inode, le16_to_cpu(raw_inode->i_links_count)); 4872 4873 ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ 4874 ei->i_inline_off = 0; 4875 ei->i_dir_start_lookup = 0; 4876 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); 4877 /* We now have enough fields to check if the inode was active or not. 
4878 * This is needed because nfsd might try to access dead inodes; 4879 * the test is the same one that e2fsck uses. 4880 * NeilBrown 1999oct15 4881 */ 4882 if (inode->i_nlink == 0) { 4883 if ((inode->i_mode == 0 || 4884 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) && 4885 ino != EXT4_BOOT_LOADER_INO) { 4886 /* this inode is deleted */ 4887 ret = -ESTALE; 4888 goto bad_inode; 4889 } 4890 /* The only unlinked inodes we let through here have 4891 * valid i_mode and are being read by the orphan 4892 * recovery code: that's fine, we're about to complete 4893 * the process of deleting those. 4894 * OR it is the EXT4_BOOT_LOADER_INO which is 4895 * not initialized on a new filesystem. */ 4896 } 4897 ei->i_flags = le32_to_cpu(raw_inode->i_flags); 4898 inode->i_blocks = ext4_inode_blocks(raw_inode, ei); 4899 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); 4900 if (ext4_has_feature_64bit(sb)) 4901 ei->i_file_acl |= 4902 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; 4903 inode->i_size = ext4_isize(sb, raw_inode); 4904 if ((size = i_size_read(inode)) < 0) { 4905 EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size); 4906 ret = -EFSCORRUPTED; 4907 goto bad_inode; 4908 } 4909 ei->i_disksize = inode->i_size; 4910 #ifdef CONFIG_QUOTA 4911 ei->i_reserved_quota = 0; 4912 #endif 4913 inode->i_generation = le32_to_cpu(raw_inode->i_generation); 4914 ei->i_block_group = iloc.block_group; 4915 ei->i_last_alloc_group = ~0; 4916 /* 4917 * NOTE! The in-memory inode i_data array is in little-endian order 4918 * even on big-endian machines: we do NOT byteswap the block numbers! 4919 */ 4920 for (block = 0; block < EXT4_N_BLOCKS; block++) 4921 ei->i_data[block] = raw_inode->i_block[block]; 4922 INIT_LIST_HEAD(&ei->i_orphan); 4923 4924 /* 4925 * Set transaction ids of transactions that have to be committed 4926 * to finish f[data]sync. We set them to the currently running transaction 4927 * as we cannot be sure that the inode or some of its metadata isn't 4928 * part of the transaction - the inode could have been reclaimed and 4929 * now it is reread from disk. 4930 */ 4931 if (journal) { 4932 transaction_t *transaction; 4933 tid_t tid; 4934 4935 read_lock(&journal->j_state_lock); 4936 if (journal->j_running_transaction) 4937 transaction = journal->j_running_transaction; 4938 else 4939 transaction = journal->j_committing_transaction; 4940 if (transaction) 4941 tid = transaction->t_tid; 4942 else 4943 tid = journal->j_commit_sequence; 4944 read_unlock(&journal->j_state_lock); 4945 ei->i_sync_tid = tid; 4946 ei->i_datasync_tid = tid; 4947 } 4948 4949 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4950 if (ei->i_extra_isize == 0) { 4951 /* The extra space is currently unused. Use it.
*/ 4952 BUILD_BUG_ON(sizeof(struct ext4_inode) & 3); 4953 ei->i_extra_isize = sizeof(struct ext4_inode) - 4954 EXT4_GOOD_OLD_INODE_SIZE; 4955 } else { 4956 ret = ext4_iget_extra_inode(inode, raw_inode, ei); 4957 if (ret) 4958 goto bad_inode; 4959 } 4960 } 4961 4962 EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); 4963 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); 4964 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); 4965 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); 4966 4967 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) { 4968 u64 ivers = le32_to_cpu(raw_inode->i_disk_version); 4969 4970 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4971 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 4972 ivers |= 4973 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; 4974 } 4975 ext4_inode_set_iversion_queried(inode, ivers); 4976 } 4977 4978 ret = 0; 4979 if (ei->i_file_acl && 4980 !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { 4981 EXT4_ERROR_INODE(inode, "bad extended attribute block %llu", 4982 ei->i_file_acl); 4983 ret = -EFSCORRUPTED; 4984 goto bad_inode; 4985 } else if (!ext4_has_inline_data(inode)) { 4986 /* validate the block references in the inode */ 4987 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 4988 (S_ISLNK(inode->i_mode) && 4989 !ext4_inode_is_fast_symlink(inode))) { 4990 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4991 ret = ext4_ext_check_inode(inode); 4992 else 4993 ret = ext4_ind_check_inode(inode); 4994 } 4995 } 4996 if (ret) 4997 goto bad_inode; 4998 4999 if (S_ISREG(inode->i_mode)) { 5000 inode->i_op = &ext4_file_inode_operations; 5001 inode->i_fop = &ext4_file_operations; 5002 ext4_set_aops(inode); 5003 } else if (S_ISDIR(inode->i_mode)) { 5004 inode->i_op = &ext4_dir_inode_operations; 5005 inode->i_fop = &ext4_dir_operations; 5006 } else if (S_ISLNK(inode->i_mode)) { 5007 /* VFS does not allow setting these so must be corruption */ 5008 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) { 5009 EXT4_ERROR_INODE(inode, 5010 "immutable or append flags not allowed on symlinks"); 5011 ret = -EFSCORRUPTED; 5012 goto bad_inode; 5013 } 5014 if (ext4_encrypted_inode(inode)) { 5015 inode->i_op = &ext4_encrypted_symlink_inode_operations; 5016 ext4_set_aops(inode); 5017 } else if (ext4_inode_is_fast_symlink(inode)) { 5018 inode->i_link = (char *)ei->i_data; 5019 inode->i_op = &ext4_fast_symlink_inode_operations; 5020 nd_terminate_link(ei->i_data, inode->i_size, 5021 sizeof(ei->i_data) - 1); 5022 } else { 5023 inode->i_op = &ext4_symlink_inode_operations; 5024 ext4_set_aops(inode); 5025 } 5026 inode_nohighmem(inode); 5027 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || 5028 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { 5029 inode->i_op = &ext4_special_inode_operations; 5030 if (raw_inode->i_block[0]) 5031 init_special_inode(inode, inode->i_mode, 5032 old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 5033 else 5034 init_special_inode(inode, inode->i_mode, 5035 new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 5036 } else if (ino == EXT4_BOOT_LOADER_INO) { 5037 make_bad_inode(inode); 5038 } else { 5039 ret = -EFSCORRUPTED; 5040 EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode); 5041 goto bad_inode; 5042 } 5043 brelse(iloc.bh); 5044 ext4_set_inode_flags(inode); 5045 5046 unlock_new_inode(inode); 5047 return inode; 5048 5049 bad_inode: 5050 brelse(iloc.bh); 5051 iget_failed(inode); 5052 return ERR_PTR(ret); 5053 } 5054 5055 struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino) 
5056 { 5057 if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) 5058 return ERR_PTR(-EFSCORRUPTED); 5059 return ext4_iget(sb, ino); 5060 } 5061 5062 static int ext4_inode_blocks_set(handle_t *handle, 5063 struct ext4_inode *raw_inode, 5064 struct ext4_inode_info *ei) 5065 { 5066 struct inode *inode = &(ei->vfs_inode); 5067 u64 i_blocks = inode->i_blocks; 5068 struct super_block *sb = inode->i_sb; 5069 5070 if (i_blocks <= ~0U) { 5071 /* 5072 * i_blocks can be represented in a 32 bit variable 5073 * as multiple of 512 bytes 5074 */ 5075 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 5076 raw_inode->i_blocks_high = 0; 5077 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 5078 return 0; 5079 } 5080 if (!ext4_has_feature_huge_file(sb)) 5081 return -EFBIG; 5082 5083 if (i_blocks <= 0xffffffffffffULL) { 5084 /* 5085 * i_blocks can be represented in a 48 bit variable 5086 * as multiple of 512 bytes 5087 */ 5088 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 5089 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 5090 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 5091 } else { 5092 ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE); 5093 /* i_block is stored in file system block size */ 5094 i_blocks = i_blocks >> (inode->i_blkbits - 9); 5095 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 5096 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 5097 } 5098 return 0; 5099 } 5100 5101 struct other_inode { 5102 unsigned long orig_ino; 5103 struct ext4_inode *raw_inode; 5104 }; 5105 5106 static int other_inode_match(struct inode * inode, unsigned long ino, 5107 void *data) 5108 { 5109 struct other_inode *oi = (struct other_inode *) data; 5110 5111 if ((inode->i_ino != ino) || 5112 (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW | 5113 I_DIRTY_INODE)) || 5114 ((inode->i_state & I_DIRTY_TIME) == 0)) 5115 return 0; 5116 spin_lock(&inode->i_lock); 5117 if (((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW | 5118 I_DIRTY_INODE)) == 0) && 5119 (inode->i_state & I_DIRTY_TIME)) { 5120 struct ext4_inode_info *ei = EXT4_I(inode); 5121 5122 inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED); 5123 spin_unlock(&inode->i_lock); 5124 5125 spin_lock(&ei->i_raw_lock); 5126 EXT4_INODE_SET_XTIME(i_ctime, inode, oi->raw_inode); 5127 EXT4_INODE_SET_XTIME(i_mtime, inode, oi->raw_inode); 5128 EXT4_INODE_SET_XTIME(i_atime, inode, oi->raw_inode); 5129 ext4_inode_csum_set(inode, oi->raw_inode, ei); 5130 spin_unlock(&ei->i_raw_lock); 5131 trace_ext4_other_inode_update_time(inode, oi->orig_ino); 5132 return -1; 5133 } 5134 spin_unlock(&inode->i_lock); 5135 return -1; 5136 } 5137 5138 /* 5139 * Opportunistically update the other time fields for other inodes in 5140 * the same inode table block. 5141 */ 5142 static void ext4_update_other_inodes_time(struct super_block *sb, 5143 unsigned long orig_ino, char *buf) 5144 { 5145 struct other_inode oi; 5146 unsigned long ino; 5147 int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; 5148 int inode_size = EXT4_INODE_SIZE(sb); 5149 5150 oi.orig_ino = orig_ino; 5151 /* 5152 * Calculate the first inode in the inode table block. Inode 5153 * numbers are one-based. That is, the first inode in a block 5154 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1). 
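 *
 * Worked example with those numbers (16 inodes per block): for
 * orig_ino == 18 the line below computes ((18 - 1) & ~15) + 1 == 17,
 * and the loop then visits inodes 17..32, skipping 18 itself.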
5155 */ 5156 ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1; 5157 for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) { 5158 if (ino == orig_ino) 5159 continue; 5160 oi.raw_inode = (struct ext4_inode *) buf; 5161 (void) find_inode_nowait(sb, ino, other_inode_match, &oi); 5162 } 5163 } 5164 5165 /* 5166 * Post the struct inode info into an on-disk inode location in the 5167 * buffer-cache. This gobbles the caller's reference to the 5168 * buffer_head in the inode location struct. 5169 * 5170 * The caller must have write access to iloc->bh. 5171 */ 5172 static int ext4_do_update_inode(handle_t *handle, 5173 struct inode *inode, 5174 struct ext4_iloc *iloc) 5175 { 5176 struct ext4_inode *raw_inode = ext4_raw_inode(iloc); 5177 struct ext4_inode_info *ei = EXT4_I(inode); 5178 struct buffer_head *bh = iloc->bh; 5179 struct super_block *sb = inode->i_sb; 5180 int err = 0, rc, block; 5181 int need_datasync = 0, set_large_file = 0; 5182 uid_t i_uid; 5183 gid_t i_gid; 5184 projid_t i_projid; 5185 5186 spin_lock(&ei->i_raw_lock); 5187 5188 /* For fields not tracked in the in-memory inode, 5189 * initialise them to zero for new inodes. */ 5190 if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) 5191 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); 5192 5193 raw_inode->i_mode = cpu_to_le16(inode->i_mode); 5194 i_uid = i_uid_read(inode); 5195 i_gid = i_gid_read(inode); 5196 i_projid = from_kprojid(&init_user_ns, ei->i_projid); 5197 if (!(test_opt(inode->i_sb, NO_UID32))) { 5198 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid)); 5199 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid)); 5200 /* 5201 * Fix up interoperability with old kernels. Otherwise, old inodes get 5202 * re-used with the upper 16 bits of the uid/gid intact 5203 */ 5204 if (ei->i_dtime && list_empty(&ei->i_orphan)) { 5205 raw_inode->i_uid_high = 0; 5206 raw_inode->i_gid_high = 0; 5207 } else { 5208 raw_inode->i_uid_high = 5209 cpu_to_le16(high_16_bits(i_uid)); 5210 raw_inode->i_gid_high = 5211 cpu_to_le16(high_16_bits(i_gid)); 5212 } 5213 } else { 5214 raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid)); 5215 raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid)); 5216 raw_inode->i_uid_high = 0; 5217 raw_inode->i_gid_high = 0; 5218 } 5219 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 5220 5221 EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); 5222 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); 5223 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); 5224 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); 5225 5226 err = ext4_inode_blocks_set(handle, raw_inode, ei); 5227 if (err) { 5228 spin_unlock(&ei->i_raw_lock); 5229 goto out_brelse; 5230 } 5231 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); 5232 raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF); 5233 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) 5234 raw_inode->i_file_acl_high = 5235 cpu_to_le16(ei->i_file_acl >> 32); 5236 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); 5237 if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) { 5238 ext4_isize_set(raw_inode, ei->i_disksize); 5239 need_datasync = 1; 5240 } 5241 if (ei->i_disksize > 0x7fffffffULL) { 5242 if (!ext4_has_feature_large_file(sb) || 5243 EXT4_SB(sb)->s_es->s_rev_level == 5244 cpu_to_le32(EXT4_GOOD_OLD_REV)) 5245 set_large_file = 1; 5246 } 5247 raw_inode->i_generation = cpu_to_le32(inode->i_generation); 5248 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 5249 if (old_valid_dev(inode->i_rdev)) { 5250 raw_inode->i_block[0] = 5251 
cpu_to_le32(old_encode_dev(inode->i_rdev)); 5252 raw_inode->i_block[1] = 0; 5253 } else { 5254 raw_inode->i_block[0] = 0; 5255 raw_inode->i_block[1] = 5256 cpu_to_le32(new_encode_dev(inode->i_rdev)); 5257 raw_inode->i_block[2] = 0; 5258 } 5259 } else if (!ext4_has_inline_data(inode)) { 5260 for (block = 0; block < EXT4_N_BLOCKS; block++) 5261 raw_inode->i_block[block] = ei->i_data[block]; 5262 } 5263 5264 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) { 5265 u64 ivers = ext4_inode_peek_iversion(inode); 5266 5267 raw_inode->i_disk_version = cpu_to_le32(ivers); 5268 if (ei->i_extra_isize) { 5269 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 5270 raw_inode->i_version_hi = 5271 cpu_to_le32(ivers >> 32); 5272 raw_inode->i_extra_isize = 5273 cpu_to_le16(ei->i_extra_isize); 5274 } 5275 } 5276 5277 BUG_ON(!ext4_has_feature_project(inode->i_sb) && 5278 i_projid != EXT4_DEF_PROJID); 5279 5280 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && 5281 EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) 5282 raw_inode->i_projid = cpu_to_le32(i_projid); 5283 5284 ext4_inode_csum_set(inode, raw_inode, ei); 5285 spin_unlock(&ei->i_raw_lock); 5286 if (inode->i_sb->s_flags & SB_LAZYTIME) 5287 ext4_update_other_inodes_time(inode->i_sb, inode->i_ino, 5288 bh->b_data); 5289 5290 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 5291 rc = ext4_handle_dirty_metadata(handle, NULL, bh); 5292 if (!err) 5293 err = rc; 5294 ext4_clear_inode_state(inode, EXT4_STATE_NEW); 5295 if (set_large_file) { 5296 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access"); 5297 err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh); 5298 if (err) 5299 goto out_brelse; 5300 ext4_update_dynamic_rev(sb); 5301 ext4_set_feature_large_file(sb); 5302 ext4_handle_sync(handle); 5303 err = ext4_handle_dirty_super(handle, sb); 5304 } 5305 ext4_update_inode_fsync_trans(handle, inode, need_datasync); 5306 out_brelse: 5307 brelse(bh); 5308 ext4_std_error(inode->i_sb, err); 5309 return err; 5310 } 5311 5312 /* 5313 * ext4_write_inode() 5314 * 5315 * We are called from a few places: 5316 * 5317 * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files. 5318 * Here, there will be no transaction running. We wait for any running 5319 * transaction to commit. 5320 * 5321 * - Within flush work (sys_sync(), kupdate and such). 5322 * We wait on commit, if told to. 5323 * 5324 * - Within iput_final() -> write_inode_now() 5325 * We wait on commit, if told to. 5326 * 5327 * In all cases it is actually safe for us to return without doing anything, 5328 * because the inode has been copied into a raw inode buffer in 5329 * ext4_mark_inode_dirty(). This is a correctness thing for WB_SYNC_ALL 5330 * writeback. 5331 * 5332 * Note that we are absolutely dependent upon all inode dirtiers doing the 5333 * right thing: they *must* call mark_inode_dirty() after dirtying info in 5334 * which we are interested. 5335 * 5336 * It would be a bug for them to not do this. The code: 5337 * 5338 * mark_inode_dirty(inode) 5339 * stuff(); 5340 * inode->i_size = expr; 5341 * 5342 * is in error because write_inode() could occur while `stuff()' is running, 5343 * and the new i_size will be lost. Plus the inode will no longer be on the 5344 * superblock's dirty inode list. 
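 *
 * The correct order is the reverse:
 *
 *	stuff();
 *	inode->i_size = expr;
 *	mark_inode_dirty(inode);
 *
 * so that the inode is dirtied after every field it needs to capture
 * has been set.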
5345 */ 5346 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) 5347 { 5348 int err; 5349 5350 if (WARN_ON_ONCE(current->flags & PF_MEMALLOC)) 5351 return 0; 5352 5353 if (EXT4_SB(inode->i_sb)->s_journal) { 5354 if (ext4_journal_current_handle()) { 5355 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); 5356 dump_stack(); 5357 return -EIO; 5358 } 5359 5360 /* 5361 * No need to force transaction in WB_SYNC_NONE mode. Also 5362 * ext4_sync_fs() will force the commit after everything is 5363 * written. 5364 */ 5365 if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync) 5366 return 0; 5367 5368 err = ext4_force_commit(inode->i_sb); 5369 } else { 5370 struct ext4_iloc iloc; 5371 5372 err = __ext4_get_inode_loc(inode, &iloc, 0); 5373 if (err) 5374 return err; 5375 /* 5376 * sync(2) will flush the whole buffer cache. No need to do 5377 * it here separately for each inode. 5378 */ 5379 if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) 5380 sync_dirty_buffer(iloc.bh); 5381 if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { 5382 EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr, 5383 "IO error syncing inode"); 5384 err = -EIO; 5385 } 5386 brelse(iloc.bh); 5387 } 5388 return err; 5389 } 5390 5391 /* 5392 * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate 5393 * buffers that are attached to a page straddling i_size and are undergoing 5394 * commit. In that case we have to wait for commit to finish and try again. 5395 */ 5396 static void ext4_wait_for_tail_page_commit(struct inode *inode) 5397 { 5398 struct page *page; 5399 unsigned offset; 5400 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; 5401 tid_t commit_tid = 0; 5402 int ret; 5403 5404 offset = inode->i_size & (PAGE_SIZE - 1); 5405 /* 5406 * All buffers in the last page remain valid? Then there's nothing to 5407 * do. We do the check mainly to optimize the common PAGE_SIZE == 5408 * blocksize case. 5409 */ 5410 if (offset > PAGE_SIZE - i_blocksize(inode)) 5411 return; 5412 while (1) { 5413 page = find_lock_page(inode->i_mapping, 5414 inode->i_size >> PAGE_SHIFT); 5415 if (!page) 5416 return; 5417 ret = __ext4_journalled_invalidatepage(page, offset, 5418 PAGE_SIZE - offset); 5419 unlock_page(page); 5420 put_page(page); 5421 if (ret != -EBUSY) 5422 return; 5423 commit_tid = 0; 5424 read_lock(&journal->j_state_lock); 5425 if (journal->j_committing_transaction) 5426 commit_tid = journal->j_committing_transaction->t_tid; 5427 read_unlock(&journal->j_state_lock); 5428 if (commit_tid) 5429 jbd2_log_wait_commit(journal, commit_tid); 5430 } 5431 } 5432 5433 /* 5434 * ext4_setattr() 5435 * 5436 * Called from notify_change. 5437 * 5438 * We want to trap VFS attempts to truncate the file as soon as 5439 * possible. In particular, we want to make sure that when the VFS 5440 * shrinks i_size, we put the inode on the orphan list and modify 5441 * i_disksize immediately, so that during the subsequent flushing of 5442 * dirty pages and freeing of disk blocks, we can guarantee that any 5443 * commit will leave the blocks being flushed in an unused state on 5444 * disk. (On recovery, the inode will get truncated and the blocks will 5445 * be freed, so we have a strong guarantee that no future commit will 5446 * leave these blocks visible to the user.) 5447 * 5448 * Another thing we have to assure is that if we are in ordered mode 5449 * and the inode is still attached to the committing transaction, we must 5450 * start writeout of all the dirty pages which are being truncated.

/*
 * ext4_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible.  In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk.  (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Another thing we have to assure is that if we are in ordered mode
 * and inode is still attached to the committing transaction, we must
 * start writeout of all the dirty pages which are being truncated.
 * This way we are sure that all the data written in the previous
 * transaction are already on disk (truncate waits for pages under
 * writeback).
 *
 * Called with inode->i_mutex down.
 */
int ext4_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int error, rc = 0;
	int orphan = 0;
	const unsigned int ia_valid = attr->ia_valid;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	error = fscrypt_prepare_setattr(dentry, attr);
	if (error)
		return error;

	if (is_quota_modification(inode, attr)) {
		error = dquot_initialize(inode);
		if (error)
			return error;
	}
	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
		handle_t *handle;

		/* (user+group)*(old+new) structure, inode write (sb,
		 * inode block, ? - but truncate inode update has it) */
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
			(EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
			 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}

		/* dquot_transfer() calls back ext4_get_inode_usage() which
		 * counts xattr inode references.
		 */
		down_read(&EXT4_I(inode)->xattr_sem);
		error = dquot_transfer(inode, attr);
		up_read(&EXT4_I(inode)->xattr_sem);

		if (error) {
			ext4_journal_stop(handle);
			return error;
		}
		/* Update corresponding info in inode so that everything is in
		 * one transaction */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		error = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	}

	if (attr->ia_valid & ATTR_SIZE) {
		handle_t *handle;
		loff_t oldsize = inode->i_size;
		int shrink = (attr->ia_size <= inode->i_size);

		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

			if (attr->ia_size > sbi->s_bitmap_maxbytes)
				return -EFBIG;
		}
		if (!S_ISREG(inode->i_mode))
			return -EINVAL;

		if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
			inode_inc_iversion(inode);

		if (ext4_should_order_data(inode) &&
		    (attr->ia_size < inode->i_size)) {
			error = ext4_begin_ordered_truncate(inode,
							    attr->ia_size);
			if (error)
				goto err_out;
		}
		if (attr->ia_size != inode->i_size) {
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
			if (IS_ERR(handle)) {
				error = PTR_ERR(handle);
				goto err_out;
			}
			if (ext4_handle_valid(handle) && shrink) {
				error = ext4_orphan_add(handle, inode);
				orphan = 1;
			}
			/*
			 * Update c/mtime on truncate up, ext4_truncate() will
			 * update c/mtime in shrink case below
			 */
			if (!shrink) {
				inode->i_mtime = current_time(inode);
				inode->i_ctime = inode->i_mtime;
			}
			down_write(&EXT4_I(inode)->i_data_sem);
			EXT4_I(inode)->i_disksize = attr->ia_size;
			rc = ext4_mark_inode_dirty(handle, inode);
			if (!error)
				error = rc;
			/*
			 * We have to update i_size under i_data_sem together
			 * with i_disksize to avoid races with writeback code
			 * running ext4_wb_update_i_disksize().
			 */
			if (!error)
				i_size_write(inode, attr->ia_size);
			up_write(&EXT4_I(inode)->i_data_sem);
			ext4_journal_stop(handle);
			if (error) {
				if (orphan)
					ext4_orphan_del(NULL, inode);
				goto err_out;
			}
		}
		if (!shrink)
			pagecache_isize_extended(inode, oldsize, inode->i_size);

		/*
		 * Blocks are going to be removed from the inode. Wait
		 * for dio in flight. Temporarily disable
		 * dioread_nolock to prevent livelock.
		 */
		if (orphan) {
			if (!ext4_should_journal_data(inode)) {
				inode_dio_wait(inode);
			} else
				ext4_wait_for_tail_page_commit(inode);
		}
		down_write(&EXT4_I(inode)->i_mmap_sem);

		rc = ext4_break_layouts(inode);
		if (rc) {
			up_write(&EXT4_I(inode)->i_mmap_sem);
			error = rc;
			goto err_out;
		}

		/*
		 * Truncate pagecache after we've waited for commit
		 * in data=journal mode to make pages freeable.
		 */
		truncate_pagecache(inode, inode->i_size);
		if (shrink) {
			rc = ext4_truncate(inode);
			if (rc)
				error = rc;
		}
		up_write(&EXT4_I(inode)->i_mmap_sem);
	}

	if (!error) {
		setattr_copy(inode, attr);
		mark_inode_dirty(inode);
	}

	/*
	 * If the call to ext4_truncate failed to get a transaction handle at
	 * all, we need to clean up the in-core orphan list manually.
	 */
	if (orphan && inode->i_nlink)
		ext4_orphan_del(NULL, inode);

	if (!error && (ia_valid & ATTR_MODE))
		rc = posix_acl_chmod(inode, inode->i_mode);

err_out:
	ext4_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	return error;
}

int ext4_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ext4_inode *raw_inode;
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int flags;

	if (EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = ei->i_crtime.tv_sec;
		stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
	}

	flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
	if (flags & EXT4_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & EXT4_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & EXT4_ENCRYPT_FL)
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & EXT4_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & EXT4_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(inode, stat);
	return 0;
}

int ext4_file_getattr(const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	u64 delalloc_blocks;

	ext4_getattr(path, stat, request_mask, query_flags);

	/*
	 * If there is inline data in the inode, the inode will normally not
	 * have data blocks allocated (it may have an external xattr block).
	 * Report at least one sector for such files, so tools like tar, rsync,
	 * and others don't incorrectly think the file is completely sparse.
	 */
	if (unlikely(ext4_has_inline_data(inode)))
		stat->blocks += (stat->size + 511) >> 9;

	/*
	 * We can't update i_blocks if the block allocation is delayed;
	 * otherwise, in the case of a system crash before the real block
	 * allocation is done, we would have i_blocks inconsistent with
	 * the on-disk file blocks.
	 * We always keep i_blocks updated together with the real
	 * allocation.  But so as not to confuse userspace, stat
	 * returns the blocks that include the delayed allocation
	 * blocks for this file.
	 */
	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
				   EXT4_I(inode)->i_reserved_data_blocks);
	stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
	return 0;
}
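
/*
 * A worked example of the conversions above: a 60-byte inline file
 * reports (60 + 511) >> 9 == 1 sector, and with 4k blocks
 * (s_blocksize_bits == 12) each reserved delalloc block adds
 * 1 << (12 - 9) == 8 sectors to stat->blocks.
 */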

static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
				   int pextents)
{
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return ext4_ind_trans_blocks(inode, lblocks);
	return ext4_ext_index_trans_blocks(inode, pextents);
}

/*
 * Account for index blocks, block group bitmaps and block group
 * descriptor blocks when modifying data blocks and index blocks.
 * In the worst case, the index blocks are spread over different
 * block groups.
 *
 * If the data blocks are discontiguous, they may spread over different
 * block groups too.  Even if they are contiguous, with flexbg they
 * could still cross a block group boundary.
 *
 * Also account for superblock, inode, quota and xattr blocks.
 */
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents)
{
	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
	int gdpblocks;
	int idxblocks;
	int ret = 0;

	/*
	 * How many index blocks do we need to touch to map @lblocks
	 * logical blocks to @pextents physical extents?
	 */
	idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);

	ret = idxblocks;

	/*
	 * Now let's see how many group bitmaps and group descriptors
	 * we need to account for.
	 */
	groups = idxblocks + pextents;
	gdpblocks = groups;
	if (groups > ngroups)
		groups = ngroups;
	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;

	/* bitmaps and block group descriptor blocks */
	ret += groups + gdpblocks;

	/* Blocks for super block, inode, quota and xattr blocks */
	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);

	return ret;
}

/*
 * Calculate the total number of credits to reserve to fit
 * the modification of a single page into a single transaction,
 * which may include multiple chunks of block allocations.
 *
 * This could be called via ext4_write_begin()
 *
 * We need to consider the worst case, when
 * one new block per extent.
 */
int ext4_writepage_trans_blocks(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);
	int ret;

	ret = ext4_meta_trans_blocks(inode, bpp, bpp);

	/* Account for data blocks for journalled mode */
	if (ext4_should_journal_data(inode))
		ret += bpp;
	return ret;
}
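
/*
 * A worked example, with hypothetical numbers, for the credit math
 * above: if ext4_index_trans_blocks() returned 5 for lblocks == 4 and
 * pextents == 4, then groups = gdpblocks = 5 + 4 = 9 (assuming both
 * ngroups and s_gdb_count exceed 9), and ext4_meta_trans_blocks()
 * returns 5 + 9 + 9 + EXT4_META_TRANS_BLOCKS(sb).  For such a 4-block
 * page in data=journal mode, ext4_writepage_trans_blocks() would add
 * bpp == 4 on top of that.
 */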

/*
 * Calculate the journal credits for a chunk of data modification.
 *
 * This is called from DIO, fallocate, or whoever calls
 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
 *
 * Journal buffers for data blocks are not included here, as DIO
 * and fallocate do not need to journal data buffers.
 */
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
	return ext4_meta_trans_blocks(inode, nrblocks, 1);
}

/*
 * The caller must have previously called ext4_reserve_inode_write().
 * Given this, we know that the caller already has write access to iloc->bh.
 */
int ext4_mark_iloc_dirty(handle_t *handle,
			 struct inode *inode, struct ext4_iloc *iloc)
{
	int err = 0;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);

	/* the do_update_inode consumes one bh->b_count */
	get_bh(iloc->bh);

	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
	err = ext4_do_update_inode(handle, inode, iloc);
	put_bh(iloc->bh);
	return err;
}

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh.  This _must_ be cleaned up later.
 */
int
ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			 struct ext4_iloc *iloc)
{
	int err;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	err = ext4_get_inode_loc(inode, iloc);
	if (!err) {
		BUFFER_TRACE(iloc->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, iloc->bh);
		if (err) {
			brelse(iloc->bh);
			iloc->bh = NULL;
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
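
/*
 * Illustrative only, not built: the canonical update pattern that the
 * two helpers above implement.  The helper name is hypothetical; real
 * callers include ext4_mark_inode_dirty() below.  Note that on success
 * ext4_reserve_inode_write() leaves a reference in iloc.bh, which
 * ext4_mark_iloc_dirty() drops via ext4_do_update_inode().
 */
#if 0
static int ext4_touch_inode_example(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	int err;

	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		return err;
	/* ...modify the in-core inode under the running handle... */
	inode->i_ctime = current_time(inode);
	return ext4_mark_iloc_dirty(handle, inode, &iloc);
}
#endif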

static int __ext4_expand_extra_isize(struct inode *inode,
				     unsigned int new_extra_isize,
				     struct ext4_iloc *iloc,
				     handle_t *handle, int *no_expand)
{
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;
	int error;

	raw_inode = ext4_raw_inode(iloc);

	header = IHDR(inode, raw_inode);

	/* No extended attributes present */
	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
		       EXT4_I(inode)->i_extra_isize, 0,
		       new_extra_isize - EXT4_I(inode)->i_extra_isize);
		EXT4_I(inode)->i_extra_isize = new_extra_isize;
		return 0;
	}

	/* try to expand with EAs present */
	error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
					   raw_inode, handle);
	if (error) {
		/*
		 * Inode size expansion failed; don't try again
		 */
		*no_expand = 1;
	}

	return error;
}

/*
 * Expand an inode by new_extra_isize bytes.
 * Returns 0 on success or negative error number on failure.
 */
static int ext4_try_to_expand_extra_isize(struct inode *inode,
					  unsigned int new_extra_isize,
					  struct ext4_iloc iloc,
					  handle_t *handle)
{
	int no_expand;
	int error;

	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND))
		return -EOVERFLOW;

	/*
	 * In nojournal mode, we can immediately attempt to expand
	 * the inode.  When journaled, we first need to obtain extra
	 * buffer credits since we may write into the EA block
	 * with this same handle.  If journal_extend fails, then it will
	 * only result in a minor loss of functionality for that inode.
	 * If this is felt to be critical, then e2fsck should be run to
	 * force a large enough s_min_extra_isize.
	 */
	if (ext4_handle_valid(handle) &&
	    jbd2_journal_extend(handle,
				EXT4_DATA_TRANS_BLOCKS(inode->i_sb)) != 0)
		return -ENOSPC;

	if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
		return -EBUSY;

	error = __ext4_expand_extra_isize(inode, new_extra_isize, &iloc,
					  handle, &no_expand);
	ext4_write_unlock_xattr(inode, &no_expand);

	return error;
}

int ext4_expand_extra_isize(struct inode *inode,
			    unsigned int new_extra_isize,
			    struct ext4_iloc *iloc)
{
	handle_t *handle;
	int no_expand;
	int error, rc;

	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
		brelse(iloc->bh);
		return -EOVERFLOW;
	}

	handle = ext4_journal_start(inode, EXT4_HT_INODE,
				    EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
	if (IS_ERR(handle)) {
		error = PTR_ERR(handle);
		brelse(iloc->bh);
		return error;
	}

	ext4_write_lock_xattr(inode, &no_expand);

	BUFFER_TRACE(iloc->bh, "get_write_access");
	error = ext4_journal_get_write_access(handle, iloc->bh);
	if (error) {
		brelse(iloc->bh);
		goto out_stop;
	}

	error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
					  handle, &no_expand);

	rc = ext4_mark_iloc_dirty(handle, inode, iloc);
	if (!error)
		error = rc;

	ext4_write_unlock_xattr(inode, &no_expand);
out_stop:
	ext4_journal_stop(handle);
	return error;
}
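
/*
 * The in-inode layout the expansion helpers above operate on
 * (illustrative):
 *
 *	+--------------------------------+ <- offset 0
 *	| good old 128-byte inode        |
 *	+--------------------------------+ <- EXT4_GOOD_OLD_INODE_SIZE
 *	| extra fields (i_extra_isize)   |
 *	+--------------------------------+ <- 128 + i_extra_isize
 *	| in-inode xattrs (h_magic, ...) |
 *	+--------------------------------+ <- EXT4_INODE_SIZE(sb)
 *
 * Growing i_extra_isize claims space from the in-inode xattr area,
 * which is why ext4_expand_extra_isize_ea() must shift or evict EAs
 * when EXT4_STATE_XATTR is set, while the EA-free case is a plain
 * memset() in __ext4_expand_extra_isize().
 */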
6010 * 6011 * If only the I_DIRTY_TIME flag is set, we can skip everything. If 6012 * I_DIRTY_TIME and I_DIRTY_SYNC is set, the only inode fields we need 6013 * to copy into the on-disk inode structure are the timestamp files. 6014 */ 6015 void ext4_dirty_inode(struct inode *inode, int flags) 6016 { 6017 handle_t *handle; 6018 6019 if (flags == I_DIRTY_TIME) 6020 return; 6021 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); 6022 if (IS_ERR(handle)) 6023 goto out; 6024 6025 ext4_mark_inode_dirty(handle, inode); 6026 6027 ext4_journal_stop(handle); 6028 out: 6029 return; 6030 } 6031 6032 #if 0 6033 /* 6034 * Bind an inode's backing buffer_head into this transaction, to prevent 6035 * it from being flushed to disk early. Unlike 6036 * ext4_reserve_inode_write, this leaves behind no bh reference and 6037 * returns no iloc structure, so the caller needs to repeat the iloc 6038 * lookup to mark the inode dirty later. 6039 */ 6040 static int ext4_pin_inode(handle_t *handle, struct inode *inode) 6041 { 6042 struct ext4_iloc iloc; 6043 6044 int err = 0; 6045 if (handle) { 6046 err = ext4_get_inode_loc(inode, &iloc); 6047 if (!err) { 6048 BUFFER_TRACE(iloc.bh, "get_write_access"); 6049 err = jbd2_journal_get_write_access(handle, iloc.bh); 6050 if (!err) 6051 err = ext4_handle_dirty_metadata(handle, 6052 NULL, 6053 iloc.bh); 6054 brelse(iloc.bh); 6055 } 6056 } 6057 ext4_std_error(inode->i_sb, err); 6058 return err; 6059 } 6060 #endif 6061 6062 int ext4_change_inode_journal_flag(struct inode *inode, int val) 6063 { 6064 journal_t *journal; 6065 handle_t *handle; 6066 int err; 6067 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 6068 6069 /* 6070 * We have to be very careful here: changing a data block's 6071 * journaling status dynamically is dangerous. If we write a 6072 * data block to the journal, change the status and then delete 6073 * that block, we risk forgetting to revoke the old log record 6074 * from the journal and so a subsequent replay can corrupt data. 6075 * So, first we make sure that the journal is empty and that 6076 * nobody is changing anything. 6077 */ 6078 6079 journal = EXT4_JOURNAL(inode); 6080 if (!journal) 6081 return 0; 6082 if (is_journal_aborted(journal)) 6083 return -EROFS; 6084 6085 /* Wait for all existing dio workers */ 6086 inode_dio_wait(inode); 6087 6088 /* 6089 * Before flushing the journal and switching inode's aops, we have 6090 * to flush all dirty data the inode has. There can be outstanding 6091 * delayed allocations, there can be unwritten extents created by 6092 * fallocate or buffered writes in dioread_nolock mode covered by 6093 * dirty data which can be converted only after flushing the dirty 6094 * data (and journalled aops don't know how to handle these cases). 6095 */ 6096 if (val) { 6097 down_write(&EXT4_I(inode)->i_mmap_sem); 6098 err = filemap_write_and_wait(inode->i_mapping); 6099 if (err < 0) { 6100 up_write(&EXT4_I(inode)->i_mmap_sem); 6101 return err; 6102 } 6103 } 6104 6105 percpu_down_write(&sbi->s_journal_flag_rwsem); 6106 jbd2_journal_lock_updates(journal); 6107 6108 /* 6109 * OK, there are no updates running now, and all cached data is 6110 * synced to disk. We are now in a completely consistent state 6111 * which doesn't have anything in the journal, and we know that 6112 * no filesystem updates are running, so it is safe to modify 6113 * the inode's in-core data-journaling state flag now. 
6114 */ 6115 6116 if (val) 6117 ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); 6118 else { 6119 err = jbd2_journal_flush(journal); 6120 if (err < 0) { 6121 jbd2_journal_unlock_updates(journal); 6122 percpu_up_write(&sbi->s_journal_flag_rwsem); 6123 return err; 6124 } 6125 ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); 6126 } 6127 ext4_set_aops(inode); 6128 6129 jbd2_journal_unlock_updates(journal); 6130 percpu_up_write(&sbi->s_journal_flag_rwsem); 6131 6132 if (val) 6133 up_write(&EXT4_I(inode)->i_mmap_sem); 6134 6135 /* Finally we can mark the inode as dirty. */ 6136 6137 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1); 6138 if (IS_ERR(handle)) 6139 return PTR_ERR(handle); 6140 6141 err = ext4_mark_inode_dirty(handle, inode); 6142 ext4_handle_sync(handle); 6143 ext4_journal_stop(handle); 6144 ext4_std_error(inode->i_sb, err); 6145 6146 return err; 6147 } 6148 6149 static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh) 6150 { 6151 return !buffer_mapped(bh); 6152 } 6153 6154 int ext4_page_mkwrite(struct vm_fault *vmf) 6155 { 6156 struct vm_area_struct *vma = vmf->vma; 6157 struct page *page = vmf->page; 6158 loff_t size; 6159 unsigned long len; 6160 int ret; 6161 struct file *file = vma->vm_file; 6162 struct inode *inode = file_inode(file); 6163 struct address_space *mapping = inode->i_mapping; 6164 handle_t *handle; 6165 get_block_t *get_block; 6166 int retries = 0; 6167 6168 sb_start_pagefault(inode->i_sb); 6169 file_update_time(vma->vm_file); 6170 6171 down_read(&EXT4_I(inode)->i_mmap_sem); 6172 6173 ret = ext4_convert_inline_data(inode); 6174 if (ret) 6175 goto out_ret; 6176 6177 /* Delalloc case is easy... */ 6178 if (test_opt(inode->i_sb, DELALLOC) && 6179 !ext4_should_journal_data(inode) && 6180 !ext4_nonda_switch(inode->i_sb)) { 6181 do { 6182 ret = block_page_mkwrite(vma, vmf, 6183 ext4_da_get_block_prep); 6184 } while (ret == -ENOSPC && 6185 ext4_should_retry_alloc(inode->i_sb, &retries)); 6186 goto out_ret; 6187 } 6188 6189 lock_page(page); 6190 size = i_size_read(inode); 6191 /* Page got truncated from under us? */ 6192 if (page->mapping != mapping || page_offset(page) > size) { 6193 unlock_page(page); 6194 ret = VM_FAULT_NOPAGE; 6195 goto out; 6196 } 6197 6198 if (page->index == size >> PAGE_SHIFT) 6199 len = size & ~PAGE_MASK; 6200 else 6201 len = PAGE_SIZE; 6202 /* 6203 * Return if we have all the buffers mapped. This avoids the need to do 6204 * journal_start/journal_stop which can block and take a long time 6205 */ 6206 if (page_has_buffers(page)) { 6207 if (!ext4_walk_page_buffers(NULL, page_buffers(page), 6208 0, len, NULL, 6209 ext4_bh_unmapped)) { 6210 /* Wait so that we don't change page under IO */ 6211 wait_for_stable_page(page); 6212 ret = VM_FAULT_LOCKED; 6213 goto out; 6214 } 6215 } 6216 unlock_page(page); 6217 /* OK, we need to fill the hole... 

int ext4_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct page *page = vmf->page;
	loff_t size;
	unsigned long len;
	int ret;
	struct file *file = vma->vm_file;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	handle_t *handle;
	get_block_t *get_block;
	int retries = 0;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	down_read(&EXT4_I(inode)->i_mmap_sem);

	ret = ext4_convert_inline_data(inode);
	if (ret)
		goto out_ret;

	/* Delalloc case is easy... */
	if (test_opt(inode->i_sb, DELALLOC) &&
	    !ext4_should_journal_data(inode) &&
	    !ext4_nonda_switch(inode->i_sb)) {
		do {
			ret = block_page_mkwrite(vma, vmf,
						 ext4_da_get_block_prep);
		} while (ret == -ENOSPC &&
			 ext4_should_retry_alloc(inode->i_sb, &retries));
		goto out_ret;
	}

	lock_page(page);
	size = i_size_read(inode);
	/* Page got truncated from under us? */
	if (page->mapping != mapping || page_offset(page) > size) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (page->index == size >> PAGE_SHIFT)
		len = size & ~PAGE_MASK;
	else
		len = PAGE_SIZE;
	/*
	 * Return if we have all the buffers mapped.  This avoids the need to
	 * do journal_start/journal_stop, which can block and take a long time.
	 */
	if (page_has_buffers(page)) {
		if (!ext4_walk_page_buffers(NULL, page_buffers(page),
					    0, len, NULL,
					    ext4_bh_unmapped)) {
			/* Wait so that we don't change page under IO */
			wait_for_stable_page(page);
			ret = VM_FAULT_LOCKED;
			goto out;
		}
	}
	unlock_page(page);
	/* OK, we need to fill the hole... */
	if (ext4_should_dioread_nolock(inode))
		get_block = ext4_get_block_unwritten;
	else
		get_block = ext4_get_block;
retry_alloc:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
				    ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}
	ret = block_page_mkwrite(vma, vmf, get_block);
	if (!ret && ext4_should_journal_data(inode)) {
		if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
			  PAGE_SIZE, NULL, do_journal_get_write_access)) {
			unlock_page(page);
			ret = VM_FAULT_SIGBUS;
			ext4_journal_stop(handle);
			goto out;
		}
		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	}
	ext4_journal_stop(handle);
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry_alloc;
out_ret:
	ret = block_page_mkwrite_return(ret);
out:
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(inode->i_sb);
	return ret;
}

int ext4_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int err;

	down_read(&EXT4_I(inode)->i_mmap_sem);
	err = filemap_fault(vmf);
	up_read(&EXT4_I(inode)->i_mmap_sem);

	return err;
}
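
/*
 * For context: the fault handlers above are wired into the mm layer via
 * the file's vm_operations_struct; in this tree that hookup lives in
 * fs/ext4/file.c, roughly:
 *
 *	static const struct vm_operations_struct ext4_file_vm_ops = {
 *		.fault		= ext4_filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= ext4_page_mkwrite,
 *	};
 */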