1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * fs/f2fs/data.c 4 * 5 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 6 * http://www.samsung.com/ 7 */ 8 #include <linux/fs.h> 9 #include <linux/f2fs_fs.h> 10 #include <linux/buffer_head.h> 11 #include <linux/mpage.h> 12 #include <linux/writeback.h> 13 #include <linux/backing-dev.h> 14 #include <linux/pagevec.h> 15 #include <linux/blkdev.h> 16 #include <linux/bio.h> 17 #include <linux/prefetch.h> 18 #include <linux/uio.h> 19 #include <linux/cleancache.h> 20 #include <linux/sched/signal.h> 21 22 #include "f2fs.h" 23 #include "node.h" 24 #include "segment.h" 25 #include "trace.h" 26 #include <trace/events/f2fs.h> 27 28 #define NUM_PREALLOC_POST_READ_CTXS 128 29 30 static struct kmem_cache *bio_post_read_ctx_cache; 31 static mempool_t *bio_post_read_ctx_pool; 32 33 static bool __is_cp_guaranteed(struct page *page) 34 { 35 struct address_space *mapping = page->mapping; 36 struct inode *inode; 37 struct f2fs_sb_info *sbi; 38 39 if (!mapping) 40 return false; 41 42 inode = mapping->host; 43 sbi = F2FS_I_SB(inode); 44 45 if (inode->i_ino == F2FS_META_INO(sbi) || 46 inode->i_ino == F2FS_NODE_INO(sbi) || 47 S_ISDIR(inode->i_mode) || 48 (S_ISREG(inode->i_mode) && 49 (f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) || 50 is_cold_data(page)) 51 return true; 52 return false; 53 } 54 55 static enum count_type __read_io_type(struct page *page) 56 { 57 struct address_space *mapping = page->mapping; 58 59 if (mapping) { 60 struct inode *inode = mapping->host; 61 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 62 63 if (inode->i_ino == F2FS_META_INO(sbi)) 64 return F2FS_RD_META; 65 66 if (inode->i_ino == F2FS_NODE_INO(sbi)) 67 return F2FS_RD_NODE; 68 } 69 return F2FS_RD_DATA; 70 } 71 72 /* postprocessing steps for read bios */ 73 enum bio_post_read_step { 74 STEP_INITIAL = 0, 75 STEP_DECRYPT, 76 }; 77 78 struct bio_post_read_ctx { 79 struct bio *bio; 80 struct work_struct work; 81 unsigned int cur_step; 82 unsigned int enabled_steps; 83 }; 84 85 static void __read_end_io(struct bio *bio) 86 { 87 struct page *page; 88 struct bio_vec *bv; 89 struct bvec_iter_all iter_all; 90 91 bio_for_each_segment_all(bv, bio, iter_all) { 92 page = bv->bv_page; 93 94 /* PG_error was set if any post_read step failed */ 95 if (bio->bi_status || PageError(page)) { 96 ClearPageUptodate(page); 97 /* will re-read again later */ 98 ClearPageError(page); 99 } else { 100 SetPageUptodate(page); 101 } 102 dec_page_count(F2FS_P_SB(page), __read_io_type(page)); 103 unlock_page(page); 104 } 105 if (bio->bi_private) 106 mempool_free(bio->bi_private, bio_post_read_ctx_pool); 107 bio_put(bio); 108 } 109 110 static void bio_post_read_processing(struct bio_post_read_ctx *ctx); 111 112 static void decrypt_work(struct work_struct *work) 113 { 114 struct bio_post_read_ctx *ctx = 115 container_of(work, struct bio_post_read_ctx, work); 116 117 fscrypt_decrypt_bio(ctx->bio); 118 119 bio_post_read_processing(ctx); 120 } 121 122 static void bio_post_read_processing(struct bio_post_read_ctx *ctx) 123 { 124 switch (++ctx->cur_step) { 125 case STEP_DECRYPT: 126 if (ctx->enabled_steps & (1 << STEP_DECRYPT)) { 127 INIT_WORK(&ctx->work, decrypt_work); 128 fscrypt_enqueue_decrypt_work(&ctx->work); 129 return; 130 } 131 ctx->cur_step++; 132 /* fall-through */ 133 default: 134 __read_end_io(ctx->bio); 135 } 136 } 137 138 static bool f2fs_bio_post_read_required(struct bio *bio) 139 { 140 return bio->bi_private && !bio->bi_status; 141 } 142 143 static void f2fs_read_end_io(struct bio *bio) 144 { 145 if 
(time_to_inject(F2FS_P_SB(bio_first_page_all(bio)),
						FAULT_READ_IO)) {
		f2fs_show_injection_info(FAULT_READ_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	if (f2fs_bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}

	__read_end_io(bio);
}

static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	if (time_to_inject(sbi, FAULT_WRITE_IO)) {
		f2fs_show_injection_info(FAULT_WRITE_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (IS_DUMMY_WRITTEN_PAGE(page)) {
			set_page_private(page, (unsigned long)NULL);
			ClearPagePrivate(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true);
		}

		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
					page->index != nid_of_node(page));

		dec_page_count(sbi, type);
		if (f2fs_in_warm_node_list(sbi, page))
			f2fs_del_fsync_node_entry(sbi, page);
		clear_cold_data(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	if (f2fs_is_multi_device(sbi)) {
		for (i = 0; i < sbi->s_ndevs; i++) {
			if (FDEV(i).start_blk <= blk_addr &&
					FDEV(i).end_blk >= blk_addr) {
				blk_addr -= FDEV(i).start_blk;
				bdev = FDEV(i).bdev;
				break;
			}
		}
	}
	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}

/*
 * Return true if @bio's bdev is the same as the device that backs blk_addr.
 */
static bool __same_bdev(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);
	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
}

/*
 * Low-level block read/write IO operations.
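 *
 * __bio_alloc() points the bio at the device that backs blk_addr (see
 * f2fs_target_device()), and __submit_bio() pads partially filled DATA/NODE
 * write bios with dummy pages so their size stays aligned to
 * F2FS_IO_SIZE(sbi).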
260 */ 261 static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr, 262 struct writeback_control *wbc, 263 int npages, bool is_read, 264 enum page_type type, enum temp_type temp) 265 { 266 struct bio *bio; 267 268 bio = f2fs_bio_alloc(sbi, npages, true); 269 270 f2fs_target_device(sbi, blk_addr, bio); 271 if (is_read) { 272 bio->bi_end_io = f2fs_read_end_io; 273 bio->bi_private = NULL; 274 } else { 275 bio->bi_end_io = f2fs_write_end_io; 276 bio->bi_private = sbi; 277 bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, type, temp); 278 } 279 if (wbc) 280 wbc_init_bio(wbc, bio); 281 282 return bio; 283 } 284 285 static inline void __submit_bio(struct f2fs_sb_info *sbi, 286 struct bio *bio, enum page_type type) 287 { 288 if (!is_read_io(bio_op(bio))) { 289 unsigned int start; 290 291 if (type != DATA && type != NODE) 292 goto submit_io; 293 294 if (test_opt(sbi, LFS) && current->plug) 295 blk_finish_plug(current->plug); 296 297 start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS; 298 start %= F2FS_IO_SIZE(sbi); 299 300 if (start == 0) 301 goto submit_io; 302 303 /* fill dummy pages */ 304 for (; start < F2FS_IO_SIZE(sbi); start++) { 305 struct page *page = 306 mempool_alloc(sbi->write_io_dummy, 307 GFP_NOIO | __GFP_NOFAIL); 308 f2fs_bug_on(sbi, !page); 309 310 zero_user_segment(page, 0, PAGE_SIZE); 311 SetPagePrivate(page); 312 set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE); 313 lock_page(page); 314 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) 315 f2fs_bug_on(sbi, 1); 316 } 317 /* 318 * In the NODE case, we lose next block address chain. So, we 319 * need to do checkpoint in f2fs_sync_file. 320 */ 321 if (type == NODE) 322 set_sbi_flag(sbi, SBI_NEED_CP); 323 } 324 submit_io: 325 if (is_read_io(bio_op(bio))) 326 trace_f2fs_submit_read_bio(sbi->sb, type, bio); 327 else 328 trace_f2fs_submit_write_bio(sbi->sb, type, bio); 329 submit_bio(bio); 330 } 331 332 static void __submit_merged_bio(struct f2fs_bio_info *io) 333 { 334 struct f2fs_io_info *fio = &io->fio; 335 336 if (!io->bio) 337 return; 338 339 bio_set_op_attrs(io->bio, fio->op, fio->op_flags); 340 341 if (is_read_io(fio->op)) 342 trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio); 343 else 344 trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio); 345 346 __submit_bio(io->sbi, io->bio, fio->type); 347 io->bio = NULL; 348 } 349 350 static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode, 351 struct page *page, nid_t ino) 352 { 353 struct bio_vec *bvec; 354 struct page *target; 355 struct bvec_iter_all iter_all; 356 357 if (!io->bio) 358 return false; 359 360 if (!inode && !page && !ino) 361 return true; 362 363 bio_for_each_segment_all(bvec, io->bio, iter_all) { 364 365 if (bvec->bv_page->mapping) 366 target = bvec->bv_page; 367 else 368 target = fscrypt_control_page(bvec->bv_page); 369 370 if (inode && inode == target->mapping->host) 371 return true; 372 if (page && page == target) 373 return true; 374 if (ino && ino == ino_of_node(target)) 375 return true; 376 } 377 378 return false; 379 } 380 381 static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi, 382 enum page_type type, enum temp_type temp) 383 { 384 enum page_type btype = PAGE_TYPE_OF_BIO(type); 385 struct f2fs_bio_info *io = sbi->write_io[btype] + temp; 386 387 down_write(&io->io_rwsem); 388 389 /* change META to META_FLUSH in the checkpoint procedure */ 390 if (type >= META_FLUSH) { 391 io->fio.type = META_FLUSH; 392 io->fio.op = REQ_OP_WRITE; 393 io->fio.op_flags = REQ_META | REQ_PRIO | 
REQ_SYNC; 394 if (!test_opt(sbi, NOBARRIER)) 395 io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA; 396 } 397 __submit_merged_bio(io); 398 up_write(&io->io_rwsem); 399 } 400 401 static void __submit_merged_write_cond(struct f2fs_sb_info *sbi, 402 struct inode *inode, struct page *page, 403 nid_t ino, enum page_type type, bool force) 404 { 405 enum temp_type temp; 406 bool ret = true; 407 408 for (temp = HOT; temp < NR_TEMP_TYPE; temp++) { 409 if (!force) { 410 enum page_type btype = PAGE_TYPE_OF_BIO(type); 411 struct f2fs_bio_info *io = sbi->write_io[btype] + temp; 412 413 down_read(&io->io_rwsem); 414 ret = __has_merged_page(io, inode, page, ino); 415 up_read(&io->io_rwsem); 416 } 417 if (ret) 418 __f2fs_submit_merged_write(sbi, type, temp); 419 420 /* TODO: use HOT temp only for meta pages now. */ 421 if (type >= META) 422 break; 423 } 424 } 425 426 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type) 427 { 428 __submit_merged_write_cond(sbi, NULL, NULL, 0, type, true); 429 } 430 431 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi, 432 struct inode *inode, struct page *page, 433 nid_t ino, enum page_type type) 434 { 435 __submit_merged_write_cond(sbi, inode, page, ino, type, false); 436 } 437 438 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi) 439 { 440 f2fs_submit_merged_write(sbi, DATA); 441 f2fs_submit_merged_write(sbi, NODE); 442 f2fs_submit_merged_write(sbi, META); 443 } 444 445 /* 446 * Fill the locked page with data located in the block address. 447 * A caller needs to unlock the page on failure. 448 */ 449 int f2fs_submit_page_bio(struct f2fs_io_info *fio) 450 { 451 struct bio *bio; 452 struct page *page = fio->encrypted_page ? 453 fio->encrypted_page : fio->page; 454 455 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, 456 fio->is_por ? META_POR : (__is_meta_io(fio) ? 457 META_GENERIC : DATA_GENERIC_ENHANCE))) 458 return -EFAULT; 459 460 trace_f2fs_submit_page_bio(page, fio); 461 f2fs_trace_ios(fio, 0); 462 463 /* Allocate a new bio */ 464 bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc, 465 1, is_read_io(fio->op), fio->type, fio->temp); 466 467 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { 468 bio_put(bio); 469 return -EFAULT; 470 } 471 472 if (fio->io_wbc && !is_read_io(fio->op)) 473 wbc_account_io(fio->io_wbc, page, PAGE_SIZE); 474 475 bio_set_op_attrs(bio, fio->op, fio->op_flags); 476 477 inc_page_count(fio->sbi, is_read_io(fio->op) ? 478 __read_io_type(page): WB_DATA_TYPE(fio->page)); 479 480 __submit_bio(fio->sbi, bio, fio->type); 481 return 0; 482 } 483 484 void f2fs_submit_page_write(struct f2fs_io_info *fio) 485 { 486 struct f2fs_sb_info *sbi = fio->sbi; 487 enum page_type btype = PAGE_TYPE_OF_BIO(fio->type); 488 struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp; 489 struct page *bio_page; 490 491 f2fs_bug_on(sbi, is_read_io(fio->op)); 492 493 down_write(&io->io_rwsem); 494 next: 495 if (fio->in_list) { 496 spin_lock(&io->io_lock); 497 if (list_empty(&io->io_list)) { 498 spin_unlock(&io->io_lock); 499 goto out; 500 } 501 fio = list_first_entry(&io->io_list, 502 struct f2fs_io_info, list); 503 list_del(&fio->list); 504 spin_unlock(&io->io_lock); 505 } 506 507 verify_fio_blkaddr(fio); 508 509 bio_page = fio->encrypted_page ? 
fio->encrypted_page : fio->page; 510 511 /* set submitted = true as a return value */ 512 fio->submitted = true; 513 514 inc_page_count(sbi, WB_DATA_TYPE(bio_page)); 515 516 if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 || 517 (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) || 518 !__same_bdev(sbi, fio->new_blkaddr, io->bio))) 519 __submit_merged_bio(io); 520 alloc_new: 521 if (io->bio == NULL) { 522 if ((fio->type == DATA || fio->type == NODE) && 523 fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) { 524 dec_page_count(sbi, WB_DATA_TYPE(bio_page)); 525 fio->retry = true; 526 goto skip; 527 } 528 io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc, 529 BIO_MAX_PAGES, false, 530 fio->type, fio->temp); 531 io->fio = *fio; 532 } 533 534 if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) { 535 __submit_merged_bio(io); 536 goto alloc_new; 537 } 538 539 if (fio->io_wbc) 540 wbc_account_io(fio->io_wbc, bio_page, PAGE_SIZE); 541 542 io->last_block_in_bio = fio->new_blkaddr; 543 f2fs_trace_ios(fio, 0); 544 545 trace_f2fs_submit_page_write(fio->page, fio); 546 skip: 547 if (fio->in_list) 548 goto next; 549 out: 550 if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) || 551 f2fs_is_checkpoint_ready(sbi)) 552 __submit_merged_bio(io); 553 up_write(&io->io_rwsem); 554 } 555 556 static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, 557 unsigned nr_pages, unsigned op_flag) 558 { 559 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 560 struct bio *bio; 561 struct bio_post_read_ctx *ctx; 562 unsigned int post_read_steps = 0; 563 564 bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false); 565 if (!bio) 566 return ERR_PTR(-ENOMEM); 567 f2fs_target_device(sbi, blkaddr, bio); 568 bio->bi_end_io = f2fs_read_end_io; 569 bio_set_op_attrs(bio, REQ_OP_READ, op_flag); 570 571 if (f2fs_encrypted_file(inode)) 572 post_read_steps |= 1 << STEP_DECRYPT; 573 if (post_read_steps) { 574 ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS); 575 if (!ctx) { 576 bio_put(bio); 577 return ERR_PTR(-ENOMEM); 578 } 579 ctx->bio = bio; 580 ctx->enabled_steps = post_read_steps; 581 bio->bi_private = ctx; 582 } 583 584 return bio; 585 } 586 587 /* This can handle encryption stuffs */ 588 static int f2fs_submit_page_read(struct inode *inode, struct page *page, 589 block_t blkaddr) 590 { 591 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 592 struct bio *bio; 593 594 bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0); 595 if (IS_ERR(bio)) 596 return PTR_ERR(bio); 597 598 /* wait for GCed page writeback via META_MAPPING */ 599 f2fs_wait_on_block_writeback(inode, blkaddr); 600 601 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { 602 bio_put(bio); 603 return -EFAULT; 604 } 605 ClearPageError(page); 606 inc_page_count(sbi, F2FS_RD_DATA); 607 __submit_bio(sbi, bio, DATA); 608 return 0; 609 } 610 611 static void __set_data_blkaddr(struct dnode_of_data *dn) 612 { 613 struct f2fs_node *rn = F2FS_NODE(dn->node_page); 614 __le32 *addr_array; 615 int base = 0; 616 617 if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode)) 618 base = get_extra_isize(dn->inode); 619 620 /* Get physical address of data block */ 621 addr_array = blkaddr_in_node(rn); 622 addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr); 623 } 624 625 /* 626 * Lock ordering for the change of data block address: 627 * ->data_page 628 * ->node_page 629 * update block addresses in the node page 630 */ 631 void f2fs_set_data_blkaddr(struct dnode_of_data *dn) 632 { 633 
f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true); 634 __set_data_blkaddr(dn); 635 if (set_page_dirty(dn->node_page)) 636 dn->node_changed = true; 637 } 638 639 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr) 640 { 641 dn->data_blkaddr = blkaddr; 642 f2fs_set_data_blkaddr(dn); 643 f2fs_update_extent_cache(dn); 644 } 645 646 /* dn->ofs_in_node will be returned with up-to-date last block pointer */ 647 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count) 648 { 649 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); 650 int err; 651 652 if (!count) 653 return 0; 654 655 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC))) 656 return -EPERM; 657 if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count)))) 658 return err; 659 660 trace_f2fs_reserve_new_blocks(dn->inode, dn->nid, 661 dn->ofs_in_node, count); 662 663 f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true); 664 665 for (; count > 0; dn->ofs_in_node++) { 666 block_t blkaddr = datablock_addr(dn->inode, 667 dn->node_page, dn->ofs_in_node); 668 if (blkaddr == NULL_ADDR) { 669 dn->data_blkaddr = NEW_ADDR; 670 __set_data_blkaddr(dn); 671 count--; 672 } 673 } 674 675 if (set_page_dirty(dn->node_page)) 676 dn->node_changed = true; 677 return 0; 678 } 679 680 /* Should keep dn->ofs_in_node unchanged */ 681 int f2fs_reserve_new_block(struct dnode_of_data *dn) 682 { 683 unsigned int ofs_in_node = dn->ofs_in_node; 684 int ret; 685 686 ret = f2fs_reserve_new_blocks(dn, 1); 687 dn->ofs_in_node = ofs_in_node; 688 return ret; 689 } 690 691 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index) 692 { 693 bool need_put = dn->inode_page ? false : true; 694 int err; 695 696 err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE); 697 if (err) 698 return err; 699 700 if (dn->data_blkaddr == NULL_ADDR) 701 err = f2fs_reserve_new_block(dn); 702 if (err || need_put) 703 f2fs_put_dnode(dn); 704 return err; 705 } 706 707 int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index) 708 { 709 struct extent_info ei = {0,0,0}; 710 struct inode *inode = dn->inode; 711 712 if (f2fs_lookup_extent_cache(inode, index, &ei)) { 713 dn->data_blkaddr = ei.blk + index - ei.fofs; 714 return 0; 715 } 716 717 return f2fs_reserve_block(dn, index); 718 } 719 720 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index, 721 int op_flags, bool for_write) 722 { 723 struct address_space *mapping = inode->i_mapping; 724 struct dnode_of_data dn; 725 struct page *page; 726 struct extent_info ei = {0,0,0}; 727 int err; 728 729 page = f2fs_grab_cache_page(mapping, index, for_write); 730 if (!page) 731 return ERR_PTR(-ENOMEM); 732 733 if (f2fs_lookup_extent_cache(inode, index, &ei)) { 734 dn.data_blkaddr = ei.blk + index - ei.fofs; 735 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr, 736 DATA_GENERIC_ENHANCE_READ)) { 737 err = -EFAULT; 738 goto put_err; 739 } 740 goto got_it; 741 } 742 743 set_new_dnode(&dn, inode, NULL, NULL, 0); 744 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE); 745 if (err) 746 goto put_err; 747 f2fs_put_dnode(&dn); 748 749 if (unlikely(dn.data_blkaddr == NULL_ADDR)) { 750 err = -ENOENT; 751 goto put_err; 752 } 753 if (dn.data_blkaddr != NEW_ADDR && 754 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode), 755 dn.data_blkaddr, 756 DATA_GENERIC_ENHANCE)) { 757 err = -EFAULT; 758 goto put_err; 759 } 760 got_it: 761 if (PageUptodate(page)) { 762 unlock_page(page); 763 return page; 764 } 765 766 /* 767 * A new dentry page is allocated but not able to be written, since its 768 
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In that case, its blkaddr can remain as NEW_ADDR;
	 * see f2fs_add_link -> f2fs_get_new_data_page ->
	 * f2fs_init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = f2fs_get_read_data_page(inode, index, 0, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * The callers, functions in dir.c and GC, need to know whether this page
 * exists or not.
 */
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = f2fs_get_read_data_page(inode, index, 0, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *f2fs_get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
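		 * (f2fs_put_page(ipage, 1) below both unlocks and drops it,
		 * matching the rule in the comment above this function.)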
866 */ 867 f2fs_put_page(ipage, 1); 868 return ERR_PTR(-ENOMEM); 869 } 870 871 set_new_dnode(&dn, inode, ipage, NULL, 0); 872 err = f2fs_reserve_block(&dn, index); 873 if (err) { 874 f2fs_put_page(page, 1); 875 return ERR_PTR(err); 876 } 877 if (!ipage) 878 f2fs_put_dnode(&dn); 879 880 if (PageUptodate(page)) 881 goto got_it; 882 883 if (dn.data_blkaddr == NEW_ADDR) { 884 zero_user_segment(page, 0, PAGE_SIZE); 885 if (!PageUptodate(page)) 886 SetPageUptodate(page); 887 } else { 888 f2fs_put_page(page, 1); 889 890 /* if ipage exists, blkaddr should be NEW_ADDR */ 891 f2fs_bug_on(F2FS_I_SB(inode), ipage); 892 page = f2fs_get_lock_data_page(inode, index, true); 893 if (IS_ERR(page)) 894 return page; 895 } 896 got_it: 897 if (new_i_size && i_size_read(inode) < 898 ((loff_t)(index + 1) << PAGE_SHIFT)) 899 f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT)); 900 return page; 901 } 902 903 static int __allocate_data_block(struct dnode_of_data *dn, int seg_type) 904 { 905 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); 906 struct f2fs_summary sum; 907 struct node_info ni; 908 block_t old_blkaddr; 909 blkcnt_t count = 1; 910 int err; 911 912 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC))) 913 return -EPERM; 914 915 err = f2fs_get_node_info(sbi, dn->nid, &ni); 916 if (err) 917 return err; 918 919 dn->data_blkaddr = datablock_addr(dn->inode, 920 dn->node_page, dn->ofs_in_node); 921 if (dn->data_blkaddr != NULL_ADDR) 922 goto alloc; 923 924 if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count)))) 925 return err; 926 927 alloc: 928 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); 929 old_blkaddr = dn->data_blkaddr; 930 f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr, 931 &sum, seg_type, NULL, false); 932 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) 933 invalidate_mapping_pages(META_MAPPING(sbi), 934 old_blkaddr, old_blkaddr); 935 f2fs_set_data_blkaddr(dn); 936 937 /* 938 * i_size will be updated by direct_IO. Otherwise, we'll get stale 939 * data from unwritten block via dio_read. 940 */ 941 return 0; 942 } 943 944 int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from) 945 { 946 struct inode *inode = file_inode(iocb->ki_filp); 947 struct f2fs_map_blocks map; 948 int flag; 949 int err = 0; 950 bool direct_io = iocb->ki_flags & IOCB_DIRECT; 951 952 /* convert inline data for Direct I/O*/ 953 if (direct_io) { 954 err = f2fs_convert_inline_inode(inode); 955 if (err) 956 return err; 957 } 958 959 if (direct_io && allow_outplace_dio(inode, iocb, from)) 960 return 0; 961 962 if (is_inode_flag_set(inode, FI_NO_PREALLOC)) 963 return 0; 964 965 map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos); 966 map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from)); 967 if (map.m_len > map.m_lblk) 968 map.m_len -= map.m_lblk; 969 else 970 map.m_len = 0; 971 972 map.m_next_pgofs = NULL; 973 map.m_next_extent = NULL; 974 map.m_seg_type = NO_CHECK_TYPE; 975 map.m_may_create = true; 976 977 if (direct_io) { 978 map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint); 979 flag = f2fs_force_buffered_io(inode, iocb, from) ? 
980 F2FS_GET_BLOCK_PRE_AIO : 981 F2FS_GET_BLOCK_PRE_DIO; 982 goto map_blocks; 983 } 984 if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) { 985 err = f2fs_convert_inline_inode(inode); 986 if (err) 987 return err; 988 } 989 if (f2fs_has_inline_data(inode)) 990 return err; 991 992 flag = F2FS_GET_BLOCK_PRE_AIO; 993 994 map_blocks: 995 err = f2fs_map_blocks(inode, &map, 1, flag); 996 if (map.m_len > 0 && err == -ENOSPC) { 997 if (!direct_io) 998 set_inode_flag(inode, FI_NO_PREALLOC); 999 err = 0; 1000 } 1001 return err; 1002 } 1003 1004 void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock) 1005 { 1006 if (flag == F2FS_GET_BLOCK_PRE_AIO) { 1007 if (lock) 1008 down_read(&sbi->node_change); 1009 else 1010 up_read(&sbi->node_change); 1011 } else { 1012 if (lock) 1013 f2fs_lock_op(sbi); 1014 else 1015 f2fs_unlock_op(sbi); 1016 } 1017 } 1018 1019 /* 1020 * f2fs_map_blocks() now supported readahead/bmap/rw direct_IO with 1021 * f2fs_map_blocks structure. 1022 * If original data blocks are allocated, then give them to blockdev. 1023 * Otherwise, 1024 * a. preallocate requested block addresses 1025 * b. do not use extent cache for better performance 1026 * c. give the block addresses to blockdev 1027 */ 1028 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, 1029 int create, int flag) 1030 { 1031 unsigned int maxblocks = map->m_len; 1032 struct dnode_of_data dn; 1033 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1034 int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE; 1035 pgoff_t pgofs, end_offset, end; 1036 int err = 0, ofs = 1; 1037 unsigned int ofs_in_node, last_ofs_in_node; 1038 blkcnt_t prealloc; 1039 struct extent_info ei = {0,0,0}; 1040 block_t blkaddr; 1041 unsigned int start_pgofs; 1042 1043 if (!maxblocks) 1044 return 0; 1045 1046 map->m_len = 0; 1047 map->m_flags = 0; 1048 1049 /* it only supports block size == page size */ 1050 pgofs = (pgoff_t)map->m_lblk; 1051 end = pgofs + maxblocks; 1052 1053 if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) { 1054 if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO && 1055 map->m_may_create) 1056 goto next_dnode; 1057 1058 map->m_pblk = ei.blk + pgofs - ei.fofs; 1059 map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs); 1060 map->m_flags = F2FS_MAP_MAPPED; 1061 if (map->m_next_extent) 1062 *map->m_next_extent = pgofs + map->m_len; 1063 1064 /* for hardware encryption, but to avoid potential issue in future */ 1065 if (flag == F2FS_GET_BLOCK_DIO) 1066 f2fs_wait_on_block_writeback_range(inode, 1067 map->m_pblk, map->m_len); 1068 goto out; 1069 } 1070 1071 next_dnode: 1072 if (map->m_may_create) 1073 __do_map_lock(sbi, flag, true); 1074 1075 /* When reading holes, we need its node page */ 1076 set_new_dnode(&dn, inode, NULL, NULL, 0); 1077 err = f2fs_get_dnode_of_data(&dn, pgofs, mode); 1078 if (err) { 1079 if (flag == F2FS_GET_BLOCK_BMAP) 1080 map->m_pblk = 0; 1081 if (err == -ENOENT) { 1082 err = 0; 1083 if (map->m_next_pgofs) 1084 *map->m_next_pgofs = 1085 f2fs_get_next_page_offset(&dn, pgofs); 1086 if (map->m_next_extent) 1087 *map->m_next_extent = 1088 f2fs_get_next_page_offset(&dn, pgofs); 1089 } 1090 goto unlock_out; 1091 } 1092 1093 start_pgofs = pgofs; 1094 prealloc = 0; 1095 last_ofs_in_node = ofs_in_node = dn.ofs_in_node; 1096 end_offset = ADDRS_PER_PAGE(dn.node_page, inode); 1097 1098 next_block: 1099 blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node); 1100 1101 if (__is_valid_data_blkaddr(blkaddr) && 1102 !f2fs_is_valid_blkaddr(sbi, blkaddr, 
					DATA_GENERIC_ENHANCE)) {
		err = -EFAULT;
		goto sync_out;
	}

	if (__is_valid_data_blkaddr(blkaddr)) {
		/* use out-of-place update for direct IO under LFS mode */
		if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
							map->m_may_create) {
			err = __allocate_data_block(&dn, map->m_seg_type);
			if (!err) {
				blkaddr = dn.data_blkaddr;
				set_inode_flag(inode, FI_APPEND_WRITE);
			}
		}
	} else {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
					flag != F2FS_GET_BLOCK_DIO);
				err = __allocate_data_block(&dn,
							map->m_seg_type);
				if (!err)
					set_inode_flag(inode, FI_APPEND_WRITE);
			}
			if (err)
				goto sync_out;
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRECACHE)
				goto sync_out;
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP) {
				/* for defragment case */
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = f2fs_reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
	}

	f2fs_put_dnode(&dn);

	if (map->m_may_create) {
		__do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
	goto next_dnode;

sync_out:

	/* for hardware encryption, but to avoid potential issues in the future */
	if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
		f2fs_wait_on_block_writeback_range(inode,
						map->m_pblk, map->m_len);

	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned
int ofs = start_pgofs - map->m_lblk; 1237 1238 f2fs_update_extent_cache_range(&dn, 1239 start_pgofs, map->m_pblk + ofs, 1240 map->m_len - ofs); 1241 } 1242 if (map->m_next_extent) 1243 *map->m_next_extent = pgofs + 1; 1244 } 1245 f2fs_put_dnode(&dn); 1246 unlock_out: 1247 if (map->m_may_create) { 1248 __do_map_lock(sbi, flag, false); 1249 f2fs_balance_fs(sbi, dn.node_changed); 1250 } 1251 out: 1252 trace_f2fs_map_blocks(inode, map, err); 1253 return err; 1254 } 1255 1256 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len) 1257 { 1258 struct f2fs_map_blocks map; 1259 block_t last_lblk; 1260 int err; 1261 1262 if (pos + len > i_size_read(inode)) 1263 return false; 1264 1265 map.m_lblk = F2FS_BYTES_TO_BLK(pos); 1266 map.m_next_pgofs = NULL; 1267 map.m_next_extent = NULL; 1268 map.m_seg_type = NO_CHECK_TYPE; 1269 map.m_may_create = false; 1270 last_lblk = F2FS_BLK_ALIGN(pos + len); 1271 1272 while (map.m_lblk < last_lblk) { 1273 map.m_len = last_lblk - map.m_lblk; 1274 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT); 1275 if (err || map.m_len == 0) 1276 return false; 1277 map.m_lblk += map.m_len; 1278 } 1279 return true; 1280 } 1281 1282 static int __get_data_block(struct inode *inode, sector_t iblock, 1283 struct buffer_head *bh, int create, int flag, 1284 pgoff_t *next_pgofs, int seg_type, bool may_write) 1285 { 1286 struct f2fs_map_blocks map; 1287 int err; 1288 1289 map.m_lblk = iblock; 1290 map.m_len = bh->b_size >> inode->i_blkbits; 1291 map.m_next_pgofs = next_pgofs; 1292 map.m_next_extent = NULL; 1293 map.m_seg_type = seg_type; 1294 map.m_may_create = may_write; 1295 1296 err = f2fs_map_blocks(inode, &map, create, flag); 1297 if (!err) { 1298 map_bh(bh, inode->i_sb, map.m_pblk); 1299 bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags; 1300 bh->b_size = (u64)map.m_len << inode->i_blkbits; 1301 } 1302 return err; 1303 } 1304 1305 static int get_data_block(struct inode *inode, sector_t iblock, 1306 struct buffer_head *bh_result, int create, int flag, 1307 pgoff_t *next_pgofs) 1308 { 1309 return __get_data_block(inode, iblock, bh_result, create, 1310 flag, next_pgofs, 1311 NO_CHECK_TYPE, create); 1312 } 1313 1314 static int get_data_block_dio_write(struct inode *inode, sector_t iblock, 1315 struct buffer_head *bh_result, int create) 1316 { 1317 return __get_data_block(inode, iblock, bh_result, create, 1318 F2FS_GET_BLOCK_DIO, NULL, 1319 f2fs_rw_hint_to_seg_type(inode->i_write_hint), 1320 true); 1321 } 1322 1323 static int get_data_block_dio(struct inode *inode, sector_t iblock, 1324 struct buffer_head *bh_result, int create) 1325 { 1326 return __get_data_block(inode, iblock, bh_result, create, 1327 F2FS_GET_BLOCK_DIO, NULL, 1328 f2fs_rw_hint_to_seg_type(inode->i_write_hint), 1329 false); 1330 } 1331 1332 static int get_data_block_bmap(struct inode *inode, sector_t iblock, 1333 struct buffer_head *bh_result, int create) 1334 { 1335 /* Block number less than F2FS MAX BLOCKS */ 1336 if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks)) 1337 return -EFBIG; 1338 1339 return __get_data_block(inode, iblock, bh_result, create, 1340 F2FS_GET_BLOCK_BMAP, NULL, 1341 NO_CHECK_TYPE, create); 1342 } 1343 1344 static inline sector_t logical_to_blk(struct inode *inode, loff_t offset) 1345 { 1346 return (offset >> inode->i_blkbits); 1347 } 1348 1349 static inline loff_t blk_to_logical(struct inode *inode, sector_t blk) 1350 { 1351 return (blk << inode->i_blkbits); 1352 } 1353 1354 static int f2fs_xattr_fiemap(struct inode *inode, 1355 struct fiemap_extent_info 
*fieinfo) 1356 { 1357 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1358 struct page *page; 1359 struct node_info ni; 1360 __u64 phys = 0, len; 1361 __u32 flags; 1362 nid_t xnid = F2FS_I(inode)->i_xattr_nid; 1363 int err = 0; 1364 1365 if (f2fs_has_inline_xattr(inode)) { 1366 int offset; 1367 1368 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), 1369 inode->i_ino, false); 1370 if (!page) 1371 return -ENOMEM; 1372 1373 err = f2fs_get_node_info(sbi, inode->i_ino, &ni); 1374 if (err) { 1375 f2fs_put_page(page, 1); 1376 return err; 1377 } 1378 1379 phys = (__u64)blk_to_logical(inode, ni.blk_addr); 1380 offset = offsetof(struct f2fs_inode, i_addr) + 1381 sizeof(__le32) * (DEF_ADDRS_PER_INODE - 1382 get_inline_xattr_addrs(inode)); 1383 1384 phys += offset; 1385 len = inline_xattr_size(inode); 1386 1387 f2fs_put_page(page, 1); 1388 1389 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED; 1390 1391 if (!xnid) 1392 flags |= FIEMAP_EXTENT_LAST; 1393 1394 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags); 1395 if (err || err == 1) 1396 return err; 1397 } 1398 1399 if (xnid) { 1400 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false); 1401 if (!page) 1402 return -ENOMEM; 1403 1404 err = f2fs_get_node_info(sbi, xnid, &ni); 1405 if (err) { 1406 f2fs_put_page(page, 1); 1407 return err; 1408 } 1409 1410 phys = (__u64)blk_to_logical(inode, ni.blk_addr); 1411 len = inode->i_sb->s_blocksize; 1412 1413 f2fs_put_page(page, 1); 1414 1415 flags = FIEMAP_EXTENT_LAST; 1416 } 1417 1418 if (phys) 1419 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags); 1420 1421 return (err < 0 ? err : 0); 1422 } 1423 1424 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 1425 u64 start, u64 len) 1426 { 1427 struct buffer_head map_bh; 1428 sector_t start_blk, last_blk; 1429 pgoff_t next_pgofs; 1430 u64 logical = 0, phys = 0, size = 0; 1431 u32 flags = 0; 1432 int ret = 0; 1433 1434 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { 1435 ret = f2fs_precache_extents(inode); 1436 if (ret) 1437 return ret; 1438 } 1439 1440 ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR); 1441 if (ret) 1442 return ret; 1443 1444 inode_lock(inode); 1445 1446 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { 1447 ret = f2fs_xattr_fiemap(inode, fieinfo); 1448 goto out; 1449 } 1450 1451 if (f2fs_has_inline_data(inode)) { 1452 ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len); 1453 if (ret != -EAGAIN) 1454 goto out; 1455 } 1456 1457 if (logical_to_blk(inode, len) == 0) 1458 len = blk_to_logical(inode, 1); 1459 1460 start_blk = logical_to_blk(inode, start); 1461 last_blk = logical_to_blk(inode, start + len - 1); 1462 1463 next: 1464 memset(&map_bh, 0, sizeof(struct buffer_head)); 1465 map_bh.b_size = len; 1466 1467 ret = get_data_block(inode, start_blk, &map_bh, 0, 1468 F2FS_GET_BLOCK_FIEMAP, &next_pgofs); 1469 if (ret) 1470 goto out; 1471 1472 /* HOLE */ 1473 if (!buffer_mapped(&map_bh)) { 1474 start_blk = next_pgofs; 1475 1476 if (blk_to_logical(inode, start_blk) < blk_to_logical(inode, 1477 F2FS_I_SB(inode)->max_file_blocks)) 1478 goto prep_next; 1479 1480 flags |= FIEMAP_EXTENT_LAST; 1481 } 1482 1483 if (size) { 1484 if (IS_ENCRYPTED(inode)) 1485 flags |= FIEMAP_EXTENT_DATA_ENCRYPTED; 1486 1487 ret = fiemap_fill_next_extent(fieinfo, logical, 1488 phys, size, flags); 1489 } 1490 1491 if (start_blk > last_blk || ret) 1492 goto out; 1493 1494 logical = blk_to_logical(inode, start_blk); 1495 phys = blk_to_logical(inode, map_bh.b_blocknr); 1496 size = map_bh.b_size; 1497 flags = 0; 
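	/*
	 * NEW_ADDR (preallocated but not yet written) blocks reach here as
	 * buffer_unwritten(), via F2FS_MAP_UNWRITTEN set in f2fs_map_blocks().
	 */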
1498 if (buffer_unwritten(&map_bh)) 1499 flags = FIEMAP_EXTENT_UNWRITTEN; 1500 1501 start_blk += logical_to_blk(inode, size); 1502 1503 prep_next: 1504 cond_resched(); 1505 if (fatal_signal_pending(current)) 1506 ret = -EINTR; 1507 else 1508 goto next; 1509 out: 1510 if (ret == 1) 1511 ret = 0; 1512 1513 inode_unlock(inode); 1514 return ret; 1515 } 1516 1517 static int f2fs_read_single_page(struct inode *inode, struct page *page, 1518 unsigned nr_pages, 1519 struct f2fs_map_blocks *map, 1520 struct bio **bio_ret, 1521 sector_t *last_block_in_bio, 1522 bool is_readahead) 1523 { 1524 struct bio *bio = *bio_ret; 1525 const unsigned blkbits = inode->i_blkbits; 1526 const unsigned blocksize = 1 << blkbits; 1527 sector_t block_in_file; 1528 sector_t last_block; 1529 sector_t last_block_in_file; 1530 sector_t block_nr; 1531 int ret = 0; 1532 1533 block_in_file = (sector_t)page->index; 1534 last_block = block_in_file + nr_pages; 1535 last_block_in_file = (i_size_read(inode) + blocksize - 1) >> 1536 blkbits; 1537 if (last_block > last_block_in_file) 1538 last_block = last_block_in_file; 1539 1540 /* just zeroing out page which is beyond EOF */ 1541 if (block_in_file >= last_block) 1542 goto zero_out; 1543 /* 1544 * Map blocks using the previous result first. 1545 */ 1546 if ((map->m_flags & F2FS_MAP_MAPPED) && 1547 block_in_file > map->m_lblk && 1548 block_in_file < (map->m_lblk + map->m_len)) 1549 goto got_it; 1550 1551 /* 1552 * Then do more f2fs_map_blocks() calls until we are 1553 * done with this page. 1554 */ 1555 map->m_lblk = block_in_file; 1556 map->m_len = last_block - block_in_file; 1557 1558 ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT); 1559 if (ret) 1560 goto out; 1561 got_it: 1562 if ((map->m_flags & F2FS_MAP_MAPPED)) { 1563 block_nr = map->m_pblk + block_in_file - map->m_lblk; 1564 SetPageMappedToDisk(page); 1565 1566 if (!PageUptodate(page) && !cleancache_get_page(page)) { 1567 SetPageUptodate(page); 1568 goto confused; 1569 } 1570 1571 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr, 1572 DATA_GENERIC_ENHANCE_READ)) { 1573 ret = -EFAULT; 1574 goto out; 1575 } 1576 } else { 1577 zero_out: 1578 zero_user_segment(page, 0, PAGE_SIZE); 1579 if (!PageUptodate(page)) 1580 SetPageUptodate(page); 1581 unlock_page(page); 1582 goto out; 1583 } 1584 1585 /* 1586 * This page will go to BIO. Do we need to send this 1587 * BIO off first? 1588 */ 1589 if (bio && (*last_block_in_bio != block_nr - 1 || 1590 !__same_bdev(F2FS_I_SB(inode), block_nr, bio))) { 1591 submit_and_realloc: 1592 __submit_bio(F2FS_I_SB(inode), bio, DATA); 1593 bio = NULL; 1594 } 1595 if (bio == NULL) { 1596 bio = f2fs_grab_read_bio(inode, block_nr, nr_pages, 1597 is_readahead ? REQ_RAHEAD : 0); 1598 if (IS_ERR(bio)) { 1599 ret = PTR_ERR(bio); 1600 bio = NULL; 1601 goto out; 1602 } 1603 } 1604 1605 /* 1606 * If the page is under writeback, we need to wait for 1607 * its completion to see the correct decrypted data. 1608 */ 1609 f2fs_wait_on_block_writeback(inode, block_nr); 1610 1611 if (bio_add_page(bio, page, blocksize, 0) < blocksize) 1612 goto submit_and_realloc; 1613 1614 inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA); 1615 ClearPageError(page); 1616 *last_block_in_bio = block_nr; 1617 goto out; 1618 confused: 1619 if (bio) { 1620 __submit_bio(F2FS_I_SB(inode), bio, DATA); 1621 bio = NULL; 1622 } 1623 unlock_page(page); 1624 out: 1625 *bio_ret = bio; 1626 return ret; 1627 } 1628 1629 /* 1630 * This function was originally taken from fs/mpage.c, and customized for f2fs. 
1631 * Major change was from block_size == page_size in f2fs by default. 1632 * 1633 * Note that the aops->readpages() function is ONLY used for read-ahead. If 1634 * this function ever deviates from doing just read-ahead, it should either 1635 * use ->readpage() or do the necessary surgery to decouple ->readpages() 1636 * from read-ahead. 1637 */ 1638 static int f2fs_mpage_readpages(struct address_space *mapping, 1639 struct list_head *pages, struct page *page, 1640 unsigned nr_pages, bool is_readahead) 1641 { 1642 struct bio *bio = NULL; 1643 sector_t last_block_in_bio = 0; 1644 struct inode *inode = mapping->host; 1645 struct f2fs_map_blocks map; 1646 int ret = 0; 1647 1648 map.m_pblk = 0; 1649 map.m_lblk = 0; 1650 map.m_len = 0; 1651 map.m_flags = 0; 1652 map.m_next_pgofs = NULL; 1653 map.m_next_extent = NULL; 1654 map.m_seg_type = NO_CHECK_TYPE; 1655 map.m_may_create = false; 1656 1657 for (; nr_pages; nr_pages--) { 1658 if (pages) { 1659 page = list_last_entry(pages, struct page, lru); 1660 1661 prefetchw(&page->flags); 1662 list_del(&page->lru); 1663 if (add_to_page_cache_lru(page, mapping, 1664 page->index, 1665 readahead_gfp_mask(mapping))) 1666 goto next_page; 1667 } 1668 1669 ret = f2fs_read_single_page(inode, page, nr_pages, &map, &bio, 1670 &last_block_in_bio, is_readahead); 1671 if (ret) { 1672 SetPageError(page); 1673 zero_user_segment(page, 0, PAGE_SIZE); 1674 unlock_page(page); 1675 } 1676 next_page: 1677 if (pages) 1678 put_page(page); 1679 } 1680 BUG_ON(pages && !list_empty(pages)); 1681 if (bio) 1682 __submit_bio(F2FS_I_SB(inode), bio, DATA); 1683 return pages ? 0 : ret; 1684 } 1685 1686 static int f2fs_read_data_page(struct file *file, struct page *page) 1687 { 1688 struct inode *inode = page->mapping->host; 1689 int ret = -EAGAIN; 1690 1691 trace_f2fs_readpage(page, DATA); 1692 1693 /* If the file has inline data, try to read it directly */ 1694 if (f2fs_has_inline_data(inode)) 1695 ret = f2fs_read_inline_data(inode, page); 1696 if (ret == -EAGAIN) 1697 ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1, false); 1698 return ret; 1699 } 1700 1701 static int f2fs_read_data_pages(struct file *file, 1702 struct address_space *mapping, 1703 struct list_head *pages, unsigned nr_pages) 1704 { 1705 struct inode *inode = mapping->host; 1706 struct page *page = list_last_entry(pages, struct page, lru); 1707 1708 trace_f2fs_readpages(inode, page, nr_pages); 1709 1710 /* If the file has inline data, skip readpages */ 1711 if (f2fs_has_inline_data(inode)) 1712 return 0; 1713 1714 return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true); 1715 } 1716 1717 static int encrypt_one_page(struct f2fs_io_info *fio) 1718 { 1719 struct inode *inode = fio->page->mapping->host; 1720 struct page *mpage; 1721 gfp_t gfp_flags = GFP_NOFS; 1722 1723 if (!f2fs_encrypted_file(inode)) 1724 return 0; 1725 1726 /* wait for GCed page writeback via META_MAPPING */ 1727 f2fs_wait_on_block_writeback(inode, fio->old_blkaddr); 1728 1729 retry_encrypt: 1730 fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page, 1731 PAGE_SIZE, 0, fio->page->index, gfp_flags); 1732 if (IS_ERR(fio->encrypted_page)) { 1733 /* flush pending IOs and wait for a while in the ENOMEM case */ 1734 if (PTR_ERR(fio->encrypted_page) == -ENOMEM) { 1735 f2fs_flush_merged_writes(fio->sbi); 1736 congestion_wait(BLK_RW_ASYNC, HZ/50); 1737 gfp_flags |= __GFP_NOFAIL; 1738 goto retry_encrypt; 1739 } 1740 return PTR_ERR(fio->encrypted_page); 1741 } 1742 1743 mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr); 1744 
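	/*
	 * If the old block still has a copy cached in META_MAPPING (put there
	 * by GC of encrypted pages), keep that copy in sync with the freshly
	 * encrypted page below.
	 */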
if (mpage) { 1745 if (PageUptodate(mpage)) 1746 memcpy(page_address(mpage), 1747 page_address(fio->encrypted_page), PAGE_SIZE); 1748 f2fs_put_page(mpage, 1); 1749 } 1750 return 0; 1751 } 1752 1753 static inline bool check_inplace_update_policy(struct inode *inode, 1754 struct f2fs_io_info *fio) 1755 { 1756 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1757 unsigned int policy = SM_I(sbi)->ipu_policy; 1758 1759 if (policy & (0x1 << F2FS_IPU_FORCE)) 1760 return true; 1761 if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi)) 1762 return true; 1763 if (policy & (0x1 << F2FS_IPU_UTIL) && 1764 utilization(sbi) > SM_I(sbi)->min_ipu_util) 1765 return true; 1766 if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) && 1767 utilization(sbi) > SM_I(sbi)->min_ipu_util) 1768 return true; 1769 1770 /* 1771 * IPU for rewrite async pages 1772 */ 1773 if (policy & (0x1 << F2FS_IPU_ASYNC) && 1774 fio && fio->op == REQ_OP_WRITE && 1775 !(fio->op_flags & REQ_SYNC) && 1776 !IS_ENCRYPTED(inode)) 1777 return true; 1778 1779 /* this is only set during fdatasync */ 1780 if (policy & (0x1 << F2FS_IPU_FSYNC) && 1781 is_inode_flag_set(inode, FI_NEED_IPU)) 1782 return true; 1783 1784 if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) && 1785 !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr))) 1786 return true; 1787 1788 return false; 1789 } 1790 1791 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio) 1792 { 1793 if (f2fs_is_pinned_file(inode)) 1794 return true; 1795 1796 /* if this is cold file, we should overwrite to avoid fragmentation */ 1797 if (file_is_cold(inode)) 1798 return true; 1799 1800 return check_inplace_update_policy(inode, fio); 1801 } 1802 1803 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio) 1804 { 1805 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1806 1807 if (test_opt(sbi, LFS)) 1808 return true; 1809 if (S_ISDIR(inode->i_mode)) 1810 return true; 1811 if (IS_NOQUOTA(inode)) 1812 return true; 1813 if (f2fs_is_atomic_file(inode)) 1814 return true; 1815 if (fio) { 1816 if (is_cold_data(fio->page)) 1817 return true; 1818 if (IS_ATOMIC_WRITTEN_PAGE(fio->page)) 1819 return true; 1820 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) && 1821 f2fs_is_checkpointed_data(sbi, fio->old_blkaddr))) 1822 return true; 1823 } 1824 return false; 1825 } 1826 1827 static inline bool need_inplace_update(struct f2fs_io_info *fio) 1828 { 1829 struct inode *inode = fio->page->mapping->host; 1830 1831 if (f2fs_should_update_outplace(inode, fio)) 1832 return false; 1833 1834 return f2fs_should_update_inplace(inode, fio); 1835 } 1836 1837 int f2fs_do_write_data_page(struct f2fs_io_info *fio) 1838 { 1839 struct page *page = fio->page; 1840 struct inode *inode = page->mapping->host; 1841 struct dnode_of_data dn; 1842 struct extent_info ei = {0,0,0}; 1843 struct node_info ni; 1844 bool ipu_force = false; 1845 int err = 0; 1846 1847 set_new_dnode(&dn, inode, NULL, NULL, 0); 1848 if (need_inplace_update(fio) && 1849 f2fs_lookup_extent_cache(inode, page->index, &ei)) { 1850 fio->old_blkaddr = ei.blk + page->index - ei.fofs; 1851 1852 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr, 1853 DATA_GENERIC_ENHANCE)) 1854 return -EFAULT; 1855 1856 ipu_force = true; 1857 fio->need_lock = LOCK_DONE; 1858 goto got_it; 1859 } 1860 1861 /* Deadlock due to between page->lock and f2fs_lock_op */ 1862 if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi)) 1863 return -EAGAIN; 1864 1865 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE); 1866 if 
(err)
		goto out;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		clear_cold_data(page);
		goto out_writepage;
	}
got_it:
	if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
		!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
						DATA_GENERIC_ENHANCE)) {
		err = -EFAULT;
		goto out_writepage;
	}
	/*
	 * If the current allocation needs SSR, it is better to do in-place
	 * writes for the updated data.
	 */
	if (ipu_force ||
		(__is_valid_data_blkaddr(fio->old_blkaddr) &&
					need_inplace_update(fio))) {
		err = encrypt_one_page(fio);
		if (err)
			goto out_writepage;

		set_page_writeback(page);
		ClearPageError(page);
		f2fs_put_dnode(&dn);
		if (fio->need_lock == LOCK_REQ)
			f2fs_unlock_op(fio->sbi);
		err = f2fs_inplace_write_data(fio);
		if (err) {
			if (f2fs_encrypted_file(inode))
				fscrypt_pullback_bio_page(&fio->encrypted_page,
									true);
			if (PageWriteback(page))
				end_page_writeback(page);
		} else {
			set_inode_flag(inode, FI_UPDATE_WRITE);
		}
		trace_f2fs_do_write_data_page(fio->page, IPU);
		return err;
	}

	if (fio->need_lock == LOCK_RETRY) {
		if (!f2fs_trylock_op(fio->sbi)) {
			err = -EAGAIN;
			goto out_writepage;
		}
		fio->need_lock = LOCK_REQ;
	}

	err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
	if (err)
		goto out_writepage;

	fio->version = ni.version;

	err = encrypt_one_page(fio);
	if (err)
		goto out_writepage;

	set_page_writeback(page);
	ClearPageError(page);

	/* LFS mode write path */
	f2fs_outplace_write_data(&dn, fio);
	trace_f2fs_do_write_data_page(page, OPU);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
out_writepage:
	f2fs_put_dnode(&dn);
out:
	if (fio->need_lock == LOCK_REQ)
		f2fs_unlock_op(fio->sbi);
	return err;
}

static int __write_data_page(struct page *page, bool *submitted,
				struct writeback_control *wbc,
				enum iostat_type io_type)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	loff_t psize = (page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NULL_ADDR,
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.need_lock = LOCK_RETRY,
		.io_type = io_type,
		.io_wbc = wbc,
	};

	trace_f2fs_writepage(page, DATA);

	/* we should bypass data pages so that the kworker jobs can proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		/*
		 * don't drop any dirty dentry pages, to keep the latest
		 * directory structure.
		 */
		if (S_ISDIR(inode->i_mode))
			goto redirty_out;
		goto out;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write 0'th page having journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			f2fs_available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		fio.need_lock = LOCK_DONE;
		err = f2fs_do_write_data_page(&fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0, 0))
		goto redirty_out;
	else
		set_inode_flag(inode, FI_HOT_DATA);

	err = -EAGAIN;
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_write_inline_data(inode, page);
		if (!err)
			goto out;
	}

	if (err == -EAGAIN) {
		err = f2fs_do_write_data_page(&fio);
		if (err == -EAGAIN) {
			fio.need_lock = LOCK_REQ;
			err = f2fs_do_write_data_page(&fio);
		}
	}

	if (err) {
		file_set_keep_isize(inode);
	} else {
		down_write(&F2FS_I(inode)->i_sem);
		if (F2FS_I(inode)->last_disk_size < psize)
			F2FS_I(inode)->last_disk_size = psize;
		up_write(&F2FS_I(inode)->i_sem);
	}

done:
	if (err && err != -ENOENT)
		goto redirty_out;

out:
	inode_dec_dirty_pages(inode);
	if (err) {
		ClearPageUptodate(page);
		clear_cold_data(page);
	}

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
		clear_inode_flag(inode, FI_HOT_DATA);
		f2fs_remove_dirty_inode(inode);
		submitted = NULL;
	}

	unlock_page(page);
	if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
					!F2FS_I(inode)->cp_task)
		f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, DATA);
		submitted = NULL;
	}

	if (submitted)
		*submitted = fio.submitted;

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	/*
	 * pageout() in MM translates EAGAIN, so it calls handle_write_error()
	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
	 * file_write_and_wait_range() will then see the EIO error, which is
	 * critical for fsync() to return the atomic_write failure to the user.
	 */
	if (!err || wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;
	unlock_page(page);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	return __write_data_page(page, NULL, wbc, FS_DATA_IO);
}

/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is that cold data pages are written separately from
 * warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;
	int nwritten = 0;

	pagevec_init(&pvec);

	if (get_dirty_pages(mapping->host) <=
				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
		set_inode_flag(mapping->host, FI_HOT_DATA);
	else
		clear_inode_flag(mapping->host, FI_HOT_DATA);

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			/* give priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[DATA]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}

			done_index = page->index;
retry_write:
			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
							DATA, true, true);
				else
					goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_data_page(page, &submitted, wbc, io_type);
			if (unlikely(ret)) {
				/*
				 * keep nr_to_write, since vfs uses this to
				 * get # of written pages.
				 */
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
					continue;
				} else if (ret == -EAGAIN) {
					ret = 0;
					if (wbc->sync_mode == WB_SYNC_ALL) {
						cond_resched();
						congestion_wait(BLK_RW_ASYNC,
									HZ/50);
						goto retry_write;
					}
					continue;
				}
				done_index = page->index + 1;
				done = 1;
				break;
			} else if (submitted) {
				nwritten++;
			}

			if (--wbc->nr_to_write <= 0 &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	if (nwritten)
		f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
								NULL, 0, DATA);

	return ret;
}

static inline bool __should_serialize_io(struct inode *inode,
					struct writeback_control *wbc)
{
	if (!S_ISREG(inode->i_mode))
		return false;
	if (IS_NOQUOTA(inode))
		return false;
	if (wbc->sync_mode != WB_SYNC_ALL)
		return true;
	if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
		return true;
	return false;
}

static int __f2fs_write_data_pages(struct address_space *mapping,
				struct writeback_control *wbc,
				enum iostat_type io_type)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;
	bool locked = false;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			f2fs_available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[DATA]);
	else if (atomic_read(&sbi->wb_sync_req[DATA]))
		goto skip_write;

	if (__should_serialize_io(inode, wbc)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc, io_type);
	blk_finish_plug(&plug);

	if (locked)
		mutex_unlock(&sbi->writepages);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[DATA]);
	/*
	 * if some pages were truncated, we cannot guarantee its mapping->host
	 * to detect pending bios.
	 */

	f2fs_remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;

	return __f2fs_write_data_pages(mapping, wbc,
			F2FS_I(inode)->cp_task == current ?
			FS_CP_DATA_IO : FS_DATA_IO);
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_pagecache(inode, i_size);
		if (!IS_NOQUOTA(inode))
			f2fs_truncate_blocks(inode, i_size, true);

		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	}
}

static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei = {0,0,0};
	int err = 0;
	int flag;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
			!is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	/* f2fs_lock_op avoids race between write CP and convert_inline_page */
	if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
		flag = F2FS_GET_BLOCK_DEFAULT;
	else
		flag = F2FS_GET_BLOCK_PRE_AIO;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		__do_map_lock(sbi, flag, true);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA(inode)) {
			f2fs_do_read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
								true);
				WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		__do_map_lock(sbi, flag, false);
	return err;
}

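/*
 * Editor's note (illustrative, not part of f2fs): the extent-cache hit in
 * prepare_write_begin() above resolves a page index to an on-disk block with
 * plain offset arithmetic, ei.blk + (index - ei.fofs), which is only valid
 * while index lies inside [ei.fofs, ei.fofs + ei.len). The hypothetical
 * helper below restates that arithmetic with a local struct so the
 * relationship is explicit; the real struct extent_info is defined in f2fs.h.
 */
#if 0	/* example sketch only, never compiled */
struct example_extent {
	pgoff_t fofs;		/* first file page index covered */
	block_t blk;		/* block address backing that first index */
	unsigned int len;	/* number of contiguous blocks */
};

/* Map a page index to its block address, or NULL_ADDR if not covered. */
static block_t example_extent_blkaddr(const struct example_extent *ex,
					pgoff_t index)
{
	if (index < ex->fofs || index >= ex->fofs + ex->len)
		return NULL_ADDR;
	return ex->blk + (index - ex->fofs);
}
#endif
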
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false, drop_atomic = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	err = f2fs_is_checkpoint_ready(sbi);
	if (err)
		goto fail;

	if ((f2fs_is_atomic_file(inode) &&
			!f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
			is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		err = -ENOMEM;
		drop_atomic = true;
		goto fail;
	}

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	/*
	 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
	 * wait_for_stable_page. We will wait on that below with our IO control.
	 */
	page = f2fs_pagecache_get_page(mapping, index,
				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && !IS_NOQUOTA(inode) &&
			has_not_enough_free_secs(sbi, 0, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;

	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) {
		zero_user_segment(page, len, PAGE_SIZE);
		return 0;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
				DATA_GENERIC_ENHANCE_READ)) {
			err = -EFAULT;
			goto fail;
		}
		err = f2fs_submit_page_read(inode, page, blkaddr);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	if (drop_atomic)
		f2fs_drop_inmem_pages_all(sbi, false);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	/*
	 * This should come from len == PAGE_SIZE, and we expect copied
	 * to be PAGE_SIZE. Otherwise, we treat it with zero copied and
	 * let generic_perform_write() try to copy data again through copied=0.
	 */
	if (!PageUptodate(page)) {
		if (unlikely(copied != len))
			copied = 0;
		else
			SetPageUptodate(page);
	}
	if (!copied)
		goto unlock_out;

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);
unlock_out:
	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
	unsigned blkbits = i_blkbits;
	unsigned blocksize_mask = (1 << blkbits) - 1;
	unsigned long align = offset | iov_iter_alignment(iter);
	struct block_device *bdev = inode->i_sb->s_bdev;

	if (align & blocksize_mask) {
		if (bdev)
			blkbits = blksize_bits(bdev_logical_block_size(bdev));
		blocksize_mask = (1 << blkbits) - 1;
		if (align & blocksize_mask)
			return -EINVAL;
		return 1;
	}
	return 0;
}

static void f2fs_dio_end_io(struct bio *bio)
{
	struct f2fs_private_dio *dio = bio->bi_private;

	dec_page_count(F2FS_I_SB(dio->inode),
			dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);

	bio->bi_private = dio->orig_private;
	bio->bi_end_io = dio->orig_end_io;

	kvfree(dio);

	bio_endio(bio);
}

static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
							loff_t file_offset)
{
	struct f2fs_private_dio *dio;
	bool write = (bio_op(bio) == REQ_OP_WRITE);

	dio = f2fs_kzalloc(F2FS_I_SB(inode),
			sizeof(struct f2fs_private_dio), GFP_NOFS);
	if (!dio)
		goto out;

	dio->inode = inode;
	dio->orig_end_io = bio->bi_end_io;
	dio->orig_private = bio->bi_private;
	dio->write = write;

	bio->bi_end_io = f2fs_dio_end_io;
	bio->bi_private = dio;

	inc_page_count(F2FS_I_SB(inode),
			write ? F2FS_DIO_WRITE : F2FS_DIO_READ);

	submit_bio(bio);
	return;
out:
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}

static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int rw = iov_iter_rw(iter);
	int err;
	enum rw_hint hint = iocb->ki_hint;
	int whint_mode = F2FS_OPTION(sbi).whint_mode;
	bool do_opu;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err < 0 ? err : 0;
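	/*
	 * Editor's note: check_direct_IO() returns a negative errno only when
	 * the request is misaligned even for the device's logical block size.
	 * A return of 1 (misaligned for the filesystem block size but aligned
	 * for the device) is converted to 0 above, so that the caller can fall
	 * back to buffered I/O instead of failing the request.
	 */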

	if (f2fs_force_buffered_io(inode, iocb, iter))
		return 0;

	do_opu = allow_outplace_dio(inode, iocb, iter);

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
		iocb->ki_hint = WRITE_LIFE_NOT_SET;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
			iocb->ki_hint = hint;
			err = -EAGAIN;
			goto out;
		}
		if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
			up_read(&fi->i_gc_rwsem[rw]);
			iocb->ki_hint = hint;
			err = -EAGAIN;
			goto out;
		}
	} else {
		down_read(&fi->i_gc_rwsem[rw]);
		if (do_opu)
			down_read(&fi->i_gc_rwsem[READ]);
	}

	err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
			iter, rw == WRITE ? get_data_block_dio_write :
			get_data_block_dio, NULL, f2fs_dio_submit_bio,
			DIO_LOCKING | DIO_SKIP_HOLES);

	if (do_opu)
		up_read(&fi->i_gc_rwsem[READ]);

	up_read(&fi->i_gc_rwsem[rw]);

	if (rw == WRITE) {
		if (whint_mode == WHINT_MODE_OFF)
			iocb->ki_hint = hint;
		if (err > 0) {
			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
									err);
			if (!do_opu)
				set_inode_flag(inode, FI_UPDATE_WRITE);
		} else if (err < 0) {
			f2fs_write_failed(mapping, offset + count);
		}
	}

out:
	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_META);
		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		} else {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}
	}

	clear_cold_data(page);

	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return f2fs_drop_inmem_page(inode, page);

	f2fs_clear_page_private(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	clear_cold_data(page);
	f2fs_clear_page_private(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			f2fs_register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * This page has already been registered, so just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		f2fs_update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic written page is safe with the inmem_lock held */
	if (atomic_written) {
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		if (!mutex_trylock(&fi->inmem_lock))
			return -EAGAIN;
	}

	/* one extra reference was held for the atomic_write page */
	extra_count = atomic_written ? 1 : 0;
	rc = migrate_page_move_mapping(mapping, newpage,
				page, mode, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;
		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	if (PagePrivate(page)) {
		f2fs_set_page_private(newpage, page_private(page));
		f2fs_clear_page_private(page);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};

void f2fs_clear_page_cache_dirty_tag(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);
	__xa_clear_mark(&mapping->i_pages, page_index(page),
						PAGECACHE_TAG_DIRTY);
	xa_unlock_irqrestore(&mapping->i_pages, flags);
}

int __init f2fs_init_post_read_processing(void)
{
	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, 0);
	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void __exit f2fs_destroy_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}
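
/*
 * Editor's note (illustrative, not part of f2fs): the two routines above are
 * paired. bio_post_read_ctx_pool allocates its objects from
 * bio_post_read_ctx_cache, so teardown must destroy the mempool before the
 * cache backing it, exactly as f2fs_destroy_post_read_processing() does. A
 * hypothetical caller (the real registration lives in f2fs's module init
 * code) would pair them like this:
 */
#if 0	/* example sketch only, never compiled */
static int __init example_init(void)
{
	int err;

	err = f2fs_init_post_read_processing();
	if (err)
		return err;	/* -ENOMEM; neither cache nor pool is left behind */

	/* ... register the filesystem, create other caches, etc. ... */
	return 0;
}

static void __exit example_exit(void)
{
	/* ... unregister the filesystem ... */
	f2fs_destroy_post_read_processing();
}
#endif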