/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static void f2fs_read_end_io(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

static void f2fs_write_end_io(struct bio *bio, int err)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (unlikely(err)) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (sbi->wait_io) {
		complete(sbi->wait_io);
		sbi->wait_io = NULL;
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = sbi;

	return bio;
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;
	int rw;

	if (!io->bio)
		return;

	rw = fio->rw;

	if (is_read_io(rw)) {
		trace_f2fs_submit_read_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		submit_bio(rw, io->bio);
	} else {
		trace_f2fs_submit_write_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		/*
		 * META_FLUSH is only from the checkpoint procedure, and we
		 * should wait for this metadata bio for FS consistency.
		 */
		if (fio->type == META_FLUSH) {
			DECLARE_COMPLETION_ONSTACK(wait);
			io->sbi->wait_io = &wait;
			submit_bio(rw, io->bio);
			wait_for_completion(&wait);
		} else {
			submit_bio(rw, io->bio);
		}
	}

	io->bio = NULL;
}

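/*
 * Flush the bio currently being merged for the given page type.
 * In the checkpoint path, a pending META bio is upgraded to META_FLUSH
 * with flush/FUA flags before it is submitted.
 */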
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int rw)
{
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, blk_addr, rw);

	/* Allocate a new bio */
	bio = __bio_alloc(sbi, blk_addr, 1, is_read_io(rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(rw, bio);
	return 0;
}

void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
			block_t blk_addr, struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, blk_addr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));

		io->bio = __bio_alloc(sbi, blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = blk_addr;

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, blk_addr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

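/*
 * Look up (or allocate, via ALLOC_NODE) the dnode covering @index and
 * reserve a new block there if none has been allocated yet.  The dnode is
 * released again unless the caller passed in its own inode page.
 */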
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	/* if inode_page exists, index should be zero */
	f2fs_bug_on(!need_put && index);

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return 0;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

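/*
 * Record the new block address in the node page and keep the single
 * in-inode extent cache consistent: initialize it, merge at the front or
 * back, split it around the updated offset, or drop it entirely once it
 * becomes too fragmented.
 */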
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	f2fs_bug_on(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext.ext_lock);
	if (need_update)
		sync_inode_page(dn);
	return;
}

struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* By fallocate(), there is no cached page, but with NEW_ADDR */
	if (unlikely(dn.data_blkaddr == NEW_ADDR))
		return ERR_PTR(-EINVAL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * This is because the callers, functions in dir.c and GC, should be able to
 * know whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		err = -ENOMEM;
		goto put_err;
	}

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
								READ_SYNC);
		if (err)
			goto put_err;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto put_err;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;

put_err:
	f2fs_put_dnode(&dn);
	return ERR_PTR(err);
}

static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct f2fs_summary sum;
	block_t new_blkaddr;
	struct node_info ni;
	int type;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	type = CURSEG_WARM_DATA;

	allocate_data_block(sbi, NULL, NULL_ADDR, &new_blkaddr, &sum, type);

	/* direct IO doesn't use extent cache to maximize the performance */
	set_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);
	update_extent_cache(new_blkaddr, dn);
	clear_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);

	dn->data_blkaddr = new_blkaddr;
	return 0;
}

/*
 * get_data_block() now supports readahead/bmap/rw direct_IO with mapped bh.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	bool allocated = false;

	/* Get the page offset from the block offset(iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result))
		goto out;

	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else {
		goto put_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	bh_result->b_size = (((size_t)1) << blkbits);
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR)
			goto put_out;

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > (bh_result->b_size >> blkbits)) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for readahead */
		if (blkaddr == (bh_result->b_blocknr + ofs)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			bh_result->b_size += (((size_t)1) << blkbits);
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(sbi);
out:
	trace_f2fs_get_data_block(inode, iblock, bh_result, err);
	return err;
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo, start, len, get_data_block);
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	else
		ret = mpage_readpage(page, get_data_block);

	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return mpage_readpages(mapping, pages, nr_pages, get_data_block);
}

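/*
 * Write out one dirty data page: either rewrite it in place when an SSR
 * allocation would prefer in-place update, or allocate a new block address
 * and update the extent cache accordingly.
 */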
int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	block_t old_blkaddr, new_blkaddr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blkaddr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR, it is better to do in-place
	 * writes for the updated data.
	 */
	if (unlikely(old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(page, old_blkaddr, fio);
	} else {
		write_data_page(page, &dn, &new_blkaddr, fio);
		update_extent_cache(new_blkaddr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(sbi->por_doing))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		err = do_write_data_page(page, &fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode) || f2fs_may_inline(inode))
		err = f2fs_write_inline_data(inode, page, offset);
	else
		err = do_write_data_page(page, &fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_dents(inode);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

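/*
 * For regular files, write_cache_pages() runs under the sbi->writepages
 * mutex; the write bio that is still being merged is flushed once the
 * whole batch has been submitted.
 */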
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_dents(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_dents(inode);
	return 0;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);
repeat:
	err = f2fs_convert_inline_data(inode, pos + len);
	if (err)
		return err;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	/* to avoid latency during memory pressure */
	unlock_page(page);

	*pagep = page;

	if (f2fs_has_inline_data(inode) && (pos + len) <= MAX_INLINE_DATA)
		goto inline_data;

	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	f2fs_unlock_op(sbi);

	if (err) {
		f2fs_put_page(page, 0);
		return err;
	}
inline_data:
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	f2fs_wait_on_page_writeback(page, DATA);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		if (f2fs_has_inline_data(inode)) {
			err = f2fs_read_inline_data(inode, page);
			if (err) {
				page_cache_release(page);
				return err;
			}
		} else {
			err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
							READ_SYNC);
			if (err)
				return err;
		}

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	SetPageUptodate(page);
	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

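/*
 * Direct I/O writes must be aligned to the filesystem block size; for
 * misaligned requests, f2fs_direct_IO() bails out (returns 0) so the caller
 * falls back to buffered I/O.  Reads are not checked here.
 */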
static int check_direct_IO(struct inode *inode, int rw,
		struct iov_iter *iter, loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (rw == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		struct iov_iter *iter, loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	/* Let buffer I/O handle the inline data case. */
	if (f2fs_has_inline_data(inode))
		return 0;

	if (check_direct_IO(inode, rw, iter, offset))
		return 0;

	/* clear fsync mark to recover these blocks */
	fsync_mark_clear(F2FS_SB(inode->i_sb), inode->i_ino);

	return blockdev_direct_IO(rw, iocb, inode, iter, offset,
							get_data_block);
}

static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	if (PageDirty(page))
		inode_dec_dirty_dents(inode);
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);
	mark_inode_dirty(inode);

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	return generic_block_bmap(mapping, block, get_data_block);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};