/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static void f2fs_read_end_io(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

static void f2fs_write_end_io(struct bio *bio, int err)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (unlikely(err)) {
			set_page_dirty(page);
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (sbi->wait_io) {
		complete(sbi->wait_io);
		sbi->wait_io = NULL;
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = sbi;

	return bio;
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;
	int rw;

	if (!io->bio)
		return;

	rw = fio->rw;

	if (is_read_io(rw)) {
		trace_f2fs_submit_read_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		submit_bio(rw, io->bio);
	} else {
		trace_f2fs_submit_write_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		/*
		 * META_FLUSH is only from the checkpoint procedure, and we
		 * should wait for this metadata bio for FS consistency.
		 */
		if (fio->type == META_FLUSH) {
			DECLARE_COMPLETION_ONSTACK(wait);
			io->sbi->wait_io = &wait;
			submit_bio(rw, io->bio);
			wait_for_completion(&wait);
		} else {
			submit_bio(rw, io->bio);
		}
	}

	io->bio = NULL;
}

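/*
 * Flush the bio currently being merged for the given page type, if any.
 * During the checkpoint procedure, META is upgraded to META_FLUSH so the
 * submitted bio carries the flush/FUA semantics required for consistency.
 */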
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int rw)
{
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, blk_addr, rw);

	/* Allocate a new bio */
	bio = __bio_alloc(sbi, blk_addr, 1, is_read_io(rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(rw, bio);
	return 0;
}

void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
			block_t blk_addr, struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, blk_addr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = blk_addr;

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, blk_addr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

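/*
 * Look up (or allocate) the dnode covering @index and reserve a new block
 * there if none is allocated yet.  The dnode is released again unless the
 * caller passed in its own inode page.
 */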
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	/* if inode_page exists, index should be zero */
	f2fs_bug_on(F2FS_I_SB(dn->inode), !need_put && index);

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return 0;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

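/*
 * Update the per-inode extent cache for the block just mapped at @dn:
 * start a new extent, extend an existing one by a front or back merge,
 * or split it, and drop the cache entirely once it becomes too small.
 */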
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	f2fs_bug_on(F2FS_I_SB(dn->inode), blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext.ext_lock);
	if (need_update)
		sync_inode_page(dn);
	return;
}

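/*
 * Find the data page at @index in the page cache, or read it from its
 * on-disk block address.  The returned page holds a reference; with
 * sync == false the read is only queued as readahead, so the page may
 * not yet be uptodate when this returns.
 */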
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* A block preallocated by fallocate() has no cached page, only NEW_ADDR */
	if (unlikely(dn.data_blkaddr == NEW_ADDR))
		return ERR_PTR(-EINVAL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) need to know whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr remains NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page,
					dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		err = -ENOMEM;
		goto put_err;
	}

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_submit_page_bio(F2FS_I_SB(inode), page,
						dn.data_blkaddr, READ_SYNC);
		if (err)
			goto put_err;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto put_err;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;

put_err:
	f2fs_put_dnode(&dn);
	return ERR_PTR(err);
}

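/*
 * Allocate an on-disk block for the data slot described by @dn, used by
 * the block mapping path below.  The new address is recorded in the node
 * page and i_size is pushed forward to cover the new block.
 */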
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	block_t new_blkaddr;
	struct node_info ni;
	pgoff_t fofs;
	int type;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	type = CURSEG_WARM_DATA;

	allocate_data_block(sbi, NULL, NULL_ADDR, &new_blkaddr, &sum, type);

	/* direct IO doesn't use extent cache to maximize the performance */
	set_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);
	update_extent_cache(new_blkaddr, dn);
	clear_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));

	dn->data_blkaddr = new_blkaddr;
	return 0;
}

/*
 * get_data_block() now supports readahead/bmap/rw direct_IO with a mapped bh.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, bool fiemap)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	bool allocated = false;

	/* Get the page offset from the block offset (iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result))
		goto out;

	if (create) {
		f2fs_balance_fs(F2FS_I_SB(inode));
		f2fs_lock_op(F2FS_I_SB(inode));
	}

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR && !fiemap)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else {
		goto put_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	bh_result->b_size = (((size_t)1) << blkbits);
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR && !fiemap)
			goto put_out;

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > (bh_result->b_size >> blkbits)) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for the readahead */
		if (blkaddr == (bh_result->b_blocknr + ofs)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			bh_result->b_size += (((size_t)1) << blkbits);
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(F2FS_I_SB(inode));
out:
	trace_f2fs_get_data_block(inode, iblock, bh_result, err);
	return err;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, false);
}

static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, true);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo,
				start, len, get_data_block_fiemap);
}

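/*
 * ->readpage() entry: inline-data inodes are served from the inode block
 * directly, everything else goes through mpage_readpage() with
 * get_data_block() as the block mapper.
 */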
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	else
		ret = mpage_readpage(page, get_data_block);

	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return mpage_readpages(mapping, pages, nr_pages, get_data_block);
}

int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	block_t old_blkaddr, new_blkaddr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blkaddr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR,
	 * it is better to do in-place writes for the updated data.
	 */
	if (unlikely(old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(page, old_blkaddr, fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
	} else {
		write_data_page(page, &dn, &new_blkaddr, fio);
		update_extent_cache(new_blkaddr, &dn);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

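/*
 * ->writepage() entry: pages fully beyond EOF are skipped, a partial last
 * page is zeroed beyond i_size, dentry pages are written under checkpoint
 * control, and data pages are redirtied when the write cannot proceed
 * safely.
 */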
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(sbi->por_doing))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(page, &fio);
		goto done;
	}

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		unlock_page(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode) || f2fs_may_inline(inode))
		err = f2fs_write_inline_data(inode, page, offset);
	else
		err = do_write_data_page(page, &fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		truncate_blocks(inode, inode->i_size, true);
	}
}

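/*
 * ->write_begin() entry: convert inline data when needed, reserve the
 * block for @index, and make the page uptodate by zeroing or reading it
 * before the caller copies in the new data.
 */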
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);
repeat:
	err = f2fs_convert_inline_data(inode, pos + len, NULL);
	if (err)
		goto fail;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	/* to avoid latency during memory pressure */
	unlock_page(page);

	*pagep = page;

	if (f2fs_has_inline_data(inode) && (pos + len) <= MAX_INLINE_DATA)
		goto inline_data;

	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	f2fs_unlock_op(sbi);
	if (err) {
		f2fs_put_page(page, 0);
		goto fail;
	}
inline_data:
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	f2fs_wait_on_page_writeback(page, DATA);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		if (f2fs_has_inline_data(inode)) {
			err = f2fs_read_inline_data(inode, page);
			if (err) {
				page_cache_release(page);
				goto fail;
			}
		} else {
			err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
							READ_SYNC);
			if (err)
				goto fail;
		}

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;
fail:
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode))
		register_inmem_page(inode, page);
	else
		set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

static int check_direct_IO(struct inode *inode, int rw,
		struct iov_iter *iter, loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (rw == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

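/*
 * Direct I/O falls back to buffered I/O (by returning 0) for inline-data
 * inodes and for writes that are not block aligned; on a failed direct
 * write, blocks allocated beyond the old i_size are truncated again.
 */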
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		struct iov_iter *iter, loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	/* Let buffer I/O handle the inline data case. */
	if (f2fs_has_inline_data(inode))
		return 0;

	if (check_direct_IO(inode, rw, iter, offset))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	err = blockdev_direct_IO(rw, iocb, inode, iter, offset, get_data_block);
	if (err < 0 && (rw & WRITE))
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;

	if (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE)
		return;

	if (PageDirty(page))
		inode_dec_dirty_pages(inode);
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);
	mark_inode_dirty(inode);

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	return generic_block_bmap(mapping, block, get_data_block);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};