/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static void f2fs_read_end_io(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

static void f2fs_write_end_io(struct bio *bio, int err)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (unlikely(err)) {
			set_page_dirty(page);
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = sbi;

	return bio;
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					struct f2fs_io_info *fio)
{
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(page, fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(sbi, fio->blk_addr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}

void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
					struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, fio->blk_addr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->blk_addr;
	f2fs_trace_ios(page, fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(page, fio);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	dn->data_blkaddr = NEW_ADDR;
	__set_data_blkaddr(dn);
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return 0;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		set_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

void update_extent_cache(struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (dn->data_blkaddr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = dn->data_blkaddr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && dn->data_blkaddr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && dn->data_blkaddr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
			fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext.ext_lock);
	if (need_update)
		sync_inode_page(dn);
	return;
}

struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = sync ? READ_SYNC : READA,
	};

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* After fallocate(), there is no cached page, but its blkaddr is NEW_ADDR */
	if (unlikely(dn.data_blkaddr == NEW_ADDR))
		return ERR_PTR(-EINVAL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * Because the callers, such as functions in dir.c and GC, should be able to
 * know whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = READ_SYNC,
	};
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In that case, its blkaddr remains NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		err = -ENOMEM;
		goto put_err;
	}

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		struct f2fs_io_info fio = {
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
		};
		err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
		if (err)
			goto put_err;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto put_err;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;

put_err:
	f2fs_put_dnode(&dn);
	return ERR_PTR(err);
}

static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, NULL_ADDR, &dn->data_blkaddr, &sum, seg);

	/* direct IO doesn't use extent cache to maximize the performance */
	__set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));

	return 0;
}

static void __allocate_data_blocks(struct inode *inode, loff_t offset,
							size_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	u64 start = F2FS_BYTES_TO_BLK(offset);
	u64 len = F2FS_BYTES_TO_BLK(count);
	bool allocated;
	u64 end_offset;

	while (len) {
		f2fs_balance_fs(sbi);
		f2fs_lock_op(sbi);

		/* When reading holes, we need its node page */
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		if (get_dnode_of_data(&dn, start, ALLOC_NODE))
			goto out;

		allocated = false;
		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		while (dn.ofs_in_node < end_offset && len) {
			if (dn.data_blkaddr == NULL_ADDR) {
				if (__allocate_data_block(&dn))
					goto sync_out;
				allocated = true;
			}
			len--;
			start++;
			dn.ofs_in_node++;
		}

		if (allocated)
			sync_inode_page(&dn);

		f2fs_put_dnode(&dn);
		f2fs_unlock_op(sbi);
	}
	return;

sync_out:
	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);
	return;
}

/*
 * get_data_block() now supports readahead/bmap/rw direct_IO with mapped bh.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, bool fiemap)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	bool allocated = false;

	/* Get the page offset from the block offset(iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result))
		goto out;

	if (create)
		f2fs_lock_op(F2FS_I_SB(inode));

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR && !fiemap)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		set_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		set_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else {
		goto put_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	bh_result->b_size = (((size_t)1) << blkbits);
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR && !fiemap)
			goto put_out;

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > (bh_result->b_size >> blkbits)) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for the readahead */
		if (blkaddr == (bh_result->b_blocknr + ofs)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			bh_result->b_size += (((size_t)1) << blkbits);
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(F2FS_I_SB(inode));
out:
	trace_f2fs_get_data_block(inode, iblock, bh_result, err);
	return err;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, false);
}

static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, true);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo,
				start, len, get_data_block_fiemap);
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = mpage_readpage(page, get_data_block);

	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return mpage_readpages(mapping, pages, nr_pages, get_data_block);
}

int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR,
	 * it is better to do in-place writes for the updated data.
	 */
	if (unlikely(fio->blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(page, fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
	} else {
		write_data_page(page, &dn, fio);
		update_extent_cache(&dn);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out of range of the file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(page, &fio);
		goto done;
	}

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(page, &fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		truncate_blocks(inode, inode->i_size, true);
	}
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page, *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	f2fs_lock_op(sbi);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_fail;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
			goto put_next;
		}
		err = f2fs_convert_inline_page(&dn, page);
		if (err)
			goto put_fail;
	}
	err = f2fs_reserve_block(&dn, index);
	if (err)
		goto put_fail;
put_next:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	f2fs_wait_on_page_writeback(page, DATA);

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
		};
		err = f2fs_submit_page_bio(sbi, page, &fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

put_fail:
	f2fs_put_dnode(&dn);
unlock_fail:
	f2fs_unlock_op(sbi);
	f2fs_put_page(page, 1);
fail:
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

static int check_direct_IO(struct inode *inode, int rw,
		struct iov_iter *iter, loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (rw == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}
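
/*
 * Usage note (an assumption about the generic direct I/O convention, not a
 * statement from the original comments): when the alignment checks above
 * fail, f2fs_direct_IO() below returns 0 instead of an error.  A ->direct_IO
 * result of zero bytes presumably lets the generic read/write paths fall back
 * to buffered I/O for misaligned requests, while aligned write requests have
 * their blocks preallocated by __allocate_data_blocks() and are then handed
 * to blockdev_direct_IO() with get_data_block() as the mapping callback.
 */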

static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		struct iov_iter *iter, loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (check_direct_IO(inode, rw, iter, offset))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	if (rw & WRITE)
		__allocate_data_blocks(inode, offset, count);

	err = blockdev_direct_IO(rw, iocb, inode, iter, offset, get_data_block);
	if (err < 0 && (rw & WRITE))
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}
	ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		register_inmem_page(inode, page);
		return 1;
	}

	mark_inode_dirty(inode);

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	return generic_block_bmap(mapping, block, get_data_block);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};