/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static void f2fs_read_end_io(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

static void f2fs_write_end_io(struct bio *bio, int err)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (unlikely(err)) {
			set_page_dirty(page);
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = sbi;

	return bio;
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}
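
/*
 * Illustrative usage (a summary of existing behaviour, not a new API):
 * writers queue pages into the per-type merged bio via
 * f2fs_submit_page_mbio() below, and the flusher or checkpoint path forces
 * everything out with, e.g.:
 *
 *	f2fs_submit_merged_bio(sbi, DATA, WRITE);
 *
 * Passing META_FLUSH upgrades the pending META bio to a flush (FUA unless
 * the "nobarrier" mount option is set) request.
 */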

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					struct f2fs_io_info *fio)
{
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(page, fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(sbi, fio->blk_addr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}

void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
					struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, fio->blk_addr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->blk_addr;
	f2fs_trace_ios(page, fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(page, fio);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	dn->data_blkaddr = NEW_ADDR;
	__set_data_blkaddr(dn);
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}
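
/*
 * Descriptive note: reserve_new_block() only charges the block quota and
 * stamps NEW_ADDR ("allocated but never written") into the node page. A
 * real on-disk address is assigned later, by write_data_page() during
 * writeback or by __allocate_data_block() on the direct IO path.
 */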

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return 0;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}
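
/*
 * Worked example (illustrative numbers): with a cached extent of
 * {fofs = 100, blk_addr = 5000, len = 8}, a lookup of pgofs = 103 hits and
 * maps bh_result to block 5003 with b_size covering the five remaining
 * blocks (103..107), so the caller can keep reading contiguously without
 * walking node pages.
 */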

void update_extent_cache(struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (dn->data_blkaddr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = dn->data_blkaddr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && dn->data_blkaddr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && dn->data_blkaddr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
			fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext.ext_lock);
	if (need_update)
		sync_inode_page(dn);
	return;
}

struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = sync ? READ_SYNC : READA,
	};

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* By fallocate(), there is no cached page, but with NEW_ADDR */
	if (unlikely(dn.data_blkaddr == NEW_ADDR))
		return ERR_PTR(-EINVAL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) should be able to know whether this page
 * exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = READ_SYNC,
	};
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In that case, its blkaddr may remain NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}
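
/*
 * Caller-side sketch (illustrative): find_data_page() may hand back a page
 * still under read IO when sync is false, whereas get_lock_data_page()
 * always returns a locked, uptodate page or an ERR_PTR. A GC-style caller
 * would do roughly:
 *
 *	page = get_lock_data_page(inode, index);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... use page ...
 *	f2fs_put_page(page, 1);
 */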

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		err = -ENOMEM;
		goto put_err;
	}

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		struct f2fs_io_info fio = {
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
		};
		err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
		if (err)
			goto put_err;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto put_err;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;

put_err:
	f2fs_put_dnode(&dn);
	return ERR_PTR(err);
}

static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, NULL_ADDR, &dn->data_blkaddr, &sum, seg);

	/* direct IO doesn't use the extent cache, to maximize performance */
	__set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));

	return 0;
}
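
/*
 * Contract reminder for the get_block_t helpers below (restating the generic
 * buffer_head convention, nothing f2fs-specific): on entry,
 * bh_result->b_size holds the maximum number of bytes the caller wants
 * mapped; on a mapped return, b_blocknr is the first physical block and
 * b_size is shrunk to the contiguous extent actually found (or allocated,
 * when create != 0).
 */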

/*
 * get_data_block() now supports readahead/bmap/rw direct_IO with a mapped bh.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, bool fiemap)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	bool allocated = false;

	/* Get the page offset from the block offset(iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result))
		goto out;

	if (create) {
		f2fs_balance_fs(F2FS_I_SB(inode));
		f2fs_lock_op(F2FS_I_SB(inode));
	}

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR && !fiemap)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else {
		goto put_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	bh_result->b_size = (((size_t)1) << blkbits);
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR && !fiemap)
			goto put_out;

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > (bh_result->b_size >> blkbits)) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for the readahead */
		if (blkaddr == (bh_result->b_blocknr + ofs)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			bh_result->b_size += (((size_t)1) << blkbits);
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(F2FS_I_SB(inode));
out:
	trace_f2fs_get_data_block(inode, iblock, bh_result, err);
	return err;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, false);
}

static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, true);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo,
				start, len, get_data_block_fiemap);
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = mpage_readpage(page, get_data_block);

	return ret;
}
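
/*
 * Note on the -EAGAIN convention above (a summary, not new behaviour):
 * f2fs_read_inline_data() returns -EAGAIN when the page can no longer be
 * served from inline data (e.g. it raced with inline conversion), in which
 * case we simply fall back to the normal block-mapped read path via
 * mpage_readpage().
 */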

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return mpage_readpages(mapping, pages, nr_pages, get_data_block);
}

int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it is better to do in-place writes for the updated data.
	 */
	if (unlikely(fio->blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(page, fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
	} else {
		write_data_page(page, &dn, fio);
		update_extent_cache(&dn);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}
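
/*
 * In short (descriptive note): do_write_data_page() chooses between
 * in-place update (IPU, rewrite_data_page) and out-of-place update (OPU,
 * write_data_page). IPU avoids consuming fresh segments when free space is
 * tight enough that SSR would kick in, at the cost of the log-structured
 * write pattern; cold data and never-written (NEW_ADDR) blocks always go
 * out-of-place.
 */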

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of the file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(page, &fio);
		goto done;
	}

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(page, &fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		truncate_blocks(inode, inode->i_size, true);
	}
}
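
/*
 * The buffered write path below follows the generic ->write_begin /
 * ->write_end contract: write_begin prepares (and possibly reads) the
 * target page and returns it locked, generic code copies the user data in,
 * and write_end dirties the page and grows i_size. On a write_begin
 * failure, the speculative block reservation must be undone, which is what
 * f2fs_write_failed() above handles by truncating back to the old i_size.
 */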

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page, *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	f2fs_lock_op(sbi);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_fail;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
			goto put_next;
		}
		err = f2fs_convert_inline_page(&dn, page);
		if (err)
			goto put_fail;
	}
	err = f2fs_reserve_block(&dn, index);
	if (err)
		goto put_fail;
put_next:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	f2fs_wait_on_page_writeback(page, DATA);

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
		};
		err = f2fs_submit_page_bio(sbi, page, &fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

put_fail:
	f2fs_put_dnode(&dn);
unlock_fail:
	f2fs_unlock_op(sbi);
	f2fs_put_page(page, 1);
fail:
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

static int check_direct_IO(struct inode *inode, int rw,
		struct iov_iter *iter, loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (rw == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}
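
/*
 * Alignment example for check_direct_IO() (illustrative numbers): with a
 * 4KB block size, blocksize_mask is 0xfff, so a direct write at offset 8192
 * with 4KB-aligned iovecs passes, while offset 8200 (8192 + 8) fails the
 * "offset & blocksize_mask" test. The caller below then returns 0 from
 * ->direct_IO, which makes generic code fall back to buffered IO.
 */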

static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		struct iov_iter *iter, loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (check_direct_IO(inode, rw, iter, offset))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	err = blockdev_direct_IO(rw, iocb, inode, iter, offset, get_data_block);
	if (err < 0 && (rw & WRITE))
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;

	if (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE)
		return;

	if (PageDirty(page))
		inode_dec_dirty_pages(inode);
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		register_inmem_page(inode, page);
		return 1;
	}

	mark_inode_dirty(inode);

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	return generic_block_bmap(mapping, block, get_data_block);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};
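
/*
 * Hook-up note (for readers; the assignment itself lives in f2fs's inode
 * setup code, not in this file): a file's mapping is pointed at this table
 * roughly as
 *
 *	inode->i_mapping->a_ops = &f2fs_dblock_aops;
 *
 * so every page-cache operation on file data funnels through the helpers
 * defined above.
 */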