/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_error)) {
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) && wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
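
/*
 * Allocate a bio covering @npages pages starting at @blk_addr and wire up
 * the matching completion handler: reads complete through
 * f2fs_read_end_io(), writes through f2fs_write_end_io() with the sb_info
 * passed as private data.
 */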
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}

static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
						struct page *page, nid_t ino,
						enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = &sbi->write_io[btype];
	bool ret;

	down_read(&io->io_rwsem);
	ret = __has_merged_page(io, inode, page, ino);
	up_read(&io->io_rwsem);
	return ret;
}

static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	if (!__has_merged_page(io, inode, page, ino))
		goto out;

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
out:
	up_write(&io->io_rwsem);
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
								int rw)
{
	__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
}

void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	if (has_merged_page(sbi, inode, page, ino, type))
		__f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
}
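
/*
 * Force-submit whatever is still pending in the per-type merged write
 * queues (DATA, NODE and META) so that no buffered bio is left behind.
 */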
void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	f2fs_submit_merged_bio(sbi, META, WRITE);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}

void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(sbi, fio->old_blkaddr);
	verify_block_addr(sbi, fio->new_blkaddr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
						bio_blocks, is_read);
		io->fio = *fio;
	}

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE, true);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
	if (set_page_dirty(node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	dn->data_blkaddr = NEW_ADDR;
	set_data_blkaddr(dn);
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}
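
/*
 * Look up the dnode for @index and, if the block has never been allocated
 * (NULL_ADDR), reserve a new one.  The dnode is released on error, or when
 * the caller did not pass in its own inode page.
 */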
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei;
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int rw, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = rw,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, READ_SYNC, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * The callers, functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
			((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;
}

static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));
	return 0;
}
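
/*
 * Preallocate blocks for a write before the data is copied in: convert away
 * from inline data when needed and map the range covered by @from with
 * F2FS_GET_BLOCK_PRE_DIO (direct I/O) or F2FS_GET_BLOCK_PRE_AIO (buffered).
 */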
ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	ssize_t ret = 0;

	map.m_lblk = F2FS_BYTES_TO_BLK(iocb->ki_pos);
	map.m_len = F2FS_BLK_ALIGN(iov_iter_count(from));
	map.m_next_pgofs = NULL;

	if (f2fs_encrypted_inode(inode))
		return 0;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}
	if (!f2fs_has_inline_data(inode))
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	return ret;
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with
 * the f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	struct extent_info ei;
	bool allocated = false;
	block_t blkaddr;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

next_dnode:
	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR)
					err = reserve_new_block(&dn);
			} else {
				err = __allocate_data_block(&dn);
			}
			if (err)
				goto sync_out;
			allocated = true;
			map->m_flags = F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR) {
				if (flag == F2FS_GET_BLOCK_BMAP)
					err = -ENOENT;
				goto sync_out;
			}
		}
	}
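
	/*
	 * Either start a new extent with this block, extend the current one
	 * when it is physically contiguous (or being preallocated), or stop
	 * and hand back what has been collected so far.
	 */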
	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO ||
			flag == F2FS_GET_BLOCK_PRE_AIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

	dn.ofs_in_node++;
	pgofs++;

	if (map->m_len < maxblocks) {
		if (dn.ofs_in_node < end_offset)
			goto next_block;

		if (allocated)
			sync_inode_page(&dn);
		f2fs_put_dnode(&dn);

		if (create) {
			f2fs_unlock_op(sbi);
			f2fs_balance_fs(sbi, allocated);
		}
		allocated = false;
		goto next_dnode;
	}

sync_out:
	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, allocated);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;

	ret = f2fs_map_blocks(inode, &map, create, flag);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO, NULL);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	loff_t isize;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	inode_lock(inode);

	isize = i_size_read(inode);
	if (start >= isize)
		goto out;

	if (start + len > isize)
		len = isize - start;

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;
		/* Go through holes until passing the EOF */
		if (blk_to_logical(inode, start_blk) < isize)
			goto prep_next;
		/* Found a hole beyond isize means no more extents.
		 * Note that the premise is that filesystems don't
		 * punch holes beyond isize and keep size unchanged.
		 */
		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change is that block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						page->index, GFP_KERNEL))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_CACHE_SIZE);
			SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			submit_bio(READ, bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct fscrypt_ctx *ctx = NULL;

			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {

				ctx = fscrypt_get_ctx(inode);
				if (IS_ERR(ctx))
					goto set_error_page;

				/* wait for the page to be moved by cleaning */
				f2fs_wait_on_encrypted_page_writeback(
						F2FS_I_SB(inode), block_nr);
			}

			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					fscrypt_release_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
			bio->bi_end_io = f2fs_read_end_io;
			bio->bi_private = ctx;
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			submit_bio(READ, bio);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			page_cache_release(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(READ, bio);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = list_entry(pages->prev, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}
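
/*
 * Write one data page out: look up its current block address, encrypt the
 * page when required, and then either rewrite it in place (IPU) or write
 * it to a newly allocated block (OPU).
 */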
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {

		/* wait for GCed encrypted page writeback */
		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
							fio->old_blkaddr);

		fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
			goto out_writepage;
		}
	}

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it is better to do in-place writes for updated data.
	 */
	if (unlikely(fio->old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			!IS_ATOMIC_WRITTEN_PAGE(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}
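
/*
 * ->writepage callback.  Pages entirely beyond i_size are skipped, the
 * page straddling i_size is zeroed past EOF, and the rest are written
 * out through do_write_data_page() or the inline data path.
 */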
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(&fio);
		goto done;
	}

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(&fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
		remove_dirty_inode(inode);
	}

	unlock_page(page);
	f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is making the write step of cold data pages separate from
 * warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
			struct writeback_control *wbc, writepage_t writepage,
			void *data)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;
	int step = 0;

	pagevec_init(&pvec, 0);
next:
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (step == is_cold_data(page))
				goto continue_unlock;

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					done_index = page->index + 1;
					done = 1;
					break;
				}
			}

			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (step < 1) {
		step++;
		goto next;
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
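
/*
 * ->writepages callback.  Writeback is skipped for special files, for
 * directories with only a few dirty pages under WB_SYNC_NONE, during file
 * defragmentation and during recovery; otherwise the dirty pages are pushed
 * out through f2fs_write_cache_pages() and the merged bio is submitted.
 */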
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(F2FS_I(inode), FI_DO_DEFRAG))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_ALL) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0, DATA, WRITE);
	if (locked)
		mutex_unlock(&sbi->writepages);

	remove_dirty_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
	}
}
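
/*
 * Helper for f2fs_write_begin(): find (or reserve) the block backing the
 * page being written.  Inline-data inodes and writes at or beyond i_size are
 * handled under f2fs_lock_op(); the resulting block address and whether the
 * node page was dirtied are handed back to the caller.
 */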
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei;
	int err = 0;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
					len == PAGE_CACHE_SIZE)
		return 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		f2fs_lock_op(sbi);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || (!err && dn.data_blkaddr == NULL_ADDR)) {
				f2fs_put_dnode(&dn);
				f2fs_lock_op(sbi);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		f2fs_unlock_op(sbi);
	return err;
}
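
/*
 * ->write_begin callback.  Converts inline data when writing beyond page #0,
 * grabs and prepares the target page, and pre-reads or zeroes it when the
 * write does not cover the whole page.
 */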
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	bool need_balance = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);

	if (len == PAGE_CACHE_SIZE)
		goto out_update;
	if (PageUptodate(page))
		goto out_clear;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out_update;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.rw = READ_SYNC,
			.old_blkaddr = blkaddr,
			.new_blkaddr = blkaddr,
			.page = page,
			.encrypted_page = NULL,
		};
		err = f2fs_submit_page_bio(&fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}

		/* avoid symlink page */
		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
			err = fscrypt_decrypt_page(page);
			if (err)
				goto fail;
		}
	}
out_update:
	SetPageUptodate(page);
out_clear:
	clear_cold_data(page);
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
	}

	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}
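
/*
 * ->direct_IO callback.  Falls back to buffered I/O (returns 0) for
 * encrypted regular files; otherwise the request is handed to
 * blockdev_direct_IO() with get_data_block_dio() as the block mapper.
 */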
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			      loff_t offset)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
	if (err < 0 && iov_iter_rw(iter) == WRITE)
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);

	return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
			  unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return;

	ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * Previously, this page has been registered, so we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};