data.c: fs/f2fs/data.c compared between commits 8df22a4d6f5b81c9c1703579d4907b57002689ed (old) and 88b88a66797159949cec32eaab12b4968f6fae2d (new). Where a line changed, the old version is shown immediately followed by the new one.
1/*
2 * fs/f2fs/data.c
3 *
4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as

--- 71 unchanged lines hidden ---

80 int npages, bool is_read)
81{
82 struct bio *bio;
83
84 /* No failure on bio allocation */
85 bio = bio_alloc(GFP_NOIO, npages);
86
87 bio->bi_bdev = sbi->sb->s_bdev;
88 bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
88 bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
89 bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
90 bio->bi_private = sbi;
91
92 return bio;
93}
94
95static void __submit_merged_bio(struct f2fs_bio_info *io)
96{

--- 91 unchanged lines hidden ---

188 if (!is_read)
189 inc_page_count(sbi, F2FS_WRITEBACK);
190
191 if (io->bio && (io->last_block_in_bio != blk_addr - 1 ||
192 io->fio.rw != fio->rw))
193 __submit_merged_bio(io);
194alloc_new:
195 if (io->bio == NULL) {
196 int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
196 int bio_blocks = MAX_BIO_BLOCKS(sbi);
197
198 io->bio = __bio_alloc(sbi, blk_addr, bio_blocks, is_read);
199 io->fio = *fio;
200 }
201
202 if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
203 PAGE_CACHE_SIZE) {
204 __submit_merged_bio(io);

--- 26 unchanged lines hidden ---

231 /* Get physical address of data block */
232 addr_array = blkaddr_in_node(rn);
233 addr_array[ofs_in_node] = cpu_to_le32(new_addr);
234 set_page_dirty(node_page);
235}
236
237int reserve_new_block(struct dnode_of_data *dn)
238{
239 struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
239 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
240
241 if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
242 return -EPERM;
243 if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
244 return -ENOSPC;
245
246 trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);
247

--- 5 unchanged lines hidden ---

253}
254
255int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
256{
257 bool need_put = dn->inode_page ? false : true;
258 int err;
259
260 /* if inode_page exists, index should be zero */
261 f2fs_bug_on(!need_put && index);
261 f2fs_bug_on(F2FS_I_SB(dn->inode), !need_put && index);
262
263 err = get_dnode_of_data(dn, index, ALLOC_NODE);
264 if (err)
265 return err;
266
267 if (dn->data_blkaddr == NULL_ADDR)
268 err = reserve_new_block(dn);
269 if (err || need_put)

--- 46 unchanged lines hidden ---

316
317void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
318{
319 struct f2fs_inode_info *fi = F2FS_I(dn->inode);
320 pgoff_t fofs, start_fofs, end_fofs;
321 block_t start_blkaddr, end_blkaddr;
322 int need_update = true;
323
324 f2fs_bug_on(blk_addr == NEW_ADDR);
324 f2fs_bug_on(F2FS_I_SB(dn->inode), blk_addr == NEW_ADDR);
325 fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
326 dn->ofs_in_node;
327
328 /* Update the page address in the parent node */
329 __set_data_blkaddr(dn, blk_addr);
330
331 if (is_inode_flag_set(fi, FI_NO_EXTENT))
332 return;

--- 58 unchanged lines hidden ---

391 write_unlock(&fi->ext.ext_lock);
392 if (need_update)
393 sync_inode_page(dn);
394 return;
395}
396
397struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
398{
399 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
400 struct address_space *mapping = inode->i_mapping;
401 struct dnode_of_data dn;
402 struct page *page;
403 int err;
404
405 page = find_get_page(mapping, index);
406 if (page && PageUptodate(page))
407 return page;

--- 16 unchanged lines hidden ---

424 if (!page)
425 return ERR_PTR(-ENOMEM);
426
427 if (PageUptodate(page)) {
428 unlock_page(page);
429 return page;
430 }
431
432 err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
431 err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, dn.data_blkaddr,
433 sync ? READ_SYNC : READA);
434 if (err)
435 return ERR_PTR(err);
436
437 if (sync) {
438 wait_on_page_locked(page);
439 if (unlikely(!PageUptodate(page))) {
440 f2fs_put_page(page, 0);

--- 5 unchanged lines hidden ---

446
447/*
448 * If it tries to access a hole, return an error.
449 * Because, the callers, functions in dir.c and GC, should be able to know
450 * whether this page exists or not.
451 */
452struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
453{
454 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
455 struct address_space *mapping = inode->i_mapping;
456 struct dnode_of_data dn;
457 struct page *page;
458 int err;
459
460repeat:
461 page = grab_cache_page(mapping, index);
462 if (!page)

--- 22 unchanged lines hidden ---

485 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
486 */
487 if (dn.data_blkaddr == NEW_ADDR) {
488 zero_user_segment(page, 0, PAGE_CACHE_SIZE);
489 SetPageUptodate(page);
490 return page;
491 }
492
493 err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, READ_SYNC);
491 err = f2fs_submit_page_bio(F2FS_I_SB(inode), page,
492 dn.data_blkaddr, READ_SYNC);
494 if (err)
495 return ERR_PTR(err);
496
497 lock_page(page);
498 if (unlikely(!PageUptodate(page))) {
499 f2fs_put_page(page, 1);
500 return ERR_PTR(-EIO);
501 }

--- 10 unchanged lines hidden ---

512 *
513 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
514 * f2fs_unlock_op().
515 * Note that, ipage is set only by make_empty_dir.
516 */
517struct page *get_new_data_page(struct inode *inode,
518 struct page *ipage, pgoff_t index, bool new_i_size)
519{
520 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
521 struct address_space *mapping = inode->i_mapping;
522 struct page *page;
523 struct dnode_of_data dn;
524 int err;
525
526 set_new_dnode(&dn, inode, ipage, NULL, 0);
527 err = f2fs_reserve_block(&dn, index);
528 if (err)

--- 7 unchanged lines hidden ---

536
537 if (PageUptodate(page))
538 return page;
539
540 if (dn.data_blkaddr == NEW_ADDR) {
541 zero_user_segment(page, 0, PAGE_CACHE_SIZE);
542 SetPageUptodate(page);
543 } else {
544 err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
545 READ_SYNC);
542 err = f2fs_submit_page_bio(F2FS_I_SB(inode), page,
543 dn.data_blkaddr, READ_SYNC);
546 if (err)
547 goto put_err;
548
549 lock_page(page);
550 if (unlikely(!PageUptodate(page))) {
551 f2fs_put_page(page, 1);
552 err = -EIO;
553 goto put_err;

--- 14 unchanged lines hidden ---

568
569put_err:
570 f2fs_put_dnode(&dn);
571 return ERR_PTR(err);
572}
573
574static int __allocate_data_block(struct dnode_of_data *dn)
575{
576 struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
574 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
575 struct f2fs_inode_info *fi = F2FS_I(dn->inode);
577 struct f2fs_summary sum;
578 block_t new_blkaddr;
579 struct node_info ni;
579 pgoff_t fofs;
580 int type;
581
582 if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
583 return -EPERM;
584 if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
585 return -ENOSPC;
586
587 __set_data_blkaddr(dn, NEW_ADDR);

--- 6 unchanged lines hidden ---

594
595 allocate_data_block(sbi, NULL, NULL_ADDR, &new_blkaddr, &sum, type);
596
597 /* direct IO doesn't use extent cache to maximize the performance */
598 set_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);
599 update_extent_cache(new_blkaddr, dn);
600 clear_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);
601
602 /* update i_size */
603 fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
604 dn->ofs_in_node;
605 if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
606 i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));
607
602 dn->data_blkaddr = new_blkaddr;
603 return 0;
604}
605
606/*
607 * get_data_block() now supported readahead/bmap/rw direct_IO with mapped bh.
608 * If original data blocks are allocated, then give them to blockdev.
609 * Otherwise,
610 * a. preallocate requested block addresses
611 * b. do not use extent cache for better performance
612 * c. give the block addresses to blockdev
613 */
614static int __get_data_block(struct inode *inode, sector_t iblock,
615 struct buffer_head *bh_result, int create, bool fiemap)
616{
617 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
618 unsigned int blkbits = inode->i_sb->s_blocksize_bits;
619 unsigned maxblocks = bh_result->b_size >> blkbits;
620 struct dnode_of_data dn;
621 int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
622 pgoff_t pgofs, end_offset;
623 int err = 0, ofs = 1;
624 bool allocated = false;
625
626 /* Get the page offset from the block offset(iblock) */
627 pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));
628
629 if (check_extent_cache(inode, pgofs, bh_result))
630 goto out;
631
632 if (create) {
633 f2fs_balance_fs(sbi);
634 f2fs_lock_op(sbi);
638 f2fs_balance_fs(F2FS_I_SB(inode));
639 f2fs_lock_op(F2FS_I_SB(inode));
635 }
636
637 /* When reading holes, we need its node page */
638 set_new_dnode(&dn, inode, NULL, NULL, 0);
639 err = get_dnode_of_data(&dn, pgofs, mode);
640 if (err) {
641 if (err == -ENOENT)
642 err = 0;

--- 59 unchanged lines hidden ---

702 }
703sync_out:
704 if (allocated)
705 sync_inode_page(&dn);
706put_out:
707 f2fs_put_dnode(&dn);
708unlock_out:
709 if (create)
710 f2fs_unlock_op(sbi);
715 f2fs_unlock_op(F2FS_I_SB(inode));
711out:
712 trace_f2fs_get_data_block(inode, iblock, bh_result, err);
713 return err;
714}
715
716static int get_data_block(struct inode *inode, sector_t iblock,
717 struct buffer_head *bh_result, int create)
718{

--- 80 unchanged lines hidden ---

799 f2fs_put_dnode(&dn);
800 return err;
801}
802
803static int f2fs_write_data_page(struct page *page,
804 struct writeback_control *wbc)
805{
806 struct inode *inode = page->mapping->host;
807 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
812 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
808 loff_t i_size = i_size_read(inode);
809 const pgoff_t end_index = ((unsigned long long) i_size)
810 >> PAGE_CACHE_SHIFT;
811 unsigned offset = 0;
812 bool need_balance_fs = false;
813 int err = 0;
814 struct f2fs_io_info fio = {
815 .type = DATA,

--- 25 unchanged lines hidden ---

841 err = do_write_data_page(page, &fio);
842 goto done;
843 }
844
845 /* we should bypass data pages to proceed the kworker jobs */
846 if (unlikely(f2fs_cp_error(sbi))) {
847 SetPageError(page);
848 unlock_page(page);
849 return 0;
854 goto out;
850 }
851
852 if (!wbc->for_reclaim)
853 need_balance_fs = true;
854 else if (has_not_enough_free_secs(sbi, 0))
855 goto redirty_out;
856
857 f2fs_lock_op(sbi);
858 if (f2fs_has_inline_data(inode) || f2fs_may_inline(inode))
859 err = f2fs_write_inline_data(inode, page, offset);
860 else
861 err = do_write_data_page(page, &fio);
862 f2fs_unlock_op(sbi);
863done:
864 if (err && err != -ENOENT)
865 goto redirty_out;
866
867 clear_cold_data(page);
868out:
869 inode_dec_dirty_dents(inode);
874 inode_dec_dirty_pages(inode);
870 unlock_page(page);
871 if (need_balance_fs)
872 f2fs_balance_fs(sbi);
873 if (wbc->for_reclaim)
874 f2fs_submit_merged_bio(sbi, DATA, WRITE);
875 return 0;
876
877redirty_out:

--- 9 unchanged lines hidden ---

887 mapping_set_error(mapping, ret);
888 return ret;
889}
890
891static int f2fs_write_data_pages(struct address_space *mapping,
892 struct writeback_control *wbc)
893{
894 struct inode *inode = mapping->host;
895 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
900 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
896 bool locked = false;
897 int ret;
898 long diff;
899
900 trace_f2fs_writepages(mapping->host, wbc, DATA);
901
902 /* deal with chardevs and other special file */
903 if (!mapping->a_ops->writepage)
904 return 0;
905
906 if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
907 get_dirty_dents(inode) < nr_pages_to_skip(sbi, DATA) &&
912 get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
908 available_free_memory(sbi, DIRTY_DENTS))
909 goto skip_write;
910
911 diff = nr_pages_to_write(sbi, DATA, wbc);
912
913 if (!S_ISDIR(inode->i_mode)) {
914 mutex_lock(&sbi->writepages);
915 locked = true;

--- 5 unchanged lines hidden ---

921 f2fs_submit_merged_bio(sbi, DATA, WRITE);
922
923 remove_dirty_dir_inode(inode);
924
925 wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
926 return ret;
927
928skip_write:
929 wbc->pages_skipped += get_dirty_dents(inode);
934 wbc->pages_skipped += get_dirty_pages(inode);
930 return 0;
931}
932
933static void f2fs_write_failed(struct address_space *mapping, loff_t to)
934{
935 struct inode *inode = mapping->host;
936
937 if (to > inode->i_size) {
938 truncate_pagecache(inode, inode->i_size);
939 truncate_blocks(inode, inode->i_size, true);
940 }
941}
942
943static int f2fs_write_begin(struct file *file, struct address_space *mapping,
944 loff_t pos, unsigned len, unsigned flags,
945 struct page **pagep, void **fsdata)
946{
947 struct inode *inode = mapping->host;
948 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
953 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
949 struct page *page;
950 pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
951 struct dnode_of_data dn;
952 int err = 0;
953
954 trace_f2fs_write_begin(inode, pos, len, flags);
955
956 f2fs_balance_fs(sbi);

--- 85 unchanged lines hidden ---

1042 struct address_space *mapping,
1043 loff_t pos, unsigned len, unsigned copied,
1044 struct page *page, void *fsdata)
1045{
1046 struct inode *inode = page->mapping->host;
1047
1048 trace_f2fs_write_end(inode, pos, len, copied);
1049
1050 set_page_dirty(page);
1055 if (f2fs_is_atomic_file(inode))
1056 register_inmem_page(inode, page);
1057 else
1058 set_page_dirty(page);
1051
1052 if (pos + copied > i_size_read(inode)) {
1053 i_size_write(inode, pos + copied);
1054 mark_inode_dirty(inode);
1055 update_inode_page(inode);
1056 }
1057
1058 f2fs_put_page(page, 1);

--- 28 unchanged lines hidden ---

1087
1088 /* Let buffer I/O handle the inline data case. */
1089 if (f2fs_has_inline_data(inode))
1090 return 0;
1091
1092 if (check_direct_IO(inode, rw, iter, offset))
1093 return 0;
1094
1095 /* clear fsync mark to recover these blocks */
1096 fsync_mark_clear(F2FS_SB(inode->i_sb), inode->i_ino);
1097
1098 trace_f2fs_direct_IO_enter(inode, offset, count, rw);
1099
1100 err = blockdev_direct_IO(rw, iocb, inode, iter, offset, get_data_block);
1101 if (err < 0 && (rw & WRITE))
1102 f2fs_write_failed(mapping, offset + count);
1103
1104 trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
1105
1106 return err;
1107}
1108
1109static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
1110 unsigned int length)
1111{
1112 struct inode *inode = page->mapping->host;
1118
1119 if (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE)
1120 return;
1121
1113 if (PageDirty(page))
1114 inode_dec_dirty_dents(inode);
1123 inode_dec_dirty_pages(inode);
1115 ClearPagePrivate(page);
1116}
1117
1118static int f2fs_release_data_page(struct page *page, gfp_t wait)
1119{
1120 ClearPagePrivate(page);
1121 return 1;
1122}

--- 5 unchanged lines hidden ---

1128
1129 trace_f2fs_set_page_dirty(page, DATA);
1130
1131 SetPageUptodate(page);
1132 mark_inode_dirty(inode);
1133
1134 if (!PageDirty(page)) {
1135 __set_page_dirty_nobuffers(page);
1136 set_dirty_dir_page(inode, page);
1145 update_dirty_page(inode, page);
1137 return 1;
1138 }
1139 return 0;
1140}
1141
1142static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
1143{
1144 struct inode *inode = mapping->host;

--- 20 unchanged lines hidden ---
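The change that recurs throughout this diff is the replacement of open-coded superblock lookups such as F2FS_SB(inode->i_sb) with the inode-based helper F2FS_I_SB(inode), so a call site like f2fs_balance_fs(F2FS_SB(inode->i_sb)) becomes f2fs_balance_fs(F2FS_I_SB(inode)). As a rough sketch only, assuming the existing F2FS_SB() helper and struct f2fs_sb_info from fs/f2fs/f2fs.h and not quoted from the tree, such a wrapper would presumably look like this:

/* Hypothetical sketch of the inode-based accessor used by the new revision. */
static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
{
	/* Resolve the f2fs private info through the inode's superblock. */
	return F2FS_SB(inode->i_sb);
}

Resolving the sbi through the inode keeps the call sites shorter and also lets assertions such as f2fs_bug_on() receive the per-filesystem state they now take as their first argument in this diff.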