data.c (8632987380765dee716d460640aa58d58d52998e) data.c (a1e09b03e6f5c1d713c88259909137c0fd264ede)
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * fs/f2fs/data.c
4 *
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
7 */
8#include <linux/fs.h>

--- 7 unchanged lines hidden (view full) ---

16#include <linux/bio.h>
17#include <linux/blk-crypto.h>
18#include <linux/swap.h>
19#include <linux/prefetch.h>
20#include <linux/uio.h>
21#include <linux/cleancache.h>
22#include <linux/sched/signal.h>
23#include <linux/fiemap.h>
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * fs/f2fs/data.c
4 *
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
7 */
8#include <linux/fs.h>

--- 7 unchanged lines hidden (view full) ---

16#include <linux/bio.h>
17#include <linux/blk-crypto.h>
18#include <linux/swap.h>
19#include <linux/prefetch.h>
20#include <linux/uio.h>
21#include <linux/cleancache.h>
22#include <linux/sched/signal.h>
23#include <linux/fiemap.h>
24#include <linux/iomap.h>
24
25#include "f2fs.h"
26#include "node.h"
27#include "segment.h"
28#include "iostat.h"
29#include <trace/events/f2fs.h>
30
31#define NUM_PREALLOC_POST_READ_CTXS 128

--- 1339 unchanged lines hidden (view full) ---

1371 f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
1372 &sum, seg_type, NULL);
1373 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
1374 invalidate_mapping_pages(META_MAPPING(sbi),
1375 old_blkaddr, old_blkaddr);
1376 f2fs_invalidate_compress_page(sbi, old_blkaddr);
1377 }
1378 f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
25
26#include "f2fs.h"
27#include "node.h"
28#include "segment.h"
29#include "iostat.h"
30#include <trace/events/f2fs.h>
31
32#define NUM_PREALLOC_POST_READ_CTXS 128

--- 1339 unchanged lines hidden (view full) ---

1372 f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
1373 &sum, seg_type, NULL);
1374 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
1375 invalidate_mapping_pages(META_MAPPING(sbi),
1376 old_blkaddr, old_blkaddr);
1377 f2fs_invalidate_compress_page(sbi, old_blkaddr);
1378 }
1379 f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
1379
1380 /*
1381 * i_size will be updated by direct_IO. Otherwise, we'll get stale
1382 * data from unwritten block via dio_read.
1383 */
1384 return 0;
1385}
1386
1380 return 0;
1381}
1382
/*
 * Preallocate on-disk blocks covering the write described by @iocb/@from,
 * so the subsequent write path does not have to allocate per page.
 * Returns 0 on success (including the benign -ENOSPC fallback below) or a
 * negative errno.
 */
1387int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
1388{
1389 struct inode *inode = file_inode(iocb->ki_filp);
1390 struct f2fs_map_blocks map;
1391 int flag;
1392 int err = 0;
1393 bool direct_io = iocb->ki_flags & IOCB_DIRECT;
1394
	/* Block range [m_lblk, m_lblk + m_len) spanning pos..pos+count. */
1395 map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
1396 map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
1397 if (map.m_len > map.m_lblk)
1398 map.m_len -= map.m_lblk;
1399 else
1400 map.m_len = 0;
1401
1402 map.m_next_pgofs = NULL;
1403 map.m_next_extent = NULL;
1404 map.m_seg_type = NO_CHECK_TYPE;
1405 map.m_may_create = true;
1406
1407 if (direct_io) {
	/* DIO: pick segment type from the write hint; fall back to the
	 * AIO-style preallocation when DIO is forced to buffered I/O. */
1408 map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
1409 flag = f2fs_force_buffered_io(inode, iocb, from) ?
1410 F2FS_GET_BLOCK_PRE_AIO :
1411 F2FS_GET_BLOCK_PRE_DIO;
1412 goto map_blocks;
1413 }
	/* Buffered path: convert inline data first if the write outgrows it. */
1414 if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
1415 err = f2fs_convert_inline_inode(inode);
1416 if (err)
1417 return err;
1418 }
	/* Still inline after the check above: nothing to preallocate. */
1419 if (f2fs_has_inline_data(inode))
1420 return err;
1421
1422 flag = F2FS_GET_BLOCK_PRE_AIO;
1423
1424map_blocks:
1425 err = f2fs_map_blocks(inode, &map, 1, flag);
1426 if (map.m_len > 0 && err == -ENOSPC) {
	/* Out of space is not fatal here; buffered writes just skip
	 * preallocation from now on (FI_NO_PREALLOC). */
1427 if (!direct_io)
1428 set_inode_flag(inode, FI_NO_PREALLOC);
1429 err = 0;
1430 }
1431 return err;
1432}
1433
1434void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
1435{
1436 if (flag == F2FS_GET_BLOCK_PRE_AIO) {
1437 if (lock)
1438 down_read(&sbi->node_change);
1439 else
1440 up_read(&sbi->node_change);
1441 } else {

--- 143 unchanged lines hidden (view full) ---

1585 prealloc++;
1586 last_ofs_in_node = dn.ofs_in_node;
1587 }
1588 } else {
1589 WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
1590 flag != F2FS_GET_BLOCK_DIO);
1591 err = __allocate_data_block(&dn,
1592 map->m_seg_type);
1383void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
1384{
1385 if (flag == F2FS_GET_BLOCK_PRE_AIO) {
1386 if (lock)
1387 down_read(&sbi->node_change);
1388 else
1389 up_read(&sbi->node_change);
1390 } else {

--- 143 unchanged lines hidden (view full) ---

1534 prealloc++;
1535 last_ofs_in_node = dn.ofs_in_node;
1536 }
1537 } else {
1538 WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
1539 flag != F2FS_GET_BLOCK_DIO);
1540 err = __allocate_data_block(&dn,
1541 map->m_seg_type);
1593 if (!err)
1542 if (!err) {
1543 if (flag == F2FS_GET_BLOCK_PRE_DIO)
1544 file_need_truncate(inode);
1594 set_inode_flag(inode, FI_APPEND_WRITE);
1545 set_inode_flag(inode, FI_APPEND_WRITE);
1546 }
1595 }
1596 if (err)
1597 goto sync_out;
1598 map->m_flags |= F2FS_MAP_NEW;
1599 blkaddr = dn.data_blkaddr;
1600 } else {
1601 if (f2fs_compressed_file(inode) &&
1602 f2fs_sanity_check_cluster(&dn) &&

--- 178 unchanged lines hidden (view full) ---

1781 return (bytes >> inode->i_blkbits);
1782}
1783
/* Convert a block count to bytes using the inode's block size. */
1784static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
1785{
1786 return (blks << inode->i_blkbits);
1787}
1788
1547 }
1548 if (err)
1549 goto sync_out;
1550 map->m_flags |= F2FS_MAP_NEW;
1551 blkaddr = dn.data_blkaddr;
1552 } else {
1553 if (f2fs_compressed_file(inode) &&
1554 f2fs_sanity_check_cluster(&dn) &&

--- 178 unchanged lines hidden (view full) ---

1733 return (bytes >> inode->i_blkbits);
1734}
1735
/* Convert a block count to bytes using the inode's block size. */
1736static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
1737{
1738 return (blks << inode->i_blkbits);
1739}
1740
/*
 * Common get_block_t backend: translate @iblock into a physical mapping via
 * f2fs_map_blocks() and publish the result in @bh (address, state flags,
 * mapped length, and the target bdev for multi-device DIO).
 * @may_write selects whether new blocks may be allocated (m_may_create).
 */
1789static int __get_data_block(struct inode *inode, sector_t iblock,
1790 struct buffer_head *bh, int create, int flag,
1791 pgoff_t *next_pgofs, int seg_type, bool may_write)
1792{
1793 struct f2fs_map_blocks map;
1794 int err;
1795
1796 map.m_lblk = iblock;
	/* bh->b_size is the requested byte length on entry. */
1797 map.m_len = bytes_to_blks(inode, bh->b_size);
1798 map.m_next_pgofs = next_pgofs;
1799 map.m_next_extent = NULL;
1800 map.m_seg_type = seg_type;
1801 map.m_may_create = may_write;
1802
1803 err = f2fs_map_blocks(inode, &map, create, flag);
1804 if (!err) {
1805 map_bh(bh, inode->i_sb, map.m_pblk);
	/* Replace only the F2FS-owned state bits, keep the rest. */
1806 bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
1807 bh->b_size = blks_to_bytes(inode, map.m_len);
1808
1809 if (map.m_multidev_dio)
1810 bh->b_bdev = map.m_bdev;
1811 }
1812 return err;
1813}
1814
/* get_block_t for direct-IO writes: may allocate blocks (may_write = true). */
1815static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
1816 struct buffer_head *bh_result, int create)
1817{
1818 return __get_data_block(inode, iblock, bh_result, create,
1819 F2FS_GET_BLOCK_DIO, NULL,
1820 f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1821 true);
1822}
1823
/* get_block_t for direct-IO reads: lookup only (may_write = false). */
1824static int get_data_block_dio(struct inode *inode, sector_t iblock,
1825 struct buffer_head *bh_result, int create)
1826{
1827 return __get_data_block(inode, iblock, bh_result, create,
1828 F2FS_GET_BLOCK_DIO, NULL,
1829 f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1830 false);
1831}
1832
1833static int f2fs_xattr_fiemap(struct inode *inode,
1834 struct fiemap_extent_info *fieinfo)
1835{
1836 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1837 struct page *page;
1838 struct node_info ni;
1839 __u64 phys = 0, len;
1840 __u32 flags;

--- 1141 unchanged lines hidden (view full) ---

2982 struct compress_ctx cc = {
2983 .inode = inode,
2984 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2985 .cluster_size = F2FS_I(inode)->i_cluster_size,
2986 .cluster_idx = NULL_CLUSTER,
2987 .rpages = NULL,
2988 .nr_rpages = 0,
2989 .cpages = NULL,
1741static int f2fs_xattr_fiemap(struct inode *inode,
1742 struct fiemap_extent_info *fieinfo)
1743{
1744 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1745 struct page *page;
1746 struct node_info ni;
1747 __u64 phys = 0, len;
1748 __u32 flags;

--- 1141 unchanged lines hidden (view full) ---

2890 struct compress_ctx cc = {
2891 .inode = inode,
2892 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2893 .cluster_size = F2FS_I(inode)->i_cluster_size,
2894 .cluster_idx = NULL_CLUSTER,
2895 .rpages = NULL,
2896 .nr_rpages = 0,
2897 .cpages = NULL,
2898 .valid_nr_cpages = 0,
2990 .rbuf = NULL,
2991 .cbuf = NULL,
2992 .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
2993 .private = NULL,
2994 };
2995#endif
2996 int nr_pages;
2997 pgoff_t index;

--- 302 unchanged lines hidden (view full) ---

3300{
3301 struct inode *inode = mapping->host;
3302
3303 return __f2fs_write_data_pages(mapping, wbc,
3304 F2FS_I(inode)->cp_task == current ?
3305 FS_CP_DATA_IO : FS_DATA_IO);
3306}
3307
2899 .rbuf = NULL,
2900 .cbuf = NULL,
2901 .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
2902 .private = NULL,
2903 };
2904#endif
2905 int nr_pages;
2906 pgoff_t index;

--- 302 unchanged lines hidden (view full) ---

3209{
3210 struct inode *inode = mapping->host;
3211
3212 return __f2fs_write_data_pages(mapping, wbc,
3213 F2FS_I(inode)->cp_task == current ?
3214 FS_CP_DATA_IO : FS_DATA_IO);
3215}
3216
3308static void f2fs_write_failed(struct inode *inode, loff_t to)
3217void f2fs_write_failed(struct inode *inode, loff_t to)
3309{
3310 loff_t i_size = i_size_read(inode);
3311
3312 if (IS_NOQUOTA(inode))
3313 return;
3314
3315 /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
3316 if (to > i_size && !f2fs_verity_in_progress(inode)) {

--- 17 unchanged lines hidden (view full) ---

3334 struct dnode_of_data dn;
3335 struct page *ipage;
3336 bool locked = false;
3337 struct extent_info ei = {0, };
3338 int err = 0;
3339 int flag;
3340
3341 /*
3218{
3219 loff_t i_size = i_size_read(inode);
3220
3221 if (IS_NOQUOTA(inode))
3222 return;
3223
3224 /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
3225 if (to > i_size && !f2fs_verity_in_progress(inode)) {

--- 17 unchanged lines hidden (view full) ---

3243 struct dnode_of_data dn;
3244 struct page *ipage;
3245 bool locked = false;
3246 struct extent_info ei = {0, };
3247 int err = 0;
3248 int flag;
3249
3250 /*
3342 * we already allocated all the blocks, so we don't need to get
3343 * the block addresses when there is no need to fill the page.
3251 * If a whole page is being written and we already preallocated all the
3252 * blocks, then there is no need to get a block address now.
3344 */
3253 */
3345 if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
3346 !is_inode_flag_set(inode, FI_NO_PREALLOC) &&
3347 !f2fs_verity_in_progress(inode))
3254 if (len == PAGE_SIZE && is_inode_flag_set(inode, FI_PREALLOCATED_ALL))
3348 return 0;
3349
3350 /* f2fs_lock_op avoids race between write CP and convert_inline_page */
3351 if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
3352 flag = F2FS_GET_BLOCK_DEFAULT;
3353 else
3354 flag = F2FS_GET_BLOCK_PRE_AIO;
3355

--- 234 unchanged lines hidden (view full) ---

3590 !f2fs_verity_in_progress(inode))
3591 f2fs_i_size_write(inode, pos + copied);
3592unlock_out:
3593 f2fs_put_page(page, 1);
3594 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3595 return copied;
3596}
3597
3255 return 0;
3256
3257 /* f2fs_lock_op avoids race between write CP and convert_inline_page */
3258 if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
3259 flag = F2FS_GET_BLOCK_DEFAULT;
3260 else
3261 flag = F2FS_GET_BLOCK_PRE_AIO;
3262

--- 234 unchanged lines hidden (view full) ---

3497 !f2fs_verity_in_progress(inode))
3498 f2fs_i_size_write(inode, pos + copied);
3499unlock_out:
3500 f2fs_put_page(page, 1);
3501 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3502 return copied;
3503}
3504
/*
 * Validate a direct-IO request's offset/iovec alignment.
 * Returns 0 when DIO can proceed, 1 to fall back to buffered I/O (read past
 * EOF, or aligned only to the device's logical block size rather than the
 * filesystem block size), or -EINVAL when even device alignment fails.
 */
3598static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
3599 loff_t offset)
3600{
3601 unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
3602 unsigned blkbits = i_blkbits;
3603 unsigned blocksize_mask = (1 << blkbits) - 1;
	/* OR offset and iovec alignment so one mask test covers both. */
3604 unsigned long align = offset | iov_iter_alignment(iter);
3605 struct block_device *bdev = inode->i_sb->s_bdev;
3606
3607 if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode))
3608 return 1;
3609
3610 if (align & blocksize_mask) {
	/* Not fs-block aligned: retry the test against the device's
	 * (typically smaller) logical block size. */
3611 if (bdev)
3612 blkbits = blksize_bits(bdev_logical_block_size(bdev));
3613 blocksize_mask = (1 << blkbits) - 1;
3614 if (align & blocksize_mask)
3615 return -EINVAL;
3616 return 1;
3617 }
3618 return 0;
3619}
3620
/*
 * bio completion hook installed by f2fs_dio_submit_bio(): drop the in-flight
 * DIO page count, restore the bio's original private data and end_io handler,
 * free our tracking struct, then complete the bio normally.
 */
3621static void f2fs_dio_end_io(struct bio *bio)
3622{
3623 struct f2fs_private_dio *dio = bio->bi_private;
3624
3625 dec_page_count(F2FS_I_SB(dio->inode),
3626 dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3627
	/* Undo the interposition set up at submit time. */
3628 bio->bi_private = dio->orig_private;
3629 bio->bi_end_io = dio->orig_end_io;
3630
3631 kfree(dio);
3632
3633 bio_endio(bio);
3634}
3635
/*
 * submit_io hook for __blockdev_direct_IO(): wrap the bio so f2fs can count
 * in-flight DIO pages. Interposes f2fs_dio_end_io as the completion handler,
 * stashing the original end_io/private in a small f2fs_private_dio.
 * On allocation failure the bio is failed with BLK_STS_IOERR.
 */
3636static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
3637 loff_t file_offset)
3638{
3639 struct f2fs_private_dio *dio;
3640 bool write = (bio_op(bio) == REQ_OP_WRITE);
3641
3642 dio = f2fs_kzalloc(F2FS_I_SB(inode),
3643 sizeof(struct f2fs_private_dio), GFP_NOFS);
3644 if (!dio)
3645 goto out;
3646
3647 dio->inode = inode;
3648 dio->orig_end_io = bio->bi_end_io;
3649 dio->orig_private = bio->bi_private;
3650 dio->write = write;
3651
	/* Redirect completion through our counter-dropping handler. */
3652 bio->bi_end_io = f2fs_dio_end_io;
3653 bio->bi_private = dio;
3654
3655 inc_page_count(F2FS_I_SB(inode),
3656 write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3657
3658 submit_bio(bio);
3659 return;
3660out:
	/* Could not track this bio; fail it rather than lose accounting. */
3661 bio->bi_status = BLK_STS_IOERR;
3662 bio_endio(bio);
3663}
3664
/*
 * ->direct_IO handler: validate alignment, take the per-inode GC rwsem(s),
 * and hand off to __blockdev_direct_IO with the f2fs get_block callbacks.
 * Returns bytes transferred, 0 to fall back to buffered I/O, or -errno.
 * "do_opu" (out-of-place update, LFS-mode writes) additionally holds the
 * READ-side GC rwsem so GC cannot migrate blocks under the write.
 */
3665static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3666{
3667 struct address_space *mapping = iocb->ki_filp->f_mapping;
3668 struct inode *inode = mapping->host;
3669 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3670 struct f2fs_inode_info *fi = F2FS_I(inode);
3671 size_t count = iov_iter_count(iter);
3672 loff_t offset = iocb->ki_pos;
3673 int rw = iov_iter_rw(iter);
3674 int err;
3675 enum rw_hint hint = iocb->ki_hint;
3676 int whint_mode = F2FS_OPTION(sbi).whint_mode;
3677 bool do_opu;
3678
	/* check_direct_IO: <0 is an error, >0 means use buffered I/O. */
3679 err = check_direct_IO(inode, iter, offset);
3680 if (err)
3681 return err < 0 ? err : 0;
3682
3683 if (f2fs_force_buffered_io(inode, iocb, iter))
3684 return 0;
3685
3686 do_opu = rw == WRITE && f2fs_lfs_mode(sbi);
3687
3688 trace_f2fs_direct_IO_enter(inode, offset, count, rw);
3689
	/* With write hints off, neutralize the caller's hint for this I/O
	 * and restore it (saved in "hint") afterwards. */
3690 if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
3691 iocb->ki_hint = WRITE_LIFE_NOT_SET;
3692
3693 if (iocb->ki_flags & IOCB_NOWAIT) {
	/* Non-blocking path: trylock only, bail with -EAGAIN on contention. */
3694 if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
3695 iocb->ki_hint = hint;
3696 err = -EAGAIN;
3697 goto out;
3698 }
3699 if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
3700 up_read(&fi->i_gc_rwsem[rw]);
3701 iocb->ki_hint = hint;
3702 err = -EAGAIN;
3703 goto out;
3704 }
3705 } else {
3706 down_read(&fi->i_gc_rwsem[rw]);
3707 if (do_opu)
3708 down_read(&fi->i_gc_rwsem[READ]);
3709 }
3710
3711 err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
3712 iter, rw == WRITE ? get_data_block_dio_write :
3713 get_data_block_dio, NULL, f2fs_dio_submit_bio,
3714 rw == WRITE ? DIO_LOCKING | DIO_SKIP_HOLES :
3715 DIO_SKIP_HOLES);
3716
3717 if (do_opu)
3718 up_read(&fi->i_gc_rwsem[READ]);
3719
3720 up_read(&fi->i_gc_rwsem[rw]);
3721
3722 if (rw == WRITE) {
3723 if (whint_mode == WHINT_MODE_OFF)
3724 iocb->ki_hint = hint;
3725 if (err > 0) {
3726 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3727 err);
	/* In-place updates need FI_UPDATE_WRITE for fsync bookkeeping;
	 * out-of-place (do_opu) writes do not. */
3728 if (!do_opu)
3729 set_inode_flag(inode, FI_UPDATE_WRITE);
3730 } else if (err == -EIOCBQUEUED) {
	/* Async I/O queued: account what the iter already consumed. */
3731 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3732 count - iov_iter_count(iter));
3733 } else if (err < 0) {
3734 f2fs_write_failed(inode, offset + count);
3735 }
3736 } else {
3737 if (err > 0)
3738 f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, err);
3739 else if (err == -EIOCBQUEUED)
3740 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_READ_IO,
3741 count - iov_iter_count(iter));
3742 }
3743
3744out:
3745 trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
3746
3747 return err;
3748}
3749
3750void f2fs_invalidate_page(struct page *page, unsigned int offset,
3751 unsigned int length)
3752{
3753 struct inode *inode = page->mapping->host;
3754 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3755
3756 if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
3757 (offset % PAGE_SIZE || length != PAGE_SIZE))

--- 439 unchanged lines hidden (view full) ---

4197 .readahead = f2fs_readahead,
4198 .writepage = f2fs_write_data_page,
4199 .writepages = f2fs_write_data_pages,
4200 .write_begin = f2fs_write_begin,
4201 .write_end = f2fs_write_end,
4202 .set_page_dirty = f2fs_set_data_page_dirty,
4203 .invalidatepage = f2fs_invalidate_page,
4204 .releasepage = f2fs_release_page,
3505void f2fs_invalidate_page(struct page *page, unsigned int offset,
3506 unsigned int length)
3507{
3508 struct inode *inode = page->mapping->host;
3509 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3510
3511 if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
3512 (offset % PAGE_SIZE || length != PAGE_SIZE))

--- 439 unchanged lines hidden (view full) ---

3952 .readahead = f2fs_readahead,
3953 .writepage = f2fs_write_data_page,
3954 .writepages = f2fs_write_data_pages,
3955 .write_begin = f2fs_write_begin,
3956 .write_end = f2fs_write_end,
3957 .set_page_dirty = f2fs_set_data_page_dirty,
3958 .invalidatepage = f2fs_invalidate_page,
3959 .releasepage = f2fs_release_page,
4205 .direct_IO = f2fs_direct_IO,
3960 .direct_IO = noop_direct_IO,
4206 .bmap = f2fs_bmap,
4207 .swap_activate = f2fs_swap_activate,
4208 .swap_deactivate = f2fs_swap_deactivate,
4209#ifdef CONFIG_MIGRATION
4210 .migratepage = f2fs_migrate_page,
4211#endif
4212};
4213

--- 63 unchanged lines hidden (view full) ---

4277 return -ENOMEM;
4278 return 0;
4279}
4280
/* Tear down the bio_entry slab cache created at module init. */
4281void f2fs_destroy_bio_entry_cache(void)
4282{
4283 kmem_cache_destroy(bio_entry_slab);
4284}
3961 .bmap = f2fs_bmap,
3962 .swap_activate = f2fs_swap_activate,
3963 .swap_deactivate = f2fs_swap_deactivate,
3964#ifdef CONFIG_MIGRATION
3965 .migratepage = f2fs_migrate_page,
3966#endif
3967};
3968

--- 63 unchanged lines hidden (view full) ---

4032 return -ENOMEM;
4033 return 0;
4034}
4035
/* Tear down the bio_entry slab cache created at module init. */
4036void f2fs_destroy_bio_entry_cache(void)
4037{
4038 kmem_cache_destroy(bio_entry_slab);
4039}
4040
/*
 * iomap_begin callback: map the byte range [offset, offset+length) to an
 * extent description in @iomap for the iomap DIO infrastructure.
 * For mapped/unwritten extents it reports the physical address and device;
 * for holes it reports a hole extending to the next allocated page offset
 * (next_pgofs, filled in by f2fs_map_blocks). @srcmap is unused here.
 */
4041static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
4042 unsigned int flags, struct iomap *iomap,
4043 struct iomap *srcmap)
4044{
4045 struct f2fs_map_blocks map = {};
4046 pgoff_t next_pgofs = 0;
4047 int err;
4048
4049 map.m_lblk = bytes_to_blks(inode, offset);
	/* Inclusive last block minus first block, plus one. */
4050 map.m_len = bytes_to_blks(inode, offset + length - 1) - map.m_lblk + 1;
4051 map.m_next_pgofs = &next_pgofs;
4052 map.m_seg_type = f2fs_rw_hint_to_seg_type(inode->i_write_hint);
	/* Writes may allocate new blocks; reads are lookup-only. */
4053 if (flags & IOMAP_WRITE)
4054 map.m_may_create = true;
4055
4056 err = f2fs_map_blocks(inode, &map, flags & IOMAP_WRITE,
4057 F2FS_GET_BLOCK_DIO);
4058 if (err)
4059 return err;
4060
4061 iomap->offset = blks_to_bytes(inode, map.m_lblk);
4062
4063 if (map.m_flags & (F2FS_MAP_MAPPED | F2FS_MAP_UNWRITTEN)) {
4064 iomap->length = blks_to_bytes(inode, map.m_len);
4065 if (map.m_flags & F2FS_MAP_MAPPED) {
4066 iomap->type = IOMAP_MAPPED;
4067 iomap->flags |= IOMAP_F_MERGED;
4068 } else {
4069 iomap->type = IOMAP_UNWRITTEN;
4070 }
	/* A mapped/unwritten extent must carry a valid block address. */
4071 if (WARN_ON_ONCE(!__is_valid_data_blkaddr(map.m_pblk)))
4072 return -EINVAL;
4073
4074 iomap->bdev = map.m_bdev;
4075 iomap->addr = blks_to_bytes(inode, map.m_pblk);
4076 } else {
	/* Hole: runs from offset up to the next allocated page offset. */
4077 iomap->length = blks_to_bytes(inode, next_pgofs) -
4078 iomap->offset;
4079 iomap->type = IOMAP_HOLE;
4080 iomap->addr = IOMAP_NULL_ADDR;
4081 }
4082
4083 if (map.m_flags & F2FS_MAP_NEW)
4084 iomap->flags |= IOMAP_F_NEW;
	/* IOMAP_F_DIRTY: metadata must be flushed for O_DSYNC-style writes. */
4085 if ((inode->i_state & I_DIRTY_DATASYNC) ||
4086 offset + length > i_size_read(inode))
4087 iomap->flags |= IOMAP_F_DIRTY;
4088
4089 return 0;
4090}
4091
/* iomap operations used by the f2fs iomap-based direct-IO path. */
4092const struct iomap_ops f2fs_iomap_ops = {
4093 .iomap_begin = f2fs_iomap_begin,
4094};