// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 * http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>

/*
 * Completion handler for raw (uncompressed) read bios.
 *
 * Propagates the bio status to every attached page: on success the page is
 * marked uptodate, on failure it is marked with PG_error; either way the
 * page is unlocked so waiters (lock_page() in the submitters below) resume.
 * FAULT_READ_IO fault injection can force an error path for testing.
 */
static inline void read_endio(struct bio *bio)
{
	struct super_block *const sb = bio->bi_private;
	struct bio_vec *bvec;
	blk_status_t err = bio->bi_status;
	struct bvec_iter_all iter_all;

	if (time_to_inject(EROFS_SB(sb), FAULT_READ_IO)) {
		erofs_show_injection_info(FAULT_READ_IO);
		err = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		/* page is already locked */
		DBG_BUGON(PageUptodate(page));

		if (err)
			SetPageError(page);
		else
			SetPageUptodate(page);

		unlock_page(page);
		/* page could be reclaimed now */
	}
	bio_put(bio);
}

/*
 * Read one metadata block (@blkaddr) through the block device's page cache
 * and return it locked and uptodate, or an ERR_PTR on failure.
 *
 * prio -- true is used for dir (adds REQ_PRIO to the read request)
 * nofail -- when true, allocation uses __GFP_NOFAIL and transient I/O
 *           errors are retried up to EROFS_IO_MAX_RETRIES_NOFAIL times.
 */
struct page *__erofs_get_meta_page(struct super_block *sb,
				   erofs_blk_t blkaddr, bool prio, bool nofail)
{
	struct inode *const bd_inode = sb->s_bdev->bd_inode;
	struct address_space *const mapping = bd_inode->i_mapping;
	/* prefer retrying in the allocator to blindly looping below */
	const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) |
		(nofail ? __GFP_NOFAIL : 0);
	unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0;
	struct page *page;
	int err;

repeat:
	/* returns the page locked; page->index == blkaddr in the bdev mapping */
	page = find_or_create_page(mapping, blkaddr, gfp);
	if (!page) {
		DBG_BUGON(nofail);
		return ERR_PTR(-ENOMEM);
	}
	DBG_BUGON(!PageLocked(page));

	if (!PageUptodate(page)) {
		struct bio *bio;

		/* single-block bio; read_endio() will unlock the page */
		bio = erofs_grab_bio(sb, blkaddr, 1, sb, read_endio, nofail);
		if (IS_ERR(bio)) {
			DBG_BUGON(nofail);
			err = PTR_ERR(bio);
			goto err_out;
		}

		/* a fresh 1-vec bio must accept a whole page */
		if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
			err = -EFAULT;
			goto err_out;
		}

		__submit_bio(bio, REQ_OP_READ,
			     REQ_META | (prio ? REQ_PRIO : 0));

		/* wait for I/O completion (endio unlocked the page) */
		lock_page(page);

		/* this page has been truncated by others */
		if (page->mapping != mapping) {
unlock_repeat:
			unlock_page(page);
			put_page(page);
			goto repeat;
		}

		/* more likely a read error */
		if (!PageUptodate(page)) {
			if (io_retries) {
				--io_retries;
				goto unlock_repeat;
			}
			err = -EIO;
			goto err_out;
		}
	}
	return page;

err_out:
	unlock_page(page);
	put_page(page);
	return ERR_PTR(err);
}

/*
 * Map a logical range (map->m_la) of a flat (uncompressed) inode to its
 * physical location on disk.
 *
 * Flat inodes have two layouts: plain blocks starting at vi->raw_blkaddr,
 * and (for EROFS_INODE_FLAT_INLINE) a tail that is packed inline right
 * after the on-disk inode + xattrs inside a metadata block; the latter is
 * flagged with EROFS_MAP_META.  There are no holes in flatmode.
 *
 * NOTE(review): block size is assumed equal to PAGE_SIZE here (see the
 * DIV_ROUND_UP by PAGE_SIZE and the inline-data bound check) — this
 * matches the rest of this file but is worth keeping in mind.
 */
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_vnode *vi = EROFS_V(inode);
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	/* with tail-packing, the last block lives in the inode's meta block */
	lastblk = nblocks - tailendpacking;

	if (offset >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		/* 1 - plain block range starting at raw_blkaddr */
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in one meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
			errln("inline data cross block boundary @ nid %llu",
			      vi->nid);
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		/* in-bounds offset past lastblk without tail-packing: corrupt */
		errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
		      vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

out:
	map->m_llen = map->m_plen;

	/* intentional fall-through: success paths also emit the exit trace */
err_out:
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}

/*
 * Top-level block mapper: dispatch to the compressed (z_erofs) mapper or
 * the flat mapper depending on the inode's data layout.  Any meta page
 * cached in map->mpage by the compressed mapper is released here since
 * callers of this entry point do not reuse it.
 */
int erofs_map_blocks(struct inode *inode,
		     struct erofs_map_blocks *map, int flags)
{
	if (erofs_inode_is_data_compressed(EROFS_V(inode)->datalayout)) {
		int err = z_erofs_map_blocks_iter(inode, map, flags);

		if (map->mpage) {
			put_page(map->mpage);
			map->mpage = NULL;
		}
		return err;
	}
	return erofs_map_blocks_flatmode(inode, map, flags);
}

/*
 * Read one page of an uncompressed inode, batching contiguous blocks into
 * @bio across calls (readpages passes the bio back in).
 *
 * Returns the bio to keep accumulating into, NULL when the page was
 * completed without a pending bio (hole, inline data, already uptodate,
 * or the bio was submitted), or an ERR_PTR on failure.  @last_block
 * tracks the previous block so discontiguity forces a submit; @ra is true
 * for readahead, where errors are not latched on the page.
 */
static inline struct bio *erofs_read_raw_page(struct bio *bio,
					      struct address_space *mapping,
					      struct page *page,
					      erofs_off_t *last_block,
					      unsigned int nblocks,
					      bool ra)
{
	struct inode *const inode = mapping->host;
	struct super_block *const sb = inode->i_sb;
	erofs_off_t current_block = (erofs_off_t)page->index;
	int err;

	DBG_BUGON(!nblocks);

	if (PageUptodate(page)) {
		err = 0;
		goto has_updated;
	}

	/* note that for readpage case, bio also equals to NULL */
	if (bio &&
	    /* not continuous */
	    *last_block + 1 != current_block) {
submit_bio_retry:
		__submit_bio(bio, REQ_OP_READ, 0);
		bio = NULL;
	}

	if (!bio) {
		struct erofs_map_blocks map = {
			.m_la = blknr_to_addr(current_block),
		};
		erofs_blk_t blknr;
		unsigned int blkoff;

		err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (err)
			goto err_out;

		/* zero out the holed page */
		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* for RAW access mode, m_plen must be equal to m_llen */
		DBG_BUGON(map.m_plen != map.m_llen);

		blknr = erofs_blknr(map.m_pa);
		blkoff = erofs_blkoff(map.m_pa);

		/* deal with inline page */
		if (map.m_flags & EROFS_MAP_META) {
			void *vsrc, *vto;
			struct page *ipage;

			DBG_BUGON(map.m_plen > PAGE_SIZE);

			ipage = erofs_get_meta_page(inode->i_sb, blknr, 0);

			if (IS_ERR(ipage)) {
				err = PTR_ERR(ipage);
				goto err_out;
			}

			/* copy the inline tail, zero-fill the remainder */
			vsrc = kmap_atomic(ipage);
			vto = kmap_atomic(page);
			memcpy(vto, vsrc + blkoff, map.m_plen);
			memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
			kunmap_atomic(vto);
			kunmap_atomic(vsrc);
			flush_dcache_page(page);

			SetPageUptodate(page);
			/* TODO: could we unlock the page earlier? */
			unlock_page(ipage);
			put_page(ipage);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* pa must be block-aligned for raw reading */
		DBG_BUGON(erofs_blkoff(map.m_pa));

		/* max # of continuous pages */
		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
		if (nblocks > BIO_MAX_PAGES)
			nblocks = BIO_MAX_PAGES;

		bio = erofs_grab_bio(sb, blknr, nblocks, sb,
				     read_endio, false);
		if (IS_ERR(bio)) {
			err = PTR_ERR(bio);
			bio = NULL;
			goto err_out;
		}
	}

	err = bio_add_page(bio, page, PAGE_SIZE, 0);
	/* out of the extent or bio is full */
	if (err < PAGE_SIZE)
		goto submit_bio_retry;

	*last_block = current_block;

	/* shift in advance in case of it followed by too many gaps */
	if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
		/* err should reassign to 0 after submitting */
		err = 0;
		goto submit_bio_out;
	}

	return bio;

err_out:
	/* for sync reading, set page error immediately */
	if (!ra) {
		SetPageError(page);
		ClearPageUptodate(page);
	}
has_updated:
	unlock_page(page);

	/* if updated manually, continuous pages has a gap */
	if (bio)
		/* label inside the if-body: submit_bio_out skips the check */
submit_bio_out:
		__submit_bio(bio, REQ_OP_READ, 0);

	return err ? ERR_PTR(err) : NULL;
}

/*
 * since we dont have write or truncate flows, so no inode
 * locking needs to be held at the moment.
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
	erofs_off_t last_block;
	struct bio *bio;

	trace_erofs_readpage(page, true);

	/* single-page read: any bio built is submitted internally */
	bio = erofs_read_raw_page(NULL, page->mapping,
				  page, &last_block, 1, false);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	DBG_BUGON(bio); /* since we have only one bio -- must be NULL */
	return 0;
}

/*
 * ->readpages: pull @nr_pages pages off @pages (LRU-linked, last entry
 * first), add each to the page cache, and batch contiguous reads into a
 * single bio via erofs_read_raw_page().  Per-page errors are logged and
 * ignored — readahead is best-effort.
 */
static int erofs_raw_access_readpages(struct file *filp,
				      struct address_space *mapping,
				      struct list_head *pages,
				      unsigned int nr_pages)
{
	erofs_off_t last_block;
	struct bio *bio = NULL;
	gfp_t gfp = readahead_gfp_mask(mapping);
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_erofs_readpages(mapping->host, page, nr_pages, true);

	for (; nr_pages; --nr_pages) {
		page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);

		/* skip pages someone else already added to the cache */
		if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			bio = erofs_read_raw_page(bio, mapping, page,
						  &last_block, nr_pages, true);

			/* all the page errors are ignored when readahead */
			if (IS_ERR(bio)) {
				pr_err("%s, readahead error at page %lu of nid %llu\n",
				       __func__, page->index,
				       EROFS_V(mapping->host)->nid);

				bio = NULL;
			}
		}

		/* pages could still be locked */
		put_page(page);
	}
	DBG_BUGON(!list_empty(pages));

	/* the rare case (end in gaps) */
	if (bio)
		__submit_bio(bio, REQ_OP_READ, 0);
	return 0;
}

/*
 * get_block_t helper for generic_block_bmap(); read-only, so @create is
 * ignored and unmapped blocks simply leave bh->b_blocknr untouched.
 * NOTE(review): m_la is computed as iblock << 9, i.e. iblock is treated
 * as a 512-byte sector index here — confirm against LOG_SECTORS_PER_BLOCK
 * usage in erofs_bmap() below.
 */
static int erofs_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct erofs_map_blocks map = {
		.m_la = iblock << 9,
	};
	int err;

	err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (err)
		return err;

	if (map.m_flags & EROFS_MAP_MAPPED)
		bh->b_blocknr = erofs_blknr(map.m_pa);

	return err;
}

/*
 * ->bmap: report the on-disk block of a file block.  For tail-packed
 * inodes the inline tail has no standalone block address, so queries at
 * or past the last full block return 0 (unmapped).
 */
static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (EROFS_V(inode)->datalayout == EROFS_INODE_FLAT_INLINE) {
		erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;

		if (block >> LOG_SECTORS_PER_BLOCK >= blks)
			return 0;
	}

	return generic_block_bmap(mapping, block, erofs_get_block);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_raw_access_readpage,
	.readpages = erofs_raw_access_readpages,
	.bmap = erofs_bmap,
};
