// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 * http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>

/*
 * Completion handler shared by all read bios built in this file.
 * For each segment: mark the page uptodate on success or set its error
 * flag on failure, then unlock it -- submitters keep pages locked until
 * I/O finishes, so unlock_page() here is what wakes up waiters.
 */
static inline void read_endio(struct bio *bio)
{
	struct super_block *const sb = bio->bi_private;
	struct bio_vec *bvec;
	blk_status_t err = bio->bi_status;
	struct bvec_iter_all iter_all;

	/* optional fault injection so the error path gets exercised */
	if (time_to_inject(EROFS_SB(sb), FAULT_READ_IO)) {
		erofs_show_injection_info(FAULT_READ_IO);
		err = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		/* page is already locked */
		DBG_BUGON(PageUptodate(page));

		if (unlikely(err))
			SetPageError(page);
		else
			SetPageUptodate(page);

		unlock_page(page);
		/* page could be reclaimed now */
	}
	bio_put(bio);
}

/*
 * Synchronously read one metadata block through the block device's page
 * cache and return it locked.  With @nofail the allocation uses
 * __GFP_NOFAIL and failed reads are retried up to
 * EROFS_IO_MAX_RETRIES_NOFAIL times; otherwise errors are returned as
 * ERR_PTR().  Returns a locked, uptodate page on success.
 */
/* prio -- true is used for dir */
struct page *__erofs_get_meta_page(struct super_block *sb,
				   erofs_blk_t blkaddr, bool prio, bool nofail)
{
	struct inode *const bd_inode = sb->s_bdev->bd_inode;
	struct address_space *const mapping = bd_inode->i_mapping;
	/* prefer retrying in the allocator to blindly looping below */
	const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) |
			  (nofail ? __GFP_NOFAIL : 0);
	unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0;
	struct page *page;
	int err;

repeat:
	/* page index in the bdev mapping doubles as the block number */
	page = find_or_create_page(mapping, blkaddr, gfp);
	if (unlikely(!page)) {
		DBG_BUGON(nofail);
		return ERR_PTR(-ENOMEM);
	}
	DBG_BUGON(!PageLocked(page));

	if (!PageUptodate(page)) {
		struct bio *bio;

		/* single-block read; sb is passed through as bi_private */
		bio = erofs_grab_bio(sb, blkaddr, 1, sb, read_endio, nofail);
		if (IS_ERR(bio)) {
			DBG_BUGON(nofail);
			err = PTR_ERR(bio);
			goto err_out;
		}

		err = bio_add_page(bio, page, PAGE_SIZE, 0);
		if (unlikely(err != PAGE_SIZE)) {
			err = -EFAULT;
			goto err_out;
		}

		__submit_bio(bio, REQ_OP_READ,
			     REQ_META | (prio ? REQ_PRIO : 0));

		/*
		 * read_endio() unlocks the page on completion, so this
		 * lock_page() effectively waits for the I/O to finish.
		 */
		lock_page(page);

		/* this page has been truncated by others */
		if (unlikely(page->mapping != mapping)) {
unlock_repeat:
			unlock_page(page);
			put_page(page);
			goto repeat;
		}

		/* more likely a read error */
		if (unlikely(!PageUptodate(page))) {
			if (io_retries) {
				--io_retries;
				goto unlock_repeat;
			}
			err = -EIO;
			goto err_out;
		}
	}
	return page;

err_out:
	unlock_page(page);
	put_page(page);
	return ERR_PTR(err);
}

/*
 * Map a logical range of a flat (uncompressed) inode to its physical
 * location.  Plain blocks sit contiguously from raw_blkaddr; for
 * flat-inline inodes the last block lives right behind the on-disk
 * inode (after xattrs) and is reported with EROFS_MAP_META.
 */
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_vnode *vi = EROFS_V(inode);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	/* the trailing block of a flat-inline inode is not in the raw area */
	lastblk = nblocks - is_inode_flat_inline(inode);

	if (unlikely(offset >= inode->i_size)) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (is_inode_flat_inline(inode)) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in one meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
			errln("inline data cross block boundary @ nid %llu",
			      vi->nid);
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
		      vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

out:
	map->m_llen = map->m_plen;

err_out:
	/* NOTE(review): exit trace logs 0 even when err != 0 -- confirm intent */
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}

/*
 * erofs_map_blocks - map a logical extent to a physical extent
 *
 * Dispatches to the z_erofs iterator for compressed inodes (dropping
 * the meta page reference the iterator may leave in map->mpage),
 * otherwise uses the flatmode mapper above.
 */
int erofs_map_blocks(struct inode *inode,
		     struct erofs_map_blocks *map, int flags)
{
	if (unlikely(is_inode_layout_compression(inode))) {
		int err = z_erofs_map_blocks_iter(inode, map, flags);

		/* callers of this wrapper don't keep the meta page */
		if (map->mpage) {
			put_page(map->mpage);
			map->mpage = NULL;
		}
		return err;
	}
	return erofs_map_blocks_flatmode(inode, map, flags);
}

/*
 * Read one raw (uncompressed) page, merging contiguous pages into a
 * single bio across calls.  @bio is the in-flight bio from the previous
 * call (NULL for readpage), @last_block tracks the last queued block so
 * discontiguity forces a submit, @nblocks caps read-ahead, and @ra
 * suppresses per-page error flagging during readahead.
 *
 * Returns the (possibly new) open bio to pass to the next call, NULL if
 * everything was submitted or handled inline, or ERR_PTR() on failure.
 * The page is unlocked here only on the inline/hole/error paths;
 * otherwise read_endio() unlocks it at I/O completion.
 */
static inline struct bio *erofs_read_raw_page(struct bio *bio,
					      struct address_space *mapping,
					      struct page *page,
					      erofs_off_t *last_block,
					      unsigned int nblocks,
					      bool ra)
{
	struct inode *const inode = mapping->host;
	struct super_block *const sb = inode->i_sb;
	erofs_off_t current_block = (erofs_off_t)page->index;
	int err;

	DBG_BUGON(!nblocks);

	if (PageUptodate(page)) {
		err = 0;
		goto has_updated;
	}

	/* note that for readpage case, bio also equals to NULL */
	if (bio &&
	    /* not continuous */
	    *last_block + 1 != current_block) {
submit_bio_retry:
		/* flush the pending bio; a fresh one is built below */
		__submit_bio(bio, REQ_OP_READ, 0);
		bio = NULL;
	}

	if (!bio) {
		struct erofs_map_blocks map = {
			.m_la = blknr_to_addr(current_block),
		};
		erofs_blk_t blknr;
		unsigned int blkoff;

		err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (unlikely(err))
			goto err_out;

		/* zero out the holed page */
		if (unlikely(!(map.m_flags & EROFS_MAP_MAPPED))) {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* for RAW access mode, m_plen must be equal to m_llen */
		DBG_BUGON(map.m_plen != map.m_llen);

		blknr = erofs_blknr(map.m_pa);
		blkoff = erofs_blkoff(map.m_pa);

		/* deal with inline page */
		if (map.m_flags & EROFS_MAP_META) {
			void *vsrc, *vto;
			struct page *ipage;

			DBG_BUGON(map.m_plen > PAGE_SIZE);

			ipage = erofs_get_meta_page(inode->i_sb, blknr, 0);

			if (IS_ERR(ipage)) {
				err = PTR_ERR(ipage);
				goto err_out;
			}

			/* copy inline data and zero-fill the remainder */
			vsrc = kmap_atomic(ipage);
			vto = kmap_atomic(page);
			memcpy(vto, vsrc + blkoff, map.m_plen);
			memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
			kunmap_atomic(vto);
			kunmap_atomic(vsrc);
			flush_dcache_page(page);

			SetPageUptodate(page);
			/* TODO: could we unlock the page earlier? */
			unlock_page(ipage);
			put_page(ipage);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* pa must be block-aligned for raw reading */
		DBG_BUGON(erofs_blkoff(map.m_pa));

		/* max # of continuous pages */
		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
		if (nblocks > BIO_MAX_PAGES)
			nblocks = BIO_MAX_PAGES;

		bio = erofs_grab_bio(sb, blknr, nblocks, sb,
				     read_endio, false);
		if (IS_ERR(bio)) {
			err = PTR_ERR(bio);
			bio = NULL;
			goto err_out;
		}
	}

	err = bio_add_page(bio, page, PAGE_SIZE, 0);
	/* out of the extent or bio is full */
	if (err < PAGE_SIZE)
		goto submit_bio_retry;

	*last_block = current_block;

	/* shift in advance in case of it followed by too many gaps */
	if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
		/* err should reassign to 0 after submitting */
		err = 0;
		goto submit_bio_out;
	}

	return bio;

err_out:
	/* for sync reading, set page error immediately */
	if (!ra) {
		SetPageError(page);
		ClearPageUptodate(page);
	}
has_updated:
	unlock_page(page);

	/* if updated manually, continuous pages has a gap */
	if (bio)
submit_bio_out:
		__submit_bio(bio, REQ_OP_READ, 0);

	return unlikely(err) ? ERR_PTR(err) : NULL;
}

/*
 * since we dont have write or truncate flows, so no inode
 * locking needs to be held at the moment.
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
	erofs_off_t last_block;
	struct bio *bio;

	trace_erofs_readpage(page, true);

	/* nblocks == 1: no read-ahead beyond this single page */
	bio = erofs_read_raw_page(NULL, page->mapping,
				  page, &last_block, 1, false);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	DBG_BUGON(bio);	/* since we have only one bio -- must be NULL */
	return 0;
}

/*
 * ->readpages: pull @nr_pages pages (handed over on @pages, newest
 * first) into the page cache and batch contiguous ones into shared
 * bios via erofs_read_raw_page().  Readahead errors are logged and
 * swallowed; pages stay !uptodate and are retried by ->readpage.
 */
static int erofs_raw_access_readpages(struct file *filp,
				      struct address_space *mapping,
				      struct list_head *pages,
				      unsigned int nr_pages)
{
	erofs_off_t last_block;
	struct bio *bio = NULL;
	gfp_t gfp = readahead_gfp_mask(mapping);
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_erofs_readpages(mapping->host, page, nr_pages, true);

	for (; nr_pages; --nr_pages) {
		page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);

		/* skip pages another context already added to the cache */
		if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			bio = erofs_read_raw_page(bio, mapping, page,
						  &last_block, nr_pages, true);

			/* all the page errors are ignored when readahead */
			if (IS_ERR(bio)) {
				pr_err("%s, readahead error at page %lu of nid %llu\n",
				       __func__, page->index,
				       EROFS_V(mapping->host)->nid);

				bio = NULL;
			}
		}

		/* pages could still be locked */
		put_page(page);
	}
	DBG_BUGON(!list_empty(pages));

	/* the rare case (end in gaps) */
	if (unlikely(bio))
		__submit_bio(bio, REQ_OP_READ, 0);
	return 0;
}

/*
 * get_block_t helper for generic_block_bmap(): resolve @iblock to its
 * on-disk block number; unmapped blocks leave bh->b_blocknr untouched.
 */
static int erofs_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct erofs_map_blocks map = {
		/* NOTE(review): << 9 treats iblock as 512B sectors -- confirm
		 * this matches the blkbits generic_block_bmap() passes in */
		.m_la = iblock << 9,
	};
	int err;

	err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (err)
		return err;

	if (map.m_flags & EROFS_MAP_MAPPED)
		bh->b_blocknr = erofs_blknr(map.m_pa);

	return err;
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	/* the inline tail block has no stand-alone physical address */
	if (is_inode_flat_inline(inode)) {
		erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;

		if (block >> LOG_SECTORS_PER_BLOCK >= blks)
			return 0;
	}

	return generic_block_bmap(mapping, block, erofs_get_block);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_raw_access_readpage,
	.readpages = erofs_raw_access_readpages,
	.bmap = erofs_bmap,
};
