// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2022 Alibaba Cloud
 */
#include "zdata.h"
#include "compress.h"
#include <linux/prefetch.h>
#include <linux/psi.h>

#include <trace/events/erofs.h>

/*
 * since pclustersize is variable for big pcluster feature, introduce slab
 * pools implementation for different pcluster sizes.
 */
struct z_erofs_pcluster_slab {
	struct kmem_cache *slab;
	unsigned int maxpages;
	char name[48];
};

#define _PCLP(n) { .maxpages = n }

static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = {
	_PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128),
	_PCLP(Z_EROFS_PCLUSTER_MAX_PAGES)
};

struct z_erofs_bvec_iter {
	struct page *bvpage;
	struct z_erofs_bvset *bvset;
	unsigned int nr, cur;
};

static struct page *z_erofs_bvec_iter_end(struct z_erofs_bvec_iter *iter)
{
	if (iter->bvpage)
		kunmap_local(iter->bvset);
	return iter->bvpage;
}

static struct page *z_erofs_bvset_flip(struct z_erofs_bvec_iter *iter)
{
	unsigned long base = (unsigned long)((struct z_erofs_bvset *)0)->bvec;
	/* have to access nextpage in advance, otherwise it will be unmapped */
	struct page *nextpage = iter->bvset->nextpage;
	struct page *oldpage;

	DBG_BUGON(!nextpage);
	oldpage = z_erofs_bvec_iter_end(iter);
	iter->bvpage = nextpage;
	iter->bvset = kmap_local_page(nextpage);
	iter->nr = (PAGE_SIZE - base) / sizeof(struct z_erofs_bvec);
	iter->cur = 0;
	return oldpage;
}

static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
				    struct z_erofs_bvset_inline *bvset,
				    unsigned int bootstrap_nr,
				    unsigned int cur)
{
	*iter = (struct z_erofs_bvec_iter) {
		.nr = bootstrap_nr,
		.bvset = (struct z_erofs_bvset *)bvset,
	};

	while (cur > iter->nr) {
		cur -= iter->nr;
		z_erofs_bvset_flip(iter);
	}
	iter->cur = cur;
}

static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
				struct z_erofs_bvec *bvec,
				struct page **candidate_bvpage)
{
	if (iter->cur == iter->nr) {
		if (!*candidate_bvpage)
			return -EAGAIN;

		DBG_BUGON(iter->bvset->nextpage);
		iter->bvset->nextpage = *candidate_bvpage;
		z_erofs_bvset_flip(iter);

		iter->bvset->nextpage = NULL;
		*candidate_bvpage = NULL;
	}
	iter->bvset->bvec[iter->cur++] = *bvec;
	return 0;
}

static void z_erofs_bvec_dequeue(struct z_erofs_bvec_iter *iter,
				 struct z_erofs_bvec *bvec,
				 struct page **old_bvpage)
{
	if (iter->cur == iter->nr)
		*old_bvpage = z_erofs_bvset_flip(iter);
	else
		*old_bvpage = NULL;
	*bvec = iter->bvset->bvec[iter->cur++];
}

static void z_erofs_destroy_pcluster_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
		if (!pcluster_pool[i].slab)
			continue;
		kmem_cache_destroy(pcluster_pool[i].slab);
		pcluster_pool[i].slab = NULL;
	}
}

static int z_erofs_create_pcluster_pool(void)
{
	struct z_erofs_pcluster_slab *pcs;
	struct z_erofs_pcluster *a;
	unsigned int size;

	for (pcs = pcluster_pool;
	     pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
		size = struct_size(a, compressed_bvecs, pcs->maxpages);

		sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages);
		pcs->slab = kmem_cache_create(pcs->name, size, 0,
					      SLAB_RECLAIM_ACCOUNT, NULL);
		if (pcs->slab)
			continue;

		z_erofs_destroy_pcluster_pool();
		return -ENOMEM;
	}
	return 0;
}

static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
		struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
		struct z_erofs_pcluster *pcl;

		if (nrpages > pcs->maxpages)
			continue;

		pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS);
		if (!pcl)
			return ERR_PTR(-ENOMEM);
		pcl->pclusterpages = nrpages;
		return pcl;
	}
	return ERR_PTR(-EINVAL);
}

static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
{
	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
	int i;

	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
		struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;

		if (pclusterpages > pcs->maxpages)
			continue;

		kmem_cache_free(pcs->slab, pcl);
		return;
	}
	DBG_BUGON(1);
}

/*
 * tagged pointer with 1-bit tag for all compressed pages
 * tag 0 - the page is just found with an extra page reference
 */
typedef tagptr1_t compressed_page_t;

#define tag_compressed_page_justfound(page) \
	tagptr_fold(compressed_page_t, page, 1)

static struct workqueue_struct *z_erofs_workqueue __read_mostly;

void z_erofs_exit_zip_subsystem(void)
{
	destroy_workqueue(z_erofs_workqueue);
	z_erofs_destroy_pcluster_pool();
}

static inline int z_erofs_init_workqueue(void)
{
	const unsigned int onlinecpus = num_possible_cpus();

	/*
	 * no need to spawn too many threads; limiting the thread count
	 * minimizes scheduling overhead, although per-CPU threads might
	 * be even better?
	 */
	z_erofs_workqueue = alloc_workqueue("erofs_unzipd",
					    WQ_UNBOUND | WQ_HIGHPRI,
					    onlinecpus + onlinecpus / 4);
	return z_erofs_workqueue ? 0 : -ENOMEM;
}

int __init z_erofs_init_zip_subsystem(void)
{
	int err = z_erofs_create_pcluster_pool();

	if (err)
		return err;
	err = z_erofs_init_workqueue();
	if (err)
		z_erofs_destroy_pcluster_pool();
	return err;
}

enum z_erofs_pclustermode {
	Z_EROFS_PCLUSTER_INFLIGHT,
	/*
	 * The current pcluster is the tail of an existing chain, and the
	 * previously processed chained pclusters have all been decided to
	 * be hooked up to it.
	 * A new chain will be created for the remaining pclusters which are
	 * not processed yet, so unlike Z_EROFS_PCLUSTER_FOLLOWED, the next
	 * pcluster cannot safely reuse the whole page for in-place I/O in
	 * the following scenario:
	 *  ________________________________________________________________
	 * |      tail (partial) page     |       head (partial) page       |
	 * |   (belongs to the next pcl)  |  (belongs to the current pcl)   |
	 * |_______PCLUSTER_FOLLOWED______|________PCLUSTER_HOOKED__________|
	 */
	Z_EROFS_PCLUSTER_HOOKED,
	/*
	 * a weak form of Z_EROFS_PCLUSTER_FOLLOWED; the difference is that it
	 * could be dispatched into the bypass queue later due to up-to-date
	 * managed pages. All related online pages cannot be reused for
	 * in-place I/O (or bvpage) since it can be decoded directly without
	 * I/O submission.
	 */
	Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
	/*
	 * The current collection has been linked with the owned chain, and
	 * could also be linked with the remaining collections, which means
	 * that if the processing page is the tail page of the collection,
	 * the current collection can safely use the whole page (since
	 * the previous collection is under control) for in-place I/O, as
	 * illustrated below:
	 *  ________________________________________________________________
	 * |  tail (partial) page |          head (partial) page           |
	 * |  (of the current cl) |      (of the previous collection)      |
	 * |  PCLUSTER_FOLLOWED or|                                        |
	 * |_____PCLUSTER_HOOKED__|___________PCLUSTER_FOLLOWED____________|
	 *
	 * [  (*) the above page can be used as inplace I/O. ]
	 */
	Z_EROFS_PCLUSTER_FOLLOWED,
};

struct z_erofs_decompress_frontend {
	struct inode *const inode;
	struct erofs_map_blocks map;
	struct z_erofs_bvec_iter biter;

	struct page *candidate_bvpage;
	struct z_erofs_pcluster *pcl, *tailpcl;
	z_erofs_next_pcluster_t owned_head;
	enum z_erofs_pclustermode mode;

	bool readahead;
	/* used for applying cache strategy on the fly */
	bool backmost;
	erofs_off_t headoffset;

	/* a pointer used to pick up inplace I/O pages */
	unsigned int icur;
};

#define DECOMPRESS_FRONTEND_INIT(__i) { \
	.inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
	.mode = Z_EROFS_PCLUSTER_FOLLOWED, .backmost = true }

static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
{
	unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy;

	if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
		return false;

	if (fe->backmost)
		return true;

	if (cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
	    fe->map.m_la < fe->headoffset)
		return true;

	return false;
}

static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
			       struct page **pagepool)
{
	struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
	struct z_erofs_pcluster *pcl = fe->pcl;
	bool shouldalloc = z_erofs_should_alloc_cache(fe);
	bool standalone = true;
	/*
	 * optimistic allocation without direct reclaim, since in-place I/O
	 * can be used instead under memory pressure.
	 */
	gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
			__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
	unsigned int i;

	if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
		return;

	for (i = 0; i < pcl->pclusterpages; ++i) {
		struct page *page;
		compressed_page_t t;
		struct page *newpage = NULL;

		/* the compressed page was loaded before */
		if (READ_ONCE(pcl->compressed_bvecs[i].page))
			continue;

		page = find_get_page(mc, pcl->obj.index + i);

		if (page) {
			t = tag_compressed_page_justfound(page);
		} else {
			/* I/O is needed, not possible to decompress directly */
			standalone = false;
			if (!shouldalloc)
				continue;

			/*
			 * try to use cached I/O if page allocation
			 * succeeds or fallback to in-place I/O instead
			 * to avoid any direct reclaim.
			 */
			newpage = erofs_allocpage(pagepool, gfp);
			if (!newpage)
				continue;
			set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
			t = tag_compressed_page_justfound(newpage);
		}

		if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL,
				     tagptr_cast_ptr(t)))
			continue;

		if (page)
			put_page(page);
		else if (newpage)
			erofs_pagepool_add(pagepool, newpage);
	}

	/*
	 * don't do inplace I/O if all compressed pages are available in
	 * managed cache since it can be moved to the bypass queue instead.
	 */
	if (standalone)
		fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
}

/* called by erofs_shrinker to get rid of all compressed_pages */
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
				       struct erofs_workgroup *grp)
{
	struct z_erofs_pcluster *const pcl =
		container_of(grp, struct z_erofs_pcluster, obj);
	int i;

	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
	/*
	 * the workgroup refcount is now frozen at 1,
	 * therefore no need to worry about available decompression users.
	 */
	for (i = 0; i < pcl->pclusterpages; ++i) {
		struct page *page = pcl->compressed_bvecs[i].page;

		if (!page)
			continue;

		/* block other users from reclaiming or migrating the page */
		if (!trylock_page(page))
			return -EBUSY;

		if (!erofs_page_is_managed(sbi, page))
			continue;

		/* barrier is implied in the following 'unlock_page' */
		WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
		detach_page_private(page);
		unlock_page(page);
	}
	return 0;
}

int erofs_try_to_free_cached_page(struct page *page)
{
	struct z_erofs_pcluster *const pcl = (void *)page_private(page);
	int ret, i;

	if (!erofs_workgroup_try_to_freeze(&pcl->obj, 1))
		return 0;

	ret = 0;
	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
	for (i = 0; i < pcl->pclusterpages; ++i) {
		if (pcl->compressed_bvecs[i].page == page) {
			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
			ret = 1;
			break;
		}
	}
	erofs_workgroup_unfreeze(&pcl->obj, 1);
	if (ret)
		detach_page_private(page);
	return ret;
}

static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
				   struct z_erofs_bvec *bvec)
{
	struct z_erofs_pcluster *const pcl = fe->pcl;

	while (fe->icur > 0) {
		if (!cmpxchg(&pcl->compressed_bvecs[--fe->icur].page,
			     NULL, bvec->page)) {
			pcl->compressed_bvecs[fe->icur] = *bvec;
			return true;
		}
	}
	return false;
}

/* callers must hold the pcluster lock */
static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
			       struct z_erofs_bvec *bvec, bool exclusive)
{
	int ret;

	if (exclusive) {
		/* give priority to in-place I/O using file pages first */
		if (z_erofs_try_inplace_io(fe, bvec))
			return 0;
		/* otherwise, check if it can be used as a bvpage */
		if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
		    !fe->candidate_bvpage)
			fe->candidate_bvpage = bvec->page;
	}
	ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage);
	fe->pcl->vcnt += (ret >= 0);
	return ret;
}

static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
{
	struct z_erofs_pcluster *pcl = f->pcl;
	z_erofs_next_pcluster_t *owned_head = &f->owned_head;

	/* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */
	if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
		    *owned_head) == Z_EROFS_PCLUSTER_NIL) {
		*owned_head = &pcl->next;
		/* so we can attach this pcluster to our submission chain. */
		f->mode = Z_EROFS_PCLUSTER_FOLLOWED;
		return;
	}

	/*
	 * type 2, link to the end of an existing open chain, be careful
	 * that its submission is controlled by the original attached chain.
	 */
	if (*owned_head != &pcl->next && pcl != f->tailpcl &&
	    cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
		    *owned_head) == Z_EROFS_PCLUSTER_TAIL) {
		*owned_head = Z_EROFS_PCLUSTER_TAIL;
		f->mode = Z_EROFS_PCLUSTER_HOOKED;
		f->tailpcl = NULL;
		return;
	}
	/* type 3, it belongs to a chain, but it isn't the end of the chain */
	f->mode = Z_EROFS_PCLUSTER_INFLIGHT;
}

static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
{
	struct erofs_map_blocks *map = &fe->map;
	bool ztailpacking = map->m_flags & EROFS_MAP_META;
	struct z_erofs_pcluster *pcl;
	struct erofs_workgroup *grp;
	int err;

	if (!(map->m_flags & EROFS_MAP_ENCODED) ||
	    (!ztailpacking && !(map->m_pa >> PAGE_SHIFT))) {
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	/* no available pcluster, let's allocate one */
	pcl = z_erofs_alloc_pcluster(ztailpacking ? 1 :
				     map->m_plen >> PAGE_SHIFT);
	if (IS_ERR(pcl))
		return PTR_ERR(pcl);

	atomic_set(&pcl->obj.refcount, 1);
	pcl->algorithmformat = map->m_algorithmformat;
	pcl->length = 0;
	pcl->partial = true;

	/* new pclusters should be claimed as type 1, primary and followed */
	pcl->next = fe->owned_head;
	pcl->pageofs_out = map->m_la & ~PAGE_MASK;
	fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;

	/*
	 * lock all primary followed works before they are visible to others;
	 * mutex_trylock *never* fails for a new pcluster.
	 */
	mutex_init(&pcl->lock);
	DBG_BUGON(!mutex_trylock(&pcl->lock));

	if (ztailpacking) {
		pcl->obj.index = 0;	/* which indicates ztailpacking */
		pcl->pageofs_in = erofs_blkoff(map->m_pa);
		pcl->tailpacking_size = map->m_plen;
	} else {
		pcl->obj.index = map->m_pa >> PAGE_SHIFT;

		grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj);
		if (IS_ERR(grp)) {
			err = PTR_ERR(grp);
			goto err_out;
		}

		if (grp != &pcl->obj) {
			fe->pcl = container_of(grp,
					struct z_erofs_pcluster, obj);
			err = -EEXIST;
			goto err_out;
		}
	}
	/* used to check tail merging loop due to corrupted images */
	if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
		fe->tailpcl = pcl;
	fe->owned_head = &pcl->next;
	fe->pcl = pcl;
	return 0;

err_out:
	mutex_unlock(&pcl->lock);
	z_erofs_free_pcluster(pcl);
	return err;
}

static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe)
{
	struct erofs_map_blocks *map = &fe->map;
	struct erofs_workgroup *grp = NULL;
	int ret;

	DBG_BUGON(fe->pcl);

	/* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */
	DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
	DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);

	if (!(map->m_flags & EROFS_MAP_META)) {
		grp = erofs_find_workgroup(fe->inode->i_sb,
					   map->m_pa >> PAGE_SHIFT);
	} else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	if (grp) {
		fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
		ret = -EEXIST;
	} else {
		ret = z_erofs_register_pcluster(fe);
	}

	if (ret == -EEXIST) {
		mutex_lock(&fe->pcl->lock);
		/* used to check tail merging loop due to corrupted images */
		if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
			fe->tailpcl = fe->pcl;

		z_erofs_try_to_claim_pcluster(fe);
	} else if (ret) {
		return ret;
	}
	z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
				Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
	/* since file-backed online pages are traversed in reverse order */
	fe->icur = z_erofs_pclusterpages(fe->pcl);
	return 0;
}

/*
 * keep in mind that referenced pclusters are freed
 * only after an RCU grace period.
 */
static void z_erofs_rcu_callback(struct rcu_head *head)
{
	z_erofs_free_pcluster(container_of(head,
			struct z_erofs_pcluster, rcu));
}

void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
{
	struct z_erofs_pcluster *const pcl =
		container_of(grp, struct z_erofs_pcluster, obj);

	call_rcu(&pcl->rcu, z_erofs_rcu_callback);
}

static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
{
	struct z_erofs_pcluster *pcl = fe->pcl;

	if (!pcl)
		return false;

	z_erofs_bvec_iter_end(&fe->biter);
	mutex_unlock(&pcl->lock);

	if (fe->candidate_bvpage) {
		DBG_BUGON(z_erofs_is_shortlived_page(fe->candidate_bvpage));
		fe->candidate_bvpage = NULL;
	}

	/*
	 * if all pending pages are added, don't hold its reference
	 * any longer if the pcluster isn't hosted by ourselves.
	 */
	if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
		erofs_workgroup_put(&pcl->obj);

	fe->pcl = NULL;
	return true;
}

static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos,
				 struct page *page, unsigned int pageofs,
				 unsigned int len)
{
	struct inode *packed_inode = EROFS_I_SB(inode)->packed_inode;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	u8 *src, *dst;
	unsigned int i, cnt;

	if (!packed_inode)
		return -EFSCORRUPTED;

	pos += EROFS_I(inode)->z_fragmentoff;
	for (i = 0; i < len; i += cnt) {
		cnt = min_t(unsigned int, len - i,
			    EROFS_BLKSIZ - erofs_blkoff(pos));
		src = erofs_bread(&buf, packed_inode,
				  erofs_blknr(pos), EROFS_KMAP);
		if (IS_ERR(src)) {
			erofs_put_metabuf(&buf);
			return PTR_ERR(src);
		}

		dst = kmap_local_page(page);
		memcpy(dst + pageofs + i, src + erofs_blkoff(pos), cnt);
		kunmap_local(dst);
		pos += cnt;
	}
	erofs_put_metabuf(&buf);
	return 0;
}

static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
				struct
page *page, struct page **pagepool) 68347e4937aSGao Xiang { 68447e4937aSGao Xiang struct inode *const inode = fe->inode; 68547e4937aSGao Xiang struct erofs_map_blocks *const map = &fe->map; 68647e4937aSGao Xiang const loff_t offset = page_offset(page); 6875b220b20SGao Xiang bool tight = true, exclusive; 6882bfab9c0SGao Xiang unsigned int cur, end, spiltted; 68947e4937aSGao Xiang int err = 0; 69047e4937aSGao Xiang 69147e4937aSGao Xiang /* register locked file pages as online pages in pack */ 69247e4937aSGao Xiang z_erofs_onlinepage_init(page); 69347e4937aSGao Xiang 69447e4937aSGao Xiang spiltted = 0; 69547e4937aSGao Xiang end = PAGE_SIZE; 69647e4937aSGao Xiang repeat: 69747e4937aSGao Xiang cur = end - 1; 69847e4937aSGao Xiang 69939397a46SGao Xiang if (offset + cur < map->m_la || 70039397a46SGao Xiang offset + cur >= map->m_la + map->m_llen) { 70139397a46SGao Xiang erofs_dbg("out-of-range map @ pos %llu", offset + cur); 70247e4937aSGao Xiang 7035c6dcc57SGao Xiang if (z_erofs_collector_end(fe)) 70447e4937aSGao Xiang fe->backmost = false; 70547e4937aSGao Xiang map->m_la = offset + cur; 70647e4937aSGao Xiang map->m_llen = 0; 70747e4937aSGao Xiang err = z_erofs_map_blocks_iter(inode, map, 0); 7088d8a09b0SGao Xiang if (err) 70967148551SGao Xiang goto out; 71039397a46SGao Xiang } else { 71139397a46SGao Xiang if (fe->pcl) 71239397a46SGao Xiang goto hitted; 71339397a46SGao Xiang /* didn't get a valid pcluster previously (very rare) */ 71439397a46SGao Xiang } 71547e4937aSGao Xiang 716b15b2e30SYue Hu if (!(map->m_flags & EROFS_MAP_MAPPED) || 717b15b2e30SYue Hu map->m_flags & EROFS_MAP_FRAGMENT) 71847e4937aSGao Xiang goto hitted; 71947e4937aSGao Xiang 72083a386c0SGao Xiang err = z_erofs_collector_begin(fe); 7218d8a09b0SGao Xiang if (err) 72267148551SGao Xiang goto out; 72347e4937aSGao Xiang 7245c6dcc57SGao Xiang if (z_erofs_is_inline_pcluster(fe->pcl)) { 72509c54379SGao Xiang void *mp; 726cecf864dSYue Hu 72709c54379SGao Xiang mp = erofs_read_metabuf(&fe->map.buf, inode->i_sb, 72809c54379SGao Xiang erofs_blknr(map->m_pa), EROFS_NO_KMAP); 72909c54379SGao Xiang if (IS_ERR(mp)) { 73009c54379SGao Xiang err = PTR_ERR(mp); 731cecf864dSYue Hu erofs_err(inode->i_sb, 732cecf864dSYue Hu "failed to get inline page, err %d", err); 73367148551SGao Xiang goto out; 734cecf864dSYue Hu } 73509c54379SGao Xiang get_page(fe->map.buf.page); 736ed722fbcSGao Xiang WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, 737ed722fbcSGao Xiang fe->map.buf.page); 738db166fc2SGao Xiang fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE; 739cecf864dSYue Hu } else { 7406f39d1e1SGao Xiang /* bind cache first when cached decompression is preferred */ 7411282dea3SGao Xiang z_erofs_bind_cache(fe, pagepool); 742cecf864dSYue Hu } 74347e4937aSGao Xiang hitted: 744dc76ea8cSGao Xiang /* 745dc76ea8cSGao Xiang * Ensure the current partial page belongs to this submit chain rather 746dc76ea8cSGao Xiang * than other concurrent submit chains or the noio(bypass) chain since 747dc76ea8cSGao Xiang * those chains are handled asynchronously thus the page cannot be used 748387bab87SGao Xiang * for inplace I/O or bvpage (should be processed in a strict order.) 
749dc76ea8cSGao Xiang */ 750db166fc2SGao Xiang tight &= (fe->mode >= Z_EROFS_PCLUSTER_HOOKED && 751db166fc2SGao Xiang fe->mode != Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE); 752dc76ea8cSGao Xiang 75347e4937aSGao Xiang cur = end - min_t(unsigned int, offset + end - map->m_la, end); 7548d8a09b0SGao Xiang if (!(map->m_flags & EROFS_MAP_MAPPED)) { 75547e4937aSGao Xiang zero_user_segment(page, cur, end); 75647e4937aSGao Xiang goto next_part; 75747e4937aSGao Xiang } 758b15b2e30SYue Hu if (map->m_flags & EROFS_MAP_FRAGMENT) { 759b15b2e30SYue Hu unsigned int pageofs, skip, len; 760b15b2e30SYue Hu 761b15b2e30SYue Hu if (offset > map->m_la) { 762b15b2e30SYue Hu pageofs = 0; 763b15b2e30SYue Hu skip = offset - map->m_la; 764b15b2e30SYue Hu } else { 765b15b2e30SYue Hu pageofs = map->m_la & ~PAGE_MASK; 766b15b2e30SYue Hu skip = 0; 767b15b2e30SYue Hu } 768b15b2e30SYue Hu len = min_t(unsigned int, map->m_llen - skip, end - cur); 769b15b2e30SYue Hu err = z_erofs_read_fragment(inode, skip, page, pageofs, len); 770b15b2e30SYue Hu if (err) 771b15b2e30SYue Hu goto out; 772b15b2e30SYue Hu ++spiltted; 773b15b2e30SYue Hu tight = false; 774b15b2e30SYue Hu goto next_part; 775b15b2e30SYue Hu } 77647e4937aSGao Xiang 7775b220b20SGao Xiang exclusive = (!cur && (!spiltted || tight)); 77847e4937aSGao Xiang if (cur) 779db166fc2SGao Xiang tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED); 78047e4937aSGao Xiang 78147e4937aSGao Xiang retry: 78206a304cdSGao Xiang err = z_erofs_attach_page(fe, &((struct z_erofs_bvec) { 78306a304cdSGao Xiang .page = page, 78406a304cdSGao Xiang .offset = offset - map->m_la, 78506a304cdSGao Xiang .end = end, 7865b220b20SGao Xiang }), exclusive); 78706a304cdSGao Xiang /* should allocate an additional short-lived page for bvset */ 78806a304cdSGao Xiang if (err == -EAGAIN && !fe->candidate_bvpage) { 78906a304cdSGao Xiang fe->candidate_bvpage = alloc_page(GFP_NOFS | __GFP_NOFAIL); 79006a304cdSGao Xiang set_page_private(fe->candidate_bvpage, 79106a304cdSGao Xiang Z_EROFS_SHORTLIVED_PAGE); 79247e4937aSGao Xiang goto retry; 79347e4937aSGao Xiang } 79447e4937aSGao Xiang 79506a304cdSGao Xiang if (err) { 79606a304cdSGao Xiang DBG_BUGON(err == -EAGAIN && fe->candidate_bvpage); 79767148551SGao Xiang goto out; 79806a304cdSGao Xiang } 79947e4937aSGao Xiang 80067148551SGao Xiang z_erofs_onlinepage_split(page); 80147e4937aSGao Xiang /* bump up the number of spiltted parts of a page */ 80247e4937aSGao Xiang ++spiltted; 803267f2492SGao Xiang if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK)) 804267f2492SGao Xiang fe->pcl->multibases = true; 8052bfab9c0SGao Xiang if (fe->pcl->length < offset + end - map->m_la) { 8062bfab9c0SGao Xiang fe->pcl->length = offset + end - map->m_la; 8072bfab9c0SGao Xiang fe->pcl->pageofs_out = map->m_la & ~PAGE_MASK; 8082bfab9c0SGao Xiang } 809e7933278SGao Xiang if ((map->m_flags & EROFS_MAP_FULL_MAPPED) && 810e7933278SGao Xiang !(map->m_flags & EROFS_MAP_PARTIAL_REF) && 811e7933278SGao Xiang fe->pcl->length == map->m_llen) 812e7933278SGao Xiang fe->pcl->partial = false; 81347e4937aSGao Xiang next_part: 8142bfab9c0SGao Xiang /* shorten the remaining extent to update progress */ 81547e4937aSGao Xiang map->m_llen = offset + cur - map->m_la; 8162bfab9c0SGao Xiang map->m_flags &= ~EROFS_MAP_FULL_MAPPED; 81747e4937aSGao Xiang 81847e4937aSGao Xiang end = cur; 81947e4937aSGao Xiang if (end > 0) 82047e4937aSGao Xiang goto repeat; 82147e4937aSGao Xiang 82247e4937aSGao Xiang out: 82367148551SGao Xiang if (err) 82467148551SGao Xiang z_erofs_page_mark_eio(page); 82547e4937aSGao Xiang 
z_erofs_onlinepage_endio(page); 82647e4937aSGao Xiang 8274f761fa2SGao Xiang erofs_dbg("%s, finish page: %pK spiltted: %u map->m_llen %llu", 82847e4937aSGao Xiang __func__, page, spiltted, map->m_llen); 82947e4937aSGao Xiang return err; 83047e4937aSGao Xiang } 83147e4937aSGao Xiang 83240452ffcSHuang Jianan static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi, 83340452ffcSHuang Jianan unsigned int readahead_pages) 83440452ffcSHuang Jianan { 835a2e20a25SMatthew Wilcox (Oracle) /* auto: enable for read_folio, disable for readahead */ 83640452ffcSHuang Jianan if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) && 83740452ffcSHuang Jianan !readahead_pages) 83840452ffcSHuang Jianan return true; 83940452ffcSHuang Jianan 84040452ffcSHuang Jianan if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON) && 84140452ffcSHuang Jianan (readahead_pages <= sbi->opt.max_sync_decompress_pages)) 84240452ffcSHuang Jianan return true; 84340452ffcSHuang Jianan 84440452ffcSHuang Jianan return false; 84540452ffcSHuang Jianan } 84640452ffcSHuang Jianan 8476aaa7b06SGao Xiang static bool z_erofs_page_is_invalidated(struct page *page) 8486aaa7b06SGao Xiang { 8496aaa7b06SGao Xiang return !page->mapping && !z_erofs_is_shortlived_page(page); 8506aaa7b06SGao Xiang } 8516aaa7b06SGao Xiang 8524f05687fSGao Xiang struct z_erofs_decompress_backend { 8534f05687fSGao Xiang struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES]; 8544f05687fSGao Xiang struct super_block *sb; 8554f05687fSGao Xiang struct z_erofs_pcluster *pcl; 8564f05687fSGao Xiang 8574f05687fSGao Xiang /* pages with the longest decompressed length for deduplication */ 8584f05687fSGao Xiang struct page **decompressed_pages; 8594f05687fSGao Xiang /* pages to keep the compressed data */ 8604f05687fSGao Xiang struct page **compressed_pages; 8614f05687fSGao Xiang 862267f2492SGao Xiang struct list_head decompressed_secondary_bvecs; 8634f05687fSGao Xiang struct page **pagepool; 8642bfab9c0SGao Xiang unsigned int onstack_used, nr_pages; 8654f05687fSGao Xiang }; 8664f05687fSGao Xiang 867267f2492SGao Xiang struct z_erofs_bvec_item { 868267f2492SGao Xiang struct z_erofs_bvec bvec; 869267f2492SGao Xiang struct list_head list; 870267f2492SGao Xiang }; 871267f2492SGao Xiang 872267f2492SGao Xiang static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be, 8733fe96ee0SGao Xiang struct z_erofs_bvec *bvec) 8743fe96ee0SGao Xiang { 875267f2492SGao Xiang struct z_erofs_bvec_item *item; 876267f2492SGao Xiang 877267f2492SGao Xiang if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK)) { 878267f2492SGao Xiang unsigned int pgnr; 8793fe96ee0SGao Xiang 880267f2492SGao Xiang pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT; 8812bfab9c0SGao Xiang DBG_BUGON(pgnr >= be->nr_pages); 88263bbb856SGao Xiang if (!be->decompressed_pages[pgnr]) { 8833fe96ee0SGao Xiang be->decompressed_pages[pgnr] = bvec->page; 884267f2492SGao Xiang return; 8853fe96ee0SGao Xiang } 88663bbb856SGao Xiang } 8873fe96ee0SGao Xiang 888267f2492SGao Xiang /* (cold path) one pcluster is requested multiple times */ 889267f2492SGao Xiang item = kmalloc(sizeof(*item), GFP_KERNEL | __GFP_NOFAIL); 890267f2492SGao Xiang item->bvec = *bvec; 891267f2492SGao Xiang list_add(&item->list, &be->decompressed_secondary_bvecs); 892267f2492SGao Xiang } 893267f2492SGao Xiang 894267f2492SGao Xiang static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be, 895267f2492SGao Xiang int err) 896267f2492SGao Xiang { 897267f2492SGao Xiang unsigned int off0 = 
be->pcl->pageofs_out; 898267f2492SGao Xiang struct list_head *p, *n; 899267f2492SGao Xiang 900267f2492SGao Xiang list_for_each_safe(p, n, &be->decompressed_secondary_bvecs) { 901267f2492SGao Xiang struct z_erofs_bvec_item *bvi; 902267f2492SGao Xiang unsigned int end, cur; 903267f2492SGao Xiang void *dst, *src; 904267f2492SGao Xiang 905267f2492SGao Xiang bvi = container_of(p, struct z_erofs_bvec_item, list); 906267f2492SGao Xiang cur = bvi->bvec.offset < 0 ? -bvi->bvec.offset : 0; 907267f2492SGao Xiang end = min_t(unsigned int, be->pcl->length - bvi->bvec.offset, 908267f2492SGao Xiang bvi->bvec.end); 909267f2492SGao Xiang dst = kmap_local_page(bvi->bvec.page); 910267f2492SGao Xiang while (cur < end) { 911267f2492SGao Xiang unsigned int pgnr, scur, len; 912267f2492SGao Xiang 913267f2492SGao Xiang pgnr = (bvi->bvec.offset + cur + off0) >> PAGE_SHIFT; 914267f2492SGao Xiang DBG_BUGON(pgnr >= be->nr_pages); 915267f2492SGao Xiang 916267f2492SGao Xiang scur = bvi->bvec.offset + cur - 917267f2492SGao Xiang ((pgnr << PAGE_SHIFT) - off0); 918267f2492SGao Xiang len = min_t(unsigned int, end - cur, PAGE_SIZE - scur); 919267f2492SGao Xiang if (!be->decompressed_pages[pgnr]) { 920267f2492SGao Xiang err = -EFSCORRUPTED; 921267f2492SGao Xiang cur += len; 922267f2492SGao Xiang continue; 923267f2492SGao Xiang } 924267f2492SGao Xiang src = kmap_local_page(be->decompressed_pages[pgnr]); 925267f2492SGao Xiang memcpy(dst + cur, src + scur, len); 926267f2492SGao Xiang kunmap_local(src); 927267f2492SGao Xiang cur += len; 928267f2492SGao Xiang } 929267f2492SGao Xiang kunmap_local(dst); 930267f2492SGao Xiang if (err) 931267f2492SGao Xiang z_erofs_page_mark_eio(bvi->bvec.page); 932267f2492SGao Xiang z_erofs_onlinepage_endio(bvi->bvec.page); 933267f2492SGao Xiang list_del(p); 934267f2492SGao Xiang kfree(bvi); 935267f2492SGao Xiang } 936267f2492SGao Xiang } 937267f2492SGao Xiang 938267f2492SGao Xiang static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be) 93942fec235SGao Xiang { 9404f05687fSGao Xiang struct z_erofs_pcluster *pcl = be->pcl; 94106a304cdSGao Xiang struct z_erofs_bvec_iter biter; 94206a304cdSGao Xiang struct page *old_bvpage; 943267f2492SGao Xiang int i; 94442fec235SGao Xiang 945387bab87SGao Xiang z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0); 94642fec235SGao Xiang for (i = 0; i < pcl->vcnt; ++i) { 94706a304cdSGao Xiang struct z_erofs_bvec bvec; 94842fec235SGao Xiang 94906a304cdSGao Xiang z_erofs_bvec_dequeue(&biter, &bvec, &old_bvpage); 95042fec235SGao Xiang 95106a304cdSGao Xiang if (old_bvpage) 9524f05687fSGao Xiang z_erofs_put_shortlivedpage(be->pagepool, old_bvpage); 95342fec235SGao Xiang 95406a304cdSGao Xiang DBG_BUGON(z_erofs_page_is_invalidated(bvec.page)); 955267f2492SGao Xiang z_erofs_do_decompressed_bvec(be, &bvec); 95642fec235SGao Xiang } 95706a304cdSGao Xiang 95806a304cdSGao Xiang old_bvpage = z_erofs_bvec_iter_end(&biter); 95906a304cdSGao Xiang if (old_bvpage) 9604f05687fSGao Xiang z_erofs_put_shortlivedpage(be->pagepool, old_bvpage); 96142fec235SGao Xiang } 96242fec235SGao Xiang 9634f05687fSGao Xiang static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be, 9644f05687fSGao Xiang bool *overlapped) 96567139e36SGao Xiang { 9664f05687fSGao Xiang struct z_erofs_pcluster *pcl = be->pcl; 96767139e36SGao Xiang unsigned int pclusterpages = z_erofs_pclusterpages(pcl); 96867139e36SGao Xiang int i, err = 0; 96967139e36SGao Xiang 97067139e36SGao Xiang *overlapped = false; 97167139e36SGao Xiang for (i = 0; i < pclusterpages; ++i) { 972ed722fbcSGao 
Xiang struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i]; 973ed722fbcSGao Xiang struct page *page = bvec->page; 97467139e36SGao Xiang 97567139e36SGao Xiang /* compressed pages ought to be present before decompressing */ 97667139e36SGao Xiang if (!page) { 97767139e36SGao Xiang DBG_BUGON(1); 97867139e36SGao Xiang continue; 97967139e36SGao Xiang } 980fe3e5914SGao Xiang be->compressed_pages[i] = page; 98167139e36SGao Xiang 98267139e36SGao Xiang if (z_erofs_is_inline_pcluster(pcl)) { 98367139e36SGao Xiang if (!PageUptodate(page)) 98467139e36SGao Xiang err = -EIO; 98567139e36SGao Xiang continue; 98667139e36SGao Xiang } 98767139e36SGao Xiang 98867139e36SGao Xiang DBG_BUGON(z_erofs_page_is_invalidated(page)); 98967139e36SGao Xiang if (!z_erofs_is_shortlived_page(page)) { 9904f05687fSGao Xiang if (erofs_page_is_managed(EROFS_SB(be->sb), page)) { 99167139e36SGao Xiang if (!PageUptodate(page)) 99267139e36SGao Xiang err = -EIO; 99367139e36SGao Xiang continue; 99467139e36SGao Xiang } 995267f2492SGao Xiang z_erofs_do_decompressed_bvec(be, bvec); 99667139e36SGao Xiang *overlapped = true; 99767139e36SGao Xiang } 99867139e36SGao Xiang } 99967139e36SGao Xiang 1000fe3e5914SGao Xiang if (err) 10014f05687fSGao Xiang return err; 10024f05687fSGao Xiang return 0; 100367139e36SGao Xiang } 100467139e36SGao Xiang 10054f05687fSGao Xiang static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, 10064f05687fSGao Xiang int err) 100747e4937aSGao Xiang { 10084f05687fSGao Xiang struct erofs_sb_info *const sbi = EROFS_SB(be->sb); 10094f05687fSGao Xiang struct z_erofs_pcluster *pcl = be->pcl; 1010cecf864dSYue Hu unsigned int pclusterpages = z_erofs_pclusterpages(pcl); 10112bfab9c0SGao Xiang unsigned int i, inputsize; 101267148551SGao Xiang int err2; 10132bfab9c0SGao Xiang struct page *page; 10142bfab9c0SGao Xiang bool overlapped; 101547e4937aSGao Xiang 101687ca34a7SGao Xiang mutex_lock(&pcl->lock); 10172bfab9c0SGao Xiang be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT; 101847e4937aSGao Xiang 1019fe3e5914SGao Xiang /* allocate (de)compressed page arrays if cannot be kept on stack */ 1020fe3e5914SGao Xiang be->decompressed_pages = NULL; 1021fe3e5914SGao Xiang be->compressed_pages = NULL; 1022fe3e5914SGao Xiang be->onstack_used = 0; 10232bfab9c0SGao Xiang if (be->nr_pages <= Z_EROFS_ONSTACK_PAGES) { 10244f05687fSGao Xiang be->decompressed_pages = be->onstack_pages; 10252bfab9c0SGao Xiang be->onstack_used = be->nr_pages; 10264f05687fSGao Xiang memset(be->decompressed_pages, 0, 10272bfab9c0SGao Xiang sizeof(struct page *) * be->nr_pages); 1028fe3e5914SGao Xiang } 1029fe3e5914SGao Xiang 1030fe3e5914SGao Xiang if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES) 1031fe3e5914SGao Xiang be->compressed_pages = be->onstack_pages + be->onstack_used; 1032fe3e5914SGao Xiang 1033fe3e5914SGao Xiang if (!be->decompressed_pages) 10344f05687fSGao Xiang be->decompressed_pages = 10352bfab9c0SGao Xiang kvcalloc(be->nr_pages, sizeof(struct page *), 1036e7368187SGao Xiang GFP_KERNEL | __GFP_NOFAIL); 1037fe3e5914SGao Xiang if (!be->compressed_pages) 1038fe3e5914SGao Xiang be->compressed_pages = 1039fe3e5914SGao Xiang kvcalloc(pclusterpages, sizeof(struct page *), 1040fe3e5914SGao Xiang GFP_KERNEL | __GFP_NOFAIL); 104147e4937aSGao Xiang 1042267f2492SGao Xiang z_erofs_parse_out_bvecs(be); 10434f05687fSGao Xiang err2 = z_erofs_parse_in_bvecs(be, &overlapped); 10444f05687fSGao Xiang if (err2) 10454f05687fSGao Xiang err = err2; 10468d8a09b0SGao Xiang if (err) 104747e4937aSGao Xiang goto out; 
104847e4937aSGao Xiang 1049cecf864dSYue Hu if (z_erofs_is_inline_pcluster(pcl)) 1050cecf864dSYue Hu inputsize = pcl->tailpacking_size; 1051cecf864dSYue Hu else 1052cecf864dSYue Hu inputsize = pclusterpages * PAGE_SIZE; 1053cecf864dSYue Hu 105447e4937aSGao Xiang err = z_erofs_decompress(&(struct z_erofs_decompress_req) { 10554f05687fSGao Xiang .sb = be->sb, 10564f05687fSGao Xiang .in = be->compressed_pages, 10574f05687fSGao Xiang .out = be->decompressed_pages, 1058cecf864dSYue Hu .pageofs_in = pcl->pageofs_in, 105987ca34a7SGao Xiang .pageofs_out = pcl->pageofs_out, 10609f6cc76eSGao Xiang .inputsize = inputsize, 10612bfab9c0SGao Xiang .outputsize = pcl->length, 106247e4937aSGao Xiang .alg = pcl->algorithmformat, 106347e4937aSGao Xiang .inplace_io = overlapped, 10642bfab9c0SGao Xiang .partial_decoding = pcl->partial, 1065267f2492SGao Xiang .fillgaps = pcl->multibases, 10664f05687fSGao Xiang }, be->pagepool); 106747e4937aSGao Xiang 106847e4937aSGao Xiang out: 1069cecf864dSYue Hu /* must handle all compressed pages before actual file pages */ 1070cecf864dSYue Hu if (z_erofs_is_inline_pcluster(pcl)) { 1071ed722fbcSGao Xiang page = pcl->compressed_bvecs[0].page; 1072ed722fbcSGao Xiang WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL); 1073cecf864dSYue Hu put_page(page); 1074cecf864dSYue Hu } else { 1075cecf864dSYue Hu for (i = 0; i < pclusterpages; ++i) { 1076ed722fbcSGao Xiang page = pcl->compressed_bvecs[i].page; 107747e4937aSGao Xiang 107847e4937aSGao Xiang if (erofs_page_is_managed(sbi, page)) 107947e4937aSGao Xiang continue; 108047e4937aSGao Xiang 10816aaa7b06SGao Xiang /* recycle all individual short-lived pages */ 10824f05687fSGao Xiang (void)z_erofs_put_shortlivedpage(be->pagepool, page); 1083ed722fbcSGao Xiang WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL); 108447e4937aSGao Xiang } 1085cecf864dSYue Hu } 1086fe3e5914SGao Xiang if (be->compressed_pages < be->onstack_pages || 1087fe3e5914SGao Xiang be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES) 1088fe3e5914SGao Xiang kvfree(be->compressed_pages); 1089267f2492SGao Xiang z_erofs_fill_other_copies(be, err); 109047e4937aSGao Xiang 10912bfab9c0SGao Xiang for (i = 0; i < be->nr_pages; ++i) { 10924f05687fSGao Xiang page = be->decompressed_pages[i]; 109347e4937aSGao Xiang if (!page) 109447e4937aSGao Xiang continue; 109547e4937aSGao Xiang 10966aaa7b06SGao Xiang DBG_BUGON(z_erofs_page_is_invalidated(page)); 109747e4937aSGao Xiang 10986aaa7b06SGao Xiang /* recycle all individual short-lived pages */ 10994f05687fSGao Xiang if (z_erofs_put_shortlivedpage(be->pagepool, page)) 110047e4937aSGao Xiang continue; 110167148551SGao Xiang if (err) 110267148551SGao Xiang z_erofs_page_mark_eio(page); 110347e4937aSGao Xiang z_erofs_onlinepage_endio(page); 110447e4937aSGao Xiang } 110547e4937aSGao Xiang 11064f05687fSGao Xiang if (be->decompressed_pages != be->onstack_pages) 11074f05687fSGao Xiang kvfree(be->decompressed_pages); 110847e4937aSGao Xiang 11092bfab9c0SGao Xiang pcl->length = 0; 11102bfab9c0SGao Xiang pcl->partial = true; 1111267f2492SGao Xiang pcl->multibases = false; 111206a304cdSGao Xiang pcl->bvset.nextpage = NULL; 111387ca34a7SGao Xiang pcl->vcnt = 0; 111447e4937aSGao Xiang 111587ca34a7SGao Xiang /* pcluster lock MUST be taken before the following line */ 111647e4937aSGao Xiang WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL); 111787ca34a7SGao Xiang mutex_unlock(&pcl->lock); 111847e4937aSGao Xiang return err; 111947e4937aSGao Xiang } 112047e4937aSGao Xiang 11210c638f70SGao Xiang static void z_erofs_decompress_queue(const struct 
z_erofs_decompressqueue *io, 1122eaa9172aSGao Xiang struct page **pagepool) 112347e4937aSGao Xiang { 11244f05687fSGao Xiang struct z_erofs_decompress_backend be = { 11254f05687fSGao Xiang .sb = io->sb, 11264f05687fSGao Xiang .pagepool = pagepool, 1127267f2492SGao Xiang .decompressed_secondary_bvecs = 1128267f2492SGao Xiang LIST_HEAD_INIT(be.decompressed_secondary_bvecs), 11294f05687fSGao Xiang }; 113047e4937aSGao Xiang z_erofs_next_pcluster_t owned = io->head; 113147e4937aSGao Xiang 113247e4937aSGao Xiang while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) { 11334f05687fSGao Xiang /* impossible that 'owned' equals Z_EROFS_WORK_TPTR_TAIL */ 113447e4937aSGao Xiang DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL); 11354f05687fSGao Xiang /* impossible that 'owned' equals Z_EROFS_PCLUSTER_NIL */ 113647e4937aSGao Xiang DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL); 113747e4937aSGao Xiang 11384f05687fSGao Xiang be.pcl = container_of(owned, struct z_erofs_pcluster, next); 11394f05687fSGao Xiang owned = READ_ONCE(be.pcl->next); 114047e4937aSGao Xiang 11414f05687fSGao Xiang z_erofs_decompress_pcluster(&be, io->eio ? -EIO : 0); 11424f05687fSGao Xiang erofs_workgroup_put(&be.pcl->obj); 114347e4937aSGao Xiang } 114447e4937aSGao Xiang } 114547e4937aSGao Xiang 11460c638f70SGao Xiang static void z_erofs_decompressqueue_work(struct work_struct *work) 114747e4937aSGao Xiang { 1148a4b1fab1SGao Xiang struct z_erofs_decompressqueue *bgq = 1149a4b1fab1SGao Xiang container_of(work, struct z_erofs_decompressqueue, u.work); 1150eaa9172aSGao Xiang struct page *pagepool = NULL; 115147e4937aSGao Xiang 1152a4b1fab1SGao Xiang DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED); 11530c638f70SGao Xiang z_erofs_decompress_queue(bgq, &pagepool); 115447e4937aSGao Xiang 1155eaa9172aSGao Xiang erofs_release_pages(&pagepool); 1156a4b1fab1SGao Xiang kvfree(bgq); 115747e4937aSGao Xiang } 115847e4937aSGao Xiang 11597865827cSGao Xiang static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, 11607865827cSGao Xiang bool sync, int bios) 11617865827cSGao Xiang { 11627865827cSGao Xiang struct erofs_sb_info *const sbi = EROFS_SB(io->sb); 11637865827cSGao Xiang 11647865827cSGao Xiang /* wake up the caller thread for sync decompression */ 11657865827cSGao Xiang if (sync) { 11667865827cSGao Xiang if (!atomic_add_return(bios, &io->pending_bios)) 116760b30050SHongyu Jin complete(&io->u.done); 11687865827cSGao Xiang return; 11697865827cSGao Xiang } 11707865827cSGao Xiang 11717865827cSGao Xiang if (atomic_add_return(bios, &io->pending_bios)) 11727865827cSGao Xiang return; 11737865827cSGao Xiang /* Use workqueue and sync decompression for atomic contexts only */ 11747865827cSGao Xiang if (in_atomic() || irqs_disabled()) { 11757865827cSGao Xiang queue_work(z_erofs_workqueue, &io->u.work); 11767865827cSGao Xiang /* enable sync decompression for readahead */ 11777865827cSGao Xiang if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) 11787865827cSGao Xiang sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON; 11797865827cSGao Xiang return; 11807865827cSGao Xiang } 11817865827cSGao Xiang z_erofs_decompressqueue_work(&io->u.work); 11827865827cSGao Xiang } 11837865827cSGao Xiang 118447e4937aSGao Xiang static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl, 118547e4937aSGao Xiang unsigned int nr, 1186eaa9172aSGao Xiang struct page **pagepool, 11879f2731d6SGao Xiang struct address_space *mc) 118847e4937aSGao Xiang { 118947e4937aSGao Xiang const pgoff_t index = pcl->obj.index; 11909f2731d6SGao Xiang gfp_t gfp = 
mapping_gfp_mask(mc);
	bool tocache = false;

	struct address_space *mapping;
	struct page *oldpage, *page;

	compressed_page_t t;
	int justfound;

repeat:
	page = READ_ONCE(pcl->compressed_bvecs[nr].page);
	oldpage = page;

	if (!page)
		goto out_allocpage;

	/* process the target tagged pointer */
	t = tagptr_init(compressed_page_t, page);
	justfound = tagptr_unfold_tags(t);
	page = tagptr_unfold_ptr(t);

	/*
	 * preallocated cached pages, which are used to avoid direct reclaim;
	 * otherwise, the in-place I/O path will be taken instead.
	 */
	if (page->private == Z_EROFS_PREALLOCATED_PAGE) {
		WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);
		set_page_private(page, 0);
		tocache = true;
		goto out_tocache;
	}
	mapping = READ_ONCE(page->mapping);

	/*
	 * file-backed online pages in the pcluster are all locked steadily,
	 * therefore it is impossible for `mapping' to be NULL.
	 */
	if (mapping && mapping != mc)
		/* ought to be unmanaged pages */
		goto out;

	/* directly return for shortlived page as well */
	if (z_erofs_is_shortlived_page(page))
		goto out;

	lock_page(page);

	/* only true if page reclaim goes wrong, should never happen */
	DBG_BUGON(justfound && PagePrivate(page));

	/* the page is still in manage cache */
	if (page->mapping == mc) {
		WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);

		if (!PagePrivate(page)) {
			/*
			 * impossible to be !PagePrivate(page) under
			 * the current restriction if the page is
			 * already in compressed_bvecs[].
			 */
			DBG_BUGON(!justfound);

			justfound = 0;
			set_page_private(page, (unsigned long)pcl);
			SetPagePrivate(page);
		}

		/* no need to submit io if it is already up-to-date */
		if (PageUptodate(page)) {
			unlock_page(page);
			page = NULL;
		}
		goto out;
	}

	/*
	 * the managed page has been truncated, it's unsafe to
	 * reuse this one, let's allocate a new cache-managed page.
static struct z_erofs_decompressqueue *
jobqueue_init(struct super_block *sb,
	      struct z_erofs_decompressqueue *fgq, bool *fg)
{
	struct z_erofs_decompressqueue *q;

	if (fg && !*fg) {
		q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
		if (!q) {
			*fg = true;
			goto fg_out;
		}
		INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
	} else {
fg_out:
		q = fgq;
		init_completion(&fgq->u.done);
		atomic_set(&fgq->pending_bios, 0);
		q->eio = false;
	}
	q->sb = sb;
	q->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
	return q;
}

/* define decompression jobqueue types */
enum {
	JQ_BYPASS,
	JQ_SUBMIT,
	NR_JOBQUEUES,
};
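
/*
 * The value returned below is used as bio->bi_private: the JQ_SUBMIT queue
 * pointer with the foreground flag folded into its low tag bit, roughly:
 *
 *	bi_private = tagptr_fold(tagptr1_t, q[JQ_SUBMIT], *fg);
 *
 * z_erofs_decompressqueue_endio() later unfolds both the pointer and the
 * flag from the very same tagged pointer on I/O completion.
 */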
static void *jobqueueset_init(struct super_block *sb,
			      struct z_erofs_decompressqueue *q[],
			      struct z_erofs_decompressqueue *fgq, bool *fg)
{
	/*
	 * if managed cache is enabled, a bypass jobqueue is needed:
	 * no device read is needed for any pcluster in that queue.
	 */
	q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
	q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, fg);

	return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], *fg));
}

static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
				    z_erofs_next_pcluster_t qtail[],
				    z_erofs_next_pcluster_t owned_head)
{
	z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
	z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];

	DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
	if (owned_head == Z_EROFS_PCLUSTER_TAIL)
		owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED;

	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED);

	WRITE_ONCE(*submit_qtail, owned_head);
	WRITE_ONCE(*bypass_qtail, &pcl->next);

	qtail[JQ_BYPASS] = &pcl->next;
}

static void z_erofs_decompressqueue_endio(struct bio *bio)
{
	tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
	struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
	blk_status_t err = bio->bi_status;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		DBG_BUGON(PageUptodate(page));
		DBG_BUGON(z_erofs_page_is_invalidated(page));

		if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
			if (!err)
				SetPageUptodate(page);
			unlock_page(page);
		}
	}
	if (err)
		q->eio = true;
	z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
	bio_put(bio);
}
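
/*
 * A rough sketch of the submission loop below: walk the chain of owned
 * pclusters starting at f->owned_head, close each link, and either queue
 * the pcluster for device I/O (JQ_SUBMIT) or, when no read is needed as
 * with inline pclusters, move it to the bypass queue (JQ_BYPASS).
 * Physically adjacent compressed pages on the same device are merged into
 * a single bio whenever possible.
 */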
static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
				 struct page **pagepool,
				 struct z_erofs_decompressqueue *fgq,
				 bool *force_fg)
{
	struct super_block *sb = f->inode->i_sb;
	struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
	struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
	void *bi_private;
	z_erofs_next_pcluster_t owned_head = f->owned_head;
	/* bio is NULL initially, so no need to initialize last_{index,bdev} */
	pgoff_t last_index;
	struct block_device *last_bdev;
	unsigned int nr_bios = 0;
	struct bio *bio = NULL;
	unsigned long pflags;
	int memstall = 0;

	bi_private = jobqueueset_init(sb, q, fgq, force_fg);
	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;

	/* by default, all need io submission */
	q[JQ_SUBMIT]->head = owned_head;

	do {
		struct erofs_map_dev mdev;
		struct z_erofs_pcluster *pcl;
		pgoff_t cur, end;
		unsigned int i = 0;
		bool bypass = true;

		/* no possible 'owned_head' equals the following */
		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);

		pcl = container_of(owned_head, struct z_erofs_pcluster, next);

		/* close the main owned chain at first */
		owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
				     Z_EROFS_PCLUSTER_TAIL_CLOSED);
		if (z_erofs_is_inline_pcluster(pcl)) {
			move_to_bypass_jobqueue(pcl, qtail, owned_head);
			continue;
		}

		/* no device id here, thus it will always succeed */
		mdev = (struct erofs_map_dev) {
			.m_pa = blknr_to_addr(pcl->obj.index),
		};
		(void)erofs_map_dev(sb, &mdev);

		cur = erofs_blknr(mdev.m_pa);
		end = cur + pcl->pclusterpages;

		do {
			struct page *page;

			page = pickup_page_for_submission(pcl, i++, pagepool,
							  mc);
			if (!page)
				continue;

			if (bio && (cur != last_index + 1 ||
				    last_bdev != mdev.m_bdev)) {
submit_bio_retry:
				submit_bio(bio);
				if (memstall) {
					psi_memstall_leave(&pflags);
					memstall = 0;
				}
				bio = NULL;
			}

			if (unlikely(PageWorkingset(page)) && !memstall) {
				psi_memstall_enter(&pflags);
				memstall = 1;
			}

			if (!bio) {
				bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
						REQ_OP_READ, GFP_NOIO);
				bio->bi_end_io = z_erofs_decompressqueue_endio;

				last_bdev = mdev.m_bdev;
				bio->bi_iter.bi_sector = (sector_t)cur <<
					LOG_SECTORS_PER_BLOCK;
				bio->bi_private = bi_private;
				if (f->readahead)
					bio->bi_opf |= REQ_RAHEAD;
				++nr_bios;
			}
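			/*
			 * Attach the compressed page to the current bio; if
			 * it does not fit, submit what has been built so far
			 * and retry with a freshly allocated bio.
			 */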
			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				goto submit_bio_retry;

			last_index = cur;
			bypass = false;
		} while (++cur < end);

		if (!bypass)
			qtail[JQ_SUBMIT] = &pcl->next;
		else
			move_to_bypass_jobqueue(pcl, qtail, owned_head);
	} while (owned_head != Z_EROFS_PCLUSTER_TAIL);

	if (bio) {
		submit_bio(bio);
		if (memstall)
			psi_memstall_leave(&pflags);
	}

	/*
	 * although background decompression is preferred, nothing is pending
	 * for submission here; don't kick the workqueue, just drop the
	 * allocated submit queue directly instead.
	 */
	if (!*force_fg && !nr_bios) {
		kvfree(q[JQ_SUBMIT]);
		return;
	}
	z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
}

static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
			     struct page **pagepool, bool force_fg)
{
	struct z_erofs_decompressqueue io[NR_JOBQUEUES];

	if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
		return;
	z_erofs_submit_queue(f, pagepool, io, &force_fg);

	/* handle bypass queue (no i/o pclusters) immediately */
	z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);

	if (!force_fg)
		return;

	/* wait until all bios are completed */
	wait_for_completion_io(&io[JQ_SUBMIT].u.done);

	/* handle synchronous decompress queue in the caller context */
	z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);
}

/*
 * Since partial uptodate is still unimplemented, we have to use
 * approximate readmore strategies as a start.
 */
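/*
 * Illustrative example (hypothetical numbers): with 4KiB blocks and a
 * pcluster whose decompressed extent covers [0, 64KiB), a read of the page
 * at 8KiB cannot be decompressed on its own.  z_erofs_pcluster_readmore()
 * therefore extends the request towards the edges of that extent, either by
 * growing the readahead window when @rac is available or by grabbing and
 * reading the missing cache pages directly.
 */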
static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
				      struct readahead_control *rac,
				      erofs_off_t end,
				      struct page **pagepool,
				      bool backmost)
{
	struct inode *inode = f->inode;
	struct erofs_map_blocks *map = &f->map;
	erofs_off_t cur;
	int err;

	if (backmost) {
		map->m_la = end;
		err = z_erofs_map_blocks_iter(inode, map,
					      EROFS_GET_BLOCKS_READMORE);
		if (err)
			return;

		/* expand readahead for the trailing edge if possible */
		if (rac) {
			loff_t newstart = readahead_pos(rac);

			cur = round_up(map->m_la + map->m_llen, PAGE_SIZE);
			readahead_expand(rac, newstart, cur - newstart);
			return;
		}
		end = round_up(end, PAGE_SIZE);
	} else {
		end = round_up(map->m_la, PAGE_SIZE);

		if (!map->m_llen)
			return;
	}

	cur = map->m_la + map->m_llen - 1;
	while (cur >= end) {
		pgoff_t index = cur >> PAGE_SHIFT;
		struct page *page;

		page = erofs_grab_cache_page_nowait(inode->i_mapping, index);
		if (page) {
			if (PageUptodate(page)) {
				unlock_page(page);
			} else {
				err = z_erofs_do_read_page(f, page, pagepool);
				if (err)
					erofs_err(inode->i_sb,
						  "readmore error at page %lu @ nid %llu",
						  index, EROFS_I(inode)->nid);
			}
			put_page(page);
		}

		if (cur < PAGE_SIZE)
			break;
		cur = (index << PAGE_SHIFT) - 1;
	}
}
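
/*
 * A rough outline of the .read_folio path below: apply readmore on both
 * edges of the requested folio, read the folio itself, finish the
 * collector, and then run the queue using the synchronous-decompression
 * policy reported by z_erofs_get_sync_decompress_policy().
 */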
static int z_erofs_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;
	struct inode *const inode = page->mapping->host;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
	struct page *pagepool = NULL;
	int err;

	trace_erofs_readpage(page, false);
	f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;

	z_erofs_pcluster_readmore(&f, NULL, f.headoffset + PAGE_SIZE - 1,
				  &pagepool, true);
	err = z_erofs_do_read_page(&f, page, &pagepool);
	z_erofs_pcluster_readmore(&f, NULL, 0, &pagepool, false);

	(void)z_erofs_collector_end(&f);

	/* if some compressed clusters are ready, submit them anyway */
	z_erofs_runqueue(&f, &pagepool,
			 z_erofs_get_sync_decompress_policy(sbi, 0));

	if (err)
		erofs_err(inode->i_sb, "failed to read, err [%d]", err);

	erofs_put_metabuf(&f.map.buf);
	erofs_release_pages(&pagepool);
	return err;
}

static void z_erofs_readahead(struct readahead_control *rac)
{
	struct inode *const inode = rac->mapping->host;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
	struct page *pagepool = NULL, *head = NULL, *page;
	unsigned int nr_pages;

	f.readahead = true;
	f.headoffset = readahead_pos(rac);

	z_erofs_pcluster_readmore(&f, rac, f.headoffset +
				  readahead_length(rac) - 1, &pagepool, true);
	nr_pages = readahead_count(rac);
	trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);

	while ((page = readahead_page(rac))) {
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	while (head) {
		struct page *page = head;
		int err;

		/* traversal in reverse order */
		head = (void *)page_private(page);

		err = z_erofs_do_read_page(&f, page, &pagepool);
		if (err)
			erofs_err(inode->i_sb,
				  "readahead error at page %lu @ nid %llu",
				  page->index, EROFS_I(inode)->nid);
		put_page(page);
	}
	z_erofs_pcluster_readmore(&f, rac, 0, &pagepool, false);
	(void)z_erofs_collector_end(&f);

	z_erofs_runqueue(&f, &pagepool,
			 z_erofs_get_sync_decompress_policy(sbi, nr_pages));
	erofs_put_metabuf(&f.map.buf);
	erofs_release_pages(&pagepool);
}

const struct address_space_operations z_erofs_aops = {
	.read_folio = z_erofs_read_folio,
	.readahead = z_erofs_readahead,
};