147e4937aSGao Xiang // SPDX-License-Identifier: GPL-2.0-only 247e4937aSGao Xiang /* 347e4937aSGao Xiang * Copyright (C) 2018 HUAWEI, Inc. 4592e7cd0SAlexander A. Klimov * https://www.huawei.com/ 506a304cdSGao Xiang * Copyright (C) 2022 Alibaba Cloud 647e4937aSGao Xiang */ 747e4937aSGao Xiang #include "zdata.h" 847e4937aSGao Xiang #include "compress.h" 947e4937aSGao Xiang #include <linux/prefetch.h> 1099486c51SChristoph Hellwig #include <linux/psi.h> 1147e4937aSGao Xiang 1247e4937aSGao Xiang #include <trace/events/erofs.h> 1347e4937aSGao Xiang 1447e4937aSGao Xiang /* 159f6cc76eSGao Xiang * since pclustersize is variable for big pcluster feature, introduce slab 169f6cc76eSGao Xiang * pools implementation for different pcluster sizes. 179f6cc76eSGao Xiang */ 189f6cc76eSGao Xiang struct z_erofs_pcluster_slab { 199f6cc76eSGao Xiang struct kmem_cache *slab; 209f6cc76eSGao Xiang unsigned int maxpages; 219f6cc76eSGao Xiang char name[48]; 229f6cc76eSGao Xiang }; 239f6cc76eSGao Xiang 249f6cc76eSGao Xiang #define _PCLP(n) { .maxpages = n } 259f6cc76eSGao Xiang 269f6cc76eSGao Xiang static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = { 279f6cc76eSGao Xiang _PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128), 289f6cc76eSGao Xiang _PCLP(Z_EROFS_PCLUSTER_MAX_PAGES) 299f6cc76eSGao Xiang }; 309f6cc76eSGao Xiang 3106a304cdSGao Xiang struct z_erofs_bvec_iter { 3206a304cdSGao Xiang struct page *bvpage; 3306a304cdSGao Xiang struct z_erofs_bvset *bvset; 3406a304cdSGao Xiang unsigned int nr, cur; 3506a304cdSGao Xiang }; 3606a304cdSGao Xiang 3706a304cdSGao Xiang static struct page *z_erofs_bvec_iter_end(struct z_erofs_bvec_iter *iter) 3806a304cdSGao Xiang { 3906a304cdSGao Xiang if (iter->bvpage) 4006a304cdSGao Xiang kunmap_local(iter->bvset); 4106a304cdSGao Xiang return iter->bvpage; 4206a304cdSGao Xiang } 4306a304cdSGao Xiang 4406a304cdSGao Xiang static struct page *z_erofs_bvset_flip(struct z_erofs_bvec_iter *iter) 4506a304cdSGao Xiang { 4606a304cdSGao Xiang unsigned long base = (unsigned long)((struct z_erofs_bvset *)0)->bvec; 4706a304cdSGao Xiang /* have to access nextpage in advance, otherwise it will be unmapped */ 4806a304cdSGao Xiang struct page *nextpage = iter->bvset->nextpage; 4906a304cdSGao Xiang struct page *oldpage; 5006a304cdSGao Xiang 5106a304cdSGao Xiang DBG_BUGON(!nextpage); 5206a304cdSGao Xiang oldpage = z_erofs_bvec_iter_end(iter); 5306a304cdSGao Xiang iter->bvpage = nextpage; 5406a304cdSGao Xiang iter->bvset = kmap_local_page(nextpage); 5506a304cdSGao Xiang iter->nr = (PAGE_SIZE - base) / sizeof(struct z_erofs_bvec); 5606a304cdSGao Xiang iter->cur = 0; 5706a304cdSGao Xiang return oldpage; 5806a304cdSGao Xiang } 5906a304cdSGao Xiang 6006a304cdSGao Xiang static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter, 6106a304cdSGao Xiang struct z_erofs_bvset_inline *bvset, 6206a304cdSGao Xiang unsigned int bootstrap_nr, 6306a304cdSGao Xiang unsigned int cur) 6406a304cdSGao Xiang { 6506a304cdSGao Xiang *iter = (struct z_erofs_bvec_iter) { 6606a304cdSGao Xiang .nr = bootstrap_nr, 6706a304cdSGao Xiang .bvset = (struct z_erofs_bvset *)bvset, 6806a304cdSGao Xiang }; 6906a304cdSGao Xiang 7006a304cdSGao Xiang while (cur > iter->nr) { 7106a304cdSGao Xiang cur -= iter->nr; 7206a304cdSGao Xiang z_erofs_bvset_flip(iter); 7306a304cdSGao Xiang } 7406a304cdSGao Xiang iter->cur = cur; 7506a304cdSGao Xiang } 7606a304cdSGao Xiang 7706a304cdSGao Xiang static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter, 7806a304cdSGao Xiang struct z_erofs_bvec *bvec, 7906a304cdSGao Xiang struct page 
**candidate_bvpage) 8006a304cdSGao Xiang { 8106a304cdSGao Xiang if (iter->cur == iter->nr) { 8206a304cdSGao Xiang if (!*candidate_bvpage) 8306a304cdSGao Xiang return -EAGAIN; 8406a304cdSGao Xiang 8506a304cdSGao Xiang DBG_BUGON(iter->bvset->nextpage); 8606a304cdSGao Xiang iter->bvset->nextpage = *candidate_bvpage; 8706a304cdSGao Xiang z_erofs_bvset_flip(iter); 8806a304cdSGao Xiang 8906a304cdSGao Xiang iter->bvset->nextpage = NULL; 9006a304cdSGao Xiang *candidate_bvpage = NULL; 9106a304cdSGao Xiang } 9206a304cdSGao Xiang iter->bvset->bvec[iter->cur++] = *bvec; 9306a304cdSGao Xiang return 0; 9406a304cdSGao Xiang } 9506a304cdSGao Xiang 9606a304cdSGao Xiang static void z_erofs_bvec_dequeue(struct z_erofs_bvec_iter *iter, 9706a304cdSGao Xiang struct z_erofs_bvec *bvec, 9806a304cdSGao Xiang struct page **old_bvpage) 9906a304cdSGao Xiang { 10006a304cdSGao Xiang if (iter->cur == iter->nr) 10106a304cdSGao Xiang *old_bvpage = z_erofs_bvset_flip(iter); 10206a304cdSGao Xiang else 10306a304cdSGao Xiang *old_bvpage = NULL; 10406a304cdSGao Xiang *bvec = iter->bvset->bvec[iter->cur++]; 10506a304cdSGao Xiang } 10606a304cdSGao Xiang 1079f6cc76eSGao Xiang static void z_erofs_destroy_pcluster_pool(void) 1089f6cc76eSGao Xiang { 1099f6cc76eSGao Xiang int i; 1109f6cc76eSGao Xiang 1119f6cc76eSGao Xiang for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) { 1129f6cc76eSGao Xiang if (!pcluster_pool[i].slab) 1139f6cc76eSGao Xiang continue; 1149f6cc76eSGao Xiang kmem_cache_destroy(pcluster_pool[i].slab); 1159f6cc76eSGao Xiang pcluster_pool[i].slab = NULL; 1169f6cc76eSGao Xiang } 1179f6cc76eSGao Xiang } 1189f6cc76eSGao Xiang 1199f6cc76eSGao Xiang static int z_erofs_create_pcluster_pool(void) 1209f6cc76eSGao Xiang { 1219f6cc76eSGao Xiang struct z_erofs_pcluster_slab *pcs; 1229f6cc76eSGao Xiang struct z_erofs_pcluster *a; 1239f6cc76eSGao Xiang unsigned int size; 1249f6cc76eSGao Xiang 1259f6cc76eSGao Xiang for (pcs = pcluster_pool; 1269f6cc76eSGao Xiang pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) { 127ed722fbcSGao Xiang size = struct_size(a, compressed_bvecs, pcs->maxpages); 1289f6cc76eSGao Xiang 1299f6cc76eSGao Xiang sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages); 1309f6cc76eSGao Xiang pcs->slab = kmem_cache_create(pcs->name, size, 0, 1319f6cc76eSGao Xiang SLAB_RECLAIM_ACCOUNT, NULL); 1329f6cc76eSGao Xiang if (pcs->slab) 1339f6cc76eSGao Xiang continue; 1349f6cc76eSGao Xiang 1359f6cc76eSGao Xiang z_erofs_destroy_pcluster_pool(); 1369f6cc76eSGao Xiang return -ENOMEM; 1379f6cc76eSGao Xiang } 1389f6cc76eSGao Xiang return 0; 1399f6cc76eSGao Xiang } 1409f6cc76eSGao Xiang 1419f6cc76eSGao Xiang static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages) 1429f6cc76eSGao Xiang { 1439f6cc76eSGao Xiang int i; 1449f6cc76eSGao Xiang 1459f6cc76eSGao Xiang for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) { 1469f6cc76eSGao Xiang struct z_erofs_pcluster_slab *pcs = pcluster_pool + i; 1479f6cc76eSGao Xiang struct z_erofs_pcluster *pcl; 1489f6cc76eSGao Xiang 1499f6cc76eSGao Xiang if (nrpages > pcs->maxpages) 1509f6cc76eSGao Xiang continue; 1519f6cc76eSGao Xiang 1529f6cc76eSGao Xiang pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS); 1539f6cc76eSGao Xiang if (!pcl) 1549f6cc76eSGao Xiang return ERR_PTR(-ENOMEM); 1559f6cc76eSGao Xiang pcl->pclusterpages = nrpages; 1569f6cc76eSGao Xiang return pcl; 1579f6cc76eSGao Xiang } 1589f6cc76eSGao Xiang return ERR_PTR(-EINVAL); 1599f6cc76eSGao Xiang } 1609f6cc76eSGao Xiang 1619f6cc76eSGao Xiang static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl) 1629f6cc76eSGao Xiang 
{ 163cecf864dSYue Hu unsigned int pclusterpages = z_erofs_pclusterpages(pcl); 1649f6cc76eSGao Xiang int i; 1659f6cc76eSGao Xiang 1669f6cc76eSGao Xiang for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) { 1679f6cc76eSGao Xiang struct z_erofs_pcluster_slab *pcs = pcluster_pool + i; 1689f6cc76eSGao Xiang 169cecf864dSYue Hu if (pclusterpages > pcs->maxpages) 1709f6cc76eSGao Xiang continue; 1719f6cc76eSGao Xiang 1729f6cc76eSGao Xiang kmem_cache_free(pcs->slab, pcl); 1739f6cc76eSGao Xiang return; 1749f6cc76eSGao Xiang } 1759f6cc76eSGao Xiang DBG_BUGON(1); 1769f6cc76eSGao Xiang } 1779f6cc76eSGao Xiang 17847e4937aSGao Xiang /* how to allocate cached pages for a pcluster */ 17947e4937aSGao Xiang enum z_erofs_cache_alloctype { 18047e4937aSGao Xiang DONTALLOC, /* don't allocate any cached pages */ 1811825c8d7SGao Xiang /* 1821825c8d7SGao Xiang * try to use cached I/O if page allocation succeeds or fallback 1831825c8d7SGao Xiang * to in-place I/O instead to avoid any direct reclaim. 1841825c8d7SGao Xiang */ 1851825c8d7SGao Xiang TRYALLOC, 18647e4937aSGao Xiang }; 18747e4937aSGao Xiang 18847e4937aSGao Xiang /* 18947e4937aSGao Xiang * tagged pointer with 1-bit tag for all compressed pages 19047e4937aSGao Xiang * tag 0 - the page is just found with an extra page reference 19147e4937aSGao Xiang */ 19247e4937aSGao Xiang typedef tagptr1_t compressed_page_t; 19347e4937aSGao Xiang 19447e4937aSGao Xiang #define tag_compressed_page_justfound(page) \ 19547e4937aSGao Xiang tagptr_fold(compressed_page_t, page, 1) 19647e4937aSGao Xiang 19747e4937aSGao Xiang static struct workqueue_struct *z_erofs_workqueue __read_mostly; 19847e4937aSGao Xiang 19947e4937aSGao Xiang void z_erofs_exit_zip_subsystem(void) 20047e4937aSGao Xiang { 20147e4937aSGao Xiang destroy_workqueue(z_erofs_workqueue); 2029f6cc76eSGao Xiang z_erofs_destroy_pcluster_pool(); 20347e4937aSGao Xiang } 20447e4937aSGao Xiang 20599634bf3SGao Xiang static inline int z_erofs_init_workqueue(void) 20647e4937aSGao Xiang { 20747e4937aSGao Xiang const unsigned int onlinecpus = num_possible_cpus(); 20847e4937aSGao Xiang 20947e4937aSGao Xiang /* 21047e4937aSGao Xiang * no need to spawn too many threads, limiting threads could minimum 21147e4937aSGao Xiang * scheduling overhead, perhaps per-CPU threads should be better? 21247e4937aSGao Xiang */ 2130e62ea33SGao Xiang z_erofs_workqueue = alloc_workqueue("erofs_unzipd", 2140e62ea33SGao Xiang WQ_UNBOUND | WQ_HIGHPRI, 21547e4937aSGao Xiang onlinecpus + onlinecpus / 4); 21647e4937aSGao Xiang return z_erofs_workqueue ? 0 : -ENOMEM; 21747e4937aSGao Xiang } 21847e4937aSGao Xiang 21947e4937aSGao Xiang int __init z_erofs_init_zip_subsystem(void) 22047e4937aSGao Xiang { 2219f6cc76eSGao Xiang int err = z_erofs_create_pcluster_pool(); 22247e4937aSGao Xiang 2239f6cc76eSGao Xiang if (err) 2249f6cc76eSGao Xiang return err; 2259f6cc76eSGao Xiang err = z_erofs_init_workqueue(); 2269f6cc76eSGao Xiang if (err) 2279f6cc76eSGao Xiang z_erofs_destroy_pcluster_pool(); 2289f6cc76eSGao Xiang return err; 22947e4937aSGao Xiang } 23047e4937aSGao Xiang 231db166fc2SGao Xiang enum z_erofs_pclustermode { 232db166fc2SGao Xiang Z_EROFS_PCLUSTER_INFLIGHT, 23347e4937aSGao Xiang /* 234db166fc2SGao Xiang * The current pclusters was the tail of an exist chain, in addition 235db166fc2SGao Xiang * that the previous processed chained pclusters are all decided to 23647e4937aSGao Xiang * be hooked up to it. 
237db166fc2SGao Xiang * A new chain will be created for the remaining pclusters which have 238db166fc2SGao Xiang * not been processed yet; therefore, unlike Z_EROFS_PCLUSTER_FOLLOWED, 239db166fc2SGao Xiang * the next pcluster cannot safely reuse the whole page for in-place I/O 240db166fc2SGao Xiang * in the following scenario: 24147e4937aSGao Xiang * ________________________________________________________________ 24247e4937aSGao Xiang * | tail (partial) page | head (partial) page | 243db166fc2SGao Xiang * | (belongs to the next pcl) | (belongs to the current pcl) | 244db166fc2SGao Xiang * |_______PCLUSTER_FOLLOWED______|________PCLUSTER_HOOKED__________| 24547e4937aSGao Xiang */ 246db166fc2SGao Xiang Z_EROFS_PCLUSTER_HOOKED, 2470b964600SGao Xiang /* 248db166fc2SGao Xiang * a weak form of Z_EROFS_PCLUSTER_FOLLOWED; the difference is that it 2490b964600SGao Xiang * could be dispatched into the bypass queue later due to up-to-date managed 2500b964600SGao Xiang * pages. All related online pages cannot be reused for in-place I/O (or 251387bab87SGao Xiang * bvpage) since the pcluster can be decoded directly without I/O submission. 2520b964600SGao Xiang */ 253db166fc2SGao Xiang Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE, 25447e4937aSGao Xiang /* 25547e4937aSGao Xiang * The current collection has been linked into the owned chain and 25647e4937aSGao Xiang * may also be linked with the remaining collections, which means 25747e4937aSGao Xiang * that if the processing page is the tail page of the collection, 25847e4937aSGao Xiang * the current collection can safely use the whole page (since 25947e4937aSGao Xiang * the previous collection is under control) for in-place I/O, as 26047e4937aSGao Xiang * illustrated below: 26147e4937aSGao Xiang * ________________________________________________________________ 26247e4937aSGao Xiang * | tail (partial) page | head (partial) page | 26347e4937aSGao Xiang * | (of the current cl) | (of the previous collection) | 264db166fc2SGao Xiang * | PCLUSTER_FOLLOWED or | | 265db166fc2SGao Xiang * |_____PCLUSTER_HOOKED__|___________PCLUSTER_FOLLOWED____________| 26647e4937aSGao Xiang * 26747e4937aSGao Xiang * [ (*) the above page can be used for in-place I/O.
] 26847e4937aSGao Xiang */ 269db166fc2SGao Xiang Z_EROFS_PCLUSTER_FOLLOWED, 27047e4937aSGao Xiang }; 27147e4937aSGao Xiang 2725c6dcc57SGao Xiang struct z_erofs_decompress_frontend { 2735c6dcc57SGao Xiang struct inode *const inode; 2745c6dcc57SGao Xiang struct erofs_map_blocks map; 27506a304cdSGao Xiang struct z_erofs_bvec_iter biter; 27647e4937aSGao Xiang 27706a304cdSGao Xiang struct page *candidate_bvpage; 27847e4937aSGao Xiang struct z_erofs_pcluster *pcl, *tailpcl; 27947e4937aSGao Xiang z_erofs_next_pcluster_t owned_head; 280db166fc2SGao Xiang enum z_erofs_pclustermode mode; 28147e4937aSGao Xiang 2826ea5aad3SGao Xiang bool readahead; 28347e4937aSGao Xiang /* used for applying cache strategy on the fly */ 28447e4937aSGao Xiang bool backmost; 28547e4937aSGao Xiang erofs_off_t headoffset; 286ed722fbcSGao Xiang 287ed722fbcSGao Xiang /* a pointer used to pick up inplace I/O pages */ 288ed722fbcSGao Xiang unsigned int icur; 28947e4937aSGao Xiang }; 29047e4937aSGao Xiang 29147e4937aSGao Xiang #define DECOMPRESS_FRONTEND_INIT(__i) { \ 2925c6dcc57SGao Xiang .inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \ 293db166fc2SGao Xiang .mode = Z_EROFS_PCLUSTER_FOLLOWED, .backmost = true } 29447e4937aSGao Xiang 2956f39d1e1SGao Xiang static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe, 2961825c8d7SGao Xiang enum z_erofs_cache_alloctype type, 297eaa9172aSGao Xiang struct page **pagepool) 29847e4937aSGao Xiang { 2996f39d1e1SGao Xiang struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode)); 3005c6dcc57SGao Xiang struct z_erofs_pcluster *pcl = fe->pcl; 30147e4937aSGao Xiang bool standalone = true; 3026f39d1e1SGao Xiang /* 3036f39d1e1SGao Xiang * optimistic allocation without direct reclaim since inplace I/O 3046f39d1e1SGao Xiang * can be used if low memory otherwise. 
3056f39d1e1SGao Xiang */ 3061825c8d7SGao Xiang gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) | 3071825c8d7SGao Xiang __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN; 308ed722fbcSGao Xiang unsigned int i; 30947e4937aSGao Xiang 310db166fc2SGao Xiang if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED) 31147e4937aSGao Xiang return; 31247e4937aSGao Xiang 313ed722fbcSGao Xiang for (i = 0; i < pcl->pclusterpages; ++i) { 31447e4937aSGao Xiang struct page *page; 31547e4937aSGao Xiang compressed_page_t t; 3161825c8d7SGao Xiang struct page *newpage = NULL; 31747e4937aSGao Xiang 31847e4937aSGao Xiang /* the compressed page was loaded before */ 319ed722fbcSGao Xiang if (READ_ONCE(pcl->compressed_bvecs[i].page)) 32047e4937aSGao Xiang continue; 32147e4937aSGao Xiang 322ed722fbcSGao Xiang page = find_get_page(mc, pcl->obj.index + i); 32347e4937aSGao Xiang 32447e4937aSGao Xiang if (page) { 32547e4937aSGao Xiang t = tag_compressed_page_justfound(page); 3260b964600SGao Xiang } else { 3270b964600SGao Xiang /* I/O is needed, no possible to decompress directly */ 3280b964600SGao Xiang standalone = false; 3290b964600SGao Xiang switch (type) { 3300b964600SGao Xiang case TRYALLOC: 3311825c8d7SGao Xiang newpage = erofs_allocpage(pagepool, gfp); 3321825c8d7SGao Xiang if (!newpage) 33347e4937aSGao Xiang continue; 3340b964600SGao Xiang set_page_private(newpage, 3350b964600SGao Xiang Z_EROFS_PREALLOCATED_PAGE); 3360b964600SGao Xiang t = tag_compressed_page_justfound(newpage); 3370b964600SGao Xiang break; 3380b964600SGao Xiang default: /* DONTALLOC */ 3390b964600SGao Xiang continue; 3400b964600SGao Xiang } 34147e4937aSGao Xiang } 34247e4937aSGao Xiang 343ed722fbcSGao Xiang if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL, 344ed722fbcSGao Xiang tagptr_cast_ptr(t))) 34547e4937aSGao Xiang continue; 34647e4937aSGao Xiang 347eaa9172aSGao Xiang if (page) 34847e4937aSGao Xiang put_page(page); 349eaa9172aSGao Xiang else if (newpage) 350eaa9172aSGao Xiang erofs_pagepool_add(pagepool, newpage); 35147e4937aSGao Xiang } 35247e4937aSGao Xiang 3530b964600SGao Xiang /* 3540b964600SGao Xiang * don't do inplace I/O if all compressed pages are available in 3550b964600SGao Xiang * managed cache since it can be moved to the bypass queue instead. 3560b964600SGao Xiang */ 3570b964600SGao Xiang if (standalone) 358db166fc2SGao Xiang fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE; 35947e4937aSGao Xiang } 36047e4937aSGao Xiang 36147e4937aSGao Xiang /* called by erofs_shrinker to get rid of all compressed_pages */ 36247e4937aSGao Xiang int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, 36347e4937aSGao Xiang struct erofs_workgroup *grp) 36447e4937aSGao Xiang { 36547e4937aSGao Xiang struct z_erofs_pcluster *const pcl = 36647e4937aSGao Xiang container_of(grp, struct z_erofs_pcluster, obj); 36747e4937aSGao Xiang int i; 36847e4937aSGao Xiang 369cecf864dSYue Hu DBG_BUGON(z_erofs_is_inline_pcluster(pcl)); 37047e4937aSGao Xiang /* 37147e4937aSGao Xiang * refcount of workgroup is now freezed as 1, 37247e4937aSGao Xiang * therefore no need to worry about available decompression users. 
37347e4937aSGao Xiang */ 3749f6cc76eSGao Xiang for (i = 0; i < pcl->pclusterpages; ++i) { 375ed722fbcSGao Xiang struct page *page = pcl->compressed_bvecs[i].page; 37647e4937aSGao Xiang 37747e4937aSGao Xiang if (!page) 37847e4937aSGao Xiang continue; 37947e4937aSGao Xiang 38047e4937aSGao Xiang /* block other users from reclaiming or migrating the page */ 38147e4937aSGao Xiang if (!trylock_page(page)) 38247e4937aSGao Xiang return -EBUSY; 38347e4937aSGao Xiang 384f4d4e5fcSYue Hu if (!erofs_page_is_managed(sbi, page)) 38547e4937aSGao Xiang continue; 38647e4937aSGao Xiang 38747e4937aSGao Xiang /* barrier is implied in the following 'unlock_page' */ 388ed722fbcSGao Xiang WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL); 3896aaa7b06SGao Xiang detach_page_private(page); 39047e4937aSGao Xiang unlock_page(page); 39147e4937aSGao Xiang } 39247e4937aSGao Xiang return 0; 39347e4937aSGao Xiang } 39447e4937aSGao Xiang 395d252ff3dSYue Hu int erofs_try_to_free_cached_page(struct page *page) 39647e4937aSGao Xiang { 39747e4937aSGao Xiang struct z_erofs_pcluster *const pcl = (void *)page_private(page); 398ed722fbcSGao Xiang int ret, i; 39947e4937aSGao Xiang 400ed722fbcSGao Xiang if (!erofs_workgroup_try_to_freeze(&pcl->obj, 1)) 401ed722fbcSGao Xiang return 0; 40247e4937aSGao Xiang 403ed722fbcSGao Xiang ret = 0; 404cecf864dSYue Hu DBG_BUGON(z_erofs_is_inline_pcluster(pcl)); 4059f6cc76eSGao Xiang for (i = 0; i < pcl->pclusterpages; ++i) { 406ed722fbcSGao Xiang if (pcl->compressed_bvecs[i].page == page) { 407ed722fbcSGao Xiang WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL); 40847e4937aSGao Xiang ret = 1; 40947e4937aSGao Xiang break; 41047e4937aSGao Xiang } 41147e4937aSGao Xiang } 41247e4937aSGao Xiang erofs_workgroup_unfreeze(&pcl->obj, 1); 4136aaa7b06SGao Xiang if (ret) 4146aaa7b06SGao Xiang detach_page_private(page); 41547e4937aSGao Xiang return ret; 41647e4937aSGao Xiang } 41747e4937aSGao Xiang 4185c6dcc57SGao Xiang static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe, 419ed722fbcSGao Xiang struct z_erofs_bvec *bvec) 42047e4937aSGao Xiang { 4215c6dcc57SGao Xiang struct z_erofs_pcluster *const pcl = fe->pcl; 42247e4937aSGao Xiang 423ed722fbcSGao Xiang while (fe->icur > 0) { 424ed722fbcSGao Xiang if (!cmpxchg(&pcl->compressed_bvecs[--fe->icur].page, 425ed722fbcSGao Xiang NULL, bvec->page)) { 426ed722fbcSGao Xiang pcl->compressed_bvecs[fe->icur] = *bvec; 42747e4937aSGao Xiang return true; 428ed722fbcSGao Xiang } 429ed722fbcSGao Xiang } 43047e4937aSGao Xiang return false; 43147e4937aSGao Xiang } 43247e4937aSGao Xiang 43387ca34a7SGao Xiang /* callers must be with pcluster lock held */ 4345c6dcc57SGao Xiang static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe, 4355b220b20SGao Xiang struct z_erofs_bvec *bvec, bool exclusive) 43647e4937aSGao Xiang { 43747e4937aSGao Xiang int ret; 43847e4937aSGao Xiang 439db166fc2SGao Xiang if (exclusive) { 44006a304cdSGao Xiang /* give priority for inplaceio to use file pages first */ 441ed722fbcSGao Xiang if (z_erofs_try_inplace_io(fe, bvec)) 44247e4937aSGao Xiang return 0; 44306a304cdSGao Xiang /* otherwise, check if it can be used as a bvpage */ 444db166fc2SGao Xiang if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED && 44506a304cdSGao Xiang !fe->candidate_bvpage) 44606a304cdSGao Xiang fe->candidate_bvpage = bvec->page; 44706a304cdSGao Xiang } 44806a304cdSGao Xiang ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage); 44906a304cdSGao Xiang fe->pcl->vcnt += (ret >= 0); 45006a304cdSGao Xiang return ret; 45147e4937aSGao Xiang } 
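/*
 * Calling pattern for z_erofs_attach_page() (a minimal sketch distilled
 * from z_erofs_do_read_page() below, not a separate API): -EAGAIN means
 * the current bvset is full and no candidate bvpage is available, so the
 * caller supplies a short-lived page and retries, roughly as:
 *
 *	err = z_erofs_attach_page(fe, &bvec, exclusive);
 *	if (err == -EAGAIN && !fe->candidate_bvpage) {
 *		fe->candidate_bvpage = alloc_page(GFP_NOFS | __GFP_NOFAIL);
 *		set_page_private(fe->candidate_bvpage,
 *				 Z_EROFS_SHORTLIVED_PAGE);
 *		err = z_erofs_attach_page(fe, &bvec, exclusive);
 *	}
 */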
45247e4937aSGao Xiang 4535c6dcc57SGao Xiang static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f) 45447e4937aSGao Xiang { 4555c6dcc57SGao Xiang struct z_erofs_pcluster *pcl = f->pcl; 4565c6dcc57SGao Xiang z_erofs_next_pcluster_t *owned_head = &f->owned_head; 45747e4937aSGao Xiang 458473e15b0SGao Xiang /* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */ 459473e15b0SGao Xiang if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL, 460473e15b0SGao Xiang *owned_head) == Z_EROFS_PCLUSTER_NIL) { 46147e4937aSGao Xiang *owned_head = &pcl->next; 462473e15b0SGao Xiang /* so we can attach this pcluster to our submission chain. */ 463db166fc2SGao Xiang f->mode = Z_EROFS_PCLUSTER_FOLLOWED; 464473e15b0SGao Xiang return; 465473e15b0SGao Xiang } 466473e15b0SGao Xiang 46747e4937aSGao Xiang /* 468473e15b0SGao Xiang * type 2, link to the end of an existing open chain, be careful 469473e15b0SGao Xiang * that its submission is controlled by the original attached chain. 47047e4937aSGao Xiang */ 471267f2492SGao Xiang if (*owned_head != &pcl->next && pcl != f->tailpcl && 472267f2492SGao Xiang cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL, 473473e15b0SGao Xiang *owned_head) == Z_EROFS_PCLUSTER_TAIL) { 47447e4937aSGao Xiang *owned_head = Z_EROFS_PCLUSTER_TAIL; 475db166fc2SGao Xiang f->mode = Z_EROFS_PCLUSTER_HOOKED; 4765c6dcc57SGao Xiang f->tailpcl = NULL; 477473e15b0SGao Xiang return; 47847e4937aSGao Xiang } 479473e15b0SGao Xiang /* type 3, it belongs to a chain, but it isn't the end of the chain */ 480db166fc2SGao Xiang f->mode = Z_EROFS_PCLUSTER_INFLIGHT; 48147e4937aSGao Xiang } 48247e4937aSGao Xiang 48383a386c0SGao Xiang static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe) 48447e4937aSGao Xiang { 48583a386c0SGao Xiang struct erofs_map_blocks *map = &fe->map; 486cecf864dSYue Hu bool ztailpacking = map->m_flags & EROFS_MAP_META; 48747e4937aSGao Xiang struct z_erofs_pcluster *pcl; 48864094a04SGao Xiang struct erofs_workgroup *grp; 48947e4937aSGao Xiang int err; 49047e4937aSGao Xiang 4918f899262SGao Xiang if (!(map->m_flags & EROFS_MAP_ENCODED)) { 4928f899262SGao Xiang DBG_BUGON(1); 4938f899262SGao Xiang return -EFSCORRUPTED; 4948f899262SGao Xiang } 4958f899262SGao Xiang 4969f6cc76eSGao Xiang /* no available pcluster, let's allocate one */ 497cecf864dSYue Hu pcl = z_erofs_alloc_pcluster(ztailpacking ? 1 : 498cecf864dSYue Hu map->m_plen >> PAGE_SHIFT); 4999f6cc76eSGao Xiang if (IS_ERR(pcl)) 5009f6cc76eSGao Xiang return PTR_ERR(pcl); 50147e4937aSGao Xiang 50264094a04SGao Xiang atomic_set(&pcl->obj.refcount, 1); 5038f899262SGao Xiang pcl->algorithmformat = map->m_algorithmformat; 5042bfab9c0SGao Xiang pcl->length = 0; 5052bfab9c0SGao Xiang pcl->partial = true; 50647e4937aSGao Xiang 50747e4937aSGao Xiang /* new pclusters should be claimed as type 1, primary and followed */ 5085c6dcc57SGao Xiang pcl->next = fe->owned_head; 50987ca34a7SGao Xiang pcl->pageofs_out = map->m_la & ~PAGE_MASK; 510db166fc2SGao Xiang fe->mode = Z_EROFS_PCLUSTER_FOLLOWED; 51147e4937aSGao Xiang 51247e4937aSGao Xiang /* 51347e4937aSGao Xiang * lock all primary followed works before visible to others 51447e4937aSGao Xiang * and mutex_trylock *never* fails for a new pcluster. 
51547e4937aSGao Xiang */ 51687ca34a7SGao Xiang mutex_init(&pcl->lock); 51787ca34a7SGao Xiang DBG_BUGON(!mutex_trylock(&pcl->lock)); 51847e4937aSGao Xiang 519cecf864dSYue Hu if (ztailpacking) { 520cecf864dSYue Hu pcl->obj.index = 0; /* which indicates ztailpacking */ 521cecf864dSYue Hu pcl->pageofs_in = erofs_blkoff(map->m_pa); 522cecf864dSYue Hu pcl->tailpacking_size = map->m_plen; 523cecf864dSYue Hu } else { 524cecf864dSYue Hu pcl->obj.index = map->m_pa >> PAGE_SHIFT; 525cecf864dSYue Hu 52683a386c0SGao Xiang grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj); 52764094a04SGao Xiang if (IS_ERR(grp)) { 52864094a04SGao Xiang err = PTR_ERR(grp); 52964094a04SGao Xiang goto err_out; 53064094a04SGao Xiang } 53164094a04SGao Xiang 53264094a04SGao Xiang if (grp != &pcl->obj) { 5335c6dcc57SGao Xiang fe->pcl = container_of(grp, 534cecf864dSYue Hu struct z_erofs_pcluster, obj); 53564094a04SGao Xiang err = -EEXIST; 53664094a04SGao Xiang goto err_out; 53747e4937aSGao Xiang } 538cecf864dSYue Hu } 53947e4937aSGao Xiang /* used to check tail merging loop due to corrupted images */ 5405c6dcc57SGao Xiang if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL) 5415c6dcc57SGao Xiang fe->tailpcl = pcl; 5425c6dcc57SGao Xiang fe->owned_head = &pcl->next; 5435c6dcc57SGao Xiang fe->pcl = pcl; 5449e579fc1SGao Xiang return 0; 54564094a04SGao Xiang 54664094a04SGao Xiang err_out: 54787ca34a7SGao Xiang mutex_unlock(&pcl->lock); 5489f6cc76eSGao Xiang z_erofs_free_pcluster(pcl); 54964094a04SGao Xiang return err; 55047e4937aSGao Xiang } 55147e4937aSGao Xiang 55283a386c0SGao Xiang static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe) 55347e4937aSGao Xiang { 55483a386c0SGao Xiang struct erofs_map_blocks *map = &fe->map; 5550d823b42SGao Xiang struct erofs_workgroup *grp = NULL; 5569e579fc1SGao Xiang int ret; 55747e4937aSGao Xiang 55887ca34a7SGao Xiang DBG_BUGON(fe->pcl); 55947e4937aSGao Xiang 56087ca34a7SGao Xiang /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */ 5615c6dcc57SGao Xiang DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL); 5625c6dcc57SGao Xiang DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED); 56347e4937aSGao Xiang 5640d823b42SGao Xiang if (!(map->m_flags & EROFS_MAP_META)) { 5650d823b42SGao Xiang grp = erofs_find_workgroup(fe->inode->i_sb, 5660d823b42SGao Xiang map->m_pa >> PAGE_SHIFT); 5670d823b42SGao Xiang } else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) { 56847e4937aSGao Xiang DBG_BUGON(1); 569cecf864dSYue Hu return -EFSCORRUPTED; 570cecf864dSYue Hu } 57147e4937aSGao Xiang 57264094a04SGao Xiang if (grp) { 5735c6dcc57SGao Xiang fe->pcl = container_of(grp, struct z_erofs_pcluster, obj); 5740d823b42SGao Xiang ret = -EEXIST; 57564094a04SGao Xiang } else { 57683a386c0SGao Xiang ret = z_erofs_register_pcluster(fe); 57764094a04SGao Xiang } 57847e4937aSGao Xiang 5790d823b42SGao Xiang if (ret == -EEXIST) { 580267f2492SGao Xiang mutex_lock(&fe->pcl->lock); 581267f2492SGao Xiang /* used to check tail merging loop due to corrupted images */ 582267f2492SGao Xiang if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL) 583267f2492SGao Xiang fe->tailpcl = fe->pcl; 584267f2492SGao Xiang 585267f2492SGao Xiang z_erofs_try_to_claim_pcluster(fe); 5860d823b42SGao Xiang } else if (ret) { 5870d823b42SGao Xiang return ret; 5880d823b42SGao Xiang } 58906a304cdSGao Xiang z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset, 590387bab87SGao Xiang Z_EROFS_INLINE_BVECS, fe->pcl->vcnt); 59181382f5fSGao Xiang /* since file-backed online pages are traversed in reverse order */ 592ed722fbcSGao Xiang 
fe->icur = z_erofs_pclusterpages(fe->pcl); 59347e4937aSGao Xiang return 0; 59447e4937aSGao Xiang } 59547e4937aSGao Xiang 59647e4937aSGao Xiang /* 59747e4937aSGao Xiang * keep in mind that no referenced pclusters will be freed 59847e4937aSGao Xiang * only after a RCU grace period. 59947e4937aSGao Xiang */ 60047e4937aSGao Xiang static void z_erofs_rcu_callback(struct rcu_head *head) 60147e4937aSGao Xiang { 60287ca34a7SGao Xiang z_erofs_free_pcluster(container_of(head, 60387ca34a7SGao Xiang struct z_erofs_pcluster, rcu)); 60447e4937aSGao Xiang } 60547e4937aSGao Xiang 60647e4937aSGao Xiang void erofs_workgroup_free_rcu(struct erofs_workgroup *grp) 60747e4937aSGao Xiang { 60847e4937aSGao Xiang struct z_erofs_pcluster *const pcl = 60947e4937aSGao Xiang container_of(grp, struct z_erofs_pcluster, obj); 61047e4937aSGao Xiang 61187ca34a7SGao Xiang call_rcu(&pcl->rcu, z_erofs_rcu_callback); 61247e4937aSGao Xiang } 61347e4937aSGao Xiang 6145c6dcc57SGao Xiang static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe) 61547e4937aSGao Xiang { 61687ca34a7SGao Xiang struct z_erofs_pcluster *pcl = fe->pcl; 61747e4937aSGao Xiang 61887ca34a7SGao Xiang if (!pcl) 61947e4937aSGao Xiang return false; 62047e4937aSGao Xiang 62106a304cdSGao Xiang z_erofs_bvec_iter_end(&fe->biter); 62287ca34a7SGao Xiang mutex_unlock(&pcl->lock); 62347e4937aSGao Xiang 62406a304cdSGao Xiang if (fe->candidate_bvpage) { 62506a304cdSGao Xiang DBG_BUGON(z_erofs_is_shortlived_page(fe->candidate_bvpage)); 62606a304cdSGao Xiang fe->candidate_bvpage = NULL; 62706a304cdSGao Xiang } 62806a304cdSGao Xiang 62947e4937aSGao Xiang /* 63047e4937aSGao Xiang * if all pending pages are added, don't hold its reference 63147e4937aSGao Xiang * any longer if the pcluster isn't hosted by ourselves. 63247e4937aSGao Xiang */ 633db166fc2SGao Xiang if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE) 63487ca34a7SGao Xiang erofs_workgroup_put(&pcl->obj); 63547e4937aSGao Xiang 63687ca34a7SGao Xiang fe->pcl = NULL; 63747e4937aSGao Xiang return true; 63847e4937aSGao Xiang } 63947e4937aSGao Xiang 64047e4937aSGao Xiang static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe, 64147e4937aSGao Xiang unsigned int cachestrategy, 64247e4937aSGao Xiang erofs_off_t la) 64347e4937aSGao Xiang { 64447e4937aSGao Xiang if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED) 64547e4937aSGao Xiang return false; 64647e4937aSGao Xiang 64747e4937aSGao Xiang if (fe->backmost) 64847e4937aSGao Xiang return true; 64947e4937aSGao Xiang 65047e4937aSGao Xiang return cachestrategy >= EROFS_ZIP_CACHE_READAROUND && 65147e4937aSGao Xiang la < fe->headoffset; 65247e4937aSGao Xiang } 65347e4937aSGao Xiang 654b15b2e30SYue Hu static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos, 655b15b2e30SYue Hu struct page *page, unsigned int pageofs, 656b15b2e30SYue Hu unsigned int len) 657b15b2e30SYue Hu { 658b15b2e30SYue Hu struct inode *packed_inode = EROFS_I_SB(inode)->packed_inode; 659b15b2e30SYue Hu struct erofs_buf buf = __EROFS_BUF_INITIALIZER; 660b15b2e30SYue Hu u8 *src, *dst; 661b15b2e30SYue Hu unsigned int i, cnt; 662b15b2e30SYue Hu 663b15b2e30SYue Hu pos += EROFS_I(inode)->z_fragmentoff; 664b15b2e30SYue Hu for (i = 0; i < len; i += cnt) { 665b15b2e30SYue Hu cnt = min_t(unsigned int, len - i, 666b15b2e30SYue Hu EROFS_BLKSIZ - erofs_blkoff(pos)); 667b15b2e30SYue Hu src = erofs_bread(&buf, packed_inode, 668b15b2e30SYue Hu erofs_blknr(pos), EROFS_KMAP); 669b15b2e30SYue Hu if (IS_ERR(src)) { 670b15b2e30SYue Hu erofs_put_metabuf(&buf); 671b15b2e30SYue Hu return 
PTR_ERR(src); 672b15b2e30SYue Hu } 673b15b2e30SYue Hu 674b15b2e30SYue Hu dst = kmap_local_page(page); 675b15b2e30SYue Hu memcpy(dst + pageofs + i, src + erofs_blkoff(pos), cnt); 676b15b2e30SYue Hu kunmap_local(dst); 677b15b2e30SYue Hu pos += cnt; 678b15b2e30SYue Hu } 679b15b2e30SYue Hu erofs_put_metabuf(&buf); 680b15b2e30SYue Hu return 0; 681b15b2e30SYue Hu } 682b15b2e30SYue Hu 68347e4937aSGao Xiang static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, 684eaa9172aSGao Xiang struct page *page, struct page **pagepool) 68547e4937aSGao Xiang { 68647e4937aSGao Xiang struct inode *const inode = fe->inode; 687bda17a45SGao Xiang struct erofs_sb_info *const sbi = EROFS_I_SB(inode); 68847e4937aSGao Xiang struct erofs_map_blocks *const map = &fe->map; 68947e4937aSGao Xiang const loff_t offset = page_offset(page); 6905b220b20SGao Xiang bool tight = true, exclusive; 69147e4937aSGao Xiang 69247e4937aSGao Xiang enum z_erofs_cache_alloctype cache_strategy; 6932bfab9c0SGao Xiang unsigned int cur, end, spiltted; 69447e4937aSGao Xiang int err = 0; 69547e4937aSGao Xiang 69647e4937aSGao Xiang /* register locked file pages as online pages in pack */ 69747e4937aSGao Xiang z_erofs_onlinepage_init(page); 69847e4937aSGao Xiang 69947e4937aSGao Xiang spiltted = 0; 70047e4937aSGao Xiang end = PAGE_SIZE; 70147e4937aSGao Xiang repeat: 70247e4937aSGao Xiang cur = end - 1; 70347e4937aSGao Xiang 70439397a46SGao Xiang if (offset + cur < map->m_la || 70539397a46SGao Xiang offset + cur >= map->m_la + map->m_llen) { 70639397a46SGao Xiang erofs_dbg("out-of-range map @ pos %llu", offset + cur); 70747e4937aSGao Xiang 7085c6dcc57SGao Xiang if (z_erofs_collector_end(fe)) 70947e4937aSGao Xiang fe->backmost = false; 71047e4937aSGao Xiang map->m_la = offset + cur; 71147e4937aSGao Xiang map->m_llen = 0; 71247e4937aSGao Xiang err = z_erofs_map_blocks_iter(inode, map, 0); 7138d8a09b0SGao Xiang if (err) 71467148551SGao Xiang goto out; 71539397a46SGao Xiang } else { 71639397a46SGao Xiang if (fe->pcl) 71739397a46SGao Xiang goto hitted; 71839397a46SGao Xiang /* didn't get a valid pcluster previously (very rare) */ 71939397a46SGao Xiang } 72047e4937aSGao Xiang 721b15b2e30SYue Hu if (!(map->m_flags & EROFS_MAP_MAPPED) || 722b15b2e30SYue Hu map->m_flags & EROFS_MAP_FRAGMENT) 72347e4937aSGao Xiang goto hitted; 72447e4937aSGao Xiang 72583a386c0SGao Xiang err = z_erofs_collector_begin(fe); 7268d8a09b0SGao Xiang if (err) 72767148551SGao Xiang goto out; 72847e4937aSGao Xiang 7295c6dcc57SGao Xiang if (z_erofs_is_inline_pcluster(fe->pcl)) { 73009c54379SGao Xiang void *mp; 731cecf864dSYue Hu 73209c54379SGao Xiang mp = erofs_read_metabuf(&fe->map.buf, inode->i_sb, 73309c54379SGao Xiang erofs_blknr(map->m_pa), EROFS_NO_KMAP); 73409c54379SGao Xiang if (IS_ERR(mp)) { 73509c54379SGao Xiang err = PTR_ERR(mp); 736cecf864dSYue Hu erofs_err(inode->i_sb, 737cecf864dSYue Hu "failed to get inline page, err %d", err); 73867148551SGao Xiang goto out; 739cecf864dSYue Hu } 74009c54379SGao Xiang get_page(fe->map.buf.page); 741ed722fbcSGao Xiang WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, 742ed722fbcSGao Xiang fe->map.buf.page); 743db166fc2SGao Xiang fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE; 744cecf864dSYue Hu } else { 7456f39d1e1SGao Xiang /* bind cache first when cached decompression is preferred */ 746cecf864dSYue Hu if (should_alloc_managed_pages(fe, sbi->opt.cache_strategy, 747cecf864dSYue Hu map->m_la)) 7481825c8d7SGao Xiang cache_strategy = TRYALLOC; 74947e4937aSGao Xiang else 75047e4937aSGao Xiang cache_strategy = DONTALLOC; 
75147e4937aSGao Xiang 7526f39d1e1SGao Xiang z_erofs_bind_cache(fe, cache_strategy, pagepool); 753cecf864dSYue Hu } 75447e4937aSGao Xiang hitted: 755dc76ea8cSGao Xiang /* 756dc76ea8cSGao Xiang * Ensure the current partial page belongs to this submit chain rather 757dc76ea8cSGao Xiang * than other concurrent submit chains or the noio(bypass) chain since 758dc76ea8cSGao Xiang * those chains are handled asynchronously thus the page cannot be used 759387bab87SGao Xiang * for inplace I/O or bvpage (should be processed in a strict order.) 760dc76ea8cSGao Xiang */ 761db166fc2SGao Xiang tight &= (fe->mode >= Z_EROFS_PCLUSTER_HOOKED && 762db166fc2SGao Xiang fe->mode != Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE); 763dc76ea8cSGao Xiang 76447e4937aSGao Xiang cur = end - min_t(unsigned int, offset + end - map->m_la, end); 7658d8a09b0SGao Xiang if (!(map->m_flags & EROFS_MAP_MAPPED)) { 76647e4937aSGao Xiang zero_user_segment(page, cur, end); 76747e4937aSGao Xiang goto next_part; 76847e4937aSGao Xiang } 769b15b2e30SYue Hu if (map->m_flags & EROFS_MAP_FRAGMENT) { 770b15b2e30SYue Hu unsigned int pageofs, skip, len; 771b15b2e30SYue Hu 772b15b2e30SYue Hu if (offset > map->m_la) { 773b15b2e30SYue Hu pageofs = 0; 774b15b2e30SYue Hu skip = offset - map->m_la; 775b15b2e30SYue Hu } else { 776b15b2e30SYue Hu pageofs = map->m_la & ~PAGE_MASK; 777b15b2e30SYue Hu skip = 0; 778b15b2e30SYue Hu } 779b15b2e30SYue Hu len = min_t(unsigned int, map->m_llen - skip, end - cur); 780b15b2e30SYue Hu err = z_erofs_read_fragment(inode, skip, page, pageofs, len); 781b15b2e30SYue Hu if (err) 782b15b2e30SYue Hu goto out; 783b15b2e30SYue Hu ++spiltted; 784b15b2e30SYue Hu tight = false; 785b15b2e30SYue Hu goto next_part; 786b15b2e30SYue Hu } 78747e4937aSGao Xiang 7885b220b20SGao Xiang exclusive = (!cur && (!spiltted || tight)); 78947e4937aSGao Xiang if (cur) 790db166fc2SGao Xiang tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED); 79147e4937aSGao Xiang 79247e4937aSGao Xiang retry: 79306a304cdSGao Xiang err = z_erofs_attach_page(fe, &((struct z_erofs_bvec) { 79406a304cdSGao Xiang .page = page, 79506a304cdSGao Xiang .offset = offset - map->m_la, 79606a304cdSGao Xiang .end = end, 7975b220b20SGao Xiang }), exclusive); 79806a304cdSGao Xiang /* should allocate an additional short-lived page for bvset */ 79906a304cdSGao Xiang if (err == -EAGAIN && !fe->candidate_bvpage) { 80006a304cdSGao Xiang fe->candidate_bvpage = alloc_page(GFP_NOFS | __GFP_NOFAIL); 80106a304cdSGao Xiang set_page_private(fe->candidate_bvpage, 80206a304cdSGao Xiang Z_EROFS_SHORTLIVED_PAGE); 80347e4937aSGao Xiang goto retry; 80447e4937aSGao Xiang } 80547e4937aSGao Xiang 80606a304cdSGao Xiang if (err) { 80706a304cdSGao Xiang DBG_BUGON(err == -EAGAIN && fe->candidate_bvpage); 80867148551SGao Xiang goto out; 80906a304cdSGao Xiang } 81047e4937aSGao Xiang 81167148551SGao Xiang z_erofs_onlinepage_split(page); 81247e4937aSGao Xiang /* bump up the number of spiltted parts of a page */ 81347e4937aSGao Xiang ++spiltted; 814267f2492SGao Xiang if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK)) 815267f2492SGao Xiang fe->pcl->multibases = true; 81667148551SGao Xiang 8172bfab9c0SGao Xiang if ((map->m_flags & EROFS_MAP_FULL_MAPPED) && 8185c2a6425SGao Xiang !(map->m_flags & EROFS_MAP_PARTIAL_REF) && 8192bfab9c0SGao Xiang fe->pcl->length == map->m_llen) 8202bfab9c0SGao Xiang fe->pcl->partial = false; 8212bfab9c0SGao Xiang if (fe->pcl->length < offset + end - map->m_la) { 8222bfab9c0SGao Xiang fe->pcl->length = offset + end - map->m_la; 8232bfab9c0SGao Xiang fe->pcl->pageofs_out = map->m_la & 
~PAGE_MASK; 8242bfab9c0SGao Xiang } 82547e4937aSGao Xiang next_part: 8262bfab9c0SGao Xiang /* shorten the remaining extent to update progress */ 82747e4937aSGao Xiang map->m_llen = offset + cur - map->m_la; 8282bfab9c0SGao Xiang map->m_flags &= ~EROFS_MAP_FULL_MAPPED; 82947e4937aSGao Xiang 83047e4937aSGao Xiang end = cur; 83147e4937aSGao Xiang if (end > 0) 83247e4937aSGao Xiang goto repeat; 83347e4937aSGao Xiang 83447e4937aSGao Xiang out: 83567148551SGao Xiang if (err) 83667148551SGao Xiang z_erofs_page_mark_eio(page); 83747e4937aSGao Xiang z_erofs_onlinepage_endio(page); 83847e4937aSGao Xiang 8394f761fa2SGao Xiang erofs_dbg("%s, finish page: %pK spiltted: %u map->m_llen %llu", 84047e4937aSGao Xiang __func__, page, spiltted, map->m_llen); 84147e4937aSGao Xiang return err; 84247e4937aSGao Xiang } 84347e4937aSGao Xiang 84440452ffcSHuang Jianan static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi, 84540452ffcSHuang Jianan unsigned int readahead_pages) 84640452ffcSHuang Jianan { 847a2e20a25SMatthew Wilcox (Oracle) /* auto: enable for read_folio, disable for readahead */ 84840452ffcSHuang Jianan if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) && 84940452ffcSHuang Jianan !readahead_pages) 85040452ffcSHuang Jianan return true; 85140452ffcSHuang Jianan 85240452ffcSHuang Jianan if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON) && 85340452ffcSHuang Jianan (readahead_pages <= sbi->opt.max_sync_decompress_pages)) 85440452ffcSHuang Jianan return true; 85540452ffcSHuang Jianan 85640452ffcSHuang Jianan return false; 85740452ffcSHuang Jianan } 85840452ffcSHuang Jianan 8596aaa7b06SGao Xiang static bool z_erofs_page_is_invalidated(struct page *page) 8606aaa7b06SGao Xiang { 8616aaa7b06SGao Xiang return !page->mapping && !z_erofs_is_shortlived_page(page); 8626aaa7b06SGao Xiang } 8636aaa7b06SGao Xiang 8644f05687fSGao Xiang struct z_erofs_decompress_backend { 8654f05687fSGao Xiang struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES]; 8664f05687fSGao Xiang struct super_block *sb; 8674f05687fSGao Xiang struct z_erofs_pcluster *pcl; 8684f05687fSGao Xiang 8694f05687fSGao Xiang /* pages with the longest decompressed length for deduplication */ 8704f05687fSGao Xiang struct page **decompressed_pages; 8714f05687fSGao Xiang /* pages to keep the compressed data */ 8724f05687fSGao Xiang struct page **compressed_pages; 8734f05687fSGao Xiang 874267f2492SGao Xiang struct list_head decompressed_secondary_bvecs; 8754f05687fSGao Xiang struct page **pagepool; 8762bfab9c0SGao Xiang unsigned int onstack_used, nr_pages; 8774f05687fSGao Xiang }; 8784f05687fSGao Xiang 879267f2492SGao Xiang struct z_erofs_bvec_item { 880267f2492SGao Xiang struct z_erofs_bvec bvec; 881267f2492SGao Xiang struct list_head list; 882267f2492SGao Xiang }; 883267f2492SGao Xiang 884267f2492SGao Xiang static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be, 8853fe96ee0SGao Xiang struct z_erofs_bvec *bvec) 8863fe96ee0SGao Xiang { 887267f2492SGao Xiang struct z_erofs_bvec_item *item; 888267f2492SGao Xiang 889267f2492SGao Xiang if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK)) { 890267f2492SGao Xiang unsigned int pgnr; 8913fe96ee0SGao Xiang 892267f2492SGao Xiang pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT; 8932bfab9c0SGao Xiang DBG_BUGON(pgnr >= be->nr_pages); 894*63bbb856SGao Xiang if (!be->decompressed_pages[pgnr]) { 8953fe96ee0SGao Xiang be->decompressed_pages[pgnr] = bvec->page; 896267f2492SGao Xiang return; 8973fe96ee0SGao Xiang } 898*63bbb856SGao Xiang } 
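	/*
	 * Reaching here means the bvec either is not aligned to a page
	 * boundary of the decompressed output or targets a page slot that
	 * is already claimed; such bvecs are recorded separately and copied
	 * out after decompression by z_erofs_fill_other_copies().
	 */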
8993fe96ee0SGao Xiang 900267f2492SGao Xiang /* (cold path) one pcluster is requested multiple times */ 901267f2492SGao Xiang item = kmalloc(sizeof(*item), GFP_KERNEL | __GFP_NOFAIL); 902267f2492SGao Xiang item->bvec = *bvec; 903267f2492SGao Xiang list_add(&item->list, &be->decompressed_secondary_bvecs); 904267f2492SGao Xiang } 905267f2492SGao Xiang 906267f2492SGao Xiang static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be, 907267f2492SGao Xiang int err) 908267f2492SGao Xiang { 909267f2492SGao Xiang unsigned int off0 = be->pcl->pageofs_out; 910267f2492SGao Xiang struct list_head *p, *n; 911267f2492SGao Xiang 912267f2492SGao Xiang list_for_each_safe(p, n, &be->decompressed_secondary_bvecs) { 913267f2492SGao Xiang struct z_erofs_bvec_item *bvi; 914267f2492SGao Xiang unsigned int end, cur; 915267f2492SGao Xiang void *dst, *src; 916267f2492SGao Xiang 917267f2492SGao Xiang bvi = container_of(p, struct z_erofs_bvec_item, list); 918267f2492SGao Xiang cur = bvi->bvec.offset < 0 ? -bvi->bvec.offset : 0; 919267f2492SGao Xiang end = min_t(unsigned int, be->pcl->length - bvi->bvec.offset, 920267f2492SGao Xiang bvi->bvec.end); 921267f2492SGao Xiang dst = kmap_local_page(bvi->bvec.page); 922267f2492SGao Xiang while (cur < end) { 923267f2492SGao Xiang unsigned int pgnr, scur, len; 924267f2492SGao Xiang 925267f2492SGao Xiang pgnr = (bvi->bvec.offset + cur + off0) >> PAGE_SHIFT; 926267f2492SGao Xiang DBG_BUGON(pgnr >= be->nr_pages); 927267f2492SGao Xiang 928267f2492SGao Xiang scur = bvi->bvec.offset + cur - 929267f2492SGao Xiang ((pgnr << PAGE_SHIFT) - off0); 930267f2492SGao Xiang len = min_t(unsigned int, end - cur, PAGE_SIZE - scur); 931267f2492SGao Xiang if (!be->decompressed_pages[pgnr]) { 932267f2492SGao Xiang err = -EFSCORRUPTED; 933267f2492SGao Xiang cur += len; 934267f2492SGao Xiang continue; 935267f2492SGao Xiang } 936267f2492SGao Xiang src = kmap_local_page(be->decompressed_pages[pgnr]); 937267f2492SGao Xiang memcpy(dst + cur, src + scur, len); 938267f2492SGao Xiang kunmap_local(src); 939267f2492SGao Xiang cur += len; 940267f2492SGao Xiang } 941267f2492SGao Xiang kunmap_local(dst); 942267f2492SGao Xiang if (err) 943267f2492SGao Xiang z_erofs_page_mark_eio(bvi->bvec.page); 944267f2492SGao Xiang z_erofs_onlinepage_endio(bvi->bvec.page); 945267f2492SGao Xiang list_del(p); 946267f2492SGao Xiang kfree(bvi); 947267f2492SGao Xiang } 948267f2492SGao Xiang } 949267f2492SGao Xiang 950267f2492SGao Xiang static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be) 95142fec235SGao Xiang { 9524f05687fSGao Xiang struct z_erofs_pcluster *pcl = be->pcl; 95306a304cdSGao Xiang struct z_erofs_bvec_iter biter; 95406a304cdSGao Xiang struct page *old_bvpage; 955267f2492SGao Xiang int i; 95642fec235SGao Xiang 957387bab87SGao Xiang z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0); 95842fec235SGao Xiang for (i = 0; i < pcl->vcnt; ++i) { 95906a304cdSGao Xiang struct z_erofs_bvec bvec; 96042fec235SGao Xiang 96106a304cdSGao Xiang z_erofs_bvec_dequeue(&biter, &bvec, &old_bvpage); 96242fec235SGao Xiang 96306a304cdSGao Xiang if (old_bvpage) 9644f05687fSGao Xiang z_erofs_put_shortlivedpage(be->pagepool, old_bvpage); 96542fec235SGao Xiang 96606a304cdSGao Xiang DBG_BUGON(z_erofs_page_is_invalidated(bvec.page)); 967267f2492SGao Xiang z_erofs_do_decompressed_bvec(be, &bvec); 96842fec235SGao Xiang } 96906a304cdSGao Xiang 97006a304cdSGao Xiang old_bvpage = z_erofs_bvec_iter_end(&biter); 97106a304cdSGao Xiang if (old_bvpage) 9724f05687fSGao Xiang 
z_erofs_put_shortlivedpage(be->pagepool, old_bvpage); 97342fec235SGao Xiang } 97442fec235SGao Xiang 9754f05687fSGao Xiang static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be, 9764f05687fSGao Xiang bool *overlapped) 97767139e36SGao Xiang { 9784f05687fSGao Xiang struct z_erofs_pcluster *pcl = be->pcl; 97967139e36SGao Xiang unsigned int pclusterpages = z_erofs_pclusterpages(pcl); 98067139e36SGao Xiang int i, err = 0; 98167139e36SGao Xiang 98267139e36SGao Xiang *overlapped = false; 98367139e36SGao Xiang for (i = 0; i < pclusterpages; ++i) { 984ed722fbcSGao Xiang struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i]; 985ed722fbcSGao Xiang struct page *page = bvec->page; 98667139e36SGao Xiang 98767139e36SGao Xiang /* compressed pages ought to be present before decompressing */ 98867139e36SGao Xiang if (!page) { 98967139e36SGao Xiang DBG_BUGON(1); 99067139e36SGao Xiang continue; 99167139e36SGao Xiang } 992fe3e5914SGao Xiang be->compressed_pages[i] = page; 99367139e36SGao Xiang 99467139e36SGao Xiang if (z_erofs_is_inline_pcluster(pcl)) { 99567139e36SGao Xiang if (!PageUptodate(page)) 99667139e36SGao Xiang err = -EIO; 99767139e36SGao Xiang continue; 99867139e36SGao Xiang } 99967139e36SGao Xiang 100067139e36SGao Xiang DBG_BUGON(z_erofs_page_is_invalidated(page)); 100167139e36SGao Xiang if (!z_erofs_is_shortlived_page(page)) { 10024f05687fSGao Xiang if (erofs_page_is_managed(EROFS_SB(be->sb), page)) { 100367139e36SGao Xiang if (!PageUptodate(page)) 100467139e36SGao Xiang err = -EIO; 100567139e36SGao Xiang continue; 100667139e36SGao Xiang } 1007267f2492SGao Xiang z_erofs_do_decompressed_bvec(be, bvec); 100867139e36SGao Xiang *overlapped = true; 100967139e36SGao Xiang } 101067139e36SGao Xiang } 101167139e36SGao Xiang 1012fe3e5914SGao Xiang if (err) 10134f05687fSGao Xiang return err; 10144f05687fSGao Xiang return 0; 101567139e36SGao Xiang } 101667139e36SGao Xiang 10174f05687fSGao Xiang static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, 10184f05687fSGao Xiang int err) 101947e4937aSGao Xiang { 10204f05687fSGao Xiang struct erofs_sb_info *const sbi = EROFS_SB(be->sb); 10214f05687fSGao Xiang struct z_erofs_pcluster *pcl = be->pcl; 1022cecf864dSYue Hu unsigned int pclusterpages = z_erofs_pclusterpages(pcl); 10232bfab9c0SGao Xiang unsigned int i, inputsize; 102467148551SGao Xiang int err2; 10252bfab9c0SGao Xiang struct page *page; 10262bfab9c0SGao Xiang bool overlapped; 102747e4937aSGao Xiang 102887ca34a7SGao Xiang mutex_lock(&pcl->lock); 10292bfab9c0SGao Xiang be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT; 103047e4937aSGao Xiang 1031fe3e5914SGao Xiang /* allocate (de)compressed page arrays if cannot be kept on stack */ 1032fe3e5914SGao Xiang be->decompressed_pages = NULL; 1033fe3e5914SGao Xiang be->compressed_pages = NULL; 1034fe3e5914SGao Xiang be->onstack_used = 0; 10352bfab9c0SGao Xiang if (be->nr_pages <= Z_EROFS_ONSTACK_PAGES) { 10364f05687fSGao Xiang be->decompressed_pages = be->onstack_pages; 10372bfab9c0SGao Xiang be->onstack_used = be->nr_pages; 10384f05687fSGao Xiang memset(be->decompressed_pages, 0, 10392bfab9c0SGao Xiang sizeof(struct page *) * be->nr_pages); 1040fe3e5914SGao Xiang } 1041fe3e5914SGao Xiang 1042fe3e5914SGao Xiang if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES) 1043fe3e5914SGao Xiang be->compressed_pages = be->onstack_pages + be->onstack_used; 1044fe3e5914SGao Xiang 1045fe3e5914SGao Xiang if (!be->decompressed_pages) 10464f05687fSGao Xiang be->decompressed_pages = 10472bfab9c0SGao Xiang 
kvcalloc(be->nr_pages, sizeof(struct page *), 1048e7368187SGao Xiang GFP_KERNEL | __GFP_NOFAIL); 1049fe3e5914SGao Xiang if (!be->compressed_pages) 1050fe3e5914SGao Xiang be->compressed_pages = 1051fe3e5914SGao Xiang kvcalloc(pclusterpages, sizeof(struct page *), 1052fe3e5914SGao Xiang GFP_KERNEL | __GFP_NOFAIL); 105347e4937aSGao Xiang 1054267f2492SGao Xiang z_erofs_parse_out_bvecs(be); 10554f05687fSGao Xiang err2 = z_erofs_parse_in_bvecs(be, &overlapped); 10564f05687fSGao Xiang if (err2) 10574f05687fSGao Xiang err = err2; 10588d8a09b0SGao Xiang if (err) 105947e4937aSGao Xiang goto out; 106047e4937aSGao Xiang 1061cecf864dSYue Hu if (z_erofs_is_inline_pcluster(pcl)) 1062cecf864dSYue Hu inputsize = pcl->tailpacking_size; 1063cecf864dSYue Hu else 1064cecf864dSYue Hu inputsize = pclusterpages * PAGE_SIZE; 1065cecf864dSYue Hu 106647e4937aSGao Xiang err = z_erofs_decompress(&(struct z_erofs_decompress_req) { 10674f05687fSGao Xiang .sb = be->sb, 10684f05687fSGao Xiang .in = be->compressed_pages, 10694f05687fSGao Xiang .out = be->decompressed_pages, 1070cecf864dSYue Hu .pageofs_in = pcl->pageofs_in, 107187ca34a7SGao Xiang .pageofs_out = pcl->pageofs_out, 10729f6cc76eSGao Xiang .inputsize = inputsize, 10732bfab9c0SGao Xiang .outputsize = pcl->length, 107447e4937aSGao Xiang .alg = pcl->algorithmformat, 107547e4937aSGao Xiang .inplace_io = overlapped, 10762bfab9c0SGao Xiang .partial_decoding = pcl->partial, 1077267f2492SGao Xiang .fillgaps = pcl->multibases, 10784f05687fSGao Xiang }, be->pagepool); 107947e4937aSGao Xiang 108047e4937aSGao Xiang out: 1081cecf864dSYue Hu /* must handle all compressed pages before actual file pages */ 1082cecf864dSYue Hu if (z_erofs_is_inline_pcluster(pcl)) { 1083ed722fbcSGao Xiang page = pcl->compressed_bvecs[0].page; 1084ed722fbcSGao Xiang WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL); 1085cecf864dSYue Hu put_page(page); 1086cecf864dSYue Hu } else { 1087cecf864dSYue Hu for (i = 0; i < pclusterpages; ++i) { 1088ed722fbcSGao Xiang page = pcl->compressed_bvecs[i].page; 108947e4937aSGao Xiang 109047e4937aSGao Xiang if (erofs_page_is_managed(sbi, page)) 109147e4937aSGao Xiang continue; 109247e4937aSGao Xiang 10936aaa7b06SGao Xiang /* recycle all individual short-lived pages */ 10944f05687fSGao Xiang (void)z_erofs_put_shortlivedpage(be->pagepool, page); 1095ed722fbcSGao Xiang WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL); 109647e4937aSGao Xiang } 1097cecf864dSYue Hu } 1098fe3e5914SGao Xiang if (be->compressed_pages < be->onstack_pages || 1099fe3e5914SGao Xiang be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES) 1100fe3e5914SGao Xiang kvfree(be->compressed_pages); 1101267f2492SGao Xiang z_erofs_fill_other_copies(be, err); 110247e4937aSGao Xiang 11032bfab9c0SGao Xiang for (i = 0; i < be->nr_pages; ++i) { 11044f05687fSGao Xiang page = be->decompressed_pages[i]; 110547e4937aSGao Xiang if (!page) 110647e4937aSGao Xiang continue; 110747e4937aSGao Xiang 11086aaa7b06SGao Xiang DBG_BUGON(z_erofs_page_is_invalidated(page)); 110947e4937aSGao Xiang 11106aaa7b06SGao Xiang /* recycle all individual short-lived pages */ 11114f05687fSGao Xiang if (z_erofs_put_shortlivedpage(be->pagepool, page)) 111247e4937aSGao Xiang continue; 111367148551SGao Xiang if (err) 111467148551SGao Xiang z_erofs_page_mark_eio(page); 111547e4937aSGao Xiang z_erofs_onlinepage_endio(page); 111647e4937aSGao Xiang } 111747e4937aSGao Xiang 11184f05687fSGao Xiang if (be->decompressed_pages != be->onstack_pages) 11194f05687fSGao Xiang kvfree(be->decompressed_pages); 112047e4937aSGao Xiang 
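	/*
	 * Reset the per-run state below so that the pcluster can be reused
	 * if it is looked up again before the workgroup is reclaimed.
	 */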
11212bfab9c0SGao Xiang pcl->length = 0; 11222bfab9c0SGao Xiang pcl->partial = true; 1123267f2492SGao Xiang pcl->multibases = false; 112406a304cdSGao Xiang pcl->bvset.nextpage = NULL; 112587ca34a7SGao Xiang pcl->vcnt = 0; 112647e4937aSGao Xiang 112787ca34a7SGao Xiang /* pcluster lock MUST be taken before the following line */ 112847e4937aSGao Xiang WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL); 112987ca34a7SGao Xiang mutex_unlock(&pcl->lock); 113047e4937aSGao Xiang return err; 113147e4937aSGao Xiang } 113247e4937aSGao Xiang 11330c638f70SGao Xiang static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io, 1134eaa9172aSGao Xiang struct page **pagepool) 113547e4937aSGao Xiang { 11364f05687fSGao Xiang struct z_erofs_decompress_backend be = { 11374f05687fSGao Xiang .sb = io->sb, 11384f05687fSGao Xiang .pagepool = pagepool, 1139267f2492SGao Xiang .decompressed_secondary_bvecs = 1140267f2492SGao Xiang LIST_HEAD_INIT(be.decompressed_secondary_bvecs), 11414f05687fSGao Xiang }; 114247e4937aSGao Xiang z_erofs_next_pcluster_t owned = io->head; 114347e4937aSGao Xiang 114447e4937aSGao Xiang while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) { 11454f05687fSGao Xiang /* impossible that 'owned' equals Z_EROFS_WORK_TPTR_TAIL */ 114647e4937aSGao Xiang DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL); 11474f05687fSGao Xiang /* impossible that 'owned' equals Z_EROFS_PCLUSTER_NIL */ 114847e4937aSGao Xiang DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL); 114947e4937aSGao Xiang 11504f05687fSGao Xiang be.pcl = container_of(owned, struct z_erofs_pcluster, next); 11514f05687fSGao Xiang owned = READ_ONCE(be.pcl->next); 115247e4937aSGao Xiang 11534f05687fSGao Xiang z_erofs_decompress_pcluster(&be, io->eio ? -EIO : 0); 11544f05687fSGao Xiang erofs_workgroup_put(&be.pcl->obj); 115547e4937aSGao Xiang } 115647e4937aSGao Xiang } 115747e4937aSGao Xiang 11580c638f70SGao Xiang static void z_erofs_decompressqueue_work(struct work_struct *work) 115947e4937aSGao Xiang { 1160a4b1fab1SGao Xiang struct z_erofs_decompressqueue *bgq = 1161a4b1fab1SGao Xiang container_of(work, struct z_erofs_decompressqueue, u.work); 1162eaa9172aSGao Xiang struct page *pagepool = NULL; 116347e4937aSGao Xiang 1164a4b1fab1SGao Xiang DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED); 11650c638f70SGao Xiang z_erofs_decompress_queue(bgq, &pagepool); 116647e4937aSGao Xiang 1167eaa9172aSGao Xiang erofs_release_pages(&pagepool); 1168a4b1fab1SGao Xiang kvfree(bgq); 116947e4937aSGao Xiang } 117047e4937aSGao Xiang 11717865827cSGao Xiang static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, 11727865827cSGao Xiang bool sync, int bios) 11737865827cSGao Xiang { 11747865827cSGao Xiang struct erofs_sb_info *const sbi = EROFS_SB(io->sb); 11757865827cSGao Xiang 11767865827cSGao Xiang /* wake up the caller thread for sync decompression */ 11777865827cSGao Xiang if (sync) { 11787865827cSGao Xiang if (!atomic_add_return(bios, &io->pending_bios)) 117960b30050SHongyu Jin complete(&io->u.done); 11807865827cSGao Xiang return; 11817865827cSGao Xiang } 11827865827cSGao Xiang 11837865827cSGao Xiang if (atomic_add_return(bios, &io->pending_bios)) 11847865827cSGao Xiang return; 11857865827cSGao Xiang /* Use workqueue and sync decompression for atomic contexts only */ 11867865827cSGao Xiang if (in_atomic() || irqs_disabled()) { 11877865827cSGao Xiang queue_work(z_erofs_workqueue, &io->u.work); 11887865827cSGao Xiang /* enable sync decompression for readahead */ 11897865827cSGao Xiang if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) 11907865827cSGao 
Xiang sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON; 11917865827cSGao Xiang return; 11927865827cSGao Xiang } 11937865827cSGao Xiang z_erofs_decompressqueue_work(&io->u.work); 11947865827cSGao Xiang } 11957865827cSGao Xiang 119647e4937aSGao Xiang static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl, 119747e4937aSGao Xiang unsigned int nr, 1198eaa9172aSGao Xiang struct page **pagepool, 11999f2731d6SGao Xiang struct address_space *mc) 120047e4937aSGao Xiang { 120147e4937aSGao Xiang const pgoff_t index = pcl->obj.index; 12029f2731d6SGao Xiang gfp_t gfp = mapping_gfp_mask(mc); 120347e4937aSGao Xiang bool tocache = false; 120447e4937aSGao Xiang 120547e4937aSGao Xiang struct address_space *mapping; 120647e4937aSGao Xiang struct page *oldpage, *page; 120747e4937aSGao Xiang 120847e4937aSGao Xiang compressed_page_t t; 120947e4937aSGao Xiang int justfound; 121047e4937aSGao Xiang 121147e4937aSGao Xiang repeat: 1212ed722fbcSGao Xiang page = READ_ONCE(pcl->compressed_bvecs[nr].page); 121347e4937aSGao Xiang oldpage = page; 121447e4937aSGao Xiang 121547e4937aSGao Xiang if (!page) 121647e4937aSGao Xiang goto out_allocpage; 121747e4937aSGao Xiang 121847e4937aSGao Xiang /* process the target tagged pointer */ 121947e4937aSGao Xiang t = tagptr_init(compressed_page_t, page); 122047e4937aSGao Xiang justfound = tagptr_unfold_tags(t); 122147e4937aSGao Xiang page = tagptr_unfold_ptr(t); 122247e4937aSGao Xiang 12231825c8d7SGao Xiang /* 12241825c8d7SGao Xiang * preallocated cached pages, which is used to avoid direct reclaim 12251825c8d7SGao Xiang * otherwise, it will go inplace I/O path instead. 12261825c8d7SGao Xiang */ 12271825c8d7SGao Xiang if (page->private == Z_EROFS_PREALLOCATED_PAGE) { 1228ed722fbcSGao Xiang WRITE_ONCE(pcl->compressed_bvecs[nr].page, page); 12291825c8d7SGao Xiang set_page_private(page, 0); 12301825c8d7SGao Xiang tocache = true; 12311825c8d7SGao Xiang goto out_tocache; 12321825c8d7SGao Xiang } 123347e4937aSGao Xiang mapping = READ_ONCE(page->mapping); 123447e4937aSGao Xiang 123547e4937aSGao Xiang /* 12366aaa7b06SGao Xiang * file-backed online pages in plcuster are all locked steady, 123747e4937aSGao Xiang * therefore it is impossible for `mapping' to be NULL. 123847e4937aSGao Xiang */ 123947e4937aSGao Xiang if (mapping && mapping != mc) 124047e4937aSGao Xiang /* ought to be unmanaged pages */ 124147e4937aSGao Xiang goto out; 124247e4937aSGao Xiang 12436aaa7b06SGao Xiang /* directly return for shortlived page as well */ 12446aaa7b06SGao Xiang if (z_erofs_is_shortlived_page(page)) 12456aaa7b06SGao Xiang goto out; 12466aaa7b06SGao Xiang 124747e4937aSGao Xiang lock_page(page); 124847e4937aSGao Xiang 124947e4937aSGao Xiang /* only true if page reclaim goes wrong, should never happen */ 125047e4937aSGao Xiang DBG_BUGON(justfound && PagePrivate(page)); 125147e4937aSGao Xiang 125247e4937aSGao Xiang /* the page is still in manage cache */ 125347e4937aSGao Xiang if (page->mapping == mc) { 1254ed722fbcSGao Xiang WRITE_ONCE(pcl->compressed_bvecs[nr].page, page); 125547e4937aSGao Xiang 125647e4937aSGao Xiang if (!PagePrivate(page)) { 125747e4937aSGao Xiang /* 125847e4937aSGao Xiang * impossible to be !PagePrivate(page) for 125947e4937aSGao Xiang * the current restriction as well if 1260ed722fbcSGao Xiang * the page is already in compressed_bvecs[]. 
static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
                                               unsigned int nr,
                                               struct page **pagepool,
                                               struct address_space *mc)
{
        const pgoff_t index = pcl->obj.index;
        gfp_t gfp = mapping_gfp_mask(mc);
        bool tocache = false;

        struct address_space *mapping;
        struct page *oldpage, *page;

        compressed_page_t t;
        int justfound;

repeat:
        page = READ_ONCE(pcl->compressed_bvecs[nr].page);
        oldpage = page;

        if (!page)
                goto out_allocpage;

        /* process the target tagged pointer */
        t = tagptr_init(compressed_page_t, page);
        justfound = tagptr_unfold_tags(t);
        page = tagptr_unfold_ptr(t);

        /*
         * preallocated cached pages are used to avoid direct reclaim;
         * otherwise, the inplace I/O path will be taken instead.
         */
        if (page->private == Z_EROFS_PREALLOCATED_PAGE) {
                WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);
                set_page_private(page, 0);
                tocache = true;
                goto out_tocache;
        }
        mapping = READ_ONCE(page->mapping);

        /*
         * file-backed online pages in the pcluster are all locked steadily,
         * therefore it is impossible for `mapping' to be NULL.
         */
        if (mapping && mapping != mc)
                /* ought to be unmanaged pages */
                goto out;

        /* directly return for shortlived page as well */
        if (z_erofs_is_shortlived_page(page))
                goto out;

        lock_page(page);

        /* only true if page reclaim goes wrong, should never happen */
        DBG_BUGON(justfound && PagePrivate(page));

        /* the page is still in manage cache */
        if (page->mapping == mc) {
                WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);

                if (!PagePrivate(page)) {
                        /*
                         * impossible to be !PagePrivate(page) under the
                         * current restriction as well if the page is
                         * already in compressed_bvecs[].
                         */
                        DBG_BUGON(!justfound);

                        justfound = 0;
                        set_page_private(page, (unsigned long)pcl);
                        SetPagePrivate(page);
                }

                /* no need to submit io if it is already up-to-date */
                if (PageUptodate(page)) {
                        unlock_page(page);
                        page = NULL;
                }
                goto out;
        }

        /*
         * the managed page has been truncated, it's unsafe to
         * reuse this one, let's allocate a new cache-managed page.
         */
        DBG_BUGON(page->mapping);
        DBG_BUGON(!justfound);

        tocache = true;
        unlock_page(page);
        put_page(page);
out_allocpage:
        page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
        if (oldpage != cmpxchg(&pcl->compressed_bvecs[nr].page,
                               oldpage, page)) {
                erofs_pagepool_add(pagepool, page);
                cond_resched();
                goto repeat;
        }
out_tocache:
        if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
                /* turn into a temporary page if it fails (1 ref) */
                set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);
                goto out;
        }
        attach_page_private(page, pcl);
        /* drop a refcount added by allocpage (then we have 2 refs here) */
        put_page(page);

out:    /* the only exit (for tracing and debugging) */
        return page;
}

static struct z_erofs_decompressqueue *
jobqueue_init(struct super_block *sb,
              struct z_erofs_decompressqueue *fgq, bool *fg)
{
        struct z_erofs_decompressqueue *q;

        if (fg && !*fg) {
                q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
                if (!q) {
                        *fg = true;
                        goto fg_out;
                }
                INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
        } else {
fg_out:
                q = fgq;
                init_completion(&fgq->u.done);
                atomic_set(&fgq->pending_bios, 0);
                q->eio = false;
        }
        q->sb = sb;
        q->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
        return q;
}

/* define decompression jobqueue types */
enum {
        JQ_BYPASS,
        JQ_SUBMIT,
        NR_JOBQUEUES,
};
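/*
 * Editorial sketch of the jobqueue setup below: JQ_BYPASS collects
 * pclusters whose compressed data is already available (inline data or
 * uptodate cached pages) and thus need no device I/O, while JQ_SUBMIT
 * collects pclusters that still require bios.  The value returned by
 * jobqueueset_init() folds the JQ_SUBMIT queue pointer together with
 * the foreground flag into a tagptr so that bio->bi_private can carry
 * both to the endio handler.
 */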
static void *jobqueueset_init(struct super_block *sb,
                              struct z_erofs_decompressqueue *q[],
                              struct z_erofs_decompressqueue *fgq, bool *fg)
{
        /*
         * if managed cache is enabled, bypass jobqueue is needed,
         * no need to read from device for all pclusters in this queue.
         */
        q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
        q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, fg);

        return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], *fg));
}

static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
                                    z_erofs_next_pcluster_t qtail[],
                                    z_erofs_next_pcluster_t owned_head)
{
        z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
        z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];

        DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
        if (owned_head == Z_EROFS_PCLUSTER_TAIL)
                owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED;

        WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED);

        WRITE_ONCE(*submit_qtail, owned_head);
        WRITE_ONCE(*bypass_qtail, &pcl->next);

        qtail[JQ_BYPASS] = &pcl->next;
}

static void z_erofs_decompressqueue_endio(struct bio *bio)
{
        tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
        struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
        blk_status_t err = bio->bi_status;
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                struct page *page = bvec->bv_page;

                DBG_BUGON(PageUptodate(page));
                DBG_BUGON(z_erofs_page_is_invalidated(page));

                if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
                        if (!err)
                                SetPageUptodate(page);
                        unlock_page(page);
                }
        }
        if (err)
                q->eio = true;
        z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
        bio_put(bio);
}
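/*
 * Editorial note on z_erofs_submit_queue() below: it walks the chain of
 * owned pclusters, closes the chain, and builds read bios for the
 * physical blocks of every non-inline pcluster.  A minimal view of the
 * per-pcluster flow, assuming the helpers behave as used here:
 *
 *      pcl --(erofs_map_dev)--> mdev, then for each compressed page:
 *              pickup_page_for_submission() -> bio_add_page() or skip
 *
 * Pclusters that turn out to need no I/O at all are moved onto the
 * bypass jobqueue instead.
 */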
static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
                                 struct page **pagepool,
                                 struct z_erofs_decompressqueue *fgq,
                                 bool *force_fg)
{
        struct super_block *sb = f->inode->i_sb;
        struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
        z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
        struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
        void *bi_private;
        z_erofs_next_pcluster_t owned_head = f->owned_head;
        /* bio is NULL initially, so no need to initialize last_{index,bdev} */
        pgoff_t last_index;
        struct block_device *last_bdev;
        unsigned int nr_bios = 0;
        struct bio *bio = NULL;
        /* initialize to 1 so that psi_memstall_leave() is skipped unless needed */
        unsigned long pflags = 1;

        bi_private = jobqueueset_init(sb, q, fgq, force_fg);
        qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
        qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;

        /* by default, all need io submission */
        q[JQ_SUBMIT]->head = owned_head;

        do {
                struct erofs_map_dev mdev;
                struct z_erofs_pcluster *pcl;
                pgoff_t cur, end;
                unsigned int i = 0;
                bool bypass = true;

                /* 'owned_head' must never equal either of the following */
                DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
                DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);

                pcl = container_of(owned_head, struct z_erofs_pcluster, next);

                /* close the main owned chain at first */
                owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
                                     Z_EROFS_PCLUSTER_TAIL_CLOSED);
                if (z_erofs_is_inline_pcluster(pcl)) {
                        move_to_bypass_jobqueue(pcl, qtail, owned_head);
                        continue;
                }

                /* no device id here, thus it will always succeed */
                mdev = (struct erofs_map_dev) {
                        .m_pa = blknr_to_addr(pcl->obj.index),
                };
                (void)erofs_map_dev(sb, &mdev);

                cur = erofs_blknr(mdev.m_pa);
                end = cur + pcl->pclusterpages;

                do {
                        struct page *page;

                        page = pickup_page_for_submission(pcl, i++, pagepool,
                                                          mc);
                        if (!page)
                                continue;

                        if (bio && (cur != last_index + 1 ||
                                    last_bdev != mdev.m_bdev)) {
submit_bio_retry:
                                if (!pflags)
                                        psi_memstall_leave(&pflags);
                                submit_bio(bio);
                                bio = NULL;
                        }

                        if (unlikely(PageWorkingset(page)))
                                psi_memstall_enter(&pflags);

                        if (!bio) {
                                bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
                                                REQ_OP_READ, GFP_NOIO);
                                bio->bi_end_io = z_erofs_decompressqueue_endio;

                                last_bdev = mdev.m_bdev;
                                bio->bi_iter.bi_sector = (sector_t)cur <<
                                        LOG_SECTORS_PER_BLOCK;
                                bio->bi_private = bi_private;
                                if (f->readahead)
                                        bio->bi_opf |= REQ_RAHEAD;
                                ++nr_bios;
                        }

                        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
                                goto submit_bio_retry;

                        last_index = cur;
                        bypass = false;
                } while (++cur < end);

                if (!bypass)
                        qtail[JQ_SUBMIT] = &pcl->next;
                else
                        move_to_bypass_jobqueue(pcl, qtail, owned_head);
        } while (owned_head != Z_EROFS_PCLUSTER_TAIL);

        if (bio) {
                if (!pflags)
                        psi_memstall_leave(&pflags);
                submit_bio(bio);
        }

        /*
         * although background is preferred, no one is pending for submission;
         * don't issue the workqueue for decompression but drop it directly.
         */
        if (!*force_fg && !nr_bios) {
                kvfree(q[JQ_SUBMIT]);
                return;
        }
        z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
}
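/*
 * Editorial note (control-flow sketch for z_erofs_runqueue() below,
 * based on the code as written): bypass pclusters are decompressed
 * immediately in the caller context; submitted pclusters are either
 * waited for and decompressed here (force_fg) or handled later by the
 * workqueue once all their bios have completed.
 */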
static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
                             struct page **pagepool, bool force_fg)
{
        struct z_erofs_decompressqueue io[NR_JOBQUEUES];

        if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
                return;
        z_erofs_submit_queue(f, pagepool, io, &force_fg);

        /* handle bypass queue (no i/o pclusters) immediately */
        z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);

        if (!force_fg)
                return;

        /* wait until all bios are completed */
        wait_for_completion_io(&io[JQ_SUBMIT].u.done);

        /* handle synchronous decompress queue in the caller context */
        z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);
}
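/*
 * Editorial note on the readmore calls used by the entry points further
 * below: the backmost call extends the read window past its trailing
 * edge (via readahead_expand() or by reading extra pages directly) so
 * that pclusters straddling the boundary can be decompressed in one go;
 * the later !backmost call does the same for the leading edge.
 */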
/*
 * Since partial uptodate is still unimplemented for now, we have to use
 * approximate readmore strategies as a start.
 */
static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
                                      struct readahead_control *rac,
                                      erofs_off_t end,
                                      struct page **pagepool,
                                      bool backmost)
{
        struct inode *inode = f->inode;
        struct erofs_map_blocks *map = &f->map;
        erofs_off_t cur;
        int err;

        if (backmost) {
                map->m_la = end;
                err = z_erofs_map_blocks_iter(inode, map,
                                              EROFS_GET_BLOCKS_READMORE);
                if (err)
                        return;

                /* expand ra for the trailing edge if readahead */
                if (rac) {
                        loff_t newstart = readahead_pos(rac);

                        cur = round_up(map->m_la + map->m_llen, PAGE_SIZE);
                        readahead_expand(rac, newstart, cur - newstart);
                        return;
                }
                end = round_up(end, PAGE_SIZE);
        } else {
                end = round_up(map->m_la, PAGE_SIZE);

                if (!map->m_llen)
                        return;
        }

        cur = map->m_la + map->m_llen - 1;
        while (cur >= end) {
                pgoff_t index = cur >> PAGE_SHIFT;
                struct page *page;

                page = erofs_grab_cache_page_nowait(inode->i_mapping, index);
                if (page) {
                        if (PageUptodate(page)) {
                                unlock_page(page);
                        } else {
                                err = z_erofs_do_read_page(f, page, pagepool);
                                if (err)
                                        erofs_err(inode->i_sb,
                                                  "readmore error at page %lu @ nid %llu",
                                                  index, EROFS_I(inode)->nid);
                        }
                        put_page(page);
                }

                if (cur < PAGE_SIZE)
                        break;
                cur = (index << PAGE_SHIFT) - 1;
        }
}
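/*
 * Editorial sketch of how the entry points below tie the pieces above
 * together (assuming the frontend helpers behave as named):
 *
 *      z_erofs_pcluster_readmore(&f, ..., true);   // trailing edge
 *      z_erofs_do_read_page(&f, page, &pagepool);  // each requested page
 *      z_erofs_pcluster_readmore(&f, ..., false);  // leading edge
 *      (void)z_erofs_collector_end(&f);
 *      z_erofs_runqueue(&f, &pagepool, ...);       // submit + decompress
 */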
static int z_erofs_read_folio(struct file *file, struct folio *folio)
{
        struct page *page = &folio->page;
        struct inode *const inode = page->mapping->host;
        struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
        struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
        struct page *pagepool = NULL;
        int err;

        trace_erofs_readpage(page, false);
        f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;

        z_erofs_pcluster_readmore(&f, NULL, f.headoffset + PAGE_SIZE - 1,
                                  &pagepool, true);
        err = z_erofs_do_read_page(&f, page, &pagepool);
        z_erofs_pcluster_readmore(&f, NULL, 0, &pagepool, false);

        (void)z_erofs_collector_end(&f);

        /* if some compressed clusters are ready, submit them anyway */
        z_erofs_runqueue(&f, &pagepool,
                         z_erofs_get_sync_decompress_policy(sbi, 0));

        if (err)
                erofs_err(inode->i_sb, "failed to read, err [%d]", err);

        erofs_put_metabuf(&f.map.buf);
        erofs_release_pages(&pagepool);
        return err;
}

static void z_erofs_readahead(struct readahead_control *rac)
{
        struct inode *const inode = rac->mapping->host;
        struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
        struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
        struct page *pagepool = NULL, *head = NULL, *page;
        unsigned int nr_pages;

        f.readahead = true;
        f.headoffset = readahead_pos(rac);

        z_erofs_pcluster_readmore(&f, rac, f.headoffset +
                                  readahead_length(rac) - 1, &pagepool, true);
        nr_pages = readahead_count(rac);
        trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);

        while ((page = readahead_page(rac))) {
                set_page_private(page, (unsigned long)head);
                head = page;
        }

        while (head) {
                struct page *page = head;
                int err;

                /* traversal in reverse order */
                head = (void *)page_private(page);

                err = z_erofs_do_read_page(&f, page, &pagepool);
                if (err)
                        erofs_err(inode->i_sb,
                                  "readahead error at page %lu @ nid %llu",
                                  page->index, EROFS_I(inode)->nid);
                put_page(page);
        }
        z_erofs_pcluster_readmore(&f, rac, 0, &pagepool, false);
        (void)z_erofs_collector_end(&f);

        z_erofs_runqueue(&f, &pagepool,
                         z_erofs_get_sync_decompress_policy(sbi, nr_pages));
        erofs_put_metabuf(&f.map.buf);
        erofs_release_pages(&pagepool);
}

const struct address_space_operations z_erofs_aops = {
        .read_folio = z_erofs_read_folio,
        .readahead = z_erofs_readahead,
};