// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2022 Alibaba Cloud
 */
#include "zdata.h"
#include "compress.h"
#include <linux/prefetch.h>
#include <linux/psi.h>

#include <trace/events/erofs.h>

/*
 * since pclustersize is variable for big pcluster feature, introduce slab
 * pools implementation for different pcluster sizes.
 */
struct z_erofs_pcluster_slab {
	struct kmem_cache *slab;
	unsigned int maxpages;
	char name[48];
};

#define _PCLP(n) { .maxpages = n }

static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = {
	_PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128),
	_PCLP(Z_EROFS_PCLUSTER_MAX_PAGES)
};

struct z_erofs_bvec_iter {
	struct page *bvpage;
	struct z_erofs_bvset *bvset;
	unsigned int nr, cur;
};

static struct page *z_erofs_bvec_iter_end(struct z_erofs_bvec_iter *iter)
{
	if (iter->bvpage)
		kunmap_local(iter->bvset);
	return iter->bvpage;
}

static struct page *z_erofs_bvset_flip(struct z_erofs_bvec_iter *iter)
{
	unsigned long base = (unsigned long)((struct z_erofs_bvset *)0)->bvec;
	/* have to access nextpage in advance, otherwise it will be unmapped */
	struct page *nextpage = iter->bvset->nextpage;
	struct page *oldpage;

	DBG_BUGON(!nextpage);
	oldpage = z_erofs_bvec_iter_end(iter);
	iter->bvpage = nextpage;
	iter->bvset = kmap_local_page(nextpage);
	iter->nr = (PAGE_SIZE - base) / sizeof(struct z_erofs_bvec);
	iter->cur = 0;
	return oldpage;
}

static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
				    struct z_erofs_bvset_inline *bvset,
				    unsigned int bootstrap_nr,
				    unsigned int cur)
{
	*iter = (struct z_erofs_bvec_iter) {
		.nr = bootstrap_nr,
		.bvset = (struct z_erofs_bvset *)bvset,
	};

	while (cur > iter->nr) {
		cur -= iter->nr;
		z_erofs_bvset_flip(iter);
	}
	iter->cur = cur;
}

static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
				struct z_erofs_bvec *bvec,
				struct page **candidate_bvpage)
{
	if (iter->cur == iter->nr) {
		if (!*candidate_bvpage)
			return -EAGAIN;

		DBG_BUGON(iter->bvset->nextpage);
		iter->bvset->nextpage = *candidate_bvpage;
		z_erofs_bvset_flip(iter);

		iter->bvset->nextpage = NULL;
		*candidate_bvpage = NULL;
	}
	iter->bvset->bvec[iter->cur++] = *bvec;
	return 0;
}

static void z_erofs_bvec_dequeue(struct z_erofs_bvec_iter *iter,
				 struct z_erofs_bvec *bvec,
				 struct page **old_bvpage)
{
	if (iter->cur == iter->nr)
		*old_bvpage = z_erofs_bvset_flip(iter);
	else
		*old_bvpage = NULL;
	*bvec = iter->bvset->bvec[iter->cur++];
}

static void z_erofs_destroy_pcluster_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
		if (!pcluster_pool[i].slab)
			continue;
		kmem_cache_destroy(pcluster_pool[i].slab);
		pcluster_pool[i].slab = NULL;
	}
}

static int z_erofs_create_pcluster_pool(void)
{
	struct z_erofs_pcluster_slab *pcs;
	struct z_erofs_pcluster *a;
	unsigned int size;

	for (pcs = pcluster_pool;
	     pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
		size = struct_size(a, compressed_bvecs, pcs->maxpages);

		sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages);
		pcs->slab = kmem_cache_create(pcs->name, size, 0,
					      SLAB_RECLAIM_ACCOUNT, NULL);
		if (pcs->slab)
			continue;

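		/* roll back the slab caches created so far */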
		z_erofs_destroy_pcluster_pool();
		return -ENOMEM;
	}
	return 0;
}

static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
		struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
		struct z_erofs_pcluster *pcl;

		if (nrpages > pcs->maxpages)
			continue;

		pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS);
		if (!pcl)
			return ERR_PTR(-ENOMEM);
		pcl->pclusterpages = nrpages;
		return pcl;
	}
	return ERR_PTR(-EINVAL);
}

static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
{
	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
	int i;

	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
		struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;

		if (pclusterpages > pcs->maxpages)
			continue;

		kmem_cache_free(pcs->slab, pcl);
		return;
	}
	DBG_BUGON(1);
}

/* how to allocate cached pages for a pcluster */
enum z_erofs_cache_alloctype {
	DONTALLOC,	/* don't allocate any cached pages */
	/*
	 * try to use cached I/O if page allocation succeeds or fallback
	 * to in-place I/O instead to avoid any direct reclaim.
	 */
	TRYALLOC,
};

/*
 * tagged pointer with 1-bit tag for all compressed pages
 * tag 0 - the page is just found with an extra page reference
 */
typedef tagptr1_t compressed_page_t;

#define tag_compressed_page_justfound(page) \
	tagptr_fold(compressed_page_t, page, 1)

static struct workqueue_struct *z_erofs_workqueue __read_mostly;

void z_erofs_exit_zip_subsystem(void)
{
	destroy_workqueue(z_erofs_workqueue);
	z_erofs_destroy_pcluster_pool();
}

static inline int z_erofs_init_workqueue(void)
{
	const unsigned int onlinecpus = num_possible_cpus();

	/*
	 * no need to spawn too many threads, limiting threads could minimize
	 * scheduling overhead, perhaps per-CPU threads should be better?
	 */
	z_erofs_workqueue = alloc_workqueue("erofs_unzipd",
					    WQ_UNBOUND | WQ_HIGHPRI,
					    onlinecpus + onlinecpus / 4);
	return z_erofs_workqueue ? 0 : -ENOMEM;
}

int __init z_erofs_init_zip_subsystem(void)
{
	int err = z_erofs_create_pcluster_pool();

	if (err)
		return err;
	err = z_erofs_init_workqueue();
	if (err)
		z_erofs_destroy_pcluster_pool();
	return err;
}

enum z_erofs_pclustermode {
	Z_EROFS_PCLUSTER_INFLIGHT,
	/*
	 * The current pcluster was the tail of an existing chain, and the
	 * previously processed chained pclusters have all been decided to
	 * be hooked up to it.
	 * A new chain will be created for the remaining pclusters which are
	 * not processed yet, so different from Z_EROFS_PCLUSTER_FOLLOWED,
	 * the next pcluster cannot reuse the whole page safely for inplace I/O
	 * in the following scenario:
	 *  ________________________________________________________________
	 * |      tail (partial) page     |       head (partial) page       |
	 * |   (belongs to the next pcl)  |   (belongs to the current pcl)  |
	 * |_______PCLUSTER_FOLLOWED______|________PCLUSTER_HOOKED__________|
	 */
	Z_EROFS_PCLUSTER_HOOKED,
	/*
	 * a weak form of Z_EROFS_PCLUSTER_FOLLOWED, the difference is that it
	 * could be dispatched into the bypass queue later due to up-to-date
	 * managed pages.
	 * All related online pages cannot be reused for inplace I/O (or
	 * bvpage) since it can be directly decoded without I/O submission.
	 */
	Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
	/*
	 * The current collection has been linked with the owned chain, and
	 * could also be linked with the remaining collections, which means
	 * if the processing page is the tail page of the collection, the
	 * current collection can safely use the whole page (since the
	 * previous collection is under control) for in-place I/O, as
	 * illustrated below:
	 *  ________________________________________________________________
	 * |  tail (partial) page |         head (partial) page             |
	 * |  (of the current cl) |     (of the previous collection)        |
	 * | PCLUSTER_FOLLOWED or |                                         |
	 * |_____PCLUSTER_HOOKED__|___________PCLUSTER_FOLLOWED_____________|
	 *
	 * [  (*) the above page can be used as inplace I/O.               ]
	 */
	Z_EROFS_PCLUSTER_FOLLOWED,
};

struct z_erofs_decompress_frontend {
	struct inode *const inode;
	struct erofs_map_blocks map;
	struct z_erofs_bvec_iter biter;

	struct page *candidate_bvpage;
	struct z_erofs_pcluster *pcl, *tailpcl;
	z_erofs_next_pcluster_t owned_head;
	enum z_erofs_pclustermode mode;

	bool readahead;
	/* used for applying cache strategy on the fly */
	bool backmost;
	erofs_off_t headoffset;

	/* a pointer used to pick up inplace I/O pages */
	unsigned int icur;
};

#define DECOMPRESS_FRONTEND_INIT(__i) { \
	.inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
	.mode = Z_EROFS_PCLUSTER_FOLLOWED, .backmost = true }

static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
			       enum z_erofs_cache_alloctype type,
			       struct page **pagepool)
{
	struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
	struct z_erofs_pcluster *pcl = fe->pcl;
	bool standalone = true;
	/*
	 * optimistic allocation without direct reclaim since inplace I/O
	 * can be used under low memory instead.
	 */
	gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
			__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
	unsigned int i;

	if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
		return;

	for (i = 0; i < pcl->pclusterpages; ++i) {
		struct page *page;
		compressed_page_t t;
		struct page *newpage = NULL;

		/* the compressed page was loaded before */
		if (READ_ONCE(pcl->compressed_bvecs[i].page))
			continue;

		page = find_get_page(mc, pcl->obj.index + i);

		if (page) {
			t = tag_compressed_page_justfound(page);
		} else {
			/* I/O is needed, not possible to decompress directly */
			standalone = false;
			switch (type) {
			case TRYALLOC:
				newpage = erofs_allocpage(pagepool, gfp);
				if (!newpage)
					continue;
				set_page_private(newpage,
						 Z_EROFS_PREALLOCATED_PAGE);
				t = tag_compressed_page_justfound(newpage);
				break;
			default:	/* DONTALLOC */
				continue;
			}
		}

		if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL,
				     tagptr_cast_ptr(t)))
			continue;

		if (page)
			put_page(page);
		else if (newpage)
			erofs_pagepool_add(pagepool, newpage);
	}

	/*
	 * don't do inplace I/O if all compressed pages are available in
	 * managed cache since they can be moved into the bypass queue instead.
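	 * (see Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE above.)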
	 */
	if (standalone)
		fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
}

/* called by erofs_shrinker to get rid of all compressed_pages */
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
				       struct erofs_workgroup *grp)
{
	struct z_erofs_pcluster *const pcl =
		container_of(grp, struct z_erofs_pcluster, obj);
	int i;

	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
	/*
	 * refcount of workgroup is now frozen to 1,
	 * therefore no need to worry about available decompression users.
	 */
	for (i = 0; i < pcl->pclusterpages; ++i) {
		struct page *page = pcl->compressed_bvecs[i].page;

		if (!page)
			continue;

		/* block other users from reclaiming or migrating the page */
		if (!trylock_page(page))
			return -EBUSY;

		if (!erofs_page_is_managed(sbi, page))
			continue;

		/* barrier is implied in the following 'unlock_page' */
		WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
		detach_page_private(page);
		unlock_page(page);
	}
	return 0;
}

int erofs_try_to_free_cached_page(struct page *page)
{
	struct z_erofs_pcluster *const pcl = (void *)page_private(page);
	int ret, i;

	if (!erofs_workgroup_try_to_freeze(&pcl->obj, 1))
		return 0;

	ret = 0;
	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
	for (i = 0; i < pcl->pclusterpages; ++i) {
		if (pcl->compressed_bvecs[i].page == page) {
			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
			ret = 1;
			break;
		}
	}
	erofs_workgroup_unfreeze(&pcl->obj, 1);
	if (ret)
		detach_page_private(page);
	return ret;
}

static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
				   struct z_erofs_bvec *bvec)
{
	struct z_erofs_pcluster *const pcl = fe->pcl;

	while (fe->icur > 0) {
		if (!cmpxchg(&pcl->compressed_bvecs[--fe->icur].page,
			     NULL, bvec->page)) {
			pcl->compressed_bvecs[fe->icur] = *bvec;
			return true;
		}
	}
	return false;
}

/* callers must hold the pcluster lock */
static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
			       struct z_erofs_bvec *bvec, bool exclusive)
{
	int ret;

	if (exclusive) {
		/* give priority to inplace I/O, which uses file pages first */
		if (z_erofs_try_inplace_io(fe, bvec))
			return 0;
		/* otherwise, check if it can be used as a bvpage */
		if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
		    !fe->candidate_bvpage)
			fe->candidate_bvpage = bvec->page;
	}
	ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage);
	fe->pcl->vcnt += (ret >= 0);
	return ret;
}

static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
{
	struct z_erofs_pcluster *pcl = f->pcl;
	z_erofs_next_pcluster_t *owned_head = &f->owned_head;

	/* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */
	if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
		    *owned_head) == Z_EROFS_PCLUSTER_NIL) {
		*owned_head = &pcl->next;
		/* so we can attach this pcluster to our submission chain. */
		f->mode = Z_EROFS_PCLUSTER_FOLLOWED;
		return;
	}

	/*
	 * type 2, link to the end of an existing open chain, be careful
	 * that its submission is controlled by the original attached chain.
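	 * Note that the pcl != f->tailpcl check below also prevents hooking
	 * a chain onto its own tail, which could loop on corrupted images.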
	 */
	if (*owned_head != &pcl->next && pcl != f->tailpcl &&
	    cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
		    *owned_head) == Z_EROFS_PCLUSTER_TAIL) {
		*owned_head = Z_EROFS_PCLUSTER_TAIL;
		f->mode = Z_EROFS_PCLUSTER_HOOKED;
		f->tailpcl = NULL;
		return;
	}
	/* type 3, it belongs to a chain, but it isn't the end of the chain */
	f->mode = Z_EROFS_PCLUSTER_INFLIGHT;
}

static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
{
	struct erofs_map_blocks *map = &fe->map;
	bool ztailpacking = map->m_flags & EROFS_MAP_META;
	struct z_erofs_pcluster *pcl;
	struct erofs_workgroup *grp;
	int err;

	if (!(map->m_flags & EROFS_MAP_ENCODED)) {
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	/* no available pcluster, let's allocate one */
	pcl = z_erofs_alloc_pcluster(ztailpacking ? 1 :
				     map->m_plen >> PAGE_SHIFT);
	if (IS_ERR(pcl))
		return PTR_ERR(pcl);

	atomic_set(&pcl->obj.refcount, 1);
	pcl->algorithmformat = map->m_algorithmformat;
	pcl->length = 0;
	pcl->partial = true;

	/* new pclusters should be claimed as type 1, primary and followed */
	pcl->next = fe->owned_head;
	pcl->pageofs_out = map->m_la & ~PAGE_MASK;
	fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;

	/*
	 * lock all primary followed works before they become visible to
	 * others; mutex_trylock *never* fails for a new pcluster.
	 */
	mutex_init(&pcl->lock);
	DBG_BUGON(!mutex_trylock(&pcl->lock));

	if (ztailpacking) {
		pcl->obj.index = 0;	/* which indicates ztailpacking */
		pcl->pageofs_in = erofs_blkoff(map->m_pa);
		pcl->tailpacking_size = map->m_plen;
	} else {
		pcl->obj.index = map->m_pa >> PAGE_SHIFT;

		grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj);
		if (IS_ERR(grp)) {
			err = PTR_ERR(grp);
			goto err_out;
		}

		if (grp != &pcl->obj) {
			fe->pcl = container_of(grp,
					struct z_erofs_pcluster, obj);
			err = -EEXIST;
			goto err_out;
		}
	}
	/* used to check tail merging loop due to corrupted images */
	if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
		fe->tailpcl = pcl;
	fe->owned_head = &pcl->next;
	fe->pcl = pcl;
	return 0;

err_out:
	mutex_unlock(&pcl->lock);
	z_erofs_free_pcluster(pcl);
	return err;
}

static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe)
{
	struct erofs_map_blocks *map = &fe->map;
	struct erofs_workgroup *grp = NULL;
	int ret;

	DBG_BUGON(fe->pcl);

	/* must be Z_EROFS_PCLUSTER_TAIL or point to a previous pcluster */
	DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
	DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);

	if (!(map->m_flags & EROFS_MAP_META)) {
		grp = erofs_find_workgroup(fe->inode->i_sb,
					   map->m_pa >> PAGE_SHIFT);
	} else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	if (grp) {
		fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
		ret = -EEXIST;
	} else {
		ret = z_erofs_register_pcluster(fe);
	}

	if (ret == -EEXIST) {
		mutex_lock(&fe->pcl->lock);
		/* used to check tail merging loop due to corrupted images */
		if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
			fe->tailpcl = fe->pcl;

		z_erofs_try_to_claim_pcluster(fe);
	} else if (ret) {
		return ret;
	}
	z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
				Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
	/* since file-backed online pages are traversed in reverse order */
	fe->icur = z_erofs_pclusterpages(fe->pcl);
	return 0;
}

/*
 * keep in mind that referenced pclusters are freed only after an RCU
 * grace period.
 */
static void z_erofs_rcu_callback(struct rcu_head *head)
{
	z_erofs_free_pcluster(container_of(head,
			struct z_erofs_pcluster, rcu));
}

void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
{
	struct z_erofs_pcluster *const pcl =
		container_of(grp, struct z_erofs_pcluster, obj);

	call_rcu(&pcl->rcu, z_erofs_rcu_callback);
}

static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
{
	struct z_erofs_pcluster *pcl = fe->pcl;

	if (!pcl)
		return false;

	z_erofs_bvec_iter_end(&fe->biter);
	mutex_unlock(&pcl->lock);

	if (fe->candidate_bvpage) {
		DBG_BUGON(z_erofs_is_shortlived_page(fe->candidate_bvpage));
		fe->candidate_bvpage = NULL;
	}

	/*
	 * once all pending pages are added, don't hold the pcluster
	 * reference any longer if it isn't hosted by ourselves.
	 */
	if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
		erofs_workgroup_put(&pcl->obj);

	fe->pcl = NULL;
	return true;
}

static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
				       unsigned int cachestrategy,
				       erofs_off_t la)
{
	if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
		return false;

	if (fe->backmost)
		return true;

	return cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
		la < fe->headoffset;
}

static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
				struct page *page, struct page **pagepool)
{
	struct inode *const inode = fe->inode;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
	struct erofs_map_blocks *const map = &fe->map;
	const loff_t offset = page_offset(page);
	bool tight = true, exclusive;

	enum z_erofs_cache_alloctype cache_strategy;
	unsigned int cur, end, spiltted;
	int err = 0;

	/* register locked file pages as online pages in pack */
	z_erofs_onlinepage_init(page);

	spiltted = 0;
	end = PAGE_SIZE;
repeat:
	cur = end - 1;

	if (offset + cur < map->m_la ||
	    offset + cur >= map->m_la + map->m_llen) {
		erofs_dbg("out-of-range map @ pos %llu", offset + cur);

		if (z_erofs_collector_end(fe))
			fe->backmost = false;
		map->m_la = offset + cur;
		map->m_llen = 0;
		err = z_erofs_map_blocks_iter(inode, map, 0);
		if (err)
			goto out;
	} else {
		if (fe->pcl)
			goto hitted;
		/* didn't get a valid pcluster previously (very rare) */
	}

	if (!(map->m_flags & EROFS_MAP_MAPPED))
		goto hitted;

	err = z_erofs_collector_begin(fe);
	if (err)
		goto out;

	if (z_erofs_is_inline_pcluster(fe->pcl)) {
		void *mp;

		mp = erofs_read_metabuf(&fe->map.buf, inode->i_sb,
					erofs_blknr(map->m_pa), EROFS_NO_KMAP);
		if (IS_ERR(mp)) {
			err = PTR_ERR(mp);
			erofs_err(inode->i_sb,
				  "failed to get inline page, err %d", err);
			goto out;
		}
		get_page(fe->map.buf.page);
		WRITE_ONCE(fe->pcl->compressed_bvecs[0].page,
			   fe->map.buf.page);
		fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
	} else {
		/* bind cache first when cached decompression is preferred */
		if (should_alloc_managed_pages(fe, sbi->opt.cache_strategy,
					       map->m_la))
			cache_strategy = TRYALLOC;
		else
			cache_strategy = DONTALLOC;

		z_erofs_bind_cache(fe, cache_strategy, pagepool);
	}
hitted:
	/*
	 * Ensure the current partial page belongs to this submit chain rather
	 * than other concurrent submit chains or the noio(bypass) chain since
	 * those chains are handled asynchronously thus the page cannot be used
	 * for inplace I/O or bvpage (should be processed in a strict order.)
	 */
	tight &= (fe->mode >= Z_EROFS_PCLUSTER_HOOKED &&
		  fe->mode != Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);

	cur = end - min_t(unsigned int, offset + end - map->m_la, end);
	if (!(map->m_flags & EROFS_MAP_MAPPED)) {
		zero_user_segment(page, cur, end);
		goto next_part;
	}

	exclusive = (!cur && (!spiltted || tight));
	if (cur)
		tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED);

retry:
	err = z_erofs_attach_page(fe, &((struct z_erofs_bvec) {
					.page = page,
					.offset = offset - map->m_la,
					.end = end,
				  }), exclusive);
	/* should allocate an additional short-lived page for bvset */
	if (err == -EAGAIN && !fe->candidate_bvpage) {
		fe->candidate_bvpage = alloc_page(GFP_NOFS | __GFP_NOFAIL);
		set_page_private(fe->candidate_bvpage,
				 Z_EROFS_SHORTLIVED_PAGE);
		goto retry;
	}

	if (err) {
		DBG_BUGON(err == -EAGAIN && fe->candidate_bvpage);
		goto out;
	}

	z_erofs_onlinepage_split(page);
	/* bump up the number of split parts of a page */
	++spiltted;
	if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
		fe->pcl->multibases = true;

	if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
	    fe->pcl->length == map->m_llen)
		fe->pcl->partial = false;
	if (fe->pcl->length < offset + end - map->m_la) {
		fe->pcl->length = offset + end - map->m_la;
		fe->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
	}
next_part:
	/* shorten the remaining extent to update progress */
	map->m_llen = offset + cur - map->m_la;
	map->m_flags &= ~EROFS_MAP_FULL_MAPPED;

	end = cur;
	if (end > 0)
		goto repeat;

out:
	if (err)
		z_erofs_page_mark_eio(page);
	z_erofs_onlinepage_endio(page);

	erofs_dbg("%s, finish page: %pK spiltted: %u map->m_llen %llu",
		  __func__, page, spiltted, map->m_llen);
	return err;
}

static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi,
					       unsigned int readahead_pages)
{
	/* auto: enable for read_folio, disable for readahead */
	if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) &&
	    !readahead_pages)
		return true;

	if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON) &&
	    (readahead_pages <= sbi->opt.max_sync_decompress_pages))
		return true;

	return false;
}

static bool z_erofs_page_is_invalidated(struct page *page)
{
	return !page->mapping && !z_erofs_is_shortlived_page(page);
}

struct z_erofs_decompress_backend {
	struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
	struct super_block *sb;
	struct z_erofs_pcluster *pcl;

	/* pages with the longest decompressed length for deduplication */
	struct page **decompressed_pages;
	/* pages to keep the compressed data */
	struct page **compressed_pages;

	struct list_head decompressed_secondary_bvecs;
	struct page **pagepool;
	unsigned int onstack_used, nr_pages;
};

struct z_erofs_bvec_item {
	struct z_erofs_bvec bvec;
	struct list_head list;
};

static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
					 struct z_erofs_bvec *bvec)
{
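	/*
	 * Page-aligned bvecs become the primary copy of the corresponding
	 * decompressed page; unaligned ones or duplicates are queued on a
	 * secondary list and filled by memcpy afterwards.
	 */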
	struct z_erofs_bvec_item *item;

	if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK)) {
		unsigned int pgnr;
		struct page *oldpage;

		pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
		DBG_BUGON(pgnr >= be->nr_pages);
		oldpage = be->decompressed_pages[pgnr];
		be->decompressed_pages[pgnr] = bvec->page;

		if (!oldpage)
			return;
	}

	/* (cold path) one pcluster is requested multiple times */
	item = kmalloc(sizeof(*item), GFP_KERNEL | __GFP_NOFAIL);
	item->bvec = *bvec;
	list_add(&item->list, &be->decompressed_secondary_bvecs);
}

static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
				      int err)
{
	unsigned int off0 = be->pcl->pageofs_out;
	struct list_head *p, *n;

	list_for_each_safe(p, n, &be->decompressed_secondary_bvecs) {
		struct z_erofs_bvec_item *bvi;
		unsigned int end, cur;
		void *dst, *src;

		bvi = container_of(p, struct z_erofs_bvec_item, list);
		cur = bvi->bvec.offset < 0 ? -bvi->bvec.offset : 0;
		end = min_t(unsigned int, be->pcl->length - bvi->bvec.offset,
			    bvi->bvec.end);
		dst = kmap_local_page(bvi->bvec.page);
		while (cur < end) {
			unsigned int pgnr, scur, len;

			pgnr = (bvi->bvec.offset + cur + off0) >> PAGE_SHIFT;
			DBG_BUGON(pgnr >= be->nr_pages);

			scur = bvi->bvec.offset + cur -
					((pgnr << PAGE_SHIFT) - off0);
			len = min_t(unsigned int, end - cur, PAGE_SIZE - scur);
			if (!be->decompressed_pages[pgnr]) {
				err = -EFSCORRUPTED;
				cur += len;
				continue;
			}
			src = kmap_local_page(be->decompressed_pages[pgnr]);
			memcpy(dst + cur, src + scur, len);
			kunmap_local(src);
			cur += len;
		}
		kunmap_local(dst);
		if (err)
			z_erofs_page_mark_eio(bvi->bvec.page);
		z_erofs_onlinepage_endio(bvi->bvec.page);
		list_del(p);
		kfree(bvi);
	}
}

static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
{
	struct z_erofs_pcluster *pcl = be->pcl;
	struct z_erofs_bvec_iter biter;
	struct page *old_bvpage;
	int i;

	z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0);
	for (i = 0; i < pcl->vcnt; ++i) {
		struct z_erofs_bvec bvec;

		z_erofs_bvec_dequeue(&biter, &bvec, &old_bvpage);

		if (old_bvpage)
			z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);

		DBG_BUGON(z_erofs_page_is_invalidated(bvec.page));
		z_erofs_do_decompressed_bvec(be, &bvec);
	}

	old_bvpage = z_erofs_bvec_iter_end(&biter);
	if (old_bvpage)
		z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
}

static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
				  bool *overlapped)
{
	struct z_erofs_pcluster *pcl = be->pcl;
	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
	int i, err = 0;

	*overlapped = false;
	for (i = 0; i < pclusterpages; ++i) {
		struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i];
		struct page *page = bvec->page;

		/* compressed pages ought to be present before decompressing */
		if (!page) {
			DBG_BUGON(1);
			continue;
		}
		be->compressed_pages[i] = page;

		if (z_erofs_is_inline_pcluster(pcl)) {
			if (!PageUptodate(page))
				err = -EIO;
			continue;
		}

		DBG_BUGON(z_erofs_page_is_invalidated(page));
		if (!z_erofs_is_shortlived_page(page)) {
			if (erofs_page_is_managed(EROFS_SB(be->sb), page)) {
				if (!PageUptodate(page))
					err = -EIO;
				continue;
			}
			z_erofs_do_decompressed_bvec(be, bvec);
			*overlapped = true;
		}
	}

	if (err)
		return err;
	return 0;
}

static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
				       int err)
{
	struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
	struct z_erofs_pcluster *pcl = be->pcl;
	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
	unsigned int i, inputsize;
	int err2;
	struct page *page;
	bool overlapped;

	mutex_lock(&pcl->lock);
	be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT;

	/* allocate (de)compressed page arrays if cannot be kept on stack */
	be->decompressed_pages = NULL;
	be->compressed_pages = NULL;
	be->onstack_used = 0;
	if (be->nr_pages <= Z_EROFS_ONSTACK_PAGES) {
		be->decompressed_pages = be->onstack_pages;
		be->onstack_used = be->nr_pages;
		memset(be->decompressed_pages, 0,
		       sizeof(struct page *) * be->nr_pages);
	}

	if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES)
		be->compressed_pages = be->onstack_pages + be->onstack_used;

	if (!be->decompressed_pages)
		be->decompressed_pages =
			kvcalloc(be->nr_pages, sizeof(struct page *),
				 GFP_KERNEL | __GFP_NOFAIL);
	if (!be->compressed_pages)
		be->compressed_pages =
			kvcalloc(pclusterpages, sizeof(struct page *),
				 GFP_KERNEL | __GFP_NOFAIL);

	z_erofs_parse_out_bvecs(be);
	err2 = z_erofs_parse_in_bvecs(be, &overlapped);
	if (err2)
		err = err2;
	if (err)
		goto out;

	if (z_erofs_is_inline_pcluster(pcl))
		inputsize = pcl->tailpacking_size;
	else
		inputsize = pclusterpages * PAGE_SIZE;

	err = z_erofs_decompress(&(struct z_erofs_decompress_req) {
					.sb = be->sb,
					.in = be->compressed_pages,
					.out = be->decompressed_pages,
					.pageofs_in = pcl->pageofs_in,
					.pageofs_out = pcl->pageofs_out,
					.inputsize = inputsize,
					.outputsize = pcl->length,
					.alg = pcl->algorithmformat,
					.inplace_io = overlapped,
					.partial_decoding = pcl->partial,
					.fillgaps = pcl->multibases,
				 }, be->pagepool);

out:
	/* must handle all compressed pages before actual file pages */
	if (z_erofs_is_inline_pcluster(pcl)) {
		page = pcl->compressed_bvecs[0].page;
		WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
		put_page(page);
	} else {
		for (i = 0; i < pclusterpages; ++i) {
			page = pcl->compressed_bvecs[i].page;

			if (erofs_page_is_managed(sbi, page))
				continue;

			/* recycle all individual short-lived pages */
			(void)z_erofs_put_shortlivedpage(be->pagepool, page);
			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
		}
	}
	if (be->compressed_pages < be->onstack_pages ||
	    be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
		kvfree(be->compressed_pages);
	z_erofs_fill_other_copies(be, err);

	for (i = 0; i < be->nr_pages; ++i) {
		page = be->decompressed_pages[i];
		if (!page)
			continue;

		DBG_BUGON(z_erofs_page_is_invalidated(page));

		/* recycle all individual short-lived pages */
		if (z_erofs_put_shortlivedpage(be->pagepool, page))
			continue;
		if (err)
			z_erofs_page_mark_eio(page);
		z_erofs_onlinepage_endio(page);
	}

	if (be->decompressed_pages != be->onstack_pages)
		kvfree(be->decompressed_pages);

	pcl->length = 0;
	pcl->partial = true;
	pcl->multibases = false;
	pcl->bvset.nextpage = NULL;
	pcl->vcnt = 0;

	/* pcluster lock MUST be taken before the following line */
	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
	mutex_unlock(&pcl->lock);
	return err;
}

static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
				     struct page **pagepool)
{
	struct z_erofs_decompress_backend be = {
		.sb = io->sb,
		.pagepool = pagepool,
		.decompressed_secondary_bvecs =
			LIST_HEAD_INIT(be.decompressed_secondary_bvecs),
	};
	z_erofs_next_pcluster_t owned = io->head;

	while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) {
		/* impossible that 'owned' equals Z_EROFS_PCLUSTER_TAIL */
		DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL);
		/* impossible that 'owned' equals Z_EROFS_PCLUSTER_NIL */
		DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);

		be.pcl = container_of(owned, struct z_erofs_pcluster, next);
		owned = READ_ONCE(be.pcl->next);

		z_erofs_decompress_pcluster(&be, io->eio ? -EIO : 0);
		erofs_workgroup_put(&be.pcl->obj);
	}
}

static void z_erofs_decompressqueue_work(struct work_struct *work)
{
	struct z_erofs_decompressqueue *bgq =
		container_of(work, struct z_erofs_decompressqueue, u.work);
	struct page *pagepool = NULL;

	DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
	z_erofs_decompress_queue(bgq, &pagepool);

	erofs_release_pages(&pagepool);
	kvfree(bgq);
}

static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
				       bool sync, int bios)
{
	struct erofs_sb_info *const sbi = EROFS_SB(io->sb);

	/* wake up the caller thread for sync decompression */
	if (sync) {
		if (!atomic_add_return(bios, &io->pending_bios))
			complete(&io->u.done);
		return;
	}

	if (atomic_add_return(bios, &io->pending_bios))
		return;
	/* Use workqueue and sync decompression for atomic contexts only */
	if (in_atomic() || irqs_disabled()) {
		queue_work(z_erofs_workqueue, &io->u.work);
		/* enable sync decompression for readahead */
		if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
			sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
		return;
	}
	z_erofs_decompressqueue_work(&io->u.work);
}

static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
					       unsigned int nr,
					       struct page **pagepool,
					       struct address_space *mc)
{
	const pgoff_t index = pcl->obj.index;
	gfp_t gfp = mapping_gfp_mask(mc);
	bool tocache = false;

	struct address_space *mapping;
	struct page *oldpage, *page;

	compressed_page_t t;
	int justfound;

repeat:
	page = READ_ONCE(pcl->compressed_bvecs[nr].page);
	oldpage = page;

	if (!page)
		goto out_allocpage;

	/* process the target tagged pointer */
	t = tagptr_init(compressed_page_t, page);
	justfound = tagptr_unfold_tags(t);
	page = tagptr_unfold_ptr(t);

	/*
	 * preallocated cached pages, which are used to avoid direct reclaim;
	 * otherwise, the inplace I/O path will be taken instead.
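	 * Such pages are tagged with Z_EROFS_PREALLOCATED_PAGE in
	 * page->private by z_erofs_bind_cache().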
	 */
	if (page->private == Z_EROFS_PREALLOCATED_PAGE) {
		WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);
		set_page_private(page, 0);
		tocache = true;
		goto out_tocache;
	}
	mapping = READ_ONCE(page->mapping);

	/*
	 * file-backed online pages in the pcluster are all locked steady,
	 * therefore it is impossible for `mapping' to be NULL.
	 */
	if (mapping && mapping != mc)
		/* ought to be unmanaged pages */
		goto out;

	/* directly return for shortlived page as well */
	if (z_erofs_is_shortlived_page(page))
		goto out;

	lock_page(page);

	/* only true if page reclaim goes wrong, should never happen */
	DBG_BUGON(justfound && PagePrivate(page));

	/* the page is still in managed cache */
	if (page->mapping == mc) {
		WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);

		if (!PagePrivate(page)) {
			/*
			 * impossible to be !PagePrivate(page) for
			 * the current restriction as well if
			 * the page is already in compressed_bvecs[].
			 */
			DBG_BUGON(!justfound);

			justfound = 0;
			set_page_private(page, (unsigned long)pcl);
			SetPagePrivate(page);
		}

		/* no need to submit I/O if it is already up-to-date */
		if (PageUptodate(page)) {
			unlock_page(page);
			page = NULL;
		}
		goto out;
	}

	/*
	 * the managed page has been truncated, it's unsafe to
	 * reuse this one, let's allocate a new cache-managed page.
	 */
	DBG_BUGON(page->mapping);
	DBG_BUGON(!justfound);

	tocache = true;
	unlock_page(page);
	put_page(page);
out_allocpage:
	page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
	if (oldpage != cmpxchg(&pcl->compressed_bvecs[nr].page,
			       oldpage, page)) {
		erofs_pagepool_add(pagepool, page);
		cond_resched();
		goto repeat;
	}
out_tocache:
	if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
		/* turn into a temporary page if it fails (1 ref) */
		set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);
		goto out;
	}
	attach_page_private(page, pcl);
	/* drop a refcount added by allocpage (then we have 2 refs here) */
	put_page(page);

out:	/* the only exit (for tracing and debugging) */
	return page;
}

static struct z_erofs_decompressqueue *
jobqueue_init(struct super_block *sb,
	      struct z_erofs_decompressqueue *fgq, bool *fg)
{
	struct z_erofs_decompressqueue *q;

	if (fg && !*fg) {
		q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
		if (!q) {
			*fg = true;
			goto fg_out;
		}
		INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
	} else {
fg_out:
		q = fgq;
		init_completion(&fgq->u.done);
		atomic_set(&fgq->pending_bios, 0);
		q->eio = false;
	}
	q->sb = sb;
	q->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
	return q;
}

/* define decompression jobqueue types */
enum {
	JQ_BYPASS,
	JQ_SUBMIT,
	NR_JOBQUEUES,
};

static void *jobqueueset_init(struct super_block *sb,
			      struct z_erofs_decompressqueue *q[],
			      struct z_erofs_decompressqueue *fgq, bool *fg)
{
	/*
	 * if managed cache is enabled, bypass jobqueue is needed,
	 * no need to read from device for all pclusters in this queue.
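	 * The bypass queue is then handled in the caller context instead
	 * of being submitted for I/O (see z_erofs_runqueue()).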
	 */
	q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
	q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, fg);

	return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], *fg));
}

static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
				    z_erofs_next_pcluster_t qtail[],
				    z_erofs_next_pcluster_t owned_head)
{
	z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
	z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];

	DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
	if (owned_head == Z_EROFS_PCLUSTER_TAIL)
		owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED;

	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED);

	WRITE_ONCE(*submit_qtail, owned_head);
	WRITE_ONCE(*bypass_qtail, &pcl->next);

	qtail[JQ_BYPASS] = &pcl->next;
}

static void z_erofs_decompressqueue_endio(struct bio *bio)
{
	tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
	struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
	blk_status_t err = bio->bi_status;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		DBG_BUGON(PageUptodate(page));
		DBG_BUGON(z_erofs_page_is_invalidated(page));

		if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
			if (!err)
				SetPageUptodate(page);
			unlock_page(page);
		}
	}
	if (err)
		q->eio = true;
	z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
	bio_put(bio);
}

static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
				 struct page **pagepool,
				 struct z_erofs_decompressqueue *fgq,
				 bool *force_fg)
{
	struct super_block *sb = f->inode->i_sb;
	struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
	struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
	void *bi_private;
	z_erofs_next_pcluster_t owned_head = f->owned_head;
	/* bio is NULL initially, so no need to initialize last_{index,bdev} */
	pgoff_t last_index;
	struct block_device *last_bdev;
	unsigned int nr_bios = 0;
	struct bio *bio = NULL;
	/* init to 1 so that psi_memstall_leave() is skipped unless needed */
	unsigned long pflags = 1;

	bi_private = jobqueueset_init(sb, q, fgq, force_fg);
	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;

	/* by default, all need I/O submission */
	q[JQ_SUBMIT]->head = owned_head;

	do {
		struct erofs_map_dev mdev;
		struct z_erofs_pcluster *pcl;
		pgoff_t cur, end;
		unsigned int i = 0;
		bool bypass = true;

		/* it is impossible for 'owned_head' to equal the following */
		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);

		pcl = container_of(owned_head, struct z_erofs_pcluster, next);

		/* close the main owned chain at first */
		owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
				     Z_EROFS_PCLUSTER_TAIL_CLOSED);
		if (z_erofs_is_inline_pcluster(pcl)) {
			move_to_bypass_jobqueue(pcl, qtail, owned_head);
			continue;
		}

		/* no device id here, thus it will always succeed */
		mdev = (struct erofs_map_dev) {
			.m_pa = blknr_to_addr(pcl->obj.index),
		};
		(void)erofs_map_dev(sb, &mdev);

		cur = erofs_blknr(mdev.m_pa);
		end = cur + pcl->pclusterpages;

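		/* add each compressed page of this pcluster to the bio(s) */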
		do {
			struct page *page;

			page = pickup_page_for_submission(pcl, i++, pagepool,
							  mc);
			if (!page)
				continue;

			if (bio && (cur != last_index + 1 ||
				    last_bdev != mdev.m_bdev)) {
submit_bio_retry:
				if (!pflags)
					psi_memstall_leave(&pflags);
				submit_bio(bio);
				bio = NULL;
			}

			if (unlikely(PageWorkingset(page)))
				psi_memstall_enter(&pflags);

			if (!bio) {
				bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
						REQ_OP_READ, GFP_NOIO);
				bio->bi_end_io = z_erofs_decompressqueue_endio;

				last_bdev = mdev.m_bdev;
				bio->bi_iter.bi_sector = (sector_t)cur <<
					LOG_SECTORS_PER_BLOCK;
				bio->bi_private = bi_private;
				if (f->readahead)
					bio->bi_opf |= REQ_RAHEAD;
				++nr_bios;
			}

			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				goto submit_bio_retry;

			last_index = cur;
			bypass = false;
		} while (++cur < end);

		if (!bypass)
			qtail[JQ_SUBMIT] = &pcl->next;
		else
			move_to_bypass_jobqueue(pcl, qtail, owned_head);
	} while (owned_head != Z_EROFS_PCLUSTER_TAIL);

	if (bio) {
		if (!pflags)
			psi_memstall_leave(&pflags);
		submit_bio(bio);
	}

	/*
	 * although background decompression is preferred, there is nothing
	 * pending for submission; don't issue the workqueue for decompression,
	 * just drop the queue directly instead.
	 */
	if (!*force_fg && !nr_bios) {
		kvfree(q[JQ_SUBMIT]);
		return;
	}
	z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
}

static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
			     struct page **pagepool, bool force_fg)
{
	struct z_erofs_decompressqueue io[NR_JOBQUEUES];

	if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
		return;
	z_erofs_submit_queue(f, pagepool, io, &force_fg);

	/* handle bypass queue (no i/o pclusters) immediately */
	z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);

	if (!force_fg)
		return;

	/* wait until all bios are completed */
	wait_for_completion_io(&io[JQ_SUBMIT].u.done);

	/* handle synchronous decompress queue in the caller context */
	z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);
}

/*
 * Since partial uptodate is still unimplemented for now, we have to use
 * approximate readmore strategies as a start.
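 * It extends the requested range so that the pclusters overlapping both
 * ends are read in whole.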
 */
static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
				      struct readahead_control *rac,
				      erofs_off_t end,
				      struct page **pagepool,
				      bool backmost)
{
	struct inode *inode = f->inode;
	struct erofs_map_blocks *map = &f->map;
	erofs_off_t cur;
	int err;

	if (backmost) {
		map->m_la = end;
		err = z_erofs_map_blocks_iter(inode, map,
					      EROFS_GET_BLOCKS_READMORE);
		if (err)
			return;

		/* expand ra for the trailing edge if readahead */
		if (rac) {
			loff_t newstart = readahead_pos(rac);

			cur = round_up(map->m_la + map->m_llen, PAGE_SIZE);
			readahead_expand(rac, newstart, cur - newstart);
			return;
		}
		end = round_up(end, PAGE_SIZE);
	} else {
		end = round_up(map->m_la, PAGE_SIZE);

		if (!map->m_llen)
			return;
	}

	cur = map->m_la + map->m_llen - 1;
	while (cur >= end) {
		pgoff_t index = cur >> PAGE_SHIFT;
		struct page *page;

		page = erofs_grab_cache_page_nowait(inode->i_mapping, index);
		if (page) {
			if (PageUptodate(page)) {
				unlock_page(page);
			} else {
				err = z_erofs_do_read_page(f, page, pagepool);
				if (err)
					erofs_err(inode->i_sb,
						  "readmore error at page %lu @ nid %llu",
						  index, EROFS_I(inode)->nid);
			}
			put_page(page);
		}

		if (cur < PAGE_SIZE)
			break;
		cur = (index << PAGE_SHIFT) - 1;
	}
}

static int z_erofs_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;
	struct inode *const inode = page->mapping->host;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
	struct page *pagepool = NULL;
	int err;

	trace_erofs_readpage(page, false);
	f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;

	z_erofs_pcluster_readmore(&f, NULL, f.headoffset + PAGE_SIZE - 1,
				  &pagepool, true);
	err = z_erofs_do_read_page(&f, page, &pagepool);
	z_erofs_pcluster_readmore(&f, NULL, 0, &pagepool, false);

	(void)z_erofs_collector_end(&f);

	/* if some compressed clusters are ready, submit them anyway */
	z_erofs_runqueue(&f, &pagepool,
			 z_erofs_get_sync_decompress_policy(sbi, 0));

	if (err)
		erofs_err(inode->i_sb, "failed to read, err [%d]", err);

	erofs_put_metabuf(&f.map.buf);
	erofs_release_pages(&pagepool);
	return err;
}

static void z_erofs_readahead(struct readahead_control *rac)
{
	struct inode *const inode = rac->mapping->host;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
	struct page *pagepool = NULL, *head = NULL, *page;
	unsigned int nr_pages;

	f.readahead = true;
	f.headoffset = readahead_pos(rac);

	z_erofs_pcluster_readmore(&f, rac, f.headoffset +
				  readahead_length(rac) - 1, &pagepool, true);
	nr_pages = readahead_count(rac);
	trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);

	while ((page = readahead_page(rac))) {
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	while (head) {
		struct page *page = head;
		int err;

		/* traversal in reverse order */
		head = (void *)page_private(page);

		err = z_erofs_do_read_page(&f, page, &pagepool);
		if (err)
			erofs_err(inode->i_sb,
				  "readahead error at page %lu @ nid %llu",
				  page->index, EROFS_I(inode)->nid);
		put_page(page);
	}
	z_erofs_pcluster_readmore(&f, rac, 0, &pagepool, false);
	(void)z_erofs_collector_end(&f);

	z_erofs_runqueue(&f, &pagepool,
			 z_erofs_get_sync_decompress_policy(sbi, nr_pages));
	erofs_put_metabuf(&f.map.buf);
	erofs_release_pages(&pagepool);
}

const struct address_space_operations z_erofs_aops = {
	.read_folio = z_erofs_read_folio,
	.readahead = z_erofs_readahead,
};