// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2022 Alibaba Cloud
 */
#include "zdata.h"
#include "compress.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>

/*
 * Since the pclustersize is variable for the big pcluster feature, introduce
 * slab pools for different pcluster sizes.
 */
struct z_erofs_pcluster_slab {
	struct kmem_cache *slab;
	unsigned int maxpages;
	char name[48];
};

#define _PCLP(n) { .maxpages = n }

static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = {
	_PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128),
	_PCLP(Z_EROFS_PCLUSTER_MAX_PAGES)
};

struct z_erofs_bvec_iter {
	struct page *bvpage;
	struct z_erofs_bvset *bvset;
	unsigned int nr, cur;
};

static struct page *z_erofs_bvec_iter_end(struct z_erofs_bvec_iter *iter)
{
	if (iter->bvpage)
		kunmap_local(iter->bvset);
	return iter->bvpage;
}

static struct page *z_erofs_bvset_flip(struct z_erofs_bvec_iter *iter)
{
	unsigned long base = (unsigned long)((struct z_erofs_bvset *)0)->bvec;
	/* have to access nextpage in advance, otherwise it will be unmapped */
	struct page *nextpage = iter->bvset->nextpage;
	struct page *oldpage;

	DBG_BUGON(!nextpage);
	oldpage = z_erofs_bvec_iter_end(iter);
	iter->bvpage = nextpage;
	iter->bvset = kmap_local_page(nextpage);
	iter->nr = (PAGE_SIZE - base) / sizeof(struct z_erofs_bvec);
	iter->cur = 0;
	return oldpage;
}

static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
				    struct z_erofs_bvset_inline *bvset,
				    unsigned int bootstrap_nr,
				    unsigned int cur)
{
	*iter = (struct z_erofs_bvec_iter) {
		.nr = bootstrap_nr,
		.bvset = (struct z_erofs_bvset *)bvset,
	};

	while (cur > iter->nr) {
		cur -= iter->nr;
		z_erofs_bvset_flip(iter);
	}
	iter->cur = cur;
}

static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
				struct z_erofs_bvec *bvec,
				struct page **candidate_bvpage)
{
	if (iter->cur == iter->nr) {
		if (!*candidate_bvpage)
			return -EAGAIN;

		DBG_BUGON(iter->bvset->nextpage);
		iter->bvset->nextpage = *candidate_bvpage;
		z_erofs_bvset_flip(iter);

		iter->bvset->nextpage = NULL;
		*candidate_bvpage = NULL;
	}
	iter->bvset->bvec[iter->cur++] = *bvec;
	return 0;
}

static void z_erofs_bvec_dequeue(struct z_erofs_bvec_iter *iter,
				 struct z_erofs_bvec *bvec,
				 struct page **old_bvpage)
{
	if (iter->cur == iter->nr)
		*old_bvpage = z_erofs_bvset_flip(iter);
	else
		*old_bvpage = NULL;
	*bvec = iter->bvset->bvec[iter->cur++];
}

static void z_erofs_destroy_pcluster_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
		if (!pcluster_pool[i].slab)
			continue;
		kmem_cache_destroy(pcluster_pool[i].slab);
		pcluster_pool[i].slab = NULL;
	}
}

static int z_erofs_create_pcluster_pool(void)
{
	struct z_erofs_pcluster_slab *pcs;
	struct z_erofs_pcluster *a;
	unsigned int size;

	for (pcs = pcluster_pool;
	     pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
		size = struct_size(a, compressed_bvecs, pcs->maxpages);

		sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages);
		pcs->slab = kmem_cache_create(pcs->name, size, 0,
					      SLAB_RECLAIM_ACCOUNT, NULL);
		if (pcs->slab)
			continue;

		z_erofs_destroy_pcluster_pool();
		return -ENOMEM;
	}
	return 0;
}
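
/*
 * Pick the smallest slab that can hold a pcluster with `nrpages' compressed
 * pages, e.g. a 9-page pcluster is served from the 16-page
 * "erofs_pcluster-16" cache; pclusters larger than
 * Z_EROFS_PCLUSTER_MAX_PAGES are rejected with -EINVAL.
 */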
static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
		struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
		struct z_erofs_pcluster *pcl;

		if (nrpages > pcs->maxpages)
			continue;

		pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS);
		if (!pcl)
			return ERR_PTR(-ENOMEM);
		pcl->pclusterpages = nrpages;
		return pcl;
	}
	return ERR_PTR(-EINVAL);
}

static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
{
	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
	int i;

	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
		struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;

		if (pclusterpages > pcs->maxpages)
			continue;

		kmem_cache_free(pcs->slab, pcl);
		return;
	}
	DBG_BUGON(1);
}

/* how to allocate cached pages for a pcluster */
enum z_erofs_cache_alloctype {
	DONTALLOC,	/* don't allocate any cached pages */
	/*
	 * try to use cached I/O if page allocation succeeds or fallback
	 * to in-place I/O instead to avoid any direct reclaim.
	 */
	TRYALLOC,
};

/*
 * tagged pointer with 1-bit tag for all compressed pages
 * tag 0 - the page is just found with an extra page reference
 */
typedef tagptr1_t compressed_page_t;

#define tag_compressed_page_justfound(page) \
	tagptr_fold(compressed_page_t, page, 1)

static struct workqueue_struct *z_erofs_workqueue __read_mostly;

void z_erofs_exit_zip_subsystem(void)
{
	destroy_workqueue(z_erofs_workqueue);
	z_erofs_destroy_pcluster_pool();
}

static inline int z_erofs_init_workqueue(void)
{
	const unsigned int onlinecpus = num_possible_cpus();

	/*
	 * no need to spawn too many threads, limiting threads can minimize
	 * scheduling overhead; perhaps per-CPU threads would be better?
	 */
	z_erofs_workqueue = alloc_workqueue("erofs_unzipd",
					    WQ_UNBOUND | WQ_HIGHPRI,
					    onlinecpus + onlinecpus / 4);
	return z_erofs_workqueue ? 0 : -ENOMEM;
}

int __init z_erofs_init_zip_subsystem(void)
{
	int err = z_erofs_create_pcluster_pool();

	if (err)
		return err;
	err = z_erofs_init_workqueue();
	if (err)
		z_erofs_destroy_pcluster_pool();
	return err;
}

enum z_erofs_pclustermode {
	Z_EROFS_PCLUSTER_INFLIGHT,
	/*
	 * The current pcluster was the tail of an existing chain, and the
	 * previously processed chained pclusters have all been decided to
	 * be hooked up to it.
	 * A new chain will be created for the remaining pclusters which are
	 * not processed yet, so different from Z_EROFS_PCLUSTER_FOLLOWED,
	 * the next pcluster cannot reuse the whole page safely for inplace
	 * I/O in the following scenario:
	 *  ________________________________________________________________
	 * |      tail (partial) page     |       head (partial) page       |
	 * |   (belongs to the next pcl)  |   (belongs to the current pcl)  |
	 * |_______PCLUSTER_FOLLOWED______|________PCLUSTER_HOOKED__________|
	 */
	Z_EROFS_PCLUSTER_HOOKED,
	/*
	 * a weak form of Z_EROFS_PCLUSTER_FOLLOWED, the difference is that it
	 * could be dispatched into the bypass queue later due to up-to-date
	 * managed pages. None of the related online pages can be reused for
	 * inplace I/O (or bvpage) since it can be decoded directly without
	 * I/O submission.
	 */
	Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
	/*
	 * The current collection has been linked with the owned chain, and
	 * could also be linked with the remaining collections, which means
	 * that if the processing page is the tail page of the collection,
	 * the current collection can safely use the whole page (since the
	 * previous collection is under control) for in-place I/O, as
	 * illustrated below:
	 *  ________________________________________________________________
	 * |  tail (partial) page |          head (partial) page           |
	 * |  (of the current cl) |      (of the previous collection)      |
	 * |  PCLUSTER_FOLLOWED or|                                        |
	 * |_____PCLUSTER_HOOKED__|___________PCLUSTER_FOLLOWED____________|
	 *
	 * [  (*) the above page can be used as inplace I/O.               ]
	 */
	Z_EROFS_PCLUSTER_FOLLOWED,
};
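
/*
 * Note that the frontend compares the modes above numerically
 * (e.g. "fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED"), so their declaration
 * order is significant.
 */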

struct z_erofs_decompress_frontend {
	struct inode *const inode;
	struct erofs_map_blocks map;
	struct z_erofs_bvec_iter biter;

	struct page *candidate_bvpage;
	struct z_erofs_pcluster *pcl, *tailpcl;
	z_erofs_next_pcluster_t owned_head;
	enum z_erofs_pclustermode mode;

	bool readahead;
	/* used for applying cache strategy on the fly */
	bool backmost;
	erofs_off_t headoffset;

	/* a cursor used to pick up inplace I/O pages */
	unsigned int icur;
};

#define DECOMPRESS_FRONTEND_INIT(__i) { \
	.inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
	.mode = Z_EROFS_PCLUSTER_FOLLOWED, .backmost = true }

static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
			       enum z_erofs_cache_alloctype type,
			       struct page **pagepool)
{
	struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
	struct z_erofs_pcluster *pcl = fe->pcl;
	bool standalone = true;
	/*
	 * optimistic allocation without direct reclaim since inplace I/O
	 * can be used as a fallback under low memory instead.
	 */
	gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
			__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
	unsigned int i;

	if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
		return;

	for (i = 0; i < pcl->pclusterpages; ++i) {
		struct page *page;
		compressed_page_t t;
		struct page *newpage = NULL;

		/* the compressed page was loaded before */
		if (READ_ONCE(pcl->compressed_bvecs[i].page))
			continue;

		page = find_get_page(mc, pcl->obj.index + i);

		if (page) {
			t = tag_compressed_page_justfound(page);
		} else {
			/* I/O is needed, it's not possible to decompress directly */
			standalone = false;
			switch (type) {
			case TRYALLOC:
				newpage = erofs_allocpage(pagepool, gfp);
				if (!newpage)
					continue;
				set_page_private(newpage,
						 Z_EROFS_PREALLOCATED_PAGE);
				t = tag_compressed_page_justfound(newpage);
				break;
			default:	/* DONTALLOC */
				continue;
			}
		}

		if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL,
				     tagptr_cast_ptr(t)))
			continue;

		if (page)
			put_page(page);
		else if (newpage)
			erofs_pagepool_add(pagepool, newpage);
	}

	/*
	 * don't do inplace I/O if all compressed pages are available in
	 * managed cache since it can be moved to the bypass queue instead.
	 */
	if (standalone)
		fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
}

/* called by erofs_shrinker to get rid of all compressed_pages */
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
				       struct erofs_workgroup *grp)
{
	struct z_erofs_pcluster *const pcl =
		container_of(grp, struct z_erofs_pcluster, obj);
	int i;

	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
	/*
	 * refcount of workgroup is now frozen to 1,
	 * therefore no need to worry about available decompression users.
	 */
	for (i = 0; i < pcl->pclusterpages; ++i) {
		struct page *page = pcl->compressed_bvecs[i].page;

		if (!page)
			continue;

		/* block other users from reclaiming or migrating the page */
		if (!trylock_page(page))
			return -EBUSY;

		if (!erofs_page_is_managed(sbi, page))
			continue;

		/* barrier is implied in the following 'unlock_page' */
		WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
		detach_page_private(page);
		unlock_page(page);
	}
	return 0;
}

int erofs_try_to_free_cached_page(struct page *page)
{
	struct z_erofs_pcluster *const pcl = (void *)page_private(page);
	int ret, i;

	if (!erofs_workgroup_try_to_freeze(&pcl->obj, 1))
		return 0;

	ret = 0;
	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
	for (i = 0; i < pcl->pclusterpages; ++i) {
		if (pcl->compressed_bvecs[i].page == page) {
			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
			ret = 1;
			break;
		}
	}
	erofs_workgroup_unfreeze(&pcl->obj, 1);
	if (ret)
		detach_page_private(page);
	return ret;
}

static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
				   struct z_erofs_bvec *bvec)
{
	struct z_erofs_pcluster *const pcl = fe->pcl;

	while (fe->icur > 0) {
		if (!cmpxchg(&pcl->compressed_bvecs[--fe->icur].page,
			     NULL, bvec->page)) {
			pcl->compressed_bvecs[fe->icur] = *bvec;
			return true;
		}
	}
	return false;
}

/* callers must hold the pcluster lock */
static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
			       struct z_erofs_bvec *bvec, bool exclusive)
{
	int ret;

	if (exclusive) {
		/* give priority to inplace I/O so that file pages are used first */
		if (z_erofs_try_inplace_io(fe, bvec))
			return 0;
		/* otherwise, check if it can be used as a bvpage */
		if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
		    !fe->candidate_bvpage)
			fe->candidate_bvpage = bvec->page;
	}
	ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage);
	fe->pcl->vcnt += (ret >= 0);
	return ret;
}
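
/*
 * pclusters that share one bio submission are chained together through
 * lockless cmpxchg()s on their `next' fields: Z_EROFS_PCLUSTER_NIL marks a
 * pcluster which doesn't belong to any chain yet, while
 * Z_EROFS_PCLUSTER_TAIL marks the open end of an existing chain.
 */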
static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
{
	struct z_erofs_pcluster *pcl = f->pcl;
	z_erofs_next_pcluster_t *owned_head = &f->owned_head;

	/* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */
	if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
		    *owned_head) == Z_EROFS_PCLUSTER_NIL) {
		*owned_head = &pcl->next;
		/* so we can attach this pcluster to our submission chain. */
		f->mode = Z_EROFS_PCLUSTER_FOLLOWED;
		return;
	}

	/*
	 * type 2, link to the end of an existing open chain, be careful
	 * that its submission is controlled by the original attached chain.
	 */
	if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
		    *owned_head) == Z_EROFS_PCLUSTER_TAIL) {
		*owned_head = Z_EROFS_PCLUSTER_TAIL;
		f->mode = Z_EROFS_PCLUSTER_HOOKED;
		f->tailpcl = NULL;
		return;
	}
	/* type 3, it belongs to a chain, but it isn't the end of the chain */
	f->mode = Z_EROFS_PCLUSTER_INFLIGHT;
}

static int z_erofs_lookup_pcluster(struct z_erofs_decompress_frontend *fe)
{
	struct erofs_map_blocks *map = &fe->map;
	struct z_erofs_pcluster *pcl = fe->pcl;
	unsigned int length;

	/* to avoid unexpected loop formed by corrupted images */
	if (fe->owned_head == &pcl->next || pcl == fe->tailpcl) {
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	if (pcl->pageofs_out != (map->m_la & ~PAGE_MASK)) {
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	length = READ_ONCE(pcl->length);
	if (length & Z_EROFS_PCLUSTER_FULL_LENGTH) {
		if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) {
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
	} else {
		unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT;

		if (map->m_flags & EROFS_MAP_FULL_MAPPED)
			llen |= Z_EROFS_PCLUSTER_FULL_LENGTH;

		while (llen > length &&
		       length != cmpxchg_relaxed(&pcl->length, length, llen)) {
			cpu_relax();
			length = READ_ONCE(pcl->length);
		}
	}
	mutex_lock(&pcl->lock);
	/* used to check tail merging loop due to corrupted images */
	if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
		fe->tailpcl = pcl;

	z_erofs_try_to_claim_pcluster(fe);
	return 0;
}

static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
{
	struct erofs_map_blocks *map = &fe->map;
	bool ztailpacking = map->m_flags & EROFS_MAP_META;
	struct z_erofs_pcluster *pcl;
	struct erofs_workgroup *grp;
	int err;

	if (!(map->m_flags & EROFS_MAP_ENCODED)) {
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	/* no available pcluster, let's allocate one */
	pcl = z_erofs_alloc_pcluster(ztailpacking ? 1 :
				     map->m_plen >> PAGE_SHIFT);
	if (IS_ERR(pcl))
		return PTR_ERR(pcl);

	atomic_set(&pcl->obj.refcount, 1);
	pcl->algorithmformat = map->m_algorithmformat;
	pcl->length = (map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) |
		(map->m_flags & EROFS_MAP_FULL_MAPPED ?
			Z_EROFS_PCLUSTER_FULL_LENGTH : 0);

	/* new pclusters should be claimed as type 1, primary and followed */
	pcl->next = fe->owned_head;
	pcl->pageofs_out = map->m_la & ~PAGE_MASK;
	fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;

	/*
	 * lock all primary followed works before they become visible to
	 * others; mutex_trylock *never* fails for a new pcluster.
	 */
	mutex_init(&pcl->lock);
	DBG_BUGON(!mutex_trylock(&pcl->lock));

	if (ztailpacking) {
		pcl->obj.index = 0;	/* which indicates ztailpacking */
		pcl->pageofs_in = erofs_blkoff(map->m_pa);
		pcl->tailpacking_size = map->m_plen;
	} else {
		pcl->obj.index = map->m_pa >> PAGE_SHIFT;

		grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj);
		if (IS_ERR(grp)) {
			err = PTR_ERR(grp);
			goto err_out;
		}

		if (grp != &pcl->obj) {
			fe->pcl = container_of(grp,
					struct z_erofs_pcluster, obj);
			err = -EEXIST;
			goto err_out;
		}
	}
	/* used to check tail merging loop due to corrupted images */
	if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
		fe->tailpcl = pcl;
	fe->owned_head = &pcl->next;
	fe->pcl = pcl;
	return 0;

err_out:
	mutex_unlock(&pcl->lock);
	z_erofs_free_pcluster(pcl);
	return err;
}

static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe)
{
	struct erofs_map_blocks *map = &fe->map;
	struct erofs_workgroup *grp = NULL;
	int ret;

	DBG_BUGON(fe->pcl);

	/* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */
	DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
	DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);

	if (!(map->m_flags & EROFS_MAP_META)) {
		grp = erofs_find_workgroup(fe->inode->i_sb,
					   map->m_pa >> PAGE_SHIFT);
	} else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	if (grp) {
		fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
		ret = -EEXIST;
	} else {
		ret = z_erofs_register_pcluster(fe);
	}

	if (ret == -EEXIST) {
		ret = z_erofs_lookup_pcluster(fe);
		if (ret) {
			erofs_workgroup_put(&fe->pcl->obj);
			return ret;
		}
	} else if (ret) {
		return ret;
	}
	z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
				Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
	/* since file-backed online pages are traversed in reverse order */
	fe->icur = z_erofs_pclusterpages(fe->pcl);
	return 0;
}

/*
 * Keep in mind that no referenced pclusters will be freed; they are only
 * freed after an RCU grace period.
 */
static void z_erofs_rcu_callback(struct rcu_head *head)
{
	z_erofs_free_pcluster(container_of(head,
			struct z_erofs_pcluster, rcu));
}

void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
{
	struct z_erofs_pcluster *const pcl =
		container_of(grp, struct z_erofs_pcluster, obj);

	call_rcu(&pcl->rcu, z_erofs_rcu_callback);
}

static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
{
	struct z_erofs_pcluster *pcl = fe->pcl;

	if (!pcl)
		return false;

	z_erofs_bvec_iter_end(&fe->biter);
	mutex_unlock(&pcl->lock);

	if (fe->candidate_bvpage) {
		DBG_BUGON(z_erofs_is_shortlived_page(fe->candidate_bvpage));
		fe->candidate_bvpage = NULL;
	}

	/*
	 * if all pending pages are added, don't hold the pcluster reference
	 * any longer if it isn't hosted by ourselves.
	 */
	if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
		erofs_workgroup_put(&pcl->obj);

	fe->pcl = NULL;
	return true;
}

static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
				       unsigned int cachestrategy,
				       erofs_off_t la)
{
	if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
		return false;

	if (fe->backmost)
		return true;

	return cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
		la < fe->headoffset;
}
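
/*
 * Attach one locked file page to its pcluster(s): look up the logical
 * mapping for each unhandled part of the page (a page may straddle several
 * pclusters), register or look up the matching pcluster, then record the
 * page either as an inplace I/O page or in the per-pcluster bvec set for
 * later decompression.
 */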
static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
				struct page *page, struct page **pagepool)
{
	struct inode *const inode = fe->inode;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
	struct erofs_map_blocks *const map = &fe->map;
	const loff_t offset = page_offset(page);
	bool tight = true, exclusive;

	enum z_erofs_cache_alloctype cache_strategy;
	unsigned int cur, end, spiltted, index;
	int err = 0;

	/* register locked file pages as online pages in pack */
	z_erofs_onlinepage_init(page);

	spiltted = 0;
	end = PAGE_SIZE;
repeat:
	cur = end - 1;

	if (offset + cur < map->m_la ||
	    offset + cur >= map->m_la + map->m_llen) {
		erofs_dbg("out-of-range map @ pos %llu", offset + cur);

		if (z_erofs_collector_end(fe))
			fe->backmost = false;
		map->m_la = offset + cur;
		map->m_llen = 0;
		err = z_erofs_map_blocks_iter(inode, map, 0);
		if (err)
			goto out;
	} else {
		if (fe->pcl)
			goto hitted;
		/* didn't get a valid pcluster previously (very rare) */
	}

	if (!(map->m_flags & EROFS_MAP_MAPPED))
		goto hitted;

	err = z_erofs_collector_begin(fe);
	if (err)
		goto out;

	if (z_erofs_is_inline_pcluster(fe->pcl)) {
		void *mp;

		mp = erofs_read_metabuf(&fe->map.buf, inode->i_sb,
					erofs_blknr(map->m_pa), EROFS_NO_KMAP);
		if (IS_ERR(mp)) {
			err = PTR_ERR(mp);
			erofs_err(inode->i_sb,
				  "failed to get inline page, err %d", err);
			goto out;
		}
		get_page(fe->map.buf.page);
		WRITE_ONCE(fe->pcl->compressed_bvecs[0].page,
			   fe->map.buf.page);
		fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
	} else {
		/* bind cache first when cached decompression is preferred */
		if (should_alloc_managed_pages(fe, sbi->opt.cache_strategy,
					       map->m_la))
			cache_strategy = TRYALLOC;
		else
			cache_strategy = DONTALLOC;

		z_erofs_bind_cache(fe, cache_strategy, pagepool);
	}
hitted:
	/*
	 * Ensure the current partial page belongs to this submit chain rather
	 * than other concurrent submit chains or the noio(bypass) chain since
	 * those chains are handled asynchronously thus the page cannot be used
	 * for inplace I/O or bvpage (should be processed in a strict order.)
	 */
	tight &= (fe->mode >= Z_EROFS_PCLUSTER_HOOKED &&
		  fe->mode != Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);

	cur = end - min_t(unsigned int, offset + end - map->m_la, end);
	if (!(map->m_flags & EROFS_MAP_MAPPED)) {
		zero_user_segment(page, cur, end);
		goto next_part;
	}

	exclusive = (!cur && (!spiltted || tight));
	if (cur)
		tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED);

retry:
	err = z_erofs_attach_page(fe, &((struct z_erofs_bvec) {
					.page = page,
					.offset = offset - map->m_la,
					.end = end,
				  }), exclusive);
	/* should allocate an additional short-lived page for bvset */
	if (err == -EAGAIN && !fe->candidate_bvpage) {
		fe->candidate_bvpage = alloc_page(GFP_NOFS | __GFP_NOFAIL);
		set_page_private(fe->candidate_bvpage,
				 Z_EROFS_SHORTLIVED_PAGE);
		goto retry;
	}

	if (err) {
		DBG_BUGON(err == -EAGAIN && fe->candidate_bvpage);
		goto out;
	}

	z_erofs_onlinepage_split(page);
	/* bump up the number of spiltted parts of a page */
	++spiltted;

	/* also update nr_pages */
	index = page->index - (map->m_la >> PAGE_SHIFT);
	fe->pcl->nr_pages = max_t(pgoff_t, fe->pcl->nr_pages, index + 1);
next_part:
	/* can be used for verification */
	map->m_llen = offset + cur - map->m_la;

	end = cur;
	if (end > 0)
		goto repeat;

out:
	if (err)
		z_erofs_page_mark_eio(page);
	z_erofs_onlinepage_endio(page);

	erofs_dbg("%s, finish page: %pK spiltted: %u map->m_llen %llu",
		  __func__, page, spiltted, map->m_llen);
	return err;
}

static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi,
					       unsigned int readahead_pages)
{
	/* auto: enable for read_folio, disable for readahead */
	if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) &&
	    !readahead_pages)
		return true;

	if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON) &&
	    (readahead_pages <= sbi->opt.max_sync_decompress_pages))
		return true;

	return false;
}

static bool z_erofs_page_is_invalidated(struct page *page)
{
	return !page->mapping && !z_erofs_is_shortlived_page(page);
}

struct z_erofs_decompress_backend {
	struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
	struct super_block *sb;
	struct z_erofs_pcluster *pcl;

	/* pages with the longest decompressed length for deduplication */
	struct page **decompressed_pages;
	/* pages to keep the compressed data */
	struct page **compressed_pages;

	struct page **pagepool;
	unsigned int onstack_used;
};

static int z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
					struct z_erofs_bvec *bvec)
{
	unsigned int pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
	struct page *oldpage;

	DBG_BUGON(pgnr >= be->pcl->nr_pages);
	oldpage = be->decompressed_pages[pgnr];
	be->decompressed_pages[pgnr] = bvec->page;

	/* error out if one pcluster is referenced multiple times. */
	if (oldpage) {
		DBG_BUGON(1);
		z_erofs_page_mark_eio(oldpage);
		z_erofs_onlinepage_endio(oldpage);
		return -EFSCORRUPTED;
	}
	return 0;
}

static int z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
{
	struct z_erofs_pcluster *pcl = be->pcl;
	struct z_erofs_bvec_iter biter;
	struct page *old_bvpage;
	int i, err = 0;

	z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0);
	for (i = 0; i < pcl->vcnt; ++i) {
		struct z_erofs_bvec bvec;

		z_erofs_bvec_dequeue(&biter, &bvec, &old_bvpage);

		if (old_bvpage)
			z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);

		DBG_BUGON(z_erofs_page_is_invalidated(bvec.page));
		err = z_erofs_do_decompressed_bvec(be, &bvec);
	}

	old_bvpage = z_erofs_bvec_iter_end(&biter);
	if (old_bvpage)
		z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
	return err;
}

static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
				  bool *overlapped)
{
	struct z_erofs_pcluster *pcl = be->pcl;
	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
	int i, err = 0;

	*overlapped = false;
	for (i = 0; i < pclusterpages; ++i) {
		struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i];
		struct page *page = bvec->page;

		/* compressed pages ought to be present before decompressing */
		if (!page) {
			DBG_BUGON(1);
			continue;
		}
		be->compressed_pages[i] = page;

		if (z_erofs_is_inline_pcluster(pcl)) {
			if (!PageUptodate(page))
				err = -EIO;
			continue;
		}

		DBG_BUGON(z_erofs_page_is_invalidated(page));
		if (!z_erofs_is_shortlived_page(page)) {
			if (erofs_page_is_managed(EROFS_SB(be->sb), page)) {
				if (!PageUptodate(page))
					err = -EIO;
				continue;
			}
			err = z_erofs_do_decompressed_bvec(be, bvec);
			*overlapped = true;
		}
	}

	if (err)
		return err;
	return 0;
}
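
/*
 * Decompress one pcluster: collect the output pages from the bvec set and
 * the input pages from compressed_bvecs[], run the decompressor, then drop
 * every non-managed compressed page and complete the attached online (file)
 * pages.
 */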
static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
				       int err)
{
	struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
	struct z_erofs_pcluster *pcl = be->pcl;
	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
	unsigned int i, inputsize, outputsize, llen, nr_pages;
	struct page *page;
	int err2;
	bool overlapped, partial;

	DBG_BUGON(!READ_ONCE(pcl->nr_pages));
	mutex_lock(&pcl->lock);
	nr_pages = pcl->nr_pages;

	/* allocate (de)compressed page arrays if they cannot be kept on stack */
	be->decompressed_pages = NULL;
	be->compressed_pages = NULL;
	be->onstack_used = 0;
	if (nr_pages <= Z_EROFS_ONSTACK_PAGES) {
		be->decompressed_pages = be->onstack_pages;
		be->onstack_used = nr_pages;
		memset(be->decompressed_pages, 0,
		       sizeof(struct page *) * nr_pages);
	}

	if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES)
		be->compressed_pages = be->onstack_pages + be->onstack_used;

	if (!be->decompressed_pages)
		be->decompressed_pages =
			kvcalloc(nr_pages, sizeof(struct page *),
				 GFP_KERNEL | __GFP_NOFAIL);
	if (!be->compressed_pages)
		be->compressed_pages =
			kvcalloc(pclusterpages, sizeof(struct page *),
				 GFP_KERNEL | __GFP_NOFAIL);

	err2 = z_erofs_parse_out_bvecs(be);
	if (err2)
		err = err2;
	err2 = z_erofs_parse_in_bvecs(be, &overlapped);
	if (err2)
		err = err2;

	if (err)
		goto out;

	llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT;
	if (nr_pages << PAGE_SHIFT >= pcl->pageofs_out + llen) {
		outputsize = llen;
		partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH);
	} else {
		outputsize = (nr_pages << PAGE_SHIFT) - pcl->pageofs_out;
		partial = true;
	}

	if (z_erofs_is_inline_pcluster(pcl))
		inputsize = pcl->tailpacking_size;
	else
		inputsize = pclusterpages * PAGE_SIZE;

	err = z_erofs_decompress(&(struct z_erofs_decompress_req) {
					.sb = be->sb,
					.in = be->compressed_pages,
					.out = be->decompressed_pages,
					.pageofs_in = pcl->pageofs_in,
					.pageofs_out = pcl->pageofs_out,
					.inputsize = inputsize,
					.outputsize = outputsize,
					.alg = pcl->algorithmformat,
					.inplace_io = overlapped,
					.partial_decoding = partial
				 }, be->pagepool);

out:
	/* must handle all compressed pages before actual file pages */
	if (z_erofs_is_inline_pcluster(pcl)) {
		page = pcl->compressed_bvecs[0].page;
		WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
		put_page(page);
	} else {
		for (i = 0; i < pclusterpages; ++i) {
			page = pcl->compressed_bvecs[i].page;

			if (erofs_page_is_managed(sbi, page))
				continue;

			/* recycle all individual short-lived pages */
			(void)z_erofs_put_shortlivedpage(be->pagepool, page);
			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
		}
	}
	if (be->compressed_pages < be->onstack_pages ||
	    be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
		kvfree(be->compressed_pages);

	for (i = 0; i < nr_pages; ++i) {
		page = be->decompressed_pages[i];
		if (!page)
			continue;

		DBG_BUGON(z_erofs_page_is_invalidated(page));

		/* recycle all individual short-lived pages */
		if (z_erofs_put_shortlivedpage(be->pagepool, page))
			continue;
		if (err)
			z_erofs_page_mark_eio(page);
		z_erofs_onlinepage_endio(page);
	}

	if (be->decompressed_pages != be->onstack_pages)
		kvfree(be->decompressed_pages);

	pcl->nr_pages = 0;
	pcl->bvset.nextpage = NULL;
	pcl->vcnt = 0;

	/* pcluster lock MUST be taken before the following line */
	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
	mutex_unlock(&pcl->lock);
	return err;
}

static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
				     struct page **pagepool)
{
	struct z_erofs_decompress_backend be = {
		.sb = io->sb,
		.pagepool = pagepool,
	};
	z_erofs_next_pcluster_t owned = io->head;

	while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) {
		/* impossible that 'owned' equals Z_EROFS_PCLUSTER_TAIL */
		DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL);
		/* impossible that 'owned' equals Z_EROFS_PCLUSTER_NIL */
		DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);

		be.pcl = container_of(owned, struct z_erofs_pcluster, next);
		owned = READ_ONCE(be.pcl->next);

		z_erofs_decompress_pcluster(&be, io->eio ? -EIO : 0);
		erofs_workgroup_put(&be.pcl->obj);
	}
}

static void z_erofs_decompressqueue_work(struct work_struct *work)
{
	struct z_erofs_decompressqueue *bgq =
		container_of(work, struct z_erofs_decompressqueue, u.work);
	struct page *pagepool = NULL;

	DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
	z_erofs_decompress_queue(bgq, &pagepool);

	erofs_release_pages(&pagepool);
	kvfree(bgq);
}

static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
				       bool sync, int bios)
{
	struct erofs_sb_info *const sbi = EROFS_SB(io->sb);

	/* wake up the caller thread for sync decompression */
	if (sync) {
		if (!atomic_add_return(bios, &io->pending_bios))
			complete(&io->u.done);
		return;
	}

	if (atomic_add_return(bios, &io->pending_bios))
		return;
	/* Use workqueue and sync decompression for atomic contexts only */
	if (in_atomic() || irqs_disabled()) {
		queue_work(z_erofs_workqueue, &io->u.work);
		/* enable sync decompression for readahead */
		if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
			sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
		return;
	}
	z_erofs_decompressqueue_work(&io->u.work);
}
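
/*
 * Grab the compressed page at slot `nr' for bio submission: reuse the page
 * recorded in compressed_bvecs[] (a preallocated, inplace or managed-cache
 * page) when it is still usable; otherwise allocate a fresh page and try to
 * add it to the managed cache, falling back to a short-lived page if that
 * fails.  A NULL return means no I/O is needed for this slot.
 */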
static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
					       unsigned int nr,
					       struct page **pagepool,
					       struct address_space *mc)
{
	const pgoff_t index = pcl->obj.index;
	gfp_t gfp = mapping_gfp_mask(mc);
	bool tocache = false;

	struct address_space *mapping;
	struct page *oldpage, *page;

	compressed_page_t t;
	int justfound;

repeat:
	page = READ_ONCE(pcl->compressed_bvecs[nr].page);
	oldpage = page;

	if (!page)
		goto out_allocpage;

	/* process the target tagged pointer */
	t = tagptr_init(compressed_page_t, page);
	justfound = tagptr_unfold_tags(t);
	page = tagptr_unfold_ptr(t);

	/*
	 * preallocated cached pages, which are used to avoid direct reclaim;
	 * otherwise, it will go down the inplace I/O path instead.
	 */
	if (page->private == Z_EROFS_PREALLOCATED_PAGE) {
		WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);
		set_page_private(page, 0);
		tocache = true;
		goto out_tocache;
	}
	mapping = READ_ONCE(page->mapping);

	/*
	 * file-backed online pages in the pcluster are all locked steady,
	 * therefore it is impossible for `mapping' to be NULL.
	 */
	if (mapping && mapping != mc)
		/* ought to be unmanaged pages */
		goto out;

	/* directly return for shortlived page as well */
	if (z_erofs_is_shortlived_page(page))
		goto out;

	lock_page(page);

	/* only true if page reclaim goes wrong, should never happen */
	DBG_BUGON(justfound && PagePrivate(page));

	/* the page is still in managed cache */
	if (page->mapping == mc) {
		WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);

		if (!PagePrivate(page)) {
			/*
			 * impossible to be !PagePrivate(page) for
			 * the current restriction as well if
			 * the page is already in compressed_bvecs[].
			 */
			DBG_BUGON(!justfound);

			justfound = 0;
			set_page_private(page, (unsigned long)pcl);
			SetPagePrivate(page);
		}

		/* no need to submit io if it is already up-to-date */
		if (PageUptodate(page)) {
			unlock_page(page);
			page = NULL;
		}
		goto out;
	}

	/*
	 * the managed page has been truncated, it's unsafe to
	 * reuse this one, let's allocate a new cache-managed page.
	 */
	DBG_BUGON(page->mapping);
	DBG_BUGON(!justfound);

	tocache = true;
	unlock_page(page);
	put_page(page);
out_allocpage:
	page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
	if (oldpage != cmpxchg(&pcl->compressed_bvecs[nr].page,
			       oldpage, page)) {
		erofs_pagepool_add(pagepool, page);
		cond_resched();
		goto repeat;
	}
out_tocache:
	if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
		/* turn into a temporary page if it fails (1 ref) */
		set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);
		goto out;
	}
	attach_page_private(page, pcl);
	/* drop a refcount added by allocpage (then we have 2 refs here) */
	put_page(page);

out:	/* the only exit (for tracing and debugging) */
	return page;
}

static struct z_erofs_decompressqueue *
jobqueue_init(struct super_block *sb,
	      struct z_erofs_decompressqueue *fgq, bool *fg)
{
	struct z_erofs_decompressqueue *q;

	if (fg && !*fg) {
		q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
		if (!q) {
			*fg = true;
			goto fg_out;
		}
		INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
	} else {
fg_out:
		q = fgq;
		init_completion(&fgq->u.done);
		atomic_set(&fgq->pending_bios, 0);
		q->eio = false;
	}
	q->sb = sb;
	q->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
	return q;
}

/* define decompression jobqueue types */
enum {
	JQ_BYPASS,
	JQ_SUBMIT,
	NR_JOBQUEUES,
};

static void *jobqueueset_init(struct super_block *sb,
			      struct z_erofs_decompressqueue *q[],
			      struct z_erofs_decompressqueue *fgq, bool *fg)
{
	/*
	 * if managed cache is enabled, bypass jobqueue is needed,
	 * no need to read from device for all pclusters in this queue.
	 */
	q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
	q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, fg);

	return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], *fg));
}

static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
				    z_erofs_next_pcluster_t qtail[],
				    z_erofs_next_pcluster_t owned_head)
{
	z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
	z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];

	DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
	if (owned_head == Z_EROFS_PCLUSTER_TAIL)
		owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED;

	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED);

	WRITE_ONCE(*submit_qtail, owned_head);
	WRITE_ONCE(*bypass_qtail, &pcl->next);

	qtail[JQ_BYPASS] = &pcl->next;
}

static void z_erofs_decompressqueue_endio(struct bio *bio)
{
	tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
	struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
	blk_status_t err = bio->bi_status;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		DBG_BUGON(PageUptodate(page));
		DBG_BUGON(z_erofs_page_is_invalidated(page));

		if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
			if (!err)
				SetPageUptodate(page);
			unlock_page(page);
		}
	}
	if (err)
		q->eio = true;
	z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
	bio_put(bio);
}
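
/*
 * Walk the chain of pclusters starting at f->owned_head, pick up each
 * compressed page and batch physically contiguous ones into read bios;
 * pclusters that need no I/O at all (inline or fully cached) are moved to
 * the bypass jobqueue so that they can be decompressed right away.
 */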
static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
				 struct page **pagepool,
				 struct z_erofs_decompressqueue *fgq,
				 bool *force_fg)
{
	struct super_block *sb = f->inode->i_sb;
	struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
	struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
	void *bi_private;
	z_erofs_next_pcluster_t owned_head = f->owned_head;
	/* bio is NULL initially, so no need to initialize last_{index,bdev} */
	pgoff_t last_index;
	struct block_device *last_bdev;
	unsigned int nr_bios = 0;
	struct bio *bio = NULL;

	bi_private = jobqueueset_init(sb, q, fgq, force_fg);
	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;

	/* by default, all need io submission */
	q[JQ_SUBMIT]->head = owned_head;

	do {
		struct erofs_map_dev mdev;
		struct z_erofs_pcluster *pcl;
		pgoff_t cur, end;
		unsigned int i = 0;
		bool bypass = true;

		/* it's impossible that 'owned_head' equals the following */
		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);

		pcl = container_of(owned_head, struct z_erofs_pcluster, next);

		/* close the main owned chain at first */
		owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
				     Z_EROFS_PCLUSTER_TAIL_CLOSED);
		if (z_erofs_is_inline_pcluster(pcl)) {
			move_to_bypass_jobqueue(pcl, qtail, owned_head);
			continue;
		}

		/* no device id here, thus it will always succeed */
		mdev = (struct erofs_map_dev) {
			.m_pa = blknr_to_addr(pcl->obj.index),
		};
		(void)erofs_map_dev(sb, &mdev);

		cur = erofs_blknr(mdev.m_pa);
		end = cur + pcl->pclusterpages;

		do {
			struct page *page;

			page = pickup_page_for_submission(pcl, i++, pagepool,
							  mc);
			if (!page)
				continue;

			if (bio && (cur != last_index + 1 ||
				    last_bdev != mdev.m_bdev)) {
submit_bio_retry:
				submit_bio(bio);
				bio = NULL;
			}

			if (!bio) {
				bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
						REQ_OP_READ, GFP_NOIO);
				bio->bi_end_io = z_erofs_decompressqueue_endio;

				last_bdev = mdev.m_bdev;
				bio->bi_iter.bi_sector = (sector_t)cur <<
					LOG_SECTORS_PER_BLOCK;
				bio->bi_private = bi_private;
				if (f->readahead)
					bio->bi_opf |= REQ_RAHEAD;
				++nr_bios;
			}

			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				goto submit_bio_retry;

			last_index = cur;
			bypass = false;
		} while (++cur < end);

		if (!bypass)
			qtail[JQ_SUBMIT] = &pcl->next;
		else
			move_to_bypass_jobqueue(pcl, qtail, owned_head);
	} while (owned_head != Z_EROFS_PCLUSTER_TAIL);

	if (bio)
		submit_bio(bio);

	/*
	 * although background is preferred, no one is pending for submission.
	 * don't issue decompression in a workqueue but drop it directly
	 * instead.
	 */
	if (!*force_fg && !nr_bios) {
		kvfree(q[JQ_SUBMIT]);
		return;
	}
	z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
}

static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
			     struct page **pagepool, bool force_fg)
{
	struct z_erofs_decompressqueue io[NR_JOBQUEUES];

	if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
		return;
	z_erofs_submit_queue(f, pagepool, io, &force_fg);

	/* handle bypass queue (no i/o pclusters) immediately */
	z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);

	if (!force_fg)
		return;

	/* wait until all bios are completed */
	wait_for_completion_io(&io[JQ_SUBMIT].u.done);

	/* handle synchronous decompress queue in the caller context */
	z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);
}

/*
 * Since partial uptodate is still unimplemented for now, we have to use
 * approximate readmore strategies as a start.
 */
static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
				      struct readahead_control *rac,
				      erofs_off_t end,
				      struct page **pagepool,
				      bool backmost)
{
	struct inode *inode = f->inode;
	struct erofs_map_blocks *map = &f->map;
	erofs_off_t cur;
	int err;

	if (backmost) {
		map->m_la = end;
		err = z_erofs_map_blocks_iter(inode, map,
					      EROFS_GET_BLOCKS_READMORE);
		if (err)
			return;

		/* expand ra for the trailing edge if readahead */
		if (rac) {
			loff_t newstart = readahead_pos(rac);

			cur = round_up(map->m_la + map->m_llen, PAGE_SIZE);
			readahead_expand(rac, newstart, cur - newstart);
			return;
		}
		end = round_up(end, PAGE_SIZE);
	} else {
		end = round_up(map->m_la, PAGE_SIZE);

		if (!map->m_llen)
			return;
	}

	cur = map->m_la + map->m_llen - 1;
	while (cur >= end) {
		pgoff_t index = cur >> PAGE_SHIFT;
		struct page *page;

		page = erofs_grab_cache_page_nowait(inode->i_mapping, index);
		if (page) {
			if (PageUptodate(page)) {
				unlock_page(page);
			} else {
				err = z_erofs_do_read_page(f, page, pagepool);
				if (err)
					erofs_err(inode->i_sb,
						  "readmore error at page %lu @ nid %llu",
						  index, EROFS_I(inode)->nid);
			}
			put_page(page);
		}

		if (cur < PAGE_SIZE)
			break;
		cur = (index << PAGE_SHIFT) - 1;
	}
}

static int z_erofs_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;
	struct inode *const inode = page->mapping->host;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
	struct page *pagepool = NULL;
	int err;

	trace_erofs_readpage(page, false);
	f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;

	z_erofs_pcluster_readmore(&f, NULL, f.headoffset + PAGE_SIZE - 1,
				  &pagepool, true);
	err = z_erofs_do_read_page(&f, page, &pagepool);
	z_erofs_pcluster_readmore(&f, NULL, 0, &pagepool, false);

	(void)z_erofs_collector_end(&f);

	/* if some compressed clusters are ready, we need to submit them anyway */
	z_erofs_runqueue(&f, &pagepool,
			 z_erofs_get_sync_decompress_policy(sbi, 0));

	if (err)
		erofs_err(inode->i_sb, "failed to read, err [%d]", err);

	erofs_put_metabuf(&f.map.buf);
	erofs_release_pages(&pagepool);
	return err;
}

static void z_erofs_readahead(struct readahead_control *rac)
{
	struct inode *const inode = rac->mapping->host;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
	struct page *pagepool = NULL, *head = NULL, *page;
	unsigned int nr_pages;

	f.readahead = true;
	f.headoffset = readahead_pos(rac);

	z_erofs_pcluster_readmore(&f, rac, f.headoffset +
				  readahead_length(rac) - 1, &pagepool, true);
	nr_pages = readahead_count(rac);
	trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);

	while ((page = readahead_page(rac))) {
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	while (head) {
		struct page *page = head;
		int err;

		/* traversal in reverse order */
		head = (void *)page_private(page);

		err = z_erofs_do_read_page(&f, page, &pagepool);
		if (err)
			erofs_err(inode->i_sb,
				  "readahead error at page %lu @ nid %llu",
				  page->index, EROFS_I(inode)->nid);
		put_page(page);
	}
	z_erofs_pcluster_readmore(&f, rac, 0, &pagepool, false);
	(void)z_erofs_collector_end(&f);

	z_erofs_runqueue(&f, &pagepool,
			 z_erofs_get_sync_decompress_policy(sbi, nr_pages));
	erofs_put_metabuf(&f.map.buf);
	erofs_release_pages(&pagepool);
}

const struct address_space_operations z_erofs_aops = {
	.read_folio = z_erofs_read_folio,
	.readahead = z_erofs_readahead,
};