// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2022 Alibaba Cloud
 */
#include "compress.h"
#include <linux/psi.h>
#include <linux/cpuhotplug.h>
#include <trace/events/erofs.h>

#define Z_EROFS_PCLUSTER_MAX_PAGES	(Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
#define Z_EROFS_INLINE_BVECS		2

/*
 * let's leave a type here in case of introducing
 * another tagged pointer later.
 */
typedef void *z_erofs_next_pcluster_t;

struct z_erofs_bvec {
	struct page *page;
	int offset;
	unsigned int end;
};

#define __Z_EROFS_BVSET(name, total) \
struct name { \
	/* point to the next page which contains the following bvecs */ \
	struct page *nextpage; \
	struct z_erofs_bvec bvec[total]; \
}
__Z_EROFS_BVSET(z_erofs_bvset,);
__Z_EROFS_BVSET(z_erofs_bvset_inline, Z_EROFS_INLINE_BVECS);

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only
 *    for everyone else;
 *
 * L: Field should be protected by the pcluster lock;
 *
 * A: Field should be accessed / updated atomically for parallelized code.
 */
struct z_erofs_pcluster {
	struct erofs_workgroup obj;
	struct mutex lock;

	/* A: point to next chained pcluster or TAILs */
	z_erofs_next_pcluster_t next;

	/* L: the maximum decompression size of this round */
	unsigned int length;

	/* L: total number of bvecs */
	unsigned int vcnt;

	/* I: page offset of start position of decompression */
	unsigned short pageofs_out;

	/* I: page offset of inline compressed data */
	unsigned short pageofs_in;

	union {
		/* L: inline a certain number of bvec for bootstrap */
		struct z_erofs_bvset_inline bvset;

		/* I: can be used to free the pcluster by RCU. */
		struct rcu_head rcu;
	};

	union {
		/* I: physical cluster size in pages */
		unsigned short pclusterpages;

		/* I: tailpacking inline compressed size */
		unsigned short tailpacking_size;
	};

	/* I: compression algorithm format */
	unsigned char algorithmformat;

	/* L: whether partial decompression or not */
	bool partial;

	/* L: indicate several pageofs_outs or not */
	bool multibases;

	/* A: compressed bvecs (can be cached or inplaced pages) */
	struct z_erofs_bvec compressed_bvecs[];
};

/* let's avoid the valid 32-bit kernel addresses */

/* the end of a chain of pclusters */
#define Z_EROFS_PCLUSTER_TAIL ((void *)0x5F0ECAFE)
#define Z_EROFS_PCLUSTER_NIL (NULL)
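/*
 * Illustrative note (added commentary, not from the original source): the two
 * markers above describe a pcluster's chaining state through its ->next field.
 * Z_EROFS_PCLUSTER_NIL means the pcluster currently belongs to no chain and can
 * be claimed with cmpxchg() in z_erofs_try_to_claim_pcluster(), while
 * Z_EROFS_PCLUSTER_TAIL terminates a submission chain that is still being
 * built.  Each read request splices the pclusters it claims in front of
 * f->owned_head, so the chain is walked from the most recently claimed
 * pcluster down to the TAIL marker by z_erofs_submit_queue() later.
 */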
struct z_erofs_decompressqueue {
	struct super_block *sb;
	atomic_t pending_bios;
	z_erofs_next_pcluster_t head;

	union {
		struct completion done;
		struct work_struct work;
		struct kthread_work kthread_work;
	} u;
	bool eio, sync;
};

static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
{
	return !pcl->obj.index;
}

static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
{
	if (z_erofs_is_inline_pcluster(pcl))
		return 1;
	return pcl->pclusterpages;
}

/*
 * bit 30: I/O error occurred on this page
 * bit 0 - 29: remaining parts to complete this page
 */
#define Z_EROFS_PAGE_EIO			(1 << 30)

static inline void z_erofs_onlinepage_init(struct page *page)
{
	union {
		atomic_t o;
		unsigned long v;
	} u = { .o = ATOMIC_INIT(1) };

	set_page_private(page, u.v);
	smp_wmb();
	SetPagePrivate(page);
}

static inline void z_erofs_onlinepage_split(struct page *page)
{
	atomic_inc((atomic_t *)&page->private);
}

static inline void z_erofs_page_mark_eio(struct page *page)
{
	int orig;

	do {
		orig = atomic_read((atomic_t *)&page->private);
	} while (atomic_cmpxchg((atomic_t *)&page->private, orig,
				orig | Z_EROFS_PAGE_EIO) != orig);
}

static inline void z_erofs_onlinepage_endio(struct page *page)
{
	unsigned int v;

	DBG_BUGON(!PagePrivate(page));
	v = atomic_dec_return((atomic_t *)&page->private);
	if (!(v & ~Z_EROFS_PAGE_EIO)) {
		set_page_private(page, 0);
		ClearPagePrivate(page);
		if (!(v & Z_EROFS_PAGE_EIO))
			SetPageUptodate(page);
		unlock_page(page);
	}
}

#define Z_EROFS_ONSTACK_PAGES		32

/*
 * since pclustersize is variable for the big pcluster feature, introduce slab
 * pools for different pcluster sizes.
 */
struct z_erofs_pcluster_slab {
	struct kmem_cache *slab;
	unsigned int maxpages;
	char name[48];
};

#define _PCLP(n) { .maxpages = n }

static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = {
	_PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128),
	_PCLP(Z_EROFS_PCLUSTER_MAX_PAGES)
};

struct z_erofs_bvec_iter {
	struct page *bvpage;
	struct z_erofs_bvset *bvset;
	unsigned int nr, cur;
};

static struct page *z_erofs_bvec_iter_end(struct z_erofs_bvec_iter *iter)
{
	if (iter->bvpage)
		kunmap_local(iter->bvset);
	return iter->bvpage;
}

static struct page *z_erofs_bvset_flip(struct z_erofs_bvec_iter *iter)
{
	unsigned long base = (unsigned long)((struct z_erofs_bvset *)0)->bvec;
	/* have to access nextpage in advance, otherwise it will be unmapped */
	struct page *nextpage = iter->bvset->nextpage;
	struct page *oldpage;

	DBG_BUGON(!nextpage);
	oldpage = z_erofs_bvec_iter_end(iter);
	iter->bvpage = nextpage;
	iter->bvset = kmap_local_page(nextpage);
	iter->nr = (PAGE_SIZE - base) / sizeof(struct z_erofs_bvec);
	iter->cur = 0;
	return oldpage;
}

static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
				    struct z_erofs_bvset_inline *bvset,
				    unsigned int bootstrap_nr,
				    unsigned int cur)
{
	*iter = (struct z_erofs_bvec_iter) {
		.nr = bootstrap_nr,
		.bvset = (struct z_erofs_bvset *)bvset,
	};

	while (cur > iter->nr) {
		cur -= iter->nr;
		z_erofs_bvset_flip(iter);
	}
	iter->cur = cur;
}

static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
				struct z_erofs_bvec *bvec,
				struct page **candidate_bvpage,
				struct page **pagepool)
{
	if (iter->cur >= iter->nr) {
		struct page *nextpage = *candidate_bvpage;

		if (!nextpage) {
			nextpage = erofs_allocpage(pagepool, GFP_NOFS);
			if (!nextpage)
				return -ENOMEM;
			set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE);
		}
		DBG_BUGON(iter->bvset->nextpage);
		iter->bvset->nextpage = nextpage;
		z_erofs_bvset_flip(iter);

		iter->bvset->nextpage = NULL;
		*candidate_bvpage = NULL;
	}
	iter->bvset->bvec[iter->cur++] = *bvec;
	return 0;
}

static void z_erofs_bvec_dequeue(struct z_erofs_bvec_iter *iter,
				 struct z_erofs_bvec *bvec,
				 struct page **old_bvpage)
{
	if (iter->cur == iter->nr)
		*old_bvpage = z_erofs_bvset_flip(iter);
	else
		*old_bvpage = NULL;
	*bvec = iter->bvset->bvec[iter->cur++];
}

static void z_erofs_destroy_pcluster_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
		if (!pcluster_pool[i].slab)
			continue;
		kmem_cache_destroy(pcluster_pool[i].slab);
		pcluster_pool[i].slab = NULL;
	}
}

static int z_erofs_create_pcluster_pool(void)
{
	struct z_erofs_pcluster_slab *pcs;
	struct z_erofs_pcluster *a;
	unsigned int size;

	for (pcs = pcluster_pool;
	     pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
		size = struct_size(a, compressed_bvecs, pcs->maxpages);

		sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages);
		pcs->slab = kmem_cache_create(pcs->name, size, 0,
					      SLAB_RECLAIM_ACCOUNT, NULL);
		if (pcs->slab)
			continue;

		z_erofs_destroy_pcluster_pool();
		return -ENOMEM;
	}
	return 0;
}

static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
		struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
		struct z_erofs_pcluster *pcl;

		if (nrpages > pcs->maxpages)
			continue;

		pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS);
		if (!pcl)
			return ERR_PTR(-ENOMEM);
		pcl->pclusterpages = nrpages;
		return pcl;
	}
	return ERR_PTR(-EINVAL);
}

static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
{
	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
	int i;

	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
		struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;

		if (pclusterpages > pcs->maxpages)
			continue;

		kmem_cache_free(pcs->slab, pcl);
		return;
	}
	DBG_BUGON(1);
}

static struct workqueue_struct *z_erofs_workqueue __read_mostly;

#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
static struct kthread_worker __rcu **z_erofs_pcpu_workers;

static void erofs_destroy_percpu_workers(void)
{
	struct kthread_worker *worker;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		worker = rcu_dereference_protected(
					z_erofs_pcpu_workers[cpu], 1);
		rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
		if (worker)
			kthread_destroy_worker(worker);
	}
	kfree(z_erofs_pcpu_workers);
}

static struct kthread_worker *erofs_init_percpu_worker(int cpu)
{
	struct kthread_worker *worker =
		kthread_create_worker_on_cpu(cpu, 0, "erofs_worker/%u", cpu);

	if (IS_ERR(worker))
		return worker;
	if (IS_ENABLED(CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI))
		sched_set_fifo_low(worker->task);
	return worker;
}

static int erofs_init_percpu_workers(void)
{
	struct kthread_worker *worker;
	unsigned int cpu;

	z_erofs_pcpu_workers = kcalloc(num_possible_cpus(),
			sizeof(struct kthread_worker *), GFP_ATOMIC);
	if (!z_erofs_pcpu_workers)
		return -ENOMEM;

	for_each_online_cpu(cpu) {	/* could miss cpu{off,on}line? */
		worker = erofs_init_percpu_worker(cpu);
		if (!IS_ERR(worker))
			rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
	}
	return 0;
}
#else
static inline void erofs_destroy_percpu_workers(void) {}
static inline int erofs_init_percpu_workers(void) { return 0; }
#endif

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_EROFS_FS_PCPU_KTHREAD)
static DEFINE_SPINLOCK(z_erofs_pcpu_worker_lock);
static enum cpuhp_state erofs_cpuhp_state;

static int erofs_cpu_online(unsigned int cpu)
{
	struct kthread_worker *worker, *old;

	worker = erofs_init_percpu_worker(cpu);
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	spin_lock(&z_erofs_pcpu_worker_lock);
	old = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
			lockdep_is_held(&z_erofs_pcpu_worker_lock));
	if (!old)
		rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
	spin_unlock(&z_erofs_pcpu_worker_lock);
	if (old)
		kthread_destroy_worker(worker);
	return 0;
}

static int erofs_cpu_offline(unsigned int cpu)
{
	struct kthread_worker *worker;

	spin_lock(&z_erofs_pcpu_worker_lock);
	worker = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
			lockdep_is_held(&z_erofs_pcpu_worker_lock));
	rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
	spin_unlock(&z_erofs_pcpu_worker_lock);

	synchronize_rcu();
	if (worker)
		kthread_destroy_worker(worker);
	return 0;
}

static int erofs_cpu_hotplug_init(void)
{
	int state;

	state = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
			"fs/erofs:online", erofs_cpu_online, erofs_cpu_offline);
	if (state < 0)
		return state;

	erofs_cpuhp_state = state;
	return 0;
}

static void erofs_cpu_hotplug_destroy(void)
{
	if (erofs_cpuhp_state)
		cpuhp_remove_state_nocalls(erofs_cpuhp_state);
}
#else /* !CONFIG_HOTPLUG_CPU || !CONFIG_EROFS_FS_PCPU_KTHREAD */
static inline int erofs_cpu_hotplug_init(void) { return 0; }
static inline void erofs_cpu_hotplug_destroy(void) {}
#endif

void z_erofs_exit_zip_subsystem(void)
{
	erofs_cpu_hotplug_destroy();
	erofs_destroy_percpu_workers();
	destroy_workqueue(z_erofs_workqueue);
	z_erofs_destroy_pcluster_pool();
}

int __init z_erofs_init_zip_subsystem(void)
{
	int err = z_erofs_create_pcluster_pool();

	if (err)
		goto out_error_pcluster_pool;

	z_erofs_workqueue = alloc_workqueue("erofs_worker",
			WQ_UNBOUND | WQ_HIGHPRI, num_possible_cpus());
	if (!z_erofs_workqueue) {
		err = -ENOMEM;
		goto out_error_workqueue_init;
	}

	err = erofs_init_percpu_workers();
	if (err)
		goto out_error_pcpu_worker;

	err = erofs_cpu_hotplug_init();
	if (err < 0)
		goto out_error_cpuhp_init;
	return err;

out_error_cpuhp_init:
	erofs_destroy_percpu_workers();
out_error_pcpu_worker:
	destroy_workqueue(z_erofs_workqueue);
out_error_workqueue_init:
	z_erofs_destroy_pcluster_pool();
out_error_pcluster_pool:
	return err;
}

enum z_erofs_pclustermode {
	Z_EROFS_PCLUSTER_INFLIGHT,
	/*
	 * a weak form of Z_EROFS_PCLUSTER_FOLLOWED; the difference is that it
	 * could be dispatched into the bypass queue later due to up-to-date
	 * managed pages.  All related online pages cannot be reused for
	 * inplace I/O (or bvpage) since it can be directly decoded without
	 * I/O submission.
	 */
	Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
	/*
	 * The current collection has been linked with the owned chain, and
	 * could also be linked with the remaining collections, which means
	 * if the processing page is the tail page of the collection, thus
	 * the current collection can safely use the whole page (since
	 * the previous collection is under control) for in-place I/O, as
	 * illustrated below:
	 *  ________________________________________________________________
	 * |  tail (partial) page |          head (partial) page           |
	 * |  (of the current cl) |      (of the previous collection)      |
	 * |                      |                                        |
	 * |__PCLUSTER_FOLLOWED___|___________PCLUSTER_FOLLOWED____________|
	 *
	 * [  (*) the above page can be used as inplace I/O.               ]
	 */
	Z_EROFS_PCLUSTER_FOLLOWED,
};

struct z_erofs_decompress_frontend {
	struct inode *const inode;
	struct erofs_map_blocks map;
	struct z_erofs_bvec_iter biter;

	struct page *pagepool;
	struct page *candidate_bvpage;
	struct z_erofs_pcluster *pcl;
	z_erofs_next_pcluster_t owned_head;
	enum z_erofs_pclustermode mode;

	/* used for applying cache strategy on the fly */
	bool backmost;
	erofs_off_t headoffset;

	/* a pointer used to pick up inplace I/O pages */
	unsigned int icur;
};

#define DECOMPRESS_FRONTEND_INIT(__i) { \
	.inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
	.mode = Z_EROFS_PCLUSTER_FOLLOWED, .backmost = true }

static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
{
	unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy;

	if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
		return false;

	if (fe->backmost)
		return true;

	if (cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
	    fe->map.m_la < fe->headoffset)
		return true;

	return false;
}

static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
{
	struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
	struct z_erofs_pcluster *pcl = fe->pcl;
	bool shouldalloc = z_erofs_should_alloc_cache(fe);
	bool standalone = true;
	/*
	 * optimistic allocation without direct reclaim since inplace I/O
	 * can be used if low memory otherwise.
	 */
	gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
			__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
	unsigned int i;

	if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
		return;

	for (i = 0; i < pcl->pclusterpages; ++i) {
		struct page *page;
		void *t;	/* mark pages just found for debugging */
		struct page *newpage = NULL;

		/* the compressed page was loaded before */
		if (READ_ONCE(pcl->compressed_bvecs[i].page))
			continue;

		page = find_get_page(mc, pcl->obj.index + i);

		if (page) {
			t = (void *)((unsigned long)page | 1);
		} else {
			/* I/O is needed, not possible to decompress directly */
			standalone = false;
			if (!shouldalloc)
				continue;

			/*
			 * try to use cached I/O if page allocation
			 * succeeds or fallback to in-place I/O instead
			 * to avoid any direct reclaim.
			 */
			newpage = erofs_allocpage(&fe->pagepool, gfp);
			if (!newpage)
				continue;
			set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
			t = (void *)((unsigned long)newpage | 1);
		}

		if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL, t))
			continue;

		if (page)
			put_page(page);
		else if (newpage)
			erofs_pagepool_add(&fe->pagepool, newpage);
	}

	/*
	 * don't do inplace I/O if all compressed pages are available in
	 * managed cache since it can be moved to the bypass queue instead.
	 */
	if (standalone)
		fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
}

/* called by erofs_shrinker to get rid of all compressed_pages */
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
				       struct erofs_workgroup *grp)
{
	struct z_erofs_pcluster *const pcl =
		container_of(grp, struct z_erofs_pcluster, obj);
	int i;

	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
	/*
	 * refcount of the workgroup is now frozen at 1,
	 * therefore no need to worry about available decompression users.
	 */
	for (i = 0; i < pcl->pclusterpages; ++i) {
		struct page *page = pcl->compressed_bvecs[i].page;

		if (!page)
			continue;

		/* block other users from reclaiming or migrating the page */
		if (!trylock_page(page))
			return -EBUSY;

		if (!erofs_page_is_managed(sbi, page))
			continue;

		/* barrier is implied in the following 'unlock_page' */
		WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
		detach_page_private(page);
		unlock_page(page);
	}
	return 0;
}

int erofs_try_to_free_cached_page(struct page *page)
{
	struct z_erofs_pcluster *const pcl = (void *)page_private(page);
	int ret, i;

	if (!erofs_workgroup_try_to_freeze(&pcl->obj, 1))
		return 0;

	ret = 0;
	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
	for (i = 0; i < pcl->pclusterpages; ++i) {
		if (pcl->compressed_bvecs[i].page == page) {
			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
			ret = 1;
			break;
		}
	}
	erofs_workgroup_unfreeze(&pcl->obj, 1);
	if (ret)
		detach_page_private(page);
	return ret;
}

static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
				   struct z_erofs_bvec *bvec)
{
	struct z_erofs_pcluster *const pcl = fe->pcl;

	while (fe->icur > 0) {
		if (!cmpxchg(&pcl->compressed_bvecs[--fe->icur].page,
			     NULL, bvec->page)) {
			pcl->compressed_bvecs[fe->icur] = *bvec;
			return true;
		}
	}
	return false;
}

/* callers must hold the pcluster lock */
static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
			       struct z_erofs_bvec *bvec, bool exclusive)
{
	int ret;

	if (exclusive) {
		/* give priority for inplaceio to use file pages first */
		if (z_erofs_try_inplace_io(fe, bvec))
			return 0;
		/* otherwise, check if it can be used as a bvpage */
		if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
		    !fe->candidate_bvpage)
			fe->candidate_bvpage = bvec->page;
	}
	ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage,
				   &fe->pagepool);
	fe->pcl->vcnt += (ret >= 0);
	return ret;
}

static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
{
	struct z_erofs_pcluster *pcl = f->pcl;
	z_erofs_next_pcluster_t *owned_head = &f->owned_head;

	/* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */
	if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
		    *owned_head) == Z_EROFS_PCLUSTER_NIL) {
		*owned_head = &pcl->next;
		/* so we can attach this pcluster to our submission chain. */
		f->mode = Z_EROFS_PCLUSTER_FOLLOWED;
		return;
	}

	/* type 2, it belongs to an ongoing chain */
	f->mode = Z_EROFS_PCLUSTER_INFLIGHT;
}

static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
{
	struct erofs_map_blocks *map = &fe->map;
	bool ztailpacking = map->m_flags & EROFS_MAP_META;
	struct z_erofs_pcluster *pcl;
	struct erofs_workgroup *grp;
	int err;

	if (!(map->m_flags & EROFS_MAP_ENCODED) ||
	    (!ztailpacking && !(map->m_pa >> PAGE_SHIFT))) {
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	/* no available pcluster, let's allocate one */
	pcl = z_erofs_alloc_pcluster(ztailpacking ? 1 :
				     map->m_plen >> PAGE_SHIFT);
	if (IS_ERR(pcl))
		return PTR_ERR(pcl);

	atomic_set(&pcl->obj.refcount, 1);
	pcl->algorithmformat = map->m_algorithmformat;
	pcl->length = 0;
	pcl->partial = true;

	/* new pclusters should be claimed as type 1, primary and followed */
	pcl->next = fe->owned_head;
	pcl->pageofs_out = map->m_la & ~PAGE_MASK;
	fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;

	/*
	 * lock all primary followed works before visible to others
	 * and mutex_trylock *never* fails for a new pcluster.
	 */
	mutex_init(&pcl->lock);
	DBG_BUGON(!mutex_trylock(&pcl->lock));

	if (ztailpacking) {
		pcl->obj.index = 0;	/* which indicates ztailpacking */
		pcl->pageofs_in = erofs_blkoff(fe->inode->i_sb, map->m_pa);
		pcl->tailpacking_size = map->m_plen;
	} else {
		pcl->obj.index = map->m_pa >> PAGE_SHIFT;

		grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj);
		if (IS_ERR(grp)) {
			err = PTR_ERR(grp);
			goto err_out;
		}

		if (grp != &pcl->obj) {
			fe->pcl = container_of(grp,
					struct z_erofs_pcluster, obj);
			err = -EEXIST;
			goto err_out;
		}
	}
	fe->owned_head = &pcl->next;
	fe->pcl = pcl;
	return 0;

err_out:
	mutex_unlock(&pcl->lock);
	z_erofs_free_pcluster(pcl);
	return err;
}

static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe)
{
	struct erofs_map_blocks *map = &fe->map;
	struct erofs_workgroup *grp = NULL;
	int ret;

	DBG_BUGON(fe->pcl);

	/* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */
	DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);

	if (!(map->m_flags & EROFS_MAP_META)) {
		grp = erofs_find_workgroup(fe->inode->i_sb,
					   map->m_pa >> PAGE_SHIFT);
	} else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	if (grp) {
		fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
		ret = -EEXIST;
	} else {
		ret = z_erofs_register_pcluster(fe);
	}

	if (ret == -EEXIST) {
		mutex_lock(&fe->pcl->lock);
		z_erofs_try_to_claim_pcluster(fe);
	} else if (ret) {
		return ret;
	}
	z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
				Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
	/* since file-backed online pages are traversed in reverse order */
	fe->icur = z_erofs_pclusterpages(fe->pcl);
	return 0;
}

/*
 * keep in mind that referenced pclusters are freed only after an RCU
 * grace period.
 */
static void z_erofs_rcu_callback(struct rcu_head *head)
{
	z_erofs_free_pcluster(container_of(head,
			struct z_erofs_pcluster, rcu));
}

void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
{
	struct z_erofs_pcluster *const pcl =
		container_of(grp, struct z_erofs_pcluster, obj);

	call_rcu(&pcl->rcu, z_erofs_rcu_callback);
}

static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
{
	struct z_erofs_pcluster *pcl = fe->pcl;

	if (!pcl)
		return false;

	z_erofs_bvec_iter_end(&fe->biter);
	mutex_unlock(&pcl->lock);

	if (fe->candidate_bvpage)
		fe->candidate_bvpage = NULL;

	/*
	 * if all pending pages are added, don't hold its reference
	 * any longer if the pcluster isn't hosted by ourselves.
	 */
	if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
		erofs_workgroup_put(&pcl->obj);

	fe->pcl = NULL;
	return true;
}

static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos,
				 struct page *page, unsigned int pageofs,
				 unsigned int len)
{
	struct super_block *sb = inode->i_sb;
	struct inode *packed_inode = EROFS_I_SB(inode)->packed_inode;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	u8 *src, *dst;
	unsigned int i, cnt;

	if (!packed_inode)
		return -EFSCORRUPTED;

	buf.inode = packed_inode;
	pos += EROFS_I(inode)->z_fragmentoff;
	for (i = 0; i < len; i += cnt) {
		cnt = min_t(unsigned int, len - i,
			    sb->s_blocksize - erofs_blkoff(sb, pos));
		src = erofs_bread(&buf, erofs_blknr(sb, pos), EROFS_KMAP);
		if (IS_ERR(src)) {
			erofs_put_metabuf(&buf);
			return PTR_ERR(src);
		}

		dst = kmap_local_page(page);
		memcpy(dst + pageofs + i, src + erofs_blkoff(sb, pos), cnt);
		kunmap_local(dst);
		pos += cnt;
	}
	erofs_put_metabuf(&buf);
	return 0;
}
static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
				struct page *page)
{
	struct inode *const inode = fe->inode;
	struct erofs_map_blocks *const map = &fe->map;
	const loff_t offset = page_offset(page);
	bool tight = true, exclusive;
	unsigned int cur, end, spiltted;
	int err = 0;

	/* register locked file pages as online pages in pack */
	z_erofs_onlinepage_init(page);

	spiltted = 0;
	end = PAGE_SIZE;
repeat:
	cur = end - 1;

	if (offset + cur < map->m_la ||
	    offset + cur >= map->m_la + map->m_llen) {
		if (z_erofs_collector_end(fe))
			fe->backmost = false;
		map->m_la = offset + cur;
		map->m_llen = 0;
		err = z_erofs_map_blocks_iter(inode, map, 0);
		if (err)
			goto out;
	} else {
		if (fe->pcl)
			goto hitted;
		/* didn't get a valid pcluster previously (very rare) */
	}

	if (!(map->m_flags & EROFS_MAP_MAPPED) ||
	    map->m_flags & EROFS_MAP_FRAGMENT)
		goto hitted;

	err = z_erofs_collector_begin(fe);
	if (err)
		goto out;

	if (z_erofs_is_inline_pcluster(fe->pcl)) {
		void *mp;

		mp = erofs_read_metabuf(&fe->map.buf, inode->i_sb,
					erofs_blknr(inode->i_sb, map->m_pa),
					EROFS_NO_KMAP);
		if (IS_ERR(mp)) {
			err = PTR_ERR(mp);
			erofs_err(inode->i_sb,
				  "failed to get inline page, err %d", err);
			goto out;
		}
		get_page(fe->map.buf.page);
		WRITE_ONCE(fe->pcl->compressed_bvecs[0].page,
			   fe->map.buf.page);
		fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
	} else {
		/* bind cache first when cached decompression is preferred */
		z_erofs_bind_cache(fe);
	}
hitted:
	/*
	 * Ensure the current partial page belongs to this submit chain rather
	 * than other concurrent submit chains or the noio(bypass) chain since
	 * those chains are handled asynchronously thus the page cannot be used
	 * for inplace I/O or bvpage (should be processed in a strict order.)
	 */
	tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);

	cur = end - min_t(unsigned int, offset + end - map->m_la, end);
	if (!(map->m_flags & EROFS_MAP_MAPPED)) {
		zero_user_segment(page, cur, end);
		goto next_part;
	}
	if (map->m_flags & EROFS_MAP_FRAGMENT) {
		unsigned int pageofs, skip, len;

		if (offset > map->m_la) {
			pageofs = 0;
			skip = offset - map->m_la;
		} else {
			pageofs = map->m_la & ~PAGE_MASK;
			skip = 0;
		}
		len = min_t(unsigned int, map->m_llen - skip, end - cur);
		err = z_erofs_read_fragment(inode, skip, page, pageofs, len);
		if (err)
			goto out;
		++spiltted;
		tight = false;
		goto next_part;
	}

	exclusive = (!cur && (!spiltted || tight));
	if (cur)
		tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED);

	err = z_erofs_attach_page(fe, &((struct z_erofs_bvec) {
					.page = page,
					.offset = offset - map->m_la,
					.end = end,
				  }), exclusive);
	if (err)
		goto out;

	z_erofs_onlinepage_split(page);
	/* bump up the number of spiltted parts of a page */
	++spiltted;
	if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
		fe->pcl->multibases = true;
	if (fe->pcl->length < offset + end - map->m_la) {
		fe->pcl->length = offset + end - map->m_la;
		fe->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
	}
	if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
	    !(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
	    fe->pcl->length == map->m_llen)
		fe->pcl->partial = false;
next_part:
	/* shorten the remaining extent to update progress */
	map->m_llen = offset + cur - map->m_la;
	map->m_flags &= ~EROFS_MAP_FULL_MAPPED;

	end = cur;
	if (end > 0)
		goto repeat;

out:
	if (err)
		z_erofs_page_mark_eio(page);
	z_erofs_onlinepage_endio(page);
	return err;
}

static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
				       unsigned int readahead_pages)
{
	/* auto: enable for read_folio, disable for readahead */
	if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) &&
	    !readahead_pages)
		return true;

	if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON) &&
	    (readahead_pages <= sbi->opt.max_sync_decompress_pages))
		return true;

	return false;
}

static bool z_erofs_page_is_invalidated(struct page *page)
{
	return !page->mapping && !z_erofs_is_shortlived_page(page);
}

struct z_erofs_decompress_backend {
	struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
	struct super_block *sb;
	struct z_erofs_pcluster *pcl;

	/* pages with the longest decompressed length for deduplication */
	struct page **decompressed_pages;
	/* pages to keep the compressed data */
	struct page **compressed_pages;

	struct list_head decompressed_secondary_bvecs;
	struct page **pagepool;
	unsigned int onstack_used, nr_pages;
};

struct z_erofs_bvec_item {
	struct z_erofs_bvec bvec;
	struct list_head list;
};
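/*
 * Illustrative note (added commentary, not from the original source): for each
 * decoded bvec, z_erofs_do_decompressed_bvec() installs the page straight into
 * be->decompressed_pages[] when the bvec starts page-aligned in the output and
 * that slot is still empty.  Otherwise the bvec is parked on
 * decompressed_secondary_bvecs, and z_erofs_fill_other_copies() later copies
 * the overlapping ranges out of the primary pages (the cold path when one
 * pcluster is requested multiple times).
 */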
static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
					 struct z_erofs_bvec *bvec)
{
	struct z_erofs_bvec_item *item;

	if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK)) {
		unsigned int pgnr;

		pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
		DBG_BUGON(pgnr >= be->nr_pages);
		if (!be->decompressed_pages[pgnr]) {
			be->decompressed_pages[pgnr] = bvec->page;
			return;
		}
	}

	/* (cold path) one pcluster is requested multiple times */
	item = kmalloc(sizeof(*item), GFP_KERNEL | __GFP_NOFAIL);
	item->bvec = *bvec;
	list_add(&item->list, &be->decompressed_secondary_bvecs);
}

static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
				      int err)
{
	unsigned int off0 = be->pcl->pageofs_out;
	struct list_head *p, *n;

	list_for_each_safe(p, n, &be->decompressed_secondary_bvecs) {
		struct z_erofs_bvec_item *bvi;
		unsigned int end, cur;
		void *dst, *src;

		bvi = container_of(p, struct z_erofs_bvec_item, list);
		cur = bvi->bvec.offset < 0 ? -bvi->bvec.offset : 0;
		end = min_t(unsigned int, be->pcl->length - bvi->bvec.offset,
			    bvi->bvec.end);
		dst = kmap_local_page(bvi->bvec.page);
		while (cur < end) {
			unsigned int pgnr, scur, len;

			pgnr = (bvi->bvec.offset + cur + off0) >> PAGE_SHIFT;
			DBG_BUGON(pgnr >= be->nr_pages);

			scur = bvi->bvec.offset + cur -
					((pgnr << PAGE_SHIFT) - off0);
			len = min_t(unsigned int, end - cur, PAGE_SIZE - scur);
			if (!be->decompressed_pages[pgnr]) {
				err = -EFSCORRUPTED;
				cur += len;
				continue;
			}
			src = kmap_local_page(be->decompressed_pages[pgnr]);
			memcpy(dst + cur, src + scur, len);
			kunmap_local(src);
			cur += len;
		}
		kunmap_local(dst);
		if (err)
			z_erofs_page_mark_eio(bvi->bvec.page);
		z_erofs_onlinepage_endio(bvi->bvec.page);
		list_del(p);
		kfree(bvi);
	}
}

static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
{
	struct z_erofs_pcluster *pcl = be->pcl;
	struct z_erofs_bvec_iter biter;
	struct page *old_bvpage;
	int i;

	z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0);
	for (i = 0; i < pcl->vcnt; ++i) {
		struct z_erofs_bvec bvec;

		z_erofs_bvec_dequeue(&biter, &bvec, &old_bvpage);

		if (old_bvpage)
			z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);

		DBG_BUGON(z_erofs_page_is_invalidated(bvec.page));
		z_erofs_do_decompressed_bvec(be, &bvec);
	}

	old_bvpage = z_erofs_bvec_iter_end(&biter);
	if (old_bvpage)
		z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
}

static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
				  bool *overlapped)
{
	struct z_erofs_pcluster *pcl = be->pcl;
	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
	int i, err = 0;

	*overlapped = false;
	for (i = 0; i < pclusterpages; ++i) {
		struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i];
		struct page *page = bvec->page;

		/* compressed pages ought to be present before decompressing */
		if (!page) {
			DBG_BUGON(1);
			continue;
		}
		be->compressed_pages[i] = page;

		if (z_erofs_is_inline_pcluster(pcl)) {
			if (!PageUptodate(page))
				err = -EIO;
			continue;
		}

		DBG_BUGON(z_erofs_page_is_invalidated(page));
		if (!z_erofs_is_shortlived_page(page)) {
			if (erofs_page_is_managed(EROFS_SB(be->sb), page)) {
				if (!PageUptodate(page))
					err = -EIO;
				continue;
			}
			z_erofs_do_decompressed_bvec(be, bvec);
			*overlapped = true;
		}
	}

	if (err)
		return err;
	return 0;
}

static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
				       int err)
{
	struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
	struct z_erofs_pcluster *pcl = be->pcl;
	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
	const struct z_erofs_decompressor *decompressor =
				&erofs_decompressors[pcl->algorithmformat];
	unsigned int i, inputsize;
	int err2;
	struct page *page;
	bool overlapped;

	mutex_lock(&pcl->lock);
	be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT;

	/* allocate (de)compressed page arrays if cannot be kept on stack */
	be->decompressed_pages = NULL;
	be->compressed_pages = NULL;
	be->onstack_used = 0;
	if (be->nr_pages <= Z_EROFS_ONSTACK_PAGES) {
		be->decompressed_pages = be->onstack_pages;
		be->onstack_used = be->nr_pages;
		memset(be->decompressed_pages, 0,
		       sizeof(struct page *) * be->nr_pages);
	}

	if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES)
		be->compressed_pages = be->onstack_pages + be->onstack_used;

	if (!be->decompressed_pages)
		be->decompressed_pages =
			kvcalloc(be->nr_pages, sizeof(struct page *),
				 GFP_KERNEL | __GFP_NOFAIL);
	if (!be->compressed_pages)
		be->compressed_pages =
			kvcalloc(pclusterpages, sizeof(struct page *),
				 GFP_KERNEL | __GFP_NOFAIL);

	z_erofs_parse_out_bvecs(be);
	err2 = z_erofs_parse_in_bvecs(be, &overlapped);
	if (err2)
		err = err2;
	if (err)
		goto out;

	if (z_erofs_is_inline_pcluster(pcl))
		inputsize = pcl->tailpacking_size;
	else
		inputsize = pclusterpages * PAGE_SIZE;

	err = decompressor->decompress(&(struct z_erofs_decompress_req) {
					.sb = be->sb,
					.in = be->compressed_pages,
					.out = be->decompressed_pages,
					.pageofs_in = pcl->pageofs_in,
					.pageofs_out = pcl->pageofs_out,
					.inputsize = inputsize,
					.outputsize = pcl->length,
					.alg = pcl->algorithmformat,
					.inplace_io = overlapped,
					.partial_decoding = pcl->partial,
					.fillgaps = pcl->multibases,
				 }, be->pagepool);

out:
	/* must handle all compressed pages before actual file pages */
	if (z_erofs_is_inline_pcluster(pcl)) {
		page = pcl->compressed_bvecs[0].page;
		WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
		put_page(page);
	} else {
		for (i = 0; i < pclusterpages; ++i) {
			page = pcl->compressed_bvecs[i].page;

			if (erofs_page_is_managed(sbi, page))
				continue;

			/* recycle all individual short-lived pages */
			(void)z_erofs_put_shortlivedpage(be->pagepool, page);
			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
		}
	}
	if (be->compressed_pages < be->onstack_pages ||
	    be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
		kvfree(be->compressed_pages);
	z_erofs_fill_other_copies(be, err);

	for (i = 0; i < be->nr_pages; ++i) {
		page = be->decompressed_pages[i];
		if (!page)
			continue;

		DBG_BUGON(z_erofs_page_is_invalidated(page));

		/* recycle all individual short-lived pages */
		if (z_erofs_put_shortlivedpage(be->pagepool, page))
			continue;
		if (err)
			z_erofs_page_mark_eio(page);
		z_erofs_onlinepage_endio(page);
	}

	if (be->decompressed_pages != be->onstack_pages)
		kvfree(be->decompressed_pages);

	pcl->length = 0;
	pcl->partial = true;
	pcl->multibases = false;
	pcl->bvset.nextpage = NULL;
	pcl->vcnt = 0;

	/* pcluster lock MUST be taken before the following line */
	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
	mutex_unlock(&pcl->lock);
	return err;
}

static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
				     struct page **pagepool)
{
	struct z_erofs_decompress_backend be = {
		.sb = io->sb,
		.pagepool = pagepool,
		.decompressed_secondary_bvecs =
			LIST_HEAD_INIT(be.decompressed_secondary_bvecs),
	};
	z_erofs_next_pcluster_t owned = io->head;

	while (owned != Z_EROFS_PCLUSTER_TAIL) {
		DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);

		be.pcl = container_of(owned, struct z_erofs_pcluster, next);
		owned = READ_ONCE(be.pcl->next);

		z_erofs_decompress_pcluster(&be, io->eio ? -EIO : 0);
		erofs_workgroup_put(&be.pcl->obj);
	}
}

static void z_erofs_decompressqueue_work(struct work_struct *work)
{
	struct z_erofs_decompressqueue *bgq =
		container_of(work, struct z_erofs_decompressqueue, u.work);
	struct page *pagepool = NULL;

	DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL);
	z_erofs_decompress_queue(bgq, &pagepool);
	erofs_release_pages(&pagepool);
	kvfree(bgq);
}

#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
static void z_erofs_decompressqueue_kthread_work(struct kthread_work *work)
{
	z_erofs_decompressqueue_work((struct work_struct *)work);
}
#endif

static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
				       int bios)
{
	struct erofs_sb_info *const sbi = EROFS_SB(io->sb);

	/* wake up the caller thread for sync decompression */
	if (io->sync) {
		if (!atomic_add_return(bios, &io->pending_bios))
			complete(&io->u.done);
		return;
	}

	if (atomic_add_return(bios, &io->pending_bios))
		return;
	/* Use (kthread_)work and sync decompression for atomic contexts only */
	if (in_atomic() || irqs_disabled()) {
#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
		struct kthread_worker *worker;

		rcu_read_lock();
		worker = rcu_dereference(
				z_erofs_pcpu_workers[raw_smp_processor_id()]);
		if (!worker) {
			INIT_WORK(&io->u.work, z_erofs_decompressqueue_work);
			queue_work(z_erofs_workqueue, &io->u.work);
		} else {
			kthread_queue_work(worker, &io->u.kthread_work);
		}
		rcu_read_unlock();
#else
		queue_work(z_erofs_workqueue, &io->u.work);
#endif
		/* enable sync decompression for readahead */
		if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
			sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
		return;
	}
	z_erofs_decompressqueue_work(&io->u.work);
}
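/*
 * Illustrative note (added commentary, not from the original source): slots of
 * compressed_bvecs[] filled by z_erofs_bind_cache() carry a tag in bit 0 of
 * the page pointer ("justfound"), and preallocated pages are additionally
 * marked with Z_EROFS_PREALLOCATED_PAGE in page->private.
 * pickup_page_for_submission() below strips that tag and decides whether the
 * page can be reused from the managed cache, needs to be read via bio, or has
 * to be replaced by a newly allocated (short-lived or cache-managed) page.
 */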
static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
					       unsigned int nr,
					       struct page **pagepool,
					       struct address_space *mc)
{
	const pgoff_t index = pcl->obj.index;
	gfp_t gfp = mapping_gfp_mask(mc);
	bool tocache = false;

	struct address_space *mapping;
	struct page *oldpage, *page;
	int justfound;

repeat:
	page = READ_ONCE(pcl->compressed_bvecs[nr].page);
	oldpage = page;

	if (!page)
		goto out_allocpage;

	justfound = (unsigned long)page & 1UL;
	page = (struct page *)((unsigned long)page & ~1UL);

	/*
	 * preallocated cached pages, which are used to avoid direct reclaim;
	 * otherwise, the inplace I/O path will be taken instead.
	 */
	if (page->private == Z_EROFS_PREALLOCATED_PAGE) {
		WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);
		set_page_private(page, 0);
		tocache = true;
		goto out_tocache;
	}
	mapping = READ_ONCE(page->mapping);

	/*
	 * file-backed online pages in the pcluster are all locked steady,
	 * therefore it is impossible for `mapping' to be NULL.
	 */
	if (mapping && mapping != mc)
		/* ought to be unmanaged pages */
		goto out;

	/* directly return for shortlived page as well */
	if (z_erofs_is_shortlived_page(page))
		goto out;

	lock_page(page);

	/* only true if page reclaim goes wrong, should never happen */
	DBG_BUGON(justfound && PagePrivate(page));

	/* the page is still in the managed cache */
	if (page->mapping == mc) {
		WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);

		if (!PagePrivate(page)) {
			/*
			 * impossible to be !PagePrivate(page) for
			 * the current restriction as well if
			 * the page is already in compressed_bvecs[].
			 */
			DBG_BUGON(!justfound);

			justfound = 0;
			set_page_private(page, (unsigned long)pcl);
			SetPagePrivate(page);
		}

		/* no need to submit io if it is already up-to-date */
		if (PageUptodate(page)) {
			unlock_page(page);
			page = NULL;
		}
		goto out;
	}

	/*
	 * the managed page has been truncated, it's unsafe to
	 * reuse this one, let's allocate a new cache-managed page.
	 */
	DBG_BUGON(page->mapping);
	DBG_BUGON(!justfound);

	tocache = true;
	unlock_page(page);
	put_page(page);
out_allocpage:
	page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
	if (oldpage != cmpxchg(&pcl->compressed_bvecs[nr].page,
			       oldpage, page)) {
		erofs_pagepool_add(pagepool, page);
		cond_resched();
		goto repeat;
	}
out_tocache:
	if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
		/* turn into temporary page if fails (1 ref) */
		set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);
		goto out;
	}
	attach_page_private(page, pcl);
	/* drop a refcount added by allocpage (then we have 2 refs here) */
	put_page(page);

out:	/* the only exit (for tracing and debugging) */
	return page;
}

static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb,
			      struct z_erofs_decompressqueue *fgq, bool *fg)
{
	struct z_erofs_decompressqueue *q;

	if (fg && !*fg) {
		q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
		if (!q) {
			*fg = true;
			goto fg_out;
		}
#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
		kthread_init_work(&q->u.kthread_work,
				  z_erofs_decompressqueue_kthread_work);
#else
		INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
#endif
	} else {
fg_out:
		q = fgq;
		init_completion(&fgq->u.done);
		atomic_set(&fgq->pending_bios, 0);
		q->eio = false;
		q->sync = true;
	}
	q->sb = sb;
	q->head = Z_EROFS_PCLUSTER_TAIL;
	return q;
}

/* define decompression jobqueue types */
enum {
	JQ_BYPASS,
	JQ_SUBMIT,
	NR_JOBQUEUES,
};

static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
				    z_erofs_next_pcluster_t qtail[],
				    z_erofs_next_pcluster_t owned_head)
{
	z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
	z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];

	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL);

	WRITE_ONCE(*submit_qtail, owned_head);
	WRITE_ONCE(*bypass_qtail, &pcl->next);

	qtail[JQ_BYPASS] = &pcl->next;
}

static void z_erofs_decompressqueue_endio(struct bio *bio)
{
	struct z_erofs_decompressqueue *q = bio->bi_private;
	blk_status_t err = bio->bi_status;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		DBG_BUGON(PageUptodate(page));
		DBG_BUGON(z_erofs_page_is_invalidated(page));

		if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
			if (!err)
				SetPageUptodate(page);
			unlock_page(page);
		}
	}
	if (err)
		q->eio = true;
	z_erofs_decompress_kickoff(q, -1);
	bio_put(bio);
}
static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
				 struct z_erofs_decompressqueue *fgq,
				 bool *force_fg, bool readahead)
{
	struct super_block *sb = f->inode->i_sb;
	struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
	struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
	z_erofs_next_pcluster_t owned_head = f->owned_head;
	/* bio is NULL initially, so no need to initialize last_{index,bdev} */
	pgoff_t last_index;
	struct block_device *last_bdev;
	unsigned int nr_bios = 0;
	struct bio *bio = NULL;
	unsigned long pflags;
	int memstall = 0;

	/*
	 * if managed cache is enabled, bypass jobqueue is needed,
	 * no need to read from device for all pclusters in this queue.
	 */
	q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
	q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, force_fg);

	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;

	/* by default, all need io submission */
	q[JQ_SUBMIT]->head = owned_head;

	do {
		struct erofs_map_dev mdev;
		struct z_erofs_pcluster *pcl;
		pgoff_t cur, end;
		unsigned int i = 0;
		bool bypass = true;

		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
		pcl = container_of(owned_head, struct z_erofs_pcluster, next);
		owned_head = READ_ONCE(pcl->next);

		if (z_erofs_is_inline_pcluster(pcl)) {
			move_to_bypass_jobqueue(pcl, qtail, owned_head);
			continue;
		}

		/* no device id here, thus it will always succeed */
		mdev = (struct erofs_map_dev) {
			.m_pa = erofs_pos(sb, pcl->obj.index),
		};
		(void)erofs_map_dev(sb, &mdev);

		cur = erofs_blknr(sb, mdev.m_pa);
		end = cur + pcl->pclusterpages;

		do {
			struct page *page;

			page = pickup_page_for_submission(pcl, i++,
					&f->pagepool, mc);
			if (!page)
				continue;

			if (bio && (cur != last_index + 1 ||
				    last_bdev != mdev.m_bdev)) {
submit_bio_retry:
				submit_bio(bio);
				if (memstall) {
					psi_memstall_leave(&pflags);
					memstall = 0;
				}
				bio = NULL;
			}

			if (unlikely(PageWorkingset(page)) && !memstall) {
				psi_memstall_enter(&pflags);
				memstall = 1;
			}

			if (!bio) {
				bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
						REQ_OP_READ, GFP_NOIO);
				bio->bi_end_io = z_erofs_decompressqueue_endio;

				last_bdev = mdev.m_bdev;
				bio->bi_iter.bi_sector = (sector_t)cur <<
					(sb->s_blocksize_bits - 9);
				bio->bi_private = q[JQ_SUBMIT];
				if (readahead)
					bio->bi_opf |= REQ_RAHEAD;
				++nr_bios;
			}

			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				goto submit_bio_retry;

			last_index = cur;
			bypass = false;
		} while (++cur < end);

		if (!bypass)
			qtail[JQ_SUBMIT] = &pcl->next;
		else
			move_to_bypass_jobqueue(pcl, qtail, owned_head);
	} while (owned_head != Z_EROFS_PCLUSTER_TAIL);

	if (bio) {
		submit_bio(bio);
		if (memstall)
			psi_memstall_leave(&pflags);
	}

	/*
	 * although background decompression is preferred, nothing is pending
	 * for submission; don't issue the decompression work but drop it
	 * directly instead.
	 */
	if (!*force_fg && !nr_bios) {
		kvfree(q[JQ_SUBMIT]);
		return;
	}
	z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios);
}

static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
			     bool force_fg, bool ra)
{
	struct z_erofs_decompressqueue io[NR_JOBQUEUES];

	if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
		return;
	z_erofs_submit_queue(f, io, &force_fg, ra);

	/* handle bypass queue (no i/o pclusters) immediately */
	z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);

	if (!force_fg)
		return;

	/* wait until all bios are completed */
	wait_for_completion_io(&io[JQ_SUBMIT].u.done);

	/* handle synchronous decompress queue in the caller context */
	z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool);
}

/*
 * Since partial uptodate is still unimplemented for now, we have to use
 * approximate readmore strategies as a start.
 */
static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
		struct readahead_control *rac, bool backmost)
{
	struct inode *inode = f->inode;
	struct erofs_map_blocks *map = &f->map;
	erofs_off_t cur, end, headoffset = f->headoffset;
	int err;

	if (backmost) {
		if (rac)
			end = headoffset + readahead_length(rac) - 1;
		else
			end = headoffset + PAGE_SIZE - 1;
		map->m_la = end;
		err = z_erofs_map_blocks_iter(inode, map,
					      EROFS_GET_BLOCKS_READMORE);
		if (err)
			return;

		/* expand ra for the trailing edge if readahead */
		if (rac) {
			cur = round_up(map->m_la + map->m_llen, PAGE_SIZE);
			readahead_expand(rac, headoffset, cur - headoffset);
			return;
		}
		end = round_up(end, PAGE_SIZE);
	} else {
		end = round_up(map->m_la, PAGE_SIZE);

		if (!map->m_llen)
			return;
	}

	cur = map->m_la + map->m_llen - 1;
	while (cur >= end) {
		pgoff_t index = cur >> PAGE_SHIFT;
		struct page *page;

		page = erofs_grab_cache_page_nowait(inode->i_mapping, index);
		if (page) {
			if (PageUptodate(page)) {
				unlock_page(page);
			} else {
				err = z_erofs_do_read_page(f, page);
				if (err)
					erofs_err(inode->i_sb,
						  "readmore error at page %lu @ nid %llu",
						  index, EROFS_I(inode)->nid);
			}
			put_page(page);
		}

		if (cur < PAGE_SIZE)
			break;
		cur = (index << PAGE_SHIFT) - 1;
	}
}

static int z_erofs_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;
	struct inode *const inode = page->mapping->host;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
	int err;

	trace_erofs_readpage(page, false);
	f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;

	z_erofs_pcluster_readmore(&f, NULL, true);
	err = z_erofs_do_read_page(&f, page);
	z_erofs_pcluster_readmore(&f, NULL, false);
	(void)z_erofs_collector_end(&f);

	/* if some compressed clusters are ready, they need to be submitted anyway */
	z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, 0), false);

	if (err)
		erofs_err(inode->i_sb, "failed to read, err [%d]", err);

	erofs_put_metabuf(&f.map.buf);
	erofs_release_pages(&f.pagepool);
	return err;
}

static void z_erofs_readahead(struct readahead_control *rac)
{
	struct inode *const inode = rac->mapping->host;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
	struct page *head = NULL, *page;
	unsigned int nr_pages;

	f.headoffset = readahead_pos(rac);

	z_erofs_pcluster_readmore(&f, rac, true);
	nr_pages = readahead_count(rac);
	trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);

	while ((page = readahead_page(rac))) {
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	while (head) {
		struct page *page = head;
		int err;

		/* traversal in reverse order */
		head = (void *)page_private(page);

		err = z_erofs_do_read_page(&f, page);
		if (err)
			erofs_err(inode->i_sb,
				  "readahead error at page %lu @ nid %llu",
				  page->index, EROFS_I(inode)->nid);
		put_page(page);
	}
	z_erofs_pcluster_readmore(&f, rac, false);
	(void)z_erofs_collector_end(&f);

	z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, nr_pages), true);
	erofs_put_metabuf(&f.map.buf);
	erofs_release_pages(&f.pagepool);
}

const struct address_space_operations z_erofs_aops = {
	.read_folio = z_erofs_read_folio,
	.readahead = z_erofs_readahead,
};