// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "zdata.h"
#include "compress.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>

/*
 * a compressed_pages[] placeholder in order to avoid
 * being filled with file pages for in-place decompression.
 */
#define PAGE_UNALLOCATED     ((void *)0x5F0E4B1D)

/* how to allocate cached pages for a pcluster */
enum z_erofs_cache_alloctype {
	DONTALLOC,	/* don't allocate any cached pages */
	DELAYEDALLOC,	/* delayed allocation (at the time of submitting io) */
};

/*
 * tagged pointer with 1-bit tag for all compressed pages
 * tag 0 - the page is just found with an extra page reference
 */
typedef tagptr1_t compressed_page_t;

#define tag_compressed_page_justfound(page) \
	tagptr_fold(compressed_page_t, page, 1)

static struct workqueue_struct *z_erofs_workqueue __read_mostly;
static struct kmem_cache *pcluster_cachep __read_mostly;

void z_erofs_exit_zip_subsystem(void)
{
	destroy_workqueue(z_erofs_workqueue);
	kmem_cache_destroy(pcluster_cachep);
}

static inline int z_erofs_init_workqueue(void)
{
	const unsigned int onlinecpus = num_possible_cpus();

	/*
	 * no need to spawn too many threads, limiting threads could minimize
	 * scheduling overhead, perhaps per-CPU threads should be better?
	 */
	z_erofs_workqueue = alloc_workqueue("erofs_unzipd",
					    WQ_UNBOUND | WQ_HIGHPRI,
					    onlinecpus + onlinecpus / 4);
	return z_erofs_workqueue ? 0 : -ENOMEM;
}

static void z_erofs_pcluster_init_once(void *ptr)
{
	struct z_erofs_pcluster *pcl = ptr;
	struct z_erofs_collection *cl = z_erofs_primarycollection(pcl);
	unsigned int i;

	mutex_init(&cl->lock);
	cl->nr_pages = 0;
	cl->vcnt = 0;
	for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i)
		pcl->compressed_pages[i] = NULL;
}

int __init z_erofs_init_zip_subsystem(void)
{
	pcluster_cachep = kmem_cache_create("erofs_compress",
					    Z_EROFS_WORKGROUP_SIZE, 0,
					    SLAB_RECLAIM_ACCOUNT,
					    z_erofs_pcluster_init_once);
	if (pcluster_cachep) {
		if (!z_erofs_init_workqueue())
			return 0;

		kmem_cache_destroy(pcluster_cachep);
	}
	return -ENOMEM;
}

enum z_erofs_collectmode {
	COLLECT_SECONDARY,
	COLLECT_PRIMARY,
	/*
	 * The current collection was the tail of an existing chain, and the
	 * previously processed chained collections are all decided to be
	 * hooked up to it.
	 * A new chain will be created for the remaining collections which are
	 * not processed yet, therefore different from COLLECT_PRIMARY_FOLLOWED,
	 * the next collection cannot reuse the whole page safely in
	 * the following scenario:
	 *  ________________________________________________________________
	 * |      tail (partial) page     |       head (partial) page       |
	 * |   (belongs to the next cl)   |   (belongs to the current cl)   |
	 * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
	 */
	COLLECT_PRIMARY_HOOKED,
	COLLECT_PRIMARY_FOLLOWED_NOINPLACE,
	/*
	 * The current collection has been linked with the owned chain, and
	 * could also be linked with the remaining collections, which means
	 * that if the processing page is the tail page of the collection,
	 * the current collection can safely use the whole page (since
	 * the previous collection is under control) for in-place I/O, as
	 * illustrated below:
	 *  ________________________________________________________________
	 * |  tail (partial) page |          head (partial) page           |
	 * |  (of the current cl) |      (of the previous collection)      |
	 * |  PRIMARY_FOLLOWED or |                                        |
	 * |_____PRIMARY_HOOKED___|____________PRIMARY_FOLLOWED____________|
	 *
	 * [  (*) the above page can be used as inplace I/O.               ]
	 */
	COLLECT_PRIMARY_FOLLOWED,
};

struct z_erofs_collector {
	struct z_erofs_pagevec_ctor vector;

	struct z_erofs_pcluster *pcl, *tailpcl;
	struct z_erofs_collection *cl;
	struct page **compressedpages;
	z_erofs_next_pcluster_t owned_head;

	enum z_erofs_collectmode mode;
};

struct z_erofs_decompress_frontend {
	struct inode *const inode;

	struct z_erofs_collector clt;
	struct erofs_map_blocks map;

	/* used for applying cache strategy on the fly */
	bool backmost;
	erofs_off_t headoffset;
};

#define COLLECTOR_INIT() { \
	.owned_head = Z_EROFS_PCLUSTER_TAIL, \
	.mode = COLLECT_PRIMARY_FOLLOWED }

#define DECOMPRESS_FRONTEND_INIT(__i) { \
	.inode = __i, .clt = COLLECTOR_INIT(), \
	.backmost = true, }

static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
static DEFINE_MUTEX(z_pagemap_global_lock);

static void preload_compressed_pages(struct z_erofs_collector *clt,
				     struct address_space *mc,
				     enum z_erofs_cache_alloctype type)
{
	const struct z_erofs_pcluster *pcl = clt->pcl;
	const unsigned int clusterpages = BIT(pcl->clusterbits);
	struct page **pages = clt->compressedpages;
	pgoff_t index = pcl->obj.index + (pages - pcl->compressed_pages);
	bool standalone = true;

	if (clt->mode < COLLECT_PRIMARY_FOLLOWED)
		return;

	for (; pages < pcl->compressed_pages + clusterpages; ++pages) {
		struct page *page;
		compressed_page_t t;

		/* the compressed page was loaded before */
		if (READ_ONCE(*pages))
			continue;

		page = find_get_page(mc, index);

		if (page) {
			t = tag_compressed_page_justfound(page);
		} else if (type == DELAYEDALLOC) {
			t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED);
		} else {	/* DONTALLOC */
			if (standalone)
				clt->compressedpages = pages;
			standalone = false;
			continue;
		}

		if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t)))
			continue;

		if (page)
			put_page(page);
	}

	if (standalone)		/* downgrade to PRIMARY_FOLLOWED_NOINPLACE */
		clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
}

/* called by erofs_shrinker to get rid of all compressed_pages */
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
				       struct erofs_workgroup *grp)
{
	struct z_erofs_pcluster *const pcl =
		container_of(grp, struct z_erofs_pcluster, obj);
	struct address_space *const mapping = MNGD_MAPPING(sbi);
	const unsigned int clusterpages = BIT(pcl->clusterbits);
	int i;

	/*
	 * refcount of the workgroup is now frozen at 1,
	 * therefore no need to worry about available decompression users.
	 */
	for (i = 0; i < clusterpages; ++i) {
		struct page *page = pcl->compressed_pages[i];

		if (!page)
			continue;

		/* block other users from reclaiming or migrating the page */
		if (!trylock_page(page))
			return -EBUSY;

		if (page->mapping != mapping)
			continue;

		/* barrier is implied in the following 'unlock_page' */
		WRITE_ONCE(pcl->compressed_pages[i], NULL);
		set_page_private(page, 0);
		ClearPagePrivate(page);

		unlock_page(page);
		put_page(page);
	}
	return 0;
}

int erofs_try_to_free_cached_page(struct address_space *mapping,
				  struct page *page)
{
	struct z_erofs_pcluster *const pcl = (void *)page_private(page);
	const unsigned int clusterpages = BIT(pcl->clusterbits);
	int ret = 0;	/* 0 - busy */

	if (erofs_workgroup_try_to_freeze(&pcl->obj, 1)) {
		unsigned int i;

		for (i = 0; i < clusterpages; ++i) {
			if (pcl->compressed_pages[i] == page) {
				WRITE_ONCE(pcl->compressed_pages[i], NULL);
				ret = 1;
				break;
			}
		}
		erofs_workgroup_unfreeze(&pcl->obj, 1);

		if (ret) {
			ClearPagePrivate(page);
			put_page(page);
		}
	}
	return ret;
}

/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
static inline bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
					  struct page *page)
{
	struct z_erofs_pcluster *const pcl = clt->pcl;
	const unsigned int clusterpages = BIT(pcl->clusterbits);

	while (clt->compressedpages < pcl->compressed_pages + clusterpages) {
		if (!cmpxchg(clt->compressedpages++, NULL, page))
			return true;
	}
	return false;
}

/* callers must hold the collection lock */
static int z_erofs_attach_page(struct z_erofs_collector *clt,
			       struct page *page,
			       enum z_erofs_page_type type)
{
	int ret;
	bool occupied;

	/* give priority to in-place I/O */
	if (clt->mode >= COLLECT_PRIMARY &&
	    type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
	    z_erofs_try_inplace_io(clt, page))
		return 0;

	ret = z_erofs_pagevec_enqueue(&clt->vector, page, type, &occupied);
	clt->cl->vcnt += (unsigned int)ret;

	return ret ? 0 : -EAGAIN;
}

static enum z_erofs_collectmode
try_to_claim_pcluster(struct z_erofs_pcluster *pcl,
		      z_erofs_next_pcluster_t *owned_head)
{
	/* let's claim these following types of pclusters */
retry:
	if (pcl->next == Z_EROFS_PCLUSTER_NIL) {
		/* type 1, nil pcluster */
		if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
			    *owned_head) != Z_EROFS_PCLUSTER_NIL)
			goto retry;

		*owned_head = &pcl->next;
		/* lucky, I am the followee :) */
		return COLLECT_PRIMARY_FOLLOWED;
	} else if (pcl->next == Z_EROFS_PCLUSTER_TAIL) {
		/*
		 * type 2, link to the end of an existing open chain,
		 * be careful that its submission itself is governed
		 * by the original owned chain.
		 */
		if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
			    *owned_head) != Z_EROFS_PCLUSTER_TAIL)
			goto retry;
		*owned_head = Z_EROFS_PCLUSTER_TAIL;
		return COLLECT_PRIMARY_HOOKED;
	}
	return COLLECT_PRIMARY;	/* :( better luck next time */
}

static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
				     struct inode *inode,
				     struct erofs_map_blocks *map)
{
	struct z_erofs_pcluster *pcl = clt->pcl;
	struct z_erofs_collection *cl;
	unsigned int length;

	/* to avoid unexpected loop formed by corrupted images */
	if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) {
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	cl = z_erofs_primarycollection(pcl);
	if (cl->pageofs != (map->m_la & ~PAGE_MASK)) {
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	length = READ_ONCE(pcl->length);
	if (length & Z_EROFS_PCLUSTER_FULL_LENGTH) {
		if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) {
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
	} else {
		unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT;

		if (map->m_flags & EROFS_MAP_FULL_MAPPED)
			llen |= Z_EROFS_PCLUSTER_FULL_LENGTH;

		while (llen > length &&
		       length != cmpxchg_relaxed(&pcl->length, length, llen)) {
			cpu_relax();
			length = READ_ONCE(pcl->length);
		}
	}
	mutex_lock(&cl->lock);
	/* used to check tail merging loop due to corrupted images */
	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
		clt->tailpcl = pcl;
	clt->mode = try_to_claim_pcluster(pcl, &clt->owned_head);
	/* clean tailpcl if the current owned_head is Z_EROFS_PCLUSTER_TAIL */
	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
		clt->tailpcl = NULL;
	clt->cl = cl;
	return 0;
}

static int z_erofs_register_collection(struct z_erofs_collector *clt,
				       struct inode *inode,
				       struct erofs_map_blocks *map)
{
	struct z_erofs_pcluster *pcl;
	struct z_erofs_collection *cl;
	struct erofs_workgroup *grp;
	int err;

	/* no available workgroup, let's allocate one */
	pcl = kmem_cache_alloc(pcluster_cachep, GFP_NOFS);
	if (!pcl)
		return -ENOMEM;

	atomic_set(&pcl->obj.refcount, 1);
	pcl->obj.index = map->m_pa >> PAGE_SHIFT;

	pcl->length = (map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) |
		(map->m_flags & EROFS_MAP_FULL_MAPPED ?
			Z_EROFS_PCLUSTER_FULL_LENGTH : 0);

	if (map->m_flags & EROFS_MAP_ZIPPED)
		pcl->algorithmformat = Z_EROFS_COMPRESSION_LZ4;
	else
		pcl->algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;

	pcl->clusterbits = EROFS_I(inode)->z_physical_clusterbits[0];
	pcl->clusterbits -= PAGE_SHIFT;

	/* new pclusters should be claimed as type 1, primary and followed */
	pcl->next = clt->owned_head;
	clt->mode = COLLECT_PRIMARY_FOLLOWED;

	cl = z_erofs_primarycollection(pcl);

	/* must be cleaned before freeing to slab */
	DBG_BUGON(cl->nr_pages);
	DBG_BUGON(cl->vcnt);

	cl->pageofs = map->m_la & ~PAGE_MASK;

	/*
	 * lock all primary followed works before they become visible to
	 * others, and mutex_trylock *never* fails for a new pcluster.
	 */
	DBG_BUGON(!mutex_trylock(&cl->lock));

	grp = erofs_insert_workgroup(inode->i_sb, &pcl->obj);
	if (IS_ERR(grp)) {
		err = PTR_ERR(grp);
		goto err_out;
	}

	if (grp != &pcl->obj) {
		clt->pcl = container_of(grp, struct z_erofs_pcluster, obj);
		err = -EEXIST;
		goto err_out;
	}
	/* used to check tail merging loop due to corrupted images */
	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
		clt->tailpcl = pcl;
	clt->owned_head = &pcl->next;
	clt->pcl = pcl;
	clt->cl = cl;
	return 0;

err_out:
	mutex_unlock(&cl->lock);
	kmem_cache_free(pcluster_cachep, pcl);
	return err;
}

static int z_erofs_collector_begin(struct z_erofs_collector *clt,
				   struct inode *inode,
				   struct erofs_map_blocks *map)
{
	struct erofs_workgroup *grp;
	int ret;

	DBG_BUGON(clt->cl);

	/* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous collection */
	DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_NIL);
	DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);

	if (!PAGE_ALIGNED(map->m_pa)) {
		DBG_BUGON(1);
		return -EINVAL;
	}

	grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT);
	if (grp) {
		clt->pcl = container_of(grp, struct z_erofs_pcluster, obj);
	} else {
		ret = z_erofs_register_collection(clt, inode, map);

		if (!ret)
			goto out;
		if (ret != -EEXIST)
			return ret;
	}

	ret = z_erofs_lookup_collection(clt, inode, map);
	if (ret) {
		erofs_workgroup_put(&clt->pcl->obj);
		return ret;
	}

out:
	z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS,
				  clt->cl->pagevec, clt->cl->vcnt);

	clt->compressedpages = clt->pcl->compressed_pages;
	if (clt->mode <= COLLECT_PRIMARY)	/* cannot do in-place I/O */
		clt->compressedpages += Z_EROFS_CLUSTER_MAX_PAGES;
	return 0;
}

/*
 * keep in mind that no referenced pclusters will be freed;
 * unreferenced pclusters are freed only after an RCU grace period.
 */
static void z_erofs_rcu_callback(struct rcu_head *head)
{
	struct z_erofs_collection *const cl =
		container_of(head, struct z_erofs_collection, rcu);

	kmem_cache_free(pcluster_cachep,
			container_of(cl, struct z_erofs_pcluster,
				     primary_collection));
}

void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
{
	struct z_erofs_pcluster *const pcl =
		container_of(grp, struct z_erofs_pcluster, obj);
	struct z_erofs_collection *const cl = z_erofs_primarycollection(pcl);

	call_rcu(&cl->rcu, z_erofs_rcu_callback);
}

static void z_erofs_collection_put(struct z_erofs_collection *cl)
{
	struct z_erofs_pcluster *const pcl =
		container_of(cl, struct z_erofs_pcluster, primary_collection);

	erofs_workgroup_put(&pcl->obj);
}

static bool z_erofs_collector_end(struct z_erofs_collector *clt)
{
	struct z_erofs_collection *cl = clt->cl;

	if (!cl)
		return false;

	z_erofs_pagevec_ctor_exit(&clt->vector, false);
	mutex_unlock(&cl->lock);

	/*
	 * once all pending pages are added, don't hold the collection
	 * reference any longer if the pcluster isn't hosted by ourselves.
	 */
	if (clt->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
		z_erofs_collection_put(cl);

	clt->cl = NULL;
	return true;
}

static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
				       unsigned int cachestrategy,
				       erofs_off_t la)
{
	if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
		return false;

	if (fe->backmost)
		return true;

	return cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
		la < fe->headoffset;
}

static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
				struct page *page)
{
	struct inode *const inode = fe->inode;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
	struct erofs_map_blocks *const map = &fe->map;
	struct z_erofs_collector *const clt = &fe->clt;
	const loff_t offset = page_offset(page);
	bool tight = true;

	enum z_erofs_cache_alloctype cache_strategy;
	enum z_erofs_page_type page_type;
	unsigned int cur, end, spiltted, index;
	int err = 0;

	/* register locked file pages as online pages in pack */
	z_erofs_onlinepage_init(page);

	spiltted = 0;
	end = PAGE_SIZE;
repeat:
	cur = end - 1;

	/* lucky, within the range of the current map_blocks */
	if (offset + cur >= map->m_la &&
	    offset + cur < map->m_la + map->m_llen) {
		/* didn't get a valid collection previously (very rare) */
		if (!clt->cl)
			goto restart_now;
		goto hitted;
	}

	/* go ahead to the next map_blocks */
	erofs_dbg("%s: [out-of-range] pos %llu", __func__, offset + cur);

	if (z_erofs_collector_end(clt))
		fe->backmost = false;

	map->m_la = offset + cur;
	map->m_llen = 0;
	err = z_erofs_map_blocks_iter(inode, map, 0);
	if (err)
		goto err_out;

restart_now:
	if (!(map->m_flags & EROFS_MAP_MAPPED))
		goto hitted;

	err = z_erofs_collector_begin(clt, inode, map);
	if (err)
		goto err_out;

	/* preload all compressed pages (maybe downgrade role if necessary) */
	if (should_alloc_managed_pages(fe, sbi->ctx.cache_strategy, map->m_la))
		cache_strategy = DELAYEDALLOC;
	else
		cache_strategy = DONTALLOC;

	preload_compressed_pages(clt, MNGD_MAPPING(sbi), cache_strategy);

hitted:
	/*
	 * Ensure the current partial page belongs to this submit chain rather
	 * than other concurrent submit chains or the noio(bypass) chain since
	 * those chains are handled asynchronously thus the page cannot be used
	 * for inplace I/O or pagevec (should be processed in strict order.)
	 */
	tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED &&
		  clt->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE);

	cur = end - min_t(unsigned int, offset + end - map->m_la, end);
	if (!(map->m_flags & EROFS_MAP_MAPPED)) {
		zero_user_segment(page, cur, end);
		goto next_part;
	}

	/* let's derive page type */
	page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
		(!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
			(tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
				Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));

	if (cur)
		tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED);

retry:
	err = z_erofs_attach_page(clt, page, page_type);
	/* should allocate an additional staging page for pagevec */
	if (err == -EAGAIN) {
		struct page *const newpage =
			alloc_page(GFP_NOFS | __GFP_NOFAIL);

		newpage->mapping = Z_EROFS_MAPPING_STAGING;
		err = z_erofs_attach_page(clt, newpage,
					  Z_EROFS_PAGE_TYPE_EXCLUSIVE);
		if (!err)
			goto retry;
	}

	if (err)
		goto err_out;

	index = page->index - (map->m_la >> PAGE_SHIFT);

	z_erofs_onlinepage_fixup(page, index, true);

	/* bump up the number of spiltted parts of a page */
	++spiltted;
	/* also update nr_pages */
	clt->cl->nr_pages = max_t(pgoff_t, clt->cl->nr_pages, index + 1);
next_part:
	/* can be used for verification */
	map->m_llen = offset + cur - map->m_la;

	end = cur;
	if (end > 0)
		goto repeat;

out:
	z_erofs_onlinepage_endio(page);

	erofs_dbg("%s, finish page: %pK spiltted: %u map->m_llen %llu",
		  __func__, page, spiltted, map->m_llen);
	return err;

	/* if some error occurred while processing this page */
err_out:
	SetPageError(page);
	goto out;
}

static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
				       bool sync, int bios)
{
	/* wake up the caller thread for sync decompression */
	if (sync) {
		unsigned long flags;

		spin_lock_irqsave(&io->u.wait.lock, flags);
		if (!atomic_add_return(bios, &io->pending_bios))
			wake_up_locked(&io->u.wait);
		spin_unlock_irqrestore(&io->u.wait.lock, flags);
		return;
	}

	if (!atomic_add_return(bios, &io->pending_bios))
		queue_work(z_erofs_workqueue, &io->u.work);
}

static void z_erofs_decompressqueue_endio(struct bio *bio)
{
	tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
	struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
	blk_status_t err = bio->bi_status;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		DBG_BUGON(PageUptodate(page));
		DBG_BUGON(!page->mapping);

		if (err)
			SetPageError(page);

		if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
			if (!err)
				SetPageUptodate(page);
			unlock_page(page);
		}
	}
	z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
	bio_put(bio);
}

static int z_erofs_decompress_pcluster(struct super_block *sb,
				       struct z_erofs_pcluster *pcl,
				       struct list_head *pagepool)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	const unsigned int clusterpages = BIT(pcl->clusterbits);
	struct z_erofs_pagevec_ctor ctor;
	unsigned int i, outputsize, llen, nr_pages;
	struct page *pages_onstack[Z_EROFS_VMAP_ONSTACK_PAGES];
	struct page **pages, **compressed_pages, *page;

	enum z_erofs_page_type page_type;
	bool overlapped, partial;
	struct z_erofs_collection *cl;
	int err;

	might_sleep();
	cl = z_erofs_primarycollection(pcl);
	DBG_BUGON(!READ_ONCE(cl->nr_pages));

	mutex_lock(&cl->lock);
	nr_pages = cl->nr_pages;
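
	/*
	 * pick a buffer for the decompressed page pointers: the on-stack
	 * array for small pclusters, the shared global array if it can be
	 * grabbed, otherwise a heap allocation.
	 */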
	if (nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES) {
		pages = pages_onstack;
	} else if (nr_pages <= Z_EROFS_VMAP_GLOBAL_PAGES &&
		   mutex_trylock(&z_pagemap_global_lock)) {
		pages = z_pagemap_global;
	} else {
		gfp_t gfp_flags = GFP_KERNEL;

		if (nr_pages > Z_EROFS_VMAP_GLOBAL_PAGES)
			gfp_flags |= __GFP_NOFAIL;

		pages = kvmalloc_array(nr_pages, sizeof(struct page *),
				       gfp_flags);

		/* fallback to global pagemap for the lowmem scenario */
		if (!pages) {
			mutex_lock(&z_pagemap_global_lock);
			pages = z_pagemap_global;
		}
	}

	for (i = 0; i < nr_pages; ++i)
		pages[i] = NULL;

	err = 0;
	z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS,
				  cl->pagevec, 0);

	for (i = 0; i < cl->vcnt; ++i) {
		unsigned int pagenr;

		page = z_erofs_pagevec_dequeue(&ctor, &page_type);

		/* all pages in pagevec ought to be valid */
		DBG_BUGON(!page);
		DBG_BUGON(!page->mapping);

		if (z_erofs_put_stagingpage(pagepool, page))
			continue;

		if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
			pagenr = 0;
		else
			pagenr = z_erofs_onlinepage_index(page);

		DBG_BUGON(pagenr >= nr_pages);

		/*
		 * currently EROFS doesn't support multiref (dedup),
		 * so error out on a multiref page here.
		 */
		if (pages[pagenr]) {
			DBG_BUGON(1);
			SetPageError(pages[pagenr]);
			z_erofs_onlinepage_endio(pages[pagenr]);
			err = -EFSCORRUPTED;
		}
		pages[pagenr] = page;
	}
	z_erofs_pagevec_ctor_exit(&ctor, true);

	overlapped = false;
	compressed_pages = pcl->compressed_pages;

	for (i = 0; i < clusterpages; ++i) {
		unsigned int pagenr;

		page = compressed_pages[i];

		/* all compressed pages ought to be valid */
		DBG_BUGON(!page);
		DBG_BUGON(!page->mapping);

		if (!z_erofs_page_is_staging(page)) {
			if (erofs_page_is_managed(sbi, page)) {
				if (!PageUptodate(page))
					err = -EIO;
				continue;
			}

			/*
			 * only if non-head page can be selected
			 * for inplace decompression
			 */
			pagenr = z_erofs_onlinepage_index(page);

			DBG_BUGON(pagenr >= nr_pages);
			if (pages[pagenr]) {
				DBG_BUGON(1);
				SetPageError(pages[pagenr]);
				z_erofs_onlinepage_endio(pages[pagenr]);
				err = -EFSCORRUPTED;
			}
			pages[pagenr] = page;

			overlapped = true;
		}

		/* PG_error needs checking for inplaced and staging pages */
		if (PageError(page)) {
			DBG_BUGON(PageUptodate(page));
			err = -EIO;
		}
	}

	if (err)
		goto out;

	llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT;
	if (nr_pages << PAGE_SHIFT >= cl->pageofs + llen) {
		outputsize = llen;
		partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH);
	} else {
		outputsize = (nr_pages << PAGE_SHIFT) - cl->pageofs;
		partial = true;
	}

	err = z_erofs_decompress(&(struct z_erofs_decompress_req) {
				 .sb = sb,
				 .in = compressed_pages,
				 .out = pages,
				 .pageofs_out = cl->pageofs,
				 .inputsize = PAGE_SIZE,
				 .outputsize = outputsize,
				 .alg = pcl->algorithmformat,
				 .inplace_io = overlapped,
				 .partial_decoding = partial
				 }, pagepool);

out:
	/* must handle all compressed pages before ending pages */
	for (i = 0; i < clusterpages; ++i) {
		page = compressed_pages[i];

		if (erofs_page_is_managed(sbi, page))
			continue;

		/* recycle all individual staging pages */
		(void)z_erofs_put_stagingpage(pagepool, page);

		WRITE_ONCE(compressed_pages[i], NULL);
	}
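
	/* then end all decompressed (file) pages attached to this collection */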
	for (i = 0; i < nr_pages; ++i) {
		page = pages[i];
		if (!page)
			continue;

		DBG_BUGON(!page->mapping);

		/* recycle all individual staging pages */
		if (z_erofs_put_stagingpage(pagepool, page))
			continue;

		if (err < 0)
			SetPageError(page);

		z_erofs_onlinepage_endio(page);
	}

	if (pages == z_pagemap_global)
		mutex_unlock(&z_pagemap_global_lock);
	else if (pages != pages_onstack)
		kvfree(pages);

	cl->nr_pages = 0;
	cl->vcnt = 0;

	/* all cl locks MUST be taken before the following line */
	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);

	/* all cl locks SHOULD be released right now */
	mutex_unlock(&cl->lock);

	z_erofs_collection_put(cl);
	return err;
}

static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
				     struct list_head *pagepool)
{
	z_erofs_next_pcluster_t owned = io->head;

	while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) {
		struct z_erofs_pcluster *pcl;

		/* impossible that 'owned' equals Z_EROFS_WORK_TPTR_TAIL */
		DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL);

		/* impossible that 'owned' equals NULL */
		DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);

		pcl = container_of(owned, struct z_erofs_pcluster, next);
		owned = READ_ONCE(pcl->next);

		z_erofs_decompress_pcluster(io->sb, pcl, pagepool);
	}
}

static void z_erofs_decompressqueue_work(struct work_struct *work)
{
	struct z_erofs_decompressqueue *bgq =
		container_of(work, struct z_erofs_decompressqueue, u.work);
	LIST_HEAD(pagepool);

	DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
	z_erofs_decompress_queue(bgq, &pagepool);

	put_pages_list(&pagepool);
	kvfree(bgq);
}

static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
					       unsigned int nr,
					       struct list_head *pagepool,
					       struct address_space *mc,
					       gfp_t gfp)
{
	const pgoff_t index = pcl->obj.index;
	bool tocache = false;

	struct address_space *mapping;
	struct page *oldpage, *page;

	compressed_page_t t;
	int justfound;

repeat:
	page = READ_ONCE(pcl->compressed_pages[nr]);
	oldpage = page;

	if (!page)
		goto out_allocpage;

	/*
	 * the cached page has not been allocated and
	 * a placeholder is out there, prepare it now.
	 */
	if (page == PAGE_UNALLOCATED) {
		tocache = true;
		goto out_allocpage;
	}

	/* process the target tagged pointer */
	t = tagptr_init(compressed_page_t, page);
	justfound = tagptr_unfold_tags(t);
	page = tagptr_unfold_ptr(t);

	mapping = READ_ONCE(page->mapping);

	/*
	 * unmanaged (file) pages are all locked solidly,
	 * therefore it is impossible for `mapping' to be NULL.
	 */
	if (mapping && mapping != mc)
		/* ought to be unmanaged pages */
		goto out;

	lock_page(page);

	/* only true if page reclaim goes wrong, should never happen */
	DBG_BUGON(justfound && PagePrivate(page));

	/* the page is still in the managed cache */
	if (page->mapping == mc) {
		WRITE_ONCE(pcl->compressed_pages[nr], page);

		ClearPageError(page);
		if (!PagePrivate(page)) {
			/*
			 * impossible to be !PagePrivate(page) under the
			 * current restriction as well if the page is
			 * already in compressed_pages[].
			 */
			DBG_BUGON(!justfound);

			justfound = 0;
			set_page_private(page, (unsigned long)pcl);
			SetPagePrivate(page);
		}

		/* no need to submit io if it is already up-to-date */
		if (PageUptodate(page)) {
			unlock_page(page);
			page = NULL;
		}
		goto out;
	}

	/*
	 * the managed page has been truncated, it's unsafe to
	 * reuse this one, let's allocate a new cache-managed page.
	 */
	DBG_BUGON(page->mapping);
	DBG_BUGON(!justfound);

	tocache = true;
	unlock_page(page);
	put_page(page);
out_allocpage:
	page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
	if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
		/* non-LRU / non-movable temporary page is needed */
		page->mapping = Z_EROFS_MAPPING_STAGING;
		tocache = false;
	}

	if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
		if (tocache) {
			/* since it was added to the managed cache successfully */
			unlock_page(page);
			put_page(page);
		} else {
			list_add(&page->lru, pagepool);
		}
		cond_resched();
		goto repeat;
	}
	set_page_private(page, (unsigned long)pcl);
	SetPagePrivate(page);
out:	/* the only exit (for tracing and debugging) */
	return page;
}

static struct z_erofs_decompressqueue *
jobqueue_init(struct super_block *sb,
	      struct z_erofs_decompressqueue *fgq, bool *fg)
{
	struct z_erofs_decompressqueue *q;

	if (fg && !*fg) {
		q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
		if (!q) {
			*fg = true;
			goto fg_out;
		}
		INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
	} else {
fg_out:
		q = fgq;
		init_waitqueue_head(&fgq->u.wait);
		atomic_set(&fgq->pending_bios, 0);
	}
	q->sb = sb;
	q->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
	return q;
}

/* define decompression jobqueue types */
enum {
	JQ_BYPASS,
	JQ_SUBMIT,
	NR_JOBQUEUES,
};

static void *jobqueueset_init(struct super_block *sb,
			      struct z_erofs_decompressqueue *q[],
			      struct z_erofs_decompressqueue *fgq, bool *fg)
{
	/*
	 * if managed cache is enabled, a bypass jobqueue is needed:
	 * no need to read from the device for pclusters in this queue.
	 */
	q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
	q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, fg);

	return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], *fg));
}

static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
				    z_erofs_next_pcluster_t qtail[],
				    z_erofs_next_pcluster_t owned_head)
{
	z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
	z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];

	DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
	if (owned_head == Z_EROFS_PCLUSTER_TAIL)
		owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED;

	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED);

	WRITE_ONCE(*submit_qtail, owned_head);
	WRITE_ONCE(*bypass_qtail, &pcl->next);

	qtail[JQ_BYPASS] = &pcl->next;
}

static void z_erofs_submit_queue(struct super_block *sb,
				 z_erofs_next_pcluster_t owned_head,
				 struct list_head *pagepool,
				 struct z_erofs_decompressqueue *fgq,
				 bool *force_fg)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
	struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
	void *bi_private;
	/* since bio will be NULL, no need to initialize last_index */
	pgoff_t last_index;
	unsigned int nr_bios = 0;
	struct bio *bio = NULL;

	bi_private = jobqueueset_init(sb, q, fgq, force_fg);
	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;

	/* by default, all need io submission */
	q[JQ_SUBMIT]->head = owned_head;

	do {
		struct z_erofs_pcluster *pcl;
		pgoff_t cur, end;
		unsigned int i = 0;
		bool bypass = true;

		/* 'owned_head' should never equal the following */
		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);

		pcl = container_of(owned_head, struct z_erofs_pcluster, next);

		cur = pcl->obj.index;
		end = cur + BIT(pcl->clusterbits);

		/* close the main owned chain at first */
		owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
				     Z_EROFS_PCLUSTER_TAIL_CLOSED);

		do {
			struct page *page;

			page = pickup_page_for_submission(pcl, i++, pagepool,
							  MNGD_MAPPING(sbi),
							  GFP_NOFS);
			if (!page)
				continue;

			if (bio && cur != last_index + 1) {
submit_bio_retry:
				submit_bio(bio);
				bio = NULL;
			}

			if (!bio) {
				bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);

				bio->bi_end_io = z_erofs_decompressqueue_endio;
				bio_set_dev(bio, sb->s_bdev);
				bio->bi_iter.bi_sector = (sector_t)cur <<
					LOG_SECTORS_PER_BLOCK;
				bio->bi_private = bi_private;
				bio->bi_opf = REQ_OP_READ;
				++nr_bios;
			}

			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				goto submit_bio_retry;

			last_index = cur;
			bypass = false;
		} while (++cur < end);

		if (!bypass)
			qtail[JQ_SUBMIT] = &pcl->next;
		else
			move_to_bypass_jobqueue(pcl, qtail, owned_head);
	} while (owned_head != Z_EROFS_PCLUSTER_TAIL);

	if (bio)
		submit_bio(bio);

	/*
	 * although background is preferred, nothing is pending for submission;
	 * don't issue a decompression workqueue item, drop it directly instead.
	 */
	if (!*force_fg && !nr_bios) {
		kvfree(q[JQ_SUBMIT]);
		return;
	}
	z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
}

static void z_erofs_runqueue(struct super_block *sb,
			     struct z_erofs_collector *clt,
			     struct list_head *pagepool, bool force_fg)
{
	struct z_erofs_decompressqueue io[NR_JOBQUEUES];

	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
		return;
	z_erofs_submit_queue(sb, clt->owned_head, pagepool, io, &force_fg);

	/* handle bypass queue (no i/o pclusters) immediately */
	z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);

	if (!force_fg)
		return;

	/* wait until all bios are completed */
	io_wait_event(io[JQ_SUBMIT].u.wait,
		      !atomic_read(&io[JQ_SUBMIT].pending_bios));

	/* handle synchronous decompress queue in the caller context */
	z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);
}

static int z_erofs_readpage(struct file *file, struct page *page)
{
	struct inode *const inode = page->mapping->host;
	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
	int err;
	LIST_HEAD(pagepool);

	trace_erofs_readpage(page, false);

	f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;

	err = z_erofs_do_read_page(&f, page);
	(void)z_erofs_collector_end(&f.clt);

	/* if some compressed clusters are ready, submit them anyway */
	z_erofs_runqueue(inode->i_sb, &f.clt, &pagepool, true);

	if (err)
		erofs_err(inode->i_sb, "failed to read, err [%d]", err);

	if (f.map.mpage)
		put_page(f.map.mpage);

	/* clean up the remaining free pages */
	put_pages_list(&pagepool);
	return err;
}

static void z_erofs_readahead(struct readahead_control *rac)
{
	struct inode *const inode = rac->mapping->host;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);

	unsigned int nr_pages = readahead_count(rac);
	bool sync = (nr_pages <= sbi->ctx.max_sync_decompress_pages);
	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
	struct page *page, *head = NULL;
	LIST_HEAD(pagepool);

	trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);

	f.headoffset = readahead_pos(rac);

	while ((page = readahead_page(rac))) {
		prefetchw(&page->flags);

		/*
		 * A pure asynchronous readahead is indicated if
		 * a PG_readahead marked page is hit first.
		 * Let's also do asynchronous decompression for this case.
		 */
		sync &= !(PageReadahead(page) && !head);

		set_page_private(page, (unsigned long)head);
		head = page;
	}

	while (head) {
		struct page *page = head;
		int err;

		/* traversal in reverse order */
		head = (void *)page_private(page);

		err = z_erofs_do_read_page(&f, page);
		if (err)
			erofs_err(inode->i_sb,
				  "readahead error at page %lu @ nid %llu",
				  page->index, EROFS_I(inode)->nid);
		put_page(page);
	}

	(void)z_erofs_collector_end(&f.clt);

	z_erofs_runqueue(inode->i_sb, &f.clt, &pagepool, sync);

	if (f.map.mpage)
		put_page(f.map.mpage);

	/* clean up the remaining free pages */
	put_pages_list(&pagepool);
}

const struct address_space_operations z_erofs_aops = {
	.readpage = z_erofs_readpage,
	.readahead = z_erofs_readahead,
};