// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2022 Alibaba Cloud
 */
#include "compress.h"
#include <linux/psi.h>
#include <linux/cpuhotplug.h>
#include <trace/events/erofs.h>

#define Z_EROFS_PCLUSTER_MAX_PAGES	(Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
#define Z_EROFS_INLINE_BVECS		2

/*
 * leave a type here in case another tagged pointer is
 * introduced later.
 */
typedef void *z_erofs_next_pcluster_t;

struct z_erofs_bvec {
	struct page *page;
	int offset;
	unsigned int end;
};

#define __Z_EROFS_BVSET(name, total) \
struct name { \
	/* point to the next page which contains the following bvecs */ \
	struct page *nextpage; \
	struct z_erofs_bvec bvec[total]; \
}
__Z_EROFS_BVSET(z_erofs_bvset,);
__Z_EROFS_BVSET(z_erofs_bvset_inline, Z_EROFS_INLINE_BVECS);
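
/*
 * For reference, a sketch of what the inline variant above expands to
 * (just the macro spelled out, nothing new):
 *
 *	struct z_erofs_bvset_inline {
 *		struct page *nextpage;
 *		struct z_erofs_bvec bvec[Z_EROFS_INLINE_BVECS];
 *	};
 *
 * z_erofs_bvset itself uses an empty array length, so a whole page can
 * be reinterpreted as a single bvset once the inline bvecs are used up.
 */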

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only
 *    for everyone else;
 *
 * L: Field should be protected by the pcluster lock;
 *
 * A: Field should be accessed/updated atomically by parallelized code.
 */
struct z_erofs_pcluster {
	struct erofs_workgroup obj;
	struct mutex lock;

	/* A: point to next chained pcluster or TAILs */
	z_erofs_next_pcluster_t next;

	/* L: the maximum decompression size of this round */
	unsigned int length;

	/* L: total number of bvecs */
	unsigned int vcnt;

	/* I: page offset of start position of decompression */
	unsigned short pageofs_out;

	/* I: page offset of inline compressed data */
	unsigned short pageofs_in;

	union {
		/* L: inline a certain number of bvecs for bootstrap */
		struct z_erofs_bvset_inline bvset;

		/* I: can be used to free the pcluster by RCU. */
		struct rcu_head rcu;
	};

	union {
		/* I: physical cluster size in pages */
		unsigned short pclusterpages;

		/* I: tailpacking inline compressed size */
		unsigned short tailpacking_size;
	};

	/* I: compression algorithm format */
	unsigned char algorithmformat;

	/* L: whether partial decompression or not */
	bool partial;

	/* L: indicate several pageofs_outs or not */
	bool multibases;

	/* A: compressed bvecs (can be cached or inplaced pages) */
	struct z_erofs_bvec compressed_bvecs[];
};

/* the end of a chain of pclusters */
#define Z_EROFS_PCLUSTER_TAIL           ((void *) 0x700 + POISON_POINTER_DELTA)
#define Z_EROFS_PCLUSTER_NIL            (NULL)

struct z_erofs_decompressqueue {
	struct super_block *sb;
	atomic_t pending_bios;
	z_erofs_next_pcluster_t head;

	union {
		struct completion done;
		struct work_struct work;
		struct kthread_work kthread_work;
	} u;
	bool eio, sync;
};

static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
{
	return !pcl->obj.index;
}

static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
{
	if (z_erofs_is_inline_pcluster(pcl))
		return 1;
	return pcl->pclusterpages;
}

/*
 * bit 30: I/O error occurred on this page
 * bit 0 - 29: remaining parts to complete this page
 */
#define Z_EROFS_PAGE_EIO			(1 << 30)

static inline void z_erofs_onlinepage_init(struct page *page)
{
	union {
		atomic_t o;
		unsigned long v;
	} u = { .o = ATOMIC_INIT(1) };

	set_page_private(page, u.v);
	smp_wmb();
	SetPagePrivate(page);
}

static inline void z_erofs_onlinepage_split(struct page *page)
{
	atomic_inc((atomic_t *)&page->private);
}

static inline void z_erofs_page_mark_eio(struct page *page)
{
	int orig;

	do {
		orig = atomic_read((atomic_t *)&page->private);
	} while (atomic_cmpxchg((atomic_t *)&page->private, orig,
				orig | Z_EROFS_PAGE_EIO) != orig);
}

static inline void z_erofs_onlinepage_endio(struct page *page)
{
	unsigned int v;

	DBG_BUGON(!PagePrivate(page));
	v = atomic_dec_return((atomic_t *)&page->private);
	if (!(v & ~Z_EROFS_PAGE_EIO)) {
		set_page_private(page, 0);
		ClearPagePrivate(page);
		if (!(v & Z_EROFS_PAGE_EIO))
			SetPageUptodate(page);
		unlock_page(page);
	}
}
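
/*
 * An illustrative walk-through of the online page accounting above:
 * z_erofs_onlinepage_init() stores 1 in page->private; each sub-page
 * part calls z_erofs_onlinepage_split() once, so a page split into two
 * extents holds the value 3.  z_erofs_do_read_page() drops the initial
 * reference when it finishes, and the decompression backend drops one
 * per completed part:
 *
 *	init		-> private = 1
 *	split, split	-> private = 3
 *	endio x3	-> private = 0: the page is unlocked and, unless
 *			   Z_EROFS_PAGE_EIO was set meanwhile, marked
 *			   up-to-date.
 */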

#define Z_EROFS_ONSTACK_PAGES		32

/*
 * Since pclustersize is variable with the big pcluster feature,
 * introduce slab pools for pclusters of different sizes.
 */
struct z_erofs_pcluster_slab {
	struct kmem_cache *slab;
	unsigned int maxpages;
	char name[48];
};

#define _PCLP(n) { .maxpages = n }

static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = {
	_PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128),
	_PCLP(Z_EROFS_PCLUSTER_MAX_PAGES)
};

struct z_erofs_bvec_iter {
	struct page *bvpage;
	struct z_erofs_bvset *bvset;
	unsigned int nr, cur;
};

static struct page *z_erofs_bvec_iter_end(struct z_erofs_bvec_iter *iter)
{
	if (iter->bvpage)
		kunmap_local(iter->bvset);
	return iter->bvpage;
}

static struct page *z_erofs_bvset_flip(struct z_erofs_bvec_iter *iter)
{
	unsigned long base = (unsigned long)((struct z_erofs_bvset *)0)->bvec;
	/* have to access nextpage in advance, otherwise it will be unmapped */
	struct page *nextpage = iter->bvset->nextpage;
	struct page *oldpage;

	DBG_BUGON(!nextpage);
	oldpage = z_erofs_bvec_iter_end(iter);
	iter->bvpage = nextpage;
	iter->bvset = kmap_local_page(nextpage);
	iter->nr = (PAGE_SIZE - base) / sizeof(struct z_erofs_bvec);
	iter->cur = 0;
	return oldpage;
}

static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
				    struct z_erofs_bvset_inline *bvset,
				    unsigned int bootstrap_nr,
				    unsigned int cur)
{
	*iter = (struct z_erofs_bvec_iter) {
		.nr = bootstrap_nr,
		.bvset = (struct z_erofs_bvset *)bvset,
	};

	while (cur > iter->nr) {
		cur -= iter->nr;
		z_erofs_bvset_flip(iter);
	}
	iter->cur = cur;
}

static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
				struct z_erofs_bvec *bvec,
				struct page **candidate_bvpage,
				struct page **pagepool)
{
	if (iter->cur >= iter->nr) {
		struct page *nextpage = *candidate_bvpage;

		if (!nextpage) {
			nextpage = erofs_allocpage(pagepool, GFP_NOFS);
			if (!nextpage)
				return -ENOMEM;
			set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE);
		}
		DBG_BUGON(iter->bvset->nextpage);
		iter->bvset->nextpage = nextpage;
		z_erofs_bvset_flip(iter);

		iter->bvset->nextpage = NULL;
		*candidate_bvpage = NULL;
	}
	iter->bvset->bvec[iter->cur++] = *bvec;
	return 0;
}

static void z_erofs_bvec_dequeue(struct z_erofs_bvec_iter *iter,
				 struct z_erofs_bvec *bvec,
				 struct page **old_bvpage)
{
	if (iter->cur == iter->nr)
		*old_bvpage = z_erofs_bvset_flip(iter);
	else
		*old_bvpage = NULL;
	*bvec = iter->bvset->bvec[iter->cur++];
}
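
/*
 * A rough usage sketch of the iterator above (variable names are
 * placeholders, error handling omitted): the frontend appends bvecs
 * starting from the inline bootstrap set, while the backend later walks
 * them back with a fresh iterator:
 *
 *	z_erofs_bvec_iter_begin(&iter, &pcl->bvset,
 *				Z_EROFS_INLINE_BVECS, pcl->vcnt);
 *	err = z_erofs_bvec_enqueue(&iter, &bvec, &candidate_bvpage,
 *				   &pagepool);
 *	...
 *	z_erofs_bvec_iter_begin(&iter, &pcl->bvset,
 *				Z_EROFS_INLINE_BVECS, 0);
 *	z_erofs_bvec_dequeue(&iter, &bvec, &old_bvpage);
 *	old_bvpage = z_erofs_bvec_iter_end(&iter);
 *
 * Once the Z_EROFS_INLINE_BVECS bootstrap entries are exhausted, extra
 * bvecs spill onto short-lived pages chained via ->nextpage.
 */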

static void z_erofs_destroy_pcluster_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
		if (!pcluster_pool[i].slab)
			continue;
		kmem_cache_destroy(pcluster_pool[i].slab);
		pcluster_pool[i].slab = NULL;
	}
}

static int z_erofs_create_pcluster_pool(void)
{
	struct z_erofs_pcluster_slab *pcs;
	struct z_erofs_pcluster *a;
	unsigned int size;

	for (pcs = pcluster_pool;
	     pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
		size = struct_size(a, compressed_bvecs, pcs->maxpages);

		sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages);
		pcs->slab = kmem_cache_create(pcs->name, size, 0,
					      SLAB_RECLAIM_ACCOUNT, NULL);
		if (pcs->slab)
			continue;

		z_erofs_destroy_pcluster_pool();
		return -ENOMEM;
	}
	return 0;
}
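
/*
 * For instance (illustrative numbers only), the "erofs_pcluster-4" cache
 * created above serves pclusters spanning 2..4 compressed pages; each
 * object takes struct_size(pcl, compressed_bvecs, 4) bytes, i.e. the
 * fixed header plus four struct z_erofs_bvec slots.
 * z_erofs_alloc_pcluster() below simply picks the first pool whose
 * maxpages fits the request.
 */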

static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
		struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
		struct z_erofs_pcluster *pcl;

		if (nrpages > pcs->maxpages)
			continue;

		pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS);
		if (!pcl)
			return ERR_PTR(-ENOMEM);
		pcl->pclusterpages = nrpages;
		return pcl;
	}
	return ERR_PTR(-EINVAL);
}

static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
{
	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
	int i;

	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
		struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;

		if (pclusterpages > pcs->maxpages)
			continue;

		kmem_cache_free(pcs->slab, pcl);
		return;
	}
	DBG_BUGON(1);
}

static struct workqueue_struct *z_erofs_workqueue __read_mostly;

#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
static struct kthread_worker __rcu **z_erofs_pcpu_workers;

static void erofs_destroy_percpu_workers(void)
{
	struct kthread_worker *worker;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		worker = rcu_dereference_protected(
					z_erofs_pcpu_workers[cpu], 1);
		rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
		if (worker)
			kthread_destroy_worker(worker);
	}
	kfree(z_erofs_pcpu_workers);
}

static struct kthread_worker *erofs_init_percpu_worker(int cpu)
{
	struct kthread_worker *worker =
		kthread_create_worker_on_cpu(cpu, 0, "erofs_worker/%u", cpu);

	if (IS_ERR(worker))
		return worker;
	if (IS_ENABLED(CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI))
		sched_set_fifo_low(worker->task);
	return worker;
}

static int erofs_init_percpu_workers(void)
{
	struct kthread_worker *worker;
	unsigned int cpu;

	z_erofs_pcpu_workers = kcalloc(num_possible_cpus(),
			sizeof(struct kthread_worker *), GFP_ATOMIC);
	if (!z_erofs_pcpu_workers)
		return -ENOMEM;

	for_each_online_cpu(cpu) {	/* could miss cpu{off,on}line? */
		worker = erofs_init_percpu_worker(cpu);
		if (!IS_ERR(worker))
			rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
	}
	return 0;
}
#else
static inline void erofs_destroy_percpu_workers(void) {}
static inline int erofs_init_percpu_workers(void) { return 0; }
#endif

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_EROFS_FS_PCPU_KTHREAD)
static DEFINE_SPINLOCK(z_erofs_pcpu_worker_lock);
static enum cpuhp_state erofs_cpuhp_state;

static int erofs_cpu_online(unsigned int cpu)
{
	struct kthread_worker *worker, *old;

	worker = erofs_init_percpu_worker(cpu);
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	spin_lock(&z_erofs_pcpu_worker_lock);
	old = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
			lockdep_is_held(&z_erofs_pcpu_worker_lock));
	if (!old)
		rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
	spin_unlock(&z_erofs_pcpu_worker_lock);
	if (old)
		kthread_destroy_worker(worker);
	return 0;
}

static int erofs_cpu_offline(unsigned int cpu)
{
	struct kthread_worker *worker;

	spin_lock(&z_erofs_pcpu_worker_lock);
	worker = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
			lockdep_is_held(&z_erofs_pcpu_worker_lock));
	rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
	spin_unlock(&z_erofs_pcpu_worker_lock);

	synchronize_rcu();
	if (worker)
		kthread_destroy_worker(worker);
	return 0;
}

static int erofs_cpu_hotplug_init(void)
{
	int state;

	state = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
			"fs/erofs:online", erofs_cpu_online, erofs_cpu_offline);
	if (state < 0)
		return state;

	erofs_cpuhp_state = state;
	return 0;
}

static void erofs_cpu_hotplug_destroy(void)
{
	if (erofs_cpuhp_state)
		cpuhp_remove_state_nocalls(erofs_cpuhp_state);
}
#else /* !CONFIG_HOTPLUG_CPU || !CONFIG_EROFS_FS_PCPU_KTHREAD */
static inline int erofs_cpu_hotplug_init(void) { return 0; }
static inline void erofs_cpu_hotplug_destroy(void) {}
#endif

void z_erofs_exit_zip_subsystem(void)
{
	erofs_cpu_hotplug_destroy();
	erofs_destroy_percpu_workers();
	destroy_workqueue(z_erofs_workqueue);
	z_erofs_destroy_pcluster_pool();
}

int __init z_erofs_init_zip_subsystem(void)
{
	int err = z_erofs_create_pcluster_pool();

	if (err)
		goto out_error_pcluster_pool;

	z_erofs_workqueue = alloc_workqueue("erofs_worker",
			WQ_UNBOUND | WQ_HIGHPRI, num_possible_cpus());
	if (!z_erofs_workqueue) {
		err = -ENOMEM;
		goto out_error_workqueue_init;
	}

	err = erofs_init_percpu_workers();
	if (err)
		goto out_error_pcpu_worker;

	err = erofs_cpu_hotplug_init();
	if (err < 0)
		goto out_error_cpuhp_init;
	return err;

out_error_cpuhp_init:
	erofs_destroy_percpu_workers();
out_error_pcpu_worker:
	destroy_workqueue(z_erofs_workqueue);
out_error_workqueue_init:
	z_erofs_destroy_pcluster_pool();
out_error_pcluster_pool:
	return err;
}

enum z_erofs_pclustermode {
	Z_EROFS_PCLUSTER_INFLIGHT,
	/*
	 * a weak form of Z_EROFS_PCLUSTER_FOLLOWED, the difference is that it
	 * could be dispatched into the bypass queue later due to up-to-date
	 * managed pages.  All related online pages cannot be reused for
	 * inplace I/O (or bvpage) since it can be directly decoded without
	 * I/O submission.
	 */
	Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
	/*
	 * The pcluster was just linked to a decompression chain by us.  It can
	 * also be linked with the remaining pclusters, which means if the
	 * processing page is the tail page of a pcluster, this pcluster can
	 * safely use the whole page (since the previous pcluster is within the
	 * same chain) for in-place I/O, as illustrated below:
	 *  ___________________________________________________
	 * |  tail (partial) page  |    head (partial) page    |
	 * |  (of the current pcl) |   (of the previous pcl)   |
	 * |___PCLUSTER_FOLLOWED___|_____PCLUSTER_FOLLOWED_____|
	 *
	 * [  (*) the page above can be used as inplace I/O.   ]
	 */
	Z_EROFS_PCLUSTER_FOLLOWED,
};

struct z_erofs_decompress_frontend {
	struct inode *const inode;
	struct erofs_map_blocks map;
	struct z_erofs_bvec_iter biter;

	struct page *pagepool;
	struct page *candidate_bvpage;
	struct z_erofs_pcluster *pcl;
	z_erofs_next_pcluster_t owned_head;
	enum z_erofs_pclustermode mode;

	/* used for applying cache strategy on the fly */
	bool backmost;
	erofs_off_t headoffset;

	/* a cursor used to pick up inplace I/O pages */
	unsigned int icur;
};

#define DECOMPRESS_FRONTEND_INIT(__i) { \
	.inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
	.mode = Z_EROFS_PCLUSTER_FOLLOWED, .backmost = true }

static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
{
	unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy;

	if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
		return false;

	if (fe->backmost)
		return true;

	if (cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
	    fe->map.m_la < fe->headoffset)
		return true;

	return false;
}
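
/*
 * Restating the decision above as a summary (no new policy): with
 * cache_strategy "disabled" nothing is cached; with "readahead" only the
 * backmost (currently processed) extent allocates cache pages; with
 * "readaround", extents before the read head (m_la < headoffset) are
 * cached as well, which also helps backward reads.
 */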

static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
{
	struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
	struct z_erofs_pcluster *pcl = fe->pcl;
	bool shouldalloc = z_erofs_should_alloc_cache(fe);
	bool standalone = true;
	/*
	 * optimistic allocation without direct reclaim since inplace I/O
	 * can be used under low memory instead.
	 */
	gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
			__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
	unsigned int i;

	if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
		return;

	for (i = 0; i < pcl->pclusterpages; ++i) {
		struct page *page;
		void *t;	/* mark pages just found for debugging */
		struct page *newpage = NULL;

		/* the compressed page was loaded before */
		if (READ_ONCE(pcl->compressed_bvecs[i].page))
			continue;

		page = find_get_page(mc, pcl->obj.index + i);

		if (page) {
			t = (void *)((unsigned long)page | 1);
		} else {
			/* I/O is needed, not possible to decompress directly */
			standalone = false;
			if (!shouldalloc)
				continue;

			/*
			 * try to use cached I/O if page allocation
			 * succeeds or fall back to in-place I/O instead
			 * to avoid any direct reclaim.
			 */
			newpage = erofs_allocpage(&fe->pagepool, gfp);
			if (!newpage)
				continue;
			set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
			t = (void *)((unsigned long)newpage | 1);
		}

		if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL, t))
			continue;

		if (page)
			put_page(page);
		else if (newpage)
			erofs_pagepool_add(&fe->pagepool, newpage);
	}

	/*
	 * don't do inplace I/O if all compressed pages are available in
	 * managed cache since it can be moved to the bypass queue instead.
	 */
	if (standalone)
		fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
}
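
/*
 * A note on the "| 1" trick above, spelled out: the lowest pointer bit
 * tags a compressed page slot as "just found" so that the submission
 * path can distinguish pages bound here from pages attached elsewhere.
 * pickup_page_for_submission() strips and checks the tag later on:
 *
 *	justfound = (unsigned long)page & 1UL;
 *	page = (struct page *)((unsigned long)page & ~1UL);
 *
 * This is safe since struct page pointers are always word-aligned.
 */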

/* called by erofs_shrinker to get rid of all cached compressed bvecs */
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
				       struct erofs_workgroup *grp)
{
	struct z_erofs_pcluster *const pcl =
		container_of(grp, struct z_erofs_pcluster, obj);
	int i;

	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
	/*
	 * the workgroup refcount is now frozen at 0, therefore no need
	 * to worry about concurrent decompression users.
	 */
	for (i = 0; i < pcl->pclusterpages; ++i) {
		struct page *page = pcl->compressed_bvecs[i].page;

		if (!page)
			continue;

		/* block other users from reclaiming or migrating the page */
		if (!trylock_page(page))
			return -EBUSY;

		if (!erofs_page_is_managed(sbi, page))
			continue;

		/* barrier is implied in the following 'unlock_page' */
		WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
		detach_page_private(page);
		unlock_page(page);
	}
	return 0;
}

static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
{
	struct z_erofs_pcluster *pcl = folio_get_private(folio);
	bool ret;
	int i;

	if (!folio_test_private(folio))
		return true;

	ret = false;
	spin_lock(&pcl->obj.lockref.lock);
	if (pcl->obj.lockref.count > 0)
		goto out;

	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
	for (i = 0; i < pcl->pclusterpages; ++i) {
		if (pcl->compressed_bvecs[i].page == &folio->page) {
			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
			ret = true;
			break;
		}
	}
	if (ret)
		folio_detach_private(folio);
out:
	spin_unlock(&pcl->obj.lockref.lock);
	return ret;
}

/*
 * It will be called only on inode eviction. In case that there are still some
 * decompression requests in progress, wait with rescheduling for a bit here.
 * An extra lock could be introduced instead but it seems unnecessary.
 */
static void z_erofs_cache_invalidate_folio(struct folio *folio,
					   size_t offset, size_t length)
{
	const size_t stop = length + offset;

	/* Check for potential overflow in debug mode */
	DBG_BUGON(stop > folio_size(folio) || stop < length);

	if (offset == 0 && stop == folio_size(folio))
		while (!z_erofs_cache_release_folio(folio, GFP_NOFS))
			cond_resched();
}

static const struct address_space_operations z_erofs_cache_aops = {
	.release_folio = z_erofs_cache_release_folio,
	.invalidate_folio = z_erofs_cache_invalidate_folio,
};

int erofs_init_managed_cache(struct super_block *sb)
{
	struct inode *const inode = new_inode(sb);

	if (!inode)
		return -ENOMEM;

	set_nlink(inode, 1);
	inode->i_size = OFFSET_MAX;
	inode->i_mapping->a_ops = &z_erofs_cache_aops;
	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	EROFS_SB(sb)->managed_cache = inode;
	return 0;
}

static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
				   struct z_erofs_bvec *bvec)
{
	struct z_erofs_pcluster *const pcl = fe->pcl;

	while (fe->icur > 0) {
		if (!cmpxchg(&pcl->compressed_bvecs[--fe->icur].page,
			     NULL, bvec->page)) {
			pcl->compressed_bvecs[fe->icur] = *bvec;
			return true;
		}
	}
	return false;
}

/* callers must hold the pcluster lock */
static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
			       struct z_erofs_bvec *bvec, bool exclusive)
{
	int ret;

	if (exclusive) {
		/* give priority to in-place I/O: use file pages first */
		if (z_erofs_try_inplace_io(fe, bvec))
			return 0;
		/* otherwise, check if it can be used as a bvpage */
		if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
		    !fe->candidate_bvpage)
			fe->candidate_bvpage = bvec->page;
	}
	ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage,
				   &fe->pagepool);
	fe->pcl->vcnt += (ret >= 0);
	return ret;
}

static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
{
	struct z_erofs_pcluster *pcl = f->pcl;
	z_erofs_next_pcluster_t *owned_head = &f->owned_head;

	/* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */
	if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
		    *owned_head) == Z_EROFS_PCLUSTER_NIL) {
		*owned_head = &pcl->next;
		/* so we can attach this pcluster to our submission chain. */
		f->mode = Z_EROFS_PCLUSTER_FOLLOWED;
		return;
	}

	/* type 2, it belongs to an ongoing chain */
	f->mode = Z_EROFS_PCLUSTER_INFLIGHT;
}
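
/*
 * A sketch of how claiming builds a chain (illustrative only):
 * f->owned_head starts at Z_EROFS_PCLUSTER_TAIL and always points at the
 * ->next slot of the most recently claimed pcluster, so pclusters are
 * linked head-first:
 *
 *	owned_head -> TAIL
 *	claim A:  A->next = TAIL,     owned_head = &A->next
 *	claim B:  B->next = &A->next, owned_head = &B->next
 *
 * i.e. the chain is walked as B -> A -> TAIL at submission time.  A
 * pcluster whose ->next is already non-NIL belongs to another chain in
 * flight and is only marked INFLIGHT here.
 */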

static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
{
	struct erofs_map_blocks *map = &fe->map;
	bool ztailpacking = map->m_flags & EROFS_MAP_META;
	struct z_erofs_pcluster *pcl;
	struct erofs_workgroup *grp;
	int err;

	if (!(map->m_flags & EROFS_MAP_ENCODED) ||
	    (!ztailpacking && !(map->m_pa >> PAGE_SHIFT))) {
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	/* no available pcluster, let's allocate one */
	pcl = z_erofs_alloc_pcluster(ztailpacking ? 1 :
				     map->m_plen >> PAGE_SHIFT);
	if (IS_ERR(pcl))
		return PTR_ERR(pcl);

	spin_lock_init(&pcl->obj.lockref.lock);
	pcl->algorithmformat = map->m_algorithmformat;
	pcl->length = 0;
	pcl->partial = true;

	/* new pclusters should be claimed as type 1, primary and followed */
	pcl->next = fe->owned_head;
	pcl->pageofs_out = map->m_la & ~PAGE_MASK;
	fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;

	/*
	 * lock all primary followed works before they become visible to
	 * others; mutex_trylock *never* fails for a new pcluster.
	 */
	mutex_init(&pcl->lock);
	DBG_BUGON(!mutex_trylock(&pcl->lock));

	if (ztailpacking) {
		pcl->obj.index = 0;	/* which indicates ztailpacking */
		pcl->pageofs_in = erofs_blkoff(fe->inode->i_sb, map->m_pa);
		pcl->tailpacking_size = map->m_plen;
	} else {
		pcl->obj.index = map->m_pa >> PAGE_SHIFT;

		grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj);
		if (IS_ERR(grp)) {
			err = PTR_ERR(grp);
			goto err_out;
		}

		if (grp != &pcl->obj) {
			fe->pcl = container_of(grp,
					struct z_erofs_pcluster, obj);
			err = -EEXIST;
			goto err_out;
		}
	}
	fe->owned_head = &pcl->next;
	fe->pcl = pcl;
	return 0;

err_out:
	mutex_unlock(&pcl->lock);
	z_erofs_free_pcluster(pcl);
	return err;
}

static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
{
	struct erofs_map_blocks *map = &fe->map;
	struct super_block *sb = fe->inode->i_sb;
	erofs_blk_t blknr = erofs_blknr(sb, map->m_pa);
	struct erofs_workgroup *grp = NULL;
	int ret;

	DBG_BUGON(fe->pcl);

	/* owned_head must be Z_EROFS_PCLUSTER_TAIL or point to the previous pcluster */
	DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);

	if (!(map->m_flags & EROFS_MAP_META)) {
		grp = erofs_find_workgroup(sb, blknr);
	} else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	if (grp) {
		fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
		ret = -EEXIST;
	} else {
		ret = z_erofs_register_pcluster(fe);
	}

	if (ret == -EEXIST) {
		mutex_lock(&fe->pcl->lock);
		z_erofs_try_to_claim_pcluster(fe);
	} else if (ret) {
		return ret;
	}

	z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
				Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
	if (!z_erofs_is_inline_pcluster(fe->pcl)) {
		/* bind cache first when cached decompression is preferred */
		z_erofs_bind_cache(fe);
	} else {
		void *mptr;

		mptr = erofs_read_metabuf(&map->buf, sb, blknr, EROFS_NO_KMAP);
		if (IS_ERR(mptr)) {
			ret = PTR_ERR(mptr);
			erofs_err(sb, "failed to get inline data %d", ret);
			return ret;
		}
		get_page(map->buf.page);
		WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, map->buf.page);
		fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
	}
	/* file-backed inplace I/O pages are traversed in reverse order */
	fe->icur = z_erofs_pclusterpages(fe->pcl);
	return 0;
}

/*
 * keep in mind that referenced pclusters are freed only after an RCU
 * grace period has elapsed.
 */
static void z_erofs_rcu_callback(struct rcu_head *head)
{
	z_erofs_free_pcluster(container_of(head,
			struct z_erofs_pcluster, rcu));
}

void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
{
	struct z_erofs_pcluster *const pcl =
		container_of(grp, struct z_erofs_pcluster, obj);

	call_rcu(&pcl->rcu, z_erofs_rcu_callback);
}

static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
{
	struct z_erofs_pcluster *pcl = fe->pcl;

	if (!pcl)
		return;

	z_erofs_bvec_iter_end(&fe->biter);
	mutex_unlock(&pcl->lock);

	if (fe->candidate_bvpage)
		fe->candidate_bvpage = NULL;

	/*
	 * once all pending pages are added, don't hold the pcluster
	 * reference any longer if it isn't hosted by ourselves.
	 */
	if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
		erofs_workgroup_put(&pcl->obj);

	fe->pcl = NULL;
	fe->backmost = false;
}

static int z_erofs_read_fragment(struct super_block *sb, struct page *page,
			unsigned int cur, unsigned int end, erofs_off_t pos)
{
	struct inode *packed_inode = EROFS_SB(sb)->packed_inode;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	unsigned int cnt;
	u8 *src;

	if (!packed_inode)
		return -EFSCORRUPTED;

	buf.inode = packed_inode;
	for (; cur < end; cur += cnt, pos += cnt) {
		cnt = min_t(unsigned int, end - cur,
			    sb->s_blocksize - erofs_blkoff(sb, pos));
		src = erofs_bread(&buf, erofs_blknr(sb, pos), EROFS_KMAP);
		if (IS_ERR(src)) {
			erofs_put_metabuf(&buf);
			return PTR_ERR(src);
		}
		memcpy_to_page(page, cur, src + erofs_blkoff(sb, pos), cnt);
	}
	erofs_put_metabuf(&buf);
	return 0;
}

static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
				struct page *page)
{
	struct inode *const inode = fe->inode;
	struct erofs_map_blocks *const map = &fe->map;
	const loff_t offset = page_offset(page);
	bool tight = true, exclusive;
	unsigned int cur, end, len, split;
	int err = 0;

	z_erofs_onlinepage_init(page);

	split = 0;
	end = PAGE_SIZE;
repeat:
	if (offset + end - 1 < map->m_la ||
	    offset + end - 1 >= map->m_la + map->m_llen) {
		z_erofs_pcluster_end(fe);
		map->m_la = offset + end - 1;
		map->m_llen = 0;
		err = z_erofs_map_blocks_iter(inode, map, 0);
		if (err)
			goto out;
	}

	cur = offset > map->m_la ? 0 : map->m_la - offset;
	/* bump split parts first to avoid several separate cases */
	++split;

	if (!(map->m_flags & EROFS_MAP_MAPPED)) {
		zero_user_segment(page, cur, end);
		tight = false;
		goto next_part;
	}

	if (map->m_flags & EROFS_MAP_FRAGMENT) {
		erofs_off_t fpos = offset + cur - map->m_la;

		len = min_t(unsigned int, map->m_llen - fpos, end - cur);
		err = z_erofs_read_fragment(inode->i_sb, page, cur, cur + len,
				EROFS_I(inode)->z_fragmentoff + fpos);
		if (err)
			goto out;
		tight = false;
		goto next_part;
	}

	if (!fe->pcl) {
		err = z_erofs_pcluster_begin(fe);
		if (err)
			goto out;
	}

	/*
	 * Ensure the current partial page belongs to this submit chain rather
	 * than other concurrent submit chains or the noio(bypass) chain since
	 * those chains are handled asynchronously thus the page cannot be used
	 * for inplace I/O or bvpage (should be processed in a strict order.)
	 */
	tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
	exclusive = (!cur && ((split <= 1) || tight));
	if (cur)
		tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED);

	err = z_erofs_attach_page(fe, &((struct z_erofs_bvec) {
					.page = page,
					.offset = offset - map->m_la,
					.end = end,
				  }), exclusive);
	if (err)
		goto out;

	z_erofs_onlinepage_split(page);
	if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
		fe->pcl->multibases = true;
	if (fe->pcl->length < offset + end - map->m_la) {
		fe->pcl->length = offset + end - map->m_la;
		fe->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
	}
	if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
	    !(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
	    fe->pcl->length == map->m_llen)
		fe->pcl->partial = false;
next_part:
	/* shorten the remaining extent to update progress */
	map->m_llen = offset + cur - map->m_la;
	map->m_flags &= ~EROFS_MAP_FULL_MAPPED;

	end = cur;
	if (end > 0)
		goto repeat;

out:
	if (err)
		z_erofs_page_mark_eio(page);
	z_erofs_onlinepage_endio(page);
	return err;
}

static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
				       unsigned int readahead_pages)
{
	/* auto: enable for read_folio, disable for readahead */
	if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) &&
	    !readahead_pages)
		return true;

	if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON) &&
	    (readahead_pages <= sbi->opt.max_sync_decompress_pages))
		return true;

	return false;
}
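
/*
 * The policy above as a small truth table (a restatement, not new
 * behavior), where "ra" is the readahead page count and "max" is
 * sbi->opt.max_sync_decompress_pages:
 *
 *	sync_decompress		ra == 0		0 < ra <= max	ra > max
 *	AUTO			sync		async		async
 *	FORCE_ON		sync		sync		async
 *	(anything else)		async		async		async
 */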

static bool z_erofs_page_is_invalidated(struct page *page)
{
	return !page->mapping && !z_erofs_is_shortlived_page(page);
}

struct z_erofs_decompress_backend {
	struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
	struct super_block *sb;
	struct z_erofs_pcluster *pcl;

	/* pages with the longest decompressed length for deduplication */
	struct page **decompressed_pages;
	/* pages to keep the compressed data */
	struct page **compressed_pages;

	struct list_head decompressed_secondary_bvecs;
	struct page **pagepool;
	unsigned int onstack_used, nr_pages;
};

struct z_erofs_bvec_item {
	struct z_erofs_bvec bvec;
	struct list_head list;
};

static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
					 struct z_erofs_bvec *bvec)
{
	struct z_erofs_bvec_item *item;
	unsigned int pgnr;

	if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK) &&
	    (bvec->end == PAGE_SIZE ||
	     bvec->offset + bvec->end == be->pcl->length)) {
		pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
		DBG_BUGON(pgnr >= be->nr_pages);
		if (!be->decompressed_pages[pgnr]) {
			be->decompressed_pages[pgnr] = bvec->page;
			return;
		}
	}

	/* (cold path) one pcluster is requested multiple times */
	item = kmalloc(sizeof(*item), GFP_KERNEL | __GFP_NOFAIL);
	item->bvec = *bvec;
	list_add(&item->list, &be->decompressed_secondary_bvecs);
}

static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
				      int err)
{
	unsigned int off0 = be->pcl->pageofs_out;
	struct list_head *p, *n;

	list_for_each_safe(p, n, &be->decompressed_secondary_bvecs) {
		struct z_erofs_bvec_item *bvi;
		unsigned int end, cur;
		void *dst, *src;

		bvi = container_of(p, struct z_erofs_bvec_item, list);
		cur = bvi->bvec.offset < 0 ? -bvi->bvec.offset : 0;
		end = min_t(unsigned int, be->pcl->length - bvi->bvec.offset,
			    bvi->bvec.end);
		dst = kmap_local_page(bvi->bvec.page);
		while (cur < end) {
			unsigned int pgnr, scur, len;

			pgnr = (bvi->bvec.offset + cur + off0) >> PAGE_SHIFT;
			DBG_BUGON(pgnr >= be->nr_pages);

			scur = bvi->bvec.offset + cur -
					((pgnr << PAGE_SHIFT) - off0);
			len = min_t(unsigned int, end - cur, PAGE_SIZE - scur);
			if (!be->decompressed_pages[pgnr]) {
				err = -EFSCORRUPTED;
				cur += len;
				continue;
			}
			src = kmap_local_page(be->decompressed_pages[pgnr]);
			memcpy(dst + cur, src + scur, len);
			kunmap_local(src);
			cur += len;
		}
		kunmap_local(dst);
		if (err)
			z_erofs_page_mark_eio(bvi->bvec.page);
		z_erofs_onlinepage_endio(bvi->bvec.page);
		list_del(p);
		kfree(bvi);
	}
}

static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
{
	struct z_erofs_pcluster *pcl = be->pcl;
	struct z_erofs_bvec_iter biter;
	struct page *old_bvpage;
	int i;

	z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0);
	for (i = 0; i < pcl->vcnt; ++i) {
		struct z_erofs_bvec bvec;

		z_erofs_bvec_dequeue(&biter, &bvec, &old_bvpage);

		if (old_bvpage)
			z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);

		DBG_BUGON(z_erofs_page_is_invalidated(bvec.page));
		z_erofs_do_decompressed_bvec(be, &bvec);
	}

	old_bvpage = z_erofs_bvec_iter_end(&biter);
	if (old_bvpage)
		z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
}

static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
				  bool *overlapped)
{
	struct z_erofs_pcluster *pcl = be->pcl;
	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
	int i, err = 0;

	*overlapped = false;
	for (i = 0; i < pclusterpages; ++i) {
		struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i];
		struct page *page = bvec->page;

		/* compressed pages ought to be present before decompressing */
		if (!page) {
			DBG_BUGON(1);
			continue;
		}
		be->compressed_pages[i] = page;

		if (z_erofs_is_inline_pcluster(pcl)) {
			if (!PageUptodate(page))
				err = -EIO;
			continue;
		}

		DBG_BUGON(z_erofs_page_is_invalidated(page));
		if (!z_erofs_is_shortlived_page(page)) {
			if (erofs_page_is_managed(EROFS_SB(be->sb), page)) {
				if (!PageUptodate(page))
					err = -EIO;
				continue;
			}
			z_erofs_do_decompressed_bvec(be, bvec);
			*overlapped = true;
		}
	}
	return err;
}

static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
				       int err)
{
	struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
	struct z_erofs_pcluster *pcl = be->pcl;
	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
	const struct z_erofs_decompressor *decompressor =
				&erofs_decompressors[pcl->algorithmformat];
	unsigned int i, inputsize;
	int err2;
	struct page *page;
	bool overlapped;

	mutex_lock(&pcl->lock);
	be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT;

	/* allocate (de)compressed page arrays if they cannot be kept on stack */
	be->decompressed_pages = NULL;
	be->compressed_pages = NULL;
	be->onstack_used = 0;
	if (be->nr_pages <= Z_EROFS_ONSTACK_PAGES) {
		be->decompressed_pages = be->onstack_pages;
		be->onstack_used = be->nr_pages;
		memset(be->decompressed_pages, 0,
		       sizeof(struct page *) * be->nr_pages);
	}

	if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES)
		be->compressed_pages = be->onstack_pages + be->onstack_used;

	if (!be->decompressed_pages)
		be->decompressed_pages =
			kvcalloc(be->nr_pages, sizeof(struct page *),
				 GFP_KERNEL | __GFP_NOFAIL);
	if (!be->compressed_pages)
		be->compressed_pages =
			kvcalloc(pclusterpages, sizeof(struct page *),
				 GFP_KERNEL | __GFP_NOFAIL);

	z_erofs_parse_out_bvecs(be);
	err2 = z_erofs_parse_in_bvecs(be, &overlapped);
	if (err2)
		err = err2;
	if (err)
		goto out;

	if (z_erofs_is_inline_pcluster(pcl))
		inputsize = pcl->tailpacking_size;
	else
		inputsize = pclusterpages * PAGE_SIZE;

	err = decompressor->decompress(&(struct z_erofs_decompress_req) {
					.sb = be->sb,
					.in = be->compressed_pages,
					.out = be->decompressed_pages,
					.pageofs_in = pcl->pageofs_in,
					.pageofs_out = pcl->pageofs_out,
					.inputsize = inputsize,
					.outputsize = pcl->length,
					.alg = pcl->algorithmformat,
					.inplace_io = overlapped,
					.partial_decoding = pcl->partial,
					.fillgaps = pcl->multibases,
				 }, be->pagepool);

out:
	/* must handle all compressed pages before actual file pages */
	if (z_erofs_is_inline_pcluster(pcl)) {
		page = pcl->compressed_bvecs[0].page;
		WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
		put_page(page);
	} else {
		for (i = 0; i < pclusterpages; ++i) {
			page = pcl->compressed_bvecs[i].page;

			if (erofs_page_is_managed(sbi, page))
				continue;

			/* recycle all individual short-lived pages */
			(void)z_erofs_put_shortlivedpage(be->pagepool, page);
			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
		}
	}
	if (be->compressed_pages < be->onstack_pages ||
	    be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
		kvfree(be->compressed_pages);
	z_erofs_fill_other_copies(be, err);

	for (i = 0; i < be->nr_pages; ++i) {
		page = be->decompressed_pages[i];
		if (!page)
			continue;

		DBG_BUGON(z_erofs_page_is_invalidated(page));

		/* recycle all individual short-lived pages */
		if (z_erofs_put_shortlivedpage(be->pagepool, page))
			continue;
		if (err)
			z_erofs_page_mark_eio(page);
		z_erofs_onlinepage_endio(page);
	}

	if (be->decompressed_pages != be->onstack_pages)
		kvfree(be->decompressed_pages);

	pcl->length = 0;
	pcl->partial = true;
	pcl->multibases = false;
	pcl->bvset.nextpage = NULL;
	pcl->vcnt = 0;

	/* pcluster lock MUST be taken before the following line */
	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
	mutex_unlock(&pcl->lock);
	return err;
}

static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
				     struct page **pagepool)
{
	struct z_erofs_decompress_backend be = {
		.sb = io->sb,
		.pagepool = pagepool,
		.decompressed_secondary_bvecs =
			LIST_HEAD_INIT(be.decompressed_secondary_bvecs),
	};
	z_erofs_next_pcluster_t owned = io->head;

	while (owned != Z_EROFS_PCLUSTER_TAIL) {
		DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);

		be.pcl = container_of(owned, struct z_erofs_pcluster, next);
		owned = READ_ONCE(be.pcl->next);

		z_erofs_decompress_pcluster(&be, io->eio ? -EIO : 0);
		erofs_workgroup_put(&be.pcl->obj);
	}
}

static void z_erofs_decompressqueue_work(struct work_struct *work)
{
	struct z_erofs_decompressqueue *bgq =
		container_of(work, struct z_erofs_decompressqueue, u.work);
	struct page *pagepool = NULL;

	DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL);
	z_erofs_decompress_queue(bgq, &pagepool);
	erofs_release_pages(&pagepool);
	kvfree(bgq);
}

#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
static void z_erofs_decompressqueue_kthread_work(struct kthread_work *work)
{
	z_erofs_decompressqueue_work((struct work_struct *)work);
}
#endif

static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
				       int bios)
{
	struct erofs_sb_info *const sbi = EROFS_SB(io->sb);

	/* wake up the caller thread for sync decompression */
	if (io->sync) {
		if (!atomic_add_return(bios, &io->pending_bios))
			complete(&io->u.done);
		return;
	}

	if (atomic_add_return(bios, &io->pending_bios))
		return;
	/* Use (kthread_)work and sync decompression for atomic contexts only */
	if (!in_task() || irqs_disabled() || rcu_read_lock_any_held()) {
#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
		struct kthread_worker *worker;

		rcu_read_lock();
		worker = rcu_dereference(
				z_erofs_pcpu_workers[raw_smp_processor_id()]);
		if (!worker) {
			INIT_WORK(&io->u.work, z_erofs_decompressqueue_work);
			queue_work(z_erofs_workqueue, &io->u.work);
		} else {
			kthread_queue_work(worker, &io->u.kthread_work);
		}
		rcu_read_unlock();
#else
		queue_work(z_erofs_workqueue, &io->u.work);
#endif
		/* enable sync decompression for readahead */
		if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
			sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
		return;
	}
	z_erofs_decompressqueue_work(&io->u.work);
}
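
/*
 * An illustrative sketch of the pending_bios accounting above:
 * z_erofs_submit_queue() finishes with a single kickoff of +nr_bios,
 * while every bio completion kicks off with -1.  Whichever call drives
 * the sum to zero is the last one; it completes the waiter (sync) or
 * queues the decompression work (async):
 *
 *	3 bios submitted, kickoff(+3)	-> pending_bios = 3 - n_done
 *	kickoff(-1) x3			-> 0: run/queue decompression
 *
 * Ordering doesn't matter: completions that arrive before the +nr_bios
 * kickoff merely drive the counter negative for a while.
 */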

static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
					       unsigned int nr,
					       struct page **pagepool,
					       struct address_space *mc)
{
	const pgoff_t index = pcl->obj.index;
	gfp_t gfp = mapping_gfp_mask(mc);
	bool tocache = false;

	struct address_space *mapping;
	struct page *oldpage, *page;
	int justfound;

repeat:
	page = READ_ONCE(pcl->compressed_bvecs[nr].page);
	oldpage = page;

	if (!page)
		goto out_allocpage;

	justfound = (unsigned long)page & 1UL;
	page = (struct page *)((unsigned long)page & ~1UL);

	/*
	 * a preallocated cached page is used to avoid direct reclaim;
	 * otherwise, the inplace I/O path is taken instead.
	 */
	if (page->private == Z_EROFS_PREALLOCATED_PAGE) {
		WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);
		set_page_private(page, 0);
		tocache = true;
		goto out_tocache;
	}
	mapping = READ_ONCE(page->mapping);

	/*
	 * file-backed online pages in the pcluster are all locked steadily,
	 * therefore it is impossible for `mapping' to be NULL.
	 */
	if (mapping && mapping != mc)
		/* ought to be unmanaged pages */
		goto out;

	/* directly return for shortlived page as well */
	if (z_erofs_is_shortlived_page(page))
		goto out;

	lock_page(page);

	/* only true if page reclaim goes wrong, should never happen */
	DBG_BUGON(justfound && PagePrivate(page));

	/* the page is still in the managed cache */
	if (page->mapping == mc) {
		WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);

		if (!PagePrivate(page)) {
			/*
			 * under the current restriction, a page already in
			 * compressed_bvecs[] can only be !PagePrivate(page)
			 * if it was just found.
			 */
			DBG_BUGON(!justfound);

			justfound = 0;
			set_page_private(page, (unsigned long)pcl);
			SetPagePrivate(page);
		}

		/* no need to submit I/O if it is already up-to-date */
		if (PageUptodate(page)) {
			unlock_page(page);
			page = NULL;
		}
		goto out;
	}

	/*
	 * the managed page has been truncated, it's unsafe to
	 * reuse this one, let's allocate a new cache-managed page.
	 */
	DBG_BUGON(page->mapping);
	DBG_BUGON(!justfound);

	tocache = true;
	unlock_page(page);
	put_page(page);
out_allocpage:
	page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
	if (oldpage != cmpxchg(&pcl->compressed_bvecs[nr].page,
			       oldpage, page)) {
		erofs_pagepool_add(pagepool, page);
		cond_resched();
		goto repeat;
	}
out_tocache:
	if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
		/* turn it into a short-lived page on failure (1 ref) */
		set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);
		goto out;
	}
	attach_page_private(page, pcl);
	/* drop a refcount added by allocpage (then we have 2 refs here) */
	put_page(page);

out:	/* the only exit (for tracing and debugging) */
	return page;
}

static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb,
			      struct z_erofs_decompressqueue *fgq, bool *fg)
{
	struct z_erofs_decompressqueue *q;

	if (fg && !*fg) {
		q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
		if (!q) {
			*fg = true;
			goto fg_out;
		}
#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
		kthread_init_work(&q->u.kthread_work,
				  z_erofs_decompressqueue_kthread_work);
#else
		INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
#endif
	} else {
fg_out:
		q = fgq;
		init_completion(&fgq->u.done);
		atomic_set(&fgq->pending_bios, 0);
		q->eio = false;
		q->sync = true;
	}
	q->sb = sb;
	q->head = Z_EROFS_PCLUSTER_TAIL;
	return q;
}

/* define decompression jobqueue types */
enum {
	JQ_BYPASS,
	JQ_SUBMIT,
	NR_JOBQUEUES,
};

static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
				    z_erofs_next_pcluster_t qtail[],
				    z_erofs_next_pcluster_t owned_head)
{
	z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
	z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];

	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL);

	WRITE_ONCE(*submit_qtail, owned_head);
	WRITE_ONCE(*bypass_qtail, &pcl->next);

	qtail[JQ_BYPASS] = &pcl->next;
}
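
/*
 * A sketch of the unlink-and-append above (illustrative only): given a
 * submission chain "... -> pcl -> rest" where owned_head already points
 * at "rest", the pcluster is spliced out of the submit queue and becomes
 * the new tail of the bypass queue:
 *
 *	submit:  ... -> rest		(pcl unlinked)
 *	bypass:  ... -> pcl -> TAIL	(pcl appended)
 *
 * Both queues thread through the same ->next field, which is why a
 * pcluster can only ever sit on one of them at a time.
 */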

static void z_erofs_decompressqueue_endio(struct bio *bio)
{
	struct z_erofs_decompressqueue *q = bio->bi_private;
	blk_status_t err = bio->bi_status;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		DBG_BUGON(PageUptodate(page));
		DBG_BUGON(z_erofs_page_is_invalidated(page));

		if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
			if (!err)
				SetPageUptodate(page);
			unlock_page(page);
		}
	}
	if (err)
		q->eio = true;
	z_erofs_decompress_kickoff(q, -1);
	bio_put(bio);
}

static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
				 struct z_erofs_decompressqueue *fgq,
				 bool *force_fg, bool readahead)
{
	struct super_block *sb = f->inode->i_sb;
	struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
	struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
	z_erofs_next_pcluster_t owned_head = f->owned_head;
	/* bio is NULL initially, so no need to initialize last_{index,bdev} */
	pgoff_t last_index;
	struct block_device *last_bdev;
	unsigned int nr_bios = 0;
	struct bio *bio = NULL;
	unsigned long pflags;
	int memstall = 0;

	/*
	 * if managed cache is enabled, a bypass jobqueue is needed:
	 * pclusters in that queue don't need to read from the device at all.
	 */
	q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
	q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, force_fg);

	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;

	/* by default, all pclusters need I/O submission */
	q[JQ_SUBMIT]->head = owned_head;

	do {
		struct erofs_map_dev mdev;
		struct z_erofs_pcluster *pcl;
		pgoff_t cur, end;
		unsigned int i = 0;
		bool bypass = true;

		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
		pcl = container_of(owned_head, struct z_erofs_pcluster, next);
		owned_head = READ_ONCE(pcl->next);

		if (z_erofs_is_inline_pcluster(pcl)) {
			move_to_bypass_jobqueue(pcl, qtail, owned_head);
			continue;
		}

		/* no device id here, thus it will always succeed */
		mdev = (struct erofs_map_dev) {
			.m_pa = erofs_pos(sb, pcl->obj.index),
		};
		(void)erofs_map_dev(sb, &mdev);

		cur = erofs_blknr(sb, mdev.m_pa);
		end = cur + pcl->pclusterpages;

		do {
			struct page *page;

			page = pickup_page_for_submission(pcl, i++,
					&f->pagepool, mc);
			if (!page)
				continue;

			if (bio && (cur != last_index + 1 ||
				    last_bdev != mdev.m_bdev)) {
submit_bio_retry:
				submit_bio(bio);
				if (memstall) {
					psi_memstall_leave(&pflags);
					memstall = 0;
				}
				bio = NULL;
			}

			if (unlikely(PageWorkingset(page)) && !memstall) {
				psi_memstall_enter(&pflags);
				memstall = 1;
			}

			if (!bio) {
				bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
						REQ_OP_READ, GFP_NOIO);
				bio->bi_end_io = z_erofs_decompressqueue_endio;

				last_bdev = mdev.m_bdev;
				bio->bi_iter.bi_sector = (sector_t)cur <<
					(sb->s_blocksize_bits - 9);
				bio->bi_private = q[JQ_SUBMIT];
				if (readahead)
					bio->bi_opf |= REQ_RAHEAD;
				++nr_bios;
			}

			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				goto submit_bio_retry;

			last_index = cur;
			bypass = false;
		} while (++cur < end);

		if (!bypass)
			qtail[JQ_SUBMIT] = &pcl->next;
		else
			move_to_bypass_jobqueue(pcl, qtail, owned_head);
	} while (owned_head != Z_EROFS_PCLUSTER_TAIL);

	if (bio) {
		submit_bio(bio);
		if (memstall)
			psi_memstall_leave(&pflags);
	}

	/*
	 * although background decompression is preferred, nothing is
	 * pending for submission here; don't kick off decompression,
	 * just drop the queue directly instead.
	 */
	if (!*force_fg && !nr_bios) {
		kvfree(q[JQ_SUBMIT]);
		return;
	}
	z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios);
}

static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
			     bool force_fg, bool ra)
{
	struct z_erofs_decompressqueue io[NR_JOBQUEUES];

	if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
		return;
	z_erofs_submit_queue(f, io, &force_fg, ra);

	/* handle bypass queue (no I/O pclusters) immediately */
	z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);

	if (!force_fg)
		return;

	/* wait until all bios are completed */
	wait_for_completion_io(&io[JQ_SUBMIT].u.done);

	/* handle the synchronous decompress queue in the caller context */
	z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool);
}

/*
 * Since partial uptodate is still unimplemented for now, we have to use
 * approximate readmore strategies as a start.
 */
static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
		struct readahead_control *rac, bool backmost)
{
	struct inode *inode = f->inode;
	struct erofs_map_blocks *map = &f->map;
	erofs_off_t cur, end, headoffset = f->headoffset;
	int err;

	if (backmost) {
		if (rac)
			end = headoffset + readahead_length(rac) - 1;
		else
			end = headoffset + PAGE_SIZE - 1;
		map->m_la = end;
		err = z_erofs_map_blocks_iter(inode, map,
					      EROFS_GET_BLOCKS_READMORE);
		if (err)
			return;

		/* expand ra for the trailing edge if readahead */
		if (rac) {
			cur = round_up(map->m_la + map->m_llen, PAGE_SIZE);
			readahead_expand(rac, headoffset, cur - headoffset);
			return;
		}
		end = round_up(end, PAGE_SIZE);
	} else {
		end = round_up(map->m_la, PAGE_SIZE);

		if (!map->m_llen)
			return;
	}

	cur = map->m_la + map->m_llen - 1;
	while ((cur >= end) && (cur < i_size_read(inode))) {
		pgoff_t index = cur >> PAGE_SHIFT;
		struct page *page;

		page = erofs_grab_cache_page_nowait(inode->i_mapping, index);
		if (page) {
			if (PageUptodate(page))
				unlock_page(page);
			else
				(void)z_erofs_do_read_page(f, page);
			put_page(page);
		}

		if (cur < PAGE_SIZE)
			break;
		cur = (index << PAGE_SHIFT) - 1;
	}
}

static int z_erofs_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;
	struct inode *const inode = page->mapping->host;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
	int err;

	trace_erofs_readpage(page, false);
	f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;

	z_erofs_pcluster_readmore(&f, NULL, true);
	err = z_erofs_do_read_page(&f, page);
	z_erofs_pcluster_readmore(&f, NULL, false);
	z_erofs_pcluster_end(&f);

	/* if some compressed clusters are ready, submit them anyway */
	z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, 0), false);

	if (err && err != -EINTR)
		erofs_err(inode->i_sb, "read error %d @ %lu of nid %llu",
			  err, folio->index, EROFS_I(inode)->nid);

	erofs_put_metabuf(&f.map.buf);
	erofs_release_pages(&f.pagepool);
	return err;
}

static void z_erofs_readahead(struct readahead_control *rac)
{
	struct inode *const inode = rac->mapping->host;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
	struct page *head = NULL, *page;
	unsigned int nr_pages;

	f.headoffset = readahead_pos(rac);

	z_erofs_pcluster_readmore(&f, rac, true);
	nr_pages = readahead_count(rac);
	trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);

	while ((page = readahead_page(rac))) {
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	while (head) {
		struct page *page = head;
		int err;

		/* traversal in reverse order */
		head = (void *)page_private(page);

		err = z_erofs_do_read_page(&f, page);
		if (err && err != -EINTR)
			erofs_err(inode->i_sb, "readahead error %d @ %lu of nid %llu",
				  err, page->index, EROFS_I(inode)->nid);
		put_page(page);
	}
	z_erofs_pcluster_readmore(&f, rac, false);
	z_erofs_pcluster_end(&f);

	z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, nr_pages), true);
	erofs_put_metabuf(&f.map.buf);
	erofs_release_pages(&f.pagepool);
}

const struct address_space_operations z_erofs_aops = {
	.read_folio = z_erofs_read_folio,
	.readahead = z_erofs_readahead,
};
1909