xref: /openbmc/linux/fs/f2fs/compress.c (revision 2984f26a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * f2fs compress support
4  *
5  * Copyright (c) 2019 Chao Yu <chao@kernel.org>
6  */
7 
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/moduleparam.h>
11 #include <linux/writeback.h>
12 #include <linux/backing-dev.h>
13 #include <linux/lzo.h>
14 #include <linux/lz4.h>
15 #include <linux/zstd.h>
16 #include <linux/pagevec.h>
17 
18 #include "f2fs.h"
19 #include "node.h"
20 #include "segment.h"
21 #include <trace/events/f2fs.h>
22 
23 static struct kmem_cache *cic_entry_slab;
24 static struct kmem_cache *dic_entry_slab;
25 
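/*
 * rpages/cpages arrays normally hold cluster_size page pointers, which
 * fit in the per-sb page_array_slab (sized from compress_log_size at
 * mount time, see f2fs_init_page_array_cache() below); oversized
 * requests fall back to the generic allocator.
 */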
26 static void *page_array_alloc(struct inode *inode, int nr)
27 {
28 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
29 	unsigned int size = sizeof(struct page *) * nr;
30 
31 	if (likely(size <= sbi->page_array_slab_size))
32 		return f2fs_kmem_cache_alloc(sbi->page_array_slab,
33 					GFP_F2FS_ZERO, false, sbi);
34 	return f2fs_kzalloc(sbi, size, GFP_NOFS);
35 }
36 
37 static void page_array_free(struct inode *inode, void *pages, int nr)
38 {
39 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
40 	unsigned int size = sizeof(struct page *) * nr;
41 
42 	if (!pages)
43 		return;
44 
45 	if (likely(size <= sbi->page_array_slab_size))
46 		kmem_cache_free(sbi->page_array_slab, pages);
47 	else
48 		kfree(pages);
49 }
50 
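/*
 * Per-algorithm backend operations.  Only compress_pages() and
 * decompress_pages() are mandatory; the context setup/teardown and
 * level-validation hooks are optional and may be left NULL.
 */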
51 struct f2fs_compress_ops {
52 	int (*init_compress_ctx)(struct compress_ctx *cc);
53 	void (*destroy_compress_ctx)(struct compress_ctx *cc);
54 	int (*compress_pages)(struct compress_ctx *cc);
55 	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
56 	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
57 	int (*decompress_pages)(struct decompress_io_ctx *dic);
58 	bool (*is_level_valid)(int level);
59 };
60 
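/*
 * Cluster geometry helpers.  For example, with cluster_size = 4
 * (log_cluster_size = 2), page index 5 lands in cluster 1 at offset 1,
 * and start_idx_of_cluster() for that cluster returns page index 4.
 */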
61 static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
62 {
63 	return index & (cc->cluster_size - 1);
64 }
65 
66 static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
67 {
68 	return index >> cc->log_cluster_size;
69 }
70 
71 static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
72 {
73 	return cc->cluster_idx << cc->log_cluster_size;
74 }
75 
76 bool f2fs_is_compressed_page(struct page *page)
77 {
78 	if (!PagePrivate(page))
79 		return false;
80 	if (!page_private(page))
81 		return false;
82 	if (page_private_nonpointer(page))
83 		return false;
84 
85 	f2fs_bug_on(F2FS_M_SB(page->mapping),
86 		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
87 	return true;
88 }
89 
90 static void f2fs_set_compressed_page(struct page *page,
91 		struct inode *inode, pgoff_t index, void *data)
92 {
93 	attach_page_private(page, data);
94 
95 	/* i_crypto_info and iv index */
96 	page->index = index;
97 	page->mapping = inode->i_mapping;
98 }
99 
100 static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
101 {
102 	int i;
103 
104 	for (i = 0; i < len; i++) {
105 		if (!cc->rpages[i])
106 			continue;
107 		if (unlock)
108 			unlock_page(cc->rpages[i]);
109 		else
110 			put_page(cc->rpages[i]);
111 	}
112 }
113 
114 static void f2fs_put_rpages(struct compress_ctx *cc)
115 {
116 	f2fs_drop_rpages(cc, cc->cluster_size, false);
117 }
118 
119 static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
120 {
121 	f2fs_drop_rpages(cc, len, true);
122 }
123 
124 static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
125 		struct writeback_control *wbc, bool redirty, int unlock)
126 {
127 	unsigned int i;
128 
129 	for (i = 0; i < cc->cluster_size; i++) {
130 		if (!cc->rpages[i])
131 			continue;
132 		if (redirty)
133 			redirty_page_for_writepage(wbc, cc->rpages[i]);
134 		f2fs_put_page(cc->rpages[i], unlock);
135 	}
136 }
137 
138 struct page *f2fs_compress_control_page(struct page *page)
139 {
140 	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
141 }
142 
143 int f2fs_init_compress_ctx(struct compress_ctx *cc)
144 {
145 	if (cc->rpages)
146 		return 0;
147 
148 	cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
149 	return cc->rpages ? 0 : -ENOMEM;
150 }
151 
152 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
153 {
154 	page_array_free(cc->inode, cc->rpages, cc->cluster_size);
155 	cc->rpages = NULL;
156 	cc->nr_rpages = 0;
157 	cc->nr_cpages = 0;
158 	cc->valid_nr_cpages = 0;
159 	if (!reuse)
160 		cc->cluster_idx = NULL_CLUSTER;
161 }
162 
163 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
164 {
165 	unsigned int cluster_ofs;
166 
167 	if (!f2fs_cluster_can_merge_page(cc, page->index))
168 		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
169 
170 	cluster_ofs = offset_in_cluster(cc, page->index);
171 	cc->rpages[cluster_ofs] = page;
172 	cc->nr_rpages++;
173 	cc->cluster_idx = cluster_idx(cc, page->index);
174 }
175 
176 #ifdef CONFIG_F2FS_FS_LZO
177 static int lzo_init_compress_ctx(struct compress_ctx *cc)
178 {
179 	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
180 				LZO1X_MEM_COMPRESS, GFP_NOFS);
181 	if (!cc->private)
182 		return -ENOMEM;
183 
184 	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
185 	return 0;
186 }
187 
188 static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
189 {
190 	kvfree(cc->private);
191 	cc->private = NULL;
192 }
193 
194 static int lzo_compress_pages(struct compress_ctx *cc)
195 {
196 	int ret;
197 
198 	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
199 					&cc->clen, cc->private);
200 	if (ret != LZO_E_OK) {
201 		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
202 				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
203 		return -EIO;
204 	}
205 	return 0;
206 }
207 
208 static int lzo_decompress_pages(struct decompress_io_ctx *dic)
209 {
210 	int ret;
211 
212 	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
213 						dic->rbuf, &dic->rlen);
214 	if (ret != LZO_E_OK) {
215 		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
216 				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
217 		return -EIO;
218 	}
219 
220 	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
221 		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
222 					"expected:%lu\n", KERN_ERR,
223 					F2FS_I_SB(dic->inode)->sb->s_id,
224 					dic->rlen,
225 					PAGE_SIZE << dic->log_cluster_size);
226 		return -EIO;
227 	}
228 	return 0;
229 }
230 
231 static const struct f2fs_compress_ops f2fs_lzo_ops = {
232 	.init_compress_ctx	= lzo_init_compress_ctx,
233 	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
234 	.compress_pages		= lzo_compress_pages,
235 	.decompress_pages	= lzo_decompress_pages,
236 };
237 #endif
238 
239 #ifdef CONFIG_F2FS_FS_LZ4
240 static int lz4_init_compress_ctx(struct compress_ctx *cc)
241 {
242 	unsigned int size = LZ4_MEM_COMPRESS;
243 
244 #ifdef CONFIG_F2FS_FS_LZ4HC
245 	if (F2FS_I(cc->inode)->i_compress_level)
246 		size = LZ4HC_MEM_COMPRESS;
247 #endif
248 
249 	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
250 	if (!cc->private)
251 		return -ENOMEM;
252 
253 	/*
254 	 * We do not set cc->clen to LZ4_compressBound(inputsize) to cover
255 	 * the worst compression case, because the lz4 compressor handles
256 	 * the output budget properly on its own.
257 	 */
258 	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
259 	return 0;
260 }
261 
262 static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
263 {
264 	kvfree(cc->private);
265 	cc->private = NULL;
266 }
267 
268 static int lz4_compress_pages(struct compress_ctx *cc)
269 {
270 	int len = -EINVAL;
271 	unsigned char level = F2FS_I(cc->inode)->i_compress_level;
272 
273 	if (!level)
274 		len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
275 						cc->clen, cc->private);
276 #ifdef CONFIG_F2FS_FS_LZ4HC
277 	else
278 		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
279 					cc->clen, level, cc->private);
280 #endif
281 	if (len < 0)
282 		return len;
283 	if (!len)
284 		return -EAGAIN;
285 
286 	cc->clen = len;
287 	return 0;
288 }
289 
290 static int lz4_decompress_pages(struct decompress_io_ctx *dic)
291 {
292 	int ret;
293 
294 	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
295 						dic->clen, dic->rlen);
296 	if (ret < 0) {
297 		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
298 				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
299 		return -EIO;
300 	}
301 
302 	if (ret != PAGE_SIZE << dic->log_cluster_size) {
303 		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
304 					"expected:%lu\n", KERN_ERR,
305 					F2FS_I_SB(dic->inode)->sb->s_id, ret,
306 					PAGE_SIZE << dic->log_cluster_size);
307 		return -EIO;
308 	}
309 	return 0;
310 }
311 
312 static bool lz4_is_level_valid(int lvl)
313 {
314 #ifdef CONFIG_F2FS_FS_LZ4HC
315 	return !lvl || (lvl >= LZ4HC_MIN_CLEVEL && lvl <= LZ4HC_MAX_CLEVEL);
316 #else
317 	return lvl == 0;
318 #endif
319 }
320 
321 static const struct f2fs_compress_ops f2fs_lz4_ops = {
322 	.init_compress_ctx	= lz4_init_compress_ctx,
323 	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
324 	.compress_pages		= lz4_compress_pages,
325 	.decompress_pages	= lz4_decompress_pages,
326 	.is_level_valid		= lz4_is_level_valid,
327 };
328 #endif
329 
330 #ifdef CONFIG_F2FS_FS_ZSTD
331 static int zstd_init_compress_ctx(struct compress_ctx *cc)
332 {
333 	zstd_parameters params;
334 	zstd_cstream *stream;
335 	void *workspace;
336 	unsigned int workspace_size;
337 	unsigned char level = F2FS_I(cc->inode)->i_compress_level;
338 
339 	/* Keep using the default level for backward compatibility */
340 	if (!level)
341 		level = F2FS_ZSTD_DEFAULT_CLEVEL;
342 
343 	params = zstd_get_params(level, cc->rlen);
344 	workspace_size = zstd_cstream_workspace_bound(&params.cParams);
345 
346 	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
347 					workspace_size, GFP_NOFS);
348 	if (!workspace)
349 		return -ENOMEM;
350 
351 	stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
352 	if (!stream) {
353 		printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_cstream failed\n",
354 				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
355 				__func__);
356 		kvfree(workspace);
357 		return -EIO;
358 	}
359 
360 	cc->private = workspace;
361 	cc->private2 = stream;
362 
363 	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
364 	return 0;
365 }
366 
367 static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
368 {
369 	kvfree(cc->private);
370 	cc->private = NULL;
371 	cc->private2 = NULL;
372 }
373 
374 static int zstd_compress_pages(struct compress_ctx *cc)
375 {
376 	zstd_cstream *stream = cc->private2;
377 	zstd_in_buffer inbuf;
378 	zstd_out_buffer outbuf;
379 	int src_size = cc->rlen;
380 	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
381 	int ret;
382 
383 	inbuf.pos = 0;
384 	inbuf.src = cc->rbuf;
385 	inbuf.size = src_size;
386 
387 	outbuf.pos = 0;
388 	outbuf.dst = cc->cbuf->cdata;
389 	outbuf.size = dst_size;
390 
391 	ret = zstd_compress_stream(stream, &outbuf, &inbuf);
392 	if (zstd_is_error(ret)) {
393 		printk_ratelimited("%sF2FS-fs (%s): %s zstd_compress_stream failed, ret: %d\n",
394 				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
395 				__func__, zstd_get_error_code(ret));
396 		return -EIO;
397 	}
398 
399 	ret = zstd_end_stream(stream, &outbuf);
400 	if (zstd_is_error(ret)) {
401 		printk_ratelimited("%sF2FS-fs (%s): %s zstd_end_stream returned %d\n",
402 				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
403 				__func__, zstd_get_error_code(ret));
404 		return -EIO;
405 	}
406 
407 	/*
408 	 * Compressed data remains in the intermediate buffer because there
409 	 * is no more space left in cbuf->cdata.
410 	 */
411 	if (ret)
412 		return -EAGAIN;
413 
414 	cc->clen = outbuf.pos;
415 	return 0;
416 }
417 
418 static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
419 {
420 	zstd_dstream *stream;
421 	void *workspace;
422 	unsigned int workspace_size;
423 	unsigned int max_window_size =
424 			MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
425 
426 	workspace_size = zstd_dstream_workspace_bound(max_window_size);
427 
428 	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
429 					workspace_size, GFP_NOFS);
430 	if (!workspace)
431 		return -ENOMEM;
432 
433 	stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
434 	if (!stream) {
435 		printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_dstream failed\n",
436 				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
437 				__func__);
438 		kvfree(workspace);
439 		return -EIO;
440 	}
441 
442 	dic->private = workspace;
443 	dic->private2 = stream;
444 
445 	return 0;
446 }
447 
448 static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
449 {
450 	kvfree(dic->private);
451 	dic->private = NULL;
452 	dic->private2 = NULL;
453 }
454 
455 static int zstd_decompress_pages(struct decompress_io_ctx *dic)
456 {
457 	zstd_dstream *stream = dic->private2;
458 	zstd_in_buffer inbuf;
459 	zstd_out_buffer outbuf;
460 	int ret;
461 
462 	inbuf.pos = 0;
463 	inbuf.src = dic->cbuf->cdata;
464 	inbuf.size = dic->clen;
465 
466 	outbuf.pos = 0;
467 	outbuf.dst = dic->rbuf;
468 	outbuf.size = dic->rlen;
469 
470 	ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
471 	if (zstd_is_error(ret)) {
472 		printk_ratelimited("%sF2FS-fs (%s): %s zstd_decompress_stream failed, ret: %d\n",
473 				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
474 				__func__, zstd_get_error_code(ret));
475 		return -EIO;
476 	}
477 
478 	if (dic->rlen != outbuf.pos) {
479 		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
480 				"expected:%lu\n", KERN_ERR,
481 				F2FS_I_SB(dic->inode)->sb->s_id,
482 				__func__, dic->rlen,
483 				PAGE_SIZE << dic->log_cluster_size);
484 		return -EIO;
485 	}
486 
487 	return 0;
488 }
489 
490 static bool zstd_is_level_valid(int lvl)
491 {
492 	return lvl >= zstd_min_clevel() && lvl <= zstd_max_clevel();
493 }
494 
495 static const struct f2fs_compress_ops f2fs_zstd_ops = {
496 	.init_compress_ctx	= zstd_init_compress_ctx,
497 	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
498 	.compress_pages		= zstd_compress_pages,
499 	.init_decompress_ctx	= zstd_init_decompress_ctx,
500 	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
501 	.decompress_pages	= zstd_decompress_pages,
502 	.is_level_valid		= zstd_is_level_valid,
503 };
504 #endif
505 
506 #ifdef CONFIG_F2FS_FS_LZO
507 #ifdef CONFIG_F2FS_FS_LZORLE
508 static int lzorle_compress_pages(struct compress_ctx *cc)
509 {
510 	int ret;
511 
512 	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
513 					&cc->clen, cc->private);
514 	if (ret != LZO_E_OK) {
515 		printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
516 				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
517 		return -EIO;
518 	}
519 	return 0;
520 }
521 
522 static const struct f2fs_compress_ops f2fs_lzorle_ops = {
523 	.init_compress_ctx	= lzo_init_compress_ctx,
524 	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
525 	.compress_pages		= lzorle_compress_pages,
526 	.decompress_pages	= lzo_decompress_pages,
527 };
528 #endif
529 #endif
530 
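/*
 * Backend lookup table, indexed by the compress algorithm value stored
 * in the inode; the entry order must match the algorithm enum
 * (COMPRESS_LZO, COMPRESS_LZ4, COMPRESS_ZSTD, COMPRESS_LZORLE), and a
 * slot stays NULL when that backend is not built in.
 */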
531 static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
532 #ifdef CONFIG_F2FS_FS_LZO
533 	&f2fs_lzo_ops,
534 #else
535 	NULL,
536 #endif
537 #ifdef CONFIG_F2FS_FS_LZ4
538 	&f2fs_lz4_ops,
539 #else
540 	NULL,
541 #endif
542 #ifdef CONFIG_F2FS_FS_ZSTD
543 	&f2fs_zstd_ops,
544 #else
545 	NULL,
546 #endif
547 #if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
548 	&f2fs_lzorle_ops,
549 #else
550 	NULL,
551 #endif
552 };
553 
554 bool f2fs_is_compress_backend_ready(struct inode *inode)
555 {
556 	if (!f2fs_compressed_file(inode))
557 		return true;
558 	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
559 }
560 
561 bool f2fs_is_compress_level_valid(int alg, int lvl)
562 {
563 	const struct f2fs_compress_ops *cops = f2fs_cops[alg];
564 
565 	if (cops->is_level_valid)
566 		return cops->is_level_valid(lvl);
567 
568 	return lvl == 0;
569 }
570 
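/*
 * Work pages holding compressed data come from a mempool so that
 * writeback can make forward progress under memory pressure;
 * num_compress_pages only sizes the preallocated reserve, normal page
 * allocation is still tried first.
 */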
571 static mempool_t *compress_page_pool;
572 static unsigned int num_compress_pages = 512;
573 module_param(num_compress_pages, uint, 0444);
574 MODULE_PARM_DESC(num_compress_pages,
575 		"Number of intermediate compress pages to preallocate");
576 
577 int __init f2fs_init_compress_mempool(void)
578 {
579 	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
580 	return compress_page_pool ? 0 : -ENOMEM;
581 }
582 
583 void f2fs_destroy_compress_mempool(void)
584 {
585 	mempool_destroy(compress_page_pool);
586 }
587 
588 static struct page *f2fs_compress_alloc_page(void)
589 {
590 	struct page *page;
591 
592 	page = mempool_alloc(compress_page_pool, GFP_NOFS);
593 	lock_page(page);
594 
595 	return page;
596 }
597 
598 static void f2fs_compress_free_page(struct page *page)
599 {
600 	if (!page)
601 		return;
602 	detach_page_private(page);
603 	page->mapping = NULL;
604 	unlock_page(page);
605 	mempool_free(page, compress_page_pool);
606 }
607 
608 #define MAX_VMAP_RETRIES	3
609 
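/*
 * vm_map_ram() may fail transiently when the vmap area is fragmented;
 * purging lazily-freed aliases with vm_unmap_aliases() and retrying a
 * few times usually recovers.
 */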
610 static void *f2fs_vmap(struct page **pages, unsigned int count)
611 {
612 	int i;
613 	void *buf = NULL;
614 
615 	for (i = 0; i < MAX_VMAP_RETRIES; i++) {
616 		buf = vm_map_ram(pages, count, -1);
617 		if (buf)
618 			break;
619 		vm_unmap_aliases();
620 	}
621 	return buf;
622 }
623 
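/*
 * Compress one cluster: map rpages and cpages into contiguous virtual
 * buffers, run the backend, then fill in the header (clen, optional
 * checksum), zero the tail of the last used cpage and release the
 * unused cpages.  Returns -EAGAIN when compression would not save at
 * least one block.
 */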
624 static int f2fs_compress_pages(struct compress_ctx *cc)
625 {
626 	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
627 	const struct f2fs_compress_ops *cops =
628 				f2fs_cops[fi->i_compress_algorithm];
629 	unsigned int max_len, new_nr_cpages;
630 	u32 chksum = 0;
631 	int i, ret;
632 
633 	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
634 				cc->cluster_size, fi->i_compress_algorithm);
635 
636 	if (cops->init_compress_ctx) {
637 		ret = cops->init_compress_ctx(cc);
638 		if (ret)
639 			goto out;
640 	}
641 
642 	max_len = COMPRESS_HEADER_SIZE + cc->clen;
643 	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
644 	cc->valid_nr_cpages = cc->nr_cpages;
645 
646 	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
647 	if (!cc->cpages) {
648 		ret = -ENOMEM;
649 		goto destroy_compress_ctx;
650 	}
651 
652 	for (i = 0; i < cc->nr_cpages; i++)
653 		cc->cpages[i] = f2fs_compress_alloc_page();
654 
655 	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
656 	if (!cc->rbuf) {
657 		ret = -ENOMEM;
658 		goto out_free_cpages;
659 	}
660 
661 	cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
662 	if (!cc->cbuf) {
663 		ret = -ENOMEM;
664 		goto out_vunmap_rbuf;
665 	}
666 
667 	ret = cops->compress_pages(cc);
668 	if (ret)
669 		goto out_vunmap_cbuf;
670 
671 	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;
672 
673 	if (cc->clen > max_len) {
674 		ret = -EAGAIN;
675 		goto out_vunmap_cbuf;
676 	}
677 
678 	cc->cbuf->clen = cpu_to_le32(cc->clen);
679 
680 	if (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))
681 		chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
682 					cc->cbuf->cdata, cc->clen);
683 	cc->cbuf->chksum = cpu_to_le32(chksum);
684 
685 	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
686 		cc->cbuf->reserved[i] = cpu_to_le32(0);
687 
688 	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
689 
690 	/* zero out any unused part of the last page */
691 	memset(&cc->cbuf->cdata[cc->clen], 0,
692 			(new_nr_cpages * PAGE_SIZE) -
693 			(cc->clen + COMPRESS_HEADER_SIZE));
694 
695 	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
696 	vm_unmap_ram(cc->rbuf, cc->cluster_size);
697 
698 	for (i = new_nr_cpages; i < cc->nr_cpages; i++) {
699 		f2fs_compress_free_page(cc->cpages[i]);
700 		cc->cpages[i] = NULL;
701 	}
702 
703 	if (cops->destroy_compress_ctx)
704 		cops->destroy_compress_ctx(cc);
705 
706 	cc->valid_nr_cpages = new_nr_cpages;
707 
708 	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
709 							cc->clen, ret);
710 	return 0;
711 
712 out_vunmap_cbuf:
713 	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
714 out_vunmap_rbuf:
715 	vm_unmap_ram(cc->rbuf, cc->cluster_size);
716 out_free_cpages:
717 	for (i = 0; i < cc->nr_cpages; i++) {
718 		if (cc->cpages[i])
719 			f2fs_compress_free_page(cc->cpages[i]);
720 	}
721 	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
722 	cc->cpages = NULL;
723 destroy_compress_ctx:
724 	if (cops->destroy_compress_ctx)
725 		cops->destroy_compress_ctx(cc);
726 out:
727 	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
728 							cc->clen, ret);
729 	return ret;
730 }
731 
732 static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
733 		bool pre_alloc);
734 static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
735 		bool bypass_destroy_callback, bool pre_alloc);
736 
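/*
 * Decompress one fully-read cluster: validate clen against the size of
 * the compressed payload, run the backend and verify the optional
 * checksum; the result is propagated via f2fs_decompress_end_io().
 */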
737 void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
738 {
739 	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
740 	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
741 	const struct f2fs_compress_ops *cops =
742 			f2fs_cops[fi->i_compress_algorithm];
743 	bool bypass_callback = false;
744 	int ret;
745 
746 	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
747 				dic->cluster_size, fi->i_compress_algorithm);
748 
749 	if (dic->failed) {
750 		ret = -EIO;
751 		goto out_end_io;
752 	}
753 
754 	ret = f2fs_prepare_decomp_mem(dic, false);
755 	if (ret) {
756 		bypass_callback = true;
757 		goto out_release;
758 	}
759 
760 	dic->clen = le32_to_cpu(dic->cbuf->clen);
761 	dic->rlen = PAGE_SIZE << dic->log_cluster_size;
762 
763 	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
764 		ret = -EFSCORRUPTED;
765 
766 		/* Avoid f2fs_commit_super in irq context */
767 		if (!in_task)
768 			f2fs_handle_error_async(sbi, ERROR_FAIL_DECOMPRESSION);
769 		else
770 			f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
771 		goto out_release;
772 	}
773 
774 	ret = cops->decompress_pages(dic);
775 
776 	if (!ret && (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))) {
777 		u32 provided = le32_to_cpu(dic->cbuf->chksum);
778 		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
779 
780 		if (provided != calculated) {
781 			if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
782 				set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
783 				printk_ratelimited(
784 					"%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
785 					KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
786 					provided, calculated);
787 			}
788 			set_sbi_flag(sbi, SBI_NEED_FSCK);
789 		}
790 	}
791 
792 out_release:
793 	f2fs_release_decomp_mem(dic, bypass_callback, false);
794 
795 out_end_io:
796 	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
797 							dic->clen, ret);
798 	f2fs_decompress_end_io(dic, ret, in_task);
799 }
800 
801 /*
802  * This is called when a page of a compressed cluster has been read from disk
803  * (or failed to be read from disk).  It checks whether this page was the last
804  * page being waited on in the cluster, and if so, it decompresses the cluster
805  * (or in the case of a failure, cleans up without actually decompressing).
806  */
807 void f2fs_end_read_compressed_page(struct page *page, bool failed,
808 		block_t blkaddr, bool in_task)
809 {
810 	struct decompress_io_ctx *dic =
811 			(struct decompress_io_ctx *)page_private(page);
812 	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
813 
814 	dec_page_count(sbi, F2FS_RD_DATA);
815 
816 	if (failed)
817 		WRITE_ONCE(dic->failed, true);
818 	else if (blkaddr && in_task)
819 		f2fs_cache_compressed_page(sbi, page,
820 					dic->inode->i_ino, blkaddr);
821 
822 	if (atomic_dec_and_test(&dic->remaining_pages))
823 		f2fs_decompress_cluster(dic, in_task);
824 }
825 
826 static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
827 {
828 	if (cc->cluster_idx == NULL_CLUSTER)
829 		return true;
830 	return cc->cluster_idx == cluster_idx(cc, index);
831 }
832 
833 bool f2fs_cluster_is_empty(struct compress_ctx *cc)
834 {
835 	return cc->nr_rpages == 0;
836 }
837 
838 static bool f2fs_cluster_is_full(struct compress_ctx *cc)
839 {
840 	return cc->cluster_size == cc->nr_rpages;
841 }
842 
843 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
844 {
845 	if (f2fs_cluster_is_empty(cc))
846 		return true;
847 	return is_page_in_cluster(cc, index);
848 }
849 
850 bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
851 				int index, int nr_pages, bool uptodate)
852 {
853 	unsigned long pgidx = pages[index]->index;
854 	int i = uptodate ? 0 : 1;
855 
856 	/*
857 	 * When uptodate is true, check whether all pages in the cluster
858 	 * are uptodate.
859 	 */
860 	if (uptodate && (pgidx % cc->cluster_size))
861 		return false;
862 
863 	if (nr_pages - index < cc->cluster_size)
864 		return false;
865 
866 	for (; i < cc->cluster_size; i++) {
867 		if (pages[index + i]->index != pgidx + i)
868 			return false;
869 		if (uptodate && !PageUptodate(pages[index + i]))
870 			return false;
871 	}
872 
873 	return true;
874 }
875 
876 static bool cluster_has_invalid_data(struct compress_ctx *cc)
877 {
878 	loff_t i_size = i_size_read(cc->inode);
879 	unsigned int nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
880 	int i;
881 
882 	for (i = 0; i < cc->cluster_size; i++) {
883 		struct page *page = cc->rpages[i];
884 
885 		f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
886 
887 		/* beyond EOF */
888 		if (page->index >= nr_pages)
889 			return true;
890 	}
891 	return false;
892 }
893 
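/*
 * A valid compressed cluster is laid out as [COMPR_ADDR, V, .., N, ..]:
 * the header slot first, then valid block addresses, with holes
 * (NULL_ADDR/NEW_ADDR) only allowed at the tail.
 */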
894 bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
895 {
896 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
897 	unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
898 	bool compressed = dn->data_blkaddr == COMPRESS_ADDR;
899 	int cluster_end = 0;
900 	int i;
901 	const char *reason = "";
902 
903 	if (!compressed)
904 		return false;
905 
906 	/* [..., COMPR_ADDR, ...] */
907 	if (dn->ofs_in_node % cluster_size) {
908 		reason = "[*|C|*|*]";
909 		goto out;
910 	}
911 
912 	for (i = 1; i < cluster_size; i++) {
913 		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
914 							dn->ofs_in_node + i);
915 
916 		/* [COMPR_ADDR, ..., COMPR_ADDR] */
917 		if (blkaddr == COMPRESS_ADDR) {
918 			reason = "[C|*|C|*]";
919 			goto out;
920 		}
921 		if (!__is_valid_data_blkaddr(blkaddr)) {
922 			if (!cluster_end)
923 				cluster_end = i;
924 			continue;
925 		}
926 		/* [COMPR_ADDR, NULL_ADDR or NEW_ADDR, valid_blkaddr] */
927 		if (cluster_end) {
928 			reason = "[C|N|N|V]";
929 			goto out;
930 		}
931 	}
932 	return false;
933 out:
934 	f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s",
935 			dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason);
936 	set_sbi_flag(sbi, SBI_NEED_FSCK);
937 	return true;
938 }
939 
940 static int __f2fs_cluster_blocks(struct inode *inode,
941 				unsigned int cluster_idx, bool compr)
942 {
943 	struct dnode_of_data dn;
944 	unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
945 	unsigned int start_idx = cluster_idx <<
946 				F2FS_I(inode)->i_log_cluster_size;
947 	int ret;
948 
949 	set_new_dnode(&dn, inode, NULL, NULL, 0);
950 	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
951 	if (ret) {
952 		if (ret == -ENOENT)
953 			ret = 0;
954 		goto fail;
955 	}
956 
957 	if (f2fs_sanity_check_cluster(&dn)) {
958 		ret = -EFSCORRUPTED;
959 		f2fs_handle_error(F2FS_I_SB(inode), ERROR_CORRUPTED_CLUSTER);
960 		goto fail;
961 	}
962 
963 	if (dn.data_blkaddr == COMPRESS_ADDR) {
964 		int i;
965 
966 		ret = 1;
967 		for (i = 1; i < cluster_size; i++) {
968 			block_t blkaddr;
969 
970 			blkaddr = data_blkaddr(dn.inode,
971 					dn.node_page, dn.ofs_in_node + i);
972 			if (compr) {
973 				if (__is_valid_data_blkaddr(blkaddr))
974 					ret++;
975 			} else {
976 				if (blkaddr != NULL_ADDR)
977 					ret++;
978 			}
979 		}
980 
981 		f2fs_bug_on(F2FS_I_SB(inode),
982 			!compr && ret != cluster_size &&
983 			!is_inode_flag_set(inode, FI_COMPRESS_RELEASED));
984 	}
985 fail:
986 	f2fs_put_dnode(&dn);
987 	return ret;
988 }
989 
990 /* return # of compressed blocks in compressed cluster */
991 static int f2fs_compressed_blocks(struct compress_ctx *cc)
992 {
993 	return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
994 }
995 
996 /* return # of valid blocks in compressed cluster */
997 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
998 {
999 	return __f2fs_cluster_blocks(inode,
1000 		index >> F2FS_I(inode)->i_log_cluster_size,
1001 		false);
1002 }
1003 
1004 static bool cluster_may_compress(struct compress_ctx *cc)
1005 {
1006 	if (!f2fs_need_compress_data(cc->inode))
1007 		return false;
1008 	if (f2fs_is_atomic_file(cc->inode))
1009 		return false;
1010 	if (!f2fs_cluster_is_full(cc))
1011 		return false;
1012 	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
1013 		return false;
1014 	return !cluster_has_invalid_data(cc);
1015 }
1016 
1017 static void set_cluster_writeback(struct compress_ctx *cc)
1018 {
1019 	int i;
1020 
1021 	for (i = 0; i < cc->cluster_size; i++) {
1022 		if (cc->rpages[i])
1023 			set_page_writeback(cc->rpages[i]);
1024 	}
1025 }
1026 
1027 static void set_cluster_dirty(struct compress_ctx *cc)
1028 {
1029 	int i;
1030 
1031 	for (i = 0; i < cc->cluster_size; i++)
1032 		if (cc->rpages[i]) {
1033 			set_page_dirty(cc->rpages[i]);
1034 			set_page_private_gcing(cc->rpages[i]);
1035 		}
1036 }
1037 
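/*
 * Prepare an overwrite of a compressed cluster: every page of the
 * cluster must be locked and uptodate in the page cache before
 * write_begin can proceed, so read in any missing pages and retry from
 * scratch if one of them got truncated meanwhile.
 */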
1038 static int prepare_compress_overwrite(struct compress_ctx *cc,
1039 		struct page **pagep, pgoff_t index, void **fsdata)
1040 {
1041 	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
1042 	struct address_space *mapping = cc->inode->i_mapping;
1043 	struct page *page;
1044 	sector_t last_block_in_bio;
1045 	fgf_t fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
1046 	pgoff_t start_idx = start_idx_of_cluster(cc);
1047 	int i, ret;
1048 
1049 retry:
1050 	ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
1051 	if (ret <= 0)
1052 		return ret;
1053 
1054 	ret = f2fs_init_compress_ctx(cc);
1055 	if (ret)
1056 		return ret;
1057 
1058 	/* keep page reference to avoid page reclaim */
1059 	for (i = 0; i < cc->cluster_size; i++) {
1060 		page = f2fs_pagecache_get_page(mapping, start_idx + i,
1061 							fgp_flag, GFP_NOFS);
1062 		if (!page) {
1063 			ret = -ENOMEM;
1064 			goto unlock_pages;
1065 		}
1066 
1067 		if (PageUptodate(page))
1068 			f2fs_put_page(page, 1);
1069 		else
1070 			f2fs_compress_ctx_add_page(cc, page);
1071 	}
1072 
1073 	if (!f2fs_cluster_is_empty(cc)) {
1074 		struct bio *bio = NULL;
1075 
1076 		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
1077 					&last_block_in_bio, false, true);
1078 		f2fs_put_rpages(cc);
1079 		f2fs_destroy_compress_ctx(cc, true);
1080 		if (ret)
1081 			goto out;
1082 		if (bio)
1083 			f2fs_submit_read_bio(sbi, bio, DATA);
1084 
1085 		ret = f2fs_init_compress_ctx(cc);
1086 		if (ret)
1087 			goto out;
1088 	}
1089 
1090 	for (i = 0; i < cc->cluster_size; i++) {
1091 		f2fs_bug_on(sbi, cc->rpages[i]);
1092 
1093 		page = find_lock_page(mapping, start_idx + i);
1094 		if (!page) {
1095 			/* page can be truncated */
1096 			goto release_and_retry;
1097 		}
1098 
1099 		f2fs_wait_on_page_writeback(page, DATA, true, true);
1100 		f2fs_compress_ctx_add_page(cc, page);
1101 
1102 		if (!PageUptodate(page)) {
1103 release_and_retry:
1104 			f2fs_put_rpages(cc);
1105 			f2fs_unlock_rpages(cc, i + 1);
1106 			f2fs_destroy_compress_ctx(cc, true);
1107 			goto retry;
1108 		}
1109 	}
1110 
1111 	if (likely(!ret)) {
1112 		*fsdata = cc->rpages;
1113 		*pagep = cc->rpages[offset_in_cluster(cc, index)];
1114 		return cc->cluster_size;
1115 	}
1116 
1117 unlock_pages:
1118 	f2fs_put_rpages(cc);
1119 	f2fs_unlock_rpages(cc, i);
1120 	f2fs_destroy_compress_ctx(cc, true);
1121 out:
1122 	return ret;
1123 }
1124 
1125 int f2fs_prepare_compress_overwrite(struct inode *inode,
1126 		struct page **pagep, pgoff_t index, void **fsdata)
1127 {
1128 	struct compress_ctx cc = {
1129 		.inode = inode,
1130 		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1131 		.cluster_size = F2FS_I(inode)->i_cluster_size,
1132 		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
1133 		.rpages = NULL,
1134 		.nr_rpages = 0,
1135 	};
1136 
1137 	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
1138 }
1139 
1140 bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
1141 					pgoff_t index, unsigned copied)
1143 {
1144 	struct compress_ctx cc = {
1145 		.inode = inode,
1146 		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1147 		.cluster_size = F2FS_I(inode)->i_cluster_size,
1148 		.rpages = fsdata,
1149 	};
1150 	bool first_index = (index == cc.rpages[0]->index);
1151 
1152 	if (copied)
1153 		set_cluster_dirty(&cc);
1154 
1155 	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
1156 	f2fs_destroy_compress_ctx(&cc, false);
1157 
1158 	return first_index;
1159 }
1160 
1161 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
1162 {
1163 	void *fsdata = NULL;
1164 	struct page *pagep;
1165 	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
1166 	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
1167 							log_cluster_size;
1168 	int err;
1169 
1170 	err = f2fs_is_compressed_cluster(inode, start_idx);
1171 	if (err < 0)
1172 		return err;
1173 
1174 	/* truncate normal cluster */
1175 	if (!err)
1176 		return f2fs_do_truncate_blocks(inode, from, lock);
1177 
1178 	/* truncate compressed cluster */
1179 	err = f2fs_prepare_compress_overwrite(inode, &pagep,
1180 						start_idx, &fsdata);
1181 
1182 	/* should not be a normal cluster */
1183 	f2fs_bug_on(F2FS_I_SB(inode), err == 0);
1184 
1185 	if (err <= 0)
1186 		return err;
1187 
1188 	if (err > 0) {
1189 		struct page **rpages = fsdata;
1190 		int cluster_size = F2FS_I(inode)->i_cluster_size;
1191 		int i;
1192 
1193 		for (i = cluster_size - 1; i >= 0; i--) {
1194 			loff_t start = (loff_t)rpages[i]->index << PAGE_SHIFT;
1195 
1196 			if (from <= start) {
1197 				zero_user_segment(rpages[i], 0, PAGE_SIZE);
1198 			} else {
1199 				zero_user_segment(rpages[i], from - start,
1200 								PAGE_SIZE);
1201 				break;
1202 			}
1203 		}
1204 
1205 		f2fs_compress_write_end(inode, fsdata, start_idx, true);
1206 	}
1207 	return 0;
1208 }
1209 
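/*
 * Write back one compressed cluster: block 0 of the cluster becomes
 * the COMPRESS_ADDR header, blocks 1..valid_nr_cpages carry the
 * compressed payload, and any remaining blocks are invalidated and
 * left as NEW_ADDR placeholders.
 */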
1210 static int f2fs_write_compressed_pages(struct compress_ctx *cc,
1211 					int *submitted,
1212 					struct writeback_control *wbc,
1213 					enum iostat_type io_type)
1214 {
1215 	struct inode *inode = cc->inode;
1216 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1217 	struct f2fs_inode_info *fi = F2FS_I(inode);
1218 	struct f2fs_io_info fio = {
1219 		.sbi = sbi,
1220 		.ino = cc->inode->i_ino,
1221 		.type = DATA,
1222 		.op = REQ_OP_WRITE,
1223 		.op_flags = wbc_to_write_flags(wbc),
1224 		.old_blkaddr = NEW_ADDR,
1225 		.page = NULL,
1226 		.encrypted_page = NULL,
1227 		.compressed_page = NULL,
1228 		.submitted = 0,
1229 		.io_type = io_type,
1230 		.io_wbc = wbc,
1231 		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ?
1232 									1 : 0,
1233 	};
1234 	struct dnode_of_data dn;
1235 	struct node_info ni;
1236 	struct compress_io_ctx *cic;
1237 	pgoff_t start_idx = start_idx_of_cluster(cc);
1238 	unsigned int last_index = cc->cluster_size - 1;
1239 	loff_t psize;
1240 	int i, err;
1241 	bool quota_inode = IS_NOQUOTA(inode);
1242 
1243 	/* bypass data pages so that the kworker jobs can proceed */
1244 	if (unlikely(f2fs_cp_error(sbi))) {
1245 		mapping_set_error(cc->rpages[0]->mapping, -EIO);
1246 		goto out_free;
1247 	}
1248 
1249 	if (quota_inode) {
1250 		/*
1251 		 * We need to wait for node_write to avoid block allocation
1252 		 * during checkpoint.  This can only happen to quota writes,
1253 		 * which can otherwise race with discard at checkpoint time.
1254 		 */
1255 		f2fs_down_read(&sbi->node_write);
1256 	} else if (!f2fs_trylock_op(sbi)) {
1257 		goto out_free;
1258 	}
1259 
1260 	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
1261 
1262 	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
1263 	if (err)
1264 		goto out_unlock_op;
1265 
1266 	for (i = 0; i < cc->cluster_size; i++) {
1267 		if (data_blkaddr(dn.inode, dn.node_page,
1268 					dn.ofs_in_node + i) == NULL_ADDR)
1269 			goto out_put_dnode;
1270 	}
1271 
1272 	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
1273 
1274 	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
1275 	if (err)
1276 		goto out_put_dnode;
1277 
1278 	fio.version = ni.version;
1279 
1280 	cic = f2fs_kmem_cache_alloc(cic_entry_slab, GFP_F2FS_ZERO, false, sbi);
1281 	if (!cic)
1282 		goto out_put_dnode;
1283 
1284 	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1285 	cic->inode = inode;
1286 	atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
1287 	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1288 	if (!cic->rpages)
1289 		goto out_put_cic;
1290 
1291 	cic->nr_rpages = cc->cluster_size;
1292 
1293 	for (i = 0; i < cc->valid_nr_cpages; i++) {
1294 		f2fs_set_compressed_page(cc->cpages[i], inode,
1295 					cc->rpages[i + 1]->index, cic);
1296 		fio.compressed_page = cc->cpages[i];
1297 
1298 		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
1299 						dn.ofs_in_node + i + 1);
1300 
1301 		/* wait for GCed page writeback via META_MAPPING */
1302 		f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);
1303 
1304 		if (fio.encrypted) {
1305 			fio.page = cc->rpages[i + 1];
1306 			err = f2fs_encrypt_one_page(&fio);
1307 			if (err)
1308 				goto out_destroy_crypt;
1309 			cc->cpages[i] = fio.encrypted_page;
1310 		}
1311 	}
1312 
1313 	set_cluster_writeback(cc);
1314 
1315 	for (i = 0; i < cc->cluster_size; i++)
1316 		cic->rpages[i] = cc->rpages[i];
1317 
1318 	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
1319 		block_t blkaddr;
1320 
1321 		blkaddr = f2fs_data_blkaddr(&dn);
1322 		fio.page = cc->rpages[i];
1323 		fio.old_blkaddr = blkaddr;
1324 
1325 		/* cluster header */
1326 		if (i == 0) {
1327 			if (blkaddr == COMPRESS_ADDR)
1328 				fio.compr_blocks++;
1329 			if (__is_valid_data_blkaddr(blkaddr))
1330 				f2fs_invalidate_blocks(sbi, blkaddr);
1331 			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
1332 			goto unlock_continue;
1333 		}
1334 
1335 		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
1336 			fio.compr_blocks++;
1337 
1338 		if (i > cc->valid_nr_cpages) {
1339 			if (__is_valid_data_blkaddr(blkaddr)) {
1340 				f2fs_invalidate_blocks(sbi, blkaddr);
1341 				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
1342 			}
1343 			goto unlock_continue;
1344 		}
1345 
1346 		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);
1347 
1348 		if (fio.encrypted)
1349 			fio.encrypted_page = cc->cpages[i - 1];
1350 		else
1351 			fio.compressed_page = cc->cpages[i - 1];
1352 
1353 		cc->cpages[i - 1] = NULL;
1354 		f2fs_outplace_write_data(&dn, &fio);
1355 		(*submitted)++;
1356 unlock_continue:
1357 		inode_dec_dirty_pages(cc->inode);
1358 		unlock_page(fio.page);
1359 	}
1360 
1361 	if (fio.compr_blocks)
1362 		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
1363 	f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true);
1364 	add_compr_block_stat(inode, cc->valid_nr_cpages);
1365 
1366 	set_inode_flag(cc->inode, FI_APPEND_WRITE);
1367 	if (cc->cluster_idx == 0)
1368 		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
1369 
1370 	f2fs_put_dnode(&dn);
1371 	if (quota_inode)
1372 		f2fs_up_read(&sbi->node_write);
1373 	else
1374 		f2fs_unlock_op(sbi);
1375 
1376 	spin_lock(&fi->i_size_lock);
1377 	if (fi->last_disk_size < psize)
1378 		fi->last_disk_size = psize;
1379 	spin_unlock(&fi->i_size_lock);
1380 
1381 	f2fs_put_rpages(cc);
1382 	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1383 	cc->cpages = NULL;
1384 	f2fs_destroy_compress_ctx(cc, false);
1385 	return 0;
1386 
1387 out_destroy_crypt:
1388 	page_array_free(cc->inode, cic->rpages, cc->cluster_size);
1389 
1390 	for (--i; i >= 0; i--)
1391 		fscrypt_finalize_bounce_page(&cc->cpages[i]);
1392 out_put_cic:
1393 	kmem_cache_free(cic_entry_slab, cic);
1394 out_put_dnode:
1395 	f2fs_put_dnode(&dn);
1396 out_unlock_op:
1397 	if (quota_inode)
1398 		f2fs_up_read(&sbi->node_write);
1399 	else
1400 		f2fs_unlock_op(sbi);
1401 out_free:
1402 	for (i = 0; i < cc->valid_nr_cpages; i++) {
1403 		f2fs_compress_free_page(cc->cpages[i]);
1404 		cc->cpages[i] = NULL;
1405 	}
1406 	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1407 	cc->cpages = NULL;
1408 	return -EAGAIN;
1409 }
1410 
1411 void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
1412 {
1413 	struct f2fs_sb_info *sbi = bio->bi_private;
1414 	struct compress_io_ctx *cic =
1415 			(struct compress_io_ctx *)page_private(page);
1416 	int i;
1417 
1418 	if (unlikely(bio->bi_status))
1419 		mapping_set_error(cic->inode->i_mapping, -EIO);
1420 
1421 	f2fs_compress_free_page(page);
1422 
1423 	dec_page_count(sbi, F2FS_WB_DATA);
1424 
1425 	if (atomic_dec_return(&cic->pending_pages))
1426 		return;
1427 
1428 	for (i = 0; i < cic->nr_rpages; i++) {
1429 		WARN_ON(!cic->rpages[i]);
1430 		clear_page_private_gcing(cic->rpages[i]);
1431 		end_page_writeback(cic->rpages[i]);
1432 	}
1433 
1434 	page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
1435 	kmem_cache_free(cic_entry_slab, cic);
1436 }
1437 
1438 static int f2fs_write_raw_pages(struct compress_ctx *cc,
1439 					int *submitted,
1440 					struct writeback_control *wbc,
1441 					enum iostat_type io_type)
1442 {
1443 	struct address_space *mapping = cc->inode->i_mapping;
1444 	int _submitted, compr_blocks, ret, i;
1445 
1446 	compr_blocks = f2fs_compressed_blocks(cc);
1447 
1448 	for (i = 0; i < cc->cluster_size; i++) {
1449 		if (!cc->rpages[i])
1450 			continue;
1451 
1452 		redirty_page_for_writepage(wbc, cc->rpages[i]);
1453 		unlock_page(cc->rpages[i]);
1454 	}
1455 
1456 	if (compr_blocks < 0)
1457 		return compr_blocks;
1458 
1459 	for (i = 0; i < cc->cluster_size; i++) {
1460 		if (!cc->rpages[i])
1461 			continue;
1462 retry_write:
1463 		lock_page(cc->rpages[i]);
1464 
1465 		if (cc->rpages[i]->mapping != mapping) {
1466 continue_unlock:
1467 			unlock_page(cc->rpages[i]);
1468 			continue;
1469 		}
1470 
1471 		if (!PageDirty(cc->rpages[i]))
1472 			goto continue_unlock;
1473 
1474 		if (PageWriteback(cc->rpages[i])) {
1475 			if (wbc->sync_mode == WB_SYNC_NONE)
1476 				goto continue_unlock;
1477 			f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true);
1478 		}
1479 
1480 		if (!clear_page_dirty_for_io(cc->rpages[i]))
1481 			goto continue_unlock;
1482 
1483 		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
1484 						NULL, NULL, wbc, io_type,
1485 						compr_blocks, false);
1486 		if (ret) {
1487 			if (ret == AOP_WRITEPAGE_ACTIVATE) {
1488 				unlock_page(cc->rpages[i]);
1489 				ret = 0;
1490 			} else if (ret == -EAGAIN) {
1491 				/*
1492 				 * For quota files, just redirty the remaining
1493 				 * pages to avoid a deadlock caused by a cluster
1494 				 * update race with a foreground operation.
1495 				 */
1496 				if (IS_NOQUOTA(cc->inode))
1497 					return 0;
1498 				ret = 0;
1499 				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
1500 				goto retry_write;
1501 			}
1502 			return ret;
1503 		}
1504 
1505 		*submitted += _submitted;
1506 	}
1507 
1508 	f2fs_balance_fs(F2FS_M_SB(mapping), true);
1509 
1510 	return 0;
1511 }
1512 
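/*
 * Writeback entry point for a cluster: try the compressed path first;
 * if the data is incompressible (-EAGAIN) or the compressed write must
 * be retried, fall back to writing the raw pages in place.
 */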
1513 int f2fs_write_multi_pages(struct compress_ctx *cc,
1514 					int *submitted,
1515 					struct writeback_control *wbc,
1516 					enum iostat_type io_type)
1517 {
1518 	int err;
1519 
1520 	*submitted = 0;
1521 	if (cluster_may_compress(cc)) {
1522 		err = f2fs_compress_pages(cc);
1523 		if (err == -EAGAIN) {
1524 			add_compr_block_stat(cc->inode, cc->cluster_size);
1525 			goto write;
1526 		} else if (err) {
1527 			f2fs_put_rpages_wbc(cc, wbc, true, 1);
1528 			goto destroy_out;
1529 		}
1530 
1531 		err = f2fs_write_compressed_pages(cc, submitted,
1532 							wbc, io_type);
1533 		if (!err)
1534 			return 0;
1535 		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
1536 	}
1537 write:
1538 	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
1539 
1540 	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
1541 	f2fs_put_rpages_wbc(cc, wbc, false, 0);
1542 destroy_out:
1543 	f2fs_destroy_compress_ctx(cc, false);
1544 	return err;
1545 }
1546 
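/*
 * In low-memory mode, decompression buffers are allocated only while
 * actually decompressing (pre_alloc == false); otherwise they are set
 * up when the dic is created (pre_alloc == true).  The XOR below picks
 * exactly one of the two phases.
 */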
1547 static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi,
1548 		bool pre_alloc)
1549 {
1550 	return pre_alloc ^ f2fs_low_mem_mode(sbi);
1551 }
1552 
1553 static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
1554 		bool pre_alloc)
1555 {
1556 	const struct f2fs_compress_ops *cops =
1557 		f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
1558 	int i;
1559 
1560 	if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
1561 		return 0;
1562 
1563 	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
1564 	if (!dic->tpages)
1565 		return -ENOMEM;
1566 
1567 	for (i = 0; i < dic->cluster_size; i++) {
1568 		if (dic->rpages[i]) {
1569 			dic->tpages[i] = dic->rpages[i];
1570 			continue;
1571 		}
1572 
1573 		dic->tpages[i] = f2fs_compress_alloc_page();
1574 	}
1575 
1576 	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
1577 	if (!dic->rbuf)
1578 		return -ENOMEM;
1579 
1580 	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
1581 	if (!dic->cbuf)
1582 		return -ENOMEM;
1583 
1584 	if (cops->init_decompress_ctx)
1585 		return cops->init_decompress_ctx(dic);
1586 
1587 	return 0;
1588 }
1589 
1590 static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
1591 		bool bypass_destroy_callback, bool pre_alloc)
1592 {
1593 	const struct f2fs_compress_ops *cops =
1594 		f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
1595 
1596 	if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
1597 		return;
1598 
1599 	if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
1600 		cops->destroy_decompress_ctx(dic);
1601 
1602 	if (dic->cbuf)
1603 		vm_unmap_ram(dic->cbuf, dic->nr_cpages);
1604 
1605 	if (dic->rbuf)
1606 		vm_unmap_ram(dic->rbuf, dic->cluster_size);
1607 }
1608 
1609 static void f2fs_free_dic(struct decompress_io_ctx *dic,
1610 		bool bypass_destroy_callback);
1611 
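/*
 * Build the per-cluster decompression context for a read: copy the
 * rpages from the compress_ctx and allocate cpages for the compressed
 * blocks, which start one block past the cluster header (hence
 * start_idx + i + 1).
 */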
1612 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
1613 {
1614 	struct decompress_io_ctx *dic;
1615 	pgoff_t start_idx = start_idx_of_cluster(cc);
1616 	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
1617 	int i, ret;
1618 
1619 	dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO, false, sbi);
1620 	if (!dic)
1621 		return ERR_PTR(-ENOMEM);
1622 
1623 	dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1624 	if (!dic->rpages) {
1625 		kmem_cache_free(dic_entry_slab, dic);
1626 		return ERR_PTR(-ENOMEM);
1627 	}
1628 
1629 	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1630 	dic->inode = cc->inode;
1631 	atomic_set(&dic->remaining_pages, cc->nr_cpages);
1632 	dic->cluster_idx = cc->cluster_idx;
1633 	dic->cluster_size = cc->cluster_size;
1634 	dic->log_cluster_size = cc->log_cluster_size;
1635 	dic->nr_cpages = cc->nr_cpages;
1636 	refcount_set(&dic->refcnt, 1);
1637 	dic->failed = false;
1638 	dic->need_verity = f2fs_need_verity(cc->inode, start_idx);
1639 
1640 	for (i = 0; i < dic->cluster_size; i++)
1641 		dic->rpages[i] = cc->rpages[i];
1642 	dic->nr_rpages = cc->cluster_size;
1643 
1644 	dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
1645 	if (!dic->cpages) {
1646 		ret = -ENOMEM;
1647 		goto out_free;
1648 	}
1649 
1650 	for (i = 0; i < dic->nr_cpages; i++) {
1651 		struct page *page;
1652 
1653 		page = f2fs_compress_alloc_page();
1654 		f2fs_set_compressed_page(page, cc->inode,
1655 					start_idx + i + 1, dic);
1656 		dic->cpages[i] = page;
1657 	}
1658 
1659 	ret = f2fs_prepare_decomp_mem(dic, true);
1660 	if (ret)
1661 		goto out_free;
1662 
1663 	return dic;
1664 
1665 out_free:
1666 	f2fs_free_dic(dic, true);
1667 	return ERR_PTR(ret);
1668 }
1669 
1670 static void f2fs_free_dic(struct decompress_io_ctx *dic,
1671 		bool bypass_destroy_callback)
1672 {
1673 	int i;
1674 
1675 	f2fs_release_decomp_mem(dic, bypass_destroy_callback, true);
1676 
1677 	if (dic->tpages) {
1678 		for (i = 0; i < dic->cluster_size; i++) {
1679 			if (dic->rpages[i])
1680 				continue;
1681 			if (!dic->tpages[i])
1682 				continue;
1683 			f2fs_compress_free_page(dic->tpages[i]);
1684 		}
1685 		page_array_free(dic->inode, dic->tpages, dic->cluster_size);
1686 	}
1687 
1688 	if (dic->cpages) {
1689 		for (i = 0; i < dic->nr_cpages; i++) {
1690 			if (!dic->cpages[i])
1691 				continue;
1692 			f2fs_compress_free_page(dic->cpages[i]);
1693 		}
1694 		page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
1695 	}
1696 
1697 	page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
1698 	kmem_cache_free(dic_entry_slab, dic);
1699 }
1700 
1701 static void f2fs_late_free_dic(struct work_struct *work)
1702 {
1703 	struct decompress_io_ctx *dic =
1704 		container_of(work, struct decompress_io_ctx, free_work);
1705 
1706 	f2fs_free_dic(dic, false);
1707 }
1708 
1709 static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
1710 {
1711 	if (refcount_dec_and_test(&dic->refcnt)) {
1712 		if (in_task) {
1713 			f2fs_free_dic(dic, false);
1714 		} else {
1715 			INIT_WORK(&dic->free_work, f2fs_late_free_dic);
1716 			queue_work(F2FS_I_SB(dic->inode)->post_read_wq,
1717 					&dic->free_work);
1718 		}
1719 	}
1720 }
1721 
1722 static void f2fs_verify_cluster(struct work_struct *work)
1723 {
1724 	struct decompress_io_ctx *dic =
1725 		container_of(work, struct decompress_io_ctx, verity_work);
1726 	int i;
1727 
1728 	/* Verify, update, and unlock the decompressed pages. */
1729 	for (i = 0; i < dic->cluster_size; i++) {
1730 		struct page *rpage = dic->rpages[i];
1731 
1732 		if (!rpage)
1733 			continue;
1734 
1735 		if (fsverity_verify_page(rpage))
1736 			SetPageUptodate(rpage);
1737 		else
1738 			ClearPageUptodate(rpage);
1739 		unlock_page(rpage);
1740 	}
1741 
1742 	f2fs_put_dic(dic, true);
1743 }
1744 
1745 /*
1746  * This is called when a compressed cluster has been decompressed
1747  * (or failed to be read and/or decompressed).
1748  */
1749 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
1750 				bool in_task)
1751 {
1752 	int i;
1753 
1754 	if (!failed && dic->need_verity) {
1755 		/*
1756 		 * Note that to avoid deadlocks, the verity work can't be done
1757 		 * on the decompression workqueue.  This is because verifying
1758 		 * the data pages can involve reading metadata pages from the
1759 		 * file, and these metadata pages may be compressed.
1760 		 */
1761 		INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
1762 		fsverity_enqueue_verify_work(&dic->verity_work);
1763 		return;
1764 	}
1765 
1766 	/* Update and unlock the cluster's pagecache pages. */
1767 	for (i = 0; i < dic->cluster_size; i++) {
1768 		struct page *rpage = dic->rpages[i];
1769 
1770 		if (!rpage)
1771 			continue;
1772 
1773 		if (failed)
1774 			ClearPageUptodate(rpage);
1775 		else
1776 			SetPageUptodate(rpage);
1777 		unlock_page(rpage);
1778 	}
1779 
1780 	/*
1781 	 * Release the reference to the decompress_io_ctx that was being held
1782 	 * for I/O completion.
1783 	 */
1784 	f2fs_put_dic(dic, in_task);
1785 }
1786 
1787 /*
1788  * Put a reference to a compressed page's decompress_io_ctx.
1789  *
1790  * This is called when the page is no longer needed and can be freed.
1791  */
1792 void f2fs_put_page_dic(struct page *page, bool in_task)
1793 {
1794 	struct decompress_io_ctx *dic =
1795 			(struct decompress_io_ctx *)page_private(page);
1796 
1797 	f2fs_put_dic(dic, in_task);
1798 }
1799 
1800 /*
1801  * check whether cluster blocks are contiguous, and add extent cache entry
1802  * only if cluster blocks are logically and physically contiguous.
1803  */
1804 unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
1805 {
1806 	bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
1807 	int i = compressed ? 1 : 0;
1808 	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
1809 						dn->ofs_in_node + i);
1810 
1811 	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
1812 		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
1813 						dn->ofs_in_node + i);
1814 
1815 		if (!__is_valid_data_blkaddr(blkaddr))
1816 			break;
1817 		if (first_blkaddr + i - (compressed ? 1 : 0) != blkaddr)
1818 			return 0;
1819 	}
1820 
1821 	return compressed ? i - 1 : i;
1822 }
1823 
1824 const struct address_space_operations f2fs_compress_aops = {
1825 	.release_folio = f2fs_release_folio,
1826 	.invalidate_folio = f2fs_invalidate_folio,
1827 	.migrate_folio	= filemap_migrate_folio,
1828 };
1829 
1830 struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
1831 {
1832 	return sbi->compress_inode->i_mapping;
1833 }
1834 
1835 void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
1836 {
1837 	if (!sbi->compress_inode)
1838 		return;
1839 	invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
1840 }
1841 
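/*
 * With the COMPRESS_CACHE mount option, recently read compressed
 * blocks are kept in a dedicated inode's page cache keyed by block
 * address, so a later read of the same cluster can skip the device.
 */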
1842 void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
1843 						nid_t ino, block_t blkaddr)
1844 {
1845 	struct page *cpage;
1846 	int ret;
1847 
1848 	if (!test_opt(sbi, COMPRESS_CACHE))
1849 		return;
1850 
1851 	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
1852 		return;
1853 
1854 	if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
1855 		return;
1856 
1857 	cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
1858 	if (cpage) {
1859 		f2fs_put_page(cpage, 0);
1860 		return;
1861 	}
1862 
1863 	cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
1864 	if (!cpage)
1865 		return;
1866 
1867 	ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
1868 						blkaddr, GFP_NOFS);
1869 	if (ret) {
1870 		f2fs_put_page(cpage, 0);
1871 		return;
1872 	}
1873 
1874 	set_page_private_data(cpage, ino);
1875 
1876 	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
1877 		goto out;
1878 
1879 	memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
1880 	SetPageUptodate(cpage);
1881 out:
1882 	f2fs_put_page(cpage, 1);
1883 }
1884 
1885 bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
1886 								block_t blkaddr)
1887 {
1888 	struct page *cpage;
1889 	bool hit = false;
1890 
1891 	if (!test_opt(sbi, COMPRESS_CACHE))
1892 		return false;
1893 
1894 	cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
1895 				blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
1896 	if (cpage) {
1897 		if (PageUptodate(cpage)) {
1898 			atomic_inc(&sbi->compress_page_hit);
1899 			memcpy(page_address(page),
1900 				page_address(cpage), PAGE_SIZE);
1901 			hit = true;
1902 		}
1903 		f2fs_put_page(cpage, 1);
1904 	}
1905 
1906 	return hit;
1907 }
1908 
1909 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
1910 {
1911 	struct address_space *mapping = COMPRESS_MAPPING(sbi);
1912 	struct folio_batch fbatch;
1913 	pgoff_t index = 0;
1914 	pgoff_t end = MAX_BLKADDR(sbi);
1915 
1916 	if (!mapping->nrpages)
1917 		return;
1918 
1919 	folio_batch_init(&fbatch);
1920 
1921 	do {
1922 		unsigned int nr, i;
1923 
1924 		nr = filemap_get_folios(mapping, &index, end - 1, &fbatch);
1925 		if (!nr)
1926 			break;
1927 
1928 		for (i = 0; i < nr; i++) {
1929 			struct folio *folio = fbatch.folios[i];
1930 
1931 			folio_lock(folio);
1932 			if (folio->mapping != mapping) {
1933 				folio_unlock(folio);
1934 				continue;
1935 			}
1936 
1937 			if (ino != get_page_private_data(&folio->page)) {
1938 				folio_unlock(folio);
1939 				continue;
1940 			}
1941 
1942 			generic_error_remove_page(mapping, &folio->page);
1943 			folio_unlock(folio);
1944 		}
1945 		folio_batch_release(&fbatch);
1946 		cond_resched();
1947 	} while (index < end);
1948 }
1949 
1950 int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
1951 {
1952 	struct inode *inode;
1953 
1954 	if (!test_opt(sbi, COMPRESS_CACHE))
1955 		return 0;
1956 
1957 	inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
1958 	if (IS_ERR(inode))
1959 		return PTR_ERR(inode);
1960 	sbi->compress_inode = inode;
1961 
1962 	sbi->compress_percent = COMPRESS_PERCENT;
1963 	sbi->compress_watermark = COMPRESS_WATERMARK;
1964 
1965 	atomic_set(&sbi->compress_page_hit, 0);
1966 
1967 	return 0;
1968 }
1969 
1970 void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
1971 {
1972 	if (!sbi->compress_inode)
1973 		return;
1974 	iput(sbi->compress_inode);
1975 	sbi->compress_inode = NULL;
1976 }
1977 
1978 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
1979 {
1980 	dev_t dev = sbi->sb->s_bdev->bd_dev;
1981 	char slab_name[35];
1982 
1983 	if (!f2fs_sb_has_compression(sbi))
1984 		return 0;
1985 
1986 	sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));
1987 
1988 	sbi->page_array_slab_size = sizeof(struct page *) <<
1989 					F2FS_OPTION(sbi).compress_log_size;
1990 
1991 	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
1992 					sbi->page_array_slab_size);
1993 	return sbi->page_array_slab ? 0 : -ENOMEM;
1994 }
1995 
1996 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
1997 {
1998 	kmem_cache_destroy(sbi->page_array_slab);
1999 }
2000 
2001 int __init f2fs_init_compress_cache(void)
2002 {
2003 	cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
2004 					sizeof(struct compress_io_ctx));
2005 	if (!cic_entry_slab)
2006 		return -ENOMEM;
2007 	dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
2008 					sizeof(struct decompress_io_ctx));
2009 	if (!dic_entry_slab)
2010 		goto free_cic;
2011 	return 0;
2012 free_cic:
2013 	kmem_cache_destroy(cic_entry_slab);
2014 	return -ENOMEM;
2015 }
2016 
2017 void f2fs_destroy_compress_cache(void)
2018 {
2019 	kmem_cache_destroy(dic_entry_slab);
2020 	kmem_cache_destroy(cic_entry_slab);
2021 }
2022