// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

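/*
 * Per-algorithm hooks. The compress-side hooks are required by every
 * backend; the decompress context hooks are optional and only used by
 * backends that keep per-cluster decompression state (currently zstd).
 */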
struct f2fs_compress_ops {
	int (*init_compress_ctx)(struct compress_ctx *cc);
	void (*destroy_compress_ctx)(struct compress_ctx *cc);
	int (*compress_pages)(struct compress_ctx *cc);
	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
	int (*decompress_pages)(struct decompress_io_ctx *dic);
};

static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
	return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
	return cc->cluster_idx << cc->log_cluster_size;
}

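/*
 * A compressed page keeps a pointer to its owning {compress,decompress}
 * io context in page->private; both context structures start with
 * F2FS_COMPRESSED_PAGE_MAGIC, which is what the final check relies on.
 */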
bool f2fs_is_compressed_page(struct page *page)
{
	if (!PagePrivate(page))
		return false;
	if (!page_private(page))
		return false;
	if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
		return false;
	/*
	 * page->private may hold a pid; comparing against pid_max is
	 * enough to tell whether the page is IO-traced.
	 */
	if (IS_IO_TRACED_PAGE(page))
		return false;

	f2fs_bug_on(F2FS_M_SB(page->mapping),
		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
	return true;
}

static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data)
{
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)data);

	/* i_crypto_info and iv index */
	page->index = index;
	page->mapping = inode->i_mapping;
}

static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!cc->rpages[i])
			continue;
		if (unlock)
			unlock_page(cc->rpages[i]);
		else
			put_page(cc->rpages[i]);
	}
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
	f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
	f2fs_drop_rpages(cc, len, true);
}

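/*
 * Drop two references on each raw page: the one taken by find_get_page()
 * below plus the long-term one the caller took when it originally looked
 * the page up via f2fs_pagecache_get_page().
 */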
static void f2fs_put_rpages_mapping(struct address_space *mapping,
				pgoff_t start, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		struct page *page = find_get_page(mapping, start + i);

		put_page(page);
		put_page(page);
	}
}

static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, int unlock)
{
	unsigned int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		if (redirty)
			redirty_page_for_writepage(wbc, cc->rpages[i]);
		f2fs_put_page(cc->rpages[i], unlock);
	}
}

struct page *f2fs_compress_control_page(struct page *page)
{
	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}

int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);

	if (cc->nr_rpages)
		return 0;

	cc->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
					cc->log_cluster_size, GFP_NOFS);
	return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
{
	kfree(cc->rpages);
	cc->rpages = NULL;
	cc->nr_rpages = 0;
	cc->nr_cpages = 0;
	cc->cluster_idx = NULL_CLUSTER;
}

void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
{
	unsigned int cluster_ofs;

	if (!f2fs_cluster_can_merge_page(cc, page->index))
		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

	cluster_ofs = offset_in_cluster(cc, page->index);
	cc->rpages[cluster_ofs] = page;
	cc->nr_rpages++;
	cc->cluster_idx = cluster_idx(cc, page->index);
}

#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZO1X_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
						dic->rbuf, &dic->rlen);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzo_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZ4_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	/*
	 * We do not set cc->clen to LZ4_compressBound(inputsize) to cover
	 * the worst compression case, because the lz4 compressor handles
	 * a limited output budget properly.
	 */
	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len;

	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
						dic->clen, dic->rlen);
	if (ret < 0) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (ret != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
	.init_compress_ctx	= lz4_init_compress_ctx,
	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
	.compress_pages		= lz4_compress_pages,
	.decompress_pages	= lz4_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_ZSTD
#define F2FS_ZSTD_DEFAULT_CLEVEL	1

static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
	ZSTD_parameters params;
	ZSTD_CStream *stream;
	void *workspace;
	unsigned int workspace_size;

	params = ZSTD_getParams(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen, 0);
	workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);

	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	cc->private = workspace;
	cc->private2 = stream;

	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
	cc->private2 = NULL;
}

static int zstd_compress_pages(struct compress_ctx *cc)
{
	ZSTD_CStream *stream = cc->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int src_size = cc->rlen;
	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	int ret;

	inbuf.pos = 0;
	inbuf.src = cc->rbuf;
	inbuf.size = src_size;

	outbuf.pos = 0;
	outbuf.dst = cc->cbuf->cdata;
	outbuf.size = dst_size;

	ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	ret = ZSTD_endStream(stream, &outbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	/*
	 * A nonzero return from ZSTD_endStream() means compressed data
	 * remains in the intermediate buffer because there is no more
	 * space in cbuf->cdata.
	 */
	if (ret)
		return -EAGAIN;

	cc->clen = outbuf.pos;
	return 0;
}

static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream;
	void *workspace;
	unsigned int workspace_size;

	workspace_size = ZSTD_DStreamWorkspaceBound(MAX_COMPRESS_WINDOW_SIZE);

	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initDStream(MAX_COMPRESS_WINDOW_SIZE,
					workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	dic->private = workspace;
	dic->private2 = stream;

	return 0;
}

static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
	kvfree(dic->private);
	dic->private = NULL;
	dic->private2 = NULL;
}

static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream = dic->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int ret;

	inbuf.pos = 0;
	inbuf.src = dic->cbuf->cdata;
	inbuf.size = dic->clen;

	outbuf.pos = 0;
	outbuf.dst = dic->rbuf;
	outbuf.size = dic->rlen;

	ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	if (dic->rlen != outbuf.pos) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
				"expected:%lu\n", KERN_ERR,
				F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, dic->rlen,
				PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}

	return 0;
}

static const struct f2fs_compress_ops f2fs_zstd_ops = {
	.init_compress_ctx	= zstd_init_compress_ctx,
	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
	.compress_pages		= zstd_compress_pages,
	.init_decompress_ctx	= zstd_init_decompress_ctx,
	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
	.decompress_pages	= zstd_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZO
#ifdef CONFIG_F2FS_FS_LZORLE
static int lzorle_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzorle_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzorle_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif
#endif

static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
	&f2fs_lzo_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
	&f2fs_lz4_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
	&f2fs_zstd_ops,
#else
	NULL,
#endif
#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
	&f2fs_lzorle_ops,
#else
	NULL,
#endif
};

bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

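/*
 * Intermediate pages holding compressed data come from a dedicated
 * mempool, so compression and decompression can keep making progress
 * even under memory pressure.
 */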
static mempool_t *compress_page_pool;
static unsigned int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC(num_compress_pages,
		"Number of intermediate compress pages to preallocate");

int f2fs_init_compress_mempool(void)
{
	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
	if (!compress_page_pool)
		return -ENOMEM;

	return 0;
}

void f2fs_destroy_compress_mempool(void)
{
	mempool_destroy(compress_page_pool);
}

static struct page *f2fs_compress_alloc_page(void)
{
	struct page *page;

	page = mempool_alloc(compress_page_pool, GFP_NOFS);
	lock_page(page);

	return page;
}

static void f2fs_compress_free_page(struct page *page)
{
	if (!page)
		return;
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	page->mapping = NULL;
	unlock_page(page);
	mempool_free(page, compress_page_pool);
}

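/*
 * Compress one cluster: map rpages into a contiguous rbuf, run the
 * backend into cbuf, and bail out with -EAGAIN unless the result saves
 * at least one block, i.e. header plus payload fit in cluster_size - 1
 * pages. Unused intermediate pages are returned to the mempool.
 */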
static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
	unsigned int max_len, nr_cpages;
	int i, ret;

	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
				cc->cluster_size, fi->i_compress_algorithm);

	if (cops->init_compress_ctx) {
		ret = cops->init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	max_len = COMPRESS_HEADER_SIZE + cc->clen;
	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);

	cc->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					cc->nr_cpages, GFP_NOFS);
	if (!cc->cpages) {
		ret = -ENOMEM;
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++) {
		cc->cpages[i] = f2fs_compress_alloc_page();
		if (!cc->cpages[i]) {
			ret = -ENOMEM;
			goto out_free_cpages;
		}
	}

	cc->rbuf = vmap(cc->rpages, cc->cluster_size, VM_MAP, PAGE_KERNEL_RO);
	if (!cc->rbuf) {
		ret = -ENOMEM;
		goto out_free_cpages;
	}

	cc->cbuf = vmap(cc->cpages, cc->nr_cpages, VM_MAP, PAGE_KERNEL);
	if (!cc->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;

	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

	if (cc->clen > max_len) {
		ret = -EAGAIN;
		goto out_vunmap_cbuf;
	}

	cc->cbuf->clen = cpu_to_le32(cc->clen);

	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
		cc->cbuf->reserved[i] = cpu_to_le32(0);

	nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* zero out any unused part of the last page */
	memset(&cc->cbuf->cdata[cc->clen], 0,
	       (nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE));

	vunmap(cc->cbuf);
	vunmap(cc->rbuf);

	for (i = nr_cpages; i < cc->nr_cpages; i++) {
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);

	cc->nr_cpages = nr_cpages;

	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return 0;

out_vunmap_cbuf:
	vunmap(cc->cbuf);
out_vunmap_rbuf:
	vunmap(cc->rbuf);
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_compress_free_page(cc->cpages[i]);
	}
	kfree(cc->cpages);
	cc->cpages = NULL;
destroy_compress_ctx:
	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);
out:
	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return ret;
}

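/*
 * Called for each compressed page as its read completes; whoever drops
 * the last reference on the dic does the actual decompression. tpages[]
 * provides scratch pages for cluster slots that have no raw page
 * attached.
 */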
void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int ret;
	int i;

	dec_page_count(sbi, F2FS_RD_DATA);

	if (bio->bi_status || PageError(page))
		dic->failed = true;

	if (refcount_dec_not_one(&dic->ref))
		return;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	/* submit partial compressed pages */
	if (dic->failed) {
		ret = -EIO;
		goto out_free_dic;
	}

	dic->tpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					dic->cluster_size, GFP_NOFS);
	if (!dic->tpages) {
		ret = -ENOMEM;
		goto out_free_dic;
	}

	for (i = 0; i < dic->cluster_size; i++) {
		if (dic->rpages[i]) {
			dic->tpages[i] = dic->rpages[i];
			continue;
		}

		dic->tpages[i] = f2fs_compress_alloc_page();
		if (!dic->tpages[i]) {
			ret = -ENOMEM;
			goto out_free_dic;
		}
	}

	if (cops->init_decompress_ctx) {
		ret = cops->init_decompress_ctx(dic);
		if (ret)
			goto out_free_dic;
	}

	dic->rbuf = vmap(dic->tpages, dic->cluster_size, VM_MAP, PAGE_KERNEL);
	if (!dic->rbuf) {
		ret = -ENOMEM;
		goto destroy_decompress_ctx;
	}

	dic->cbuf = vmap(dic->cpages, dic->nr_cpages, VM_MAP, PAGE_KERNEL_RO);
	if (!dic->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
	dic->rlen = PAGE_SIZE << dic->log_cluster_size;

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;
		goto out_vunmap_cbuf;
	}

	ret = cops->decompress_pages(dic);

out_vunmap_cbuf:
	vunmap(dic->cbuf);
out_vunmap_rbuf:
	vunmap(dic->rbuf);
destroy_decompress_ctx:
	if (cops->destroy_decompress_ctx)
		cops->destroy_decompress_ctx(dic);
out_free_dic:
	if (verity)
		refcount_set(&dic->ref, dic->nr_cpages);
	if (!verity)
		f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
								ret, false);

	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
							dic->clen, ret);
	if (!verity)
		f2fs_free_dic(dic);
}

static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	if (cc->cluster_idx == NULL_CLUSTER)
		return true;
	return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
	return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
	return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
	if (f2fs_cluster_is_empty(cc))
		return true;
	return is_page_in_cluster(cc, index);
}

static bool __cluster_may_compress(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	loff_t i_size = i_size_read(cc->inode);
	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		f2fs_bug_on(sbi, !page);

		if (unlikely(f2fs_cp_error(sbi)))
			return false;
		if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
			return false;

		/* beyond EOF */
		if (page->index >= nr_pages)
			return false;
	}
	return true;
}

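/*
 * Return 0 if the cluster is absent or not compressed; otherwise return
 * a count that includes the COMPRESS_ADDR header slot plus either the
 * written compressed blocks (compr == true) or all non-NULL blocks.
 */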
static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
{
	struct dnode_of_data dn;
	int ret;

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx_of_cluster(cc),
							LOOKUP_NODE);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;
		goto fail;
	}

	if (dn.data_blkaddr == COMPRESS_ADDR) {
		int i;

		ret = 1;
		for (i = 1; i < cc->cluster_size; i++) {
			block_t blkaddr;

			blkaddr = data_blkaddr(dn.inode,
					dn.node_page, dn.ofs_in_node + i);
			if (compr) {
				if (__is_valid_data_blkaddr(blkaddr))
					ret++;
			} else {
				if (blkaddr != NULL_ADDR)
					ret++;
			}
		}
	}
fail:
	f2fs_put_dnode(&dn);
	return ret;
}

/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc, true);
}

/* return # of valid blocks in compressed cluster */
static int f2fs_cluster_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc, false);
}

int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
	};

	return f2fs_cluster_blocks(&cc);
}

static bool cluster_may_compress(struct compress_ctx *cc)
{
	if (!f2fs_compressed_file(cc->inode))
		return false;
	if (f2fs_is_atomic_file(cc->inode))
		return false;
	if (f2fs_is_mmap_file(cc->inode))
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
		return false;
	return __cluster_may_compress(cc);
}

static void set_cluster_writeback(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i])
			set_page_writeback(cc->rpages[i]);
	}
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++)
		if (cc->rpages[i])
			set_page_dirty(cc->rpages[i]);
}

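/*
 * Prepare a compressed cluster for overwrite: pin every page of the
 * cluster, read it up to date, and, when the cluster is only partially
 * compressed, preallocate blocks for its tail so the overwrite cannot
 * run out of space midway.
 */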
static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	sector_t last_block_in_bio;
	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;
	bool prealloc;

retry:
	ret = f2fs_cluster_blocks(cc);
	if (ret <= 0)
		return ret;

	/* compressed case */
	prealloc = (ret < cc->cluster_size);

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;

	/* keep page reference to avoid page reclaim */
	for (i = 0; i < cc->cluster_size; i++) {
		page = f2fs_pagecache_get_page(mapping, start_idx + i,
							fgp_flag, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			goto unlock_pages;
		}

		if (PageUptodate(page))
			unlock_page(page);
		else
			f2fs_compress_ctx_add_page(cc, page);
	}

	if (!f2fs_cluster_is_empty(cc)) {
		struct bio *bio = NULL;

		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
					&last_block_in_bio, false, true);
		f2fs_destroy_compress_ctx(cc);
		if (ret)
			goto release_pages;
		if (bio)
			f2fs_submit_bio(sbi, bio, DATA);

		ret = f2fs_init_compress_ctx(cc);
		if (ret)
			goto release_pages;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		f2fs_bug_on(sbi, cc->rpages[i]);

		page = find_lock_page(mapping, start_idx + i);
		f2fs_bug_on(sbi, !page);

		f2fs_wait_on_page_writeback(page, DATA, true, true);

		f2fs_compress_ctx_add_page(cc, page);
		f2fs_put_page(page, 0);

		if (!PageUptodate(page)) {
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_put_rpages_mapping(mapping, start_idx,
					cc->cluster_size);
			f2fs_destroy_compress_ctx(cc);
			goto retry;
		}
	}

	if (prealloc) {
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);

		set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

		for (i = cc->cluster_size - 1; i > 0; i--) {
			ret = f2fs_get_block(&dn, start_idx + i);
			if (ret) {
				i = cc->cluster_size;
				break;
			}

			if (dn.data_blkaddr != NEW_ADDR)
				break;
		}

		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

	if (likely(!ret)) {
		*fsdata = cc->rpages;
		*pagep = cc->rpages[offset_in_cluster(cc, index)];
		return cc->cluster_size;
	}

unlock_pages:
	f2fs_unlock_rpages(cc, i);
release_pages:
	f2fs_put_rpages_mapping(mapping, start_idx, i);
	f2fs_destroy_compress_ctx(cc);
	return ret;
}

int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
		.rpages = NULL,
		.nr_rpages = 0,
	};

	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}

bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
					pgoff_t index, unsigned copied)
{
	struct compress_ctx cc = {
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.rpages = fsdata,
	};
	bool first_index = (index == cc.rpages[0]->index);

	if (copied)
		set_cluster_dirty(&cc);

	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
	f2fs_destroy_compress_ctx(&cc);

	return first_index;
}

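/*
 * Truncate within a cluster. A normal cluster goes through
 * f2fs_do_truncate_blocks(); a compressed cluster is instead rewritten
 * whole, with everything at or beyond 'from' zeroed out.
 */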
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{
	void *fsdata = NULL;
	struct page *pagep;
	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
							log_cluster_size;
	int err;

	err = f2fs_is_compressed_cluster(inode, start_idx);
	if (err < 0)
		return err;

	/* truncate normal cluster */
	if (!err)
		return f2fs_do_truncate_blocks(inode, from, lock);

	/* truncate compressed cluster */
	err = f2fs_prepare_compress_overwrite(inode, &pagep,
						start_idx, &fsdata);

	/* should not be a normal cluster */
	f2fs_bug_on(F2FS_I_SB(inode), err == 0);

	if (err <= 0)
		return err;

	if (err > 0) {
		struct page **rpages = fsdata;
		int cluster_size = F2FS_I(inode)->i_cluster_size;
		int i;

		for (i = cluster_size - 1; i >= 0; i--) {
			loff_t start = rpages[i]->index << PAGE_SHIFT;

			if (from <= start) {
				zero_user_segment(rpages[i], 0, PAGE_SIZE);
			} else {
				zero_user_segment(rpages[i], from - start,
								PAGE_SIZE);
				break;
			}
		}

		f2fs_compress_write_end(inode, fsdata, start_idx, true);
	}
	return 0;
}

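/*
 * Write one compressed cluster out of place: the first block address in
 * the dnode becomes the COMPRESS_ADDR marker, the next nr_cpages blocks
 * receive the compressed pages, and any remaining addresses are reset
 * to NEW_ADDR.
 */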
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = cc->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NEW_ADDR,
		.page = NULL,
		.encrypted_page = NULL,
		.compressed_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
	};
	struct dnode_of_data dn;
	struct node_info ni;
	struct compress_io_ctx *cic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	unsigned int last_index = cc->cluster_size - 1;
	loff_t psize;
	int i, err;

	if (IS_NOQUOTA(inode)) {
		/*
		 * We need to wait for node_write to avoid block allocation
		 * during checkpoint. This can only happen for quota writes,
		 * which can cause the discard race condition below.
		 */
		down_read(&sbi->node_write);
	} else if (!f2fs_trylock_op(sbi)) {
		return -EAGAIN;
	}

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (err)
		goto out_unlock_op;

	for (i = 0; i < cc->cluster_size; i++) {
		if (data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) == NULL_ADDR)
			goto out_put_dnode;
	}

	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto out_put_dnode;

	fio.version = ni.version;

	cic = f2fs_kzalloc(sbi, sizeof(struct compress_io_ctx), GFP_NOFS);
	if (!cic)
		goto out_put_dnode;

	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	cic->inode = inode;
	refcount_set(&cic->ref, cc->nr_cpages);
	cic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
			cc->log_cluster_size, GFP_NOFS);
	if (!cic->rpages)
		goto out_put_cic;

	cic->nr_rpages = cc->cluster_size;

	for (i = 0; i < cc->nr_cpages; i++) {
		f2fs_set_compressed_page(cc->cpages[i], inode,
					cc->rpages[i + 1]->index, cic);
		fio.compressed_page = cc->cpages[i];

		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
						dn.ofs_in_node + i + 1);

		/* wait for GCed page writeback via META_MAPPING */
		f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);

		if (fio.encrypted) {
			fio.page = cc->rpages[i + 1];
			err = f2fs_encrypt_one_page(&fio);
			if (err)
				goto out_destroy_crypt;
			cc->cpages[i] = fio.encrypted_page;
		}
	}

	set_cluster_writeback(cc);

	for (i = 0; i < cc->cluster_size; i++)
		cic->rpages[i] = cc->rpages[i];

	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
		block_t blkaddr;

		blkaddr = f2fs_data_blkaddr(&dn);
		fio.page = cc->rpages[i];
		fio.old_blkaddr = blkaddr;

		/* cluster header */
		if (i == 0) {
			if (blkaddr == COMPRESS_ADDR)
				fio.compr_blocks++;
			if (__is_valid_data_blkaddr(blkaddr))
				f2fs_invalidate_blocks(sbi, blkaddr);
			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
			goto unlock_continue;
		}

		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
			fio.compr_blocks++;

		if (i > cc->nr_cpages) {
			if (__is_valid_data_blkaddr(blkaddr)) {
				f2fs_invalidate_blocks(sbi, blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			goto unlock_continue;
		}

		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

		if (fio.encrypted)
			fio.encrypted_page = cc->cpages[i - 1];
		else
			fio.compressed_page = cc->cpages[i - 1];

		cc->cpages[i - 1] = NULL;
		f2fs_outplace_write_data(&dn, &fio);
		(*submitted)++;
unlock_continue:
		inode_dec_dirty_pages(cc->inode);
		unlock_page(fio.page);
	}

	if (fio.compr_blocks)
		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);

	set_inode_flag(cc->inode, FI_APPEND_WRITE);
	if (cc->cluster_idx == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	f2fs_put_dnode(&dn);
	if (IS_NOQUOTA(inode))
		up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);

	spin_lock(&fi->i_size_lock);
	if (fi->last_disk_size < psize)
		fi->last_disk_size = psize;
	spin_unlock(&fi->i_size_lock);

	f2fs_put_rpages(cc);
	f2fs_destroy_compress_ctx(cc);
	return 0;

out_destroy_crypt:
	kfree(cic->rpages);

	for (--i; i >= 0; i--)
		fscrypt_finalize_bounce_page(&cc->cpages[i]);
	for (i = 0; i < cc->nr_cpages; i++) {
		if (!cc->cpages[i])
			continue;
		f2fs_put_page(cc->cpages[i], 1);
	}
out_put_cic:
	kfree(cic);
out_put_dnode:
	f2fs_put_dnode(&dn);
out_unlock_op:
	if (IS_NOQUOTA(inode))
		up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);
	return -EAGAIN;
}

void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct compress_io_ctx *cic =
			(struct compress_io_ctx *)page_private(page);
	int i;

	if (unlikely(bio->bi_status))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_compress_free_page(page);

	dec_page_count(sbi, F2FS_WB_DATA);

	if (refcount_dec_not_one(&cic->ref))
		return;

	for (i = 0; i < cic->nr_rpages; i++) {
		WARN_ON(!cic->rpages[i]);
		clear_cold_data(cic->rpages[i]);
		end_page_writeback(cic->rpages[i]);
	}

	kfree(cic->rpages);
	kfree(cic);
}

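/*
 * Fallback path: write the cluster's raw pages one by one when the
 * cluster is not (or can no longer be) stored in compressed form.
 */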
static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct address_space *mapping = cc->inode->i_mapping;
	int _submitted, compr_blocks, ret;
	int i = -1, err = 0;

	compr_blocks = f2fs_compressed_blocks(cc);
	if (compr_blocks < 0) {
		err = compr_blocks;
		goto out_err;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
retry_write:
		if (cc->rpages[i]->mapping != mapping) {
			unlock_page(cc->rpages[i]);
			continue;
		}

		BUG_ON(!PageLocked(cc->rpages[i]));

		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks);
		if (ret) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(cc->rpages[i]);
				ret = 0;
			} else if (ret == -EAGAIN) {
				/*
				 * For quota files, just redirty the remaining
				 * pages to avoid a deadlock caused by a
				 * cluster update race with foreground
				 * operations.
				 */
				if (IS_NOQUOTA(cc->inode)) {
					err = 0;
					goto out_err;
				}
				ret = 0;
				cond_resched();
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				lock_page(cc->rpages[i]);

				if (!PageDirty(cc->rpages[i])) {
					unlock_page(cc->rpages[i]);
					continue;
				}

				clear_page_dirty_for_io(cc->rpages[i]);
				goto retry_write;
			}
			err = ret;
			goto out_err;
		}

		*submitted += _submitted;
	}
	return 0;
out_err:
	for (++i; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		redirty_page_for_writepage(wbc, cc->rpages[i]);
		unlock_page(cc->rpages[i]);
	}
	return err;
}

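/*
 * Writeback entry point for one cluster: try the compressed path first,
 * then fall back to writing raw pages when compression is not possible
 * or fails with -EAGAIN.
 */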
int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int err;

	*submitted = 0;
	if (cluster_may_compress(cc)) {
		err = f2fs_compress_pages(cc);
		if (err == -EAGAIN) {
			goto write;
		} else if (err) {
			f2fs_put_rpages_wbc(cc, wbc, true, 1);
			goto destroy_out;
		}

		err = f2fs_write_compressed_pages(cc, submitted,
							wbc, io_type);
		cops->destroy_compress_ctx(cc);
		kfree(cc->cpages);
		cc->cpages = NULL;
		if (!err)
			return 0;
		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
	}
write:
	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
	f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
	f2fs_destroy_compress_ctx(cc);
	return err;
}

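/*
 * Allocate a decompress io context for one cluster; cpages[] gets fresh
 * intermediate pages whose indexes start right after the cluster header
 * block (start_idx + 1).
 */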
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i;

	dic = f2fs_kzalloc(sbi, sizeof(struct decompress_io_ctx), GFP_NOFS);
	if (!dic)
		return ERR_PTR(-ENOMEM);

	dic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
			cc->log_cluster_size, GFP_NOFS);
	if (!dic->rpages) {
		kfree(dic);
		return ERR_PTR(-ENOMEM);
	}

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	refcount_set(&dic->ref, cc->nr_cpages);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	dic->failed = false;

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					dic->nr_cpages, GFP_NOFS);
	if (!dic->cpages)
		goto out_free;

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_compress_alloc_page();
		if (!page)
			goto out_free;

		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1, dic);
		dic->cpages[i] = page;
	}

	return dic;

out_free:
	f2fs_free_dic(dic);
	return ERR_PTR(-ENOMEM);
}

void f2fs_free_dic(struct decompress_io_ctx *dic)
{
	int i;

	if (dic->tpages) {
		for (i = 0; i < dic->cluster_size; i++) {
			if (dic->rpages[i])
				continue;
			if (!dic->tpages[i])
				continue;
			f2fs_compress_free_page(dic->tpages[i]);
		}
		kfree(dic->tpages);
	}

	if (dic->cpages) {
		for (i = 0; i < dic->nr_cpages; i++) {
			if (!dic->cpages[i])
				continue;
			f2fs_compress_free_page(dic->cpages[i]);
		}
		kfree(dic->cpages);
	}

	kfree(dic->rpages);
	kfree(dic);
}

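/*
 * Finish a cluster read: mark each raw page uptodate (after optional
 * fs-verity verification) or clear it on error, then unlock it.
 */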
void f2fs_decompress_end_io(struct page **rpages,
			unsigned int cluster_size, bool err, bool verity)
{
	int i;

	for (i = 0; i < cluster_size; i++) {
		struct page *rpage = rpages[i];

		if (!rpage)
			continue;

		if (err || PageError(rpage))
			goto clear_uptodate;

		if (!verity || fsverity_verify_page(rpage)) {
			SetPageUptodate(rpage);
			goto unlock;
		}
clear_uptodate:
		ClearPageUptodate(rpage);
		ClearPageError(rpage);
unlock:
		unlock_page(rpage);
	}
}
1533