// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

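/*
 * Per-algorithm backend operations. The compress-side context hooks set up
 * scratch memory before compress_pages() runs; only zstd needs the
 * decompress-side context hooks as well.
 */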
struct f2fs_compress_ops {
	int (*init_compress_ctx)(struct compress_ctx *cc);
	void (*destroy_compress_ctx)(struct compress_ctx *cc);
	int (*compress_pages)(struct compress_ctx *cc);
	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
	int (*decompress_pages)(struct decompress_io_ctx *dic);
};

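/*
 * Cluster geometry helpers. As an example, with log_cluster_size == 2
 * (a four-page cluster), page index 7 sits at offset 3 within cluster 1,
 * whose first page index is 4.
 */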
static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
	return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
	return cc->cluster_idx << cc->log_cluster_size;
}

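/*
 * A compressed page carries a pointer to its (de)compress context in
 * page_private(), whose first field is F2FS_COMPRESSED_PAGE_MAGIC;
 * atomic/dummy written pages also use page_private() and are filtered
 * out first.
 */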
bool f2fs_is_compressed_page(struct page *page)
{
	if (!PagePrivate(page))
		return false;
	if (!page_private(page))
		return false;
	if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
		return false;
	f2fs_bug_on(F2FS_M_SB(page->mapping),
		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
	return true;
}

static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data)
{
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)data);

	/* i_crypto_info and iv index */
	page->index = index;
	page->mapping = inode->i_mapping;
}

static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!cc->rpages[i])
			continue;
		if (unlock)
			unlock_page(cc->rpages[i]);
		else
			put_page(cc->rpages[i]);
	}
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
	f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
	f2fs_drop_rpages(cc, len, true);
}

static void f2fs_put_rpages_mapping(struct address_space *mapping,
				pgoff_t start, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		struct page *page = find_get_page(mapping, start + i);

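		/*
		 * Two references are dropped per page: the one taken by
		 * find_get_page() above and the one held since the cluster
		 * pages were pinned in prepare_compress_overwrite().
		 */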
		put_page(page);
		put_page(page);
	}
}

static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, int unlock)
{
	unsigned int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		if (redirty)
			redirty_page_for_writepage(wbc, cc->rpages[i]);
		f2fs_put_page(cc->rpages[i], unlock);
	}
}

struct page *f2fs_compress_control_page(struct page *page)
{
	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}

int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);

	if (cc->nr_rpages)
		return 0;

	cc->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
					cc->log_cluster_size, GFP_NOFS);
	return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
{
	kfree(cc->rpages);
	cc->rpages = NULL;
	cc->nr_rpages = 0;
	cc->nr_cpages = 0;
	cc->cluster_idx = NULL_CLUSTER;
}

void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
{
	unsigned int cluster_ofs;

	if (!f2fs_cluster_can_merge_page(cc, page->index))
		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

	cluster_ofs = offset_in_cluster(cc, page->index);
	cc->rpages[cluster_ofs] = page;
	cc->nr_rpages++;
	cc->cluster_idx = cluster_idx(cc, page->index);
}

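/*
 * The compressed payload written to disk is framed by struct compress_data
 * (a little-endian length word plus reserved fields, see f2fs.h); all
 * backends below produce/consume the cdata[] area inside that frame.
 */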
#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZO1X_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
						dic->rbuf, &dic->rlen);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzo_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZ4_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	/*
	 * We do not set cc->clen to LZ4_compressBound(inputsize) to cover
	 * the worst compression case, because the lz4 compressor handles
	 * the output budget properly.
	 */
	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len;

	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
						dic->clen, dic->rlen);
	if (ret < 0) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (ret != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					ret,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
	.init_compress_ctx	= lz4_init_compress_ctx,
	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
	.compress_pages		= lz4_compress_pages,
	.decompress_pages	= lz4_decompress_pages,
};
#endif

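/*
 * zstd uses the kernel's workspace-based streaming API: the caller sizes a
 * workspace with ZSTD_{C,D}StreamWorkspaceBound() and hands it to
 * ZSTD_init{C,D}Stream(), so the library performs no allocations of its own.
 */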
#ifdef CONFIG_F2FS_FS_ZSTD
#define F2FS_ZSTD_DEFAULT_CLEVEL	1

static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
	ZSTD_parameters params;
	ZSTD_CStream *stream;
	void *workspace;
	unsigned int workspace_size;

	params = ZSTD_getParams(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen, 0);
	workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);

	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	cc->private = workspace;
	cc->private2 = stream;

	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
	cc->private2 = NULL;
}

static int zstd_compress_pages(struct compress_ctx *cc)
{
	ZSTD_CStream *stream = cc->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int src_size = cc->rlen;
	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	int ret;

	inbuf.pos = 0;
	inbuf.src = cc->rbuf;
	inbuf.size = src_size;

	outbuf.pos = 0;
	outbuf.dst = cc->cbuf->cdata;
	outbuf.size = dst_size;

	ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	ret = ZSTD_endStream(stream, &outbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	/*
	 * Compressed data remains in the intermediate buffer because there
	 * is no more space in cbuf->cdata.
	 */
	if (ret)
		return -EAGAIN;

	cc->clen = outbuf.pos;
	return 0;
}

static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream;
	void *workspace;
	unsigned int workspace_size;

	workspace_size = ZSTD_DStreamWorkspaceBound(MAX_COMPRESS_WINDOW_SIZE);

	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initDStream(MAX_COMPRESS_WINDOW_SIZE,
					workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	dic->private = workspace;
	dic->private2 = stream;

	return 0;
}

static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
	kvfree(dic->private);
	dic->private = NULL;
	dic->private2 = NULL;
}

static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream = dic->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int ret;

	inbuf.pos = 0;
	inbuf.src = dic->cbuf->cdata;
	inbuf.size = dic->clen;

	outbuf.pos = 0;
	outbuf.dst = dic->rbuf;
	outbuf.size = dic->rlen;

	ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	if (dic->rlen != outbuf.pos) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
				"expected:%lu\n", KERN_ERR,
				F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, outbuf.pos,
				PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}

	return 0;
}

static const struct f2fs_compress_ops f2fs_zstd_ops = {
	.init_compress_ctx	= zstd_init_compress_ctx,
	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
	.compress_pages		= zstd_compress_pages,
	.init_decompress_ctx	= zstd_init_decompress_ctx,
	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
	.decompress_pages	= zstd_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZO
#ifdef CONFIG_F2FS_FS_LZORLE
static int lzorle_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzorle_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzorle_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif
#endif

static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
	&f2fs_lzo_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
	&f2fs_lz4_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
	&f2fs_zstd_ops,
#else
	NULL,
#endif
#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
	&f2fs_lzorle_ops,
#else
	NULL,
#endif
};

bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

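/*
 * Intermediate (compressed) pages come from a private mempool rather than
 * the page allocator, so writeback can always make forward progress; the
 * pool size is a module parameter.
 */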
static mempool_t *compress_page_pool;
static unsigned int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC(num_compress_pages,
		"Number of intermediate compress pages to preallocate");

int f2fs_init_compress_mempool(void)
{
	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
	if (!compress_page_pool)
		return -ENOMEM;

	return 0;
}

void f2fs_destroy_compress_mempool(void)
{
	mempool_destroy(compress_page_pool);
}

static struct page *f2fs_compress_alloc_page(void)
{
	struct page *page;

	page = mempool_alloc(compress_page_pool, GFP_NOFS);
	lock_page(page);

	return page;
}

static void f2fs_compress_free_page(struct page *page)
{
	if (!page)
		return;
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	page->mapping = NULL;
	unlock_page(page);
	mempool_free(page, compress_page_pool);
}

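/*
 * Compress one cluster. Both page arrays are vmap()ed so the backends see
 * contiguous buffers, and compression only succeeds if it saves at least
 * one full page (cc->clen plus the header must fit into cluster_size - 1
 * pages); otherwise the caller falls back to writing raw pages.
 */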
static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
	unsigned int max_len, nr_cpages;
	int i, ret;

	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
				cc->cluster_size, fi->i_compress_algorithm);

	if (cops->init_compress_ctx) {
		ret = cops->init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	max_len = COMPRESS_HEADER_SIZE + cc->clen;
	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);

	cc->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					cc->nr_cpages, GFP_NOFS);
	if (!cc->cpages) {
		ret = -ENOMEM;
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++) {
		cc->cpages[i] = f2fs_compress_alloc_page();
		if (!cc->cpages[i]) {
			ret = -ENOMEM;
			goto out_free_cpages;
		}
	}

	cc->rbuf = vmap(cc->rpages, cc->cluster_size, VM_MAP, PAGE_KERNEL_RO);
	if (!cc->rbuf) {
		ret = -ENOMEM;
		goto out_free_cpages;
	}

	cc->cbuf = vmap(cc->cpages, cc->nr_cpages, VM_MAP, PAGE_KERNEL);
	if (!cc->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;

	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

	if (cc->clen > max_len) {
		ret = -EAGAIN;
		goto out_vunmap_cbuf;
	}

	cc->cbuf->clen = cpu_to_le32(cc->clen);

	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
		cc->cbuf->reserved[i] = cpu_to_le32(0);

	nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* zero out any unused part of the last page */
	memset(&cc->cbuf->cdata[cc->clen], 0,
	       (nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE));

	vunmap(cc->cbuf);
	vunmap(cc->rbuf);

	for (i = nr_cpages; i < cc->nr_cpages; i++) {
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);

	cc->nr_cpages = nr_cpages;

	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return 0;

out_vunmap_cbuf:
	vunmap(cc->cbuf);
out_vunmap_rbuf:
	vunmap(cc->rbuf);
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_compress_free_page(cc->cpages[i]);
	}
	kfree(cc->cpages);
	cc->cpages = NULL;
destroy_compress_ctx:
	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);
out:
	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return ret;
}

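/*
 * Called from read completion for each compressed page of a cluster; the
 * caller that drops the last reference on the decompress context performs
 * the actual decompression. On verity files the context is kept alive
 * (ref reset to nr_cpages) so verification can release it afterwards.
 */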
void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int ret;

	dec_page_count(sbi, F2FS_RD_DATA);

	if (bio->bi_status || PageError(page))
		dic->failed = true;

	if (refcount_dec_not_one(&dic->ref))
		return;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	/* submit partial compressed pages */
	if (dic->failed) {
		ret = -EIO;
		goto out_free_dic;
	}

	if (cops->init_decompress_ctx) {
		ret = cops->init_decompress_ctx(dic);
		if (ret)
			goto out_free_dic;
	}

	dic->rbuf = vmap(dic->tpages, dic->cluster_size, VM_MAP, PAGE_KERNEL);
	if (!dic->rbuf) {
		ret = -ENOMEM;
		goto destroy_decompress_ctx;
	}

	dic->cbuf = vmap(dic->cpages, dic->nr_cpages, VM_MAP, PAGE_KERNEL_RO);
	if (!dic->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
	dic->rlen = PAGE_SIZE << dic->log_cluster_size;

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;
		goto out_vunmap_cbuf;
	}

	ret = cops->decompress_pages(dic);

out_vunmap_cbuf:
	vunmap(dic->cbuf);
out_vunmap_rbuf:
	vunmap(dic->rbuf);
destroy_decompress_ctx:
	if (cops->destroy_decompress_ctx)
		cops->destroy_decompress_ctx(dic);
out_free_dic:
	if (verity)
		refcount_set(&dic->ref, dic->nr_cpages);
	if (!verity)
		f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
								ret, false);

	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
							dic->clen, ret);
	if (!verity)
		f2fs_free_dic(dic);
}

static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	if (cc->cluster_idx == NULL_CLUSTER)
		return true;
	return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
	return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
	return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
	if (f2fs_cluster_is_empty(cc))
		return true;
	return is_page_in_cluster(cc, index);
}

static bool __cluster_may_compress(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	loff_t i_size = i_size_read(cc->inode);
	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		f2fs_bug_on(sbi, !page);

		if (unlikely(f2fs_cp_error(sbi)))
			return false;
		if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
			return false;

		/* beyond EOF */
		if (page->index >= nr_pages)
			return false;
	}
	return true;
}

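/*
 * Returns 0 for a hole or a non-compressed cluster; for a compressed
 * cluster, 1 (the COMPRESS_ADDR header slot) plus the number of counted
 * data blocks (valid compressed blocks when compr is true, any non-NULL
 * block otherwise); negative on node lookup error.
 */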
static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
{
	struct dnode_of_data dn;
	int ret;

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx_of_cluster(cc),
							LOOKUP_NODE);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;
		goto fail;
	}

	if (dn.data_blkaddr == COMPRESS_ADDR) {
		int i;

		ret = 1;
		for (i = 1; i < cc->cluster_size; i++) {
			block_t blkaddr;

			blkaddr = data_blkaddr(dn.inode,
					dn.node_page, dn.ofs_in_node + i);
			if (compr) {
				if (__is_valid_data_blkaddr(blkaddr))
					ret++;
			} else {
				if (blkaddr != NULL_ADDR)
					ret++;
			}
		}
	}
fail:
	f2fs_put_dnode(&dn);
	return ret;
}

/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc, true);
}

/* return # of valid blocks in compressed cluster */
static int f2fs_cluster_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc, false);
}

int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
	};

	return f2fs_cluster_blocks(&cc);
}

static bool cluster_may_compress(struct compress_ctx *cc)
{
	if (!f2fs_compressed_file(cc->inode))
		return false;
	if (f2fs_is_atomic_file(cc->inode))
		return false;
	if (f2fs_is_mmap_file(cc->inode))
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
		return false;
	return __cluster_may_compress(cc);
}

static void set_cluster_writeback(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i])
			set_page_writeback(cc->rpages[i]);
	}
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++)
		if (cc->rpages[i])
			set_page_dirty(cc->rpages[i]);
}

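/*
 * Prepare an in-place overwrite of a compressed cluster: pin all cluster
 * pages in the page cache, read the cluster in if any page is not yet
 * uptodate, and, for a partially mapped cluster, preallocate the missing
 * blocks so the whole cluster can be rewritten at write_end time.
 */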
static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	sector_t last_block_in_bio;
	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;
	bool prealloc;

retry:
	ret = f2fs_cluster_blocks(cc);
	if (ret <= 0)
		return ret;

	/* compressed case */
	prealloc = (ret < cc->cluster_size);

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;

	/* keep page reference to avoid page reclaim */
	for (i = 0; i < cc->cluster_size; i++) {
		page = f2fs_pagecache_get_page(mapping, start_idx + i,
							fgp_flag, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			goto unlock_pages;
		}

		if (PageUptodate(page))
			unlock_page(page);
		else
			f2fs_compress_ctx_add_page(cc, page);
	}

	if (!f2fs_cluster_is_empty(cc)) {
		struct bio *bio = NULL;

		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
					&last_block_in_bio, false, true);
		f2fs_destroy_compress_ctx(cc);
		if (ret)
			goto release_pages;
		if (bio)
			f2fs_submit_bio(sbi, bio, DATA);

		ret = f2fs_init_compress_ctx(cc);
		if (ret)
			goto release_pages;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		f2fs_bug_on(sbi, cc->rpages[i]);

		page = find_lock_page(mapping, start_idx + i);
		f2fs_bug_on(sbi, !page);

		f2fs_wait_on_page_writeback(page, DATA, true, true);

		f2fs_compress_ctx_add_page(cc, page);
		f2fs_put_page(page, 0);

		if (!PageUptodate(page)) {
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_put_rpages_mapping(mapping, start_idx,
					cc->cluster_size);
			f2fs_destroy_compress_ctx(cc);
			goto retry;
		}
	}

	if (prealloc) {
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);

		set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

		for (i = cc->cluster_size - 1; i > 0; i--) {
			ret = f2fs_get_block(&dn, start_idx + i);
			if (ret) {
				i = cc->cluster_size;
				break;
			}

			if (dn.data_blkaddr != NEW_ADDR)
				break;
		}

		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

	if (likely(!ret)) {
		*fsdata = cc->rpages;
		*pagep = cc->rpages[offset_in_cluster(cc, index)];
		return cc->cluster_size;
	}

unlock_pages:
	f2fs_unlock_rpages(cc, i);
release_pages:
	f2fs_put_rpages_mapping(mapping, start_idx, i);
	f2fs_destroy_compress_ctx(cc);
	return ret;
}

int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
		.rpages = NULL,
		.nr_rpages = 0,
	};

	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}

bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
					pgoff_t index, unsigned copied)
{
	struct compress_ctx cc = {
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.rpages = fsdata,
	};
	bool first_index = (index == cc.rpages[0]->index);

	if (copied)
		set_cluster_dirty(&cc);

	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
	f2fs_destroy_compress_ctx(&cc);

	return first_index;
}

int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{
	void *fsdata = NULL;
	struct page *pagep;
	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
							log_cluster_size;
	int err;

	err = f2fs_is_compressed_cluster(inode, start_idx);
	if (err < 0)
		return err;

	/* truncate normal cluster */
	if (!err)
		return f2fs_do_truncate_blocks(inode, from, lock);

	/* truncate compressed cluster */
	err = f2fs_prepare_compress_overwrite(inode, &pagep,
						start_idx, &fsdata);

	/* should not be a normal cluster */
	f2fs_bug_on(F2FS_I_SB(inode), err == 0);

	if (err <= 0)
		return err;

	if (err > 0) {
		struct page **rpages = fsdata;
		int cluster_size = F2FS_I(inode)->i_cluster_size;
		int i;

		for (i = cluster_size - 1; i >= 0; i--) {
			loff_t start = (loff_t)rpages[i]->index << PAGE_SHIFT;

			if (from <= start) {
				zero_user_segment(rpages[i], 0, PAGE_SIZE);
			} else {
				zero_user_segment(rpages[i], from - start,
								PAGE_SIZE);
				break;
			}
		}

		f2fs_compress_write_end(inode, fsdata, start_idx, true);
	}
	return 0;
}

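/*
 * Write back one cluster in compressed form: block 0 of the cluster gets
 * the special COMPRESS_ADDR header address, blocks 1..nr_cpages receive
 * the compressed payload, and any remaining block addresses are reset to
 * NEW_ADDR since their data now lives inside the payload.
 */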
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = cc->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NEW_ADDR,
		.page = NULL,
		.encrypted_page = NULL,
		.compressed_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
		.encrypted = f2fs_encrypted_file(cc->inode),
	};
	struct dnode_of_data dn;
	struct node_info ni;
	struct compress_io_ctx *cic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	unsigned int last_index = cc->cluster_size - 1;
	loff_t psize;
	int i, err;

	if (!IS_NOQUOTA(inode) && !f2fs_trylock_op(sbi))
		return -EAGAIN;

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (err)
		goto out_unlock_op;

	for (i = 0; i < cc->cluster_size; i++) {
		if (data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) == NULL_ADDR)
			goto out_put_dnode;
	}

	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto out_put_dnode;

	fio.version = ni.version;

	cic = f2fs_kzalloc(sbi, sizeof(struct compress_io_ctx), GFP_NOFS);
	if (!cic)
		goto out_put_dnode;

	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	cic->inode = inode;
	refcount_set(&cic->ref, cc->nr_cpages);
	cic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
			cc->log_cluster_size, GFP_NOFS);
	if (!cic->rpages)
		goto out_put_cic;

	cic->nr_rpages = cc->cluster_size;

	for (i = 0; i < cc->nr_cpages; i++) {
		f2fs_set_compressed_page(cc->cpages[i], inode,
					cc->rpages[i + 1]->index, cic);
		fio.compressed_page = cc->cpages[i];
		if (fio.encrypted) {
			fio.page = cc->rpages[i + 1];
			err = f2fs_encrypt_one_page(&fio);
			if (err)
				goto out_destroy_crypt;
			cc->cpages[i] = fio.encrypted_page;
		}
	}

	set_cluster_writeback(cc);

	for (i = 0; i < cc->cluster_size; i++)
		cic->rpages[i] = cc->rpages[i];

	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
		block_t blkaddr;

		blkaddr = f2fs_data_blkaddr(&dn);
		fio.page = cc->rpages[i];
		fio.old_blkaddr = blkaddr;

		/* cluster header */
		if (i == 0) {
			if (blkaddr == COMPRESS_ADDR)
				fio.compr_blocks++;
			if (__is_valid_data_blkaddr(blkaddr))
				f2fs_invalidate_blocks(sbi, blkaddr);
			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
			goto unlock_continue;
		}

		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
			fio.compr_blocks++;

		if (i > cc->nr_cpages) {
			if (__is_valid_data_blkaddr(blkaddr)) {
				f2fs_invalidate_blocks(sbi, blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			goto unlock_continue;
		}

		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

		if (fio.encrypted)
			fio.encrypted_page = cc->cpages[i - 1];
		else
			fio.compressed_page = cc->cpages[i - 1];

		cc->cpages[i - 1] = NULL;
		f2fs_outplace_write_data(&dn, &fio);
		(*submitted)++;
unlock_continue:
		inode_dec_dirty_pages(cc->inode);
		unlock_page(fio.page);
	}

	if (fio.compr_blocks)
		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);

	set_inode_flag(cc->inode, FI_APPEND_WRITE);
	if (cc->cluster_idx == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	f2fs_put_dnode(&dn);
	if (!IS_NOQUOTA(inode))
		f2fs_unlock_op(sbi);

	spin_lock(&fi->i_size_lock);
	if (fi->last_disk_size < psize)
		fi->last_disk_size = psize;
	spin_unlock(&fi->i_size_lock);

	f2fs_put_rpages(cc);
	f2fs_destroy_compress_ctx(cc);
	return 0;

out_destroy_crypt:
	kfree(cic->rpages);

	for (--i; i >= 0; i--)
		fscrypt_finalize_bounce_page(&cc->cpages[i]);
	for (i = 0; i < cc->nr_cpages; i++) {
		if (!cc->cpages[i])
			continue;
		f2fs_put_page(cc->cpages[i], 1);
	}
out_put_cic:
	kfree(cic);
out_put_dnode:
	f2fs_put_dnode(&dn);
out_unlock_op:
	if (!IS_NOQUOTA(inode))
		f2fs_unlock_op(sbi);
	return -EAGAIN;
}

void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct compress_io_ctx *cic =
			(struct compress_io_ctx *)page_private(page);
	int i;

	if (unlikely(bio->bi_status))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_compress_free_page(page);

	dec_page_count(sbi, F2FS_WB_DATA);

	if (refcount_dec_not_one(&cic->ref))
		return;

	for (i = 0; i < cic->nr_rpages; i++) {
		WARN_ON(!cic->rpages[i]);
		clear_cold_data(cic->rpages[i]);
		end_page_writeback(cic->rpages[i]);
	}

	kfree(cic->rpages);
	kfree(cic);
}

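/*
 * Fallback path: write the cluster page by page, uncompressed.
 * compr_blocks is passed down so the single-page write path can account
 * for a cluster that was previously stored in compressed form.
 */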
static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct address_space *mapping = cc->inode->i_mapping;
	int _submitted, compr_blocks, ret;
	int i = -1, err = 0;

	compr_blocks = f2fs_compressed_blocks(cc);
	if (compr_blocks < 0) {
		err = compr_blocks;
		goto out_err;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
retry_write:
		if (cc->rpages[i]->mapping != mapping) {
			unlock_page(cc->rpages[i]);
			continue;
		}

		BUG_ON(!PageLocked(cc->rpages[i]));

		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks);
		if (ret) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(cc->rpages[i]);
				ret = 0;
			} else if (ret == -EAGAIN) {
				/*
				 * for quota file, just redirty left pages to
				 * avoid deadlock caused by cluster update race
				 * from foreground operation.
				 */
				if (IS_NOQUOTA(cc->inode)) {
					err = 0;
					goto out_err;
				}
				ret = 0;
				cond_resched();
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				lock_page(cc->rpages[i]);
				clear_page_dirty_for_io(cc->rpages[i]);
				goto retry_write;
			}
			err = ret;
			goto out_err;
		}

		*submitted += _submitted;
	}
	return 0;
out_err:
	for (++i; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		redirty_page_for_writepage(wbc, cc->rpages[i]);
		unlock_page(cc->rpages[i]);
	}
	return err;
}

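/*
 * Entry point from writepages for a full cluster: try to compress and
 * write it; on -EAGAIN (e.g. the data did not shrink by at least one
 * page, or blocks for the cluster are not all in place) fall back to the
 * raw write path above.
 */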
int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int err;

	*submitted = 0;
	if (cluster_may_compress(cc)) {
		err = f2fs_compress_pages(cc);
		if (err == -EAGAIN) {
			goto write;
		} else if (err) {
			f2fs_put_rpages_wbc(cc, wbc, true, 1);
			goto destroy_out;
		}

		err = f2fs_write_compressed_pages(cc, submitted,
							wbc, io_type);
		cops->destroy_compress_ctx(cc);
		if (!err)
			return 0;
		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
	}
write:
	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
	f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
	f2fs_destroy_compress_ctx(cc);
	return err;
}

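/*
 * Allocate the decompress context for a cluster read. rpages are the
 * caller's pagecache pages, cpages hold the on-disk compressed payload,
 * and tpages is the decompression target: it aliases rpages where they
 * exist and borrows mempool pages for holes in the cluster.
 */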
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i;

	dic = f2fs_kzalloc(sbi, sizeof(struct decompress_io_ctx), GFP_NOFS);
	if (!dic)
		return ERR_PTR(-ENOMEM);

	dic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
			cc->log_cluster_size, GFP_NOFS);
	if (!dic->rpages) {
		kfree(dic);
		return ERR_PTR(-ENOMEM);
	}

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	refcount_set(&dic->ref, cc->nr_cpages);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	dic->failed = false;

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					dic->nr_cpages, GFP_NOFS);
	if (!dic->cpages)
		goto out_free;

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_compress_alloc_page();
		if (!page)
			goto out_free;

		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1, dic);
		dic->cpages[i] = page;
	}

	dic->tpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					dic->cluster_size, GFP_NOFS);
	if (!dic->tpages)
		goto out_free;

	for (i = 0; i < dic->cluster_size; i++) {
		if (cc->rpages[i]) {
			dic->tpages[i] = cc->rpages[i];
			continue;
		}

		dic->tpages[i] = f2fs_compress_alloc_page();
		if (!dic->tpages[i])
			goto out_free;
	}

	return dic;

out_free:
	f2fs_free_dic(dic);
	return ERR_PTR(-ENOMEM);
}

void f2fs_free_dic(struct decompress_io_ctx *dic)
{
	int i;

	if (dic->tpages) {
		for (i = 0; i < dic->cluster_size; i++) {
			if (dic->rpages[i])
				continue;
			if (!dic->tpages[i])
				continue;
			f2fs_compress_free_page(dic->tpages[i]);
		}
		kfree(dic->tpages);
	}

	if (dic->cpages) {
		for (i = 0; i < dic->nr_cpages; i++) {
			if (!dic->cpages[i])
				continue;
			f2fs_compress_free_page(dic->cpages[i]);
		}
		kfree(dic->cpages);
	}

	kfree(dic->rpages);
	kfree(dic);
}

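/*
 * Final per-page completion for a decompressed cluster: pages become
 * uptodate only if decompression succeeded and, on verity files, the
 * page passes fsverity verification.
 */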
void f2fs_decompress_end_io(struct page **rpages,
			unsigned int cluster_size, bool err, bool verity)
{
	int i;

	for (i = 0; i < cluster_size; i++) {
		struct page *rpage = rpages[i];

		if (!rpage)
			continue;

		if (err || PageError(rpage))
			goto clear_uptodate;

		if (!verity || fsverity_verify_page(rpage)) {
			SetPageUptodate(rpage);
			goto unlock;
		}
clear_uptodate:
		ClearPageUptodate(rpage);
		ClearPageError(rpage);
unlock:
		unlock_page(rpage);
	}
}
1494