xref: /openbmc/linux/fs/f2fs/compress.c (revision 015d239a)
// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

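/*
 * Per-algorithm hooks: context setup/teardown plus the actual
 * compression and decompression of an in-memory cluster buffer.
 */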
struct f2fs_compress_ops {
	int (*init_compress_ctx)(struct compress_ctx *cc);
	void (*destroy_compress_ctx)(struct compress_ctx *cc);
	int (*compress_pages)(struct compress_ctx *cc);
	int (*decompress_pages)(struct decompress_io_ctx *dic);
};

static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
	return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
	return cc->cluster_idx << cc->log_cluster_size;
}

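/*
 * A compressed page carries a pointer to its owning (de)compress
 * context in page_private(), tagged with F2FS_COMPRESSED_PAGE_MAGIC.
 */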
bool f2fs_is_compressed_page(struct page *page)
{
	if (!PagePrivate(page))
		return false;
	if (!page_private(page))
		return false;
	if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
		return false;
	f2fs_bug_on(F2FS_M_SB(page->mapping),
		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
	return true;
}

static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data, refcount_t *r)
{
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)data);

	/* i_crypto_info and iv index */
	page->index = index;
	page->mapping = inode->i_mapping;
	if (r)
		refcount_inc(r);
}

static void f2fs_put_compressed_page(struct page *page)
{
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	page->mapping = NULL;
	unlock_page(page);
	put_page(page);
}

static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!cc->rpages[i])
			continue;
		if (unlock)
			unlock_page(cc->rpages[i]);
		else
			put_page(cc->rpages[i]);
	}
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
	f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
	f2fs_drop_rpages(cc, len, true);
}

static void f2fs_put_rpages_mapping(struct compress_ctx *cc,
				struct address_space *mapping,
				pgoff_t start, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		struct page *page = find_get_page(mapping, start + i);

		/* drop both the lookup reference and the one held earlier */
		put_page(page);
		put_page(page);
	}
}

static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, int unlock)
{
	unsigned int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		if (redirty)
			redirty_page_for_writepage(wbc, cc->rpages[i]);
		f2fs_put_page(cc->rpages[i], unlock);
	}
}

struct page *f2fs_compress_control_page(struct page *page)
{
	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}

int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);

	if (cc->nr_rpages)
		return 0;

	cc->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
					cc->log_cluster_size, GFP_NOFS);
	return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
{
	kfree(cc->rpages);
	cc->rpages = NULL;
	cc->nr_rpages = 0;
	cc->nr_cpages = 0;
	cc->cluster_idx = NULL_CLUSTER;
}

void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
{
	unsigned int cluster_ofs;

	if (!f2fs_cluster_can_merge_page(cc, page->index))
		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

	cluster_ofs = offset_in_cluster(cc, page->index);
	cc->rpages[cluster_ofs] = page;
	cc->nr_rpages++;
	cc->cluster_idx = cluster_idx(cc, page->index);
}

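/*
 * Algorithm backends. Each is built only when its Kconfig option is
 * enabled; the corresponding f2fs_cops[] slot stays NULL otherwise.
 */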
#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZO1X_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
						dic->rbuf, &dic->rlen);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzo_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZ4_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = LZ4_compressBound(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len;

	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 compress failed\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id);
		return -EIO;
	}
	cc->clen = len;
	return 0;
}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
						dic->clen, dic->rlen);
	if (ret < 0) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (ret != PAGE_SIZE << dic->log_cluster_size) {
		/* report the actual decompressed size, not the expected one */
		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id, ret,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
	.init_compress_ctx	= lz4_init_compress_ctx,
	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
	.compress_pages		= lz4_compress_pages,
	.decompress_pages	= lz4_decompress_pages,
};
#endif

static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
	&f2fs_lzo_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
	&f2fs_lz4_ops,
#else
	NULL,
#endif
};

bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

static struct page *f2fs_grab_page(void)
{
	struct page *page;

	page = alloc_page(GFP_NOFS);
	if (!page)
		return NULL;
	lock_page(page);
	return page;
}

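/*
 * Compress cc->rpages into freshly grabbed cpages. On success the
 * cluster header (clen/chksum) is written into the first cpage and any
 * unused cpages are released; -EAGAIN means the data did not shrink by
 * at least one block, so the caller should write it uncompressed.
 */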
static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
	unsigned int max_len, nr_cpages;
	int i, ret;

	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
				cc->cluster_size, fi->i_compress_algorithm);

	ret = cops->init_compress_ctx(cc);
	if (ret)
		goto out;

	max_len = COMPRESS_HEADER_SIZE + cc->clen;
	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);

	cc->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					cc->nr_cpages, GFP_NOFS);
	if (!cc->cpages) {
		ret = -ENOMEM;
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++) {
		cc->cpages[i] = f2fs_grab_page();
		if (!cc->cpages[i]) {
			ret = -ENOMEM;
			goto out_free_cpages;
		}
	}

	cc->rbuf = vmap(cc->rpages, cc->cluster_size, VM_MAP, PAGE_KERNEL_RO);
	if (!cc->rbuf) {
		ret = -ENOMEM;
		goto out_free_cpages;
	}

	cc->cbuf = vmap(cc->cpages, cc->nr_cpages, VM_MAP, PAGE_KERNEL);
	if (!cc->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;

	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

	if (cc->clen > max_len) {
		ret = -EAGAIN;
		goto out_vunmap_cbuf;
	}

	cc->cbuf->clen = cpu_to_le32(cc->clen);
	cc->cbuf->chksum = cpu_to_le32(0);

	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
		cc->cbuf->reserved[i] = cpu_to_le32(0);

	vunmap(cc->cbuf);
	vunmap(cc->rbuf);

	nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	for (i = nr_cpages; i < cc->nr_cpages; i++) {
		f2fs_put_compressed_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

	cc->nr_cpages = nr_cpages;

	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return 0;

out_vunmap_cbuf:
	vunmap(cc->cbuf);
out_vunmap_rbuf:
	vunmap(cc->rbuf);
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_put_compressed_page(cc->cpages[i]);
	}
	kfree(cc->cpages);
	cc->cpages = NULL;
destroy_compress_ctx:
	cops->destroy_compress_ctx(cc);
out:
	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return ret;
}

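/*
 * Read end-io for one compressed page. The last reference to the
 * decompress context performs the actual decompression of the whole
 * cluster; earlier completions only drop their reference.
 */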
void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int ret;

	dec_page_count(sbi, F2FS_RD_DATA);

	if (bio->bi_status || PageError(page))
		dic->failed = true;

	if (refcount_dec_not_one(&dic->ref))
		return;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	/* if any compressed page failed to read, fail the whole cluster */
	if (dic->failed) {
		ret = -EIO;
		goto out_free_dic;
	}

	dic->rbuf = vmap(dic->tpages, dic->cluster_size, VM_MAP, PAGE_KERNEL);
	if (!dic->rbuf) {
		ret = -ENOMEM;
		goto out_free_dic;
	}

	dic->cbuf = vmap(dic->cpages, dic->nr_cpages, VM_MAP, PAGE_KERNEL_RO);
	if (!dic->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
	dic->rlen = PAGE_SIZE << dic->log_cluster_size;

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;
		goto out_vunmap_cbuf;
	}

	ret = cops->decompress_pages(dic);

out_vunmap_cbuf:
	vunmap(dic->cbuf);
out_vunmap_rbuf:
	vunmap(dic->rbuf);
out_free_dic:
	if (!verity)
		f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
								ret, false);

	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
							dic->clen, ret);
	if (!verity)
		f2fs_free_dic(dic);
}

static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	if (cc->cluster_idx == NULL_CLUSTER)
		return true;
	return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
	return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
	return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
	if (f2fs_cluster_is_empty(cc))
		return true;
	return is_page_in_cluster(cc, index);
}

static bool __cluster_may_compress(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	loff_t i_size = i_size_read(cc->inode);
	unsigned int nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		f2fs_bug_on(sbi, !page);

		if (unlikely(f2fs_cp_error(sbi)))
			return false;
		if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
			return false;

		/* beyond EOF */
		if (page->index >= nr_pages)
			return false;
	}
	return true;
}

/* return # of compressed block addresses */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
	struct dnode_of_data dn;
	int ret;

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx_of_cluster(cc),
							LOOKUP_NODE);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;
		goto fail;
	}

	if (dn.data_blkaddr == COMPRESS_ADDR) {
		int i;

		ret = 1;
		for (i = 1; i < cc->cluster_size; i++) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node + i);
			if (blkaddr != NULL_ADDR)
				ret++;
		}
	}
fail:
	f2fs_put_dnode(&dn);
	return ret;
}

int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
	};

	return f2fs_compressed_blocks(&cc);
}

static bool cluster_may_compress(struct compress_ctx *cc)
{
	if (!f2fs_compressed_file(cc->inode))
		return false;
	if (f2fs_is_atomic_file(cc->inode))
		return false;
	if (f2fs_is_mmap_file(cc->inode))
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	return __cluster_may_compress(cc);
}

static void set_cluster_writeback(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i])
			set_page_writeback(cc->rpages[i]);
	}
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++)
		if (cc->rpages[i])
			set_page_dirty(cc->rpages[i]);
}

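/*
 * Prepare a compressed cluster for partial overwrite: pin every page
 * of the cluster in the page cache, read and decompress it if any page
 * is not uptodate, and preallocate block addresses for holes so the
 * data can later be rewritten. Returns cluster_size on success, 0 if
 * the cluster is not compressed, or a negative errno.
 */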
static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	sector_t last_block_in_bio;
	unsigned int fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;
	bool prealloc;

retry:
	ret = f2fs_compressed_blocks(cc);
	if (ret <= 0)
		return ret;

	/* compressed case */
	prealloc = (ret < cc->cluster_size);

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;

	/* keep page reference to avoid page reclaim */
	for (i = 0; i < cc->cluster_size; i++) {
		page = f2fs_pagecache_get_page(mapping, start_idx + i,
							fgp_flag, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			goto unlock_pages;
		}

		if (PageUptodate(page))
			unlock_page(page);
		else
			f2fs_compress_ctx_add_page(cc, page);
	}

	if (!f2fs_cluster_is_empty(cc)) {
		struct bio *bio = NULL;

		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
						&last_block_in_bio, false);
		f2fs_destroy_compress_ctx(cc);
		if (ret)
			goto release_pages;
		if (bio)
			f2fs_submit_bio(sbi, bio, DATA);

		ret = f2fs_init_compress_ctx(cc);
		if (ret)
			goto release_pages;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		f2fs_bug_on(sbi, cc->rpages[i]);

		page = find_lock_page(mapping, start_idx + i);
		f2fs_bug_on(sbi, !page);

		f2fs_wait_on_page_writeback(page, DATA, true, true);

		f2fs_compress_ctx_add_page(cc, page);
		f2fs_put_page(page, 0);	/* rpages[] still holds a reference */

		if (!PageUptodate(page)) {
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_put_rpages_mapping(cc, mapping, start_idx,
					cc->cluster_size);
			f2fs_destroy_compress_ctx(cc);
			goto retry;
		}
	}

	if (prealloc) {
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);

		set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

		for (i = cc->cluster_size - 1; i > 0; i--) {
			ret = f2fs_get_block(&dn, start_idx + i);
			if (ret) {
				i = cc->cluster_size;
				break;
			}

			if (dn.data_blkaddr != NEW_ADDR)
				break;
		}

		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

	if (likely(!ret)) {
		*fsdata = cc->rpages;
		*pagep = cc->rpages[offset_in_cluster(cc, index)];
		return cc->cluster_size;
	}

unlock_pages:
	f2fs_unlock_rpages(cc, i);
release_pages:
	f2fs_put_rpages_mapping(cc, mapping, start_idx, i);
	f2fs_destroy_compress_ctx(cc);
	return ret;
}

int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
		.rpages = NULL,
		.nr_rpages = 0,
	};

	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}

bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
					pgoff_t index, unsigned int copied)
{
	struct compress_ctx cc = {
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.rpages = fsdata,
	};
	bool first_index = (index == cc.rpages[0]->index);

	if (copied)
		set_cluster_dirty(&cc);

	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
	f2fs_destroy_compress_ctx(&cc);

	return first_index;
}

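/*
 * Write one compressed cluster: tag the header slot with COMPRESS_ADDR,
 * issue the compressed pages out-of-place, release blocks the cluster
 * no longer needs, and update the inode's compressed-block count.
 * Returns -EAGAIN on any failure so the caller falls back to writing
 * the raw pages instead.
 */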
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = cc->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NEW_ADDR,
		.page = NULL,
		.encrypted_page = NULL,
		.compressed_page = NULL,
		.submitted = false,
		.need_lock = LOCK_RETRY,
		.io_type = io_type,
		.io_wbc = wbc,
		.encrypted = f2fs_encrypted_file(cc->inode),
	};
	struct dnode_of_data dn;
	struct node_info ni;
	struct compress_io_ctx *cic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	unsigned int last_index = cc->cluster_size - 1;
	loff_t psize;
	int i, err;

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

	f2fs_lock_op(sbi);

	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (err)
		goto out_unlock_op;

	for (i = 0; i < cc->cluster_size; i++) {
		if (datablock_addr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) == NULL_ADDR)
			goto out_put_dnode;
	}

	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto out_put_dnode;

	fio.version = ni.version;

	cic = f2fs_kzalloc(sbi, sizeof(struct compress_io_ctx), GFP_NOFS);
	if (!cic)
		goto out_put_dnode;

	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	cic->inode = inode;
	refcount_set(&cic->ref, 1);
	cic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
			cc->log_cluster_size, GFP_NOFS);
	if (!cic->rpages)
		goto out_put_cic;

	cic->nr_rpages = cc->cluster_size;

	for (i = 0; i < cc->nr_cpages; i++) {
		f2fs_set_compressed_page(cc->cpages[i], inode,
					cc->rpages[i + 1]->index,
					cic, i ? &cic->ref : NULL);
		fio.compressed_page = cc->cpages[i];
		if (fio.encrypted) {
			fio.page = cc->rpages[i + 1];
			err = f2fs_encrypt_one_page(&fio);
			if (err)
				goto out_destroy_crypt;
			cc->cpages[i] = fio.encrypted_page;
		}
	}

	set_cluster_writeback(cc);

	for (i = 0; i < cc->cluster_size; i++)
		cic->rpages[i] = cc->rpages[i];

	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
		block_t blkaddr;

		blkaddr = datablock_addr(dn.inode, dn.node_page,
							dn.ofs_in_node);
		fio.page = cic->rpages[i];
		fio.old_blkaddr = blkaddr;

		/* cluster header */
		if (i == 0) {
			if (blkaddr == COMPRESS_ADDR)
				fio.compr_blocks++;
			if (__is_valid_data_blkaddr(blkaddr))
				f2fs_invalidate_blocks(sbi, blkaddr);
			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
			goto unlock_continue;
		}

		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
			fio.compr_blocks++;

		if (i > cc->nr_cpages) {
			if (__is_valid_data_blkaddr(blkaddr)) {
				f2fs_invalidate_blocks(sbi, blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			goto unlock_continue;
		}

		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

		if (fio.encrypted)
			fio.encrypted_page = cc->cpages[i - 1];
		else
			fio.compressed_page = cc->cpages[i - 1];

		cc->cpages[i - 1] = NULL;
		f2fs_outplace_write_data(&dn, &fio);
		(*submitted)++;
unlock_continue:
		inode_dec_dirty_pages(cc->inode);
		unlock_page(fio.page);
	}

	if (fio.compr_blocks)
		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);

	set_inode_flag(cc->inode, FI_APPEND_WRITE);
	if (cc->cluster_idx == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	down_write(&fi->i_sem);
	if (fi->last_disk_size < psize)
		fi->last_disk_size = psize;
	up_write(&fi->i_sem);

	f2fs_put_rpages(cc);
	f2fs_destroy_compress_ctx(cc);
	return 0;

out_destroy_crypt:
	kfree(cic->rpages);

	for (--i; i >= 0; i--)
		fscrypt_finalize_bounce_page(&cc->cpages[i]);
	for (i = 0; i < cc->nr_cpages; i++) {
		if (!cc->cpages[i])
			continue;
		f2fs_put_page(cc->cpages[i], 1);
	}
out_put_cic:
	kfree(cic);
out_put_dnode:
	f2fs_put_dnode(&dn);
out_unlock_op:
	f2fs_unlock_op(sbi);
	return -EAGAIN;
}

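/*
 * Write end-io for one compressed page; the last completion for the
 * cluster ends writeback on all of its raw pages.
 */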
void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct compress_io_ctx *cic =
			(struct compress_io_ctx *)page_private(page);
	int i;

	if (unlikely(bio->bi_status))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_put_compressed_page(page);

	dec_page_count(sbi, F2FS_WB_DATA);

	if (refcount_dec_not_one(&cic->ref))
		return;

	for (i = 0; i < cic->nr_rpages; i++) {
		WARN_ON(!cic->rpages[i]);
		clear_cold_data(cic->rpages[i]);
		end_page_writeback(cic->rpages[i]);
	}

	kfree(cic->rpages);
	kfree(cic);
}

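/*
 * Fallback path: write each dirty page of the cluster as a normal,
 * uncompressed page, passing the on-disk compressed block count down
 * so existing compressed layout is handled correctly.
 */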
static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct address_space *mapping = cc->inode->i_mapping;
	int _submitted, compr_blocks, ret;
	int i = -1, err = 0;

	compr_blocks = f2fs_compressed_blocks(cc);
	if (compr_blocks < 0) {
		err = compr_blocks;
		goto out_err;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
retry_write:
		if (cc->rpages[i]->mapping != mapping) {
			unlock_page(cc->rpages[i]);
			continue;
		}

		BUG_ON(!PageLocked(cc->rpages[i]));

		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks);
		if (ret) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(cc->rpages[i]);
				ret = 0;
			} else if (ret == -EAGAIN) {
				ret = 0;
				cond_resched();
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				lock_page(cc->rpages[i]);
				clear_page_dirty_for_io(cc->rpages[i]);
				goto retry_write;
			}
			err = ret;
			goto out_fail;
		}

		*submitted += _submitted;
	}
	return 0;

out_fail:
	/* TODO: revoke partially updated block addresses */
	BUG_ON(compr_blocks);
out_err:
	for (++i; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		redirty_page_for_writepage(wbc, cc->rpages[i]);
		unlock_page(cc->rpages[i]);
	}
	return err;
}

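/*
 * Writeback entry point for a cluster: try the compressed path first,
 * falling back to raw page-by-page writes on -EAGAIN.
 */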
int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int err;

	*submitted = 0;
	if (cluster_may_compress(cc)) {
		err = f2fs_compress_pages(cc);
		if (err == -EAGAIN) {
			goto write;
		} else if (err) {
			f2fs_put_rpages_wbc(cc, wbc, true, 1);
			goto destroy_out;
		}

		err = f2fs_write_compressed_pages(cc, submitted,
							wbc, io_type);
		cops->destroy_compress_ctx(cc);
		if (!err)
			return 0;
		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
	}
write:
	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
	f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
	f2fs_destroy_compress_ctx(cc);
	return err;
}

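/*
 * Build a decompress context for a cluster read: cpages receive the
 * on-disk compressed data, while tpages is the decompression target,
 * borrowing cached rpages where available and temporary pages for the
 * holes.
 */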
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i;

	dic = f2fs_kzalloc(sbi, sizeof(struct decompress_io_ctx), GFP_NOFS);
	if (!dic)
		return ERR_PTR(-ENOMEM);

	dic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
			cc->log_cluster_size, GFP_NOFS);
	if (!dic->rpages) {
		kfree(dic);
		return ERR_PTR(-ENOMEM);
	}

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	refcount_set(&dic->ref, 1);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	dic->failed = false;

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					dic->nr_cpages, GFP_NOFS);
	if (!dic->cpages)
		goto out_free;

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_grab_page();
		if (!page)
			goto out_free;

		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1,
					dic, i ? &dic->ref : NULL);
		dic->cpages[i] = page;
	}

	dic->tpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					dic->cluster_size, GFP_NOFS);
	if (!dic->tpages)
		goto out_free;

	for (i = 0; i < dic->cluster_size; i++) {
		if (cc->rpages[i])
			continue;

		dic->tpages[i] = f2fs_grab_page();
		if (!dic->tpages[i])
			goto out_free;
	}

	for (i = 0; i < dic->cluster_size; i++) {
		if (dic->tpages[i])
			continue;
		dic->tpages[i] = cc->rpages[i];
	}

	return dic;

out_free:
	f2fs_free_dic(dic);
	return ERR_PTR(-ENOMEM);
}

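/*
 * Release a decompress context, including any temporary pages that do
 * not belong to the page cache.
 */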
void f2fs_free_dic(struct decompress_io_ctx *dic)
{
	int i;

	if (dic->tpages) {
		for (i = 0; i < dic->cluster_size; i++) {
			if (dic->rpages[i])
				continue;
			f2fs_put_page(dic->tpages[i], 1);
		}
		kfree(dic->tpages);
	}

	if (dic->cpages) {
		for (i = 0; i < dic->nr_cpages; i++) {
			if (!dic->cpages[i])
				continue;
			f2fs_put_compressed_page(dic->cpages[i]);
		}
		kfree(dic->cpages);
	}

	kfree(dic->rpages);
	kfree(dic);
}

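/*
 * Finish a cluster read: mark each raw page uptodate (optionally after
 * fs-verity verification) or errored, then unlock it.
 */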
void f2fs_decompress_end_io(struct page **rpages,
			unsigned int cluster_size, bool err, bool verity)
{
	int i;

	for (i = 0; i < cluster_size; i++) {
		struct page *rpage = rpages[i];

		if (!rpage)
			continue;

		if (err || PageError(rpage)) {
			ClearPageUptodate(rpage);
			ClearPageError(rpage);
		} else {
			if (!verity || fsverity_verify_page(rpage))
				SetPageUptodate(rpage);
			else
				SetPageError(rpage);
		}
		unlock_page(rpage);
	}
}
1177