// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/moduleparam.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>
#include <linux/pagevec.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;

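/*
 * Page pointer arrays for a cluster are carved from a per-superblock
 * slab when the request fits the preallocated object size (the default
 * cluster size); larger requests fall back to kzalloc. page_array_free()
 * below mirrors the same choice.
 */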
static void *page_array_alloc(struct inode *inode, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (likely(size <= sbi->page_array_slab_size))
		return f2fs_kmem_cache_alloc(sbi->page_array_slab,
				GFP_F2FS_ZERO, false, F2FS_I_SB(inode));
	return f2fs_kzalloc(sbi, size, GFP_NOFS);
}

static void page_array_free(struct inode *inode, void *pages, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (!pages)
		return;

	if (likely(size <= sbi->page_array_slab_size))
		kmem_cache_free(sbi->page_array_slab, pages);
	else
		kfree(pages);
}

struct f2fs_compress_ops {
	int (*init_compress_ctx)(struct compress_ctx *cc);
	void (*destroy_compress_ctx)(struct compress_ctx *cc);
	int (*compress_pages)(struct compress_ctx *cc);
	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
	int (*decompress_pages)(struct decompress_io_ctx *dic);
	bool (*is_level_valid)(int level);
};

static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
	return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
	return cc->cluster_idx << cc->log_cluster_size;
}

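/*
 * A compressed page carries a pointer to its compress_io_ctx or
 * decompress_io_ctx in page_private(); both structures start with
 * F2FS_COMPRESSED_PAGE_MAGIC, which is what this predicate checks.
 */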
bool f2fs_is_compressed_page(struct page *page)
{
	if (!PagePrivate(page))
		return false;
	if (!page_private(page))
		return false;
	if (page_private_nonpointer(page))
		return false;

	f2fs_bug_on(F2FS_M_SB(page->mapping),
		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
	return true;
}

static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data)
{
	attach_page_private(page, (void *)data);

	/* i_crypto_info and iv index */
	page->index = index;
	page->mapping = inode->i_mapping;
}

static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!cc->rpages[i])
			continue;
		if (unlock)
			unlock_page(cc->rpages[i]);
		else
			put_page(cc->rpages[i]);
	}
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
	f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
	f2fs_drop_rpages(cc, len, true);
}

static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, int unlock)
{
	unsigned int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		if (redirty)
			redirty_page_for_writepage(wbc, cc->rpages[i]);
		f2fs_put_page(cc->rpages[i], unlock);
	}
}

struct page *f2fs_compress_control_page(struct page *page)
{
	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}

int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
	if (cc->rpages)
		return 0;

	cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
{
	page_array_free(cc->inode, cc->rpages, cc->cluster_size);
	cc->rpages = NULL;
	cc->nr_rpages = 0;
	cc->nr_cpages = 0;
	cc->valid_nr_cpages = 0;
	if (!reuse)
		cc->cluster_idx = NULL_CLUSTER;
}

void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
{
	unsigned int cluster_ofs;

	if (!f2fs_cluster_can_merge_page(cc, page->index))
		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

	cluster_ofs = offset_in_cluster(cc, page->index);
	cc->rpages[cluster_ofs] = page;
	cc->nr_rpages++;
	cc->cluster_idx = cluster_idx(cc, page->index);
}

#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZO1X_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
				"lzo compress failed, ret:%d", ret);
		return -EIO;
	}
	return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
					dic->rbuf, &dic->rlen);
	if (ret != LZO_E_OK) {
		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
				"lzo decompress failed, ret:%d", ret);
		return -EIO;
	}

	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
				"lzo invalid rlen:%zu, expected:%lu",
				dic->rlen, PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
	.init_compress_ctx = lzo_init_compress_ctx,
	.destroy_compress_ctx = lzo_destroy_compress_ctx,
	.compress_pages = lzo_compress_pages,
	.decompress_pages = lzo_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	unsigned int size = LZ4_MEM_COMPRESS;

#ifdef CONFIG_F2FS_FS_LZ4HC
	if (F2FS_I(cc->inode)->i_compress_level)
		size = LZ4HC_MEM_COMPRESS;
#endif

	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	/*
	 * we do not set cc->clen to LZ4_compressBound(inputsize) to cover
	 * the worst compression case, because the lz4 compressor handles
	 * the output budget properly on its own.
	 */
	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len = -EINVAL;
	unsigned char level = F2FS_I(cc->inode)->i_compress_level;

	if (!level)
		len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
#ifdef CONFIG_F2FS_FS_LZ4HC
	else
		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
					cc->clen, level, cc->private);
#endif
	if (len < 0)
		return len;
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
					dic->clen, dic->rlen);
	if (ret < 0) {
		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
				"lz4 decompress failed, ret:%d", ret);
		return -EIO;
	}

	if (ret != PAGE_SIZE << dic->log_cluster_size) {
		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
				"lz4 invalid ret:%d, expected:%lu",
				ret, PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static bool lz4_is_level_valid(int lvl)
{
#ifdef CONFIG_F2FS_FS_LZ4HC
	return !lvl || (lvl >= LZ4HC_MIN_CLEVEL && lvl <= LZ4HC_MAX_CLEVEL);
#else
	return lvl == 0;
#endif
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
	.init_compress_ctx = lz4_init_compress_ctx,
	.destroy_compress_ctx = lz4_destroy_compress_ctx,
	.compress_pages = lz4_compress_pages,
	.decompress_pages = lz4_decompress_pages,
	.is_level_valid = lz4_is_level_valid,
};
#endif

#ifdef CONFIG_F2FS_FS_ZSTD
static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
	zstd_parameters params;
	zstd_cstream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned char level = F2FS_I(cc->inode)->i_compress_level;

	/* Fall back to the default level to keep backward compatibility */
	if (!level)
		level = F2FS_ZSTD_DEFAULT_CLEVEL;

	params = zstd_get_params(level, cc->rlen);
	workspace_size = zstd_cstream_workspace_bound(&params.cParams);

	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
	if (!stream) {
		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
				"%s zstd_init_cstream failed", __func__);
		kvfree(workspace);
		return -EIO;
	}

	cc->private = workspace;
	cc->private2 = stream;

	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
	cc->private2 = NULL;
}

static int zstd_compress_pages(struct compress_ctx *cc)
{
	zstd_cstream *stream = cc->private2;
	zstd_in_buffer inbuf;
	zstd_out_buffer outbuf;
	int src_size = cc->rlen;
	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	int ret;

	inbuf.pos = 0;
	inbuf.src = cc->rbuf;
	inbuf.size = src_size;

	outbuf.pos = 0;
	outbuf.dst = cc->cbuf->cdata;
	outbuf.size = dst_size;

	ret = zstd_compress_stream(stream, &outbuf, &inbuf);
	if (zstd_is_error(ret)) {
		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
				"%s zstd_compress_stream failed, ret: %d",
				__func__, zstd_get_error_code(ret));
		return -EIO;
	}

	ret = zstd_end_stream(stream, &outbuf);
	if (zstd_is_error(ret)) {
		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
				"%s zstd_end_stream returned %d",
				__func__, zstd_get_error_code(ret));
		return -EIO;
	}

	/*
	 * compressed data is still left in the intermediate buffer
	 * because there is no more space in cbuf.cdata
	 */
	if (ret)
		return -EAGAIN;

	cc->clen = outbuf.pos;
	return 0;
}

static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
	zstd_dstream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned int max_window_size =
			MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);

	workspace_size = zstd_dstream_workspace_bound(max_window_size);

	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
	if (!stream) {
		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
				"%s zstd_init_dstream failed", __func__);
		kvfree(workspace);
		return -EIO;
	}

	dic->private = workspace;
	dic->private2 = stream;

	return 0;
}

static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
	kvfree(dic->private);
	dic->private = NULL;
	dic->private2 = NULL;
}

static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
	zstd_dstream *stream = dic->private2;
	zstd_in_buffer inbuf;
	zstd_out_buffer outbuf;
	int ret;

	inbuf.pos = 0;
	inbuf.src = dic->cbuf->cdata;
	inbuf.size = dic->clen;

	outbuf.pos = 0;
	outbuf.dst = dic->rbuf;
	outbuf.size = dic->rlen;

	ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
	if (zstd_is_error(ret)) {
		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
				"%s zstd_decompress_stream failed, ret: %d",
				__func__, zstd_get_error_code(ret));
		return -EIO;
	}

	if (dic->rlen != outbuf.pos) {
		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
				"%s ZSTD invalid rlen:%zu, expected:%lu",
				__func__, dic->rlen,
				PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}

	return 0;
}

static bool zstd_is_level_valid(int lvl)
{
	return lvl >= zstd_min_clevel() && lvl <= zstd_max_clevel();
}

static const struct f2fs_compress_ops f2fs_zstd_ops = {
	.init_compress_ctx = zstd_init_compress_ctx,
	.destroy_compress_ctx = zstd_destroy_compress_ctx,
	.compress_pages = zstd_compress_pages,
	.init_decompress_ctx = zstd_init_decompress_ctx,
	.destroy_decompress_ctx = zstd_destroy_decompress_ctx,
	.decompress_pages = zstd_decompress_pages,
	.is_level_valid = zstd_is_level_valid,
};
#endif

#ifdef CONFIG_F2FS_FS_LZO
#ifdef CONFIG_F2FS_FS_LZORLE
static int lzorle_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
				"lzo-rle compress failed, ret:%d", ret);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzorle_ops = {
	.init_compress_ctx = lzo_init_compress_ctx,
	.destroy_compress_ctx = lzo_destroy_compress_ctx,
	.compress_pages = lzorle_compress_pages,
	.decompress_pages = lzo_decompress_pages,
};
#endif
#endif

static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
	&f2fs_lzo_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
	&f2fs_lz4_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
	&f2fs_zstd_ops,
#else
	NULL,
#endif
#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
	&f2fs_lzorle_ops,
#else
	NULL,
#endif
};

bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

bool f2fs_is_compress_level_valid(int alg, int lvl)
{
	const struct f2fs_compress_ops *cops = f2fs_cops[alg];

	if (cops->is_level_valid)
		return cops->is_level_valid(lvl);

	return lvl == 0;
}

static mempool_t *compress_page_pool;
static int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC(num_compress_pages,
		"Number of intermediate compress pages to preallocate");

int __init f2fs_init_compress_mempool(void)
{
	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
	return compress_page_pool ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_mempool(void)
{
	mempool_destroy(compress_page_pool);
}

static struct page *f2fs_compress_alloc_page(void)
{
	struct page *page;

	page = mempool_alloc(compress_page_pool, GFP_NOFS);
	lock_page(page);

	return page;
}

static void f2fs_compress_free_page(struct page *page)
{
	if (!page)
		return;
	detach_page_private(page);
	page->mapping = NULL;
	unlock_page(page);
	mempool_free(page, compress_page_pool);
}

#define MAX_VMAP_RETRIES	3

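/*
 * vm_map_ram() can fail transiently when vmap space is fragmented or
 * exhausted, so retry a few times, flushing lazily-freed aliases
 * between attempts to reclaim vmap space.
 */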
static void *f2fs_vmap(struct page **pages, unsigned int count)
{
	int i;
	void *buf = NULL;

	for (i = 0; i < MAX_VMAP_RETRIES; i++) {
		buf = vm_map_ram(pages, count, -1);
		if (buf)
			break;
		vm_unmap_aliases();
	}
	return buf;
}

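/*
 * Compress one cluster: map the raw pages (rpages) and the destination
 * pages (cpages) into contiguous virtual buffers, run the algorithm's
 * compress_pages() callback, then fill in the cluster header (clen,
 * checksum, reserved words) and free any destination pages the
 * compressed payload did not reach. Returns -EAGAIN when the result
 * would not save at least one block, in which case the caller writes
 * the cluster uncompressed.
 */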
static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
	unsigned int max_len, new_nr_cpages;
	u32 chksum = 0;
	int i, ret;

	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
				cc->cluster_size, fi->i_compress_algorithm);

	if (cops->init_compress_ctx) {
		ret = cops->init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	max_len = COMPRESS_HEADER_SIZE + cc->clen;
	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
	cc->valid_nr_cpages = cc->nr_cpages;

	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
	if (!cc->cpages) {
		ret = -ENOMEM;
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++)
		cc->cpages[i] = f2fs_compress_alloc_page();

	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
	if (!cc->rbuf) {
		ret = -ENOMEM;
		goto out_free_cpages;
	}

	cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
	if (!cc->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;

	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

	if (cc->clen > max_len) {
		ret = -EAGAIN;
		goto out_vunmap_cbuf;
	}

	cc->cbuf->clen = cpu_to_le32(cc->clen);

	if (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))
		chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
					cc->cbuf->cdata, cc->clen);
	cc->cbuf->chksum = cpu_to_le32(chksum);

	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
		cc->cbuf->reserved[i] = cpu_to_le32(0);

	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* zero out any unused part of the last page */
	memset(&cc->cbuf->cdata[cc->clen], 0,
			(new_nr_cpages * PAGE_SIZE) -
			(cc->clen + COMPRESS_HEADER_SIZE));

	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
	vm_unmap_ram(cc->rbuf, cc->cluster_size);

	for (i = new_nr_cpages; i < cc->nr_cpages; i++) {
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);

	cc->valid_nr_cpages = new_nr_cpages;

	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return 0;

out_vunmap_cbuf:
	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(cc->rbuf, cc->cluster_size);
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_compress_free_page(cc->cpages[i]);
	}
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
destroy_compress_ctx:
	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);
out:
	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return ret;
}

static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
		bool pre_alloc);
static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback, bool pre_alloc);

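/*
 * Decompress one cluster. The compressed length is validated against
 * the cluster geometry before the algorithm callback runs, and the
 * optional per-cluster checksum is verified afterwards; a checksum
 * mismatch flags the filesystem for fsck rather than failing the read.
 */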
void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	bool bypass_callback = false;
	int ret;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	if (dic->failed) {
		ret = -EIO;
		goto out_end_io;
	}

	ret = f2fs_prepare_decomp_mem(dic, false);
	if (ret) {
		bypass_callback = true;
		goto out_release;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
	dic->rlen = PAGE_SIZE << dic->log_cluster_size;

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;

		/* Avoid f2fs_commit_super in irq context */
		if (!in_task)
			f2fs_handle_error_async(sbi, ERROR_FAIL_DECOMPRESSION);
		else
			f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
		goto out_release;
	}

	ret = cops->decompress_pages(dic);

	if (!ret && (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))) {
		u32 provided = le32_to_cpu(dic->cbuf->chksum);
		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);

		if (provided != calculated) {
			if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
				set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
				f2fs_info_ratelimited(sbi,
					"checksum invalid, nid = %lu, %x vs %x",
					dic->inode->i_ino,
					provided, calculated);
			}
			set_sbi_flag(sbi, SBI_NEED_FSCK);
		}
	}

out_release:
	f2fs_release_decomp_mem(dic, bypass_callback, false);

out_end_io:
	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
							dic->clen, ret);
	f2fs_decompress_end_io(dic, ret, in_task);
}

/*
 * This is called when a page of a compressed cluster has been read from disk
 * (or failed to be read from disk). It checks whether this page was the last
 * page being waited on in the cluster, and if so, it decompresses the cluster
 * (or in the case of a failure, cleans up without actually decompressing).
 */
void f2fs_end_read_compressed_page(struct page *page, bool failed,
		block_t blkaddr, bool in_task)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);

	dec_page_count(sbi, F2FS_RD_DATA);

	if (failed)
		WRITE_ONCE(dic->failed, true);
	else if (blkaddr && in_task)
		f2fs_cache_compressed_page(sbi, page,
					dic->inode->i_ino, blkaddr);

	if (atomic_dec_and_test(&dic->remaining_pages))
		f2fs_decompress_cluster(dic, in_task);
}

static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	if (cc->cluster_idx == NULL_CLUSTER)
		return true;
	return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
	return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
	return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
	if (f2fs_cluster_is_empty(cc))
		return true;
	return is_page_in_cluster(cc, index);
}

bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
				int index, int nr_pages, bool uptodate)
{
	unsigned long pgidx = pages[index]->index;
	int i = uptodate ? 0 : 1;

	/*
	 * when uptodate is true, also check whether every page in the
	 * cluster is uptodate.
	 */
	if (uptodate && (pgidx % cc->cluster_size))
		return false;

	if (nr_pages - index < cc->cluster_size)
		return false;

	for (; i < cc->cluster_size; i++) {
		if (pages[index + i]->index != pgidx + i)
			return false;
		if (uptodate && !PageUptodate(pages[index + i]))
			return false;
	}

	return true;
}

static bool cluster_has_invalid_data(struct compress_ctx *cc)
{
	loff_t i_size = i_size_read(cc->inode);
	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		f2fs_bug_on(F2FS_I_SB(cc->inode), !page);

		/* beyond EOF */
		if (page->index >= nr_pages)
			return true;
	}
	return false;
}

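/*
 * A compressed cluster must look like [COMPR_ADDR, blkaddr...]: a
 * single COMPRESS_ADDR header at a cluster-aligned offset followed by
 * data block addresses with no valid address after the first hole.
 * Returns true (and requests fsck) when the on-disk layout violates
 * this invariant.
 */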
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	bool compressed = dn->data_blkaddr == COMPRESS_ADDR;
	int cluster_end = 0;
	int i;
	char *reason = "";

	if (!compressed)
		return false;

	/* [..., COMPR_ADDR, ...] */
	if (dn->ofs_in_node % cluster_size) {
		reason = "[*|C|*|*]";
		goto out;
	}

	for (i = 1; i < cluster_size; i++) {
		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
							dn->ofs_in_node + i);

		/* [COMPR_ADDR, ..., COMPR_ADDR] */
		if (blkaddr == COMPRESS_ADDR) {
			reason = "[C|*|C|*]";
			goto out;
		}
		if (!__is_valid_data_blkaddr(blkaddr)) {
			if (!cluster_end)
				cluster_end = i;
			continue;
		}
		/* [COMPR_ADDR, NULL_ADDR or NEW_ADDR, valid_blkaddr] */
		if (cluster_end) {
			reason = "[C|N|N|V]";
			goto out;
		}
	}
	return false;
out:
	f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s",
			dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason);
	set_sbi_flag(sbi, SBI_NEED_FSCK);
	return true;
}

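/*
 * Count blocks in a compressed cluster; the returned count includes
 * the COMPRESS_ADDR header block. With compr == true only blocks
 * holding compressed payload are counted, otherwise every non-NULL
 * block. Returns 0 for a non-compressed (or missing) cluster, or a
 * negative errno on failure.
 */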
static int __f2fs_cluster_blocks(struct inode *inode,
				unsigned int cluster_idx, bool compr)
{
	struct dnode_of_data dn;
	unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
	unsigned int start_idx = cluster_idx <<
				F2FS_I(inode)->i_log_cluster_size;
	int ret;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;
		goto fail;
	}

	if (f2fs_sanity_check_cluster(&dn)) {
		ret = -EFSCORRUPTED;
		f2fs_handle_error(F2FS_I_SB(inode), ERROR_CORRUPTED_CLUSTER);
		goto fail;
	}

	if (dn.data_blkaddr == COMPRESS_ADDR) {
		int i;

		ret = 1;
		for (i = 1; i < cluster_size; i++) {
			block_t blkaddr;

			blkaddr = data_blkaddr(dn.inode,
					dn.node_page, dn.ofs_in_node + i);
			if (compr) {
				if (__is_valid_data_blkaddr(blkaddr))
					ret++;
			} else {
				if (blkaddr != NULL_ADDR)
					ret++;
			}
		}

		f2fs_bug_on(F2FS_I_SB(inode),
			!compr && ret != cluster_size &&
			!is_inode_flag_set(inode, FI_COMPRESS_RELEASED));
	}
fail:
	f2fs_put_dnode(&dn);
	return ret;
}

/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
}

/* return # of valid blocks in compressed cluster */
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
	return __f2fs_cluster_blocks(inode,
		index >> F2FS_I(inode)->i_log_cluster_size,
		false);
}

static bool cluster_may_compress(struct compress_ctx *cc)
{
	if (!f2fs_need_compress_data(cc->inode))
		return false;
	if (f2fs_is_atomic_file(cc->inode))
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
		return false;
	return !cluster_has_invalid_data(cc);
}

static void set_cluster_writeback(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i])
			set_page_writeback(cc->rpages[i]);
	}
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++)
		if (cc->rpages[i]) {
			set_page_dirty(cc->rpages[i]);
			set_page_private_gcing(cc->rpages[i]);
		}
}

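/*
 * Before a write can land in a compressed cluster, every page of the
 * cluster must be present, uptodate and locked in the page cache:
 * grab all pages, read in any that are not yet uptodate, then re-lock
 * the full set, retrying from scratch if a page was truncated in the
 * meantime. On success the locked rpages array is handed back through
 * *fsdata for f2fs_compress_write_end().
 */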
static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct page *page;
	sector_t last_block_in_bio;
	fgf_t fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;

retry:
	ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
	if (ret <= 0)
		return ret;

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;

	/* keep page reference to avoid page reclaim */
	for (i = 0; i < cc->cluster_size; i++) {
		page = f2fs_pagecache_get_page(mapping, start_idx + i,
				fgp_flag, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			goto unlock_pages;
		}

		if (PageUptodate(page))
			f2fs_put_page(page, 1);
		else
			f2fs_compress_ctx_add_page(cc, page);
	}

	if (!f2fs_cluster_is_empty(cc)) {
		struct bio *bio = NULL;

		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
					&last_block_in_bio, false, true);
		f2fs_put_rpages(cc);
		f2fs_destroy_compress_ctx(cc, true);
		if (ret)
			goto out;
		if (bio)
			f2fs_submit_read_bio(sbi, bio, DATA);

		ret = f2fs_init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		f2fs_bug_on(sbi, cc->rpages[i]);

		page = find_lock_page(mapping, start_idx + i);
		if (!page) {
			/* page can be truncated */
			goto release_and_retry;
		}

		f2fs_wait_on_page_writeback(page, DATA, true, true);
		f2fs_compress_ctx_add_page(cc, page);

		if (!PageUptodate(page)) {
release_and_retry:
			f2fs_put_rpages(cc);
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_destroy_compress_ctx(cc, true);
			goto retry;
		}
	}

	if (likely(!ret)) {
		*fsdata = cc->rpages;
		*pagep = cc->rpages[offset_in_cluster(cc, index)];
		return cc->cluster_size;
	}

unlock_pages:
	f2fs_put_rpages(cc);
	f2fs_unlock_rpages(cc, i);
	f2fs_destroy_compress_ctx(cc, true);
out:
	return ret;
}

int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
		.rpages = NULL,
		.nr_rpages = 0,
	};

	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}

bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
		pgoff_t index, unsigned copied)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.rpages = fsdata,
	};
	bool first_index = (index == cc.rpages[0]->index);

	if (copied)
		set_cluster_dirty(&cc);

	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
	f2fs_destroy_compress_ctx(&cc, false);

	return first_index;
}

int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{
	void *fsdata = NULL;
	struct page *pagep;
	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
							log_cluster_size;
	int err;

	err = f2fs_is_compressed_cluster(inode, start_idx);
	if (err < 0)
		return err;

	/* truncate normal cluster */
	if (!err)
		return f2fs_do_truncate_blocks(inode, from, lock);

	/* truncate compressed cluster */
	err = f2fs_prepare_compress_overwrite(inode, &pagep,
						start_idx, &fsdata);

	/* should not be a normal cluster */
	f2fs_bug_on(F2FS_I_SB(inode), err == 0);

	if (err <= 0)
		return err;

	if (err > 0) {
		struct page **rpages = fsdata;
		int cluster_size = F2FS_I(inode)->i_cluster_size;
		int i;

		for (i = cluster_size - 1; i >= 0; i--) {
			loff_t start = rpages[i]->index << PAGE_SHIFT;

			if (from <= start) {
				zero_user_segment(rpages[i], 0, PAGE_SIZE);
			} else {
				zero_user_segment(rpages[i], from - start,
						PAGE_SIZE);
				break;
			}
		}

		f2fs_compress_write_end(inode, fsdata, start_idx, true);
	}
	return 0;
}

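/*
 * Write back one compressed cluster: allocate a compress_io_ctx to
 * track in-flight compressed pages, mark the first block address as
 * COMPRESS_ADDR (the cluster header holds no data), write the cpages
 * out of place behind it, and release any now-unused trailing blocks.
 * Returns -EAGAIN on any failure so the caller can fall back to
 * writing the cluster as raw pages.
 */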
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = cc->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NEW_ADDR,
		.page = NULL,
		.encrypted_page = NULL,
		.compressed_page = NULL,
		.submitted = 0,
		.io_type = io_type,
		.io_wbc = wbc,
		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ?
									1 : 0,
	};
	struct dnode_of_data dn;
	struct node_info ni;
	struct compress_io_ctx *cic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	unsigned int last_index = cc->cluster_size - 1;
	loff_t psize;
	int i, err;
	bool quota_inode = IS_NOQUOTA(inode);

	/* we should bypass data pages so the kworker jobs can proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(cc->rpages[0]->mapping, -EIO);
		goto out_free;
	}

	if (quota_inode) {
		/*
		 * We need to wait for node_write to avoid block allocation during
		 * checkpoint. This can only happen to quota writes which can cause
		 * the below discard race condition.
		 */
		f2fs_down_read(&sbi->node_write);
	} else if (!f2fs_trylock_op(sbi)) {
		goto out_free;
	}

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (err)
		goto out_unlock_op;

	for (i = 0; i < cc->cluster_size; i++) {
		if (data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) == NULL_ADDR)
			goto out_put_dnode;
	}

	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
	if (err)
		goto out_put_dnode;

	fio.version = ni.version;

	cic = f2fs_kmem_cache_alloc(cic_entry_slab, GFP_F2FS_ZERO, false, sbi);
	if (!cic)
		goto out_put_dnode;

	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	cic->inode = inode;
	atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!cic->rpages)
		goto out_put_cic;

	cic->nr_rpages = cc->cluster_size;

	for (i = 0; i < cc->valid_nr_cpages; i++) {
		f2fs_set_compressed_page(cc->cpages[i], inode,
					cc->rpages[i + 1]->index, cic);
		fio.compressed_page = cc->cpages[i];

		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
						dn.ofs_in_node + i + 1);

		/* wait for GCed page writeback via META_MAPPING */
		f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);

		if (fio.encrypted) {
			fio.page = cc->rpages[i + 1];
			err = f2fs_encrypt_one_page(&fio);
			if (err)
				goto out_destroy_crypt;
			cc->cpages[i] = fio.encrypted_page;
		}
	}

	set_cluster_writeback(cc);

	for (i = 0; i < cc->cluster_size; i++)
		cic->rpages[i] = cc->rpages[i];

	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
		block_t blkaddr;

		blkaddr = f2fs_data_blkaddr(&dn);
		fio.page = cc->rpages[i];
		fio.old_blkaddr = blkaddr;

		/* cluster header */
		if (i == 0) {
			if (blkaddr == COMPRESS_ADDR)
				fio.compr_blocks++;
			if (__is_valid_data_blkaddr(blkaddr))
				f2fs_invalidate_blocks(sbi, blkaddr);
			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
			goto unlock_continue;
		}

		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
			fio.compr_blocks++;

		if (i > cc->valid_nr_cpages) {
			if (__is_valid_data_blkaddr(blkaddr)) {
				f2fs_invalidate_blocks(sbi, blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			goto unlock_continue;
		}

		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

		if (fio.encrypted)
			fio.encrypted_page = cc->cpages[i - 1];
		else
			fio.compressed_page = cc->cpages[i - 1];

		cc->cpages[i - 1] = NULL;
		f2fs_outplace_write_data(&dn, &fio);
		(*submitted)++;
unlock_continue:
		inode_dec_dirty_pages(cc->inode);
		unlock_page(fio.page);
	}

	if (fio.compr_blocks)
		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
	f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true);
	add_compr_block_stat(inode, cc->valid_nr_cpages);

	set_inode_flag(cc->inode, FI_APPEND_WRITE);

	f2fs_put_dnode(&dn);
	if (quota_inode)
		f2fs_up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);

	spin_lock(&fi->i_size_lock);
	if (fi->last_disk_size < psize)
		fi->last_disk_size = psize;
	spin_unlock(&fi->i_size_lock);

	f2fs_put_rpages(cc);
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	f2fs_destroy_compress_ctx(cc, false);
	return 0;

out_destroy_crypt:
	page_array_free(cc->inode, cic->rpages, cc->cluster_size);

	for (--i; i >= 0; i--)
		fscrypt_finalize_bounce_page(&cc->cpages[i]);
out_put_cic:
	kmem_cache_free(cic_entry_slab, cic);
out_put_dnode:
	f2fs_put_dnode(&dn);
out_unlock_op:
	if (quota_inode)
		f2fs_up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);
out_free:
	for (i = 0; i < cc->valid_nr_cpages; i++) {
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	return -EAGAIN;
}

void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct compress_io_ctx *cic =
			(struct compress_io_ctx *)page_private(page);
	enum count_type type = WB_DATA_TYPE(page,
				f2fs_is_compressed_page(page));
	int i;

	if (unlikely(bio->bi_status))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_compress_free_page(page);

	dec_page_count(sbi, type);

	if (atomic_dec_return(&cic->pending_pages))
		return;

	for (i = 0; i < cic->nr_rpages; i++) {
		WARN_ON(!cic->rpages[i]);
		clear_page_private_gcing(cic->rpages[i]);
		end_page_writeback(cic->rpages[i]);
	}

	page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
	kmem_cache_free(cic_entry_slab, cic);
}

static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted_p,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct address_space *mapping = cc->inode->i_mapping;
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	int submitted, compr_blocks, i;
	int ret = 0;

	compr_blocks = f2fs_compressed_blocks(cc);

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;

		redirty_page_for_writepage(wbc, cc->rpages[i]);
		unlock_page(cc->rpages[i]);
	}

	if (compr_blocks < 0)
		return compr_blocks;

	/* overwrite compressed cluster w/ normal cluster */
	if (compr_blocks > 0)
		f2fs_lock_op(sbi);

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
retry_write:
		lock_page(cc->rpages[i]);

		if (cc->rpages[i]->mapping != mapping) {
continue_unlock:
			unlock_page(cc->rpages[i]);
			continue;
		}

		if (!PageDirty(cc->rpages[i]))
			goto continue_unlock;

		if (PageWriteback(cc->rpages[i])) {
			if (wbc->sync_mode == WB_SYNC_NONE)
				goto continue_unlock;
			f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true);
		}

		if (!clear_page_dirty_for_io(cc->rpages[i]))
			goto continue_unlock;

		ret = f2fs_write_single_data_page(cc->rpages[i], &submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks, false);
		if (ret) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(cc->rpages[i]);
				ret = 0;
			} else if (ret == -EAGAIN) {
				ret = 0;
				/*
				 * for a quota file, just redirty the remaining
				 * pages to avoid a deadlock caused by a cluster
				 * update race with foreground operations.
				 */
				if (IS_NOQUOTA(cc->inode))
					goto out;
				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
				goto retry_write;
			}
			goto out;
		}

		*submitted_p += submitted;
	}

out:
	if (compr_blocks > 0)
		f2fs_unlock_op(sbi);

	f2fs_balance_fs(sbi, true);
	return ret;
}

int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int err;

	*submitted = 0;
	if (cluster_may_compress(cc)) {
		err = f2fs_compress_pages(cc);
		if (err == -EAGAIN) {
			add_compr_block_stat(cc->inode, cc->cluster_size);
			goto write;
		} else if (err) {
			f2fs_put_rpages_wbc(cc, wbc, true, 1);
			goto destroy_out;
		}

		err = f2fs_write_compressed_pages(cc, submitted,
							wbc, io_type);
		if (!err)
			return 0;
		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
	}
write:
	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
	f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
	f2fs_destroy_compress_ctx(cc, false);
	return err;
}

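/*
 * In low-memory mode, decompression buffers are allocated only when a
 * cluster is actually being decompressed rather than when the dic is
 * created; the XOR below selects the matching allocation phase.
 */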
static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi,
		bool pre_alloc)
{
	return pre_alloc ^ f2fs_low_mem_mode(sbi);
}

static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
		bool pre_alloc)
{
	const struct f2fs_compress_ops *cops =
		f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
	int i;

	if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
		return 0;

	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
	if (!dic->tpages)
		return -ENOMEM;

	for (i = 0; i < dic->cluster_size; i++) {
		if (dic->rpages[i]) {
			dic->tpages[i] = dic->rpages[i];
			continue;
		}

		dic->tpages[i] = f2fs_compress_alloc_page();
	}

	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
	if (!dic->rbuf)
		return -ENOMEM;

	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
	if (!dic->cbuf)
		return -ENOMEM;

	if (cops->init_decompress_ctx)
		return cops->init_decompress_ctx(dic);

	return 0;
}

static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback, bool pre_alloc)
{
	const struct f2fs_compress_ops *cops =
		f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];

	if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
		return;

	if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
		cops->destroy_decompress_ctx(dic);

	if (dic->cbuf)
		vm_unmap_ram(dic->cbuf, dic->nr_cpages);

	if (dic->rbuf)
		vm_unmap_ram(dic->rbuf, dic->cluster_size);
}

static void f2fs_free_dic(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback);

struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	int i, ret;

	dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO, false, sbi);
	if (!dic)
		return ERR_PTR(-ENOMEM);

	dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!dic->rpages) {
		kmem_cache_free(dic_entry_slab, dic);
		return ERR_PTR(-ENOMEM);
	}

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	atomic_set(&dic->remaining_pages, cc->nr_cpages);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	refcount_set(&dic->refcnt, 1);
	dic->failed = false;
	dic->need_verity = f2fs_need_verity(cc->inode, start_idx);

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
	if (!dic->cpages) {
		ret = -ENOMEM;
		goto out_free;
	}

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_compress_alloc_page();
		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1, dic);
		dic->cpages[i] = page;
	}

	ret = f2fs_prepare_decomp_mem(dic, true);
	if (ret)
		goto out_free;

	return dic;

out_free:
	f2fs_free_dic(dic, true);
	return ERR_PTR(ret);
}

static void f2fs_free_dic(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback)
{
	int i;

	f2fs_release_decomp_mem(dic, bypass_destroy_callback, true);

	if (dic->tpages) {
		for (i = 0; i < dic->cluster_size; i++) {
			if (dic->rpages[i])
				continue;
			if (!dic->tpages[i])
				continue;
			f2fs_compress_free_page(dic->tpages[i]);
		}
		page_array_free(dic->inode, dic->tpages, dic->cluster_size);
	}

	if (dic->cpages) {
		for (i = 0; i < dic->nr_cpages; i++) {
			if (!dic->cpages[i])
				continue;
			f2fs_compress_free_page(dic->cpages[i]);
		}
		page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
	}

	page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
	kmem_cache_free(dic_entry_slab, dic);
}

static void f2fs_late_free_dic(struct work_struct *work)
{
	struct decompress_io_ctx *dic =
		container_of(work, struct decompress_io_ctx, free_work);

	f2fs_free_dic(dic, false);
}

static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
{
	if (refcount_dec_and_test(&dic->refcnt)) {
		if (in_task) {
			f2fs_free_dic(dic, false);
		} else {
			INIT_WORK(&dic->free_work, f2fs_late_free_dic);
			queue_work(F2FS_I_SB(dic->inode)->post_read_wq,
					&dic->free_work);
		}
	}
}

static void f2fs_verify_cluster(struct work_struct *work)
{
	struct decompress_io_ctx *dic =
		container_of(work, struct decompress_io_ctx, verity_work);
	int i;

	/* Verify, update, and unlock the decompressed pages. */
	for (i = 0; i < dic->cluster_size; i++) {
		struct page *rpage = dic->rpages[i];

		if (!rpage)
			continue;

		if (fsverity_verify_page(rpage))
			SetPageUptodate(rpage);
		else
			ClearPageUptodate(rpage);
		unlock_page(rpage);
	}

	f2fs_put_dic(dic, true);
}

/*
 * This is called when a compressed cluster has been decompressed
 * (or failed to be read and/or decompressed).
 */
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
				bool in_task)
{
	int i;

	if (!failed && dic->need_verity) {
		/*
		 * Note that to avoid deadlocks, the verity work can't be done
		 * on the decompression workqueue. This is because verifying
		 * the data pages can involve reading metadata pages from the
		 * file, and these metadata pages may be compressed.
		 */
		INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
		fsverity_enqueue_verify_work(&dic->verity_work);
		return;
	}

	/* Update and unlock the cluster's pagecache pages. */
	for (i = 0; i < dic->cluster_size; i++) {
		struct page *rpage = dic->rpages[i];

		if (!rpage)
			continue;

		if (failed)
			ClearPageUptodate(rpage);
		else
			SetPageUptodate(rpage);
		unlock_page(rpage);
	}

	/*
	 * Release the reference to the decompress_io_ctx that was being held
	 * for I/O completion.
	 */
	f2fs_put_dic(dic, in_task);
}

/*
 * Put a reference to a compressed page's decompress_io_ctx.
 *
 * This is called when the page is no longer needed and can be freed.
 */
void f2fs_put_page_dic(struct page *page, bool in_task)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);

	f2fs_put_dic(dic, in_task);
}

/*
 * check whether cluster blocks are contiguous, and add extent cache entry
 * only if cluster blocks are logically and physically contiguous.
 */
unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
						unsigned int ofs_in_node)
{
	bool compressed = data_blkaddr(dn->inode, dn->node_page,
					ofs_in_node) == COMPRESS_ADDR;
	int i = compressed ? 1 : 0;
	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
						ofs_in_node + i);

	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
						ofs_in_node + i);

		if (!__is_valid_data_blkaddr(blkaddr))
			break;
		if (first_blkaddr + i - (compressed ? 1 : 0) != blkaddr)
			return 0;
	}

	return compressed ? i - 1 : i;
}

const struct address_space_operations f2fs_compress_aops = {
	.release_folio = f2fs_release_folio,
	.invalidate_folio = f2fs_invalidate_folio,
	.migrate_folio = filemap_migrate_folio,
};

struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->compress_inode->i_mapping;
}

void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	if (!sbi->compress_inode)
		return;
	invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
}

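/*
 * With the "compress_cache" mount option, verbatim compressed blocks
 * are kept in a dedicated inode's page cache, indexed by block
 * address, so that re-reads whose pagecache copies were reclaimed can
 * skip the device I/O and only redo decompression.
 */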
void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
						nid_t ino, block_t blkaddr)
{
	struct page *cpage;
	int ret;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return;

	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
		return;

	if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
		return;

	cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
	if (cpage) {
		f2fs_put_page(cpage, 0);
		return;
	}

	cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
	if (!cpage)
		return;

	ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
						blkaddr, GFP_NOFS);
	if (ret) {
		f2fs_put_page(cpage, 0);
		return;
	}

	set_page_private_data(cpage, ino);

	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
		goto out;

	memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
	SetPageUptodate(cpage);
out:
	f2fs_put_page(cpage, 1);
}

bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
						block_t blkaddr)
{
	struct page *cpage;
	bool hitted = false;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return false;

	cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
				blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
	if (cpage) {
		if (PageUptodate(cpage)) {
			atomic_inc(&sbi->compress_page_hit);
			memcpy(page_address(page),
				page_address(cpage), PAGE_SIZE);
			hitted = true;
		}
		f2fs_put_page(cpage, 1);
	}

	return hitted;
}

void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct address_space *mapping = COMPRESS_MAPPING(sbi);
	struct folio_batch fbatch;
	pgoff_t index = 0;
	pgoff_t end = MAX_BLKADDR(sbi);

	if (!mapping->nrpages)
		return;

	folio_batch_init(&fbatch);

	do {
		unsigned int nr, i;

		nr = filemap_get_folios(mapping, &index, end - 1, &fbatch);
		if (!nr)
			break;

		for (i = 0; i < nr; i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);
			if (folio->mapping != mapping) {
				folio_unlock(folio);
				continue;
			}

			if (ino != get_page_private_data(&folio->page)) {
				folio_unlock(folio);
				continue;
			}

			generic_error_remove_page(mapping, &folio->page);
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	} while (index < end);
}

int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
{
	struct inode *inode;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return 0;

	inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	sbi->compress_inode = inode;

	sbi->compress_percent = COMPRESS_PERCENT;
	sbi->compress_watermark = COMPRESS_WATERMARK;

	atomic_set(&sbi->compress_page_hit, 0);

	return 0;
}

void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
{
	if (!sbi->compress_inode)
		return;
	iput(sbi->compress_inode);
	sbi->compress_inode = NULL;
}

int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	char slab_name[35];

	if (!f2fs_sb_has_compression(sbi))
		return 0;

	sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));

	sbi->page_array_slab_size = sizeof(struct page *) <<
					F2FS_OPTION(sbi).compress_log_size;

	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
					sbi->page_array_slab_size);
	return sbi->page_array_slab ? 0 : -ENOMEM;
}

void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
{
	kmem_cache_destroy(sbi->page_array_slab);
}

int __init f2fs_init_compress_cache(void)
{
	cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
					sizeof(struct compress_io_ctx));
	if (!cic_entry_slab)
		return -ENOMEM;
	dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
					sizeof(struct decompress_io_ctx));
	if (!dic_entry_slab)
		goto free_cic;
	return 0;
free_cic:
	kmem_cache_destroy(cic_entry_slab);
	return -ENOMEM;
}

void f2fs_destroy_compress_cache(void)
{
	kmem_cache_destroy(dic_entry_slab);
	kmem_cache_destroy(cic_entry_slab);
}