// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>
#include <linux/pagevec.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;

static void *page_array_alloc(struct inode *inode, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (likely(size <= sbi->page_array_slab_size))
		return f2fs_kmem_cache_alloc(sbi->page_array_slab,
					GFP_F2FS_ZERO, false, sbi);
	return f2fs_kzalloc(sbi, size, GFP_NOFS);
}

static void page_array_free(struct inode *inode, void *pages, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (!pages)
		return;

	if (likely(size <= sbi->page_array_slab_size))
		kmem_cache_free(sbi->page_array_slab, pages);
	else
		kfree(pages);
}

struct f2fs_compress_ops {
	int (*init_compress_ctx)(struct compress_ctx *cc);
	void (*destroy_compress_ctx)(struct compress_ctx *cc);
	int (*compress_pages)(struct compress_ctx *cc);
	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
	int (*decompress_pages)(struct decompress_io_ctx *dic);
};

static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
	return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
	return cc->cluster_idx << cc->log_cluster_size;
}

bool f2fs_is_compressed_page(struct page *page)
{
	if (!PagePrivate(page))
		return false;
	if (!page_private(page))
		return false;
	if (page_private_nonpointer(page))
		return false;

	f2fs_bug_on(F2FS_M_SB(page->mapping),
		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
	return true;
}

static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data)
{
	attach_page_private(page, (void *)data);

	/* i_crypto_info and iv index */
	page->index = index;
	page->mapping = inode->i_mapping;
}

static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!cc->rpages[i])
			continue;
		if (unlock)
			unlock_page(cc->rpages[i]);
		else
			put_page(cc->rpages[i]);
	}
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
	f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
	f2fs_drop_rpages(cc, len, true);
}

static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, int unlock)
{
	unsigned int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		if (redirty)
			redirty_page_for_writepage(wbc, cc->rpages[i]);
		f2fs_put_page(cc->rpages[i], unlock);
	}
}
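
/*
 * Return the first raw (uncompressed) page of the cluster that owns this
 * compressed page; the compress_io_ctx pointer is stashed in page_private().
 */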
struct page *f2fs_compress_control_page(struct page *page)
{
	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}

int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
	if (cc->rpages)
		return 0;

	cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
{
	page_array_free(cc->inode, cc->rpages, cc->cluster_size);
	cc->rpages = NULL;
	cc->nr_rpages = 0;
	cc->nr_cpages = 0;
	if (!reuse)
		cc->cluster_idx = NULL_CLUSTER;
}

void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
{
	unsigned int cluster_ofs;

	if (!f2fs_cluster_can_merge_page(cc, page->index))
		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

	cluster_ofs = offset_in_cluster(cc, page->index);
	cc->rpages[cluster_ofs] = page;
	cc->nr_rpages++;
	cc->cluster_idx = cluster_idx(cc, page->index);
}

#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZO1X_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
						dic->rbuf, &dic->rlen);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, expected:%lu\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				dic->rlen,
				PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzo_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZ4
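/*
 * Set up the LZ4 (or LZ4HC, when a compress level is configured on the
 * inode) workspace and the per-cluster output budget.
 */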
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	unsigned int size = LZ4_MEM_COMPRESS;

#ifdef CONFIG_F2FS_FS_LZ4HC
	if (F2FS_I(cc->inode)->i_compress_flag >> COMPRESS_LEVEL_OFFSET)
		size = LZ4HC_MEM_COMPRESS;
#endif

	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	/*
	 * we do not change cc->clen to LZ4_compressBound(inputsize) to
	 * adapt worst compress case, because lz4 compressor can handle
	 * output budget properly.
	 */
	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

#ifdef CONFIG_F2FS_FS_LZ4HC
static int lz4hc_compress_pages(struct compress_ctx *cc)
{
	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
						COMPRESS_LEVEL_OFFSET;
	int len;

	if (level)
		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
					cc->clen, level, cc->private);
	else
		len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}
#endif

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len;

#ifdef CONFIG_F2FS_FS_LZ4HC
	return lz4hc_compress_pages(cc);
#endif
	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
						dic->clen, dic->rlen);
	if (ret < 0) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (ret != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, expected:%lu\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				ret,
				PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
	.init_compress_ctx	= lz4_init_compress_ctx,
	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
	.compress_pages		= lz4_compress_pages,
	.decompress_pages	= lz4_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_ZSTD
#define F2FS_ZSTD_DEFAULT_CLEVEL	1

static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
	ZSTD_parameters params;
	ZSTD_CStream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
						COMPRESS_LEVEL_OFFSET;

	if (!level)
		level = F2FS_ZSTD_DEFAULT_CLEVEL;

	params = ZSTD_getParams(level, cc->rlen, 0);
	workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);

	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	cc->private = workspace;
	cc->private2 = stream;

	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
	cc->private2 = NULL;
}
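
/*
 * Stream-compress one cluster into cc->cbuf; -EAGAIN means the compressed
 * data did not fit the output budget, so the cluster will be written as-is.
 */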
static int zstd_compress_pages(struct compress_ctx *cc)
{
	ZSTD_CStream *stream = cc->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int src_size = cc->rlen;
	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	int ret;

	inbuf.pos = 0;
	inbuf.src = cc->rbuf;
	inbuf.size = src_size;

	outbuf.pos = 0;
	outbuf.dst = cc->cbuf->cdata;
	outbuf.size = dst_size;

	ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	ret = ZSTD_endStream(stream, &outbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	/*
	 * compressed data remains in the intermediate buffer because there
	 * is no more space in cbuf.cdata
	 */
	if (ret)
		return -EAGAIN;

	cc->clen = outbuf.pos;
	return 0;
}

static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned int max_window_size =
			MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);

	workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);

	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	dic->private = workspace;
	dic->private2 = stream;

	return 0;
}

static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
	kvfree(dic->private);
	dic->private = NULL;
	dic->private2 = NULL;
}

static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream = dic->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int ret;

	inbuf.pos = 0;
	inbuf.src = dic->cbuf->cdata;
	inbuf.size = dic->clen;

	outbuf.pos = 0;
	outbuf.dst = dic->rbuf;
	outbuf.size = dic->rlen;

	ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	if (dic->rlen != outbuf.pos) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid decompressed length:%zu, expected:%lu\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, outbuf.pos,
				PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}

	return 0;
}

static const struct f2fs_compress_ops f2fs_zstd_ops = {
	.init_compress_ctx	= zstd_init_compress_ctx,
	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
	.compress_pages		= zstd_compress_pages,
	.init_decompress_ctx	= zstd_init_decompress_ctx,
	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
	.decompress_pages	= zstd_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZO
#ifdef CONFIG_F2FS_FS_LZORLE
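/*
 * LZO-RLE shares the LZO workspace and decompressor; only the compress
 * entry point differs.
 */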
static int lzorle_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzorle_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzorle_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif
#endif

static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
	&f2fs_lzo_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
	&f2fs_lz4_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
	&f2fs_zstd_ops,
#else
	NULL,
#endif
#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
	&f2fs_lzorle_ops,
#else
	NULL,
#endif
};

bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

static mempool_t *compress_page_pool;
static unsigned int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC(num_compress_pages,
		"Number of intermediate compress pages to preallocate");

int f2fs_init_compress_mempool(void)
{
	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
	if (!compress_page_pool)
		return -ENOMEM;

	return 0;
}

void f2fs_destroy_compress_mempool(void)
{
	mempool_destroy(compress_page_pool);
}

static struct page *f2fs_compress_alloc_page(void)
{
	struct page *page;

	page = mempool_alloc(compress_page_pool, GFP_NOFS);
	lock_page(page);

	return page;
}

static void f2fs_compress_free_page(struct page *page)
{
	if (!page)
		return;
	detach_page_private(page);
	page->mapping = NULL;
	unlock_page(page);
	mempool_free(page, compress_page_pool);
}

#define MAX_VMAP_RETRIES	3

static void *f2fs_vmap(struct page **pages, unsigned int count)
{
	int i;
	void *buf = NULL;

	for (i = 0; i < MAX_VMAP_RETRIES; i++) {
		buf = vm_map_ram(pages, count, -1);
		if (buf)
			break;
		vm_unmap_aliases();
	}
	return buf;
}
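
/*
 * Compress the cluster in cc->rpages into freshly allocated cc->cpages.
 * Returns 0 on success (with cc->cpages trimmed to the compressed size),
 * -EAGAIN when the result would not save at least one page, or a negative
 * errno on failure.
 */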
static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
	unsigned int max_len, new_nr_cpages;
	struct page **new_cpages;
	u32 chksum = 0;
	int i, ret;

	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
				cc->cluster_size, fi->i_compress_algorithm);

	if (cops->init_compress_ctx) {
		ret = cops->init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	max_len = COMPRESS_HEADER_SIZE + cc->clen;
	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);

	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
	if (!cc->cpages) {
		ret = -ENOMEM;
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++) {
		cc->cpages[i] = f2fs_compress_alloc_page();
		if (!cc->cpages[i]) {
			ret = -ENOMEM;
			goto out_free_cpages;
		}
	}

	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
	if (!cc->rbuf) {
		ret = -ENOMEM;
		goto out_free_cpages;
	}

	cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
	if (!cc->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;

	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

	if (cc->clen > max_len) {
		ret = -EAGAIN;
		goto out_vunmap_cbuf;
	}

	cc->cbuf->clen = cpu_to_le32(cc->clen);

	if (fi->i_compress_flag & (1 << COMPRESS_CHKSUM))
		chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
					cc->cbuf->cdata, cc->clen);
	cc->cbuf->chksum = cpu_to_le32(chksum);

	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
		cc->cbuf->reserved[i] = cpu_to_le32(0);

	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* Now we're going to cut unnecessary tail pages */
	new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
	if (!new_cpages) {
		ret = -ENOMEM;
		goto out_vunmap_cbuf;
	}

	/* zero out any unused part of the last page */
	memset(&cc->cbuf->cdata[cc->clen], 0,
			(new_nr_cpages * PAGE_SIZE) -
			(cc->clen + COMPRESS_HEADER_SIZE));

	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
	vm_unmap_ram(cc->rbuf, cc->cluster_size);

	for (i = 0; i < cc->nr_cpages; i++) {
		if (i < new_nr_cpages) {
			new_cpages[i] = cc->cpages[i];
			continue;
		}
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);

	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = new_cpages;
	cc->nr_cpages = new_nr_cpages;

	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return 0;

out_vunmap_cbuf:
	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(cc->rbuf, cc->cluster_size);
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_compress_free_page(cc->cpages[i]);
	}
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
destroy_compress_ctx:
	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);
out:
	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return ret;
}
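
/*
 * Decompress a cluster whose compressed pages have all been read (or have
 * failed to read): run the algorithm-specific decompressor, verify the
 * optional checksum, then end I/O on the cluster's pagecache pages.
 */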
void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int ret;
	int i;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	if (dic->failed) {
		ret = -EIO;
		goto out_end_io;
	}

	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
	if (!dic->tpages) {
		ret = -ENOMEM;
		goto out_end_io;
	}

	for (i = 0; i < dic->cluster_size; i++) {
		if (dic->rpages[i]) {
			dic->tpages[i] = dic->rpages[i];
			continue;
		}

		dic->tpages[i] = f2fs_compress_alloc_page();
		if (!dic->tpages[i]) {
			ret = -ENOMEM;
			goto out_end_io;
		}
	}

	if (cops->init_decompress_ctx) {
		ret = cops->init_decompress_ctx(dic);
		if (ret)
			goto out_end_io;
	}

	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
	if (!dic->rbuf) {
		ret = -ENOMEM;
		goto out_destroy_decompress_ctx;
	}

	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
	if (!dic->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
	dic->rlen = PAGE_SIZE << dic->log_cluster_size;

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;
		goto out_vunmap_cbuf;
	}

	ret = cops->decompress_pages(dic);

	if (!ret && (fi->i_compress_flag & (1 << COMPRESS_CHKSUM))) {
		u32 provided = le32_to_cpu(dic->cbuf->chksum);
		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);

		if (provided != calculated) {
			if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
				set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
				printk_ratelimited(
					"%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
					KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
					provided, calculated);
			}
			set_sbi_flag(sbi, SBI_NEED_FSCK);
		}
	}

out_vunmap_cbuf:
	vm_unmap_ram(dic->cbuf, dic->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(dic->rbuf, dic->cluster_size);
out_destroy_decompress_ctx:
	if (cops->destroy_decompress_ctx)
		cops->destroy_decompress_ctx(dic);
out_end_io:
	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
							dic->clen, ret);
	f2fs_decompress_end_io(dic, ret);
}

/*
 * This is called when a page of a compressed cluster has been read from disk
 * (or failed to be read from disk).  It checks whether this page was the last
 * page being waited on in the cluster, and if so, it decompresses the cluster
 * (or in the case of a failure, cleans up without actually decompressing).
 */
void f2fs_end_read_compressed_page(struct page *page, bool failed,
						block_t blkaddr)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);

	dec_page_count(sbi, F2FS_RD_DATA);

	if (failed)
		WRITE_ONCE(dic->failed, true);
	else if (blkaddr)
		f2fs_cache_compressed_page(sbi, page,
					dic->inode->i_ino, blkaddr);

	if (atomic_dec_and_test(&dic->remaining_pages))
		f2fs_decompress_cluster(dic);
}

static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	if (cc->cluster_idx == NULL_CLUSTER)
		return true;
	return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
	return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
	return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
	if (f2fs_cluster_is_empty(cc))
		return true;
	return is_page_in_cluster(cc, index);
}

static bool cluster_has_invalid_data(struct compress_ctx *cc)
{
	loff_t i_size = i_size_read(cc->inode);
	unsigned int nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		f2fs_bug_on(F2FS_I_SB(cc->inode), !page);

		/* beyond EOF */
		if (page->index >= nr_pages)
			return true;
	}
	return false;
}
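
/*
 * Count blocks in one cluster: with @compr set, return 1 plus the number of
 * valid compressed data blocks (the COMPRESS_ADDR header counts as one);
 * otherwise return the number of non-NULL block addresses.  0 means the
 * cluster is not compressed; negative values are errors.
 */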
static int __f2fs_cluster_blocks(struct inode *inode,
				unsigned int cluster_idx, bool compr)
{
	struct dnode_of_data dn;
	unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
	unsigned int start_idx = cluster_idx <<
				F2FS_I(inode)->i_log_cluster_size;
	int ret;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;
		goto fail;
	}

	if (dn.data_blkaddr == COMPRESS_ADDR) {
		int i;

		ret = 1;
		for (i = 1; i < cluster_size; i++) {
			block_t blkaddr;

			blkaddr = data_blkaddr(dn.inode,
					dn.node_page, dn.ofs_in_node + i);
			if (compr) {
				if (__is_valid_data_blkaddr(blkaddr))
					ret++;
			} else {
				if (blkaddr != NULL_ADDR)
					ret++;
			}
		}

		f2fs_bug_on(F2FS_I_SB(inode),
			!compr && ret != cluster_size &&
			!is_inode_flag_set(inode, FI_COMPRESS_RELEASED));
	}
fail:
	f2fs_put_dnode(&dn);
	return ret;
}

/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
}

/* return # of valid blocks in compressed cluster */
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
	return __f2fs_cluster_blocks(inode,
		index >> F2FS_I(inode)->i_log_cluster_size,
		false);
}

static bool cluster_may_compress(struct compress_ctx *cc)
{
	if (!f2fs_need_compress_data(cc->inode))
		return false;
	if (f2fs_is_atomic_file(cc->inode))
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
		return false;
	return !cluster_has_invalid_data(cc);
}

static void set_cluster_writeback(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i])
			set_page_writeback(cc->rpages[i]);
	}
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++)
		if (cc->rpages[i])
			set_page_dirty(cc->rpages[i]);
}
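
/*
 * Prepare a write to part of a compressed cluster: bring every page of the
 * cluster uptodate and locked in the pagecache so the whole cluster can be
 * rewritten.  Returns cluster_size on success, 0 when the cluster is not
 * compressed, or a negative errno.
 */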
static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct page *page;
	sector_t last_block_in_bio;
	unsigned int fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;

retry:
	ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
	if (ret <= 0)
		return ret;

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;

	/* keep page reference to avoid page reclaim */
	for (i = 0; i < cc->cluster_size; i++) {
		page = f2fs_pagecache_get_page(mapping, start_idx + i,
							fgp_flag, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			goto unlock_pages;
		}

		if (PageUptodate(page))
			f2fs_put_page(page, 1);
		else
			f2fs_compress_ctx_add_page(cc, page);
	}

	if (!f2fs_cluster_is_empty(cc)) {
		struct bio *bio = NULL;

		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
					&last_block_in_bio, false, true);
		f2fs_put_rpages(cc);
		f2fs_destroy_compress_ctx(cc, true);
		if (ret)
			goto out;
		if (bio)
			f2fs_submit_bio(sbi, bio, DATA);

		ret = f2fs_init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		f2fs_bug_on(sbi, cc->rpages[i]);

		page = find_lock_page(mapping, start_idx + i);
		if (!page) {
			/* page can be truncated */
			goto release_and_retry;
		}

		f2fs_wait_on_page_writeback(page, DATA, true, true);
		f2fs_compress_ctx_add_page(cc, page);

		if (!PageUptodate(page)) {
release_and_retry:
			f2fs_put_rpages(cc);
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_destroy_compress_ctx(cc, true);
			goto retry;
		}
	}

	if (likely(!ret)) {
		*fsdata = cc->rpages;
		*pagep = cc->rpages[offset_in_cluster(cc, index)];
		return cc->cluster_size;
	}

unlock_pages:
	f2fs_put_rpages(cc);
	f2fs_unlock_rpages(cc, i);
	f2fs_destroy_compress_ctx(cc, true);
out:
	return ret;
}

int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
		.rpages = NULL,
		.nr_rpages = 0,
	};

	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}

bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
					pgoff_t index, unsigned copied)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.rpages = fsdata,
	};
	bool first_index = (index == cc.rpages[0]->index);

	if (copied)
		set_cluster_dirty(&cc);

	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
	f2fs_destroy_compress_ctx(&cc, false);

	return first_index;
}

int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{
	void *fsdata = NULL;
	struct page *pagep;
	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
							log_cluster_size;
	int err;

	err = f2fs_is_compressed_cluster(inode, start_idx);
	if (err < 0)
		return err;

	/* truncate normal cluster */
	if (!err)
		return f2fs_do_truncate_blocks(inode, from, lock);

	/* truncate compressed cluster */
	err = f2fs_prepare_compress_overwrite(inode, &pagep,
						start_idx, &fsdata);

	/* should not be a normal cluster */
	f2fs_bug_on(F2FS_I_SB(inode), err == 0);

	if (err <= 0)
		return err;

	if (err > 0) {
		struct page **rpages = fsdata;
		int cluster_size = F2FS_I(inode)->i_cluster_size;
		int i;

		for (i = cluster_size - 1; i >= 0; i--) {
			loff_t start = (loff_t)rpages[i]->index << PAGE_SHIFT;

			if (from <= start) {
				zero_user_segment(rpages[i], 0, PAGE_SIZE);
			} else {
				zero_user_segment(rpages[i], from - start,
							PAGE_SIZE);
				break;
			}
		}

		f2fs_compress_write_end(inode, fsdata, start_idx, true);
	}
	return 0;
}
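
/*
 * Write out one compressed cluster: allocate a compress_io_ctx, attach it to
 * the compressed pages, update block addresses in the dnode (the first slot
 * becomes the COMPRESS_ADDR cluster header), and submit out-of-place writes.
 * Returns 0 on success or -EAGAIN so the caller can fall back to writing the
 * raw pages.
 */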
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = cc->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NEW_ADDR,
		.page = NULL,
		.encrypted_page = NULL,
		.compressed_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
	};
	struct dnode_of_data dn;
	struct node_info ni;
	struct compress_io_ctx *cic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	unsigned int last_index = cc->cluster_size - 1;
	loff_t psize;
	int i, err;

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(cc->rpages[0]->mapping, -EIO);
		goto out_free;
	}

	if (IS_NOQUOTA(inode)) {
		/*
		 * We need to wait for node_write to avoid block allocation during
		 * checkpoint. This can only happen to quota writes which can cause
		 * the below discard race condition.
		 */
		down_read(&sbi->node_write);
	} else if (!f2fs_trylock_op(sbi)) {
		goto out_free;
	}

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (err)
		goto out_unlock_op;

	for (i = 0; i < cc->cluster_size; i++) {
		if (data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) == NULL_ADDR)
			goto out_put_dnode;
	}

	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto out_put_dnode;

	fio.version = ni.version;

	cic = f2fs_kmem_cache_alloc(cic_entry_slab, GFP_F2FS_ZERO, false, sbi);
	if (!cic)
		goto out_put_dnode;

	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	cic->inode = inode;
	atomic_set(&cic->pending_pages, cc->nr_cpages);
	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!cic->rpages)
		goto out_put_cic;

	cic->nr_rpages = cc->cluster_size;

	for (i = 0; i < cc->nr_cpages; i++) {
		f2fs_set_compressed_page(cc->cpages[i], inode,
					cc->rpages[i + 1]->index, cic);
		fio.compressed_page = cc->cpages[i];

		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
						dn.ofs_in_node + i + 1);

		/* wait for GCed page writeback via META_MAPPING */
		f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);

		if (fio.encrypted) {
			fio.page = cc->rpages[i + 1];
			err = f2fs_encrypt_one_page(&fio);
			if (err)
				goto out_destroy_crypt;
			cc->cpages[i] = fio.encrypted_page;
		}
	}

	set_cluster_writeback(cc);

	for (i = 0; i < cc->cluster_size; i++)
		cic->rpages[i] = cc->rpages[i];

	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
		block_t blkaddr;

		blkaddr = f2fs_data_blkaddr(&dn);
		fio.page = cc->rpages[i];
		fio.old_blkaddr = blkaddr;

		/* cluster header */
		if (i == 0) {
			if (blkaddr == COMPRESS_ADDR)
				fio.compr_blocks++;
			if (__is_valid_data_blkaddr(blkaddr))
				f2fs_invalidate_blocks(sbi, blkaddr);
			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
			goto unlock_continue;
		}

		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
			fio.compr_blocks++;

		if (i > cc->nr_cpages) {
			if (__is_valid_data_blkaddr(blkaddr)) {
				f2fs_invalidate_blocks(sbi, blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			goto unlock_continue;
		}

		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

		if (fio.encrypted)
			fio.encrypted_page = cc->cpages[i - 1];
		else
			fio.compressed_page = cc->cpages[i - 1];

		cc->cpages[i - 1] = NULL;
		f2fs_outplace_write_data(&dn, &fio);
		(*submitted)++;
unlock_continue:
		inode_dec_dirty_pages(cc->inode);
		unlock_page(fio.page);
	}
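
	/*
	 * If the cluster was compressed before, fio.compr_blocks also counted
	 * the COMPRESS_ADDR header slot; drop the old data-block count
	 * (compr_blocks - 1) before accounting the new one (cc->nr_cpages).
	 */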
	if (fio.compr_blocks)
		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
	add_compr_block_stat(inode, cc->nr_cpages);

	set_inode_flag(cc->inode, FI_APPEND_WRITE);
	if (cc->cluster_idx == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	f2fs_put_dnode(&dn);
	if (IS_NOQUOTA(inode))
		up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);

	spin_lock(&fi->i_size_lock);
	if (fi->last_disk_size < psize)
		fi->last_disk_size = psize;
	spin_unlock(&fi->i_size_lock);

	f2fs_put_rpages(cc);
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	f2fs_destroy_compress_ctx(cc, false);
	return 0;

out_destroy_crypt:
	page_array_free(cc->inode, cic->rpages, cc->cluster_size);

	for (--i; i >= 0; i--)
		fscrypt_finalize_bounce_page(&cc->cpages[i]);
	for (i = 0; i < cc->nr_cpages; i++) {
		if (!cc->cpages[i])
			continue;
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}
out_put_cic:
	kmem_cache_free(cic_entry_slab, cic);
out_put_dnode:
	f2fs_put_dnode(&dn);
out_unlock_op:
	if (IS_NOQUOTA(inode))
		up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);
out_free:
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	return -EAGAIN;
}

void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct compress_io_ctx *cic =
			(struct compress_io_ctx *)page_private(page);
	int i;

	if (unlikely(bio->bi_status))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_compress_free_page(page);

	dec_page_count(sbi, F2FS_WB_DATA);

	if (atomic_dec_return(&cic->pending_pages))
		return;

	for (i = 0; i < cic->nr_rpages; i++) {
		WARN_ON(!cic->rpages[i]);
		clear_page_private_gcing(cic->rpages[i]);
		end_page_writeback(cic->rpages[i]);
	}

	page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
	kmem_cache_free(cic_entry_slab, cic);
}
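
/*
 * Fall back to writing the cluster's dirty pages one by one, uncompressed.
 */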
static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct address_space *mapping = cc->inode->i_mapping;
	int _submitted, compr_blocks, ret;
	int i = -1, err = 0;

	compr_blocks = f2fs_compressed_blocks(cc);
	if (compr_blocks < 0) {
		err = compr_blocks;
		goto out_err;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
retry_write:
		if (cc->rpages[i]->mapping != mapping) {
			unlock_page(cc->rpages[i]);
			continue;
		}

		BUG_ON(!PageLocked(cc->rpages[i]));

		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks, false);
		if (ret) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(cc->rpages[i]);
				ret = 0;
			} else if (ret == -EAGAIN) {
				/*
				 * for quota file, just redirty left pages to
				 * avoid deadlock caused by cluster update race
				 * from foreground operation.
				 */
				if (IS_NOQUOTA(cc->inode)) {
					err = 0;
					goto out_err;
				}
				ret = 0;
				cond_resched();
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				lock_page(cc->rpages[i]);

				if (!PageDirty(cc->rpages[i])) {
					unlock_page(cc->rpages[i]);
					continue;
				}

				clear_page_dirty_for_io(cc->rpages[i]);
				goto retry_write;
			}
			err = ret;
			goto out_err;
		}

		*submitted += _submitted;
	}

	f2fs_balance_fs(F2FS_M_SB(mapping), true);

	return 0;
out_err:
	for (++i; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		redirty_page_for_writepage(wbc, cc->rpages[i]);
		unlock_page(cc->rpages[i]);
	}
	return err;
}

int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int err;

	*submitted = 0;
	if (cluster_may_compress(cc)) {
		err = f2fs_compress_pages(cc);
		if (err == -EAGAIN) {
			goto write;
		} else if (err) {
			f2fs_put_rpages_wbc(cc, wbc, true, 1);
			goto destroy_out;
		}

		err = f2fs_write_compressed_pages(cc, submitted,
							wbc, io_type);
		if (!err)
			return 0;
		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
	}
write:
	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
	f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
	f2fs_destroy_compress_ctx(cc, false);
	return err;
}

static void f2fs_free_dic(struct decompress_io_ctx *dic);

struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i;

	dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO,
					false, F2FS_I_SB(cc->inode));
	if (!dic)
		return ERR_PTR(-ENOMEM);

	dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!dic->rpages) {
		kmem_cache_free(dic_entry_slab, dic);
		return ERR_PTR(-ENOMEM);
	}

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	atomic_set(&dic->remaining_pages, cc->nr_cpages);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	refcount_set(&dic->refcnt, 1);
	dic->failed = false;
	dic->need_verity = f2fs_need_verity(cc->inode, start_idx);

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
	if (!dic->cpages)
		goto out_free;

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_compress_alloc_page();
		if (!page)
			goto out_free;

		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1, dic);
		dic->cpages[i] = page;
	}

	return dic;

out_free:
	f2fs_free_dic(dic);
	return ERR_PTR(-ENOMEM);
}
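
/*
 * Free a decompress_io_ctx along with the temporary and compressed pages it
 * owns; the caller's pagecache pages in dic->rpages are left untouched.
 */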
static void f2fs_free_dic(struct decompress_io_ctx *dic)
{
	int i;

	if (dic->tpages) {
		for (i = 0; i < dic->cluster_size; i++) {
			if (dic->rpages[i])
				continue;
			if (!dic->tpages[i])
				continue;
			f2fs_compress_free_page(dic->tpages[i]);
		}
		page_array_free(dic->inode, dic->tpages, dic->cluster_size);
	}

	if (dic->cpages) {
		for (i = 0; i < dic->nr_cpages; i++) {
			if (!dic->cpages[i])
				continue;
			f2fs_compress_free_page(dic->cpages[i]);
		}
		page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
	}

	page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
	kmem_cache_free(dic_entry_slab, dic);
}

static void f2fs_put_dic(struct decompress_io_ctx *dic)
{
	if (refcount_dec_and_test(&dic->refcnt))
		f2fs_free_dic(dic);
}

/*
 * Update and unlock the cluster's pagecache pages, and release the reference
 * to the decompress_io_ctx that was being held for I/O completion.
 */
static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
{
	int i;

	for (i = 0; i < dic->cluster_size; i++) {
		struct page *rpage = dic->rpages[i];

		if (!rpage)
			continue;

		/* PG_error was set if verity failed. */
		if (failed || PageError(rpage)) {
			ClearPageUptodate(rpage);
			/* will re-read again later */
			ClearPageError(rpage);
		} else {
			SetPageUptodate(rpage);
		}
		unlock_page(rpage);
	}

	f2fs_put_dic(dic);
}

static void f2fs_verify_cluster(struct work_struct *work)
{
	struct decompress_io_ctx *dic =
		container_of(work, struct decompress_io_ctx, verity_work);
	int i;

	/* Verify the cluster's decompressed pages with fs-verity. */
	for (i = 0; i < dic->cluster_size; i++) {
		struct page *rpage = dic->rpages[i];

		if (rpage && !fsverity_verify_page(rpage))
			SetPageError(rpage);
	}

	__f2fs_decompress_end_io(dic, false);
}

/*
 * This is called when a compressed cluster has been decompressed
 * (or failed to be read and/or decompressed).
 */
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
{
	if (!failed && dic->need_verity) {
		/*
		 * Note that to avoid deadlocks, the verity work can't be done
		 * on the decompression workqueue.  This is because verifying
		 * the data pages can involve reading metadata pages from the
		 * file, and these metadata pages may be compressed.
		 */
		INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
		fsverity_enqueue_verify_work(&dic->verity_work);
	} else {
		__f2fs_decompress_end_io(dic, failed);
	}
}

/*
 * Put a reference to a compressed page's decompress_io_ctx.
 *
 * This is called when the page is no longer needed and can be freed.
 */
void f2fs_put_page_dic(struct page *page)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);

	f2fs_put_dic(dic);
}

/*
 * check whether cluster blocks are contiguous, and add extent cache entry
 * only if cluster blocks are logically and physically contiguous.
 */
unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
{
	bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
	int i = compressed ? 1 : 0;
	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + i);

	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + i);

		if (!__is_valid_data_blkaddr(blkaddr))
			break;
		if (first_blkaddr + i - (compressed ? 1 : 0) != blkaddr)
			return 0;
	}

	return compressed ? i - 1 : i;
}

const struct address_space_operations f2fs_compress_aops = {
	.releasepage = f2fs_release_page,
	.invalidatepage = f2fs_invalidate_page,
};
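
/*
 * The COMPRESS_CACHE feature keeps on-disk compressed blocks in the page
 * cache of a dedicated in-memory inode, indexed by block address, so a
 * re-read of the same cluster can skip the device I/O.
 */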
struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->compress_inode->i_mapping;
}

void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	if (!sbi->compress_inode)
		return;
	invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
}

void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
						nid_t ino, block_t blkaddr)
{
	struct page *cpage;
	int ret;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return;

	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
		return;

	if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
		return;

	cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
	if (cpage) {
		f2fs_put_page(cpage, 0);
		return;
	}

	cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
	if (!cpage)
		return;

	ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
						blkaddr, GFP_NOFS);
	if (ret) {
		f2fs_put_page(cpage, 0);
		return;
	}

	set_page_private_data(cpage, ino);

	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
		goto out;

	memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
	SetPageUptodate(cpage);
out:
	f2fs_put_page(cpage, 1);
}

bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
								block_t blkaddr)
{
	struct page *cpage;
	bool hit = false;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return false;

	cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
				blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
	if (cpage) {
		if (PageUptodate(cpage)) {
			atomic_inc(&sbi->compress_page_hit);
			memcpy(page_address(page),
				page_address(cpage), PAGE_SIZE);
			hit = true;
		}
		f2fs_put_page(cpage, 1);
	}

	return hit;
}

void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct address_space *mapping = sbi->compress_inode->i_mapping;
	struct pagevec pvec;
	pgoff_t index = 0;
	pgoff_t end = MAX_BLKADDR(sbi);

	if (!mapping->nrpages)
		return;

	pagevec_init(&pvec);

	do {
		unsigned int nr_pages;
		int i;

		nr_pages = pagevec_lookup_range(&pvec, mapping,
						&index, end - 1);
		if (!nr_pages)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}

			if (ino != get_page_private_data(page)) {
				unlock_page(page);
				continue;
			}

			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	} while (index < end);
}
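
/*
 * Load the special compress-cache inode and reset its hit statistics; a
 * no-op unless the COMPRESS_CACHE mount option is set.
 */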
int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
{
	struct inode *inode;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return 0;

	inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	sbi->compress_inode = inode;

	sbi->compress_percent = COMPRESS_PERCENT;
	sbi->compress_watermark = COMPRESS_WATERMARK;

	atomic_set(&sbi->compress_page_hit, 0);

	return 0;
}

void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
{
	if (!sbi->compress_inode)
		return;
	iput(sbi->compress_inode);
	sbi->compress_inode = NULL;
}

int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	char slab_name[32];

	sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));

	sbi->page_array_slab_size = sizeof(struct page *) <<
					F2FS_OPTION(sbi).compress_log_size;

	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
					sbi->page_array_slab_size);
	if (!sbi->page_array_slab)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
{
	kmem_cache_destroy(sbi->page_array_slab);
}

static int __init f2fs_init_cic_cache(void)
{
	cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
					sizeof(struct compress_io_ctx));
	if (!cic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_cic_cache(void)
{
	kmem_cache_destroy(cic_entry_slab);
}

static int __init f2fs_init_dic_cache(void)
{
	dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
					sizeof(struct decompress_io_ctx));
	if (!dic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_dic_cache(void)
{
	kmem_cache_destroy(dic_entry_slab);
}

int __init f2fs_init_compress_cache(void)
{
	int err;

	err = f2fs_init_cic_cache();
	if (err)
		goto out;
	err = f2fs_init_dic_cache();
	if (err)
		goto free_cic;
	return 0;
free_cic:
	f2fs_destroy_cic_cache();
out:
	return -ENOMEM;
}

void f2fs_destroy_compress_cache(void)
{
	f2fs_destroy_dic_cache();
	f2fs_destroy_cic_cache();
}