Lines Matching +full:cluster +full:- +full:index
1 // SPDX-License-Identifier: GPL-2.0
12 #include <linux/backing-dev.h>
31 if (likely(size <= sbi->page_array_slab_size)) in page_array_alloc()
32 return f2fs_kmem_cache_alloc(sbi->page_array_slab, in page_array_alloc()
45 if (likely(size <= sbi->page_array_slab_size)) in page_array_free()
46 kmem_cache_free(sbi->page_array_slab, pages); in page_array_free()
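
page_array_alloc() and page_array_free() above are the two halves of f2fs's allocator for page-pointer arrays: requests that fit one slab object come from a per-device kmem cache, anything larger falls back to kvmalloc. A hedged reassembly of that strategy (the zeroing variants and the GFP_NOFS flag are assumptions, not verbatim compress.c):

#include <linux/slab.h>
#include "f2fs.h"	/* struct f2fs_sb_info fields as in f2fs.h */

/* sketch only: mirrors page_array_alloc()/page_array_free() above */
static struct page **page_array_alloc_sketch(struct f2fs_sb_info *sbi,
					     unsigned int nr)
{
	size_t size = sizeof(struct page *) * nr;

	if (likely(size <= sbi->page_array_slab_size))
		return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
	return kvzalloc(size, GFP_NOFS);	/* oversized (rare) case */
}

static void page_array_free_sketch(struct f2fs_sb_info *sbi,
				   struct page **pages, unsigned int nr)
{
	size_t size = sizeof(struct page *) * nr;

	if (!pages)
		return;
	if (likely(size <= sbi->page_array_slab_size))
		kmem_cache_free(sbi->page_array_slab, pages);
	else
		kvfree(pages);
}
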
61 static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index) in offset_in_cluster() argument
63 return index & (cc->cluster_size - 1); in offset_in_cluster()
66 static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index) in cluster_idx() argument
68 return index >> cc->log_cluster_size; in cluster_idx()
73 return cc->cluster_idx << cc->log_cluster_size; in start_idx_of_cluster()
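
offset_in_cluster(), cluster_idx() and start_idx_of_cluster() are plain power-of-two arithmetic on the page index. A standalone sketch (the 4-page cluster and the concrete numbers are assumptions for illustration):

#include <assert.h>

#define LOG_CLUSTER_SIZE 2			/* assume a 4-page cluster */
#define CLUSTER_SIZE	 (1 << LOG_CLUSTER_SIZE)

int main(void)
{
	unsigned long index = 6;

	/* page 6 -> cluster 1, offset 2; cluster 1 starts at page 4 */
	assert((index >> LOG_CLUSTER_SIZE) == 1);	/* cluster_idx() */
	assert((index & (CLUSTER_SIZE - 1)) == 2);	/* offset_in_cluster() */
	assert(((index >> LOG_CLUSTER_SIZE) << LOG_CLUSTER_SIZE) == 4);
							/* start_idx_of_cluster() */
	return 0;
}
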
85 f2fs_bug_on(F2FS_M_SB(page->mapping), in f2fs_is_compressed_page()
91 struct inode *inode, pgoff_t index, void *data) in f2fs_set_compressed_page() argument
95 /* i_crypto_info and iv index */ in f2fs_set_compressed_page()
96 page->index = index; in f2fs_set_compressed_page()
97 page->mapping = inode->i_mapping; in f2fs_set_compressed_page()
105 if (!cc->rpages[i]) in f2fs_drop_rpages()
108 unlock_page(cc->rpages[i]); in f2fs_drop_rpages()
110 put_page(cc->rpages[i]); in f2fs_drop_rpages()
116 f2fs_drop_rpages(cc, cc->cluster_size, false); in f2fs_put_rpages()
129 for (i = 0; i < cc->cluster_size; i++) { in f2fs_put_rpages_wbc()
130 if (!cc->rpages[i]) in f2fs_put_rpages_wbc()
133 redirty_page_for_writepage(wbc, cc->rpages[i]); in f2fs_put_rpages_wbc()
134 f2fs_put_page(cc->rpages[i], unlock); in f2fs_put_rpages_wbc()
140 return ((struct compress_io_ctx *)page_private(page))->rpages[0]; in f2fs_compress_control_page()
145 if (cc->rpages) in f2fs_init_compress_ctx()
148 cc->rpages = page_array_alloc(cc->inode, cc->cluster_size); in f2fs_init_compress_ctx()
149 return cc->rpages ? 0 : -ENOMEM; in f2fs_init_compress_ctx()
154 page_array_free(cc->inode, cc->rpages, cc->cluster_size); in f2fs_destroy_compress_ctx()
155 cc->rpages = NULL; in f2fs_destroy_compress_ctx()
156 cc->nr_rpages = 0; in f2fs_destroy_compress_ctx()
157 cc->nr_cpages = 0; in f2fs_destroy_compress_ctx()
158 cc->valid_nr_cpages = 0; in f2fs_destroy_compress_ctx()
160 cc->cluster_idx = NULL_CLUSTER; in f2fs_destroy_compress_ctx()
167 if (!f2fs_cluster_can_merge_page(cc, page->index)) in f2fs_compress_ctx_add_page()
168 f2fs_bug_on(F2FS_I_SB(cc->inode), 1); in f2fs_compress_ctx_add_page()
170 cluster_ofs = offset_in_cluster(cc, page->index); in f2fs_compress_ctx_add_page()
171 cc->rpages[cluster_ofs] = page; in f2fs_compress_ctx_add_page()
172 cc->nr_rpages++; in f2fs_compress_ctx_add_page()
173 cc->cluster_idx = cluster_idx(cc, page->index); in f2fs_compress_ctx_add_page()
179 cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), in lzo_init_compress_ctx()
181 if (!cc->private) in lzo_init_compress_ctx()
182 return -ENOMEM; in lzo_init_compress_ctx()
184 cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size); in lzo_init_compress_ctx()
190 kvfree(cc->private); in lzo_destroy_compress_ctx()
191 cc->private = NULL; in lzo_destroy_compress_ctx()
198 ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata, in lzo_compress_pages()
199 &cc->clen, cc->private); in lzo_compress_pages()
201 f2fs_err_ratelimited(F2FS_I_SB(cc->inode), in lzo_compress_pages()
203 return -EIO; in lzo_compress_pages()
212 ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen, in lzo_decompress_pages()
213 dic->rbuf, &dic->rlen); in lzo_decompress_pages()
215 f2fs_err_ratelimited(F2FS_I_SB(dic->inode), in lzo_decompress_pages()
217 return -EIO; in lzo_decompress_pages()
220 if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) { in lzo_decompress_pages()
221 f2fs_err_ratelimited(F2FS_I_SB(dic->inode), in lzo_decompress_pages()
223 dic->rlen, PAGE_SIZE << dic->log_cluster_size); in lzo_decompress_pages()
224 return -EIO; in lzo_decompress_pages()
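
The LZO hooks above follow one pattern: size the destination with lzo1x_worst_compress(), compress with per-context scratch memory (cc->private), and treat any decompressed-size mismatch as corruption. A hedged round-trip sketch against the kernel LZO API; the helper name is an assumption, and buffer sizing is left to the caller as noted:

#include <linux/errno.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>

/*
 * sketch: dst must hold lzo1x_worst_compress(src_len) bytes and
 * out must hold src_len bytes; both are the caller's job here.
 */
static int lzo_roundtrip_sketch(const u8 *src, size_t src_len, u8 *dst, u8 *out)
{
	void *wrkmem = vmalloc(LZO1X_MEM_COMPRESS);	/* plays cc->private */
	size_t clen = lzo1x_worst_compress(src_len);	/* plays cc->clen */
	size_t rlen = src_len;
	int ret = 0;

	if (!wrkmem)
		return -ENOMEM;
	if (lzo1x_1_compress(src, src_len, dst, &clen, wrkmem) != LZO_E_OK ||
	    lzo1x_decompress_safe(dst, clen, out, &rlen) != LZO_E_OK ||
	    rlen != src_len)
		ret = -EIO;	/* any failure or size drift == corruption */
	vfree(wrkmem);
	return ret;
}
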
243 if (F2FS_I(cc->inode)->i_compress_level) in lz4_init_compress_ctx()
247 cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS); in lz4_init_compress_ctx()
248 if (!cc->private) in lz4_init_compress_ctx()
249 return -ENOMEM; in lz4_init_compress_ctx()
252 * we do not change cc->clen to LZ4_compressBound(inputsize) to in lz4_init_compress_ctx()
253 * cover the worst compress case, because the lz4 compressor can in lz4_init_compress_ctx()
254 * handle its output budget properly. in lz4_init_compress_ctx()
256 cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE; in lz4_init_compress_ctx()
262 kvfree(cc->private); in lz4_destroy_compress_ctx()
263 cc->private = NULL; in lz4_destroy_compress_ctx()
268 int len = -EINVAL; in lz4_compress_pages()
269 unsigned char level = F2FS_I(cc->inode)->i_compress_level; in lz4_compress_pages()
272 len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen, in lz4_compress_pages()
273 cc->clen, cc->private); in lz4_compress_pages()
276 len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen, in lz4_compress_pages()
277 cc->clen, level, cc->private); in lz4_compress_pages()
282 return -EAGAIN; in lz4_compress_pages()
284 cc->clen = len; in lz4_compress_pages()
292 ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf, in lz4_decompress_pages()
293 dic->clen, dic->rlen); in lz4_decompress_pages()
295 f2fs_err_ratelimited(F2FS_I_SB(dic->inode), in lz4_decompress_pages()
297 return -EIO; in lz4_decompress_pages()
300 if (ret != PAGE_SIZE << dic->log_cluster_size) { in lz4_decompress_pages()
301 f2fs_err_ratelimited(F2FS_I_SB(dic->inode), in lz4_decompress_pages()
303 ret, PAGE_SIZE << dic->log_cluster_size); in lz4_decompress_pages()
304 return -EIO; in lz4_decompress_pages()
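
The LZ4 hooks deliberately cap cc->clen at rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE, so a successful compression is guaranteed to save at least one block; LZ4_compress_default() returning 0 then means "did not fit in the budget" and is surfaced as -EAGAIN so the cluster is stored raw. A hedged sketch of that policy (helper name and parameters are assumptions):

#include <linux/errno.h>
#include <linux/lz4.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* sketch: bound the output below the input so "success" implies savings */
static int lz4_try_compress_sketch(const char *src, char *dst,
				   int rlen, int hdr_size)
{
	void *wrkmem = vmalloc(LZ4_MEM_COMPRESS);	/* plays cc->private */
	int bound = rlen - (int)PAGE_SIZE - hdr_size;	/* plays cc->clen */
	int len;

	if (!wrkmem)
		return -ENOMEM;
	len = LZ4_compress_default(src, dst, rlen, bound, wrkmem);
	vfree(wrkmem);

	return len ? len : -EAGAIN;	/* 0 == didn't fit, store raw */
}
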
334 unsigned char level = F2FS_I(cc->inode)->i_compress_level; in zstd_init_compress_ctx()
340 params = zstd_get_params(level, cc->rlen); in zstd_init_compress_ctx()
343 workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode), in zstd_init_compress_ctx()
346 return -ENOMEM; in zstd_init_compress_ctx()
350 f2fs_err_ratelimited(F2FS_I_SB(cc->inode), in zstd_init_compress_ctx()
353 return -EIO; in zstd_init_compress_ctx()
356 cc->private = workspace; in zstd_init_compress_ctx()
357 cc->private2 = stream; in zstd_init_compress_ctx()
359 cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE; in zstd_init_compress_ctx()
365 kvfree(cc->private); in zstd_destroy_compress_ctx()
366 cc->private = NULL; in zstd_destroy_compress_ctx()
367 cc->private2 = NULL; in zstd_destroy_compress_ctx()
372 zstd_cstream *stream = cc->private2; in zstd_compress_pages()
375 int src_size = cc->rlen; in zstd_compress_pages()
376 int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE; in zstd_compress_pages()
380 inbuf.src = cc->rbuf; in zstd_compress_pages()
384 outbuf.dst = cc->cbuf->cdata; in zstd_compress_pages()
389 f2fs_err_ratelimited(F2FS_I_SB(cc->inode), in zstd_compress_pages()
392 return -EIO; in zstd_compress_pages()
397 f2fs_err_ratelimited(F2FS_I_SB(cc->inode), in zstd_compress_pages()
400 return -EIO; in zstd_compress_pages()
408 return -EAGAIN; in zstd_compress_pages()
410 cc->clen = outbuf.pos; in zstd_compress_pages()
420 MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size); in zstd_init_decompress_ctx()
424 workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode), in zstd_init_decompress_ctx()
427 return -ENOMEM; in zstd_init_decompress_ctx()
431 f2fs_err_ratelimited(F2FS_I_SB(dic->inode), in zstd_init_decompress_ctx()
434 return -EIO; in zstd_init_decompress_ctx()
437 dic->private = workspace; in zstd_init_decompress_ctx()
438 dic->private2 = stream; in zstd_init_decompress_ctx()
445 kvfree(dic->private); in zstd_destroy_decompress_ctx()
446 dic->private = NULL; in zstd_destroy_decompress_ctx()
447 dic->private2 = NULL; in zstd_destroy_decompress_ctx()
452 zstd_dstream *stream = dic->private2; in zstd_decompress_pages()
458 inbuf.src = dic->cbuf->cdata; in zstd_decompress_pages()
459 inbuf.size = dic->clen; in zstd_decompress_pages()
462 outbuf.dst = dic->rbuf; in zstd_decompress_pages()
463 outbuf.size = dic->rlen; in zstd_decompress_pages()
467 f2fs_err_ratelimited(F2FS_I_SB(dic->inode), in zstd_decompress_pages()
470 return -EIO; in zstd_decompress_pages()
473 if (dic->rlen != outbuf.pos) { in zstd_decompress_pages()
474 f2fs_err_ratelimited(F2FS_I_SB(dic->inode), in zstd_decompress_pages()
476 __func__, dic->rlen, in zstd_decompress_pages()
477 PAGE_SIZE << dic->log_cluster_size); in zstd_decompress_pages()
478 return -EIO; in zstd_decompress_pages()
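
The zstd hooks use the kernel's streaming interface: size a workspace from zstd_get_params() and zstd_cstream_workspace_bound(), init the stream into it, then make one compress_stream() plus end_stream() pass over the cluster. A hedged sketch of the compression side (helper name and error mapping are assumptions):

#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/zstd.h>

/* sketch: one-shot streaming compression of a cluster-sized buffer */
static int zstd_compress_sketch(const void *src, size_t rlen,
				void *dst, size_t dst_cap,
				size_t *clen, int level)
{
	zstd_parameters params = zstd_get_params(level, rlen);
	size_t wsize = zstd_cstream_workspace_bound(&params.cParams);
	zstd_in_buffer inbuf = { .src = src, .size = rlen, .pos = 0 };
	zstd_out_buffer outbuf = { .dst = dst, .size = dst_cap, .pos = 0 };
	void *workspace = vmalloc(wsize);
	zstd_cstream *stream;
	size_t ret;

	if (!workspace)
		return -ENOMEM;
	stream = zstd_init_cstream(&params, rlen, workspace, wsize);
	if (!stream) {
		vfree(workspace);
		return -EIO;
	}
	ret = zstd_compress_stream(stream, &outbuf, &inbuf);
	if (!zstd_is_error(ret))
		ret = zstd_end_stream(stream, &outbuf);
	vfree(workspace);

	if (zstd_is_error(ret))
		return -EIO;
	if (ret)		/* output too small to flush everything */
		return -EAGAIN;
	*clen = outbuf.pos;	/* plays cc->clen */
	return 0;
}
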
506 ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata, in lzorle_compress_pages()
507 &cc->clen, cc->private); in lzorle_compress_pages()
509 f2fs_err_ratelimited(F2FS_I_SB(cc->inode), in lzorle_compress_pages()
510 "lzo-rle compress failed, ret:%d", ret); in lzorle_compress_pages()
511 return -EIO; in lzorle_compress_pages()
552 return f2fs_cops[F2FS_I(inode)->i_compress_algorithm]; in f2fs_is_compress_backend_ready()
559 if (cops->is_level_valid) in f2fs_is_compress_level_valid()
560 return cops->is_level_valid(lvl); in f2fs_is_compress_level_valid()
574 return compress_page_pool ? 0 : -ENOMEM; in f2fs_init_compress_mempool()
597 page->mapping = NULL; in f2fs_compress_free_page()
610 buf = vm_map_ram(pages, count, -1); in f2fs_vmap()
620 struct f2fs_inode_info *fi = F2FS_I(cc->inode); in f2fs_compress_pages()
622 f2fs_cops[fi->i_compress_algorithm]; in f2fs_compress_pages()
627 trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx, in f2fs_compress_pages()
628 cc->cluster_size, fi->i_compress_algorithm); in f2fs_compress_pages()
630 if (cops->init_compress_ctx) { in f2fs_compress_pages()
631 ret = cops->init_compress_ctx(cc); in f2fs_compress_pages()
636 max_len = COMPRESS_HEADER_SIZE + cc->clen; in f2fs_compress_pages()
637 cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE); in f2fs_compress_pages()
638 cc->valid_nr_cpages = cc->nr_cpages; in f2fs_compress_pages()
640 cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages); in f2fs_compress_pages()
641 if (!cc->cpages) { in f2fs_compress_pages()
642 ret = -ENOMEM; in f2fs_compress_pages()
646 for (i = 0; i < cc->nr_cpages; i++) in f2fs_compress_pages()
647 cc->cpages[i] = f2fs_compress_alloc_page(); in f2fs_compress_pages()
649 cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size); in f2fs_compress_pages()
650 if (!cc->rbuf) { in f2fs_compress_pages()
651 ret = -ENOMEM; in f2fs_compress_pages()
655 cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages); in f2fs_compress_pages()
656 if (!cc->cbuf) { in f2fs_compress_pages()
657 ret = -ENOMEM; in f2fs_compress_pages()
661 ret = cops->compress_pages(cc); in f2fs_compress_pages()
665 max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE; in f2fs_compress_pages()
667 if (cc->clen > max_len) { in f2fs_compress_pages()
668 ret = -EAGAIN; in f2fs_compress_pages()
672 cc->cbuf->clen = cpu_to_le32(cc->clen); in f2fs_compress_pages()
674 if (fi->i_compress_flag & BIT(COMPRESS_CHKSUM)) in f2fs_compress_pages()
675 chksum = f2fs_crc32(F2FS_I_SB(cc->inode), in f2fs_compress_pages()
676 cc->cbuf->cdata, cc->clen); in f2fs_compress_pages()
677 cc->cbuf->chksum = cpu_to_le32(chksum); in f2fs_compress_pages()
680 cc->cbuf->reserved[i] = cpu_to_le32(0); in f2fs_compress_pages()
682 new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE); in f2fs_compress_pages()
685 memset(&cc->cbuf->cdata[cc->clen], 0, in f2fs_compress_pages()
686 (new_nr_cpages * PAGE_SIZE) - in f2fs_compress_pages()
687 (cc->clen + COMPRESS_HEADER_SIZE)); in f2fs_compress_pages()
689 vm_unmap_ram(cc->cbuf, cc->nr_cpages); in f2fs_compress_pages()
690 vm_unmap_ram(cc->rbuf, cc->cluster_size); in f2fs_compress_pages()
692 for (i = new_nr_cpages; i < cc->nr_cpages; i++) { in f2fs_compress_pages()
693 f2fs_compress_free_page(cc->cpages[i]); in f2fs_compress_pages()
694 cc->cpages[i] = NULL; in f2fs_compress_pages()
697 if (cops->destroy_compress_ctx) in f2fs_compress_pages()
698 cops->destroy_compress_ctx(cc); in f2fs_compress_pages()
700 cc->valid_nr_cpages = new_nr_cpages; in f2fs_compress_pages()
702 trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx, in f2fs_compress_pages()
703 cc->clen, ret); in f2fs_compress_pages()
707 vm_unmap_ram(cc->cbuf, cc->nr_cpages); in f2fs_compress_pages()
709 vm_unmap_ram(cc->rbuf, cc->cluster_size); in f2fs_compress_pages()
711 for (i = 0; i < cc->nr_cpages; i++) { in f2fs_compress_pages()
712 if (cc->cpages[i]) in f2fs_compress_pages()
713 f2fs_compress_free_page(cc->cpages[i]); in f2fs_compress_pages()
715 page_array_free(cc->inode, cc->cpages, cc->nr_cpages); in f2fs_compress_pages()
716 cc->cpages = NULL; in f2fs_compress_pages()
718 if (cops->destroy_compress_ctx) in f2fs_compress_pages()
719 cops->destroy_compress_ctx(cc); in f2fs_compress_pages()
721 trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx, in f2fs_compress_pages()
722 cc->clen, ret); in f2fs_compress_pages()
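
After cops->compress_pages() returns, f2fs_compress_pages() accepts the result only if header plus payload fit in cluster_size - 1 pages, i.e. compression saves at least one block; otherwise it bails with -EAGAIN and the cluster is written raw. A standalone restatement with assumed sizes (the 24-byte header matches a clen word, a chksum word and four reserved words, but treat it as an assumption):

/* sketch: the accept/reject test from f2fs_compress_pages() */
#define PAGE_SZ   4096UL	/* assumed page size */
#define HDR_SZ    24UL		/* assumed cluster header size */

static int cluster_worth_compressing(unsigned long clen,
				     unsigned long cluster_size)
{
	unsigned long max_len = PAGE_SZ * (cluster_size - 1) - HDR_SZ;

	/* e.g. 4-page cluster: clen must be <= 3*4096 - 24 = 12264 bytes */
	return clen <= max_len;
}
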
733 struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode); in f2fs_decompress_cluster()
734 struct f2fs_inode_info *fi = F2FS_I(dic->inode); in f2fs_decompress_cluster()
736 f2fs_cops[fi->i_compress_algorithm]; in f2fs_decompress_cluster()
740 trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx, in f2fs_decompress_cluster()
741 dic->cluster_size, fi->i_compress_algorithm); in f2fs_decompress_cluster()
743 if (dic->failed) { in f2fs_decompress_cluster()
744 ret = -EIO; in f2fs_decompress_cluster()
754 dic->clen = le32_to_cpu(dic->cbuf->clen); in f2fs_decompress_cluster()
755 dic->rlen = PAGE_SIZE << dic->log_cluster_size; in f2fs_decompress_cluster()
757 if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) { in f2fs_decompress_cluster()
758 ret = -EFSCORRUPTED; in f2fs_decompress_cluster()
768 ret = cops->decompress_pages(dic); in f2fs_decompress_cluster()
770 if (!ret && (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))) { in f2fs_decompress_cluster()
771 u32 provided = le32_to_cpu(dic->cbuf->chksum); in f2fs_decompress_cluster()
772 u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen); in f2fs_decompress_cluster()
775 if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) { in f2fs_decompress_cluster()
776 set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT); in f2fs_decompress_cluster()
779 dic->inode->i_ino, in f2fs_decompress_cluster()
790 trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx, in f2fs_decompress_cluster()
791 dic->clen, ret); in f2fs_decompress_cluster()
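
The checksum gate at the end of f2fs_decompress_cluster() compares the chksum stored in the cluster header against a crc over the compressed payload; note that a mismatch only warns and sets FI_COMPRESS_CORRUPT once, it does not fail the read. A hedged sketch where the seed handling of f2fs_crc32() is simplified to the plain crc32() library call:

#include <linux/crc32.h>

/* sketch: recompute the payload crc and compare with the header copy */
static bool cluster_chksum_ok(u32 seed, const void *cdata, size_t clen,
			      u32 provided)
{
	u32 calculated = crc32(seed, cdata, clen);	/* f2fs_crc32() analogue */

	return provided == calculated;
}
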
796 * This is called when a page of a compressed cluster has been read from disk
797 * (or failed to be read from disk). It checks whether this page was the last
798 * page being waited on in the cluster, and if so, it decompresses the cluster
806 struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode); in f2fs_end_read_compressed_page()
811 WRITE_ONCE(dic->failed, true); in f2fs_end_read_compressed_page()
814 dic->inode->i_ino, blkaddr); in f2fs_end_read_compressed_page()
816 if (atomic_dec_and_test(&dic->remaining_pages)) in f2fs_end_read_compressed_page()
820 static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index) in is_page_in_cluster() argument
822 if (cc->cluster_idx == NULL_CLUSTER) in is_page_in_cluster()
824 return cc->cluster_idx == cluster_idx(cc, index); in is_page_in_cluster()
829 return cc->nr_rpages == 0; in f2fs_cluster_is_empty()
834 return cc->cluster_size == cc->nr_rpages; in f2fs_cluster_is_full()
837 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index) in f2fs_cluster_can_merge_page() argument
841 return is_page_in_cluster(cc, index); in f2fs_cluster_can_merge_page()
845 int index, int nr_pages, bool uptodate) in f2fs_all_cluster_page_ready() argument
847 unsigned long pgidx = pages[index]->index; in f2fs_all_cluster_page_ready()
851 * when uptodate is set to true, check whether all pages in the in f2fs_all_cluster_page_ready()
852 * cluster are uptodate or not. in f2fs_all_cluster_page_ready()
854 if (uptodate && (pgidx % cc->cluster_size)) in f2fs_all_cluster_page_ready()
857 if (nr_pages - index < cc->cluster_size) in f2fs_all_cluster_page_ready()
860 for (; i < cc->cluster_size; i++) { in f2fs_all_cluster_page_ready()
861 if (pages[index + i]->index != pgidx + i) in f2fs_all_cluster_page_ready()
863 if (uptodate && !PageUptodate(pages[index + i])) in f2fs_all_cluster_page_ready()
872 loff_t i_size = i_size_read(cc->inode); in cluster_has_invalid_data()
876 for (i = 0; i < cc->cluster_size; i++) { in cluster_has_invalid_data()
877 struct page *page = cc->rpages[i]; in cluster_has_invalid_data()
879 f2fs_bug_on(F2FS_I_SB(cc->inode), !page); in cluster_has_invalid_data()
882 if (page->index >= nr_pages) in cluster_has_invalid_data()
891 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); in f2fs_sanity_check_cluster()
892 unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size; in f2fs_sanity_check_cluster()
898 if (dn->data_blkaddr != COMPRESS_ADDR) in f2fs_sanity_check_cluster()
902 if (dn->ofs_in_node % cluster_size) { in f2fs_sanity_check_cluster()
908 block_t blkaddr = data_blkaddr(dn->inode, dn->node_page, in f2fs_sanity_check_cluster()
909 dn->ofs_in_node + i); in f2fs_sanity_check_cluster()
928 f2fs_bug_on(F2FS_I_SB(dn->inode), count != cluster_size && in f2fs_sanity_check_cluster()
929 !is_inode_flag_set(dn->inode, FI_COMPRESS_RELEASED)); in f2fs_sanity_check_cluster()
933 f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s", in f2fs_sanity_check_cluster()
934 dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason); in f2fs_sanity_check_cluster()
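
f2fs_sanity_check_cluster() enforces the on-disk shape of a compressed cluster: the COMPRESS_ADDR marker occupies slot 0 of the cluster, must sit on a cluster-size boundary in the dnode, and must not reappear in the cluster body. A schematic validator; the reserved address value is modelled on f2fs's convention (COMPRESS_ADDR == (block_t)-2) and the helper itself is an assumption:

typedef unsigned int block_t;
#define COMPRESS_ADDR_SK	((block_t)-2)	/* assumed marker value */

/* sketch: slot[] holds the cluster's block addresses from the dnode */
static bool cluster_layout_ok(const block_t *slot, unsigned int ofs_in_node,
			      unsigned int cluster_size)
{
	unsigned int i;

	if (slot[0] != COMPRESS_ADDR_SK)
		return true;	/* raw cluster: nothing to enforce */
	if (ofs_in_node % cluster_size)
		return false;	/* marker off the cluster boundary */
	for (i = 1; i < cluster_size; i++)
		if (slot[i] == COMPRESS_ADDR_SK)
			return false;	/* duplicated marker in the body */
	return true;
}
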
945 unsigned int cluster_size = F2FS_I(inode)->i_cluster_size; in __f2fs_get_cluster_blocks()
949 block_t blkaddr = data_blkaddr(dn->inode, dn->node_page, in __f2fs_get_cluster_blocks()
950 dn->ofs_in_node + i); in __f2fs_get_cluster_blocks()
964 F2FS_I(inode)->i_log_cluster_size; in __f2fs_cluster_blocks()
970 if (ret == -ENOENT) in __f2fs_cluster_blocks()
976 ret = -EFSCORRUPTED; in __f2fs_cluster_blocks()
993 /* return # of compressed blocks in compressed cluster */
996 return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, in f2fs_compressed_blocks()
1000 /* return # of raw blocks in non-compressed cluster */
1008 /* return whether the cluster is a compressed one or not */
1009 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index) in f2fs_is_compressed_cluster() argument
1012 index >> F2FS_I(inode)->i_log_cluster_size, in f2fs_is_compressed_cluster()
1016 /* return whether the cluster contains non-raw blocks or not */
1017 bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index) in f2fs_is_sparse_cluster() argument
1019 unsigned int cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size; in f2fs_is_sparse_cluster()
1022 F2FS_I(inode)->i_cluster_size; in f2fs_is_sparse_cluster()
1027 if (!f2fs_need_compress_data(cc->inode)) in cluster_may_compress()
1029 if (f2fs_is_atomic_file(cc->inode)) in cluster_may_compress()
1033 if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode)))) in cluster_may_compress()
1042 for (i = 0; i < cc->cluster_size; i++) { in set_cluster_writeback()
1043 if (cc->rpages[i]) in set_cluster_writeback()
1044 set_page_writeback(cc->rpages[i]); in set_cluster_writeback()
1052 for (i = 0; i < cc->cluster_size; i++) in set_cluster_dirty()
1053 if (cc->rpages[i]) { in set_cluster_dirty()
1054 set_page_dirty(cc->rpages[i]); in set_cluster_dirty()
1055 set_page_private_gcing(cc->rpages[i]); in set_cluster_dirty()
1060 struct page **pagep, pgoff_t index, void **fsdata) in prepare_compress_overwrite() argument
1062 struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode); in prepare_compress_overwrite()
1063 struct address_space *mapping = cc->inode->i_mapping; in prepare_compress_overwrite()
1071 ret = f2fs_is_compressed_cluster(cc->inode, start_idx); in prepare_compress_overwrite()
1080 for (i = 0; i < cc->cluster_size; i++) { in prepare_compress_overwrite()
1084 ret = -ENOMEM; in prepare_compress_overwrite()
1097 ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size, in prepare_compress_overwrite()
1111 for (i = 0; i < cc->cluster_size; i++) { in prepare_compress_overwrite()
1112 f2fs_bug_on(sbi, cc->rpages[i]); in prepare_compress_overwrite()
1133 *fsdata = cc->rpages; in prepare_compress_overwrite()
1134 *pagep = cc->rpages[offset_in_cluster(cc, index)]; in prepare_compress_overwrite()
1135 return cc->cluster_size; in prepare_compress_overwrite()
1147 struct page **pagep, pgoff_t index, void **fsdata) in f2fs_prepare_compress_overwrite() argument
1151 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size, in f2fs_prepare_compress_overwrite()
1152 .cluster_size = F2FS_I(inode)->i_cluster_size, in f2fs_prepare_compress_overwrite()
1153 .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size, in f2fs_prepare_compress_overwrite()
1158 return prepare_compress_overwrite(&cc, pagep, index, fsdata); in f2fs_prepare_compress_overwrite()
1162 pgoff_t index, unsigned copied) in f2fs_compress_write_end() argument
1167 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size, in f2fs_compress_write_end()
1168 .cluster_size = F2FS_I(inode)->i_cluster_size, in f2fs_compress_write_end()
1171 bool first_index = (index == cc.rpages[0]->index); in f2fs_compress_write_end()
1186 int log_cluster_size = F2FS_I(inode)->i_log_cluster_size; in f2fs_truncate_partial_cluster()
1195 /* truncate normal cluster */ in f2fs_truncate_partial_cluster()
1199 /* truncate compressed cluster */ in f2fs_truncate_partial_cluster()
1203 /* should not be a normal cluster */ in f2fs_truncate_partial_cluster()
1211 int cluster_size = F2FS_I(inode)->i_cluster_size; in f2fs_truncate_partial_cluster()
1214 for (i = cluster_size - 1; i >= 0; i--) { in f2fs_truncate_partial_cluster()
1215 loff_t start = rpages[i]->index << PAGE_SHIFT; in f2fs_truncate_partial_cluster()
1220 zero_user_segment(rpages[i], from - start, in f2fs_truncate_partial_cluster()
1236 struct inode *inode = cc->inode; in f2fs_write_compressed_pages()
1241 .ino = cc->inode->i_ino, in f2fs_write_compressed_pages()
1252 .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ? in f2fs_write_compressed_pages()
1259 unsigned int last_index = cc->cluster_size - 1; in f2fs_write_compressed_pages()
1266 mapping_set_error(cc->rpages[0]->mapping, -EIO); in f2fs_write_compressed_pages()
1276 f2fs_down_read(&sbi->node_write); in f2fs_write_compressed_pages()
1281 set_new_dnode(&dn, cc->inode, NULL, NULL, 0); in f2fs_write_compressed_pages()
1287 for (i = 0; i < cc->cluster_size; i++) { in f2fs_write_compressed_pages()
1293 psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT; in f2fs_write_compressed_pages()
1305 cic->magic = F2FS_COMPRESSED_PAGE_MAGIC; in f2fs_write_compressed_pages()
1306 cic->inode = inode; in f2fs_write_compressed_pages()
1307 atomic_set(&cic->pending_pages, cc->valid_nr_cpages); in f2fs_write_compressed_pages()
1308 cic->rpages = page_array_alloc(cc->inode, cc->cluster_size); in f2fs_write_compressed_pages()
1309 if (!cic->rpages) in f2fs_write_compressed_pages()
1312 cic->nr_rpages = cc->cluster_size; in f2fs_write_compressed_pages()
1314 for (i = 0; i < cc->valid_nr_cpages; i++) { in f2fs_write_compressed_pages()
1315 f2fs_set_compressed_page(cc->cpages[i], inode, in f2fs_write_compressed_pages()
1316 cc->rpages[i + 1]->index, cic); in f2fs_write_compressed_pages()
1317 fio.compressed_page = cc->cpages[i]; in f2fs_write_compressed_pages()
1326 fio.page = cc->rpages[i + 1]; in f2fs_write_compressed_pages()
1330 cc->cpages[i] = fio.encrypted_page; in f2fs_write_compressed_pages()
1336 for (i = 0; i < cc->cluster_size; i++) in f2fs_write_compressed_pages()
1337 cic->rpages[i] = cc->rpages[i]; in f2fs_write_compressed_pages()
1339 for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) { in f2fs_write_compressed_pages()
1343 fio.page = cc->rpages[i]; in f2fs_write_compressed_pages()
1346 /* cluster header */ in f2fs_write_compressed_pages()
1359 if (i > cc->valid_nr_cpages) { in f2fs_write_compressed_pages()
1370 fio.encrypted_page = cc->cpages[i - 1]; in f2fs_write_compressed_pages()
1372 fio.compressed_page = cc->cpages[i - 1]; in f2fs_write_compressed_pages()
1374 cc->cpages[i - 1] = NULL; in f2fs_write_compressed_pages()
1378 inode_dec_dirty_pages(cc->inode); in f2fs_write_compressed_pages()
1383 f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false); in f2fs_write_compressed_pages()
1384 f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true); in f2fs_write_compressed_pages()
1385 add_compr_block_stat(inode, cc->valid_nr_cpages); in f2fs_write_compressed_pages()
1387 set_inode_flag(cc->inode, FI_APPEND_WRITE); in f2fs_write_compressed_pages()
1391 f2fs_up_read(&sbi->node_write); in f2fs_write_compressed_pages()
1395 spin_lock(&fi->i_size_lock); in f2fs_write_compressed_pages()
1396 if (fi->last_disk_size < psize) in f2fs_write_compressed_pages()
1397 fi->last_disk_size = psize; in f2fs_write_compressed_pages()
1398 spin_unlock(&fi->i_size_lock); in f2fs_write_compressed_pages()
1401 page_array_free(cc->inode, cc->cpages, cc->nr_cpages); in f2fs_write_compressed_pages()
1402 cc->cpages = NULL; in f2fs_write_compressed_pages()
1407 page_array_free(cc->inode, cic->rpages, cc->cluster_size); in f2fs_write_compressed_pages()
1409 for (--i; i >= 0; i--) in f2fs_write_compressed_pages()
1410 fscrypt_finalize_bounce_page(&cc->cpages[i]); in f2fs_write_compressed_pages()
1417 f2fs_up_read(&sbi->node_write); in f2fs_write_compressed_pages()
1421 for (i = 0; i < cc->valid_nr_cpages; i++) { in f2fs_write_compressed_pages()
1422 f2fs_compress_free_page(cc->cpages[i]); in f2fs_write_compressed_pages()
1423 cc->cpages[i] = NULL; in f2fs_write_compressed_pages()
1425 page_array_free(cc->inode, cc->cpages, cc->nr_cpages); in f2fs_write_compressed_pages()
1426 cc->cpages = NULL; in f2fs_write_compressed_pages()
1427 return -EAGAIN; in f2fs_write_compressed_pages()
1432 struct f2fs_sb_info *sbi = bio->bi_private; in f2fs_compress_write_end_io()
1439 if (unlikely(bio->bi_status)) in f2fs_compress_write_end_io()
1440 mapping_set_error(cic->inode->i_mapping, -EIO); in f2fs_compress_write_end_io()
1446 if (atomic_dec_return(&cic->pending_pages)) in f2fs_compress_write_end_io()
1449 for (i = 0; i < cic->nr_rpages; i++) { in f2fs_compress_write_end_io()
1450 WARN_ON(!cic->rpages[i]); in f2fs_compress_write_end_io()
1451 clear_page_private_gcing(cic->rpages[i]); in f2fs_compress_write_end_io()
1452 end_page_writeback(cic->rpages[i]); in f2fs_compress_write_end_io()
1455 page_array_free(cic->inode, cic->rpages, cic->nr_rpages); in f2fs_compress_write_end_io()
1464 struct address_space *mapping = cc->inode->i_mapping; in f2fs_write_raw_pages()
1471 for (i = 0; i < cc->cluster_size; i++) { in f2fs_write_raw_pages()
1472 if (!cc->rpages[i]) in f2fs_write_raw_pages()
1475 redirty_page_for_writepage(wbc, cc->rpages[i]); in f2fs_write_raw_pages()
1476 unlock_page(cc->rpages[i]); in f2fs_write_raw_pages()
1482 /* overwrite compressed cluster w/ normal cluster */ in f2fs_write_raw_pages()
1486 for (i = 0; i < cc->cluster_size; i++) { in f2fs_write_raw_pages()
1487 if (!cc->rpages[i]) in f2fs_write_raw_pages()
1490 lock_page(cc->rpages[i]); in f2fs_write_raw_pages()
1492 if (cc->rpages[i]->mapping != mapping) { in f2fs_write_raw_pages()
1494 unlock_page(cc->rpages[i]); in f2fs_write_raw_pages()
1498 if (!PageDirty(cc->rpages[i])) in f2fs_write_raw_pages()
1501 if (PageWriteback(cc->rpages[i])) { in f2fs_write_raw_pages()
1502 if (wbc->sync_mode == WB_SYNC_NONE) in f2fs_write_raw_pages()
1504 f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true); in f2fs_write_raw_pages()
1507 if (!clear_page_dirty_for_io(cc->rpages[i])) in f2fs_write_raw_pages()
1510 ret = f2fs_write_single_data_page(cc->rpages[i], &submitted, in f2fs_write_raw_pages()
1515 unlock_page(cc->rpages[i]); in f2fs_write_raw_pages()
1517 } else if (ret == -EAGAIN) { in f2fs_write_raw_pages()
1521 * avoid deadlock caused by cluster update race in f2fs_write_raw_pages()
1524 if (IS_NOQUOTA(cc->inode)) in f2fs_write_raw_pages()
1553 if (err == -EAGAIN) { in f2fs_write_multi_pages()
1554 add_compr_block_stat(cc->inode, cc->cluster_size); in f2fs_write_multi_pages()
1565 f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN); in f2fs_write_multi_pages()
1568 f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted); in f2fs_write_multi_pages()
1587 f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm]; in f2fs_prepare_decomp_mem()
1590 if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc)) in f2fs_prepare_decomp_mem()
1593 dic->tpages = page_array_alloc(dic->inode, dic->cluster_size); in f2fs_prepare_decomp_mem()
1594 if (!dic->tpages) in f2fs_prepare_decomp_mem()
1595 return -ENOMEM; in f2fs_prepare_decomp_mem()
1597 for (i = 0; i < dic->cluster_size; i++) { in f2fs_prepare_decomp_mem()
1598 if (dic->rpages[i]) { in f2fs_prepare_decomp_mem()
1599 dic->tpages[i] = dic->rpages[i]; in f2fs_prepare_decomp_mem()
1603 dic->tpages[i] = f2fs_compress_alloc_page(); in f2fs_prepare_decomp_mem()
1606 dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size); in f2fs_prepare_decomp_mem()
1607 if (!dic->rbuf) in f2fs_prepare_decomp_mem()
1608 return -ENOMEM; in f2fs_prepare_decomp_mem()
1610 dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages); in f2fs_prepare_decomp_mem()
1611 if (!dic->cbuf) in f2fs_prepare_decomp_mem()
1612 return -ENOMEM; in f2fs_prepare_decomp_mem()
1614 if (cops->init_decompress_ctx) in f2fs_prepare_decomp_mem()
1615 return cops->init_decompress_ctx(dic); in f2fs_prepare_decomp_mem()
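
f2fs_prepare_decomp_mem() needs a full cluster of target pages so it can vmap a contiguous rbuf, but the pagecache may hold only some of the cluster; the tpages[] loop above reuses the pages that exist and allocates scratch pages for the holes. A hedged sketch of that fill step (helper name is an assumption, error unwinding elided):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* sketch: tpages[] ends up fully populated; scratch entries are the
 * ones where rpages[i] was NULL and must be freed after decompression
 */
static int fill_tpages_sketch(struct page **rpages, struct page **tpages,
			      unsigned int cluster_size)
{
	unsigned int i;

	for (i = 0; i < cluster_size; i++) {
		if (rpages[i]) {
			tpages[i] = rpages[i];	/* decompress into pagecache */
			continue;
		}
		tpages[i] = alloc_page(GFP_NOFS);
		if (!tpages[i])
			return -ENOMEM;
	}
	return 0;
}
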
1624 f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm]; in f2fs_release_decomp_mem()
1626 if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc)) in f2fs_release_decomp_mem()
1629 if (!bypass_destroy_callback && cops->destroy_decompress_ctx) in f2fs_release_decomp_mem()
1630 cops->destroy_decompress_ctx(dic); in f2fs_release_decomp_mem()
1632 if (dic->cbuf) in f2fs_release_decomp_mem()
1633 vm_unmap_ram(dic->cbuf, dic->nr_cpages); in f2fs_release_decomp_mem()
1635 if (dic->rbuf) in f2fs_release_decomp_mem()
1636 vm_unmap_ram(dic->rbuf, dic->cluster_size); in f2fs_release_decomp_mem()
1646 struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode); in f2fs_alloc_dic()
1651 return ERR_PTR(-ENOMEM); in f2fs_alloc_dic()
1653 dic->rpages = page_array_alloc(cc->inode, cc->cluster_size); in f2fs_alloc_dic()
1654 if (!dic->rpages) { in f2fs_alloc_dic()
1656 return ERR_PTR(-ENOMEM); in f2fs_alloc_dic()
1659 dic->magic = F2FS_COMPRESSED_PAGE_MAGIC; in f2fs_alloc_dic()
1660 dic->inode = cc->inode; in f2fs_alloc_dic()
1661 atomic_set(&dic->remaining_pages, cc->nr_cpages); in f2fs_alloc_dic()
1662 dic->cluster_idx = cc->cluster_idx; in f2fs_alloc_dic()
1663 dic->cluster_size = cc->cluster_size; in f2fs_alloc_dic()
1664 dic->log_cluster_size = cc->log_cluster_size; in f2fs_alloc_dic()
1665 dic->nr_cpages = cc->nr_cpages; in f2fs_alloc_dic()
1666 refcount_set(&dic->refcnt, 1); in f2fs_alloc_dic()
1667 dic->failed = false; in f2fs_alloc_dic()
1668 dic->need_verity = f2fs_need_verity(cc->inode, start_idx); in f2fs_alloc_dic()
1670 for (i = 0; i < dic->cluster_size; i++) in f2fs_alloc_dic()
1671 dic->rpages[i] = cc->rpages[i]; in f2fs_alloc_dic()
1672 dic->nr_rpages = cc->cluster_size; in f2fs_alloc_dic()
1674 dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages); in f2fs_alloc_dic()
1675 if (!dic->cpages) { in f2fs_alloc_dic()
1676 ret = -ENOMEM; in f2fs_alloc_dic()
1680 for (i = 0; i < dic->nr_cpages; i++) { in f2fs_alloc_dic()
1684 f2fs_set_compressed_page(page, cc->inode, in f2fs_alloc_dic()
1686 dic->cpages[i] = page; in f2fs_alloc_dic()
1707 if (dic->tpages) { in f2fs_free_dic()
1708 for (i = 0; i < dic->cluster_size; i++) { in f2fs_free_dic()
1709 if (dic->rpages[i]) in f2fs_free_dic()
1711 if (!dic->tpages[i]) in f2fs_free_dic()
1713 f2fs_compress_free_page(dic->tpages[i]); in f2fs_free_dic()
1715 page_array_free(dic->inode, dic->tpages, dic->cluster_size); in f2fs_free_dic()
1718 if (dic->cpages) { in f2fs_free_dic()
1719 for (i = 0; i < dic->nr_cpages; i++) { in f2fs_free_dic()
1720 if (!dic->cpages[i]) in f2fs_free_dic()
1722 f2fs_compress_free_page(dic->cpages[i]); in f2fs_free_dic()
1724 page_array_free(dic->inode, dic->cpages, dic->nr_cpages); in f2fs_free_dic()
1727 page_array_free(dic->inode, dic->rpages, dic->nr_rpages); in f2fs_free_dic()
1741 if (refcount_dec_and_test(&dic->refcnt)) { in f2fs_put_dic()
1745 INIT_WORK(&dic->free_work, f2fs_late_free_dic); in f2fs_put_dic()
1746 queue_work(F2FS_I_SB(dic->inode)->post_read_wq, in f2fs_put_dic()
1747 &dic->free_work); in f2fs_put_dic()
1759 for (i = 0; i < dic->cluster_size; i++) { in f2fs_verify_cluster()
1760 struct page *rpage = dic->rpages[i]; in f2fs_verify_cluster()
1776 * This is called when a compressed cluster has been decompressed
1784 if (!failed && dic->need_verity) { in f2fs_decompress_end_io()
1791 INIT_WORK(&dic->verity_work, f2fs_verify_cluster); in f2fs_decompress_end_io()
1792 fsverity_enqueue_verify_work(&dic->verity_work); in f2fs_decompress_end_io()
1796 /* Update and unlock the cluster's pagecache pages. */ in f2fs_decompress_end_io()
1797 for (i = 0; i < dic->cluster_size; i++) { in f2fs_decompress_end_io()
1798 struct page *rpage = dic->rpages[i]; in f2fs_decompress_end_io()
1831 * check whether cluster blocks are contiguous, and add extent cache entry
1832 * only if cluster blocks are logically and physically contiguous.
1837 bool compressed = data_blkaddr(dn->inode, dn->node_page, in f2fs_cluster_blocks_are_contiguous()
1840 block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page, in f2fs_cluster_blocks_are_contiguous()
1843 for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) { in f2fs_cluster_blocks_are_contiguous()
1844 block_t blkaddr = data_blkaddr(dn->inode, dn->node_page, in f2fs_cluster_blocks_are_contiguous()
1849 if (first_blkaddr + i - (compressed ? 1 : 0) != blkaddr) in f2fs_cluster_blocks_are_contiguous()
1853 return compressed ? i - 1 : i; in f2fs_cluster_blocks_are_contiguous()
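
Concretely, with the COMPRESS_ADDR marker in slot 0, payload block i must sit at blkaddr[1] + i - 1 on disk for the run to count as physically contiguous, and the return value is the length of that run. A minimal restatement of the loop with hypothetical names (the validity checks on each address are elided):

/* sketch: count physically contiguous payload blocks after the marker */
static int contiguous_run_sketch(const unsigned int *blkaddr, int cluster_size)
{
	int i;

	for (i = 2; i < cluster_size; i++)
		if (blkaddr[i] != blkaddr[1] + i - 1)
			break;		/* physical gap ends the run */
	return i - 1;			/* payload blocks, marker excluded */
}
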
1864 return sbi->compress_inode->i_mapping; in COMPRESS_MAPPING()
1869 if (!sbi->compress_inode) in f2fs_invalidate_compress_page()
1930 atomic_inc(&sbi->compress_page_hit); in f2fs_load_compressed_page()
1945 pgoff_t index = 0; in f2fs_invalidate_compress_pages() local
1948 if (!mapping->nrpages) in f2fs_invalidate_compress_pages()
1956 nr = filemap_get_folios(mapping, &index, end - 1, &fbatch); in f2fs_invalidate_compress_pages()
1964 if (folio->mapping != mapping) { in f2fs_invalidate_compress_pages()
1969 if (ino != get_page_private_data(&folio->page)) { in f2fs_invalidate_compress_pages()
1974 generic_error_remove_page(mapping, &folio->page); in f2fs_invalidate_compress_pages()
1979 } while (index < end); in f2fs_invalidate_compress_pages()
1989 inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi)); in f2fs_init_compress_inode()
1992 sbi->compress_inode = inode; in f2fs_init_compress_inode()
1994 sbi->compress_percent = COMPRESS_PERCENT; in f2fs_init_compress_inode()
1995 sbi->compress_watermark = COMPRESS_WATERMARK; in f2fs_init_compress_inode()
1997 atomic_set(&sbi->compress_page_hit, 0); in f2fs_init_compress_inode()
2004 if (!sbi->compress_inode) in f2fs_destroy_compress_inode()
2006 iput(sbi->compress_inode); in f2fs_destroy_compress_inode()
2007 sbi->compress_inode = NULL; in f2fs_destroy_compress_inode()
2012 dev_t dev = sbi->sb->s_bdev->bd_dev; in f2fs_init_page_array_cache()
2018 sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev)); in f2fs_init_page_array_cache()
2020 sbi->page_array_slab_size = sizeof(struct page *) << in f2fs_init_page_array_cache()
2023 sbi->page_array_slab = f2fs_kmem_cache_create(slab_name, in f2fs_init_page_array_cache()
2024 sbi->page_array_slab_size); in f2fs_init_page_array_cache()
2025 return sbi->page_array_slab ? 0 : -ENOMEM; in f2fs_init_page_array_cache()
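
The slab is created per block device (the name embeds MAJOR:MINOR) and each object holds one cluster's worth of struct page pointers, which is why the fast path in page_array_alloc() almost never misses. A hedged sketch; the shift would come from the mount's compression cluster size, and the bare kmem_cache_create() flags here are assumptions in place of f2fs's own wrapper:

#include <linux/kdev_t.h>
#include <linux/slab.h>

static struct kmem_cache *page_array_slab_sketch(dev_t dev,
						 unsigned int log_cluster_size)
{
	char slab_name[32];

	snprintf(slab_name, sizeof(slab_name), "f2fs_page_array_entry-%u:%u",
		 MAJOR(dev), MINOR(dev));

	/* one object == one cluster of page pointers, e.g. 8 << 2 = 32 bytes */
	return kmem_cache_create(slab_name,
				 sizeof(struct page *) << log_cluster_size,
				 0, 0, NULL);
}
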
2030 kmem_cache_destroy(sbi->page_array_slab); in f2fs_destroy_page_array_cache()
2038 return -ENOMEM; in f2fs_init_compress_cache()
2046 return -ENOMEM; in f2fs_init_compress_cache()