// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "compress.h"
#include <linux/module.h>
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX	65535	/* set to maximum value by default */
#endif

/*
 * Worst-case number of pages an LZ4 back-reference can reach across:
 * the window in pages, plus one extra page for an unaligned start.
 */
#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
/*
 * Safety margin (in bytes) required between the end of the compressed
 * input and the end of the output buffer for in-place decompression.
 */
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)  (((srcsize) >> 8) + 32)
#endif

/* One decompression backend: a method callback plus a printable name. */
struct z_erofs_decompressor {
	int (*decompress)(struct z_erofs_decompress_req *rq,
			  struct list_head *pagepool);
	char *name;
};

/*
 * Parse the on-disk LZ4 configuration and cache the derived limits in the
 * in-memory superblock info (sbi->lz4).
 *
 * @sb:   superblock being mounted
 * @dsb:  on-disk superblock (used for the legacy max_distance field when
 *        no explicit lz4 config record exists)
 * @lz4:  optional on-disk lz4 config record, or NULL for legacy images
 * @size: size of @lz4 in bytes (validated against the structure size)
 *
 * Returns 0 on success or a negative errno on invalid configuration.
 */
int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb,
			    struct z_erofs_lz4_cfgs *lz4, int size)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	u16 distance;

	if (lz4) {
		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
			return -EINVAL;
		}
		distance = le16_to_cpu(lz4->max_distance);

		sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
		if (!sbi->lz4.max_pclusterblks) {
			sbi->lz4.max_pclusterblks = 1;	/* reserved case */
		} else if (sbi->lz4.max_pclusterblks >
			   Z_EROFS_PCLUSTER_MAX_SIZE / EROFS_BLKSIZ) {
			erofs_err(sb, "too large lz4 pclusterblks %u",
				  sbi->lz4.max_pclusterblks);
			return -EINVAL;
		} else if (sbi->lz4.max_pclusterblks >= 2) {
			erofs_info(sb, "EXPERIMENTAL big pcluster feature in use. Use at your own risk!");
		}
	} else {
		/* legacy image: distance lives in the superblock itself */
		distance = le16_to_cpu(dsb->u1.lz4_max_distance);
		sbi->lz4.max_pclusterblks = 1;
	}

	/* 0 means "unspecified": fall back to the compile-time maximum */
	sbi->lz4.max_distance_pages = distance ?
					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
					LZ4_MAX_DISTANCE_PAGES;
	/* make sure the per-CPU buffers can hold a full pcluster */
	return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
}

/*
 * Fill all gaps with bounce pages if it's a sparse page list. Also check if
 * all physical pages are consecutive, which can be seen for moderate CR.
 *
 * A hole in rq->out[] may be reused as a bounce page once it is at least
 * lz4_max_distance_pages behind the current position, since LZ4 matches
 * cannot reach back further than that.
 *
 * Returns 1 if every output page is physically consecutive (so the whole
 * range can be addressed linearly without vmap), 0 otherwise, or a
 * negative errno on allocation failure (none today: allocation is
 * __GFP_NOFAIL).
 */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq,
					struct list_head *pagepool)
{
	const unsigned int nr =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	/* recently-freed candidate pages that may be reused as bounce pages */
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	/* bitmap over a sliding window of the last lz4_max_distance_pages */
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	for (i = j = 0; i < nr; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		/* j indexes the circular window over the last N pages */
		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			/* the page that fell out of the window is reusable */
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (kaddr) {
				/* still consecutive with the previous page? */
				if (kaddr + PAGE_SIZE == page_address(page))
					kaddr += PAGE_SIZE;
				else
					kaddr = NULL;
			} else if (!i) {
				kaddr = page_address(page);
			}
			continue;
		}
		/* a hole: physical consecutiveness is broken for sure */
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			/* reuse a page that is already out of LZ4 reach */
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool,
						 GFP_KERNEL | __GFP_NOFAIL);
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
	return kaddr ? 1 : 0;
}

/*
 * Map the compressed input so it can be fed to LZ4 as one virtually
 * contiguous buffer, choosing the cheapest safe strategy:
 *
 *   *maptype == 0: single input page, already mapped (@inpage reused);
 *   *maptype == 1: multiple pages mapped via erofs_vm_map_ram();
 *   *maptype == 2: data copied into a per-CPU buffer (needed when
 *                  in-place decompression would overlap unsafely).
 *
 * @inpage is the kmap_atomic()'d first input page; ownership of the
 * mapping transfers to this function (it is unmapped on all paths that
 * don't return it directly).
 *
 * Returns the virtual address of the input data or an ERR_PTR().
 */
static void *z_erofs_lz4_handle_inplace_io(struct z_erofs_decompress_req *rq,
			void *inpage, unsigned int *inputmargin, int *maptype,
			bool support_0padding)
{
	unsigned int nrpages_in, nrpages_out;
	unsigned int ofull, oend, inputsize, total, i, j;
	struct page **in;
	void *src, *tmp;

	inputsize = rq->inputsize;
	nrpages_in = PAGE_ALIGN(inputsize) >> PAGE_SHIFT;
	oend = rq->pageofs_out + rq->outputsize;
	ofull = PAGE_ALIGN(oend);
	nrpages_out = ofull >> PAGE_SHIFT;

	if (rq->inplace_io) {
		/*
		 * In-place decoding is only safe with 0padding, full
		 * decoding, and enough tail margin in the output buffer.
		 */
		if (rq->partial_decoding || !support_0padding ||
		    ofull - oend < LZ4_DECOMPRESS_INPLACE_MARGIN(inputsize))
			goto docopy;

		/*
		 * Each input page must not collide with an output page
		 * positioned before its aligned location, otherwise the
		 * decompressor would overwrite not-yet-read input.
		 */
		for (i = 0; i < nrpages_in; ++i) {
			DBG_BUGON(rq->in[i] == NULL);
			for (j = 0; j < nrpages_out - nrpages_in + i; ++j)
				if (rq->out[j] == rq->in[i])
					goto docopy;
		}
	}

	if (nrpages_in <= 1) {
		*maptype = 0;
		return inpage;
	}
	/* drop the atomic mapping before a possibly-sleeping vmap */
	kunmap_atomic(inpage);
	might_sleep();
	src = erofs_vm_map_ram(rq->in, nrpages_in);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/* Or copy compressed data which can be overlapped to per-CPU buffer */
	in = rq->in;
	src = erofs_get_pcpubuf(nrpages_in);
	if (!src) {
		DBG_BUGON(1);
		kunmap_atomic(inpage);
		return ERR_PTR(-EFAULT);
	}

	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

		if (!inpage)
			inpage = kmap_atomic(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_atomic(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		/* only the very first page carries a leading margin */
		*inputmargin = 0;
	}
	*maptype = 2;
	return src;
}

/*
 * Decompress the request's LZ4 input into the linear buffer @out.
 *
 * Handles 0padding detection (leading zero bytes in the first input page
 * are skipped as margin), input mapping/copy via
 * z_erofs_lz4_handle_inplace_io(), the actual LZ4 call, and teardown of
 * whichever mapping strategy was chosen.
 *
 * Returns 0 on success or a negative errno.
 */
static int z_erofs_lz4_decompress_mem(struct z_erofs_decompress_req *rq,
				      u8 *out)
{
	unsigned int inputmargin;
	u8 *headpage, *src;
	bool support_0padding;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_atomic(*rq->in);
	inputmargin = 0;
	support_0padding = false;

	/* decompression inplace is only safe when 0padding is enabled */
	if (erofs_sb_has_lz4_0padding(EROFS_SB(rq->sb))) {
		support_0padding = true;

		/* skip leading zeroes, but never walk past the first page */
		while (!headpage[inputmargin & ~PAGE_MASK])
			if (!(++inputmargin & ~PAGE_MASK))
				break;

		/* all-zero input of claimed size: corrupted on-disk data */
		if (inputmargin >= rq->inputsize) {
			kunmap_atomic(headpage);
			return -EIO;
		}
	}

	rq->inputsize -= inputmargin;
	src = z_erofs_lz4_handle_inplace_io(rq, headpage, &inputmargin,
					    &maptype, support_0padding);
	if (IS_ERR(src))
		return PTR_ERR(src);

	/* legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);

		WARN_ON(1);
		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
			       16, 1, src + inputmargin, rq->inputsize, true);
		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);

		/* zero the undecoded tail so stale data never leaks out */
		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EIO;
	} else {
		ret = 0;
	}

	/* undo whatever input mapping handle_inplace_io() set up */
	if (maptype == 0) {
		kunmap_atomic(src);
	} else if (maptype == 1) {
		vm_unmap_ram(src, PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT);
	} else if (maptype == 2) {
		erofs_put_pcpubuf(src);
	} else {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}

/*
 * LZ4 backend entry point: set up a linear view of the output pages
 * (direct kmap, page_address for consecutive pages, or vm_map_ram),
 * then run z_erofs_lz4_decompress_mem() against it.
 *
 * Returns 0 on success or a negative errno.
 */
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
				  struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	unsigned int dst_maptype;	/* 0: kmap, 1: page_address, 2: vmap */
	void *dst;
	int ret;

	/* one optimized fast path only for non bigpcluster cases yet */
	if (rq->inputsize <= PAGE_SIZE && nrpages_out == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_atomic(*rq->out);
		dst_maptype = 0;
		goto dstmap_out;
	}

	/* general decoding path which can be used for all cases */
	ret = z_erofs_lz4_prepare_dstpages(rq, pagepool);
	if (ret < 0)
		return ret;
	if (ret) {
		/* all output pages are physically consecutive: no vmap */
		dst = page_address(*rq->out);
		dst_maptype = 1;
		goto dstmap_out;
	}

	dst = erofs_vm_map_ram(rq->out, nrpages_out);
	if (!dst)
		return -ENOMEM;
	dst_maptype = 2;

dstmap_out:
	ret = z_erofs_lz4_decompress_mem(rq, dst + rq->pageofs_out);

	if (!dst_maptype)
		kunmap_atomic(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, nrpages_out);
	return ret;
}

/*
 * "shifted" (uncompressed) transform: the plain data is stored as-is and
 * only needs to be copied/moved into the output pages at pageofs_out.
 * The output spans at most two pages by construction.
 *
 * Returns 0 on success or -EIO on an impossible page count.
 */
static int z_erofs_shifted_transform(struct z_erofs_decompress_req *rq,
				     struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out;
	unsigned char *src, *dst;

	if (nrpages_out > 2) {
		DBG_BUGON(1);
		return -EIO;
	}

	/* in-place and single page: data is already where it belongs */
	if (rq->out[0] == *rq->in) {
		DBG_BUGON(nrpages_out != 1);
		return 0;
	}

	src = kmap_atomic(*rq->in);
	if (rq->out[0]) {
		dst = kmap_atomic(rq->out[0]);
		memcpy(dst + rq->pageofs_out, src, righthalf);
		kunmap_atomic(dst);
	}

	if (nrpages_out == 2) {
		DBG_BUGON(!rq->out[1]);
		if (rq->out[1] == *rq->in) {
			/* second page is the input itself: shift in place */
			memmove(src, src + righthalf, rq->pageofs_out);
		} else {
			dst = kmap_atomic(rq->out[1]);
			memcpy(dst, src + righthalf, rq->pageofs_out);
			kunmap_atomic(dst);
		}
	}
	kunmap_atomic(src);
	return 0;
}

/* backends indexed by the on-disk algorithm id (rq->alg) */
static struct z_erofs_decompressor decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.decompress = z_erofs_shifted_transform,
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.decompress = z_erofs_lz4_decompress,
		.name = "lz4"
	},
};

/*
 * Dispatch a decompression request to the backend selected by rq->alg.
 * NOTE(review): rq->alg is presumably validated against the table bounds
 * by the caller before this point — confirm at the call sites.
 */
int z_erofs_decompress(struct z_erofs_decompress_req *rq,
		       struct list_head *pagepool)
{
	return decompressors[rq->alg].decompress(rq, pagepool);
}