// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 *
 * EROFS decompression backends: the "shifted" (uncompressed, offset-moved)
 * transform and the LZ4 decompressor, plus the dispatch table used by
 * z_erofs_decompress().
 */
#include "compress.h"
#include <linux/module.h>
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX	65535	/* set to maximum value by default */
#endif

/* worst-case number of pages an LZ4 match can reach back across */
#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
/* safety margin required at the tail for in-place LZ4 decompression */
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)  (((srcsize) >> 8) + 32)
#endif

struct z_erofs_lz4_decompress_ctx {
	struct z_erofs_decompress_req *rq;
	/* # of encoded, decoded pages */
	unsigned int inpages, outpages;
	/* decoded block total length (used for in-place decompression) */
	unsigned int oend;
};

/*
 * Parse and validate the LZ4 on-disk configuration (@lz4, from the compressed
 * cfgs area) or fall back to the legacy superblock field (@dsb) when @lz4 is
 * NULL.  Records max_pclusterblks and max_distance_pages in the sb_info and
 * grows the per-CPU buffers accordingly.
 *
 * Returns 0 on success or a negative errno (-EINVAL on malformed cfgs, or
 * whatever erofs_pcpubuf_growsize() reports).
 */
int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb,
			    struct z_erofs_lz4_cfgs *lz4, int size)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	u16 distance;

	if (lz4) {
		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
			return -EINVAL;
		}
		distance = le16_to_cpu(lz4->max_distance);

		sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
		if (!sbi->lz4.max_pclusterblks) {
			sbi->lz4.max_pclusterblks = 1;	/* reserved case */
		} else if (sbi->lz4.max_pclusterblks >
			   Z_EROFS_PCLUSTER_MAX_SIZE / EROFS_BLKSIZ) {
			erofs_err(sb, "too large lz4 pclusterblks %u",
				  sbi->lz4.max_pclusterblks);
			return -EINVAL;
		}
	} else {
		/* no cfgs record on disk: use the legacy superblock field */
		distance = le16_to_cpu(dsb->u1.lz4_max_distance);
		sbi->lz4.max_pclusterblks = 1;
	}

	/* distance == 0 means "unknown": assume the worst-case window */
	sbi->lz4.max_distance_pages = distance ?
					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
					LZ4_MAX_DISTANCE_PAGES;
	return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
}

/*
 * Fill all gaps with bounce pages if it's a sparse page list. Also check if
 * all physical pages are consecutive, which can be seen for moderate CR.
 *
 * Holes in rq->out[] are plugged either with a recycled page that has rotated
 * out of the LZ4 match window (tracked via the @bounced bitmap so it can no
 * longer be referenced by a back-match) or, failing that, with a freshly
 * allocated short-lived page.
 *
 * Returns 1 if every output page turned out to be physically consecutive
 * (so the caller can decompress straight into page_address(*rq->out)),
 * 0 otherwise, in which case a vmap is needed.
 */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
					struct page **pagepool)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	for (i = j = 0; i < ctx->outpages; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		/* j walks a ring of size lz4_max_distance_pages */
		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			/* this bounce page fell out of the window: reuse it */
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (kaddr) {
				/* still consecutive with the previous page? */
				if (kaddr + PAGE_SIZE == page_address(page))
					kaddr += PAGE_SIZE;
				else
					kaddr = NULL;
			} else if (!i) {
				kaddr = page_address(page);
			}
			continue;
		}
		/* a hole: any gap breaks physical contiguity */
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool,
						 GFP_KERNEL | __GFP_NOFAIL);
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
	return kaddr ? 1 : 0;
}

/*
 * Decide how the compressed input should be mapped for decompression and
 * return the resulting virtual address (or an ERR_PTR).
 *
 * *maptype reports which mapping was chosen so the caller can undo it:
 *   0 - @inpage (the caller's kmap_atomic of the single input page) is used
 *       directly;
 *   1 - the input pages were vmapped via erofs_vm_map_ram();
 *   2 - the (possibly overlapping) input was copied into a per-CPU buffer.
 *
 * In-place I/O only avoids the copy when decoding is not partial, the tail
 * margin is large enough, and no input page also appears in the output list.
 * On the maptype 1/2 paths @inpage has already been kunmapped here; on error
 * paths any atomic mapping taken by this function is released as well.
 */
static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
			void *inpage, unsigned int *inputmargin, int *maptype,
			bool may_inplace)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	unsigned int omargin, total, i, j;
	struct page **in;
	void *src, *tmp;

	if (rq->inplace_io) {
		omargin = PAGE_ALIGN(ctx->oend) - ctx->oend;
		if (rq->partial_decoding || !may_inplace ||
		    omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
			goto docopy;

		/* any input page shared with an output slot forces a copy */
		for (i = 0; i < ctx->inpages; ++i) {
			DBG_BUGON(rq->in[i] == NULL);
			for (j = 0; j < ctx->outpages - ctx->inpages + i; ++j)
				if (rq->out[j] == rq->in[i])
					goto docopy;
		}
	}

	if (ctx->inpages <= 1) {
		*maptype = 0;
		return inpage;
	}
	kunmap_atomic(inpage);
	might_sleep();
	src = erofs_vm_map_ram(rq->in, ctx->inpages);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/* Or copy compressed data which can be overlapped to per-CPU buffer */
	in = rq->in;
	src = erofs_get_pcpubuf(ctx->inpages);
	if (!src) {
		DBG_BUGON(1);
		kunmap_atomic(inpage);
		return ERR_PTR(-EFAULT);
	}

	/* copy page by page, honouring the first page's *inputmargin */
	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

		if (!inpage)
			inpage = kmap_atomic(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_atomic(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		*inputmargin = 0;	/* only the first page is offset */
	}
	*maptype = 2;
	return src;
}

/*
 * Get the exact inputsize with zero_padding feature.
 * - For LZ4, it should work if zero_padding feature is on (5.3+);
 * - For MicroLZMA, it'd be enabled all the time.
 *
 * Scans @padbuf for the first non-zero byte and shrinks rq->inputsize /
 * advances rq->pageofs_in by the amount of leading zero padding.  Returns
 * -EFSCORRUPTED if the whole buffer is zero (no data found).
 */
int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
			 unsigned int padbufsize)
{
	const char *padend;

	padend = memchr_inv(padbuf, 0, padbufsize);
	if (!padend)
		return -EFSCORRUPTED;
	rq->inputsize -= padend - padbuf;
	rq->pageofs_in += padend - padbuf;
	return 0;
}

/*
 * Decompress the LZ4 pcluster described by @ctx into the linear buffer @out.
 *
 * Strips zero padding first (when the zero_padding feature is enabled), maps
 * the input via z_erofs_lz4_handle_overlap(), then runs LZ4; on size mismatch
 * the output is dumped, zero-filled past the decoded length and -EIO is
 * returned.  The input mapping is released according to the maptype chosen
 * above.  Returns 0 on success or a negative errno.
 */
static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
				      u8 *out)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	bool support_0padding = false, may_inplace = false;
	unsigned int inputmargin;
	u8 *headpage, *src;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_atomic(*rq->in);

	/* LZ4 decompression inplace is only safe if zero_padding is enabled */
	if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
		support_0padding = true;
		ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
				min_t(unsigned int, rq->inputsize,
				      EROFS_BLKSIZ - rq->pageofs_in));
		if (ret) {
			kunmap_atomic(headpage);
			return ret;
		}
		/* in-place only works when input ends on a block boundary */
		may_inplace = !((rq->pageofs_in + rq->inputsize) &
				(EROFS_BLKSIZ - 1));
	}

	inputmargin = rq->pageofs_in;
	src = z_erofs_lz4_handle_overlap(ctx, headpage, &inputmargin,
					 &maptype, may_inplace);
	if (IS_ERR(src))
		return PTR_ERR(src);

	/* legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);

		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
			       16, 1, src + inputmargin, rq->inputsize, true);
		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);

		/* zero the undecoded tail so no stale data leaks out */
		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EIO;
	} else {
		ret = 0;
	}

	/* tear down whichever input mapping handle_overlap() set up */
	if (maptype == 0) {
		kunmap_atomic(headpage);
	} else if (maptype == 1) {
		vm_unmap_ram(src, ctx->inpages);
	} else if (maptype == 2) {
		erofs_put_pcpubuf(src);
	} else {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}

/*
 * LZ4 entry point of the decompressor dispatch table: set up the per-request
 * context, map the destination pages (fast single-page kmap, direct physical
 * address when consecutive, or vmap otherwise) and decompress into it.
 */
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
				  struct page **pagepool)
{
	struct z_erofs_lz4_decompress_ctx ctx;
	unsigned int dst_maptype;
	void *dst;
	int ret;

	ctx.rq = rq;
	ctx.oend = rq->pageofs_out + rq->outputsize;
	ctx.outpages = PAGE_ALIGN(ctx.oend) >> PAGE_SHIFT;
	ctx.inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;

	/* one optimized fast path only for non bigpcluster cases yet */
	if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_atomic(*rq->out);
		dst_maptype = 0;
		goto dstmap_out;
	}

	/* general decoding path which can be used for all cases */
	ret = z_erofs_lz4_prepare_dstpages(&ctx, pagepool);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		/* pages are physically consecutive: no vmap needed */
		dst = page_address(*rq->out);
		dst_maptype = 1;
	} else {
		dst = erofs_vm_map_ram(rq->out, ctx.outpages);
		if (!dst)
			return -ENOMEM;
		dst_maptype = 2;
	}

dstmap_out:
	ret = z_erofs_lz4_decompress_mem(&ctx, dst + rq->pageofs_out);
	if (!dst_maptype)
		kunmap_atomic(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, ctx.outpages);
	return ret;
}

/*
 * "Decompress" uncompressed (shifted) data: copy at most two pages' worth of
 * data from the single input page to the output slots, adjusting for
 * pageofs_in/pageofs_out.  When an output page aliases the input page the
 * data is left (out[0]) or memmove'd (out[1], overlapping) in place.
 */
static int z_erofs_shifted_transform(struct z_erofs_decompress_req *rq,
				     struct page **pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	/* part of the output that fits in the first page after pageofs_out */
	const unsigned int righthalf = min_t(unsigned int, rq->outputsize,
					     PAGE_SIZE - rq->pageofs_out);
	/* remainder spilling into the second output page (0 if none) */
	const unsigned int lefthalf = rq->outputsize - righthalf;
	unsigned char *src, *dst;

	if (nrpages_out > 2) {
		DBG_BUGON(1);
		return -EIO;
	}

	if (rq->out[0] == *rq->in) {
		/* data is already in place */
		DBG_BUGON(nrpages_out != 1);
		return 0;
	}

	src = kmap_atomic(*rq->in) + rq->pageofs_in;
	if (rq->out[0]) {
		dst = kmap_atomic(rq->out[0]);
		memcpy(dst + rq->pageofs_out, src, righthalf);
		kunmap_atomic(dst);
	}

	if (nrpages_out == 2) {
		DBG_BUGON(!rq->out[1]);
		if (rq->out[1] == *rq->in) {
			/* overlapping ranges within the same page */
			memmove(src, src + righthalf, lefthalf);
		} else {
			dst = kmap_atomic(rq->out[1]);
			memcpy(dst, src + righthalf, lefthalf);
			kunmap_atomic(dst);
		}
	}
	kunmap_atomic(src);
	return 0;
}

/* dispatch table indexed by the on-disk algorithm id (rq->alg) */
static struct z_erofs_decompressor decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.decompress = z_erofs_shifted_transform,
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.decompress = z_erofs_lz4_decompress,
		.name = "lz4"
	},
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
	[Z_EROFS_COMPRESSION_LZMA] = {
		.decompress = z_erofs_lzma_decompress,
		.name = "lzma"
	},
#endif
};

/* Dispatch @rq to the backend selected by rq->alg. */
int z_erofs_decompress(struct z_erofs_decompress_req *rq,
		       struct page **pagepool)
{
	return decompressors[rq->alg].decompress(rq, pagepool);
}