147e4937aSGao Xiang // SPDX-License-Identifier: GPL-2.0-only 247e4937aSGao Xiang /* 347e4937aSGao Xiang * Copyright (C) 2019 HUAWEI, Inc. 4592e7cd0SAlexander A. Klimov * https://www.huawei.com/ 547e4937aSGao Xiang */ 647e4937aSGao Xiang #include "compress.h" 747e4937aSGao Xiang #include <linux/module.h> 847e4937aSGao Xiang #include <linux/lz4.h> 947e4937aSGao Xiang 1047e4937aSGao Xiang #ifndef LZ4_DISTANCE_MAX /* history window size */ 1147e4937aSGao Xiang #define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */ 1247e4937aSGao Xiang #endif 1347e4937aSGao Xiang 1447e4937aSGao Xiang #define LZ4_MAX_DISTANCE_PAGES (DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1) 1547e4937aSGao Xiang #ifndef LZ4_DECOMPRESS_INPLACE_MARGIN 1647e4937aSGao Xiang #define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize) (((srcsize) >> 8) + 32) 1747e4937aSGao Xiang #endif 1847e4937aSGao Xiang 19d67aee76SGao Xiang struct z_erofs_lz4_decompress_ctx { 20d67aee76SGao Xiang struct z_erofs_decompress_req *rq; 21d67aee76SGao Xiang /* # of encoded, decoded pages */ 22d67aee76SGao Xiang unsigned int inpages, outpages; 23d67aee76SGao Xiang /* decoded block total length (used for in-place decompression) */ 24d67aee76SGao Xiang unsigned int oend; 25d67aee76SGao Xiang }; 26d67aee76SGao Xiang 275d50538fSHuang Jianan int z_erofs_load_lz4_config(struct super_block *sb, 2846249cdeSGao Xiang struct erofs_super_block *dsb, 2946249cdeSGao Xiang struct z_erofs_lz4_cfgs *lz4, int size) 305d50538fSHuang Jianan { 314fea63f7SGao Xiang struct erofs_sb_info *sbi = EROFS_SB(sb); 3246249cdeSGao Xiang u16 distance; 3346249cdeSGao Xiang 3446249cdeSGao Xiang if (lz4) { 3546249cdeSGao Xiang if (size < sizeof(struct z_erofs_lz4_cfgs)) { 3646249cdeSGao Xiang erofs_err(sb, "invalid lz4 cfgs, size=%u", size); 3746249cdeSGao Xiang return -EINVAL; 3846249cdeSGao Xiang } 3946249cdeSGao Xiang distance = le16_to_cpu(lz4->max_distance); 404fea63f7SGao Xiang 414fea63f7SGao Xiang sbi->lz4.max_pclusterblks = 
le16_to_cpu(lz4->max_pclusterblks); 424fea63f7SGao Xiang if (!sbi->lz4.max_pclusterblks) { 434fea63f7SGao Xiang sbi->lz4.max_pclusterblks = 1; /* reserved case */ 444fea63f7SGao Xiang } else if (sbi->lz4.max_pclusterblks > 454fea63f7SGao Xiang Z_EROFS_PCLUSTER_MAX_SIZE / EROFS_BLKSIZ) { 464fea63f7SGao Xiang erofs_err(sb, "too large lz4 pclusterblks %u", 474fea63f7SGao Xiang sbi->lz4.max_pclusterblks); 484fea63f7SGao Xiang return -EINVAL; 494fea63f7SGao Xiang } else if (sbi->lz4.max_pclusterblks >= 2) { 504fea63f7SGao Xiang erofs_info(sb, "EXPERIMENTAL big pcluster feature in use. Use at your own risk!"); 514fea63f7SGao Xiang } 5246249cdeSGao Xiang } else { 5314373711SGao Xiang distance = le16_to_cpu(dsb->u1.lz4_max_distance); 544fea63f7SGao Xiang sbi->lz4.max_pclusterblks = 1; 5546249cdeSGao Xiang } 565d50538fSHuang Jianan 574fea63f7SGao Xiang sbi->lz4.max_distance_pages = distance ? 585d50538fSHuang Jianan DIV_ROUND_UP(distance, PAGE_SIZE) + 1 : 595d50538fSHuang Jianan LZ4_MAX_DISTANCE_PAGES; 604fea63f7SGao Xiang return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks); 615d50538fSHuang Jianan } 625d50538fSHuang Jianan 63966edfb0SGao Xiang /* 64966edfb0SGao Xiang * Fill all gaps with bounce pages if it's a sparse page list. Also check if 65966edfb0SGao Xiang * all physical pages are consecutive, which can be seen for moderate CR. 
/*
 * Fill all gaps with bounce pages if it's a sparse page list. Also check if
 * all physical pages are consecutive, which can be seen for moderate CR.
 *
 * Returns 1 when every output page turned out to be physically consecutive
 * (so the caller can use the direct kernel address), 0 otherwise, or a
 * negative errno on failure.
 */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
					struct page **pagepool)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	/* recently-retired bounce pages that can be reused for later gaps */
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	/* bitmap: slot j (mod window size) currently holds a bounce page */
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	/* j walks a circular window of lz4_max_distance_pages slots */
	for (i = j = 0; i < ctx->outpages; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			/*
			 * the bounce page placed a full window ago is now out
			 * of LZ4 match range and may be recycled
			 */
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (kaddr) {
				/* still contiguous with the previous page? */
				if (kaddr + PAGE_SIZE == page_address(page))
					kaddr += PAGE_SIZE;
				else
					kaddr = NULL;
			} else if (!i) {
				kaddr = page_address(page);
			}
			continue;
		}
		/* a hole in the page list breaks physical contiguity */
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			/* reuse an expired bounce page from the window */
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool,
						 GFP_KERNEL | __GFP_NOFAIL);
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
	return kaddr ? 1 : 0;
}

/*
 * Make the compressed source virtually contiguous for the LZ4 decoder.
 *
 * On success *maptype records how the returned buffer was obtained so the
 * caller can tear it down: 0 - @inpage itself (single kmapped page),
 * 1 - erofs_vm_map_ram() mapping of rq->in, 2 - per-CPU buffer holding a
 * copy of the compressed data.  Returns an ERR_PTR() on failure.
 */
static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
			void *inpage, unsigned int *inputmargin, int *maptype,
			bool may_inplace)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	unsigned int omargin, total, i, j;
	struct page **in;
	void *src, *tmp;

	if (rq->inplace_io) {
		omargin = PAGE_ALIGN(ctx->oend) - ctx->oend;
		/* in-place decoding needs the full LZ4 safety margin */
		if (rq->partial_decoding || !may_inplace ||
		    omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
			goto docopy;

		/* every input page must sit at its in-place output slot */
		for (i = 0; i < ctx->inpages; ++i) {
			DBG_BUGON(rq->in[i] == NULL);
			for (j = 0; j < ctx->outpages - ctx->inpages + i; ++j)
				if (rq->out[j] == rq->in[i])
					goto docopy;
		}
	}

	if (ctx->inpages <= 1) {
		*maptype = 0;
		return inpage;
	}
	kunmap_atomic(inpage);
	might_sleep();
	src = erofs_vm_map_ram(rq->in, ctx->inpages);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/* Or copy compressed data which can be overlapped to per-CPU buffer */
	in = rq->in;
	src = erofs_get_pcpubuf(ctx->inpages);
	if (!src) {
		DBG_BUGON(1);
		kunmap_atomic(inpage);
		return ERR_PTR(-EFAULT);
	}

	/* copy page by page, skipping *inputmargin bytes of the first page */
	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

		if (!inpage)
			inpage = kmap_atomic(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_atomic(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		*inputmargin = 0;	/* margin only applies to page 0 */
	}
	*maptype = 2;
	return src;
}
/*
 * Get the exact inputsize with zero_padding feature.
 * - For LZ4, it should work if zero_padding feature is on (5.3+);
 * - For MicroLZMA, it'd be enabled all the time.
 *
 * Strips the leading zero padding off the compressed data by shrinking
 * rq->inputsize and advancing rq->pageofs_in.  Returns 0 on success or
 * -EFSCORRUPTED when the whole buffer is zeroes.
 */
int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
			 unsigned int padbufsize)
{
	const char *padend;

	padend = memchr_inv(padbuf, 0, padbufsize);
	if (!padend)
		return -EFSCORRUPTED;
	rq->inputsize -= padend - padbuf;
	rq->pageofs_in += padend - padbuf;
	return 0;
}

/*
 * Decompress one LZ4 pcluster into the already-prepared contiguous output
 * buffer @out.  Returns 0 on success or a negative errno.
 */
static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
				      u8 *out)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	bool support_0padding = false, may_inplace = false;
	unsigned int inputmargin;
	u8 *headpage, *src;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_atomic(*rq->in);

	/* LZ4 decompression inplace is only safe if zero_padding is enabled */
	if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
		support_0padding = true;
		ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
				min_t(unsigned int, rq->inputsize,
				      EROFS_BLKSIZ - rq->pageofs_in));
		if (ret) {
			kunmap_atomic(headpage);
			return ret;
		}
		/* in-place only if the compressed data ends block-aligned */
		may_inplace = !((rq->pageofs_in + rq->inputsize) &
				(EROFS_BLKSIZ - 1));
	}

	inputmargin = rq->pageofs_in;
	src = z_erofs_lz4_handle_overlap(ctx, headpage, &inputmargin,
					 &maptype, may_inplace);
	if (IS_ERR(src))
		return PTR_ERR(src);

	/* legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);

		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
			       16, 1, src + inputmargin, rq->inputsize, true);
		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);

		/* zero the undecoded tail so no stale data leaks out */
		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EIO;
	} else {
		ret = 0;
	}

	/* tear down the source mapping per z_erofs_lz4_handle_overlap() */
	if (maptype == 0) {
		kunmap_atomic(headpage);
	} else if (maptype == 1) {
		vm_unmap_ram(src, ctx->inpages);
	} else if (maptype == 2) {
		erofs_put_pcpubuf(src);
	} else {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}
z_erofs_decompress_req *rq, 275eaa9172aSGao Xiang struct page **pagepool) 27647e4937aSGao Xiang { 277d67aee76SGao Xiang struct z_erofs_lz4_decompress_ctx ctx; 27847e4937aSGao Xiang unsigned int dst_maptype; 27947e4937aSGao Xiang void *dst; 280598162d0SGao Xiang int ret; 28147e4937aSGao Xiang 282d67aee76SGao Xiang ctx.rq = rq; 283d67aee76SGao Xiang ctx.oend = rq->pageofs_out + rq->outputsize; 284d67aee76SGao Xiang ctx.outpages = PAGE_ALIGN(ctx.oend) >> PAGE_SHIFT; 285d67aee76SGao Xiang ctx.inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT; 286d67aee76SGao Xiang 2875b6e7e12SYue Hu /* one optimized fast path only for non bigpcluster cases yet */ 288d67aee76SGao Xiang if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) { 28947e4937aSGao Xiang DBG_BUGON(!*rq->out); 29047e4937aSGao Xiang dst = kmap_atomic(*rq->out); 29147e4937aSGao Xiang dst_maptype = 0; 29247e4937aSGao Xiang goto dstmap_out; 29347e4937aSGao Xiang } 29447e4937aSGao Xiang 295598162d0SGao Xiang /* general decoding path which can be used for all cases */ 296d67aee76SGao Xiang ret = z_erofs_lz4_prepare_dstpages(&ctx, pagepool); 297d67aee76SGao Xiang if (ret < 0) { 29847e4937aSGao Xiang return ret; 299d67aee76SGao Xiang } else if (ret > 0) { 30047e4937aSGao Xiang dst = page_address(*rq->out); 30147e4937aSGao Xiang dst_maptype = 1; 302d67aee76SGao Xiang } else { 303d67aee76SGao Xiang dst = erofs_vm_map_ram(rq->out, ctx.outpages); 30447e4937aSGao Xiang if (!dst) 30547e4937aSGao Xiang return -ENOMEM; 30647e4937aSGao Xiang dst_maptype = 2; 307d67aee76SGao Xiang } 30847e4937aSGao Xiang 30947e4937aSGao Xiang dstmap_out: 310d67aee76SGao Xiang ret = z_erofs_lz4_decompress_mem(&ctx, dst + rq->pageofs_out); 31147e4937aSGao Xiang if (!dst_maptype) 31247e4937aSGao Xiang kunmap_atomic(dst); 31347e4937aSGao Xiang else if (dst_maptype == 2) 314d67aee76SGao Xiang vm_unmap_ram(dst, ctx.outpages); 31547e4937aSGao Xiang return ret; 31647e4937aSGao Xiang } 31747e4937aSGao Xiang 318966edfb0SGao Xiang static int 
/*
 * Handle the "shifted" (uncompressed, possibly offset) transform: copy the
 * plain data from the single input page into at most two output pages,
 * honouring the output offset.  Supports in-place I/O where an output page
 * aliases the input page.
 */
static int z_erofs_shifted_transform(struct z_erofs_decompress_req *rq,
				     struct page **pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	/* bytes that land in the first output page */
	const unsigned int righthalf = min_t(unsigned int, rq->outputsize,
					     PAGE_SIZE - rq->pageofs_out);
	unsigned char *src, *dst;

	/* shifted data can never span more than two output pages */
	if (nrpages_out > 2) {
		DBG_BUGON(1);
		return -EIO;
	}

	/* fully in-place: output already holds the data, nothing to move */
	if (rq->out[0] == *rq->in) {
		DBG_BUGON(nrpages_out != 1);
		return 0;
	}

	src = kmap_atomic(*rq->in) + rq->pageofs_in;
	if (rq->out[0]) {
		dst = kmap_atomic(rq->out[0]);
		memcpy(dst + rq->pageofs_out, src, righthalf);
		kunmap_atomic(dst);
	}

	if (nrpages_out == 2) {
		DBG_BUGON(!rq->out[1]);
		if (rq->out[1] == *rq->in) {
			/* second page aliases the source: shift within it */
			memmove(src, src + righthalf, rq->pageofs_out);
		} else {
			dst = kmap_atomic(rq->out[1]);
			memcpy(dst, src + righthalf, rq->pageofs_out);
			kunmap_atomic(dst);
		}
	}
	kunmap_atomic(src);
	return 0;
}

/* per-algorithm decompressor table, indexed by the on-disk algorithm id */
static struct z_erofs_decompressor decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.decompress = z_erofs_shifted_transform,
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.decompress = z_erofs_lz4_decompress,
		.name = "lz4"
	},
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
	[Z_EROFS_COMPRESSION_LZMA] = {
		.decompress = z_erofs_lzma_decompress,
		.name = "lzma"
	},
#endif
};

/* dispatch a decompression request to the algorithm recorded in rq->alg */
int z_erofs_decompress(struct z_erofs_decompress_req *rq,
		       struct page **pagepool)
{
	return decompressors[rq->alg].decompress(rq, pagepool);
}