// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "compress.h"
#include <linux/module.h>
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX	65535	/* set to maximum value by default */
#endif

#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)
#endif

struct z_erofs_decompressor {
	/*
	 * if destpages have sparse pages, fill them with bounce pages.
	 * it also checks whether destpages indicate continuous physical memory.
	 */
	int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
				 struct list_head *pagepool);
	int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
	char *name;
};

static bool use_vmap;
module_param(use_vmap, bool, 0444);
MODULE_PARM_DESC(use_vmap, "Use vmap() instead of vm_map_ram() (default 0)");

static int lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
				 struct list_head *pagepool)
{
	const unsigned int nr =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	for (i = j = 0; i < nr; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= LZ4_MAX_DISTANCE_PAGES)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (test_bit(j, bounced)) {
			DBG_BUGON(i < LZ4_MAX_DISTANCE_PAGES);
			DBG_BUGON(top >= LZ4_MAX_DISTANCE_PAGES);
			availables[top++] = rq->out[i - LZ4_MAX_DISTANCE_PAGES];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (kaddr) {
				if (kaddr + PAGE_SIZE == page_address(page))
					kaddr += PAGE_SIZE;
				else
					kaddr = NULL;
			} else if (!i) {
				kaddr = page_address(page);
			}
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool, GFP_KERNEL, false);
			if (unlikely(!victim))
				return -ENOMEM;
			victim->mapping = Z_EROFS_MAPPING_STAGING;
		}
		rq->out[i] = victim;
	}
	/* return 1 if the destination pages are physically contiguous */
	return kaddr ? 1 : 0;
}

static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq,
				       u8 *src, unsigned int pageofs_in)
{
	/*
	 * if in-place decompression is ongoing, copy the compressed data
	 * to a per-CPU buffer so it won't be overwritten by the
	 * decompressed output.
	 */
	struct page **in = rq->in;
	u8 *const tmp = erofs_get_pcpubuf(0);
	u8 *tmpp = tmp;
	unsigned int inlen = rq->inputsize - pageofs_in;
	unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in);

	while (tmpp < tmp + inlen) {
		if (!src)
			src = kmap_atomic(*in);
		memcpy(tmpp, src + pageofs_in, count);
		kunmap_atomic(src);
		src = NULL;
		tmpp += count;
		pageofs_in = 0;
		count = PAGE_SIZE;
		++in;
	}
	return tmp;
}

static int lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
{
	unsigned int inputmargin, inlen;
	u8 *src;
	bool copied, support_0padding;
	int ret;

	if (rq->inputsize > PAGE_SIZE)
		return -EOPNOTSUPP;

	src = kmap_atomic(*rq->in);
	inputmargin = 0;
	support_0padding = false;

	/* in-place decompression is only safe when 0padding is enabled */
	if (EROFS_SB(rq->sb)->requirements & EROFS_REQUIREMENT_LZ4_0PADDING) {
		support_0padding = true;

		/* skip the leading zero padding before the compressed data */
		while (!src[inputmargin & ~PAGE_MASK])
			if (!(++inputmargin & ~PAGE_MASK))
				break;

		if (inputmargin >= rq->inputsize) {
			kunmap_atomic(src);
			return -EIO;
		}
	}

	copied = false;
	inlen = rq->inputsize - inputmargin;
	if (rq->inplace_io) {
		const uint oend = (rq->pageofs_out +
				   rq->outputsize) & ~PAGE_MASK;
		const uint nr = PAGE_ALIGN(rq->pageofs_out +
					   rq->outputsize) >> PAGE_SHIFT;

		if (rq->partial_decoding || !support_0padding ||
		    rq->out[nr - 1] != rq->in[0] ||
		    rq->inputsize - oend <
		      LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) {
			src = generic_copy_inplace_data(rq, src, inputmargin);
			inputmargin = 0;
			copied = true;
		}
	}

	ret = LZ4_decompress_safe_partial(src + inputmargin, out,
					  inlen, rq->outputsize,
					  rq->outputsize);
	if (ret < 0) {
		errln("%s, failed to decompress, in[%p, %u, %u] out[%p, %u]",
		      __func__, src + inputmargin, inlen, inputmargin,
		      out, rq->outputsize);
		WARN_ON(1);
		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
			       16, 1, src + inputmargin, inlen, true);
		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);
		ret = -EIO;
	}

	if (copied)
		erofs_put_pcpubuf(src);
	else
		kunmap_atomic(src);
	return ret;
}

static struct z_erofs_decompressor decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.prepare_destpages = lz4_prepare_destpages,
		.decompress = lz4_decompress,
		.name = "lz4"
	},
};

static void copy_from_pcpubuf(struct page **out, const char *dst,
			      unsigned short pageofs_out,
			      unsigned int outputsize)
{
	const char *end = dst + outputsize;
	const unsigned int righthalf = PAGE_SIZE - pageofs_out;
	const char *cur = dst - pageofs_out;

	while (cur < end) {
		struct page *const page = *out++;

		if (page) {
			char *buf = kmap_atomic(page);

			if (cur >= dst) {
				memcpy(buf, cur, min_t(uint, PAGE_SIZE,
						       end - cur));
			} else {
				memcpy(buf + pageofs_out, cur + pageofs_out,
				       min_t(uint, righthalf, end - cur));
			}
			kunmap_atomic(buf);
		}
		cur += PAGE_SIZE;
	}
}

static void *erofs_vmap(struct page **pages, unsigned int count)
{
	int i = 0;

	if (use_vmap)
		return vmap(pages, count, VM_MAP, PAGE_KERNEL);

	while (1) {
		void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL);

		/* retry two more times (3 attempts in total) */
		if (addr || ++i >= 3)
			return addr;
		vm_unmap_aliases();
	}
	return NULL;
}

static void erofs_vunmap(const void *mem, unsigned int count)
{
	if (!use_vmap)
		vm_unmap_ram(mem, count);
	else
		vunmap(mem);
}

static int decompress_generic(struct z_erofs_decompress_req *rq,
			      struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const struct z_erofs_decompressor *alg = decompressors + rq->alg;
	/* dst_maptype: 0 - kmap'ed page, 1 - contiguous pages, 2 - vmap'ed */
	unsigned int dst_maptype;
	void *dst;
	int ret;

	if (nrpages_out == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_atomic(*rq->out);
		dst_maptype = 0;
		goto dstmap_out;
	}

	/*
	 * For a small output size (especially much less than PAGE_SIZE),
	 * it is preferable to memcpy the decompressed data rather than
	 * the compressed data.
	 */
	if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
		dst = erofs_get_pcpubuf(0);
		if (IS_ERR(dst))
			return PTR_ERR(dst);

		rq->inplace_io = false;
		ret = alg->decompress(rq, dst);
		if (!ret)
			copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
					  rq->outputsize);

		erofs_put_pcpubuf(dst);
		return ret;
	}

	ret = alg->prepare_destpages(rq, pagepool);
	if (ret < 0) {
		return ret;
	} else if (ret) {
		dst = page_address(*rq->out);
		dst_maptype = 1;
		goto dstmap_out;
	}

	dst = erofs_vmap(rq->out, nrpages_out);
	if (!dst)
		return -ENOMEM;
	dst_maptype = 2;

dstmap_out:
	ret = alg->decompress(rq, dst + rq->pageofs_out);

	if (!dst_maptype)
		kunmap_atomic(dst);
	else if (dst_maptype == 2)
		erofs_vunmap(dst, nrpages_out);
	return ret;
}

static int shifted_decompress(const struct z_erofs_decompress_req *rq,
			      struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out;
	unsigned char *src, *dst;

	if (nrpages_out > 2) {
		DBG_BUGON(1);
		return -EIO;
	}

	if (rq->out[0] == *rq->in) {
		DBG_BUGON(nrpages_out != 1);
		return 0;
	}

	src = kmap_atomic(*rq->in);
	if (!rq->out[0]) {
		dst = NULL;
	} else {
		dst = kmap_atomic(rq->out[0]);
		memcpy(dst + rq->pageofs_out, src, righthalf);
	}

	if (rq->out[1] == *rq->in) {
		memmove(src, src + righthalf, rq->pageofs_out);
	} else if (nrpages_out == 2) {
		if (dst)
			kunmap_atomic(dst);
		DBG_BUGON(!rq->out[1]);
		dst = kmap_atomic(rq->out[1]);
		memcpy(dst, src + righthalf, rq->pageofs_out);
	}
	if (dst)
		kunmap_atomic(dst);
	kunmap_atomic(src);
	return 0;
}

int z_erofs_decompress(struct z_erofs_decompress_req *rq,
		       struct list_head *pagepool)
{
	if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED)
		return shifted_decompress(rq, pagepool);
	return decompress_generic(rq, pagepool);
}