// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "compress.h"
#include <linux/module.h>
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX 65535	/* set to maximum value by default */
#endif

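/* the LZ4 match window measured in pages; +1 since a full window may straddle a page boundary */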
#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
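/*
 * Tail margin required for safe in-place decompression; this fallback
 * apparently mirrors LZ4_DECOMPRESS_INPLACE_MARGIN from the userspace
 * lz4.h ((srcsize >> 8) + 32 bytes of worst-case overrun).
 */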
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)  (((srcsize) >> 8) + 32)
#endif

struct z_erofs_lz4_decompress_ctx {
	struct z_erofs_decompress_req *rq;
	/* # of encoded, decoded pages */
	unsigned int inpages, outpages;
	/* decoded block total length (used for in-place decompression) */
	unsigned int oend;
};

int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb,
			    struct z_erofs_lz4_cfgs *lz4, int size)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	u16 distance;

	if (lz4) {
		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
			return -EINVAL;
		}
		distance = le16_to_cpu(lz4->max_distance);

		sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
		if (!sbi->lz4.max_pclusterblks) {
			sbi->lz4.max_pclusterblks = 1;	/* reserved case */
		} else if (sbi->lz4.max_pclusterblks >
			   Z_EROFS_PCLUSTER_MAX_SIZE / EROFS_BLKSIZ) {
			erofs_err(sb, "too large lz4 pclusterblks %u",
				  sbi->lz4.max_pclusterblks);
			return -EINVAL;
		} else if (sbi->lz4.max_pclusterblks >= 2) {
			erofs_info(sb, "EXPERIMENTAL big pcluster feature in use. Use at your own risk!");
		}
	} else {
		distance = le16_to_cpu(dsb->u1.lz4_max_distance);
		sbi->lz4.max_pclusterblks = 1;
	}

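	/*
	 * 0 means unset, so assume the worst-case window; otherwise round
	 * the byte distance up to pages, +1 since the window rarely starts
	 * page-aligned.
	 */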
	sbi->lz4.max_distance_pages = distance ?
					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
					LZ4_MAX_DISTANCE_PAGES;
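	/* make sure per-CPU buffers can hold a worst-case pcluster for the copy-based overlap fallback */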
	return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
}

/*
 * Fill all gaps with bounce pages if it's a sparse page list. Also check if
 * all physical pages are consecutive, which is common for moderate
 * compression ratios.
 */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
					struct page **pagepool)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
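	/*
	 * j rotates over the last lz4_max_distance_pages output slots; a
	 * bounce page that has rotated out of the LZ4 window is stashed in
	 * availables[] so it can be reused instead of allocating a new one.
	 */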
	for (i = j = 0; i < ctx->outpages; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* a 'bounced' bit is only valid after j has completed a full round */
		if (test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (kaddr) {
				if (kaddr + PAGE_SIZE == page_address(page))
					kaddr += PAGE_SIZE;
				else
					kaddr = NULL;
			} else if (!i) {
				kaddr = page_address(page);
			}
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool,
						 GFP_KERNEL | __GFP_NOFAIL);
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
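	/* kaddr != NULL here means every output page is physically consecutive */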
	return kaddr ? 1 : 0;
}

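/*
 * Map the compressed input for decompression. On success, *maptype records
 * how to unmap it afterwards: 0 - a single page left kmapped; 1 - input
 * pages vm_map_ram()ed into a contiguous mapping; 2 - data copied into a
 * per-CPU buffer.
 */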
static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
			void *inpage, unsigned int *inputmargin, int *maptype,
			bool may_inplace)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	unsigned int omargin, total, i, j;
	struct page **in;
	void *src, *tmp;

	if (rq->inplace_io) {
		omargin = PAGE_ALIGN(ctx->oend) - ctx->oend;
		if (rq->partial_decoding || !may_inplace ||
		    omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
			goto docopy;

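		/*
		 * In-place I/O keeps the compressed data at the tail of the
		 * output buffer; copy instead if any input page also serves
		 * as an output page ahead of its expected tail position.
		 */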
		for (i = 0; i < ctx->inpages; ++i) {
			DBG_BUGON(rq->in[i] == NULL);
			for (j = 0; j < ctx->outpages - ctx->inpages + i; ++j)
				if (rq->out[j] == rq->in[i])
					goto docopy;
		}
	}

	if (ctx->inpages <= 1) {
		*maptype = 0;
		return inpage;
	}
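	/* multi-page input: drop the atomic kmap and build a contiguous mapping (may sleep) */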
	kunmap_atomic(inpage);
	might_sleep();
	src = erofs_vm_map_ram(rq->in, ctx->inpages);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/* otherwise, copy compressed data (which may overlap the output) to a per-CPU buffer */
	in = rq->in;
	src = erofs_get_pcpubuf(ctx->inpages);
	if (!src) {
		DBG_BUGON(1);
		kunmap_atomic(inpage);
		return ERR_PTR(-EFAULT);
	}

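	/*
	 * Copy page by page; only the first page carries an input margin,
	 * so clear *inputmargin once it has been consumed.
	 */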
	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

		if (!inpage)
			inpage = kmap_atomic(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_atomic(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		*inputmargin = 0;
	}
	*maptype = 2;
	return src;
}

/*
 * Get the exact inputsize with the zero_padding feature.
 *  - For LZ4, it should work if the zero_padding feature is on (5.3+);
 *  - For MicroLZMA, it's enabled all the time.
 */
int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
			 unsigned int padbufsize)
{
	const char *padend;

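	/*
	 * zero_padding places the compressed data at the end of the block,
	 * so skip the leading zeroes to find its real start; an all-zero
	 * buffer indicates corruption.
	 */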
	padend = memchr_inv(padbuf, 0, padbufsize);
	if (!padend)
		return -EFSCORRUPTED;
	rq->inputsize -= padend - padbuf;
	rq->pageofs_in += padend - padbuf;
	return 0;
}

static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
				      u8 *out)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	bool support_0padding = false, may_inplace = false;
	unsigned int inputmargin;
	u8 *headpage, *src;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_atomic(*rq->in);

	/* LZ4 decompression inplace is only safe if zero_padding is enabled */
	if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
		support_0padding = true;
		ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
				min_t(unsigned int, rq->inputsize,
				      EROFS_BLKSIZ - rq->pageofs_in));
		if (ret) {
			kunmap_atomic(headpage);
			return ret;
		}
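		/* in-place is only viable if the compressed data ends exactly on a block boundary */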
		may_inplace = !((rq->pageofs_in + rq->inputsize) &
				(EROFS_BLKSIZ - 1));
	}

	inputmargin = rq->pageofs_in;
	src = z_erofs_lz4_handle_overlap(ctx, headpage, &inputmargin,
					 &maptype, may_inplace);
	if (IS_ERR(src))
		return PTR_ERR(src);

	/*
	 * The legacy format could pack extra data into a pcluster, so only
	 * the partial variant, which tolerates trailing input, is safe there.
	 */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);

		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
			       16, 1, src + inputmargin, rq->inputsize, true);
		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);

		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EIO;
	} else {
		ret = 0;
	}

	if (maptype == 0) {
		kunmap_atomic(headpage);
	} else if (maptype == 1) {
		vm_unmap_ram(src, ctx->inpages);
	} else if (maptype == 2) {
		erofs_put_pcpubuf(src);
	} else {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}

static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
				  struct page **pagepool)
{
	struct z_erofs_lz4_decompress_ctx ctx;
	unsigned int dst_maptype;
	void *dst;
	int ret;

	ctx.rq = rq;
	ctx.oend = rq->pageofs_out + rq->outputsize;
	ctx.outpages = PAGE_ALIGN(ctx.oend) >> PAGE_SHIFT;
	ctx.inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;

	/* one optimized fast path only for the non-bigpcluster case for now */
	if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_atomic(*rq->out);
		dst_maptype = 0;
		goto dstmap_out;
	}

	/* general decoding path which can be used for all cases */
	ret = z_erofs_lz4_prepare_dstpages(&ctx, pagepool);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		dst = page_address(*rq->out);
		dst_maptype = 1;
	} else {
		dst = erofs_vm_map_ram(rq->out, ctx.outpages);
		if (!dst)
			return -ENOMEM;
		dst_maptype = 2;
	}

dstmap_out:
	ret = z_erofs_lz4_decompress_mem(&ctx, dst + rq->pageofs_out);
	if (!dst_maptype)
		kunmap_atomic(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, ctx.outpages);
	return ret;
}

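/*
 * Handle inline uncompressed ("shifted") pclusters: the data is simply
 * copied/moved to its final offset and may span at most two output pages.
 */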
static int z_erofs_shifted_transform(struct z_erofs_decompress_req *rq,
				     struct page **pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int righthalf = min_t(unsigned int, rq->outputsize,
					     PAGE_SIZE - rq->pageofs_out);
	unsigned char *src, *dst;

	if (nrpages_out > 2) {
		DBG_BUGON(1);
		return -EIO;
	}

	if (rq->out[0] == *rq->in) {
		DBG_BUGON(nrpages_out != 1);
		return 0;
	}

	src = kmap_atomic(*rq->in) + rq->pageofs_in;
	if (rq->out[0]) {
		dst = kmap_atomic(rq->out[0]);
		memcpy(dst + rq->pageofs_out, src, righthalf);
		kunmap_atomic(dst);
	}

	if (nrpages_out == 2) {
		DBG_BUGON(!rq->out[1]);
		if (rq->out[1] == *rq->in) {
			memmove(src, src + righthalf, rq->pageofs_out);
		} else {
			dst = kmap_atomic(rq->out[1]);
			memcpy(dst, src + righthalf, rq->pageofs_out);
			kunmap_atomic(dst);
		}
	}
	kunmap_atomic(src);
	return 0;
}

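/* decompressor table indexed by the on-disk algorithm id */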
static struct z_erofs_decompressor decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.decompress = z_erofs_shifted_transform,
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.decompress = z_erofs_lz4_decompress,
		.name = "lz4"
	},
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
	[Z_EROFS_COMPRESSION_LZMA] = {
		.decompress = z_erofs_lzma_decompress,
		.name = "lzma"
	},
#endif
};

int z_erofs_decompress(struct z_erofs_decompress_req *rq,
		       struct page **pagepool)
{
	return decompressors[rq->alg].decompress(rq, pagepool);
}