// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "compress.h"
#include <linux/module.h>
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX 65535	/* set to maximum value by default */
#endif

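/*
 * An LZ4 match can reference data up to LZ4_DISTANCE_MAX bytes back, and
 * that window may straddle a page boundary, hence the extra "+ 1" page
 * below. The fallback in-place margin mirrors the formula userspace LZ4
 * uses for LZ4_DECOMPRESS_INPLACE_MARGIN when <linux/lz4.h> lacks one.
 */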
#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)  (((srcsize) >> 8) + 32)
#endif

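/*
 * Per-request LZ4 decompression context; inpages/outpages are page counts
 * derived from the raw byte sizes carried in the request.
 */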
struct z_erofs_lz4_decompress_ctx {
	struct z_erofs_decompress_req *rq;
	/* # of encoded, decoded pages */
	unsigned int inpages, outpages;
	/* decoded block total length (used for in-place decompression) */
	unsigned int oend;
};

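/*
 * Parse the on-disk LZ4 configuration (from the compression cfgs area when
 * @lz4 is non-NULL, otherwise from the legacy superblock field) and grow the
 * per-CPU buffers up to the maximum pcluster size in advance.
 */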
int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb,
			    struct z_erofs_lz4_cfgs *lz4, int size)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	u16 distance;

	if (lz4) {
		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
			return -EINVAL;
		}
		distance = le16_to_cpu(lz4->max_distance);

		sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
		if (!sbi->lz4.max_pclusterblks) {
			sbi->lz4.max_pclusterblks = 1;	/* reserved case */
		} else if (sbi->lz4.max_pclusterblks >
			   Z_EROFS_PCLUSTER_MAX_SIZE / EROFS_BLKSIZ) {
			erofs_err(sb, "too large lz4 pclusterblks %u",
				  sbi->lz4.max_pclusterblks);
			return -EINVAL;
		}
	} else {
		distance = le16_to_cpu(dsb->u1.lz4_max_distance);
		sbi->lz4.max_pclusterblks = 1;
	}

	sbi->lz4.max_distance_pages = distance ?
					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
					LZ4_MAX_DISTANCE_PAGES;
	return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
}

/*
 * Fill all gaps with bounce pages if it's a sparse page list. Also check if
 * all physical pages are consecutive, which is common at moderate
 * compression ratios (CR).
 */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
					struct page **pagepool)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

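	/*
	 * 'j' rotates over the last lz4_max_distance_pages slots. A bounce
	 * page that has dropped out of the LZ4 history window can no longer
	 * be referenced by a match, so it's stashed in availables[] and
	 * recycled for a later gap instead of allocating a fresh page.
	 */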
	top = 0;
	for (i = j = 0; i < ctx->outpages; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (!rq->fillgaps && test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (!PageHighMem(page)) {
				if (!i) {
					kaddr = page_address(page);
					continue;
				}
				if (kaddr &&
				    kaddr + PAGE_SIZE == page_address(page)) {
					kaddr += PAGE_SIZE;
					continue;
				}
			}
			kaddr = NULL;
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool,
						 GFP_KERNEL | __GFP_NOFAIL);
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
	return kaddr ? 1 : 0;
}

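/*
 * Map the compressed input for decompression. On return, *maptype encodes
 * how the source was mapped: 0 - the single (already kmapped) head page;
 * 1 - inpages mapped contiguously via erofs_vm_map_ram(); 2 - data copied
 * out of the page list into a per-CPU buffer because in-place decompression
 * wasn't safe.
 */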
static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
			void *inpage, unsigned int *inputmargin, int *maptype,
			bool may_inplace)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	unsigned int omargin, total, i, j;
	struct page **in;
	void *src, *tmp;

	if (rq->inplace_io) {
		omargin = PAGE_ALIGN(ctx->oend) - ctx->oend;
		if (rq->partial_decoding || !may_inplace ||
		    omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
			goto docopy;

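		/*
		 * In-place decompression is unsafe if any input page also
		 * serves as an output page ahead of its own final position,
		 * since the decoder would overwrite not-yet-consumed
		 * compressed data; fall back to copying in that case.
		 */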
		for (i = 0; i < ctx->inpages; ++i) {
			DBG_BUGON(rq->in[i] == NULL);
			for (j = 0; j < ctx->outpages - ctx->inpages + i; ++j)
				if (rq->out[j] == rq->in[i])
					goto docopy;
		}
	}

	if (ctx->inpages <= 1) {
		*maptype = 0;
		return inpage;
	}
	kunmap_atomic(inpage);
	might_sleep();
	src = erofs_vm_map_ram(rq->in, ctx->inpages);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/*
	 * Otherwise, copy the compressed data, which may overlap with the
	 * output buffers, into a per-CPU buffer.
	 */
	in = rq->in;
	src = erofs_get_pcpubuf(ctx->inpages);
	if (!src) {
		DBG_BUGON(1);
		kunmap_atomic(inpage);
		return ERR_PTR(-EFAULT);
	}

	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

		if (!inpage)
			inpage = kmap_atomic(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_atomic(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		*inputmargin = 0;
	}
	*maptype = 2;
	return src;
}

/*
 * Get the exact inputsize with the zero_padding feature: compressed data is
 * right-aligned within the pcluster and preceded by zero bytes, so skipping
 * the leading zeroes yields the real start offset and length.
 *  - For LZ4, this works if the zero_padding feature is on (Linux 5.3+);
 *  - For MicroLZMA, zero_padding is enabled all the time.
 */
int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
			 unsigned int padbufsize)
{
	const char *padend;

	padend = memchr_inv(padbuf, 0, padbufsize);
	if (!padend)
		return -EFSCORRUPTED;
	rq->inputsize -= padend - padbuf;
	rq->pageofs_in += padend - padbuf;
	return 0;
}
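
/*
 * Illustrative numbers only: for a 4096-byte block holding 1000 bytes of
 * compressed data under zero_padding, the block begins with 3096 zero bytes;
 * z_erofs_fixup_insize() then advances rq->pageofs_in by 3096 and shrinks
 * rq->inputsize by the same amount.
 */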
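/*
 * Decompress the whole request into the (virtually) contiguous buffer @out:
 * map the head page, strip any zero padding, resolve input/output overlap,
 * run the LZ4 safe decoder, then release the source mapping according to
 * the maptype chosen by z_erofs_lz4_handle_overlap().
 */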
static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
				      u8 *out)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	bool support_0padding = false, may_inplace = false;
	unsigned int inputmargin;
	u8 *headpage, *src;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_atomic(*rq->in);

	/* LZ4 decompression inplace is only safe if zero_padding is enabled */
	if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
		support_0padding = true;
		ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
				min_t(unsigned int, rq->inputsize,
				      EROFS_BLKSIZ - rq->pageofs_in));
		if (ret) {
			kunmap_atomic(headpage);
			return ret;
		}
		may_inplace = !((rq->pageofs_in + rq->inputsize) &
				(EROFS_BLKSIZ - 1));
	}

	inputmargin = rq->pageofs_in;
	src = z_erofs_lz4_handle_overlap(ctx, headpage, &inputmargin,
					 &maptype, may_inplace);
	if (IS_ERR(src))
		return PTR_ERR(src);

	/* legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);

		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
			       16, 1, src + inputmargin, rq->inputsize, true);
		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);

		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EIO;
	} else {
		ret = 0;
	}

	if (maptype == 0) {
		kunmap_atomic(headpage);
	} else if (maptype == 1) {
		vm_unmap_ram(src, ctx->inpages);
	} else if (maptype == 2) {
		erofs_put_pcpubuf(src);
	} else {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}

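/*
 * Entry point for LZ4 pclusters. dst_maptype mirrors the input-side scheme:
 * 0 - single kmapped page (fast path), 1 - physically consecutive pages
 * addressed directly, 2 - mapped via erofs_vm_map_ram().
 */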
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
				  struct page **pagepool)
{
	struct z_erofs_lz4_decompress_ctx ctx;
	unsigned int dst_maptype;
	void *dst;
	int ret;

	ctx.rq = rq;
	ctx.oend = rq->pageofs_out + rq->outputsize;
	ctx.outpages = PAGE_ALIGN(ctx.oend) >> PAGE_SHIFT;
	ctx.inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;

	/* one optimized fast path only for non-bigpcluster cases yet */
	if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_atomic(*rq->out);
		dst_maptype = 0;
		goto dstmap_out;
	}

	/* general decoding path which can be used for all cases */
	ret = z_erofs_lz4_prepare_dstpages(&ctx, pagepool);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		dst = page_address(*rq->out);
		dst_maptype = 1;
	} else {
		dst = erofs_vm_map_ram(rq->out, ctx.outpages);
		if (!dst)
			return -ENOMEM;
		dst_maptype = 2;
	}

dstmap_out:
	ret = z_erofs_lz4_decompress_mem(&ctx, dst + rq->pageofs_out);
	if (!dst_maptype)
		kunmap_atomic(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, ctx.outpages);
	return ret;
}

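/*
 * "shifted" (i.e. uncompressed) pclusters carry the plain data as-is, so
 * decompression degenerates into copying into at most two output pages:
 * righthalf is the part that fits in the first page after pageofs_out,
 * lefthalf is the remainder that spills into the second page.
 */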
static int z_erofs_shifted_transform(struct z_erofs_decompress_req *rq,
				     struct page **pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int righthalf = min_t(unsigned int, rq->outputsize,
					     PAGE_SIZE - rq->pageofs_out);
	const unsigned int lefthalf = rq->outputsize - righthalf;
	unsigned char *src, *dst;

	if (nrpages_out > 2) {
		DBG_BUGON(1);
		return -EIO;
	}

	if (rq->out[0] == *rq->in) {
		DBG_BUGON(nrpages_out != 1);
		return 0;
	}

	src = kmap_atomic(*rq->in) + rq->pageofs_in;
	if (rq->out[0]) {
		dst = kmap_atomic(rq->out[0]);
		memcpy(dst + rq->pageofs_out, src, righthalf);
		kunmap_atomic(dst);
	}

	if (nrpages_out == 2) {
		DBG_BUGON(!rq->out[1]);
		if (rq->out[1] == *rq->in) {
			memmove(src, src + righthalf, lefthalf);
		} else {
			dst = kmap_atomic(rq->out[1]);
			memcpy(dst, src + righthalf, lefthalf);
			kunmap_atomic(dst);
		}
	}
	kunmap_atomic(src);
	return 0;
}

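/*
 * Dispatch table indexed by the on-disk algorithm id (rq->alg); LZMA support
 * is compiled in only with CONFIG_EROFS_FS_ZIP_LZMA.
 */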
static struct z_erofs_decompressor decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.decompress = z_erofs_shifted_transform,
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.decompress = z_erofs_lz4_decompress,
		.name = "lz4"
	},
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
	[Z_EROFS_COMPRESSION_LZMA] = {
		.decompress = z_erofs_lzma_decompress,
		.name = "lzma"
	},
#endif
};

int z_erofs_decompress(struct z_erofs_decompress_req *rq,
		       struct page **pagepool)
{
	return decompressors[rq->alg].decompress(rq, pagepool);
}
383