// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "compress.h"
#include <linux/module.h>
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX 65535	/* set to maximum value by default */
#endif

#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)  (((srcsize) >> 8) + 32)
#endif
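
/*
 * Worked example, assuming 4 KiB pages: LZ4_MAX_DISTANCE_PAGES is
 * DIV_ROUND_UP(65535, 4096) + 1 = 17, and a fully-packed 64 KiB
 * pcluster needs (65536 >> 8) + 32 = 288 bytes of trailing margin
 * for safe in-place decompression.
 */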

struct z_erofs_lz4_decompress_ctx {
	struct z_erofs_decompress_req *rq;
	/* # of encoded, decoded pages */
	unsigned int inpages, outpages;
	/* decoded block total length (used for in-place decompression) */
	unsigned int oend;
};

int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb,
			    struct z_erofs_lz4_cfgs *lz4, int size)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	u16 distance;

	if (lz4) {
		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
			return -EINVAL;
		}
		distance = le16_to_cpu(lz4->max_distance);

		sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
		if (!sbi->lz4.max_pclusterblks) {
			sbi->lz4.max_pclusterblks = 1;	/* reserved case */
		} else if (sbi->lz4.max_pclusterblks >
			   Z_EROFS_PCLUSTER_MAX_SIZE / EROFS_BLKSIZ) {
			erofs_err(sb, "too large lz4 pclusterblks %u",
				  sbi->lz4.max_pclusterblks);
			return -EINVAL;
		}
	} else {
		distance = le16_to_cpu(dsb->u1.lz4_max_distance);
		sbi->lz4.max_pclusterblks = 1;
	}

	sbi->lz4.max_distance_pages = distance ?
					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
					LZ4_MAX_DISTANCE_PAGES;
	return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
}
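
/*
 * e.g. a legacy image without on-disk compression configs always ends
 * up with max_pclusterblks == 1 (one block per pcluster), while images
 * carrying lz4 configs may use up to Z_EROFS_PCLUSTER_MAX_SIZE /
 * EROFS_BLKSIZ blocks, and the per-CPU buffers are grown to match.
 */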

/*
 * Fill all gaps with bounce pages if the output page list is sparse.
 * Also check whether the physical pages are consecutive, which is
 * common at moderate compression ratios.
 */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
					struct page **pagepool)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	/* bounce pages which have fallen out of the LZ4 window, reusable */
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	/* one bit per window slot currently occupied by a bounce page */
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	for (i = j = 0; i < ctx->outpages; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (kaddr) {
				if (kaddr + PAGE_SIZE == page_address(page))
					kaddr += PAGE_SIZE;
				else
					kaddr = NULL;
			} else if (!i) {
				kaddr = page_address(page);
			}
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool,
						 GFP_KERNEL | __GFP_NOFAIL);
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
	/* non-NULL kaddr means all output pages are virtually consecutive */
	return kaddr ? 1 : 0;
}
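
/*
 * Rotation sketch, assuming lz4_max_distance_pages == 2 and
 * out[] == { P0, NULL, NULL, P3 }: slot j cycles 0,1,0,1.  The gaps at
 * i == 1 and i == 2 get fresh bounce pages (their bits set in
 * bounced[]); when slot 1 comes around again at i == 3, the bounce
 * page at out[1] is a full window behind and is stashed in
 * availables[] for reuse by later gaps.
 */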

static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
			void *inpage, unsigned int *inputmargin, int *maptype,
			bool may_inplace)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	unsigned int omargin, total, i, j;
	struct page **in;
	void *src, *tmp;

	if (rq->inplace_io) {
		omargin = PAGE_ALIGN(ctx->oend) - ctx->oend;
		if (rq->partial_decoding || !may_inplace ||
		    omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
			goto docopy;

		/* fall back to copying if input pages overlap earlier output pages */
		for (i = 0; i < ctx->inpages; ++i) {
			DBG_BUGON(rq->in[i] == NULL);
			for (j = 0; j < ctx->outpages - ctx->inpages + i; ++j)
				if (rq->out[j] == rq->in[i])
					goto docopy;
		}
	}

	if (ctx->inpages <= 1) {
		*maptype = 0;
		return inpage;
	}
	kunmap_atomic(inpage);
	might_sleep();
	src = erofs_vm_map_ram(rq->in, ctx->inpages);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/* otherwise, copy the overlapping compressed data into a per-CPU buffer */
	in = rq->in;
	src = erofs_get_pcpubuf(ctx->inpages);
	if (!src) {
		DBG_BUGON(1);
		kunmap_atomic(inpage);
		return ERR_PTR(-EFAULT);
	}

	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

		if (!inpage)
			inpage = kmap_atomic(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_atomic(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		*inputmargin = 0;
	}
	*maptype = 2;
	return src;
}
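
/*
 * *maptype contract for the helper above: 0 means the single input
 * page is still kmapped and the caller must kunmap it; 1 means the
 * input pages are vm_map_ram()ed and need vm_unmap_ram(); 2 means the
 * data now lives in a per-CPU buffer released with erofs_put_pcpubuf().
 */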

/*
 * Get the exact inputsize by stripping the leading zero padding.
 *  - For LZ4, this works if the zero_padding feature is on (5.3+);
 *  - For MicroLZMA, zero padding is always enabled.
 */
int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
			 unsigned int padbufsize)
{
	const char *padend;

	padend = memchr_inv(padbuf, 0, padbufsize);
	if (!padend)
		return -EFSCORRUPTED;
	rq->inputsize -= padend - padbuf;
	rq->pageofs_in += padend - padbuf;
	return 0;
}
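
/*
 * e.g. a 4 KiB block whose 100 compressed bytes are packed at its end
 * begins with 3996 bytes of zero padding: memchr_inv() finds the first
 * non-zero byte, so inputsize shrinks by 3996 and pageofs_in advances
 * by the same amount.
 */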

static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
				      u8 *out)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	bool support_0padding = false, may_inplace = false;
	unsigned int inputmargin;
	u8 *headpage, *src;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_atomic(*rq->in);

	/* LZ4 in-place decompression is only safe if zero_padding is enabled */
	if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
		support_0padding = true;
		ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
				min_t(unsigned int, rq->inputsize,
				      EROFS_BLKSIZ - rq->pageofs_in));
		if (ret) {
			kunmap_atomic(headpage);
			return ret;
		}
		may_inplace = !((rq->pageofs_in + rq->inputsize) &
				(EROFS_BLKSIZ - 1));
	}

	inputmargin = rq->pageofs_in;
	src = z_erofs_lz4_handle_overlap(ctx, headpage, &inputmargin,
					 &maptype, may_inplace);
	if (IS_ERR(src))
		return PTR_ERR(src);

	/* the legacy format could pack extra data within a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);

		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
			       16, 1, src + inputmargin, rq->inputsize, true);
		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);

		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EIO;
	} else {
		ret = 0;
	}

	if (maptype == 0) {
		kunmap_atomic(headpage);
	} else if (maptype == 1) {
		vm_unmap_ram(src, ctx->inpages);
	} else if (maptype == 2) {
		erofs_put_pcpubuf(src);
	} else {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}
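
/*
 * Note: the in-kernel LZ4_decompress_safe{,_partial}() helpers return
 * the number of bytes written to the destination, or a negative value
 * on malformed input, so the single ret != rq->outputsize test above
 * catches both truncated and corrupted streams.
 */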

static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
				  struct page **pagepool)
{
	struct z_erofs_lz4_decompress_ctx ctx;
	unsigned int dst_maptype;
	void *dst;
	int ret;

	ctx.rq = rq;
	ctx.oend = rq->pageofs_out + rq->outputsize;
	ctx.outpages = PAGE_ALIGN(ctx.oend) >> PAGE_SHIFT;
	ctx.inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;

	/* one optimized fast path only for non-bigpcluster cases for now */
	if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_atomic(*rq->out);
		dst_maptype = 0;
		goto dstmap_out;
	}

	/* general decoding path which can be used for all cases */
	ret = z_erofs_lz4_prepare_dstpages(&ctx, pagepool);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		/* output pages are consecutive: address them directly */
		dst = page_address(*rq->out);
		dst_maptype = 1;
	} else {
		dst = erofs_vm_map_ram(rq->out, ctx.outpages);
		if (!dst)
			return -ENOMEM;
		dst_maptype = 2;
	}

dstmap_out:
	ret = z_erofs_lz4_decompress_mem(&ctx, dst + rq->pageofs_out);
	if (!dst_maptype)
		kunmap_atomic(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, ctx.outpages);
	return ret;
}
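
/*
 * dst_maptype above mirrors the input-side maptype: 0 means a single
 * kmapped page, 1 means consecutive pages addressed via page_address()
 * (nothing to unmap), and 2 means a vm_map_ram() range torn down with
 * vm_unmap_ram().
 */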

static int z_erofs_shifted_transform(struct z_erofs_decompress_req *rq,
				     struct page **pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int righthalf = min_t(unsigned int, rq->outputsize,
					     PAGE_SIZE - rq->pageofs_out);
	const unsigned int lefthalf = rq->outputsize - righthalf;
	unsigned char *src, *dst;

	/* an uncompressed extent spans two output pages at most */
	if (nrpages_out > 2) {
		DBG_BUGON(1);
		return -EIO;
	}

	if (rq->out[0] == *rq->in) {
		/* data is already in place; nothing to copy */
		DBG_BUGON(nrpages_out != 1);
		return 0;
	}

	src = kmap_atomic(*rq->in) + rq->pageofs_in;
	if (rq->out[0]) {
		dst = kmap_atomic(rq->out[0]);
		memcpy(dst + rq->pageofs_out, src, righthalf);
		kunmap_atomic(dst);
	}

	if (nrpages_out == 2) {
		DBG_BUGON(!rq->out[1]);
		if (rq->out[1] == *rq->in) {
			memmove(src, src + righthalf, lefthalf);
		} else {
			dst = kmap_atomic(rq->out[1]);
			memcpy(dst, src + righthalf, lefthalf);
			kunmap_atomic(dst);
		}
	}
	kunmap_atomic(src);
	return 0;
}
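
/*
 * e.g. an uncompressed extent of 5000 bytes with pageofs_out == 100 on
 * 4 KiB pages: righthalf = 3996 bytes land in out[0] at offset 100 and
 * the remaining lefthalf = 1004 bytes spill into out[1].
 */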

static struct z_erofs_decompressor decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.decompress = z_erofs_shifted_transform,
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.decompress = z_erofs_lz4_decompress,
		.name = "lz4"
	},
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
	[Z_EROFS_COMPRESSION_LZMA] = {
		.decompress = z_erofs_lzma_decompress,
		.name = "lzma"
	},
#endif
};

int z_erofs_decompress(struct z_erofs_decompress_req *rq,
		       struct page **pagepool)
{
	return decompressors[rq->alg].decompress(rq, pagepool);
}
379