// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/module.h>
#include <linux/zlib.h>
#include "compress.h"

struct z_erofs_deflate {
	struct z_erofs_deflate *next;
	struct z_stream_s z;
	u8 bounce[PAGE_SIZE];
};

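/*
 * Preallocated decompression streams are kept on a singly linked free list
 * protected by z_erofs_deflate_lock; decompressors that find the list empty
 * sleep on z_erofs_deflate_wq until a stream is returned.
 */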
static DEFINE_SPINLOCK(z_erofs_deflate_lock);
static unsigned int z_erofs_deflate_nstrms, z_erofs_deflate_avail_strms;
static struct z_erofs_deflate *z_erofs_deflate_head;
static DECLARE_WAIT_QUEUE_HEAD(z_erofs_deflate_wq);

module_param_named(deflate_streams, z_erofs_deflate_nstrms, uint, 0444);

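/* Tear down every preallocated stream (also used on the init failure path). */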
void z_erofs_deflate_exit(void)
{
	/* there should be no running fs instance */
	while (z_erofs_deflate_avail_strms) {
		struct z_erofs_deflate *strm;

		spin_lock(&z_erofs_deflate_lock);
		strm = z_erofs_deflate_head;
		if (!strm) {
			spin_unlock(&z_erofs_deflate_lock);
			continue;
		}
		z_erofs_deflate_head = NULL;
		spin_unlock(&z_erofs_deflate_lock);

		while (strm) {
			struct z_erofs_deflate *n = strm->next;

			vfree(strm->z.workspace);
			kfree(strm);
			--z_erofs_deflate_avail_strms;
			strm = n;
		}
	}
}

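/*
 * Preallocate "deflate_streams" decompression streams up front so that no
 * zlib workspace needs to be allocated in the decompression path.
 */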
int __init z_erofs_deflate_init(void)
{
	/* if not specified, use the number of possible CPUs by default */
	if (!z_erofs_deflate_nstrms)
		z_erofs_deflate_nstrms = num_possible_cpus();

	for (; z_erofs_deflate_avail_strms < z_erofs_deflate_nstrms;
	     ++z_erofs_deflate_avail_strms) {
		struct z_erofs_deflate *strm;

		strm = kzalloc(sizeof(*strm), GFP_KERNEL);
		if (!strm)
			goto out_failed;

		/* XXX: in-kernel zlib cannot shrink windowbits currently */
		strm->z.workspace = vmalloc(zlib_inflate_workspacesize());
		if (!strm->z.workspace) {
			kfree(strm);
			goto out_failed;
		}

		spin_lock(&z_erofs_deflate_lock);
		strm->next = z_erofs_deflate_head;
		z_erofs_deflate_head = strm;
		spin_unlock(&z_erofs_deflate_lock);
	}
	return 0;

out_failed:
	pr_err("failed to allocate zlib workspace\n");
	z_erofs_deflate_exit();
	return -ENOMEM;
}

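/*
 * Parse and sanity-check the on-disk DEFLATE configuration; windowbits
 * larger than MAX_WBITS cannot be handled by the in-kernel zlib, so such
 * images are rejected here.
 */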
int z_erofs_load_deflate_config(struct super_block *sb,
			struct erofs_super_block *dsb, void *data, int size)
{
	struct z_erofs_deflate_cfgs *dfl = data;

	if (!dfl || size < sizeof(struct z_erofs_deflate_cfgs)) {
		erofs_err(sb, "invalid deflate cfgs, size=%u", size);
		return -EINVAL;
	}

	if (dfl->windowbits > MAX_WBITS) {
		erofs_err(sb, "unsupported windowbits %u", dfl->windowbits);
		return -EOPNOTSUPP;
	}

	erofs_info(sb, "EXPERIMENTAL DEFLATE feature in use. Use at your own risk!");
	return 0;
}

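/*
 * Decompress one DEFLATE pcluster in four steps:
 *   1. fix up the exact compressed size from the first input page;
 *   2. grab an idle stream from the global pool, sleeping if none is free;
 *   3. run a multi-call zlib_inflate() over the input/output page vectors;
 *   4. return the stream to the pool and wake up any waiters.
 */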
int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
			       struct page **pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int nrpages_in =
		PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
	struct super_block *sb = rq->sb;
	unsigned int insz, outsz, pofs;
	struct z_erofs_deflate *strm;
	u8 *kin, *kout = NULL;
	bool bounced = false;
	int no = -1, ni = 0, j = 0, zerr, err;

	/* 1. get the exact DEFLATE compressed size */
	kin = kmap_local_page(*rq->in);
	err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in,
			min_t(unsigned int, rq->inputsize,
			      sb->s_blocksize - rq->pageofs_in));
	if (err) {
		kunmap_local(kin);
		return err;
	}

	/* 2. get an available DEFLATE context */
again:
	spin_lock(&z_erofs_deflate_lock);
	strm = z_erofs_deflate_head;
	if (!strm) {
		spin_unlock(&z_erofs_deflate_lock);
		wait_event(z_erofs_deflate_wq, READ_ONCE(z_erofs_deflate_head));
		goto again;
	}
	z_erofs_deflate_head = strm->next;
	spin_unlock(&z_erofs_deflate_lock);

	/* 3. multi-call decompress */
	insz = rq->inputsize;
	outsz = rq->outputsize;
	zerr = zlib_inflateInit2(&strm->z, -MAX_WBITS);
	if (zerr != Z_OK) {
		err = -EIO;
		goto failed_zinit;
	}

	pofs = rq->pageofs_out;
	strm->z.avail_in = min_t(u32, insz, PAGE_SIZE - rq->pageofs_in);
	insz -= strm->z.avail_in;
	strm->z.next_in = kin + rq->pageofs_in;
	strm->z.avail_out = 0;

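	/*
	 * Main inflate loop: map the next output page whenever avail_out is
	 * exhausted, map the next input page whenever avail_in is exhausted,
	 * and deal with in-place I/O pages overlapping the current output
	 * page before each zlib_inflate() call.
	 */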
	while (1) {
		if (!strm->z.avail_out) {
			if (++no >= nrpages_out || !outsz) {
				erofs_err(sb, "insufficient space for decompressed data");
				err = -EFSCORRUPTED;
				break;
			}

			if (kout)
				kunmap_local(kout);
			strm->z.avail_out = min_t(u32, outsz, PAGE_SIZE - pofs);
			outsz -= strm->z.avail_out;
			if (!rq->out[no]) {
				rq->out[no] = erofs_allocpage(pagepool,
						GFP_KERNEL | __GFP_NOFAIL);
				set_page_private(rq->out[no],
						 Z_EROFS_SHORTLIVED_PAGE);
			}
			kout = kmap_local_page(rq->out[no]);
			strm->z.next_out = kout + pofs;
			pofs = 0;
		}

		if (!strm->z.avail_in && insz) {
			if (++ni >= nrpages_in) {
				erofs_err(sb, "invalid compressed data");
				err = -EFSCORRUPTED;
				break;
			}

			/* unlike kmap(), kmap_local mappings must be unmapped in reverse order */
			if (kout) {
				j = strm->z.next_out - kout;
				kunmap_local(kout);
			}
			kunmap_local(kin);
			strm->z.avail_in = min_t(u32, insz, PAGE_SIZE);
			insz -= strm->z.avail_in;
			kin = kmap_local_page(rq->in[ni]);
			strm->z.next_in = kin;
			bounced = false;
			if (kout) {
				kout = kmap_local_page(rq->out[no]);
				strm->z.next_out = kout + j;
			}
		}

		/*
		 * Handle overlapping: use the bounce buffer if the compressed
		 * data is currently being processed; otherwise, duplicate the
		 * colliding page with a short-lived page from the on-stack
		 * pagepool, which is shared within the same request so that
		 * not _all_ in-place I/O pages need to be doubled.
		 */
		if (!bounced && rq->out[no] == rq->in[ni]) {
			memcpy(strm->bounce, strm->z.next_in, strm->z.avail_in);
			strm->z.next_in = strm->bounce;
			bounced = true;
		}

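		/*
		 * In-place input pages after the current one which also back
		 * the current output page must be copied to short-lived pages
		 * before zlib_inflate() starts writing into rq->out[no].
		 */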
		for (j = ni + 1; j < nrpages_in; ++j) {
			struct page *tmppage;

			if (rq->out[no] != rq->in[j])
				continue;

			DBG_BUGON(erofs_page_is_managed(EROFS_SB(sb),
							rq->in[j]));
			tmppage = erofs_allocpage(pagepool,
						  GFP_KERNEL | __GFP_NOFAIL);
			set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
			copy_highpage(tmppage, rq->in[j]);
			rq->in[j] = tmppage;
		}

		zerr = zlib_inflate(&strm->z, Z_SYNC_FLUSH);
		if (zerr != Z_OK || !(outsz + strm->z.avail_out)) {
			if (zerr == Z_OK && rq->partial_decoding)
				break;
			if (zerr == Z_STREAM_END && !outsz)
				break;
			erofs_err(sb, "failed to decompress %d in[%u] out[%u]",
				  zerr, rq->inputsize, rq->outputsize);
			err = -EFSCORRUPTED;
			break;
		}
	}

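	/* release the zlib state; don't overwrite an earlier error */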
	if (zlib_inflateEnd(&strm->z) != Z_OK && !err)
		err = -EIO;
	if (kout)
		kunmap_local(kout);
failed_zinit:
	kunmap_local(kin);
	/* 4. push back DEFLATE stream context to the global list */
	spin_lock(&z_erofs_deflate_lock);
	strm->next = z_erofs_deflate_head;
	z_erofs_deflate_head = strm;
	spin_unlock(&z_erofs_deflate_lock);
	wake_up(&z_erofs_deflate_wq);
	return err;
}