/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/compiler.h>
#include <linux/vmalloc.h>
#include <crypto/algapi.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include "internal.h"

static const struct crypto_type crypto_scomp_type;
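/*
 * Per-CPU scratch buffers used to linearize scatterlist input and output
 * for the synchronous compression backends.  They are shared by all scomp
 * algorithms, refcounted via scomp_scratch_users, and managed under
 * scomp_lock.
 */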
static void * __percpu *scomp_src_scratches;
static void * __percpu *scomp_dst_scratches;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

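/* Report an scomp algorithm to user space via the crypto netlink interface. */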
#ifdef CONFIG_NET
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	strncpy(rscomp.type, "scomp", sizeof(rscomp.type));

	if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		    sizeof(struct crypto_report_comp), &rscomp))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

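/* Show the algorithm type in /proc/crypto. */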
static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

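/* scomp transforms carry no per-tfm state of their own, so init is a no-op. */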
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	return 0;
}

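/* Free the per-CPU scratch buffers and the per-CPU pointer array itself. */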
static void crypto_scomp_free_scratches(void * __percpu *scratches)
{
	int i;

	if (!scratches)
		return;

	for_each_possible_cpu(i)
		vfree(*per_cpu_ptr(scratches, i));

	free_percpu(scratches);
}

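/*
 * Allocate one SCOMP_SCRATCH_SIZE buffer per possible CPU, placed on the
 * node of the CPU it serves.  On any failure, everything allocated so far
 * is unwound.
 */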
static void * __percpu *crypto_scomp_alloc_scratches(void)
{
	void * __percpu *scratches;
	int i;

	scratches = alloc_percpu(void *);
	if (!scratches)
		return NULL;

	for_each_possible_cpu(i) {
		void *scratch;

		scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!scratch)
			goto error;
		*per_cpu_ptr(scratches, i) = scratch;
	}

	return scratches;

error:
	crypto_scomp_free_scratches(scratches);
	return NULL;
}

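/*
 * Drop one reference to the shared scratch buffers and free them when the
 * last scomp algorithm goes away.  Called with scomp_lock held.
 */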
static void crypto_scomp_free_all_scratches(void)
{
	if (!--scomp_scratch_users) {
		crypto_scomp_free_scratches(scomp_src_scratches);
		crypto_scomp_free_scratches(scomp_dst_scratches);
		scomp_src_scratches = NULL;
		scomp_dst_scratches = NULL;
	}
}

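/*
 * Take one reference to the shared scratch buffers, allocating them on
 * first use.  Called with scomp_lock held; on failure the caller unwinds
 * via crypto_scomp_free_all_scratches().
 */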
static int crypto_scomp_alloc_all_scratches(void)
{
	if (!scomp_scratch_users++) {
		scomp_src_scratches = crypto_scomp_alloc_scratches();
		if (!scomp_src_scratches)
			return -ENOMEM;
		scomp_dst_scratches = crypto_scomp_alloc_scratches();
		if (!scomp_dst_scratches)
			return -ENOMEM;
	}
	return 0;
}

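/* Free a scatterlist built by crypto_scomp_sg_alloc() along with its pages. */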
static void crypto_scomp_sg_free(struct scatterlist *sgl)
{
	int i, n;
	struct page *page;

	if (!sgl)
		return;

	n = sg_nents(sgl);
	for_each_sg(sgl, sgl, n, i) {
		page = sg_page(sgl);
		if (page)
			__free_page(page);
	}

	kfree(sgl);
}

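/*
 * Allocate a scatterlist large enough to hold @size bytes, backed by one
 * freshly allocated page per entry.  Used to provide a destination when
 * the caller did not supply one.
 */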
static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
{
	struct scatterlist *sgl;
	struct page *page;
	int i, n;

	n = ((size - 1) >> PAGE_SHIFT) + 1;

	sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, n);

	for (i = 0; i < n; i++) {
		page = alloc_page(gfp);
		if (!page)
			goto err;
		sg_set_page(sgl + i, page, PAGE_SIZE, 0);
	}

	return sgl;

err:
	sg_mark_end(sgl + i);
	crypto_scomp_sg_free(sgl);
	return NULL;
}

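/*
 * Common implementation of the asynchronous (acomp) compress and
 * decompress entry points on top of a synchronous scomp backend:
 * linearize the source scatterlist into the per-CPU source scratch
 * buffer, run the synchronous operation into the destination scratch
 * buffer, then copy the result back out, allocating a destination
 * scatterlist if the caller did not provide one.  Input and output are
 * both capped at SCOMP_SCRATCH_SIZE, and the whole operation runs with
 * preemption disabled (get_cpu()) so the scratch buffers stay ours.
 */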
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	void **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void **ctx = acomp_request_ctx(req);
	const int cpu = get_cpu();
	u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
	u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
	int ret;

	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
		ret = -EINVAL;
		goto out;
	}

	if (req->dst && !req->dlen) {
		ret = -EINVAL;
		goto out;
	}

	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
		req->dlen = SCOMP_SCRATCH_SIZE;

	scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
	if (dir)
		ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
					    scratch_dst, &req->dlen, *ctx);
	else
		ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
					      scratch_dst, &req->dlen, *ctx);
	if (!ret) {
		if (!req->dst) {
			req->dst = crypto_scomp_sg_alloc(req->dlen,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				   GFP_KERNEL : GFP_ATOMIC);
			if (!req->dst) {
				ret = -ENOMEM;
				goto out;
			}
		}
		scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
					 1);
	}
out:
	put_cpu();
	return ret;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}

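/* Tear down the scomp transform backing an acomp wrapper tfm. */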
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);
}

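/*
 * Set up an acomp transform on top of a synchronous scomp algorithm:
 * instantiate the underlying scomp tfm and route the acomp
 * compress/decompress callbacks through the scratch-buffer wrappers
 * above.
 */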
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;
	crt->dst_free = crypto_scomp_sg_free;
	crt->reqsize = sizeof(void *);

	return 0;
}

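/*
 * Allocate the backend-specific context for an acomp request whose
 * transform is backed by an scomp algorithm.  On failure the request
 * itself is freed and NULL is returned.
 */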
struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx;

	ctx = crypto_scomp_alloc_ctx(scomp);
	if (IS_ERR(ctx)) {
		kfree(req);
		return NULL;
	}

	*req->__ctx = ctx;

	return req;
}

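/* Free the backend-specific context attached to an scomp-backed acomp request. */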
void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx = *req->__ctx;

	if (ctx)
		crypto_scomp_free_ctx(scomp, ctx);
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
	.report = crypto_scomp_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};

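/**
 * crypto_register_scomp() -- Register synchronous compression algorithm
 * @alg:	algorithm definition
 *
 * Registers @alg with the crypto core after making sure the shared
 * per-CPU scratch buffers exist.
 *
 * A minimal sketch of a caller (all "my_*" names are illustrative, not
 * part of this file):
 *
 *	static struct scomp_alg my_scomp = {
 *		.alloc_ctx	= my_alloc_ctx,
 *		.free_ctx	= my_free_ctx,
 *		.compress	= my_compress,
 *		.decompress	= my_decompress,
 *		.base		= {
 *			.cra_name	 = "mycomp",
 *			.cra_driver_name = "mycomp-generic",
 *			.cra_module	 = THIS_MODULE,
 *		},
 *	};
 *
 *	ret = crypto_register_scomp(&my_scomp);
 *
 * Return: zero on success; error code in case of error
 */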
int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int ret = -ENOMEM;

	mutex_lock(&scomp_lock);
	if (crypto_scomp_alloc_all_scratches())
		goto error;

	base->cra_type = &crypto_scomp_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	ret = crypto_register_alg(base);
	if (ret)
		goto error;

	mutex_unlock(&scomp_lock);
	return ret;

error:
	crypto_scomp_free_all_scratches();
	mutex_unlock(&scomp_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

int crypto_unregister_scomp(struct scomp_alg *alg)
{
	int ret;

	mutex_lock(&scomp_lock);
	ret = crypto_unregister_alg(&alg->base);
	crypto_scomp_free_all_scratches();
	mutex_unlock(&scomp_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");