xref: /openbmc/linux/crypto/scompress.c (revision fe4549b1)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>

#include "compress.h"

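/*
 * Per-CPU scratch buffers used to present linear source and destination
 * buffers to scomp algorithms, which cannot process scatterlists directly.
 */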
struct scomp_scratch {
	spinlock_t	lock;
	void		*src;
	void		*dst;
};

static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
	.lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
};

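/*
 * scomp_lock serializes scratch buffer allocation and freeing;
 * scomp_scratch_users counts the tfms sharing the per-CPU scratches.
 */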
static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

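/* Report scomp algorithm details through the crypto_user netlink interface. */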
static int __maybe_unused crypto_scomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	memset(&rscomp, 0, sizeof(rscomp));

	strscpy(rscomp.type, "scomp", sizeof(rscomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		       sizeof(rscomp), &rscomp);
}

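/* Show the algorithm type in /proc/crypto. */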
static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

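/*
 * Free the per-CPU scratch buffers.  vfree(NULL) is a no-op, so this is
 * safe to call on a partially allocated state.
 */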
static void crypto_scomp_free_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		scratch = per_cpu_ptr(&scomp_scratch, i);

		vfree(scratch->src);
		vfree(scratch->dst);
		scratch->src = NULL;
		scratch->dst = NULL;
	}
}

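/*
 * Allocate SCOMP_SCRATCH_SIZE source and destination buffers for every
 * possible CPU, from that CPU's memory node.  On failure, everything
 * allocated so far is released.
 */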
static int crypto_scomp_alloc_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		void *mem;

		scratch = per_cpu_ptr(&scomp_scratch, i);

		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!mem)
			goto error;
		scratch->src = mem;
		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!mem)
			goto error;
		scratch->dst = mem;
	}
	return 0;
error:
	crypto_scomp_free_scratches();
	return -ENOMEM;
}

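/* The first scomp tfm to come up allocates the shared scratch buffers. */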
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	int ret = 0;

	mutex_lock(&scomp_lock);
	if (!scomp_scratch_users) {
		ret = crypto_scomp_alloc_scratches();
		if (ret)
			goto unlock;
	}
	/* Only count this tfm once the scratches are known to exist. */
	scomp_scratch_users++;
unlock:
	mutex_unlock(&scomp_lock);

	return ret;
}

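/*
 * Perform a (de)compression through the acomp interface: copy the source
 * scatterlist into the linear per-CPU scratch buffer, run the scomp
 * algorithm (@dir non-zero means compress, zero means decompress), then
 * copy the result back out.  If the caller did not supply a destination
 * scatterlist, one is allocated to fit the output.  The scratch spinlock
 * is held across the operation, and both input and output are capped at
 * SCOMP_SCRATCH_SIZE.
 */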
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	void **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void **ctx = acomp_request_ctx(req);
	struct scomp_scratch *scratch;
	unsigned int dlen;
	int ret;

	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
		return -EINVAL;

	if (req->dst && !req->dlen)
		return -EINVAL;

	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
		req->dlen = SCOMP_SCRATCH_SIZE;

	dlen = req->dlen;

	scratch = raw_cpu_ptr(&scomp_scratch);
	spin_lock(&scratch->lock);

	scatterwalk_map_and_copy(scratch->src, req->src, 0, req->slen, 0);
	if (dir)
		ret = crypto_scomp_compress(scomp, scratch->src, req->slen,
					    scratch->dst, &req->dlen, *ctx);
	else
		ret = crypto_scomp_decompress(scomp, scratch->src, req->slen,
					      scratch->dst, &req->dlen, *ctx);
	if (!ret) {
		if (!req->dst) {
			req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
			if (!req->dst) {
				ret = -ENOMEM;
				goto out;
			}
		} else if (req->dlen > dlen) {
			ret = -ENOSPC;
			goto out;
		}
		scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
					 1);
	}
out:
	spin_unlock(&scratch->lock);
	return ret;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}

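/* Tear down the scomp backend of an acomp tfm; the last user frees the scratches. */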
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);

	mutex_lock(&scomp_lock);
	if (!--scomp_scratch_users)
		crypto_scomp_free_scratches();
	mutex_unlock(&scomp_lock);
}

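/*
 * Wrap a synchronous scomp algorithm so that it can be used through the
 * asynchronous acomp API.
 */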
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;
	crt->dst_free = sgl_free;
	crt->reqsize = sizeof(void *);

	return 0;
}

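/*
 * Allocate the scomp per-request context for @req.  On failure the
 * request itself is freed and NULL is returned.
 */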
struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx;

	ctx = crypto_scomp_alloc_ctx(scomp);
	if (IS_ERR(ctx)) {
		kfree(req);
		return NULL;
	}

	*req->__ctx = ctx;

	return req;
}

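/* Free the scomp per-request context attached to @req, if any. */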
void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx = *req->__ctx;

	if (ctx)
		crypto_scomp_free_ctx(scomp, ctx);
}

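/* Type glue that plugs scomp algorithms into the crypto core. */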
static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_scomp_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
	.report_stat = crypto_acomp_report_stat,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};

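/**
 * crypto_register_scomp() - Register synchronous compression algorithm
 * @alg: algorithm definition
 *
 * Return: zero on success; error code in case of error
 */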
int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_type = &crypto_scomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

void crypto_unregister_scomp(struct scomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

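/**
 * crypto_register_scomps() - Register multiple scomp algorithms
 * @algs: array of algorithm definitions
 * @count: number of entries in @algs
 *
 * If any registration fails, everything registered so far is unwound.
 *
 * Return: zero on success; error code in case of error
 */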
int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");