// SPDX-License-Identifier: GPL-2.0-only
/*
 * Crypto user configuration API.
 *
 * Copyright (C) 2011 secunet Security Networks AG
 * Copyright (C) 2011 Steffen Klassert <steffen.klassert@secunet.com>
 */

#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptouser.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/rng.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#include <crypto/internal/cryptouser.h>

#include "internal.h"

#define null_terminated(x)	(strnlen(x, sizeof(x)) < sizeof(x))

static DEFINE_MUTEX(crypto_cfg_mutex);

struct crypto_dump_info {
	struct sk_buff *in_skb;
	struct sk_buff *out_skb;
	u32 nlmsg_seq;
	u16 nlmsg_flags;
};

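/*
 * Find a registered, non-larval algorithm matching the userspace request.
 * The cru_type/cru_mask pair filters on algorithm flags; if a driver name
 * is given it must match cra_driver_name exactly, otherwise, unless @exact
 * is set, the generic cra_name is compared. On success a reference is
 * taken via crypto_mod_get() and the algorithm is returned, else NULL.
 */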
struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
{
	struct crypto_alg *q, *alg = NULL;

	down_read(&crypto_alg_sem);

	list_for_each_entry(q, &crypto_alg_list, cra_list) {
		int match = 0;

		if (crypto_is_larval(q))
			continue;

		if ((q->cra_flags ^ p->cru_type) & p->cru_mask)
			continue;

		if (strlen(p->cru_driver_name))
			match = !strcmp(q->cra_driver_name,
					p->cru_driver_name);
		else if (!exact)
			match = !strcmp(q->cra_name, p->cru_name);

		if (!match)
			continue;

		if (unlikely(!crypto_mod_get(q)))
			continue;

		alg = q;
		break;
	}

	up_read(&crypto_alg_sem);

	return alg;
}

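/*
 * Fallback report helpers used by crypto_report_one() for legacy algorithm
 * types (bare ciphers and compressors) that have no cra_type->report()
 * callback.
 */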
static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_cipher rcipher;

	memset(&rcipher, 0, sizeof(rcipher));

	strscpy(rcipher.type, "cipher", sizeof(rcipher.type));

	rcipher.blocksize = alg->cra_blocksize;
	rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
	rcipher.max_keysize = alg->cra_cipher.cia_max_keysize;

	return nla_put(skb, CRYPTOCFGA_REPORT_CIPHER,
		       sizeof(rcipher), &rcipher);
}

static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rcomp;

	memset(&rcomp, 0, sizeof(rcomp));

	strscpy(rcomp.type, "compression", sizeof(rcomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(rcomp), &rcomp);
}

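/*
 * Fill in the fixed-size crypto_user_alg header and append the
 * type-specific netlink attributes for one algorithm. Larvals get a
 * CRYPTOCFGA_REPORT_LARVAL attribute; everything else is reported via the
 * algorithm type's report() hook or the legacy helpers above.
 */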
static int crypto_report_one(struct crypto_alg *alg,
			     struct crypto_user_alg *ualg, struct sk_buff *skb)
{
	memset(ualg, 0, sizeof(*ualg));

	strscpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
	strscpy(ualg->cru_driver_name, alg->cra_driver_name,
		sizeof(ualg->cru_driver_name));
	strscpy(ualg->cru_module_name, module_name(alg->cra_module),
		sizeof(ualg->cru_module_name));

	ualg->cru_type = 0;
	ualg->cru_mask = 0;
	ualg->cru_flags = alg->cra_flags;
	ualg->cru_refcnt = refcount_read(&alg->cra_refcnt);

	if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
		goto nla_put_failure;
	if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
		struct crypto_report_larval rl;

		memset(&rl, 0, sizeof(rl));
		strscpy(rl.type, "larval", sizeof(rl.type));
		if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL, sizeof(rl), &rl))
			goto nla_put_failure;
		goto out;
	}

	if (alg->cra_type && alg->cra_type->report) {
		if (alg->cra_type->report(skb, alg))
			goto nla_put_failure;

		goto out;
	}

	switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
	case CRYPTO_ALG_TYPE_CIPHER:
		if (crypto_report_cipher(skb, alg))
			goto nla_put_failure;

		break;
	case CRYPTO_ALG_TYPE_COMPRESS:
		if (crypto_report_comp(skb, alg))
			goto nla_put_failure;

		break;
	}

out:
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

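/*
 * Wrap the per-algorithm report in a CRYPTO_MSG_GETALG netlink message
 * addressed to the requesting socket. On failure the partially built
 * message is cancelled and an error is returned.
 */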
static int crypto_report_alg(struct crypto_alg *alg,
			     struct crypto_dump_info *info)
{
	struct sk_buff *in_skb = info->in_skb;
	struct sk_buff *skb = info->out_skb;
	struct nlmsghdr *nlh;
	struct crypto_user_alg *ualg;
	int err = 0;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
			CRYPTO_MSG_GETALG, sizeof(*ualg), info->nlmsg_flags);
	if (!nlh) {
		err = -EMSGSIZE;
		goto out;
	}

	ualg = nlmsg_data(nlh);

	err = crypto_report_one(alg, ualg, skb);
	if (err) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

out:
	return err;
}

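/*
 * CRYPTO_MSG_GETALG (non-dump): report a single matching algorithm and
 * unicast the reply back to the sender.
 */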
static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
			 struct nlattr **attrs)
{
	struct net *net = sock_net(in_skb->sk);
	struct crypto_user_alg *p = nlmsg_data(in_nlh);
	struct crypto_alg *alg;
	struct sk_buff *skb;
	struct crypto_dump_info info;
	int err;

	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
		return -EINVAL;

	alg = crypto_alg_match(p, 0);
	if (!alg)
		return -ENOENT;

	err = -ENOMEM;
	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		goto drop_alg;

	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = in_nlh->nlmsg_seq;
	info.nlmsg_flags = 0;

	err = crypto_report_alg(alg, &info);
	if (err)
		kfree_skb(skb);	/* don't leak the reply skb on failure */

drop_alg:
	crypto_mod_put(alg);

	if (err)
		return err;

	return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
}

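/*
 * Dump callback for CRYPTO_MSG_GETALG with NLM_F_DUMP: walk the global
 * algorithm list and emit one message per algorithm. cb->args[0] holds the
 * position to resume from on the next invocation.
 */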
static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb)
{
	const size_t start_pos = cb->args[0];
	size_t pos = 0;
	struct crypto_dump_info info;
	struct crypto_alg *alg;
	int res;

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	down_read(&crypto_alg_sem);
	list_for_each_entry(alg, &crypto_alg_list, cra_list) {
		if (pos >= start_pos) {
			res = crypto_report_alg(alg, &info);
			if (res == -EMSGSIZE)
				break;
			if (res)
				goto out;
		}
		pos++;
	}
	cb->args[0] = pos;
	res = skb->len;
out:
	up_read(&crypto_alg_sem);
	return res;
}

static int crypto_dump_report_done(struct netlink_callback *cb)
{
	return 0;
}

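/*
 * CRYPTO_MSG_UPDATEALG: update the priority of an exactly named driver.
 * Instances built on top of the algorithm are torn down so that subsequent
 * lookups reflect the new priority. Requires CAP_NET_ADMIN.
 */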
static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct nlattr **attrs)
{
	struct crypto_alg *alg;
	struct crypto_user_alg *p = nlmsg_data(nlh);
	struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
	LIST_HEAD(list);

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
		return -EINVAL;

	if (priority && !strlen(p->cru_driver_name))
		return -EINVAL;

	alg = crypto_alg_match(p, 1);
	if (!alg)
		return -ENOENT;

	down_write(&crypto_alg_sem);

	crypto_remove_spawns(alg, &list, NULL);

	if (priority)
		alg->cra_priority = nla_get_u32(priority);

	up_write(&crypto_alg_sem);

	crypto_mod_put(alg);
	crypto_remove_final(&list);

	return 0;
}

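/*
 * CRYPTO_MSG_DELALG: unregister a template instance (e.g. "cbc(aes)") that
 * is no longer in use. Requires CAP_NET_ADMIN.
 */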
static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct nlattr **attrs)
{
	struct crypto_alg *alg;
	struct crypto_user_alg *p = nlmsg_data(nlh);
	int err;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
		return -EINVAL;

	alg = crypto_alg_match(p, 1);
	if (!alg)
		return -ENOENT;

	/* We cannot unregister core algorithms such as aes-generic.
	 * We would lose the reference in crypto_alg_list to this algorithm
	 * if we tried to unregister it. Unregistering such an algorithm
	 * without removing the module is not possible, so we restrict
	 * ourselves to crypto instances that are built from templates.
	 */
	err = -EINVAL;
	if (!(alg->cra_flags & CRYPTO_ALG_INSTANCE))
		goto drop_alg;

	err = -EBUSY;
	if (refcount_read(&alg->cra_refcnt) > 2)
		goto drop_alg;

	err = crypto_unregister_instance((struct crypto_instance *)alg);

drop_alg:
	crypto_mod_put(alg);
	return err;
}

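/*
 * CRYPTO_MSG_NEWALG: instantiate an algorithm by name, e.g. probe a
 * template instance or trigger module auto-loading, and optionally set its
 * priority. The request must not match an already registered algorithm.
 * Requires CAP_NET_ADMIN.
 */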
static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct nlattr **attrs)
{
	int exact = 0;
	const char *name;
	struct crypto_alg *alg;
	struct crypto_user_alg *p = nlmsg_data(nlh);
	struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
		return -EINVAL;

	if (strlen(p->cru_driver_name))
		exact = 1;

	if (priority && !exact)
		return -EINVAL;

	alg = crypto_alg_match(p, exact);
	if (alg) {
		crypto_mod_put(alg);
		return -EEXIST;
	}

	if (strlen(p->cru_driver_name))
		name = p->cru_driver_name;
	else
		name = p->cru_name;

	alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	down_write(&crypto_alg_sem);

	if (priority)
		alg->cra_priority = nla_get_u32(priority);

	up_write(&crypto_alg_sem);

	crypto_mod_put(alg);

	return 0;
}

static int crypto_del_rng(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct nlattr **attrs)
{
	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;
	return crypto_del_default_rng();
}

#define MSGSIZE(type) sizeof(struct type)

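/*
 * Minimum netlink payload expected for each message type; all but
 * CRYPTO_MSG_DELRNG carry a struct crypto_user_alg.
 */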
static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
	[CRYPTO_MSG_NEWALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
	[CRYPTO_MSG_DELALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
	[CRYPTO_MSG_UPDATEALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
	[CRYPTO_MSG_GETALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
	[CRYPTO_MSG_DELRNG	- CRYPTO_MSG_BASE] = 0,
	[CRYPTO_MSG_GETSTAT	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
};

static const struct nla_policy crypto_policy[CRYPTOCFGA_MAX+1] = {
	[CRYPTOCFGA_PRIORITY_VAL]   = { .type = NLA_U32},
};

#undef MSGSIZE

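/*
 * Per-message-type handlers: .doit serves single requests, .dump/.done
 * serve NLM_F_DUMP GETALG dumps.
 */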
static const struct crypto_link {
	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
	int (*dump)(struct sk_buff *, struct netlink_callback *);
	int (*done)(struct netlink_callback *);
} crypto_dispatch[CRYPTO_NR_MSGTYPES] = {
	[CRYPTO_MSG_NEWALG	- CRYPTO_MSG_BASE] = { .doit = crypto_add_alg},
	[CRYPTO_MSG_DELALG	- CRYPTO_MSG_BASE] = { .doit = crypto_del_alg},
	[CRYPTO_MSG_UPDATEALG	- CRYPTO_MSG_BASE] = { .doit = crypto_update_alg},
	[CRYPTO_MSG_GETALG	- CRYPTO_MSG_BASE] = { .doit = crypto_report,
						       .dump = crypto_dump_report,
						       .done = crypto_dump_report_done},
	[CRYPTO_MSG_DELRNG	- CRYPTO_MSG_BASE] = { .doit = crypto_del_rng },
	[CRYPTO_MSG_GETSTAT	- CRYPTO_MSG_BASE] = { .doit = crypto_reportstat},
};

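/*
 * Demultiplex one netlink message: validate the type, start a dump for
 * GETALG requests with NLM_F_DUMP, otherwise parse the attributes against
 * crypto_policy and call the per-type doit() handler.
 */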
static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *attrs[CRYPTOCFGA_MAX+1];
	const struct crypto_link *link;
	int type, err;

	type = nlh->nlmsg_type;
	if (type > CRYPTO_MSG_MAX)
		return -EINVAL;

	type -= CRYPTO_MSG_BASE;
	link = &crypto_dispatch[type];

	if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
	    (nlh->nlmsg_flags & NLM_F_DUMP))) {
		struct crypto_alg *alg;
		unsigned long dump_alloc = 0;

		if (link->dump == NULL)
			return -EINVAL;

		down_read(&crypto_alg_sem);
		list_for_each_entry(alg, &crypto_alg_list, cra_list)
			dump_alloc += CRYPTO_REPORT_MAXSIZE;
		up_read(&crypto_alg_sem);

		{
			struct netlink_dump_control c = {
				.dump = link->dump,
				.done = link->done,
				.min_dump_alloc = min(dump_alloc, 65535UL),
			};
			err = netlink_dump_start(net->crypto_nlsk, skb, nlh, &c);
		}

		return err;
	}

	err = nlmsg_parse_deprecated(nlh, crypto_msg_min[type], attrs,
				     CRYPTOCFGA_MAX, crypto_policy, extack);
	if (err < 0)
		return err;

	if (link->doit == NULL)
		return -EINVAL;

	return link->doit(skb, nlh, attrs);
}

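/* All crypto_user requests are serialized by crypto_cfg_mutex. */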
static void crypto_netlink_rcv(struct sk_buff *skb)
{
	mutex_lock(&crypto_cfg_mutex);
	netlink_rcv_skb(skb, &crypto_user_rcv_msg);
	mutex_unlock(&crypto_cfg_mutex);
}

static int __net_init crypto_netlink_init(struct net *net)
{
	struct netlink_kernel_cfg cfg = {
		.input	= crypto_netlink_rcv,
	};

	net->crypto_nlsk = netlink_kernel_create(net, NETLINK_CRYPTO, &cfg);
	return net->crypto_nlsk == NULL ? -ENOMEM : 0;
}

static void __net_exit crypto_netlink_exit(struct net *net)
{
	netlink_kernel_release(net->crypto_nlsk);
	net->crypto_nlsk = NULL;
}

static struct pernet_operations crypto_netlink_net_ops = {
	.init = crypto_netlink_init,
	.exit = crypto_netlink_exit,
};

static int __init crypto_user_init(void)
{
	return register_pernet_subsys(&crypto_netlink_net_ops);
}

static void __exit crypto_user_exit(void)
{
	unregister_pernet_subsys(&crypto_netlink_net_ops);
}

module_init(crypto_user_init);
module_exit(crypto_user_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Crypto userspace configuration API");
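/* Autoload on creation of a NETLINK_CRYPTO socket (PF_NETLINK = 16, NETLINK_CRYPTO = 21). */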
MODULE_ALIAS("net-pf-16-proto-21");