xref: /openbmc/linux/net/ipv6/esp6.c (revision 4e5e4705)
/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *	This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <linux/icmpv6.h>

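/*
 * Per-skb ESP state kept in skb->cb: the generic xfrm control block plus a
 * pointer to the scratch buffer allocated by esp_alloc_tmp(), so it can be
 * freed from the asynchronous crypto completion callbacks.
 */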
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number are
 * placed at the front, if present, followed by the IV, the request and
 * finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	len = seqihlen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}
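
/*
 * Illustrative layout of the esp_alloc_tmp() buffer (a sketch; the exact
 * padding between regions depends on the algorithm's alignmask and on
 * crypto_tfm_ctx_alignment() for the running kernel):
 *
 *	+---------------+------+---------------------------+--------------+
 *	| seqhi (0/4 B) | IV   | aead_givcrypt_request +   | nfrags SG    |
 *	| high seq bits |      | crypto_aead_reqsize() ctx | entries      |
 *	+---------------+------+---------------------------+--------------+
 *
 * The esp_tmp_*() helpers below recompute each region's address from the
 * base pointer using the same alignment rules.
 */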

static inline __be32 *esp_tmp_seqhi(void *tmp)
{
	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_givcrypt_request *esp_tmp_givreq(
	struct crypto_aead *aead, u8 *iv)
{
	struct aead_givcrypt_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_givcrypt_set_tfm(req, aead);
	return req;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static inline struct scatterlist *esp_givreq_sg(
	struct crypto_aead *aead, struct aead_givcrypt_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	kfree(ESP_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}

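/*
 * Transmit path. The skb arrives as pure payload; we grow the trailer, fill
 * in self-describing RFC 4303 padding, prepend the ESP header and hand the
 * packet to the AEAD "givencrypt" operation, which encrypts in place and
 * generates the IV. Resulting wire layout:
 *
 *	[ESP hdr: SPI, seq#][IV][payload][TFC pad][pad][pad len][next hdr][ICV]
 *
 * Worked padding example (a sketch, assuming blksize 4, skb->len 5 and no
 * TFC padding): clen = ALIGN(5 + 2, 4) = 8, so plen = 3 and the trailer
 * bytes are 01 (pad), 01 (pad length) and the next-header value.
 */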
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_givcrypt_request *req;
	struct scatterlist *sg;
	struct scatterlist *asg;
	struct sk_buff *trailer;
	void *tmp;
	int blksize;
	int clen;
	int alen;
	int plen;
	int tfclen;
	int nfrags;
	int assoclen;
	int sglists;
	int seqhilen;
	u8 *iv;
	u8 *tail;
	__be32 *seqhi;

	/* skb is pure payload to encrypt */
	aead = x->data;
	alen = crypto_aead_authsize(aead);

	tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(skb->len + 2 + tfclen, blksize);
	plen = clen - skb->len - tfclen;

	err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
	if (err < 0)
		goto error;
	nfrags = err;

	assoclen = sizeof(*esph);
	sglists = 1;
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists += 2;
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
	if (!tmp) {
		err = -ENOMEM;
		goto error;
	}

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_givreq(aead, iv);
	asg = esp_givreq_sg(aead, req);
	sg = asg + sglists;

	/* Fill padding... */
	tail = skb_tail_pointer(trailer);
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	{
		int i;

		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	}
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = *skb_mac_header(skb);
	pskb_put(skb, trailer, clen - skb->len + alen);

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg,
		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
		     clen + alen);

	if (x->props.flags & XFRM_STATE_ESN) {
		sg_init_table(asg, 3);
		sg_set_buf(asg, &esph->spi, sizeof(__be32));
		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
		sg_set_buf(asg + 1, seqhi, seqhilen);
		sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
	} else {
		sg_init_one(asg, esph, sizeof(*esph));
	}

	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
	aead_givcrypt_set_assoc(req, asg, assoclen);
	aead_givcrypt_set_giv(req, esph->enc_data,
			      XFRM_SKB_CB(skb)->seq.output.low);

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_givencrypt(req);
	if (err == -EINPROGRESS)
		goto error;

	if (err == -EBUSY)
		err = NET_XMIT_DROP;

	kfree(tmp);

error:
	return err;
}

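/*
 * Post-decryption processing, shared by the synchronous and asynchronous
 * completion paths: free the scratch buffer, validate the pad length
 * against the encrypted payload size, trim the padding and ICV, and return
 * the inner next-header value (or a negative error to drop the packet).
 */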
static int esp_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct crypto_aead *aead = x->data;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
	int hdr_len = skb_network_header_len(skb);
	int padlen;
	u8 nexthdr[2];

	kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	err = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage "
			       "padlen=%d, elen=%d\n", padlen + 2, elen - alen);
		goto out;
	}

	/* ... check padding bits here. Silly. :-) */

	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	err = nexthdr[1];

	/* RFC4303: drop dummy packets (next header 59, IPPROTO_NONE)
	 * silently; returning -EINVAL makes the caller discard the skb
	 * without signalling an error back to the sender.
	 */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

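/*
 * Receive path: map the ESP header, IV and encrypted payload into a
 * scatterlist, queue an AEAD decrypt, and finish up in esp_input_done2()
 * either synchronously or from the async completion callback.
 */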
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
	int nfrags;
	int assoclen;
	int sglists;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;
	struct scatterlist *asg;

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = -ENOMEM;

	assoclen = sizeof(*esph);
	sglists = 1;
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists += 2;
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	asg = esp_req_sg(aead, req);
	sg = asg + sglists;

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr *)skb->data;

	/* Get ivec. This may be wrong; check against other implementations. */
	iv = esph->enc_data;

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);

	if (x->props.flags & XFRM_STATE_ESN) {
		sg_init_table(asg, 3);
		sg_set_buf(asg, &esph->spi, sizeof(__be32));
		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
		sg_set_buf(asg + 1, seqhi, seqhilen);
		sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
	} else {
		sg_init_one(asg, esph, sizeof(*esph));
	}

	aead_request_set_callback(req, 0, esp_input_done, skb);
	aead_request_set_crypt(req, sg, sg, elen, iv);
	aead_request_set_assoc(req, asg, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	ret = esp_input_done2(skb, ret);

out:
	return ret;
}

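/*
 * Report the largest payload that fits in the given MTU after ESP overhead.
 * Worked example (a sketch, assuming transport mode, an MTU of 1500, an
 * 8-byte IV, a 12-byte ICV and a 16-byte block size): net_adj = 40,
 * header_len = 8 + 8 = 16, so ((1500 - 16 - 12 - 40) & ~15) + 40 - 2 = 1462.
 */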
static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
{
	struct crypto_aead *aead = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	unsigned int net_adj;

	if (x->props.mode != XFRM_MODE_TUNNEL)
		net_adj = sizeof(struct ipv6hdr);
	else
		net_adj = 0;

	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
		 net_adj) & ~(blksize - 1)) + net_adj - 2;
}

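/*
 * ICMPv6 error handler: on Packet Too Big or a redirect for an ESP packet
 * we originated, look up the matching state by destination address and SPI
 * and update the cached route/PMTU accordingly.
 */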
static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		     u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0);
	else
		ip6_update_pmtu(skb, net, info, 0, 0);
	xfrm_state_put(x);
}

static void esp6_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

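/*
 * Set up a genuine AEAD transform (e.g. rfc4106(gcm(aes))) from the
 * negotiated algorithm name, key and ICV length.
 */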
static int esp_init_aead(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	int err;

	aead = crypto_alloc_aead(x->aead->alg_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}

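/*
 * Build a composite authenc(...) or authencesn(...) AEAD from separate
 * encryption and authentication algorithms. The key blob handed to
 * crypto_aead_setkey() is laid out as built below:
 *
 *	[rtattr CRYPTO_AUTHENC_KEYA_PARAM: enckeylen (big endian)]
 *	[authentication key: (aalg->alg_key_len + 7) / 8 bytes]
 *	[encryption key:     (ealg->alg_key_len + 7) / 8 bytes]
 */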
static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (x->ealg == NULL)
		goto error;

	err = -ENAMETOOLONG;

	if (x->props.flags & XFRM_STATE_ESN) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "authencesn(%s,%s)",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "authenc(%s,%s)",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_aead_authsize(aead),
				 aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

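/*
 * Per-state initialization: instantiate the AEAD transform, then derive the
 * per-mode header and trailer reservations used for MTU and headroom
 * calculations.
 */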
static int esp6_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	if (x->encap)
		return -EINVAL;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) -
						sizeof(struct iphdr));
		break;
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	default:
		/* Unknown mode: fail instead of silently returning the
		 * (successful) status left over from transform setup.
		 */
		err = -EINVAL;
		goto error;
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static const struct xfrm_type esp6_type = {
	.description	= "ESP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.get_mtu	= esp6_get_mtu,
	.input		= esp6_input,
	.output		= esp6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,
};

static const struct inet6_protocol esp6_protocol = {
	.handler	=	xfrm6_rcv,
	.err_handler	=	esp6_err,
	.flags		=	INET6_PROTO_NOPOLICY,
};

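/*
 * Module init/exit: register the ESP type with the xfrm framework and hook
 * protocol number 50 (IPPROTO_ESP) into the IPv6 input path, unwinding the
 * xfrm registration if the protocol hook fails.
 */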
static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (inet6_add_protocol(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit esp6_fini(void)
{
	if (inet6_del_protocol(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);