xref: /openbmc/linux/net/ipv6/esp6.c (revision aac5987a)
/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *	This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <linux/icmpv6.h>

#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
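
/*
 * The skb control buffer doubles as ESP scratch state: the generic xfrm
 * cb must stay first so the xfrm core still finds its own data, while the
 * extra "tmp" slot remembers the buffer returned by esp_alloc_tmp() so
 * the asynchronous crypto completion handlers can locate and free it.
 */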

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number
 * are placed at the front, if present, followed by the IV, the request
 * and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	len = seqihlen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}
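
/*
 * Illustrative layout of the tmp buffer allocated above, assuming ESN
 * (4-byte seqhi) and a 16-byte crypto ctx alignment; exact offsets
 * depend on the transform:
 *
 *   +--------+----------+--------------+------------------------+
 *   | seqhi  |    IV    | aead_request | nfrags * scatterlist   |
 *   | 4 bytes|  ivsize  |  + reqsize   |                        |
 *   +--------+----------+--------------+------------------------+
 *
 * The esp_tmp_*() helpers below recompute these offsets on demand
 * rather than storing them anywhere.
 */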

static inline __be32 *esp_tmp_seqhi(void *tmp)
{
	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	__be32 *seqhi;
	struct crypto_aead *aead = x->data;
	int seqhilen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		seqhilen += sizeof(__be32);

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	void *tmp;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);
	xfrm_output_resume(skb, err);
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_seqhi(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
					     struct ip_esp_hdr *esph,
					     __be32 *seqhi)
{
	struct xfrm_state *x = skb_dst(skb)->xfrm;

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
		*seqhi = esph->spi;
		esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
	}

	esph->spi = x->id.spi;

	return esph;
}
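
/*
 * Illustrative view of the ESN shuffle done above (32-bit words):
 *
 *   on the wire:         ...X [ spi ][ seq_lo ] payload
 *   during crypto:    [ spi ][ seq_hi ][ seq_lo ] payload
 *
 * i.e. the header is shifted back 4 bytes so the high sequence bits sit
 * between the SPI and the low bits and get authenticated as associated
 * data.  The clobbered word X is saved in *seqhi and written back by
 * esp_output_restore_header() once encryption completes.
 */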

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}

static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
{
	int i;

	/* Fill padding... */
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	for (i = 0; i < plen - 2; i++)
		tail[i] = i + 1;
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = proto;
}
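
/*
 * Example trailer (illustrative): with tfclen = 0, plen = 4 and
 * proto = IPPROTO_TCP, the bytes appended are
 *
 *   01 02 | 02 | 06
 *    pad   pad   next
 *          len   header
 *
 * i.e. RFC 4303 self-describing padding 1, 2, ..., followed by the pad
 * length (plen - 2) and the next header value.
 */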

static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_request *req;
	struct scatterlist *sg, *dsg;
	struct sk_buff *trailer;
	struct page *page;
	void *tmp;
	int blksize;
	int clen;
	int alen;
	int plen;
	int ivlen;
	int tfclen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int tailen;
	u8 *iv;
	u8 *tail;
	u8 *vaddr;
	__be32 *seqhi;
	__be64 seqno;
	__u8 proto = *skb_mac_header(skb);

	/* skb is pure payload to encrypt */
	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(skb->len + 2 + tfclen, blksize);
	plen = clen - skb->len - tfclen;
	tailen = tfclen + plen + alen;
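
	/*
	 * Worked example (illustrative): skb->len = 100, blksize = 16,
	 * tfclen = 0, alen = 12:
	 *   clen   = ALIGN(100 + 2, 16) = 112  (encrypted payload length)
	 *   plen   = 112 - 100 = 12            (padding incl. 2 trailer bytes)
	 *   tailen = 0 + 12 + 12 = 24          (bytes to append to the skb)
	 */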

	assoclen = sizeof(*esph);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	*skb_mac_header(skb) = IPPROTO_ESP;
	esph = ip_esp_hdr(skb);

	if (!skb_cloned(skb)) {
		if (tailen <= skb_availroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			vaddr = kmap_atomic(page);

			tail = vaddr + pfrag->offset;

			esp_output_fill_trailer(tail, tfclen, plen, proto);

			kunmap_atomic(vaddr);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;
			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk)
				atomic_add(tailen, &sk->sk_wmem_alloc);

			skb_push(skb, -skb_network_offset(skb));

			esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
			esph->spi = x->id.spi;

			tmp = esp_alloc_tmp(aead, nfrags + 2, seqhilen);
			if (!tmp) {
				spin_unlock_bh(&x->lock);
				err = -ENOMEM;
				goto error;
			}
			seqhi = esp_tmp_seqhi(tmp);
			iv = esp_tmp_iv(aead, tmp, seqhilen);
			req = esp_tmp_req(aead, iv);
			sg = esp_req_sg(aead, req);
			dsg = &sg[nfrags];

			esph = esp_output_set_esn(skb, esph, seqhi);

			sg_init_table(sg, nfrags);
			skb_to_sgvec(skb, sg,
				     (unsigned char *)esph - skb->data,
				     assoclen + ivlen + clen + alen);

			allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				err = -ENOMEM;
				goto error;
			}

			skb_shinfo(skb)->nr_frags = 1;

			page = pfrag->page;
			get_page(page);
			/* replace page frags in skb with new page */
			__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
			pfrag->offset = pfrag->offset + allocsize;

			sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
			skb_to_sgvec(skb, dsg,
				     (unsigned char *)esph - skb->data,
				     assoclen + ivlen + clen + alen);

			spin_unlock_bh(&x->lock);

			goto skip_cow2;
		}
	}

cow:
	err = skb_cow_data(skb, tailen, &trailer);
	if (err < 0)
		goto error;
	nfrags = err;

	tail = skb_tail_pointer(trailer);
	esph = ip_esp_hdr(skb);

skip_cow:
	esp_output_fill_trailer(tail, tfclen, plen, proto);

	pskb_put(skb, trailer, clen - skb->len + alen);
	skb_push(skb, -skb_network_offset(skb));

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esph->spi = x->id.spi;

	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp) {
		err = -ENOMEM;
		goto error;
	}

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);
	dsg = sg;

	esph = esp_output_set_esn(skb, esph, seqhi);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg,
		     (unsigned char *)esph - skb->data,
		     assoclen + ivlen + clen + alen);

skip_cow2:
	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + clen, iv);
	aead_request_set_ad(req, assoclen);

	seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
			    ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));
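
	/*
	 * The IV is derived from the 64-bit sequence number.  For example
	 * (illustrative), with ivlen = 8 and a sequence number of 5 the IV
	 * becomes the big-endian bytes 00 00 00 00 00 00 00 05; with
	 * ivlen > 8 the leading bytes stay zero, and with ivlen < 8 only
	 * the least significant bytes of the sequence number are used.
	 */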

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -EBUSY:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);
	kfree(tmp);

error:
	return err;
}

static int esp_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct crypto_aead *aead = x->data;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
	int hdr_len = skb_network_header_len(skb);
	int padlen;
	u8 nexthdr[2];

	kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	err = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	/* ... check padding bits here. Silly. :-) */
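
	/*
	 * Example (illustrative): with alen = 12 and a decrypted trailer
	 * ending in ... 01 02 03 | 03 | 29 | <12-byte ICV>, nexthdr reads
	 * { 0x03, 0x29 }: padlen = 3, the inner protocol is IPPROTO_IPV6
	 * (41), and alen + padlen + 2 = 17 bytes are trimmed below.
	 */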

	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	err = nexthdr[1];

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)skb->data;

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = (void *)skb_push(skb, 4);
		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}

static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(*esph) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;

	if (!pskb_may_pull(skb, sizeof(*esph) + ivlen)) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	assoclen = sizeof(*esph);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

skip_cow:
	ret = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, 0, skb->len);

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	ret = esp_input_done2(skb, ret);

out:
	return ret;
}

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
{
	struct crypto_aead *aead = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	unsigned int net_adj;

	if (x->props.mode != XFRM_MODE_TUNNEL)
		net_adj = sizeof(struct ipv6hdr);
	else
		net_adj = 0;

	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
		 net_adj) & ~(blksize - 1)) + net_adj - 2;
}
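
/*
 * Worked example (illustrative): transport mode (net_adj = 40), mtu = 1500,
 * header_len = 16 (8-byte ESP header + 8-byte IV), authsize = 12,
 * blksize = 16:
 *
 *   ((1500 - 16 - 12 - 40) & ~15) + 40 - 2 = 1424 + 38 = 1462
 *
 * i.e. the largest payload that still fits a 1500-byte packet after
 * rounding down to the cipher block size; the "- 2" reserves the pad
 * length and next header trailer bytes.
 */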

static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}

static void esp6_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}
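
/*
 * For example (an assumed configuration, not normative): with
 * x->geniv = "seqiv" and an AEAD algorithm name of "rfc4106(gcm(aes))",
 * the template instantiated above would be "seqiv(rfc4106(gcm(aes)))".
 */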

static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %hu\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}
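
/*
 * For example (an assumed configuration, not normative): HMAC-SHA256
 * with CBC-AES and x->geniv = "echainiv" would instantiate
 * "echainiv(authenc(hmac(sha256),cbc(aes)))".  The key blob passed to
 * crypto_aead_setkey() above is laid out as
 *
 *   [ rtattr: enckeylen param ][ auth key ][ enc key ]
 *
 * which is the format the authenc() template expects.
 */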

static int esp6_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	if (x->encap)
		return -EINVAL;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	default:
		goto error;
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}
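
/*
 * Example of the resulting overhead (illustrative): tunnel mode with an
 * 8-byte IV, a 16-byte block cipher and a 12-byte ICV gives
 *   header_len  = 8 (ESP) + 8 (IV) + 40 (outer IPv6 header) = 56
 *   trailer_len = 16 + 1 + 12 = 29
 * which the xfrm core uses when sizing headroom and tailroom.
 */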

static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp6_type = {
	.description	= "ESP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.get_mtu	= esp6_get_mtu,
	.input		= esp6_input,
	.output		= esp6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,
};

static struct xfrm6_protocol esp6_protocol = {
	.handler	=	xfrm6_rcv,
	.cb_handler	=	esp6_rcv_cb,
	.err_handler	=	esp6_err,
	.priority	=	0,
};

static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit esp6_fini(void)
{
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);