// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2002 USAGI/WIDE Project
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *	This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_checksum.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <linux/icmpv6.h>
#include <net/tcp.h>
#include <net/espintcp.h>
#include <net/inet6_hashtables.h>

#include <linux/highmem.h>

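/* Per-skb ESP state, overlaid on skb->cb: ->tmp keeps the crypto scratch
 * buffer alive across asynchronous AEAD completion so the done callbacks
 * can find and free it.
 */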
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations, the upper 32 bits of the sequence number
 * are placed at the front, if present, followed by the IV, the request
 * and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	len = seqihlen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

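/* Layout of the scratch buffer returned by esp_alloc_tmp(), as walked by
 * the helpers below (offsets are approximate since each region is aligned
 * individually):
 *
 *   +-------------------+  <- tmp
 *   | ESN extra / seqhi |  (seqihlen bytes, only when ESN is in use)
 *   +-------------------+  <- esp_tmp_iv(), aligned for the cipher
 *   | IV                |
 *   +-------------------+  <- esp_tmp_req(), aligned to tfm ctx alignment
 *   | aead_request +    |
 *   | crypto req ctx    |
 *   +-------------------+  <- esp_req_sg(), aligned for scatterlist
 *   | SG[nfrags]        |
 *   +-------------------+
 */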
static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(struct esp_output_extra);

	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}

#ifdef CONFIG_INET6_ESPINTCP
struct esp_tcp_sk {
	struct sock *sk;
	struct rcu_head rcu;
};

static void esp_free_tcp_sk(struct rcu_head *head)
{
	struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);

	sock_put(esk->sk);
	kfree(esk);
}

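/* Find the TCP socket that carries this ESP-in-TCP state.  The result of
 * the established-table lookup is cached in x->encap_sk; the cached socket
 * is dropped via an RCU callback (so concurrent readers stay safe) when
 * the negotiated ports change.  Called under rcu_read_lock().
 */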
static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct net *net = xs_net(x);
	struct esp_tcp_sk *esk;
	__be16 sport, dport;
	struct sock *nsk;
	struct sock *sk;

	sk = rcu_dereference(x->encap_sk);
	if (sk && sk->sk_state == TCP_ESTABLISHED)
		return sk;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (sk && sk == nsk) {
		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
		if (!esk) {
			spin_unlock_bh(&x->lock);
			return ERR_PTR(-ENOMEM);
		}
		RCU_INIT_POINTER(x->encap_sk, NULL);
		esk->sk = sk;
		call_rcu(&esk->rcu, esp_free_tcp_sk);
	}
	spin_unlock_bh(&x->lock);

	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, &x->id.daddr.in6,
					dport, &x->props.saddr.in6, ntohs(sport), 0, 0);
	if (!sk)
		return ERR_PTR(-ENOENT);

	if (!tcp_is_ulp_esp(sk)) {
		sock_put(sk);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&x->lock);
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (encap->encap_sport != sport ||
	    encap->encap_dport != dport) {
		sock_put(sk);
		sk = nsk ?: ERR_PTR(-EREMCHG);
	} else if (sk == nsk) {
		sock_put(sk);
	} else {
		rcu_assign_pointer(x->encap_sk, sk);
	}
	spin_unlock_bh(&x->lock);

	return sk;
}

static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
	struct sock *sk;
	int err;

	rcu_read_lock();

	sk = esp6_find_tcp_sk(x);
	err = PTR_ERR_OR_ZERO(sk);
	if (err)
		goto out;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		err = espintcp_queue_out(sk, skb);
	else
		err = espintcp_push_skb(sk, skb);
	bh_unlock_sock(sk);

out:
	rcu_read_unlock();
	return err;
}

static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
				   struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;

	return esp_output_tcp_finish(x, skb);
}

static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	local_bh_disable();
	err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
	local_bh_enable();

	/* EINPROGRESS just happens to do the right thing.  It
	 * actually means that the skb has been consumed and
	 * isn't coming back.
	 */
	return err ?: -EINPROGRESS;
}
#else
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	kfree_skb(skb);

	return -EOPNOTSUPP;
}
#endif

static void esp_output_encap_csum(struct sk_buff *skb)
{
	/* UDP encap with IPv6 requires a valid checksum */
	if (*skb_mac_header(skb) == IPPROTO_UDP) {
		struct udphdr *uh = udp_hdr(skb);
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int len = ntohs(uh->len);
		unsigned int offset = skb_transport_offset(skb);
		__wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);

		uh->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					    len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

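/* Completion callback for (possibly asynchronous) AEAD encryption: free
 * the scratch buffer, fix up the UDP encapsulation checksum and hand the
 * skb back to the offload path, the ESP-in-TCP output path or the regular
 * xfrm output path, as appropriate.
 */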
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);

	esp_output_encap_csum(skb);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		if (!err &&
		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
			esp_output_tail_tcp(x, skb);
		else
			xfrm_output_resume(skb->sk, skb, err);
	}
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_extra(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	void *tmp = ESP_SKB_CB(skb)->tmp;
	struct esp_output_extra *extra = esp_tmp_extra(tmp);

	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
				sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
					     struct xfrm_state *x,
					     struct ip_esp_hdr *esph,
					     struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}

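/* Build the UDP header for ESP-in-UDP encapsulation in front of the ESP
 * header.  For the (historical) NON-IKE variant, two zero words follow the
 * UDP header.  The checksum is left at zero here and filled in after
 * encryption by esp_output_encap_csum(), since IPv6 requires a valid UDP
 * checksum.
 */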
static struct ip_esp_hdr *esp6_output_udp_encap(struct sk_buff *skb,
						int encap_type,
						struct esp_info *esp,
						__be16 sport,
						__be16 dport)
{
	struct udphdr *uh;
	__be32 *udpdata32;
	unsigned int len;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > U16_MAX)
		return ERR_PTR(-EMSGSIZE);

	uh = (struct udphdr *)esp->esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(len);
	uh->check = 0;

	*skb_mac_header(skb) = IPPROTO_UDP;

	if (encap_type == UDP_ENCAP_ESPINUDP_NON_IKE) {
		udpdata32 = (__be32 *)(uh + 1);
		udpdata32[0] = udpdata32[1] = 0;
		return (struct ip_esp_hdr *)(udpdata32 + 2);
	}

	return (struct ip_esp_hdr *)(uh + 1);
}

#ifdef CONFIG_INET6_ESPINTCP
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	__be16 *lenp = (void *)esp->esph;
	struct ip_esp_hdr *esph;
	unsigned int len;
	struct sock *sk;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	rcu_read_lock();
	sk = esp6_find_tcp_sk(x);
	rcu_read_unlock();

	if (IS_ERR(sk))
		return ERR_CAST(sk);

	*lenp = htons(len);
	esph = (struct ip_esp_hdr *)(lenp + 1);

	return esph;
}
#else
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif

static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
			     struct esp_info *esp)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct ip_esp_hdr *esph;
	__be16 sport, dport;
	int encap_type;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	encap_type = encap->encap_type;
	spin_unlock_bh(&x->lock);

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		esph = esp6_output_udp_encap(skb, encap_type, esp, sport, dport);
		break;
	case TCP_ENCAP_ESPINTCP:
		esph = esp6_output_tcp_encap(x, skb, esp);
		break;
	}

	if (IS_ERR(esph))
		return PTR_ERR(esph);

	esp->esph = esph;

	return 0;
}

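/* Grow the skb by esp->tailen bytes so the ESP trailer can be appended.
 * Three strategies, cheapest first: reuse existing tailroom, attach a
 * freshly refilled page fragment (encrypting out of place), or fall back
 * to skb_cow_data().  Returns the number of scatterlist entries the
 * resulting skb needs (including one for the linear head), or a negative
 * error.
 */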
int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	if (x->encap) {
		int err = esp6_output_encap(x, skb, esp);

		if (err < 0)
			return err;
	}

	if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
	    ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
		goto cow;

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			tail = page_address(page) + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);

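/* Perform the actual AEAD encryption.  Builds the source (and, when
 * encrypting out of place, destination) scatterlists, seeds the IV from
 * the 64-bit sequence number and queues the request.  On -EINPROGRESS the
 * completion callback takes over; the scratch buffer stays reachable via
 * ESP_SKB_CB(skb)->tmp until then.
 */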
int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct aead_request *req;
	struct crypto_aead *aead;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_esn(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
		esp_output_encap_csum(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);

	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
		err = esp_output_tail_tcp(x, skb);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);

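/* Regular (non-GSO) ESP output.  Computes the trailer geometry: clen is
 * the ciphertext length (payload + TFC pad + the 2-byte pad-length/next-
 * header pair, rounded up to the cipher block size, itself rounded up to
 * the 4-byte alignment ESP requires), plen the resulting self-describing
 * padding, and tailen everything appended after the payload including
 * the ICV.
 */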
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp6_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp6_output_tail(x, skb, &esp);
}

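/* Strip the ESP trailer after decryption.  The last two bytes before the
 * ICV are the pad length and the next-header value:
 *
 *   | payload | TFC + padding | pad len | next hdr | ICV |
 *
 * Returns the next-header value on success, or -EINVAL if the pad length
 * is inconsistent with the packet size.
 */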
static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
	BUG_ON(ret);

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	pskb_trim(skb, skb->len - trimlen);

	ret = nexthdr[1];

out:
	return ret;
}

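/* Post-decryption processing, also used by the GRO/offload code paths:
 * strip the trailer, detect NAT-T peer address/port changes, remove the
 * ESP header and report the inner protocol back to the xfrm input layer.
 */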
int esp6_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int hdr_len = skb_network_header_len(skb);

	if (!xo || !(xo->flags & CRYPTO_DONE))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	if (x->encap) {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int offset = skb_network_offset(skb) + sizeof(*ip6h);
		struct xfrm_encap_tmpl *encap = x->encap;
		u8 nexthdr = ip6h->nexthdr;
		__be16 frag_off, source;
		struct udphdr *uh;
		struct tcphdr *th;

		offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
		if (offset == -1) {
			err = -EINVAL;
			goto out;
		}

		uh = (void *)(skb->data + offset);
		th = (void *)(skb->data + offset);
		hdr_len += offset;

		switch (x->encap->encap_type) {
		case TCP_ENCAP_ESPINTCP:
			source = th->source;
			break;
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			source = uh->source;
			break;
		default:
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    source ports.
		 */
		if (!ipv6_addr_equal(&ip6h->saddr, &x->props.saddr.in6) ||
		    source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			memcpy(&ipaddr.a6, &ip6h->saddr.s6_addr, sizeof(ipaddr.a6));
			km_new_mapping(x, &ipaddr, source);

			/* XXX: perhaps add an extra policy check here, to see
			 * if we should allow or reject a packet from a
			 * different source address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_postpull_rcsum(skb, skb_network_header(skb),
			   skb_network_header_len(skb));
	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_input_done2);

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp6_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct ip_esp_hdr *esph = skb_push(skb, 4);

		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}

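/* Regular ESP input.  Copies the skb data only when it is cloned or has a
 * fragment list, maps it into a scatterlist and decrypts in place;
 * esp6_input_done2() finishes the job, either synchronously or from the
 * async completion callback.
 */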
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

skip_cow:
	ret = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	ret = esp6_input_done2(skb, ret);

out:
	return ret;
}

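/* ICMPv6 error handler: only Packet Too Big and redirect messages are of
 * interest, and only if they match a known SA; update the path MTU or the
 * route accordingly.
 */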
static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}

static void esp6_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME) {
		NL_SET_ERR_MSG(extack, "Algorithm name is too long");
		return -ENAMETOOLONG;
	}

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

	return 0;

error:
	NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
	return err;
}

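/* Set up a classic encryption + authentication combination as a single
 * "authenc" (or "authencesn") AEAD transform.  The combined key is passed
 * as an rtattr blob, in the layout the authenc template expects:
 *
 *   | rtattr: CRYPTO_AUTHENC_KEYA_PARAM { enckeylen } | auth key | enc key |
 */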
static int esp_init_authenc(struct xfrm_state *x,
			    struct netlink_ext_ack *extack)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
			goto error;
		}
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
			goto error;
		}
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead)) {
		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
		goto error;
	}

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err) {
			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
			goto free_key;
		}
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

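/* Initialize an ESP SA: instantiate the AEAD transform and precompute
 * header_len/trailer_len, which depend on the encapsulation mode (tunnel
 * mode adds an outer IPv6 header, BEET may need a pseudo header) and on
 * any NAT-T encapsulation in use.
 */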
static int esp6_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead) {
		err = esp_init_aead(x, extack);
	} else if (x->ealg) {
		err = esp_init_authenc(x, extack);
	} else {
		NL_SET_ERR_MSG(extack, "ESP: AEAD or CRYPT must be provided");
		err = -EINVAL;
	}

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	}

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			NL_SET_ERR_MSG(extack, "Unsupported encapsulation type for ESP");
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
#ifdef CONFIG_INET6_ESPINTCP
		case TCP_ENCAP_ESPINTCP:
			/* only the length field, TCP encap is done by
			 * the socket
			 */
			x->props.header_len += 2;
			break;
#endif
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp6_type = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.input		= esp6_input,
	.output		= esp6_output,
};

static struct xfrm6_protocol esp6_protocol = {
	.handler	=	xfrm6_rcv,
	.input_handler	=	xfrm_input,
	.cb_handler	=	esp6_rcv_cb,
	.err_handler	=	esp6_err,
	.priority	=	0,
};

static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit esp6_fini(void)
{
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	xfrm_unregister_type(&esp6_type, AF_INET6);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);