xref: /openbmc/linux/net/ipv6/esp6.c (revision 93d90ad7)
/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *	This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <linux/icmpv6.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number are
 * placed at the front, if present. Followed by the IV, the request and finally
 * the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	len = seqihlen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

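/*
 * The helpers below carve the esp_alloc_tmp() scratch buffer into its
 * parts: the ESN high bits first (if any), then the IV, then the
 * (giv)crypt request, and finally the scatterlist array, each aligned
 * as required.
 */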
static inline __be32 *esp_tmp_seqhi(void *tmp)
{
	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_givcrypt_request *esp_tmp_givreq(
	struct crypto_aead *aead, u8 *iv)
{
	struct aead_givcrypt_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_givcrypt_set_tfm(req, aead);
	return req;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static inline struct scatterlist *esp_givreq_sg(
	struct crypto_aead *aead, struct aead_givcrypt_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	kfree(ESP_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}

static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_givcrypt_request *req;
	struct scatterlist *sg;
	struct scatterlist *asg;
	struct sk_buff *trailer;
	void *tmp;
	int blksize;
	int clen;
	int alen;
	int plen;
	int tfclen;
	int nfrags;
	int assoclen;
	int sglists;
	int seqhilen;
	u8 *iv;
	u8 *tail;
	__be32 *seqhi;

	/* skb is pure payload to encrypt */
	aead = x->data;
	alen = crypto_aead_authsize(aead);

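	/* Traffic Flow Confidentiality (RFC 4303): if the SA asks for TFC
	 * padding, grow the payload towards x->tfcpad, capped at what still
	 * fits the cached path MTU.
	 */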
	tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			tfclen = padto - skb->len;
	}
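	/* Pad the payload to the cipher block size (ESP needs at least
	 * 4-byte alignment); plen is the pad bytes plus the two trailer
	 * bytes for pad length and next header.
	 */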
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(skb->len + 2 + tfclen, blksize);
	plen = clen - skb->len - tfclen;

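	/* Make sure there is writable tail room for TFC pad, ESP trailer
	 * and ICV; skb_cow_data() returns the number of fragments the data
	 * now spans.
	 */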
	err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
	if (err < 0)
		goto error;
	nfrags = err;

	assoclen = sizeof(*esph);
	sglists = 1;
	seqhilen = 0;

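	/* With extended sequence numbers the associated data is
	 * SPI + seq-hi + seq-lo in three scatterlist entries; otherwise it
	 * is just the ESP header (SPI + seq-lo).
	 */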
	if (x->props.flags & XFRM_STATE_ESN) {
		sglists += 2;
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
	if (!tmp) {
		err = -ENOMEM;
		goto error;
	}

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_givreq(aead, iv);
	asg = esp_givreq_sg(aead, req);
	sg = asg + sglists;

	/* Fill padding: TFC pad bytes are zeroed, ESP pad bytes count up
	 * 1, 2, 3, ..., and the trailer ends with pad length and next header.
	 */
	tail = skb_tail_pointer(trailer);
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = *skb_mac_header(skb);
	pskb_put(skb, trailer, clen - skb->len + alen);

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg,
		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
		     clen + alen);

	if ((x->props.flags & XFRM_STATE_ESN)) {
		sg_init_table(asg, 3);
		sg_set_buf(asg, &esph->spi, sizeof(__be32));
		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
		sg_set_buf(asg + 1, seqhi, seqhilen);
		sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
	} else
		sg_init_one(asg, esph, sizeof(*esph));

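	/* The givencrypt interface generates the IV into esph->enc_data
	 * (seeded from the low sequence number), encrypts in place and
	 * appends the ICV; completion may be asynchronous, in which case
	 * esp_output_done() frees the scratch buffer and resumes output.
	 */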
	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
	aead_givcrypt_set_assoc(req, asg, assoclen);
	aead_givcrypt_set_giv(req, esph->enc_data,
			      XFRM_SKB_CB(skb)->seq.output.low);

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_givencrypt(req);
	if (err == -EINPROGRESS)
		goto error;

	if (err == -EBUSY)
		err = NET_XMIT_DROP;

	kfree(tmp);

error:
	return err;
}

static int esp_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct crypto_aead *aead = x->data;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
	int hdr_len = skb_network_header_len(skb);
	int padlen;
	u8 nexthdr[2];

	kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

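	/* The two bytes just in front of the ICV are the ESP trailer:
	 * pad length and next header.
	 */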
	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	err = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	/* ... check padding bits here. Silly. :-) */

	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	err = nexthdr[1];

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
	int nfrags;
	int assoclen;
	int sglists;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;
	struct scatterlist *asg;

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = -ENOMEM;

	assoclen = sizeof(*esph);
	sglists = 1;
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists += 2;
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	asg = esp_req_sg(aead, req);
	sg = asg + sglists;

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr *)skb->data;

	/* Get ivec. This can be wrong; check against other implementations. */
	iv = esph->enc_data;

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);

	if ((x->props.flags & XFRM_STATE_ESN)) {
		sg_init_table(asg, 3);
		sg_set_buf(asg, &esph->spi, sizeof(__be32));
		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
		sg_set_buf(asg + 1, seqhi, seqhilen);
		sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
	} else
		sg_init_one(asg, esph, sizeof(*esph));

	aead_request_set_callback(req, 0, esp_input_done, skb);
	aead_request_set_crypt(req, sg, sg, elen, iv);
	aead_request_set_assoc(req, asg, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	ret = esp_input_done2(skb, ret);

out:
	return ret;
}

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
{
	struct crypto_aead *aead = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	unsigned int net_adj;

	if (x->props.mode != XFRM_MODE_TUNNEL)
		net_adj = sizeof(struct ipv6hdr);
	else
		net_adj = 0;

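	/* Subtract ESP header/IV, ICV and (outside tunnel mode) the IPv6
	 * header, round down to a whole number of cipher blocks, add the
	 * IPv6 header back and reserve the two ESP trailer bytes.
	 */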
	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
		 net_adj) & ~(blksize - 1)) + net_adj - 2;
}

static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

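	/* Only Packet Too Big and redirects are of interest: look up the
	 * SA by destination address and SPI, then update the PMTU or the
	 * route for it.
	 */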
	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0);
	else
		ip6_update_pmtu(skb, net, info, 0, 0);
	xfrm_state_put(x);

	return 0;
}

static void esp6_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	int err;

	aead = crypto_alloc_aead(x->aead->alg_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}

static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (x->ealg == NULL)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "authencesn(%s,%s)",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "authenc(%s,%s)",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

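	/* The authenc/authencesn key blob is an rtattr carrying the
	 * encryption key length, followed by the raw authentication key
	 * and then the raw encryption key.
	 */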
	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %hu\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

static int esp6_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	if (x->encap)
		return -EINVAL;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

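	/* header_len covers the ESP header and IV; tunnel mode (and BEET
	 * with a non-IPv6 selector) also reserves room for the outer
	 * header prepended in front of it.
	 */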
	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	default:
		goto error;
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp6_type = {
	.description	= "ESP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.get_mtu	= esp6_get_mtu,
	.input		= esp6_input,
	.output		= esp6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,
};

static struct xfrm6_protocol esp6_protocol = {
	.handler	=	xfrm6_rcv,
	.cb_handler	=	esp6_rcv_cb,
	.err_handler	=	esp6_err,
	.priority	=	0,
};

static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit esp6_fini(void)
{
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);