/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *	This file is derived from net/ipv4/ah.c.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/hash.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/ah.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/xfrm.h>

#define IPV6HDR_BASELEN 8

struct tmp_ext {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	struct in6_addr saddr;
#endif
	struct in6_addr daddr;
	char hdrs[0];
};

struct ah_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate a single scratch buffer that the ah_tmp_*() and ah_req_sg()
 * helpers below carve up into:
 *
 *	[size bytes of saved header][ICV, aligned for the hash]
 *	[struct ahash_request + request context][nfrags scatterlist entries]
 */
static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
			  unsigned int size)
{
	unsigned int len;

	len = size + crypto_ahash_digestsize(ahash) +
	      (crypto_ahash_alignmask(ahash) &
	       ~(crypto_tfm_ctx_alignment() - 1));

	len = ALIGN(len, crypto_tfm_ctx_alignment());

	len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline struct tmp_ext *ah_tmp_ext(void *base)
{
	return base + IPV6HDR_BASELEN;
}

static inline u8 *ah_tmp_auth(u8 *tmp, unsigned int offset)
{
	return tmp + offset;
}

static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp,
			     unsigned int offset)
{
	return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1);
}

static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
					       u8 *icv)
{
	struct ahash_request *req;

	req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
				crypto_tfm_ctx_alignment());

	ahash_request_set_tfm(req, ahash);

	return req;
}

static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
					    struct ahash_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_ahash_reqsize(ahash),
			     __alignof__(struct scatterlist));
}
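
/*
 * Zero out the data of every option whose type has the "may change
 * en route" bit (0x20) set, so that mutable options do not contribute
 * to the ICV.  Returns false if the option area is malformed.
 */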
static bool zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
{
	u8 *opt = (u8 *)opthdr;
	int len = ipv6_optlen(opthdr);
	int off = 0;
	int optlen = 0;

	off += 2;
	len -= 2;

	while (len > 0) {

		switch (opt[off]) {

		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		default:
			if (len < 2)
				goto bad;
			optlen = opt[off+1]+2;
			if (len < optlen)
				goto bad;
			if (opt[off] & 0x20)
				memset(&opt[off+2], 0, opt[off+1]);
			break;
		}

		off += optlen;
		len -= optlen;
	}
	if (len == 0)
		return true;

bad:
	return false;
}

#if IS_ENABLED(CONFIG_IPV6_MIP6)
/**
 * ipv6_rearrange_destopt - rearrange IPv6 destination options header
 * @iph: IPv6 header
 * @destopt: destination options header
 */
static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt)
{
	u8 *opt = (u8 *)destopt;
	int len = ipv6_optlen(destopt);
	int off = 0;
	int optlen = 0;

	off += 2;
	len -= 2;

	while (len > 0) {

		switch (opt[off]) {

		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		default:
			if (len < 2)
				goto bad;
			optlen = opt[off+1]+2;
			if (len < optlen)
				goto bad;

			/* Rearrange the source address in @iph and the
			 * addresses in home address option for final source.
			 * See 11.3.2 of RFC 3775 for details.
			 */
			if (opt[off] == IPV6_TLV_HAO) {
				struct in6_addr final_addr;
				struct ipv6_destopt_hao *hao;

				hao = (struct ipv6_destopt_hao *)&opt[off];
				if (hao->length != sizeof(hao->addr)) {
					net_warn_ratelimited("destopt hao: invalid header length: %u\n",
							     hao->length);
					goto bad;
				}
				final_addr = hao->addr;
				hao->addr = iph->saddr;
				iph->saddr = final_addr;
			}
			break;
		}

		off += optlen;
		len -= optlen;
	}
	/* Note: ok if len == 0 */
bad:
	return;
}
#else
static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt) {}
#endif

/**
 * ipv6_rearrange_rthdr - rearrange IPv6 routing header
 * @iph: IPv6 header
 * @rthdr: routing header
 *
 * Rearrange the destination address in @iph and the addresses in @rthdr
 * so that they appear in the order they will at the final destination.
 * See Appendix A2 of RFC 2402 for details.
 */
static void ipv6_rearrange_rthdr(struct ipv6hdr *iph, struct ipv6_rt_hdr *rthdr)
{
	int segments, segments_left;
	struct in6_addr *addrs;
	struct in6_addr final_addr;

	segments_left = rthdr->segments_left;
	if (segments_left == 0)
		return;
	rthdr->segments_left = 0;

	/* The value of rthdr->hdrlen has been verified either by the system
	 * call if it is locally generated, or by ipv6_rthdr_rcv() for incoming
	 * packets.  So we can assume that it is even and that segments is
	 * greater than or equal to segments_left.
	 *
	 * For the same reason we can assume that this option is of type 0.
	 */
	segments = rthdr->hdrlen >> 1;

	addrs = ((struct rt0_hdr *)rthdr)->addr;
	final_addr = addrs[segments - 1];

	addrs += segments - segments_left;
	memmove(addrs + 1, addrs, (segments_left - 1) * sizeof(*addrs));

	addrs[0] = iph->daddr;
	iph->daddr = final_addr;
}

static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir)
{
	union {
		struct ipv6hdr *iph;
		struct ipv6_opt_hdr *opth;
		struct ipv6_rt_hdr *rth;
		char *raw;
	} exthdr = { .iph = iph };
	char *end = exthdr.raw + len;
	int nexthdr = iph->nexthdr;

	exthdr.iph++;

	while (exthdr.raw < end) {
		switch (nexthdr) {
		case NEXTHDR_DEST:
			if (dir == XFRM_POLICY_OUT)
				ipv6_rearrange_destopt(iph, exthdr.opth);
			/* fall through */
		case NEXTHDR_HOP:
			if (!zero_out_mutable_opts(exthdr.opth)) {
				LIMIT_NETDEBUG(
					KERN_WARNING "overrun %sopts\n",
					nexthdr == NEXTHDR_HOP ?
						"hop" : "dest");
				return -EINVAL;
			}
			break;

		case NEXTHDR_ROUTING:
			ipv6_rearrange_rthdr(iph, exthdr.rth);
			break;

		default:
			return 0;
		}

		nexthdr = exthdr.opth->nexthdr;
		exthdr.raw += ipv6_optlen(exthdr.opth);
	}

	return 0;
}
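
/*
 * Completion callback for the asynchronous ICV computation started in
 * ah6_output().  Copies the computed ICV into the AH header, restores
 * the header fields saved in the scratch buffer, frees the buffer and
 * resumes xfrm output processing.
 */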
static void ah6_output_done(struct crypto_async_request *base, int err)
{
	int extlen;
	u8 *iph_base;
	u8 *icv;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	struct ah_data *ahp = x->data;
	struct ipv6hdr *top_iph = ipv6_hdr(skb);
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	struct tmp_ext *iph_ext;

	extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
	if (extlen)
		extlen += sizeof(*iph_ext);

	iph_base = AH_SKB_CB(skb)->tmp;
	iph_ext = ah_tmp_ext(iph_base);
	icv = ah_tmp_icv(ahp->ahash, iph_ext, extlen);

	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
	memcpy(top_iph, iph_base, IPV6HDR_BASELEN);

	if (extlen) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		memcpy(&top_iph->saddr, iph_ext, extlen);
#else
		memcpy(&top_iph->daddr, iph_ext, extlen);
#endif
	}

	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}
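
/*
 * Build the AH header on an outbound packet: save the header fields
 * that must be restored after hashing, zero the mutable fields
 * (priority, flow label, hop limit and mutable options), fill in the
 * AH header and compute the ICV over the whole packet.  May complete
 * asynchronously via ah6_output_done().
 */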
static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	int nfrags;
	int extlen;
	u8 *iph_base;
	u8 *icv;
	u8 nexthdr;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct ipv6hdr *top_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;
	struct tmp_ext *iph_ext;

	ahp = x->data;
	ahash = ahp->ahash;

	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;
	nfrags = err;

	skb_push(skb, -skb_network_offset(skb));
	extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
	if (extlen)
		extlen += sizeof(*iph_ext);

	err = -ENOMEM;
	iph_base = ah_alloc_tmp(ahash, nfrags, IPV6HDR_BASELEN + extlen);
	if (!iph_base)
		goto out;

	iph_ext = ah_tmp_ext(iph_base);
	icv = ah_tmp_icv(ahash, iph_ext, extlen);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);

	ah = ip_auth_hdr(skb);
	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	top_iph = ipv6_hdr(skb);
	top_iph->payload_len = htons(skb->len - sizeof(*top_iph));

	nexthdr = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_AH;

	/* When there are no extension headers, we only need to save the first
	 * 8 bytes of the base IP header.
	 */
	memcpy(iph_base, top_iph, IPV6HDR_BASELEN);

	if (extlen) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		memcpy(iph_ext, &top_iph->saddr, extlen);
#else
		memcpy(iph_ext, &top_iph->daddr, extlen);
#endif
		err = ipv6_clear_mutable_options(top_iph,
						 extlen - sizeof(*iph_ext) +
						 sizeof(*top_iph),
						 XFRM_POLICY_OUT);
		if (err)
			goto out_free;
	}

	ah->nexthdr = nexthdr;

	top_iph->priority = 0;
	top_iph->flow_lbl[0] = 0;
	top_iph->flow_lbl[1] = 0;
	top_iph->flow_lbl[2] = 0;
	top_iph->hop_limit = 0;

	ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;

	ah->reserved = 0;
	ah->spi = x->id.spi;
	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, 0, skb->len);

	ahash_request_set_crypt(req, sg, icv, skb->len);
	ahash_request_set_callback(req, 0, ah6_output_done, skb);

	AH_SKB_CB(skb)->tmp = iph_base;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		if (err == -EBUSY)
			err = NET_XMIT_DROP;
		goto out_free;
	}

	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
	memcpy(top_iph, iph_base, IPV6HDR_BASELEN);

	if (extlen) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		memcpy(&top_iph->saddr, iph_ext, extlen);
#else
		memcpy(&top_iph->daddr, iph_ext, extlen);
#endif
	}

out_free:
	kfree(iph_base);
out:
	return err;
}
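
/*
 * Completion callback for the asynchronous ICV computation started in
 * ah6_input().  Compares the computed ICV with the one received in the
 * AH header, restores the saved headers, strips the AH header and
 * resumes xfrm input with the next header value (or -EBADMSG on
 * mismatch).
 */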
static void ah6_input_done(struct crypto_async_request *base, int err)
{
	u8 *auth_data;
	u8 *icv;
	u8 *work_iph;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ah_data *ahp = x->data;
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int hdr_len = skb_network_header_len(skb);
	int ah_hlen = (ah->hdrlen + 2) << 2;

	work_iph = AH_SKB_CB(skb)->tmp;
	auth_data = ah_tmp_auth(work_iph, hdr_len);
	icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);

	err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
	if (err)
		goto out;

	err = ah->nexthdr;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, hdr_len);
	__skb_pull(skb, ah_hlen + hdr_len);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);
out:
	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_input_resume(skb, err);
}

static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	/*
	 * Before AH processing:
	 * [IPv6][Ext1][Ext2][AH][Dest][Payload]
	 * |<-------------->| hdr_len
	 *
	 * To erase AH:
	 * Keep a copy of the cleared headers.  After AH processing,
	 * advance skb->network_header by the AH header length, then
	 * copy back the saved hdr_len bytes.  If a destination options
	 * header follows AH, it ends up after [Ext2].
	 *
	 * |<>|[IPv6][Ext1][Ext2][Dest][Payload]
	 * After processing there is an offset of the AH header length
	 * in front of the IPv6 header.
	 */

	u8 *auth_data;
	u8 *icv;
	u8 *work_iph;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct ip_auth_hdr *ah;
	struct ipv6hdr *ip6h;
	struct ah_data *ahp;
	u16 hdr_len;
	u16 ah_hlen;
	int nexthdr;
	int nfrags;
	int err = -ENOMEM;

	if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
		goto out;

	/* We are going to _remove_ AH header to keep sockets happy,
	 * so... Later this can change. */
	if (skb_unclone(skb, GFP_ATOMIC))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	hdr_len = skb_network_header_len(skb);
	ah = (struct ip_auth_hdr *)skb->data;
	ahp = x->data;
	ahash = ahp->ahash;

	nexthdr = ah->nexthdr;
	ah_hlen = (ah->hdrlen + 2) << 2;

	if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
	    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
		goto out;

	if (!pskb_may_pull(skb, ah_hlen))
		goto out;

	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;
	nfrags = err;

	ah = (struct ip_auth_hdr *)skb->data;
	ip6h = ipv6_hdr(skb);

	skb_push(skb, hdr_len);

	work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len);
	if (!work_iph)
		goto out;

	auth_data = ah_tmp_auth(work_iph, hdr_len);
	icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);

	memcpy(work_iph, ip6h, hdr_len);
	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	if (ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN))
		goto out_free;

	ip6h->priority = 0;
	ip6h->flow_lbl[0] = 0;
	ip6h->flow_lbl[1] = 0;
	ip6h->flow_lbl[2] = 0;
	ip6h->hop_limit = 0;

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, 0, skb->len);

	ahash_request_set_crypt(req, sg, icv, skb->len);
	ahash_request_set_callback(req, 0, ah6_input_done, skb);

	AH_SKB_CB(skb)->tmp = work_iph;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		goto out_free;
	}

	err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
	if (err)
		goto out_free;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, hdr_len);
	__skb_pull(skb, ah_hlen + hdr_len);

	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	err = nexthdr;

out_free:
	kfree(work_iph);
out:
	return err;
}
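
/*
 * ICMPv6 error handler for AH.  Only Packet Too Big and redirect
 * messages are acted upon: the SA is looked up by destination address
 * and SPI, and the path MTU or the route is updated accordingly.
 */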
static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return;

	x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr,
			      ah->spi, IPPROTO_AH, AF_INET6);
	if (!x)
		return;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0);
	else
		ip6_update_pmtu(skb, net, info, 0, 0);
	xfrm_state_put(x);
}
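
/*
 * Initialize AH state: allocate the hash transform named by the
 * authentication algorithm, set its key, check the ICV sizes against
 * the xfrm algorithm description and compute the AH header length for
 * the configured mode.
 */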
static int ah6_init_state(struct xfrm_state *x)
{
	struct ah_data *ahp = NULL;
	struct xfrm_algo_desc *aalg_desc;
	struct crypto_ahash *ahash;

	if (!x->aalg)
		goto error;

	if (x->encap)
		goto error;

	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
	if (ahp == NULL)
		return -ENOMEM;

	ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
	if (IS_ERR(ahash))
		goto error;

	ahp->ahash = ahash;
	if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
				(x->aalg->alg_key_len + 7) / 8))
		goto error;

	/*
	 * Lookup the algorithm description maintained by xfrm_algo,
	 * verify crypto transform properties, and store information
	 * we need for AH processing.  This lookup cannot fail here
	 * after a successful crypto_alloc_ahash().
	 */
	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
	BUG_ON(!aalg_desc);

	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
	    crypto_ahash_digestsize(ahash)) {
		pr_info("AH: %s digestsize %u != %hu\n",
			x->aalg->alg_name, crypto_ahash_digestsize(ahash),
			aalg_desc->uinfo.auth.icv_fullbits/8);
		goto error;
	}

	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
	ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;

	BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);

	x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
					  ahp->icv_trunc_len);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	default:
		goto error;
	}
	x->data = ahp;

	return 0;

error:
	if (ahp) {
		crypto_free_ahash(ahp->ahash);
		kfree(ahp);
	}
	return -EINVAL;
}

static void ah6_destroy(struct xfrm_state *x)
{
	struct ah_data *ahp = x->data;

	if (!ahp)
		return;

	crypto_free_ahash(ahp->ahash);
	kfree(ahp);
}

static const struct xfrm_type ah6_type = {
	.description	= "AH6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_AH,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= ah6_init_state,
	.destructor	= ah6_destroy,
	.input		= ah6_input,
	.output		= ah6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,
};

static const struct inet6_protocol ah6_protocol = {
	.handler	= xfrm6_rcv,
	.err_handler	= ah6_err,
	.flags		= INET6_PROTO_NOPOLICY,
};

static int __init ah6_init(void)
{
	if (xfrm_register_type(&ah6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}

	if (inet6_add_protocol(&ah6_protocol, IPPROTO_AH) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&ah6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit ah6_fini(void)
{
	if (inet6_del_protocol(&ah6_protocol, IPPROTO_AH) < 0)
		pr_info("%s: can't remove protocol\n", __func__);

	if (xfrm_unregister_type(&ah6_type, AF_INET6) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(ah6_init);
module_exit(ah6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_AH);