#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
{
	unsigned int len;

	len = seqhilen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}
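/*
 * Resulting layout of the scratch buffer (illustrative sketch, assuming a
 * non-empty IV and an ESN sequence-hi word):
 *
 *	tmp -> [seqhi][pad][IV][pad][aead/givcrypt request + ctx][pad][SG array]
 *
 * The esp_tmp_*() helpers below recover the individual pieces from the one
 * base pointer, so callers keep only a single allocation in ESP_SKB_CB.
 */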
static inline __be32 *esp_tmp_seqhi(void *tmp)
{
	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_givcrypt_request *esp_tmp_givreq(
	struct crypto_aead *aead, u8 *iv)
{
	struct aead_givcrypt_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_givcrypt_set_tfm(req, aead);
	return req;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static inline struct scatterlist *esp_givreq_sg(
	struct crypto_aead *aead, struct aead_givcrypt_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	kfree(ESP_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}

static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_givcrypt_request *req;
	struct scatterlist *sg;
	struct scatterlist *asg;
	struct esp_data *esp;
	struct sk_buff *trailer;
	void *tmp;
	u8 *iv;
	u8 *tail;
	int blksize;
	int clen;
	int alen;
	int plen;
	int tfclen;
	int nfrags;
	int assoclen;
	int sglists;
	int seqhilen;
	int i;
	__be32 *seqhi;

	/* skb is pure payload to encrypt */

	err = -ENOMEM;

	esp = x->data;
	aead = esp->aead;
	alen = crypto_aead_authsize(aead);

	tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(skb->len + 2 + tfclen, blksize);
	if (esp->padlen)
		clen = ALIGN(clen, esp->padlen);
	plen = clen - skb->len - tfclen;

	err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
	if (err < 0)
		goto error;
	nfrags = err;

	assoclen = sizeof(*esph);
	sglists = 1;
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists += 2;
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
	if (!tmp)
		goto error;

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_givreq(aead, iv);
	asg = esp_givreq_sg(aead, req);
	sg = asg + sglists;

	/* Fill padding with the RFC 4303 default pattern 1, 2, 3, ... */
	tail = skb_tail_pointer(trailer);
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	for (i = 0; i < plen - 2; i++)
		tail[i] = i + 1;
	tail[plen - 2] = plen - 2;			/* pad length */
	tail[plen - 1] = *skb_mac_header(skb);		/* next header */
	pskb_put(skb, trailer, clen - skb->len + alen);

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* this is non-NULL only with UDP Encapsulation */
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh;
		__be32 *udpdata32;
		__be16 sport, dport;
		int encap_type;

		spin_lock_bh(&x->lock);
		sport = encap->encap_sport;
		dport = encap->encap_dport;
		encap_type = encap->encap_type;
		spin_unlock_bh(&x->lock);

		uh = (struct udphdr *)esph;
		uh->source = sport;
		uh->dest = dport;
		uh->len = htons(skb->len - skb_transport_offset(skb));
		uh->check = 0;

		switch (encap_type) {
		default:
		case UDP_ENCAP_ESPINUDP:
			esph = (struct ip_esp_hdr *)(uh + 1);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			udpdata32 = (__be32 *)(uh + 1);
			udpdata32[0] = udpdata32[1] = 0;
			esph = (struct ip_esp_hdr *)(udpdata32 + 2);
			break;
		}

		*skb_mac_header(skb) = IPPROTO_UDP;
	}

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg,
		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
		     clen + alen);

	if (x->props.flags & XFRM_STATE_ESN) {
		sg_init_table(asg, 3);
		sg_set_buf(asg, &esph->spi, sizeof(__be32));
		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
		sg_set_buf(asg + 1, seqhi, seqhilen);
		sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
	} else
		sg_init_one(asg, esph, sizeof(*esph));

	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
	aead_givcrypt_set_assoc(req, asg, assoclen);
	aead_givcrypt_set_giv(req, esph->enc_data,
			      XFRM_SKB_CB(skb)->seq.output.low);

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_givencrypt(req);
	if (err == -EINPROGRESS)
		goto error;

	if (err == -EBUSY)
		err = NET_XMIT_DROP;

	kfree(tmp);

error:
	return err;
}
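/*
 * For orientation: on the wire (RFC 4303) the datagram that esp_output()
 * builds, and that the input path below unwinds, is laid out as
 *
 *	[SPI][seq_no][IV][payload][TFC pad][pad 1,2,...][pad len][next hdr][ICV]
 *
 * where "alen" in this file is the ICV length and "padlen" the value of
 * the self-describing pad-length byte.
 */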
static int esp_input_done2(struct sk_buff *skb, int err)
{
	struct iphdr *iph;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct esp_data *esp = x->data;
	struct crypto_aead *aead = esp->aead;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
	int ihl;
	u8 nexthdr[2];
	int padlen;

	kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	err = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen)
		goto out;

	/* ... check padding bits here. Silly. :-) */

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    uh->source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, uh->source);

			/* XXX: perhaps add an extra policy check here, to
			 * see if we should allow or reject a packet from a
			 * different source address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	skb_set_transport_header(skb, -ihl);

	err = nexthdr[1];

	/* RFC4303: drop dummy packets (next header 59) without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
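/*
 * The "check padding bits" step above is deliberately skipped.  A strict
 * RFC 4303 receiver could verify the default pad pattern (1, 2, 3, ...)
 * that esp_output() generates; an illustrative sketch, not wired in:
 *
 *	u8 pad[255];
 *	int i;
 *
 *	if (skb_copy_bits(skb, skb->len - alen - 2 - padlen, pad, padlen))
 *		BUG();
 *	for (i = 0; i < padlen; i++)
 *		if (pad[i] != i + 1)
 *			goto out;	(err is already -EINVAL there)
 */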
static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct crypto_aead *aead = esp->aead;
	struct aead_request *req;
	struct sk_buff *trailer;
	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
	int nfrags;
	int assoclen;
	int sglists;
	int seqhilen;
	__be32 *seqhi;
	void *tmp;
	u8 *iv;
	struct scatterlist *sg;
	struct scatterlist *asg;
	int err = -EINVAL;

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		goto out;

	if (elen <= 0)
		goto out;

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;
	nfrags = err;

	assoclen = sizeof(*esph);
	sglists = 1;
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists += 2;
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	err = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	asg = esp_req_sg(aead, req);
	sg = asg + sglists;

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr *)skb->data;

	/* Get ivec. This can be wrong, check against other implementations. */
	iv = esph->enc_data;

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);

	if (x->props.flags & XFRM_STATE_ESN) {
		sg_init_table(asg, 3);
		sg_set_buf(asg, &esph->spi, sizeof(__be32));
		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
		sg_set_buf(asg + 1, seqhi, seqhilen);
		sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
	} else
		sg_init_one(asg, esph, sizeof(*esph));

	aead_request_set_callback(req, 0, esp_input_done, skb);
	aead_request_set_crypt(req, sg, sg, elen, iv);
	aead_request_set_assoc(req, asg, assoclen);

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		goto out;

	err = esp_input_done2(skb, err);

out:
	return err;
}

static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
	struct esp_data *esp = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
	u32 align = max_t(u32, blksize, esp->padlen);
	u32 rem;

	mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
	rem = mtu & (align - 1);
	mtu &= ~(align - 1);

	switch (x->props.mode) {
	case XFRM_MODE_TUNNEL:
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		/* The worst case */
		mtu -= blksize - 4;
		mtu += min_t(u32, blksize - 4, rem);
		break;
	case XFRM_MODE_BEET:
		/* The worst case. */
		mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem);
		break;
	}

	return mtu - 2;
}
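/*
 * Worked example (illustrative, assuming authenc(hmac(sha1),cbc(aes)):
 * 16-byte IV, 16-byte block size, 12-byte truncated ICV) in transport
 * mode with mtu = 1500:
 *
 *	header_len = 8 (SPI + seq_no) + 16 (IV)  = 24
 *	1500 - 24 - 12                           = 1464
 *	rem = 1464 & 15 = 8, 1464 & ~15          = 1456
 *	1456 - (16 - 4) + min(16 - 4, 8)         = 1452
 *	return 1452 - 2 (pad len + next header)  = 1450 bytes of payload
 */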
static void esp4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
	struct xfrm_state *x;

	if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
	    icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
		return;

	x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return;
	NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
		 ntohl(esph->spi), ntohl(iph->daddr));
	xfrm_state_put(x);
}

static void esp_destroy(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;

	if (!esp)
		return;

	crypto_free_aead(esp->aead);
	kfree(esp);
}

static int esp_init_aead(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;
	struct crypto_aead *aead;
	int err;

	aead = crypto_alloc_aead(x->aead->alg_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	esp->aead = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}
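/*
 * esp_init_authenc() below packs the separate authentication and
 * encryption keys into the single blob that the "authenc"/"authencesn"
 * templates expect:
 *
 *	[rtattr: CRYPTO_AUTHENC_KEYA_PARAM (enckeylen)][auth key][enc key]
 *
 * Sketch of the layout for orientation; see crypto/authenc.c for the
 * authoritative parsing.
 */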
static int esp_init_authenc(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (x->ealg == NULL)
		goto error;

	err = -ENAMETOOLONG;

	if (x->props.flags & XFRM_STATE_ESN) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "authencesn(%s,%s)",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "authenc(%s,%s)",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	esp->aead = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_aead_authsize(aead),
				 aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

static int esp_init_state(struct xfrm_state *x)
{
	struct esp_data *esp;
	struct crypto_aead *aead;
	u32 align;
	int err;

	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	x->data = esp;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = esp->aead;

	esp->padlen = 0;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			/* Unknown encapsulation type: fail instead of
			 * silently returning success with err == 0. */
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) +
					       2 * sizeof(u32);
			break;
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	if (esp->padlen)
		align = max_t(u32, align, esp->padlen);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);

error:
	return err;
}
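/*
 * Example (illustrative): for tunnel-mode ESP with authenc(hmac(sha1),
 * cbc(aes)) and no NAT-T, esp_init_state() above yields
 *
 *	header_len  = 8 (SPI + seq_no) + 16 (IV) + 20 (outer IPv4) = 44
 *	trailer_len = 16 (block align) + 1 + 12 (truncated ICV)    = 29
 */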
static const struct xfrm_type esp_type = {
	.description	= "ESP4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp_init_state,
	.destructor	= esp_destroy,
	.get_mtu	= esp4_get_mtu,
	.input		= esp_input,
	.output		= esp_output
};

static const struct net_protocol esp4_protocol = {
	.handler	= xfrm4_rcv,
	.err_handler	= esp4_err,
	.no_policy	= 1,
	.netns_ok	= 1,
};

static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		printk(KERN_INFO "ip esp init: can't add xfrm type\n");
		return -EAGAIN;
	}
	if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) {
		printk(KERN_INFO "ip esp init: can't add protocol\n");
		xfrm_unregister_type(&esp_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit esp4_fini(void)
{
	if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0)
		printk(KERN_INFO "ip esp close: can't remove protocol\n");
	if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
		printk(KERN_INFO "ip esp close: can't remove xfrm type\n");
}

module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);