/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *	This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <linux/icmpv6.h>

#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number
 * are placed at the front, if present, followed by the IV, the request
 * and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	len = seqihlen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}
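
/*
 * Resulting layout of the buffer allocated above (illustrative; the
 * alignment padding computed in esp_alloc_tmp() is omitted):
 *
 *	[seqhi (ESN high bits)][IV][aead_request + tfm reqsize][sg[nfrags]]
 *
 * The esp_tmp_*() helpers below recover each region from the base
 * pointer using the same alignment rules.
 */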

static inline __be32 *esp_tmp_seqhi(void *tmp)
{
	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	struct crypto_aead *aead = x->data;
	int seqhilen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		seqhilen += sizeof(__be32);

	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	void *tmp;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);
	xfrm_output_resume(skb, err);
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_seqhi(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
					     struct xfrm_state *x,
					     struct ip_esp_hdr *esph,
					     __be32 *seqhi)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct xfrm_offload *xo = xfrm_offload(skb);

		esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
		*seqhi = esph->spi;
		if (xo)
			esph->seq_no = htonl(xo->seq.hi);
		else
			esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
	}

	esph->spi = x->id.spi;

	return esph;
}

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}
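
/*
 * Trailer layout written below, as defined by RFC 4303 (the ICV is
 * appended separately by the AEAD transform):
 *
 *	[TFC padding][1, 2, 3, ... self-describing padding][pad length][next header]
 */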

static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
{
	/* Fill padding... */
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = proto;
}

int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	u8 *vaddr;
	int nfrags;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	if (!skb_cloned(skb)) {
		if (tailen <= skb_availroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			vaddr = kmap_atomic(page);

			tail = vaddr + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			kunmap_atomic(vaddr);

			spin_unlock_bh(&x->lock);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;
			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk)
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);
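
/*
 * esp6_output_tail() builds the scatterlists and runs the AEAD
 * transform.  The destination list (dsg) differs from the source list
 * only when esp6_output_head() wrote the trailer into a separate page
 * fragment (esp->inplace == false).
 */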

int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct aead_request *req;
	struct crypto_aead *aead;
	struct scatterlist *sg, *dsg;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, seqhilen);
	if (!tmp)
		goto error;

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_esn(skb, x, ip_esp_hdr(skb), seqhi);

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -EBUSY:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);
	kfree(tmp);

error:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);

static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
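	/*
	 * Illustrative sizing for the computations below (example
	 * numbers, not from the original source): with blksize 16,
	 * alen 16, tfclen 0 and skb->len 100, clen = ALIGN(102, 16)
	 * = 112, plen = 12 (10 pad bytes plus the pad-length and
	 * next-header bytes) and tailen = 12 + 16 = 28.
	 */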
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.nfrags = esp6_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = ip_esp_hdr(skb);
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp6_output_tail(x, skb, &esp);
}

int esp6_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
	int hdr_len = skb_network_header_len(skb);
	int padlen;
	u8 nexthdr[2];

	if (!xo || !(xo->flags & CRYPTO_DONE))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	err = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	/* ... check padding bits here. Silly. :-) */

	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	err = nexthdr[1];

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_input_done2);

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp6_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)skb->data;

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = skb_push(skb, 4);
		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}

static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(*esph) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;

	if (!pskb_may_pull(skb, sizeof(*esph) + ivlen)) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	assoclen = sizeof(*esph);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

skip_cow:
	ret = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	ret = esp6_input_done2(skb, ret);

out:
	return ret;
}
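
/*
 * Worked example for the formula below (illustrative numbers, not from
 * the original source): transport mode with a 16-byte IV and 16-byte
 * ICV gives header_len 24 and net_adj 40 (sizeof(struct ipv6hdr)), so
 * for mtu 1500 and blksize 16 the result is
 * ((1500 - 24 - 16 - 40) & ~15) + 40 - 2 = 1446.
 */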
static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
{
	struct crypto_aead *aead = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	unsigned int net_adj;

	if (x->props.mode != XFRM_MODE_TUNNEL)
		net_adj = sizeof(struct ipv6hdr);
	else
		net_adj = 0;

	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
		 net_adj) & ~(blksize - 1)) + net_adj - 2;
}

static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}

static void esp6_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;
	u32 mask = 0;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	if (x->xso.offload_handle)
		mask |= CRYPTO_ALG_ASYNC;

	aead = crypto_alloc_aead(aead_name, 0, mask);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}

static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;
	u32 mask = 0;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	if (x->xso.offload_handle)
		mask |= CRYPTO_ALG_ASYNC;

	aead = crypto_alloc_aead(authenc_name, 0, mask);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;
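
	/*
	 * authenc-style transforms take a single key blob that packs
	 * the authentication and encryption keys together; the layout
	 * built below matches what crypto/authenc.c expects:
	 *
	 *	struct rtattr (CRYPTO_AUTHENC_KEYA_PARAM)
	 *	struct crypto_authenc_key_param (enckeylen, big endian)
	 *	raw authentication key
	 *	raw encryption key
	 */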
	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %hu\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

static int esp6_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	if (x->encap)
		return -EINVAL;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	default:
		goto error;
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp6_type = {
	.description	= "ESP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.get_mtu	= esp6_get_mtu,
	.input		= esp6_input,
	.output		= esp6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,
};

static struct xfrm6_protocol esp6_protocol = {
	.handler	= xfrm6_rcv,
	.cb_handler	= esp6_rcv_cb,
	.err_handler	= esp6_err,
	.priority	= 0,
};

static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit esp6_fini(void)
{
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}
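
/*
 * The registrations above hook this implementation into the xfrm
 * stack: inbound IPPROTO_ESP packets reach esp6_input() via
 * xfrm6_rcv() and the state's type ops, while xfrm_output() invokes
 * esp6_output() for outbound SAs using this transform.
 */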

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);