// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *	This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_checksum.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <linux/icmpv6.h>
#include <net/tcp.h>
#include <net/espintcp.h>
#include <net/inet6_hashtables.h>

#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number are
 * placed at the front, if present. Followed by the IV, the request and finally
 * the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	len = seqihlen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(struct esp_output_extra);

	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			skb_page_unref(skb, sg_page(sg), false);
}
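
/* ESP over TCP (RFC 8229): the encapsulating TCP socket is looked up on
 * demand and cached in x->encap_sk by esp6_find_tcp_sk() below.  This whole
 * section is compiled only with CONFIG_INET6_ESPINTCP; otherwise a stub
 * esp_output_tail_tcp() that warns and fails is used instead.
 */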
#ifdef CONFIG_INET6_ESPINTCP
struct esp_tcp_sk {
	struct sock *sk;
	struct rcu_head rcu;
};

static void esp_free_tcp_sk(struct rcu_head *head)
{
	struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);

	sock_put(esk->sk);
	kfree(esk);
}

static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct net *net = xs_net(x);
	struct esp_tcp_sk *esk;
	__be16 sport, dport;
	struct sock *nsk;
	struct sock *sk;

	sk = rcu_dereference(x->encap_sk);
	if (sk && sk->sk_state == TCP_ESTABLISHED)
		return sk;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (sk && sk == nsk) {
		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
		if (!esk) {
			spin_unlock_bh(&x->lock);
			return ERR_PTR(-ENOMEM);
		}
		RCU_INIT_POINTER(x->encap_sk, NULL);
		esk->sk = sk;
		call_rcu(&esk->rcu, esp_free_tcp_sk);
	}
	spin_unlock_bh(&x->lock);

	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, &x->id.daddr.in6,
					dport, &x->props.saddr.in6, ntohs(sport), 0, 0);
	if (!sk)
		return ERR_PTR(-ENOENT);

	if (!tcp_is_ulp_esp(sk)) {
		sock_put(sk);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&x->lock);
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (encap->encap_sport != sport ||
	    encap->encap_dport != dport) {
		sock_put(sk);
		sk = nsk ?: ERR_PTR(-EREMCHG);
	} else if (sk == nsk) {
		sock_put(sk);
	} else {
		rcu_assign_pointer(x->encap_sk, sk);
	}
	spin_unlock_bh(&x->lock);

	return sk;
}

static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
	struct sock *sk;
	int err;

	rcu_read_lock();

	sk = esp6_find_tcp_sk(x);
	err = PTR_ERR_OR_ZERO(sk);
	if (err)
		goto out;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		err = espintcp_queue_out(sk, skb);
	else
		err = espintcp_push_skb(sk, skb);
	bh_unlock_sock(sk);

out:
	rcu_read_unlock();
	return err;
}

static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
				   struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;

	return esp_output_tcp_finish(x, skb);
}

static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	local_bh_disable();
	err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
	local_bh_enable();

	/* EINPROGRESS just happens to do the right thing.  It
	 * actually means that the skb has been consumed and
	 * isn't coming back.
	 */
	return err ?: -EINPROGRESS;
}
#else
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	WARN_ON(1);
	return -EOPNOTSUPP;
}
#endif

static void esp_output_encap_csum(struct sk_buff *skb)
{
	/* UDP encap with IPv6 requires a valid checksum */
	if (*skb_mac_header(skb) == IPPROTO_UDP) {
		struct udphdr *uh = udp_hdr(skb);
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int len = ntohs(uh->len);
		unsigned int offset = skb_transport_offset(skb);
		__wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);

		uh->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					    len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
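
/* Completion callback for AEAD encryption: free the scratch buffer, fix up
 * the outer UDP checksum, then resume either hardware-offload processing
 * (XFRM_DEV_RESUME) or the regular xfrm output path; for ESP over TCP the
 * skb is handed to the encapsulating socket instead.
 */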
static void esp_output_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp, skb);
	kfree(tmp);

	esp_output_encap_csum(skb);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		if (!err &&
		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
			esp_output_tail_tcp(x, skb);
		else
			xfrm_output_resume(skb->sk, skb, err);
	}
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_extra(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	void *tmp = ESP_SKB_CB(skb)->tmp;
	struct esp_output_extra *extra = esp_tmp_extra(tmp);

	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
				sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
					     struct xfrm_state *x,
					     struct ip_esp_hdr *esph,
					     struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}

static void esp_output_done_esn(void *data, int err)
{
	struct sk_buff *skb = data;

	esp_output_restore_header(skb);
	esp_output_done(data, err);
}
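
/* Write the UDP encapsulation header ahead of the ESP header.  For the
 * legacy UDP_ENCAP_ESPINUDP_NON_IKE format, two zero 32-bit marker words
 * follow the UDP header.  Returns the adjusted ESP header location.
 */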
static struct ip_esp_hdr *esp6_output_udp_encap(struct sk_buff *skb,
						int encap_type,
						struct esp_info *esp,
						__be16 sport,
						__be16 dport)
{
	struct udphdr *uh;
	__be32 *udpdata32;
	unsigned int len;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > U16_MAX)
		return ERR_PTR(-EMSGSIZE);

	uh = (struct udphdr *)esp->esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(len);
	uh->check = 0;

	*skb_mac_header(skb) = IPPROTO_UDP;

	if (encap_type == UDP_ENCAP_ESPINUDP_NON_IKE) {
		udpdata32 = (__be32 *)(uh + 1);
		udpdata32[0] = udpdata32[1] = 0;
		return (struct ip_esp_hdr *)(udpdata32 + 2);
	}

	return (struct ip_esp_hdr *)(uh + 1);
}

#ifdef CONFIG_INET6_ESPINTCP
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	__be16 *lenp = (void *)esp->esph;
	struct ip_esp_hdr *esph;
	unsigned int len;
	struct sock *sk;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	rcu_read_lock();
	sk = esp6_find_tcp_sk(x);
	rcu_read_unlock();

	if (IS_ERR(sk))
		return ERR_CAST(sk);

	*lenp = htons(len);
	esph = (struct ip_esp_hdr *)(lenp + 1);

	return esph;
}
#else
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif

static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
			     struct esp_info *esp)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct ip_esp_hdr *esph;
	__be16 sport, dport;
	int encap_type;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	encap_type = encap->encap_type;
	spin_unlock_bh(&x->lock);

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		esph = esp6_output_udp_encap(skb, encap_type, esp, sport, dport);
		break;
	case TCP_ENCAP_ESPINTCP:
		esph = esp6_output_tcp_encap(x, skb, esp);
		break;
	}

	if (IS_ERR(esph))
		return PTR_ERR(esph);

	esp->esph = esph;

	return 0;
}
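
/* First half of ESP output: append the ESP trailer (TFC padding, padding,
 * pad length, next header).  When the skb is not cloned and there is room,
 * the trailer goes into the tailroom or into a page fragment taken from
 * x->xfrag; otherwise the data is copied via skb_cow_data().  Returns the
 * number of scatterlist slots the caller must reserve, or a negative errno.
 */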
int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	if (x->encap) {
		int err = esp6_output_encap(x, skb, esp);

		if (err < 0)
			return err;
	}

	if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
	    ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
		goto cow;

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			tail = page_address(page) + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);
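
/* Second half of ESP output: allocate the scratch buffer, build the source
 * (and, for out-of-place trailers, destination) scatterlists, derive the IV
 * from the 64-bit sequence number and run the AEAD encryption.  Returns 0
 * on success, -EINPROGRESS when the crypto request completes asynchronously,
 * or a negative errno.
 */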
int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct aead_request *req;
	struct crypto_aead *aead;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_esn(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
		esp_output_encap_csum(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp, skb);

	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
		err = esp_output_tail_tcp(x, skb);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);
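
/* xfrm_type ->output handler: compute TFC padding and trailer sizes, fill
 * in SPI and sequence number, then run esp6_output_head()/esp6_output_tail().
 */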
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp6_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp6_output_tail(x, skb, &esp);
}

static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
	BUG_ON(ret);

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	ret = pskb_trim(skb, skb->len - trimlen);
	if (unlikely(ret))
		return ret;

	ret = nexthdr[1];

out:
	return ret;
}
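
/* Finish ESP input after decryption: strip the trailer, handle NAT-T
 * source address/port changes for UDP or TCP encapsulation and fix up the
 * header pointers.  Returns the inner next-header value or a negative errno.
 */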
int esp6_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int hdr_len = skb_network_header_len(skb);

	if (!xo || !(xo->flags & CRYPTO_DONE))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	if (x->encap) {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int offset = skb_network_offset(skb) + sizeof(*ip6h);
		struct xfrm_encap_tmpl *encap = x->encap;
		u8 nexthdr = ip6h->nexthdr;
		__be16 frag_off, source;
		struct udphdr *uh;
		struct tcphdr *th;

		offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
		if (offset == -1) {
			err = -EINVAL;
			goto out;
		}

		uh = (void *)(skb->data + offset);
		th = (void *)(skb->data + offset);
		hdr_len += offset;

		switch (x->encap->encap_type) {
		case TCP_ENCAP_ESPINTCP:
			source = th->source;
			break;
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			source = uh->source;
			break;
		default:
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (!ipv6_addr_equal(&ip6h->saddr, &x->props.saddr.in6) ||
		    source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			memcpy(&ipaddr.a6, &ip6h->saddr.s6_addr, sizeof(ipaddr.a6));
			km_new_mapping(x, &ipaddr, source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_postpull_rcsum(skb, skb_network_header(skb),
			   skb_network_header_len(skb));
	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_input_done2);

static void esp_input_done(void *data, int err)
{
	struct sk_buff *skb = data;

	xfrm_input_resume(skb, esp6_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct ip_esp_hdr *esph = skb_push(skb, 4);

		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}

static void esp_input_done_esn(void *data, int err)
{
	struct sk_buff *skb = data;

	esp_input_restore_header(skb);
	esp_input_done(data, err);
}
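
/* xfrm_type ->input handler: validate lengths, build a scatterlist over the
 * received skb and run the AEAD decryption, completing synchronously via
 * esp6_input_done2() or asynchronously via the done callbacks above.
 */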
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

skip_cow:
	ret = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	ret = esp6_input_done2(skb, ret);

out:
	return ret;
}

static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}

static void esp6_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}
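
/* State setup helpers: esp_init_aead() handles true AEAD algorithms (e.g.
 * rfc4106(gcm(aes))), while esp_init_authenc() wraps a cipher and an
 * authenticator into an authenc()/authencesn() AEAD and packs both keys
 * into the rtattr-formatted blob that crypto_aead_setkey() expects for it.
 */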
static int esp_init_aead(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME) {
		NL_SET_ERR_MSG(extack, "Algorithm name is too long");
		return -ENAMETOOLONG;
	}

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

	return 0;

error:
	NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
	return err;
}

static int esp_init_authenc(struct xfrm_state *x,
			    struct netlink_ext_ack *extack)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
			goto error;
		}
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
			goto error;
		}
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead)) {
		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
		goto error;
	}

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err) {
			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
			goto free_key;
		}
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}
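
/* Initialize an ESP SA: pick the AEAD transform, then size header_len and
 * trailer_len for the chosen mode and encapsulation so the rest of the
 * stack can reserve the right amount of head- and tailroom.
 */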
static int esp6_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead) {
		err = esp_init_aead(x, extack);
	} else if (x->ealg) {
		err = esp_init_authenc(x, extack);
	} else {
		NL_SET_ERR_MSG(extack, "ESP: AEAD or CRYPT must be provided");
		err = -EINVAL;
	}

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	}

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			NL_SET_ERR_MSG(extack, "Unsupported encapsulation type for ESP");
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
#ifdef CONFIG_INET6_ESPINTCP
		case TCP_ENCAP_ESPINTCP:
			/* only the length field, TCP encap is done by
			 * the socket
			 */
			x->props.header_len += 2;
			break;
#endif
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp6_type = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.input		= esp6_input,
	.output		= esp6_output,
};

static struct xfrm6_protocol esp6_protocol = {
	.handler	= xfrm6_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= esp6_rcv_cb,
	.err_handler	= esp6_err,
	.priority	= 0,
};

static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit esp6_fini(void)
{
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	xfrm_unregister_type(&esp6_type, AF_INET6);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);