// SPDX-License-Identifier: GPL-2.0
/*
 * xfrm_input.c
 *
 * Changes:
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific portion
 *
 */

#include <linux/bottom_half.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
#include <net/dst_metadata.h>

#include "xfrm_inout.h"

struct xfrm_trans_tasklet {
	struct work_struct work;
	spinlock_t queue_lock;
	struct sk_buff_head queue;
};

struct xfrm_trans_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
	int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
	struct net *net;
};

#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))

static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[2][AF_INET6 + 1];

static struct gro_cells gro_cells;
static struct net_device xfrm_napi_dev;

static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);

int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo)
{
	int err = 0;

	if (WARN_ON(afinfo->family > AF_INET6))
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_input_afinfo_lock);
	if (unlikely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family]))
		err = -EEXIST;
	else
		rcu_assign_pointer(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family], afinfo);
	spin_unlock_bh(&xfrm_input_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_input_register_afinfo);

int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo)
{
	int err = 0;

	spin_lock_bh(&xfrm_input_afinfo_lock);
	if (likely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family])) {
		if (unlikely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family] != afinfo))
			err = -EINVAL;
		else
			RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family], NULL);
	}
	spin_unlock_bh(&xfrm_input_afinfo_lock);
	synchronize_rcu();
	return err;
}
EXPORT_SYMBOL(xfrm_input_unregister_afinfo);

/* On success the RCU read lock is left held; the caller must drop it
 * (as xfrm_rcv_cb() below does) once it is done with the returned afinfo.
 */
static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(u8 family, bool is_ipip)
{
	const struct xfrm_input_afinfo *afinfo;

	if (WARN_ON_ONCE(family > AF_INET6))
		return NULL;

	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_input_afinfo[is_ipip][family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
		       int err)
{
	bool is_ipip = (protocol == IPPROTO_IPIP || protocol == IPPROTO_IPV6);
	const struct xfrm_input_afinfo *afinfo;
	int ret;

	afinfo = xfrm_input_get_afinfo(family, is_ipip);
	if (!afinfo)
		return -EAFNOSUPPORT;

	ret = afinfo->callback(skb, protocol, err);
	rcu_read_unlock();

	return ret;
}
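
/* Usage sketch (hypothetical module, not part of this file): an
 * address-family tunnel handler registers its callback for one
 * (is_ipip, family) slot at init time and removes it on exit:
 *
 *	static const struct xfrm_input_afinfo my_afinfo = {
 *		.family   = AF_INET,
 *		.is_ipip  = true,
 *		.callback = my_rcv_cb,
 *	};
 *
 *	err = xfrm_input_register_afinfo(&my_afinfo);
 *	...
 *	xfrm_input_unregister_afinfo(&my_afinfo);
 *
 * A second registration for an occupied slot fails with -EEXIST, and
 * unregistering synchronizes RCU so in-flight xfrm_rcv_cb() callers
 * are done with the old pointer before the module may go away.
 */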

struct sec_path *secpath_set(struct sk_buff *skb)
{
	struct sec_path *sp, *tmp = skb_ext_find(skb, SKB_EXT_SEC_PATH);

	sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
	if (!sp)
		return NULL;

	if (tmp) /* reused existing one (was COW'd if needed) */
		return sp;

	/* allocated new secpath */
	memset(sp->ovec, 0, sizeof(sp->ovec));
	sp->olen = 0;
	sp->len = 0;
	sp->verified_cnt = 0;

	return sp;
}
EXPORT_SYMBOL(secpath_set);

/* Fetch spi and seq from ipsec header */

int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
{
	int offset, offset_seq;
	int hlen;

	switch (nexthdr) {
	case IPPROTO_AH:
		hlen = sizeof(struct ip_auth_hdr);
		offset = offsetof(struct ip_auth_hdr, spi);
		offset_seq = offsetof(struct ip_auth_hdr, seq_no);
		break;
	case IPPROTO_ESP:
		hlen = sizeof(struct ip_esp_hdr);
		offset = offsetof(struct ip_esp_hdr, spi);
		offset_seq = offsetof(struct ip_esp_hdr, seq_no);
		break;
	case IPPROTO_COMP:
		if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
			return -EINVAL;
		*spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
		*seq = 0;
		return 0;
	default:
		return 1;
	}

	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;

	*spi = *(__be32 *)(skb_transport_header(skb) + offset);
	*seq = *(__be32 *)(skb_transport_header(skb) + offset_seq);
	return 0;
}
EXPORT_SYMBOL(xfrm_parse_spi);

static int xfrm4_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph;
	int optlen = 0;
	int err = -EINVAL;

	skb->protocol = htons(ETH_P_IP);

	if (unlikely(XFRM_MODE_SKB_CB(skb)->protocol == IPPROTO_BEETPH)) {
		struct ip_beet_phdr *ph;
		int phlen;

		if (!pskb_may_pull(skb, sizeof(*ph)))
			goto out;

		ph = (struct ip_beet_phdr *)skb->data;

		phlen = sizeof(*ph) + ph->padlen;
		optlen = ph->hdrlen * 8 + (IPV4_BEET_PHMAXLEN - phlen);
		if (optlen < 0 || optlen & 3 || optlen > 250)
			goto out;

		XFRM_MODE_SKB_CB(skb)->protocol = ph->nexthdr;

		if (!pskb_may_pull(skb, phlen))
			goto out;
		__skb_pull(skb, phlen);
	}

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	xfrm4_beet_make_header(skb);

	iph = ip_hdr(skb);

	iph->ihl += optlen / 4;
	iph->tot_len = htons(skb->len);
	iph->daddr = x->sel.daddr.a4;
	iph->saddr = x->sel.saddr.a4;
	iph->check = 0;
	iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
	err = 0;
out:
	return err;
}

static void ipip_ecn_decapsulate(struct sk_buff *skb)
{
	struct iphdr *inner_iph = ipip_hdr(skb);

	if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
		IP_ECN_set_ce(inner_iph);
}

static int xfrm4_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = -EINVAL;

	skb->protocol = htons(ETH_P_IP);

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;

	err = skb_unclone(skb, GFP_ATOMIC);
	if (err)
		goto out;

	if (x->props.flags & XFRM_STATE_DECAP_DSCP)
		ipv4_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipip_hdr(skb));
	if (!(x->props.flags & XFRM_STATE_NOECN))
		ipip_ecn_decapsulate(skb);

	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);
	if (skb->mac_len)
		eth_hdr(skb)->h_proto = skb->protocol;

	err = 0;

out:
	return err;
}

static void ipip6_ecn_decapsulate(struct sk_buff *skb)
{
	struct ipv6hdr *inner_iph = ipipv6_hdr(skb);

	if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
		IP6_ECN_set_ce(skb, inner_iph);
}
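
/* Both ECN helpers above implement the decapsulation half of the
 * tunnel ECN rules (RFC 6040): if the outer header carried the
 * Congestion Experienced mark, it is copied onto the inner header so
 * the signal survives stripping the outer header.  The tunnel-decap
 * callers skip this when the state was configured with
 * XFRM_STATE_NOECN.
 */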

static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = -EINVAL;

	skb->protocol = htons(ETH_P_IPV6);

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto out;

	err = skb_unclone(skb, GFP_ATOMIC);
	if (err)
		goto out;

	if (x->props.flags & XFRM_STATE_DECAP_DSCP)
		ipv6_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipipv6_hdr(skb));
	if (!(x->props.flags & XFRM_STATE_NOECN))
		ipip6_ecn_decapsulate(skb);

	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);
	if (skb->mac_len)
		eth_hdr(skb)->h_proto = skb->protocol;

	err = 0;

out:
	return err;
}

static int xfrm6_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *ip6h;
	int size = sizeof(struct ipv6hdr);
	int err;

	skb->protocol = htons(ETH_P_IPV6);

	err = skb_cow_head(skb, size + skb->mac_len);
	if (err)
		goto out;

	__skb_push(skb, size);
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	xfrm6_beet_make_header(skb);

	ip6h = ipv6_hdr(skb);
	ip6h->payload_len = htons(skb->len - size);
	ip6h->daddr = x->sel.daddr.in6;
	ip6h->saddr = x->sel.saddr.in6;
	err = 0;
out:
	return err;
}

/* Remove encapsulation header.
 *
 * The IP header will be moved over the top of the encapsulation
 * header.
 *
 * On entry, the transport header shall point to where the IP header
 * should be and the network header shall be set to where the IP
 * header currently is.  skb->data shall point to the start of the
 * payload.
 */
static int
xfrm_inner_mode_encap_remove(struct xfrm_state *x,
			     struct sk_buff *skb)
{
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		switch (x->sel.family) {
		case AF_INET:
			return xfrm4_remove_beet_encap(x, skb);
		case AF_INET6:
			return xfrm6_remove_beet_encap(x, skb);
		}
		break;
	case XFRM_MODE_TUNNEL:
		switch (XFRM_MODE_SKB_CB(skb)->protocol) {
		case IPPROTO_IPIP:
			return xfrm4_remove_tunnel_encap(x, skb);
		case IPPROTO_IPV6:
			return xfrm6_remove_tunnel_encap(x, skb);
		}
		return -EINVAL;
	}

	WARN_ON_ONCE(1);
	return -EOPNOTSUPP;
}

static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (x->props.family) {
	case AF_INET:
		xfrm4_extract_header(skb);
		break;
	case AF_INET6:
		xfrm6_extract_header(skb);
		break;
	default:
		WARN_ON_ONCE(1);
		return -EAFNOSUPPORT;
	}

	return xfrm_inner_mode_encap_remove(x, skb);
}
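
/* Transport-mode input, schematically (offsets illustrative):
 *
 *	before:	[ IP hdr ][ ESP hdr ][ payload ... ]
 *		^network  ^transport  ^skb->data
 *
 *	after:	          [ IP hdr ][ payload ... ]
 *		          ^network/transport
 *
 * The helpers below memmove() the IP header forward over the consumed
 * ESP/AH header so it abuts the decrypted payload, then rewrite
 * tot_len (IPv4) or payload_len (IPv6) to match the shorter packet.
 */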

/* Remove encapsulation header.
 *
 * The IP header will be moved over the top of the encapsulation header.
 *
 * On entry, skb_transport_header() shall point to where the IP header
 * should be and skb_network_header() shall be set to where the IP header
 * currently is.  skb->data shall point to the start of the payload.
 */
static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	int ihl = skb->data - skb_transport_header(skb);

	if (skb->transport_header != skb->network_header) {
		memmove(skb_transport_header(skb),
			skb_network_header(skb), ihl);
		if (xo)
			xo->orig_mac_len =
				skb_mac_header_was_set(skb) ? skb_mac_header_len(skb) : 0;
		skb->network_header = skb->transport_header;
	}
	ip_hdr(skb)->tot_len = htons(skb->len + ihl);
	skb_reset_transport_header(skb);
	return 0;
}

static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct xfrm_offload *xo = xfrm_offload(skb);
	int ihl = skb->data - skb_transport_header(skb);

	if (skb->transport_header != skb->network_header) {
		memmove(skb_transport_header(skb),
			skb_network_header(skb), ihl);
		if (xo)
			xo->orig_mac_len =
				skb_mac_header_was_set(skb) ? skb_mac_header_len(skb) : 0;
		skb->network_header = skb->transport_header;
	}
	ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
					   sizeof(struct ipv6hdr));
	skb_reset_transport_header(skb);
	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}

static int xfrm_inner_mode_input(struct xfrm_state *x,
				 struct sk_buff *skb)
{
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
	case XFRM_MODE_TUNNEL:
		return xfrm_prepare_input(x, skb);
	case XFRM_MODE_TRANSPORT:
		if (x->props.family == AF_INET)
			return xfrm4_transport_input(x, skb);
		if (x->props.family == AF_INET6)
			return xfrm6_transport_input(x, skb);
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
		WARN_ON_ONCE(1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return -EOPNOTSUPP;
}

int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	const struct xfrm_state_afinfo *afinfo;
	struct net *net = dev_net(skb->dev);
	int err;
	__be32 seq;
	__be32 seq_hi;
	struct xfrm_state *x = NULL;
	xfrm_address_t *daddr;
	u32 mark = skb->mark;
	unsigned int family = AF_UNSPEC;
	int decaps = 0;
	int async = 0;
	bool xfrm_gro = false;
	bool crypto_done = false;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;
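
	/* A note on encap_type (summarizing the branches below): 0 means
	 * plain IPsec input, positive values are UDP encapsulation types
	 * (NAT-T), -1 means resumption after asynchronous crypto, and
	 * values below -1 indicate entry from the GRO code path.
	 */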
	if (encap_type < 0) {
		x = xfrm_input_state(skb);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);

			if (encap_type == -1)
				dev_put(skb->dev);
			goto drop;
		}

		family = x->props.family;

		/* An encap_type of -1 indicates async resumption. */
		if (encap_type == -1) {
			async = 1;
			seq = XFRM_SKB_CB(skb)->seq.input.low;
			goto resume;
		}

		/* encap_type < -1 indicates a GRO call. */
		encap_type = 0;
		seq = XFRM_SPI_SKB_CB(skb)->seq;

		if (xo && (xo->flags & CRYPTO_DONE)) {
			crypto_done = true;
			family = XFRM_SPI_SKB_CB(skb)->family;

			if (!(xo->status & CRYPTO_SUCCESS)) {
				if (xo->status &
				    (CRYPTO_TRANSPORT_AH_AUTH_FAILED |
				     CRYPTO_TRANSPORT_ESP_AUTH_FAILED |
				     CRYPTO_TUNNEL_AH_AUTH_FAILED |
				     CRYPTO_TUNNEL_ESP_AUTH_FAILED)) {

					xfrm_audit_state_icvfail(x, skb,
								 x->type->proto);
					x->stats.integrity_failed++;
					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
					goto drop;
				}

				if (xo->status & CRYPTO_INVALID_PROTOCOL) {
					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
					goto drop;
				}

				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto drop;
			}

			if (xfrm_parse_spi(skb, nexthdr, &spi, &seq)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
				goto drop;
			}
		}

		goto lock;
	}

	family = XFRM_SPI_SKB_CB(skb)->family;

	/* if tunnel is present override skb->mark value with tunnel i_key */
	switch (family) {
	case AF_INET:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
		break;
	case AF_INET6:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
		break;
	}

	sp = secpath_set(skb);
	if (!sp) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
		goto drop;
	}

	seq = 0;
	if (!spi && xfrm_parse_spi(skb, nexthdr, &spi, &seq)) {
		secpath_reset(skb);
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		goto drop;
	}

	daddr = (xfrm_address_t *)(skb_network_header(skb) +
				   XFRM_SPI_SKB_CB(skb)->daddroff);
	do {
		sp = skb_sec_path(skb);

		if (sp->len == XFRM_MAX_DEPTH) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			goto drop;
		}

		x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
		if (x == NULL) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
			xfrm_audit_state_notfound(skb, family, spi, seq);
			goto drop;
		}

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;

		skb_dst_force(skb);
		if (!skb_dst(skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
			goto drop;
		}

lock:
		spin_lock(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);
			goto drop_unlock;
		}
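
		/* At this point the state is locked and valid.  The next
		 * three checks reject, in order: a NAT-T encapsulation
		 * mismatch, a replayed sequence number, and an expired
		 * state.
		 */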
		if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
			goto drop_unlock;
		}

		if (xfrm_replay_check(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		if (xfrm_state_check_expire(x)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
			goto drop_unlock;
		}

		spin_unlock(&x->lock);

		if (xfrm_tunnel_check(skb, x, family)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		seq_hi = htonl(xfrm_replay_seqhi(x, seq));

		XFRM_SKB_CB(skb)->seq.input.low = seq;
		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;

		dev_hold(skb->dev);

		if (crypto_done)
			nexthdr = x->type_offload->input_tail(x, skb);
		else
			nexthdr = x->type->input(x, skb);

		if (nexthdr == -EINPROGRESS)
			return 0;
resume:
		dev_put(skb->dev);

		spin_lock(&x->lock);
		if (nexthdr < 0) {
			if (nexthdr == -EBADMSG) {
				xfrm_audit_state_icvfail(x, skb,
							 x->type->proto);
				x->stats.integrity_failed++;
			}
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
			goto drop_unlock;
		}

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (xfrm_replay_recheck(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		xfrm_replay_advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;
		x->lastused = ktime_get_real_seconds();

		spin_unlock(&x->lock);

		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

		if (xfrm_inner_mode_input(x, skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		if (x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL) {
			decaps = 1;
			break;
		}

		/*
		 * We need the inner address.  However, we only get here for
		 * transport mode so the outer address is identical.
		 */
		daddr = &x->id.daddr;
		family = x->props.family;

		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
		if (err < 0) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
			goto drop;
		}
		crypto_done = false;
	} while (!err);

	err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
	if (err)
		goto drop;

	nf_reset_ct(skb);

	if (decaps) {
		sp = skb_sec_path(skb);
		if (sp)
			sp->olen = 0;
		if (skb_valid_dst(skb))
			skb_dst_drop(skb);
		gro_cells_receive(&gro_cells, skb);
		return 0;
	} else {
		xo = xfrm_offload(skb);
		if (xo)
			xfrm_gro = xo->flags & XFRM_GRO;

		err = -EAFNOSUPPORT;
		rcu_read_lock();
		afinfo = xfrm_state_afinfo_get_rcu(x->props.family);
		if (likely(afinfo))
			err = afinfo->transport_finish(skb, xfrm_gro || async);
		rcu_read_unlock();
		if (xfrm_gro) {
			sp = skb_sec_path(skb);
			if (sp)
				sp->olen = 0;
			if (skb_valid_dst(skb))
				skb_dst_drop(skb);
			gro_cells_receive(&gro_cells, skb);
			return err;
		}

		return err;
	}

drop_unlock:
	spin_unlock(&x->lock);
drop:
	xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(xfrm_input);
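
/* Usage sketch (hypothetical callback, modeled on the ESP async path):
 * an asynchronous crypto completion handler re-enters this file via
 * xfrm_input_resume() below, which calls xfrm_input(skb, nexthdr, 0, -1)
 * and so continues at the "resume" label above:
 *
 *	static void my_esp_done(void *data, int err)
 *	{
 *		struct sk_buff *skb = data;
 *
 *		// my_esp_post_process() is a stand-in that finishes the
 *		// packet and returns the inner protocol or a -errno.
 *		xfrm_input_resume(skb, my_esp_post_process(skb, err));
 *	}
 */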

int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
{
	return xfrm_input(skb, nexthdr, 0, -1);
}
EXPORT_SYMBOL(xfrm_input_resume);

static void xfrm_trans_reinject(struct work_struct *work)
{
	struct xfrm_trans_tasklet *trans = container_of(work, struct xfrm_trans_tasklet, work);
	struct sk_buff_head queue;
	struct sk_buff *skb;

	__skb_queue_head_init(&queue);
	spin_lock_bh(&trans->queue_lock);
	skb_queue_splice_init(&trans->queue, &queue);
	spin_unlock_bh(&trans->queue_lock);

	local_bh_disable();
	while ((skb = __skb_dequeue(&queue)))
		XFRM_TRANS_SKB_CB(skb)->finish(XFRM_TRANS_SKB_CB(skb)->net,
					       NULL, skb);
	local_bh_enable();
}

int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
			 int (*finish)(struct net *, struct sock *,
				       struct sk_buff *))
{
	struct xfrm_trans_tasklet *trans;

	trans = this_cpu_ptr(&xfrm_trans_tasklet);

	if (skb_queue_len(&trans->queue) >= READ_ONCE(netdev_max_backlog))
		return -ENOBUFS;

	BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));

	XFRM_TRANS_SKB_CB(skb)->finish = finish;
	XFRM_TRANS_SKB_CB(skb)->net = net;
	spin_lock_bh(&trans->queue_lock);
	__skb_queue_tail(&trans->queue, skb);
	spin_unlock_bh(&trans->queue_lock);
	schedule_work(&trans->work);
	return 0;
}
EXPORT_SYMBOL(xfrm_trans_queue_net);

int xfrm_trans_queue(struct sk_buff *skb,
		     int (*finish)(struct net *, struct sock *,
				   struct sk_buff *))
{
	return xfrm_trans_queue_net(dev_net(skb->dev), skb, finish);
}
EXPORT_SYMBOL(xfrm_trans_queue);

void __init xfrm_input_init(void)
{
	int err;
	int i;

	init_dummy_netdev(&xfrm_napi_dev);
	err = gro_cells_init(&gro_cells, &xfrm_napi_dev);
	if (err)
		gro_cells.cells = NULL;

	for_each_possible_cpu(i) {
		struct xfrm_trans_tasklet *trans;

		trans = &per_cpu(xfrm_trans_tasklet, i);
		spin_lock_init(&trans->queue_lock);
		__skb_queue_head_init(&trans->queue);
		INIT_WORK(&trans->work, xfrm_trans_reinject);
	}
}
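
/* Usage sketch (hypothetical caller): a receive path that must avoid
 * recursing into the stack can defer its finish function to the
 * per-CPU work queue set up above:
 *
 *	err = xfrm_trans_queue_net(net, skb, my_rcv_finish);
 *	if (err)	// -ENOBUFS once the queue reaches netdev_max_backlog
 *		kfree_skb(skb);
 *
 * where my_rcv_finish is a stand-in for any
 * int (*)(struct net *, struct sock *, struct sk_buff *) handler; it
 * will run later from xfrm_trans_reinject() with bottom halves
 * disabled.
 */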