// SPDX-License-Identifier: GPL-2.0
/*
 * xfrm_input.c
 *
 * Changes:
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific portion
 *
 */

#include <linux/bottom_half.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
#include <net/dst_metadata.h>

#include "xfrm_inout.h"

struct xfrm_trans_tasklet {
	struct work_struct work;
	spinlock_t queue_lock;
	struct sk_buff_head queue;
};

struct xfrm_trans_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
	int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
	struct net *net;
};

#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))

static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[2][AF_INET6 + 1];

static struct gro_cells gro_cells;
static struct net_device xfrm_napi_dev;

static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);

int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo)
{
	int err = 0;

	if (WARN_ON(afinfo->family > AF_INET6))
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_input_afinfo_lock);
	if (unlikely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family]))
		err = -EEXIST;
	else
		rcu_assign_pointer(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family], afinfo);
	spin_unlock_bh(&xfrm_input_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_input_register_afinfo);

int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo)
{
	int err = 0;

	spin_lock_bh(&xfrm_input_afinfo_lock);
	if (likely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family])) {
		if (unlikely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family] != afinfo))
			err = -EINVAL;
		else
			RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family], NULL);
	}
	spin_unlock_bh(&xfrm_input_afinfo_lock);
	synchronize_rcu();
	return err;
}
EXPORT_SYMBOL(xfrm_input_unregister_afinfo);

/* Returns with rcu_read_lock() held when a non-NULL afinfo is returned;
 * the caller (xfrm_rcv_cb()) drops it after invoking the callback.
 */
static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(u8 family, bool is_ipip)
{
	const struct xfrm_input_afinfo *afinfo;

	if (WARN_ON_ONCE(family > AF_INET6))
		return NULL;

	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_input_afinfo[is_ipip][family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
		       int err)
{
	bool is_ipip = (protocol == IPPROTO_IPIP || protocol == IPPROTO_IPV6);
	const struct xfrm_input_afinfo *afinfo;
	int ret;

	afinfo = xfrm_input_get_afinfo(family, is_ipip);
	if (!afinfo)
		return -EAFNOSUPPORT;

	ret = afinfo->callback(skb, protocol, err);
	rcu_read_unlock();

	return ret;
}

struct sec_path *secpath_set(struct sk_buff *skb)
{
	struct sec_path *sp, *tmp = skb_ext_find(skb, SKB_EXT_SEC_PATH);

	sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
	if (!sp)
		return NULL;

	if (tmp) /* reused existing one (was COW'd if needed) */
		return sp;

	/* allocated new secpath */
	memset(sp->ovec, 0, sizeof(sp->ovec));
	sp->olen = 0;
	sp->len = 0;

	return sp;
}
EXPORT_SYMBOL(secpath_set);

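/* Usage sketch (illustrative, not an additional API contract): a receive
 * path attaches the secpath once and then records each state it has
 * verified, which is exactly what xfrm_input() does further down:
 *
 *	sp = secpath_set(skb);
 *	if (!sp)
 *		goto drop;
 *	...
 *	sp->xvec[sp->len++] = x;	(x comes from xfrm_state_lookup())
 */
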
/* Fetch spi and seq from ipsec header */

int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
{
	int offset, offset_seq;
	int hlen;

	switch (nexthdr) {
	case IPPROTO_AH:
		hlen = sizeof(struct ip_auth_hdr);
		offset = offsetof(struct ip_auth_hdr, spi);
		offset_seq = offsetof(struct ip_auth_hdr, seq_no);
		break;
	case IPPROTO_ESP:
		hlen = sizeof(struct ip_esp_hdr);
		offset = offsetof(struct ip_esp_hdr, spi);
		offset_seq = offsetof(struct ip_esp_hdr, seq_no);
		break;
	case IPPROTO_COMP:
		if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
			return -EINVAL;
		/* IPComp carries a 16-bit CPI; widen it to a 32-bit SPI. */
		*spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
		*seq = 0;
		return 0;
	default:
		return 1;
	}

	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;

	*spi = *(__be32 *)(skb_transport_header(skb) + offset);
	*seq = *(__be32 *)(skb_transport_header(skb) + offset_seq);
	return 0;
}
EXPORT_SYMBOL(xfrm_parse_spi);

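/* Worked example (a sketch; byte values invented for illustration): for
 * ESP, spi and seq_no are the first two 32-bit words of the transport
 * header, so a header beginning 00 00 10 01 | 00 00 00 2a yields
 * *spi == htonl(0x1001) and *seq == htonl(0x2a).  For IPComp only a
 * 16-bit CPI exists on the wire; it is widened above so state lookup
 * can treat it like an ordinary SPI.
 */
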
static int xfrm4_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph;
	int optlen = 0;
	int err = -EINVAL;

	if (unlikely(XFRM_MODE_SKB_CB(skb)->protocol == IPPROTO_BEETPH)) {
		struct ip_beet_phdr *ph;
		int phlen;

		if (!pskb_may_pull(skb, sizeof(*ph)))
			goto out;

		ph = (struct ip_beet_phdr *)skb->data;

		phlen = sizeof(*ph) + ph->padlen;
		optlen = ph->hdrlen * 8 + (IPV4_BEET_PHMAXLEN - phlen);
		if (optlen < 0 || optlen & 3 || optlen > 250)
			goto out;

		XFRM_MODE_SKB_CB(skb)->protocol = ph->nexthdr;

		if (!pskb_may_pull(skb, phlen))
			goto out;
		__skb_pull(skb, phlen);
	}

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	xfrm4_beet_make_header(skb);

	iph = ip_hdr(skb);

	iph->ihl += optlen / 4;
	iph->tot_len = htons(skb->len);
	iph->daddr = x->sel.daddr.a4;
	iph->saddr = x->sel.saddr.a4;
	iph->check = 0;
	iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
	err = 0;
out:
	return err;
}

static void ipip_ecn_decapsulate(struct sk_buff *skb)
{
	struct iphdr *inner_iph = ipip_hdr(skb);

	if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
		IP_ECN_set_ce(inner_iph);
}

static int xfrm4_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = -EINVAL;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;

	err = skb_unclone(skb, GFP_ATOMIC);
	if (err)
		goto out;

	if (x->props.flags & XFRM_STATE_DECAP_DSCP)
		ipv4_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipip_hdr(skb));
	if (!(x->props.flags & XFRM_STATE_NOECN))
		ipip_ecn_decapsulate(skb);

	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);
	if (skb->mac_len)
		eth_hdr(skb)->h_proto = skb->protocol;

	err = 0;

out:
	return err;
}

static void ipip6_ecn_decapsulate(struct sk_buff *skb)
{
	struct ipv6hdr *inner_iph = ipipv6_hdr(skb);

	if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
		IP6_ECN_set_ce(skb, inner_iph);
}

static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = -EINVAL;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto out;

	err = skb_unclone(skb, GFP_ATOMIC);
	if (err)
		goto out;

	if (x->props.flags & XFRM_STATE_DECAP_DSCP)
		ipv6_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipipv6_hdr(skb));
	if (!(x->props.flags & XFRM_STATE_NOECN))
		ipip6_ecn_decapsulate(skb);

	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);
	if (skb->mac_len)
		eth_hdr(skb)->h_proto = skb->protocol;

	err = 0;

out:
	return err;
}

static int xfrm6_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *ip6h;
	int size = sizeof(struct ipv6hdr);
	int err;

	err = skb_cow_head(skb, size + skb->mac_len);
	if (err)
		goto out;

	__skb_push(skb, size);
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	xfrm6_beet_make_header(skb);

	ip6h = ipv6_hdr(skb);
	ip6h->payload_len = htons(skb->len - size);
	ip6h->daddr = x->sel.daddr.in6;
	ip6h->saddr = x->sel.saddr.in6;
	err = 0;
out:
	return err;
}

/* Remove encapsulation header.
 *
 * The IP header will be moved over the top of the encapsulation
 * header.
 *
 * On entry, the transport header shall point to where the IP header
 * should be and the network header shall be set to where the IP
 * header currently is.  skb->data shall point to the start of the
 * payload.
 */
static int
xfrm_inner_mode_encap_remove(struct xfrm_state *x,
			     struct sk_buff *skb)
{
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		switch (XFRM_MODE_SKB_CB(skb)->protocol) {
		case IPPROTO_IPIP:
		case IPPROTO_BEETPH:
			return xfrm4_remove_beet_encap(x, skb);
		case IPPROTO_IPV6:
			return xfrm6_remove_beet_encap(x, skb);
		}
		break;
	case XFRM_MODE_TUNNEL:
		switch (XFRM_MODE_SKB_CB(skb)->protocol) {
		case IPPROTO_IPIP:
			return xfrm4_remove_tunnel_encap(x, skb);
		case IPPROTO_IPV6:
			return xfrm6_remove_tunnel_encap(x, skb);
		}
		break;
	}

	WARN_ON_ONCE(1);
	return -EOPNOTSUPP;
}

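/* Informational summary of the dispatch above:
 *
 *	mode	inner protocol		handler
 *	BEET	IPIP, BEETPH		xfrm4_remove_beet_encap()
 *	BEET	IPV6			xfrm6_remove_beet_encap()
 *	TUNNEL	IPIP			xfrm4_remove_tunnel_encap()
 *	TUNNEL	IPV6			xfrm6_remove_tunnel_encap()
 *
 * Any other mode/protocol combination is unexpected here and is
 * reported via WARN_ON_ONCE() before returning -EOPNOTSUPP.
 */
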
static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (x->props.family) {
	case AF_INET:
		xfrm4_extract_header(skb);
		break;
	case AF_INET6:
		xfrm6_extract_header(skb);
		break;
	default:
		WARN_ON_ONCE(1);
		return -EAFNOSUPPORT;
	}

	switch (XFRM_MODE_SKB_CB(skb)->protocol) {
	case IPPROTO_IPIP:
	case IPPROTO_BEETPH:
		skb->protocol = htons(ETH_P_IP);
		break;
	case IPPROTO_IPV6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return xfrm_inner_mode_encap_remove(x, skb);
}

/* Remove encapsulation header.
 *
 * The IP header will be moved over the top of the encapsulation header.
 *
 * On entry, skb_transport_header() shall point to where the IP header
 * should be and skb_network_header() shall be set to where the IP header
 * currently is.  skb->data shall point to the start of the payload.
 */
static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int ihl = skb->data - skb_transport_header(skb);

	if (skb->transport_header != skb->network_header) {
		memmove(skb_transport_header(skb),
			skb_network_header(skb), ihl);
		skb->network_header = skb->transport_header;
	}
	ip_hdr(skb)->tot_len = htons(skb->len + ihl);
	skb_reset_transport_header(skb);
	return 0;
}

static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	int ihl = skb->data - skb_transport_header(skb);

	if (skb->transport_header != skb->network_header) {
		memmove(skb_transport_header(skb),
			skb_network_header(skb), ihl);
		skb->network_header = skb->transport_header;
	}
	ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
					   sizeof(struct ipv6hdr));
	skb_reset_transport_header(skb);
	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}

static int xfrm_inner_mode_input(struct xfrm_state *x,
				 struct sk_buff *skb)
{
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
	case XFRM_MODE_TUNNEL:
		return xfrm_prepare_input(x, skb);
	case XFRM_MODE_TRANSPORT:
		if (x->props.family == AF_INET)
			return xfrm4_transport_input(x, skb);
		if (x->props.family == AF_INET6)
			return xfrm6_transport_input(x, skb);
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
		WARN_ON_ONCE(1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return -EOPNOTSUPP;
}

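/* xfrm_input - main IPsec receive path.
 *
 * The meaning of @encap_type follows from the checks below (summarized
 * here for orientation; the inline comments are authoritative):
 *
 *	-1	async resumption after crypto completion
 *	< -1	entry via GRO
 *	0	no outer encapsulation
 *	> 0	a UDP encapsulation type (e.g. UDP_ENCAP_ESPINUDP) that
 *		must match x->encap->encap_type
 *
 * @spi may be 0, in which case it is parsed out of the packet with
 * xfrm_parse_spi().
 */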
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	const struct xfrm_state_afinfo *afinfo;
	struct net *net = dev_net(skb->dev);
	int err;
	__be32 seq;
	__be32 seq_hi;
	struct xfrm_state *x = NULL;
	xfrm_address_t *daddr;
	u32 mark = skb->mark;
	unsigned int family = AF_UNSPEC;
	int decaps = 0;
	int async = 0;
	bool xfrm_gro = false;
	bool crypto_done = false;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (encap_type < 0) {
		x = xfrm_input_state(skb);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);

			if (encap_type == -1)
				dev_put(skb->dev);
			goto drop;
		}

		family = x->props.family;

		/* An encap_type of -1 indicates async resumption. */
		if (encap_type == -1) {
			async = 1;
			seq = XFRM_SKB_CB(skb)->seq.input.low;
			goto resume;
		}

		/* encap_type < -1 indicates a GRO call. */
		encap_type = 0;
		seq = XFRM_SPI_SKB_CB(skb)->seq;

		if (xo && (xo->flags & CRYPTO_DONE)) {
			crypto_done = true;
			family = XFRM_SPI_SKB_CB(skb)->family;

			if (!(xo->status & CRYPTO_SUCCESS)) {
				if (xo->status &
				    (CRYPTO_TRANSPORT_AH_AUTH_FAILED |
				     CRYPTO_TRANSPORT_ESP_AUTH_FAILED |
				     CRYPTO_TUNNEL_AH_AUTH_FAILED |
				     CRYPTO_TUNNEL_ESP_AUTH_FAILED)) {

					xfrm_audit_state_icvfail(x, skb,
								 x->type->proto);
					x->stats.integrity_failed++;
					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
					goto drop;
				}

				if (xo->status & CRYPTO_INVALID_PROTOCOL) {
					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
					goto drop;
				}

				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto drop;
			}

			if (xfrm_parse_spi(skb, nexthdr, &spi, &seq)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
				goto drop;
			}
		}

		goto lock;
	}

	family = XFRM_SPI_SKB_CB(skb)->family;

	/* if tunnel is present override skb->mark value with tunnel i_key */
	switch (family) {
	case AF_INET:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
		break;
	case AF_INET6:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
		break;
	}

	sp = secpath_set(skb);
	if (!sp) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
		goto drop;
	}

	seq = 0;
	if (!spi && xfrm_parse_spi(skb, nexthdr, &spi, &seq)) {
		secpath_reset(skb);
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		goto drop;
	}

	daddr = (xfrm_address_t *)(skb_network_header(skb) +
				   XFRM_SPI_SKB_CB(skb)->daddroff);
	do {
		sp = skb_sec_path(skb);

		if (sp->len == XFRM_MAX_DEPTH) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			goto drop;
		}

		x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
		if (x == NULL) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
			xfrm_audit_state_notfound(skb, family, spi, seq);
			goto drop;
		}

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;

		skb_dst_force(skb);
		if (!skb_dst(skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
			goto drop;
		}

lock:
		spin_lock(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);
			goto drop_unlock;
		}

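		/* The encapsulation seen on the wire must match what this
		 * state was configured with (x->encap->encap_type, or 0 for
		 * none).  Only the first transform is checked against the
		 * real encap_type; it is reset to 0 after "resume" below.
		 */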
		if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
			goto drop_unlock;
		}

		if (xfrm_replay_check(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		if (xfrm_state_check_expire(x)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
			goto drop_unlock;
		}

		spin_unlock(&x->lock);

		if (xfrm_tunnel_check(skb, x, family)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		seq_hi = htonl(xfrm_replay_seqhi(x, seq));

		XFRM_SKB_CB(skb)->seq.input.low = seq;
		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;

		dev_hold(skb->dev);

		if (crypto_done)
			nexthdr = x->type_offload->input_tail(x, skb);
		else
			nexthdr = x->type->input(x, skb);

		if (nexthdr == -EINPROGRESS)
			return 0;
resume:
		dev_put(skb->dev);

		spin_lock(&x->lock);
		if (nexthdr < 0) {
			if (nexthdr == -EBADMSG) {
				xfrm_audit_state_icvfail(x, skb,
							 x->type->proto);
				x->stats.integrity_failed++;
			}
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
			goto drop_unlock;
		}

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (xfrm_replay_recheck(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		xfrm_replay_advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;
		x->lastused = ktime_get_real_seconds();

		spin_unlock(&x->lock);

		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

		if (xfrm_inner_mode_input(x, skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		if (x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL) {
			decaps = 1;
			break;
		}

		/*
		 * We need the inner address.  However, we only get here for
		 * transport mode so the outer address is identical.
		 */
		daddr = &x->id.daddr;
		family = x->props.family;

		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
		if (err < 0) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
			goto drop;
		}
		crypto_done = false;
	} while (!err);

	err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
	if (err)
		goto drop;

	nf_reset_ct(skb);

	if (decaps) {
		sp = skb_sec_path(skb);
		if (sp)
			sp->olen = 0;
		if (skb_valid_dst(skb))
			skb_dst_drop(skb);
		gro_cells_receive(&gro_cells, skb);
		return 0;
	} else {
		xo = xfrm_offload(skb);
		if (xo)
			xfrm_gro = xo->flags & XFRM_GRO;

		err = -EAFNOSUPPORT;
		rcu_read_lock();
		afinfo = xfrm_state_afinfo_get_rcu(x->props.family);
		if (likely(afinfo))
			err = afinfo->transport_finish(skb, xfrm_gro || async);
		rcu_read_unlock();
		if (xfrm_gro) {
			sp = skb_sec_path(skb);
			if (sp)
				sp->olen = 0;
			if (skb_valid_dst(skb))
				skb_dst_drop(skb);
			gro_cells_receive(&gro_cells, skb);
			return err;
		}

		return err;
	}

drop_unlock:
	spin_unlock(&x->lock);
drop:
	xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(xfrm_input);

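/* Re-entry point for asynchronous crypto: when x->type->input() returns
 * -EINPROGRESS above, the skb stays with the crypto layer, whose
 * completion callback is expected to feed the packet back through here.
 * encap_type == -1 then resumes xfrm_input() at the "resume" label.
 */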
int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
{
	return xfrm_input(skb, nexthdr, 0, -1);
}
EXPORT_SYMBOL(xfrm_input_resume);

static void xfrm_trans_reinject(struct work_struct *work)
{
	struct xfrm_trans_tasklet *trans = container_of(work, struct xfrm_trans_tasklet, work);
	struct sk_buff_head queue;
	struct sk_buff *skb;

	__skb_queue_head_init(&queue);
	spin_lock_bh(&trans->queue_lock);
	skb_queue_splice_init(&trans->queue, &queue);
	spin_unlock_bh(&trans->queue_lock);

	local_bh_disable();
	while ((skb = __skb_dequeue(&queue)))
		XFRM_TRANS_SKB_CB(skb)->finish(XFRM_TRANS_SKB_CB(skb)->net,
					       NULL, skb);
	local_bh_enable();
}

int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
			 int (*finish)(struct net *, struct sock *,
				       struct sk_buff *))
{
	struct xfrm_trans_tasklet *trans;

	trans = this_cpu_ptr(&xfrm_trans_tasklet);

	if (skb_queue_len(&trans->queue) >= READ_ONCE(netdev_max_backlog))
		return -ENOBUFS;

	BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));

	XFRM_TRANS_SKB_CB(skb)->finish = finish;
	XFRM_TRANS_SKB_CB(skb)->net = net;
	spin_lock_bh(&trans->queue_lock);
	__skb_queue_tail(&trans->queue, skb);
	spin_unlock_bh(&trans->queue_lock);
	schedule_work(&trans->work);
	return 0;
}
EXPORT_SYMBOL(xfrm_trans_queue_net);

int xfrm_trans_queue(struct sk_buff *skb,
		     int (*finish)(struct net *, struct sock *,
				   struct sk_buff *))
{
	return xfrm_trans_queue_net(dev_net(skb->dev), skb, finish);
}
EXPORT_SYMBOL(xfrm_trans_queue);

void __init xfrm_input_init(void)
{
	int err;
	int i;

	init_dummy_netdev(&xfrm_napi_dev);
	err = gro_cells_init(&gro_cells, &xfrm_napi_dev);
	if (err)
		gro_cells.cells = NULL;

	for_each_possible_cpu(i) {
		struct xfrm_trans_tasklet *trans;

		trans = &per_cpu(xfrm_trans_tasklet, i);
		spin_lock_init(&trans->queue_lock);
		__skb_queue_head_init(&trans->queue);
		INIT_WORK(&trans->work, xfrm_trans_reinject);
	}
}
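
/* Usage sketch for the deferral helpers above (illustrative; the finish
 * callback name is hypothetical): a handler that must not run the rest
 * of the receive path in its current context can queue the skb to the
 * per-CPU worker, which replays it with BHs disabled:
 *
 *	if (xfrm_trans_queue_net(net, skb, my_finish_cb) < 0)
 *		kfree_skb(skb);	(queue exceeded netdev_max_backlog)
 */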