// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack_acct.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>

static int nf_flow_state_check(struct flow_offload *flow, int proto,
			       struct sk_buff *skb, unsigned int thoff)
{
	struct tcphdr *tcph;

	if (proto != IPPROTO_TCP)
		return 0;

	tcph = (void *)(skb_network_header(skb) + thoff);
	if (unlikely(tcph->fin || tcph->rst)) {
		flow_offload_teardown(flow);
		return -1;
	}

	return 0;
}

static void nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
}

static void nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace4(&udph->check, skb, addr,
					 new_addr, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
				   unsigned int thoff, __be32 addr,
				   __be32 new_addr)
{
	switch (iph->protocol) {
	case IPPROTO_TCP:
		nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ip_udp(skb, thoff, addr, new_addr);
		break;
	}
}

static void nf_flow_snat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

static void nf_flow_dnat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}
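/*
 * NAT checksum handling: rewriting an address must fix up both the IPv4
 * header checksum and the L4 checksum, because the TCP/UDP checksum
 * covers a pseudo-header that includes both addresses.  The helpers
 * above use incremental updates (RFC 1624: HC' = ~(~HC + ~m + m'))
 * rather than recomputing the sum over the whole packet.  UDP is
 * special: a checksum field of zero means "no checksum", so an unset
 * field is left alone and a zero result is folded to CSUM_MANGLED_0.
 */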
static void nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   unsigned int thoff, enum flow_offload_tuple_dir dir,
			   struct iphdr *iph)
{
	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_snat_ip(flow, skb, iph, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_dnat_ip(flow, skb, iph, thoff, dir);
	}
}

static bool ip_has_options(unsigned int thoff)
{
	return thoff != sizeof(struct iphdr);
}

static void nf_flow_tuple_encap(struct sk_buff *skb,
				struct flow_offload_tuple *tuple)
{
	struct vlan_ethhdr *veth;
	struct pppoe_hdr *phdr;
	int i = 0;

	if (skb_vlan_tag_present(skb)) {
		tuple->encap[i].id = skb_vlan_tag_get(skb);
		tuple->encap[i].proto = skb->vlan_proto;
		i++;
	}
	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		tuple->encap[i].id = ntohs(veth->h_vlan_TCI);
		tuple->encap[i].proto = skb->protocol;
		break;
	case htons(ETH_P_PPP_SES):
		phdr = (struct pppoe_hdr *)skb_mac_header(skb);
		tuple->encap[i].id = ntohs(phdr->sid);
		tuple->encap[i].proto = skb->protocol;
		break;
	}
}

static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
			    struct flow_offload_tuple *tuple, u32 *hdrsize,
			    u32 offset)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;
	u8 ipproto;

	if (!pskb_may_pull(skb, sizeof(*iph) + offset))
		return -1;

	iph = (struct iphdr *)(skb_network_header(skb) + offset);
	thoff = (iph->ihl * 4);

	if (ip_is_fragment(iph) ||
	    unlikely(ip_has_options(thoff)))
		return -1;

	thoff += offset;

	ipproto = iph->protocol;
	switch (ipproto) {
	case IPPROTO_TCP:
		*hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		*hdrsize = sizeof(struct udphdr);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		*hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return -1;
	}

	if (iph->ttl <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + *hdrsize))
		return -1;

	switch (ipproto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port = ports->source;
		tuple->dst_port = ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return -1;
		break;
	}
	}

	iph = (struct iphdr *)(skb_network_header(skb) + offset);

	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->l3proto = AF_INET;
	tuple->l4proto = ipproto;
	tuple->iifidx = dev->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}
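/*
 * nf_flow_tuple_ip() above builds the flow table lookup key: L3
 * addresses and ports (none for GRE), the L4 protocol, the ingress
 * ifindex and any VLAN/PPPoE encapsulation tags.  Fragments, packets
 * carrying IP options and packets whose TTL would expire here all
 * return -1 so they take the regular forwarding path instead.
 */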
/* Based on ip_exceeds_mtu(). */
static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}

static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
				      const struct nf_hook_state *state,
				      struct dst_entry *dst)
{
	skb_orphan(skb);
	skb_dst_set_noref(skb, dst);
	dst_output(state->net, state->sk, skb);
	return NF_STOLEN;
}

static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
				       u32 *offset)
{
	struct vlan_ethhdr *veth;

	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		if (veth->h_vlan_encapsulated_proto == proto) {
			*offset += VLAN_HLEN;
			return true;
		}
		break;
	case htons(ETH_P_PPP_SES):
		if (nf_flow_pppoe_proto(skb) == proto) {
			*offset += PPPOE_SES_HLEN;
			return true;
		}
		break;
	}

	return false;
}

static void nf_flow_encap_pop(struct sk_buff *skb,
			      struct flow_offload_tuple_rhash *tuplehash)
{
	struct vlan_hdr *vlan_hdr;
	int i;

	for (i = 0; i < tuplehash->tuple.encap_num; i++) {
		if (skb_vlan_tag_present(skb)) {
			__vlan_hwaccel_clear_tag(skb);
			continue;
		}
		switch (skb->protocol) {
		case htons(ETH_P_8021Q):
			vlan_hdr = (struct vlan_hdr *)skb->data;
			__skb_pull(skb, VLAN_HLEN);
			vlan_set_encap_proto(skb, vlan_hdr);
			skb_reset_network_header(skb);
			break;
		case htons(ETH_P_PPP_SES):
			skb->protocol = nf_flow_pppoe_proto(skb);
			skb_pull(skb, PPPOE_SES_HLEN);
			skb_reset_network_header(skb);
			break;
		}
	}
}

static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
				       const struct flow_offload_tuple_rhash *tuplehash,
				       unsigned short type)
{
	struct net_device *outdev;

	outdev = dev_get_by_index_rcu(net, tuplehash->tuple.out.ifidx);
	if (!outdev)
		return NF_DROP;

	skb->dev = outdev;
	dev_hard_header(skb, skb->dev, type, tuplehash->tuple.out.h_dest,
			tuplehash->tuple.out.h_source, skb->len);
	dev_queue_xmit(skb);

	return NF_STOLEN;
}
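/*
 * IPv4 fast path.  On a flow table hit the packet is NATed, its TTL is
 * decremented and it is transmitted directly (neighbour output, direct
 * xmit or xfrm), bypassing the rest of the forwarding path; any
 * mismatch returns NF_ACCEPT and falls back to the classic path.  Note
 * the ordering below: the packet is only made writable and mangled
 * after the TCP state and MTU checks pass, and the flow timeout is
 * refreshed only once the packet is committed to the fast path.
 */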
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	struct net_device *outdev;
	u32 hdrsize, offset = 0;
	unsigned int thoff, mtu;
	struct rtable *rt;
	struct iphdr *iph;
	__be32 nexthop;
	int ret;

	if (skb->protocol != htons(ETH_P_IP) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IP), &offset))
		return NF_ACCEPT;

	if (nf_flow_tuple_ip(skb, state->in, &tuple, &hdrsize, offset) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	mtu = flow->tuplehash[dir].tuple.mtu + offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return NF_ACCEPT;

	iph = (struct iphdr *)(skb_network_header(skb) + offset);
	thoff = (iph->ihl * 4) + offset;
	if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
		return NF_ACCEPT;

	if (skb_try_make_writable(skb, thoff + hdrsize))
		return NF_DROP;

	flow_offload_refresh(flow_table, flow);

	nf_flow_encap_pop(skb, tuplehash);
	thoff -= offset;

	iph = ip_hdr(skb);
	nf_flow_nat_ip(flow, skb, thoff, dir, iph);

	ip_decrease_ttl(iph);
	skb_clear_tstamp(skb);

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		rt = (struct rtable *)tuplehash->tuple.dst_cache;
		memset(skb->cb, 0, sizeof(struct inet_skb_parm));
		IPCB(skb)->iif = skb->dev->ifindex;
		IPCB(skb)->flags = IPSKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		rt = (struct rtable *)tuplehash->tuple.dst_cache;
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IP);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	default:
		/* Unreachable for a well-formed flow entry; keep 'ret'
		 * from being used uninitialized.
		 */
		WARN_ON_ONCE(1);
		ret = NF_DROP;
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);

static void nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr,
				 struct ipv6hdr *ip6h)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
				  new_addr->s6_addr32, true);
}

static void nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
					  new_addr->s6_addr32, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
				     unsigned int thoff, struct in6_addr *addr,
				     struct in6_addr *new_addr)
{
	switch (ip6h->nexthdr) {
	case IPPROTO_TCP:
		nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr, ip6h);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr);
		break;
	}
}

static void nf_flow_snat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}
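/*
 * Note the asymmetry with the IPv4 helpers: IPv6 has no header
 * checksum, so after rewriting saddr/daddr only the L4 checksum (whose
 * pseudo-header covers the 128-bit addresses) needs to be updated.
 */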
static void nf_flow_dnat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static void nf_flow_nat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb,
			     enum flow_offload_tuple_dir dir,
			     struct ipv6hdr *ip6h)
{
	unsigned int thoff = sizeof(*ip6h);

	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir);
	}
}

static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
			      struct flow_offload_tuple *tuple, u32 *hdrsize,
			      u32 offset)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;
	u8 nexthdr;

	thoff = sizeof(*ip6h) + offset;
	if (!pskb_may_pull(skb, thoff))
		return -1;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);

	nexthdr = ip6h->nexthdr;
	switch (nexthdr) {
	case IPPROTO_TCP:
		*hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		*hdrsize = sizeof(struct udphdr);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		*hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return -1;
	}

	if (ip6h->hop_limit <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + *hdrsize))
		return -1;

	switch (nexthdr) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port = ports->source;
		tuple->dst_port = ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return -1;
		break;
	}
	}

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);

	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = nexthdr;
	tuple->iifidx = dev->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}
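/*
 * The IPv6 hook below mirrors the IPv4 one.  The transport header sits
 * at a fixed sizeof(struct ipv6hdr) offset because flows carrying
 * extension headers are never offloaded (nf_flow_tuple_ipv6() only
 * accepts TCP/UDP/GRE as nexthdr), and neighbour output goes through
 * NEIGH_ND_TABLE instead of NEIGH_ARP_TABLE.
 */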
unsigned int
nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
			  const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	const struct in6_addr *nexthop;
	struct flow_offload *flow;
	struct net_device *outdev;
	unsigned int thoff, mtu;
	u32 hdrsize, offset = 0;
	struct ipv6hdr *ip6h;
	struct rt6_info *rt;
	int ret;

	if (skb->protocol != htons(ETH_P_IPV6) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IPV6), &offset))
		return NF_ACCEPT;

	if (nf_flow_tuple_ipv6(skb, state->in, &tuple, &hdrsize, offset) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	mtu = flow->tuplehash[dir].tuple.mtu + offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return NF_ACCEPT;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);
	thoff = sizeof(*ip6h) + offset;
	if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff))
		return NF_ACCEPT;

	if (skb_try_make_writable(skb, thoff + hdrsize))
		return NF_DROP;

	flow_offload_refresh(flow_table, flow);

	nf_flow_encap_pop(skb, tuplehash);

	ip6h = ipv6_hdr(skb);
	nf_flow_nat_ipv6(flow, skb, dir, ip6h);

	ip6h->hop_limit--;
	skb_clear_tstamp(skb);

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
		IP6CB(skb)->iif = skb->dev->ifindex;
		IP6CB(skb)->flags = IP6SKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IPV6);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	default:
		/* As in the IPv4 hook: keep 'ret' initialized. */
		WARN_ON_ONCE(1);
		ret = NF_DROP;
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);
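/*
 * Usage sketch (illustrative, not part of this file): these hooks are
 * wired into a flowtable type and invoked from the netfilter ingress
 * hook of the devices bound to the flowtable.  Assuming the usual
 * nf_flowtable_type fields, a minimal registration would look roughly
 * like:
 *
 *	static struct nf_flowtable_type flowtable_ipv4 = {
 *		.family	= NFPROTO_IPV4,
 *		.init	= nf_flow_table_init,
 *		.free	= nf_flow_table_free,
 *		.hook	= nf_flow_offload_ip_hook,
 *		.owner	= THIS_MODULE,
 *	};
 *
 *	(from the module's init function)
 *	nft_register_flowtable_type(&flowtable_ipv4);
 */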