// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack_acct.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>

static int nf_flow_state_check(struct flow_offload *flow, int proto,
			       struct sk_buff *skb, unsigned int thoff)
{
	struct tcphdr *tcph;

	if (proto != IPPROTO_TCP)
		return 0;

	tcph = (void *)(skb_network_header(skb) + thoff);
	if (unlikely(tcph->fin || tcph->rst)) {
		flow_offload_teardown(flow);
		return -1;
	}

	return 0;
}

static void nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
}

static void nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace4(&udph->check, skb, addr,
					 new_addr, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
				   unsigned int thoff, __be32 addr,
				   __be32 new_addr)
{
	switch (iph->protocol) {
	case IPPROTO_TCP:
		nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ip_udp(skb, thoff, addr, new_addr);
		break;
	}
}

static void nf_flow_snat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

static void nf_flow_dnat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

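/* Apply the SNAT/DNAT mangling recorded in the flow entry. Port rewriting
 * is delegated to the generic nf_flow_snat_port()/nf_flow_dnat_port()
 * helpers; address rewriting and checksum fixup use the IPv4 helpers above.
 */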
static void nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   unsigned int thoff, enum flow_offload_tuple_dir dir,
			   struct iphdr *iph)
{
	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_snat_ip(flow, skb, iph, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_dnat_ip(flow, skb, iph, thoff, dir);
	}
}

static bool ip_has_options(unsigned int thoff)
{
	return thoff != sizeof(struct iphdr);
}

static void nf_flow_tuple_encap(struct sk_buff *skb,
				struct flow_offload_tuple *tuple)
{
	struct vlan_ethhdr *veth;
	struct pppoe_hdr *phdr;
	int i = 0;

	if (skb_vlan_tag_present(skb)) {
		tuple->encap[i].id = skb_vlan_tag_get(skb);
		tuple->encap[i].proto = skb->vlan_proto;
		i++;
	}
	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		tuple->encap[i].id = ntohs(veth->h_vlan_TCI);
		tuple->encap[i].proto = skb->protocol;
		break;
	case htons(ETH_P_PPP_SES):
		phdr = (struct pppoe_hdr *)skb_mac_header(skb);
		tuple->encap[i].id = ntohs(phdr->sid);
		tuple->encap[i].proto = skb->protocol;
		break;
	}
}

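/* Fill in a flow table lookup tuple from an IPv4 packet. Returns -1 if the
 * packet is not eligible for the fast path: fragments, packets with IP
 * options, non-TCP/UDP protocols, TTL <= 1, or headers that cannot be
 * pulled into the linear area.
 */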
static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
			    struct flow_offload_tuple *tuple, u32 *hdrsize,
			    u32 offset)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(*iph) + offset))
		return -1;

	iph = (struct iphdr *)(skb_network_header(skb) + offset);
	thoff = (iph->ihl * 4);

	if (ip_is_fragment(iph) ||
	    unlikely(ip_has_options(thoff)))
		return -1;

	thoff += offset;

	switch (iph->protocol) {
	case IPPROTO_TCP:
		*hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		*hdrsize = sizeof(struct udphdr);
		break;
	default:
		return -1;
	}

	if (iph->ttl <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + *hdrsize))
		return -1;

	iph = (struct iphdr *)(skb_network_header(skb) + offset);
	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;
	tuple->iifidx = dev->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}

/* Based on ip_exceeds_mtu(). */
static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}

static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
				      const struct nf_hook_state *state,
				      struct dst_entry *dst)
{
	skb_orphan(skb);
	skb_dst_set_noref(skb, dst);
	dst_output(state->net, state->sk, skb);
	return NF_STOLEN;
}

static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
				       u32 *offset)
{
	struct vlan_ethhdr *veth;

	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		if (veth->h_vlan_encapsulated_proto == proto) {
			*offset += VLAN_HLEN;
			return true;
		}
		break;
	case htons(ETH_P_PPP_SES):
		if (nf_flow_pppoe_proto(skb) == proto) {
			*offset += PPPOE_SES_HLEN;
			return true;
		}
		break;
	}

	return false;
}

static void nf_flow_encap_pop(struct sk_buff *skb,
			      struct flow_offload_tuple_rhash *tuplehash)
{
	struct vlan_hdr *vlan_hdr;
	int i;

	for (i = 0; i < tuplehash->tuple.encap_num; i++) {
		if (skb_vlan_tag_present(skb)) {
			__vlan_hwaccel_clear_tag(skb);
			continue;
		}
		switch (skb->protocol) {
		case htons(ETH_P_8021Q):
			vlan_hdr = (struct vlan_hdr *)skb->data;
			__skb_pull(skb, VLAN_HLEN);
			vlan_set_encap_proto(skb, vlan_hdr);
			skb_reset_network_header(skb);
			break;
		case htons(ETH_P_PPP_SES):
			skb->protocol = nf_flow_pppoe_proto(skb);
			skb_pull(skb, PPPOE_SES_HLEN);
			skb_reset_network_header(skb);
			break;
		}
	}
}

static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
				       const struct flow_offload_tuple_rhash *tuplehash,
				       unsigned short type)
{
	struct net_device *outdev;

	outdev = dev_get_by_index_rcu(net, tuplehash->tuple.out.ifidx);
	if (!outdev)
		return NF_DROP;

	skb->dev = outdev;
	dev_hard_header(skb, skb->dev, type, tuplehash->tuple.out.h_dest,
			tuplehash->tuple.out.h_source, skb->len);
	dev_queue_xmit(skb);

	return NF_STOLEN;
}

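/* Netfilter hook for the IPv4 flowtable fast path: look up the flow entry,
 * refresh it, apply NAT, decrement the TTL and transmit the packet directly,
 * bypassing the classic forwarding path. NF_ACCEPT hands the packet back to
 * the slow path.
 */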
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	struct net_device *outdev;
	u32 hdrsize, offset = 0;
	unsigned int thoff, mtu;
	struct rtable *rt;
	struct iphdr *iph;
	__be32 nexthop;
	int ret;

	if (skb->protocol != htons(ETH_P_IP) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IP), &offset))
		return NF_ACCEPT;

	if (nf_flow_tuple_ip(skb, state->in, &tuple, &hdrsize, offset) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	mtu = flow->tuplehash[dir].tuple.mtu + offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return NF_ACCEPT;

	iph = (struct iphdr *)(skb_network_header(skb) + offset);
	thoff = (iph->ihl * 4) + offset;
	if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
		return NF_ACCEPT;

	if (skb_try_make_writable(skb, thoff + hdrsize))
		return NF_DROP;

	flow_offload_refresh(flow_table, flow);

	nf_flow_encap_pop(skb, tuplehash);
	thoff -= offset;

	iph = ip_hdr(skb);
	nf_flow_nat_ip(flow, skb, thoff, dir, iph);

	ip_decrease_ttl(iph);
	skb->tstamp = 0;

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		rt = (struct rtable *)tuplehash->tuple.dst_cache;
		memset(skb->cb, 0, sizeof(struct inet_skb_parm));
		IPCB(skb)->iif = skb->dev->ifindex;
		IPCB(skb)->flags = IPSKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		rt = (struct rtable *)tuplehash->tuple.dst_cache;
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IP);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);

static void nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr,
				 struct ipv6hdr *ip6h)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
				  new_addr->s6_addr32, true);
}

static void nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
					  new_addr->s6_addr32, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
				     unsigned int thoff, struct in6_addr *addr,
				     struct in6_addr *new_addr)
{
	switch (ip6h->nexthdr) {
	case IPPROTO_TCP:
		nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr, ip6h);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr);
		break;
	}
}

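/* Unlike IPv4, the IPv6 header carries no checksum of its own, so address
 * rewriting only needs to fix up the transport checksum through the
 * pseudo-header.
 */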
static void nf_flow_snat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static void nf_flow_dnat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static void nf_flow_nat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb,
			     enum flow_offload_tuple_dir dir,
			     struct ipv6hdr *ip6h)
{
	unsigned int thoff = sizeof(*ip6h);

	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir);
	}
}

static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
			      struct flow_offload_tuple *tuple, u32 *hdrsize,
			      u32 offset)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	thoff = sizeof(*ip6h) + offset;
	if (!pskb_may_pull(skb, thoff))
		return -1;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);

	switch (ip6h->nexthdr) {
	case IPPROTO_TCP:
		*hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		*hdrsize = sizeof(struct udphdr);
		break;
	default:
		return -1;
	}

	if (ip6h->hop_limit <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + *hdrsize))
		return -1;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);
	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = ip6h->nexthdr;
	tuple->iifidx = dev->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}

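/* IPv6 counterpart of nf_flow_offload_ip_hook(): same fast-path logic, but
 * decrementing hop_limit instead of the TTL and resolving the nexthop via
 * the neighbour discovery table.
 */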
unsigned int
nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
			  const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	const struct in6_addr *nexthop;
	struct flow_offload *flow;
	struct net_device *outdev;
	unsigned int thoff, mtu;
	u32 hdrsize, offset = 0;
	struct ipv6hdr *ip6h;
	struct rt6_info *rt;
	int ret;

	if (skb->protocol != htons(ETH_P_IPV6) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IPV6), &offset))
		return NF_ACCEPT;

	if (nf_flow_tuple_ipv6(skb, state->in, &tuple, &hdrsize, offset) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	mtu = flow->tuplehash[dir].tuple.mtu + offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return NF_ACCEPT;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);
	thoff = sizeof(*ip6h) + offset;
	if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff))
		return NF_ACCEPT;

	if (skb_try_make_writable(skb, thoff + hdrsize))
		return NF_DROP;

	flow_offload_refresh(flow_table, flow);

	nf_flow_encap_pop(skb, tuplehash);

	ip6h = ipv6_hdr(skb);
	nf_flow_nat_ipv6(flow, skb, dir, ip6h);

	ip6h->hop_limit--;
	skb->tstamp = 0;

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
		IP6CB(skb)->iif = skb->dev->ifindex;
		IP6CB(skb)->flags = IP6SKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IPV6);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);
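
/* A sketch of how the two exported hooks are consumed, for orientation only:
 * the actual registration lives in the per-family flowtable modules (e.g.
 * nf_flow_table_ipv4.c), and the exact set of nf_flowtable_type callbacks
 * shown here is illustrative rather than definitive:
 *
 *	static struct nf_flowtable_type flowtable_ipv4 = {
 *		.family	= NFPROTO_IPV4,
 *		.init	= nf_flow_table_init,
 *		.free	= nf_flow_table_free,
 *		.hook	= nf_flow_offload_ip_hook,
 *		.owner	= THIS_MODULE,
 *	};
 *
 * The type is then registered with nft_register_flowtable_type() from the
 * module init path, which lets nftables attach the hook at netdev ingress.
 */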