// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>

static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);

static void
flow_offload_fill_dir(struct flow_offload *flow,
		      enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
	struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple;

	ft->dir = dir;

	switch (ctt->src.l3num) {
	case NFPROTO_IPV4:
		ft->src_v4 = ctt->src.u3.in;
		ft->dst_v4 = ctt->dst.u3.in;
		break;
	case NFPROTO_IPV6:
		ft->src_v6 = ctt->src.u3.in6;
		ft->dst_v6 = ctt->dst.u3.in6;
		break;
	}

	ft->l3proto = ctt->src.l3num;
	ft->l4proto = ctt->dst.protonum;

	switch (ctt->dst.protonum) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		ft->src_port = ctt->src.u.tcp.port;
		ft->dst_port = ctt->dst.u.tcp.port;
		break;
	}
}

struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
{
	struct flow_offload *flow;

	if (unlikely(nf_ct_is_dying(ct)))
		return NULL;

	flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
	if (!flow)
		return NULL;

	refcount_inc(&ct->ct_general.use);
	flow->ct = ct;

	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);

	if (ct->status & IPS_SRC_NAT)
		__set_bit(NF_FLOW_SNAT, &flow->flags);
	if (ct->status & IPS_DST_NAT)
		__set_bit(NF_FLOW_DNAT, &flow->flags);

	return flow;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);

static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
{
	const struct rt6_info *rt;

	if (flow_tuple->l3proto == NFPROTO_IPV6) {
		rt = (const struct rt6_info *)flow_tuple->dst_cache;
		return rt6_get_cookie(rt);
	}

	return 0;
}

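/* Copy the routing information for one direction into the flow tuple:
 * path MTU, input interface and encapsulation stack, plus either the
 * egress device and hardware addresses (direct xmit) or a held
 * reference to the dst_entry (neigh/xfrm xmit).
 */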
static int flow_offload_fill_route(struct flow_offload *flow,
				   const struct nf_flow_route *route,
				   enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
	struct dst_entry *dst = route->tuple[dir].dst;
	int i, j = 0;

	switch (flow_tuple->l3proto) {
	case NFPROTO_IPV4:
		flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true);
		break;
	case NFPROTO_IPV6:
		flow_tuple->mtu = ip6_dst_mtu_maybe_forward(dst, true);
		break;
	}

	flow_tuple->iifidx = route->tuple[dir].in.ifindex;
	for (i = route->tuple[dir].in.num_encaps - 1; i >= 0; i--) {
		flow_tuple->encap[j].id = route->tuple[dir].in.encap[i].id;
		flow_tuple->encap[j].proto = route->tuple[dir].in.encap[i].proto;
		if (route->tuple[dir].in.ingress_vlans & BIT(i))
			flow_tuple->in_vlan_ingress |= BIT(j);
		j++;
	}
	flow_tuple->encap_num = route->tuple[dir].in.num_encaps;

	switch (route->tuple[dir].xmit_type) {
	case FLOW_OFFLOAD_XMIT_DIRECT:
		memcpy(flow_tuple->out.h_dest, route->tuple[dir].out.h_dest,
		       ETH_ALEN);
		memcpy(flow_tuple->out.h_source, route->tuple[dir].out.h_source,
		       ETH_ALEN);
		flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
		flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex;
		break;
	case FLOW_OFFLOAD_XMIT_XFRM:
	case FLOW_OFFLOAD_XMIT_NEIGH:
		if (!dst_hold_safe(route->tuple[dir].dst))
			return -1;

		flow_tuple->dst_cache = dst;
		flow_tuple->dst_cookie = flow_offload_dst_cookie(flow_tuple);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	flow_tuple->xmit_type = route->tuple[dir].xmit_type;

	return 0;
}

static void nft_flow_dst_release(struct flow_offload *flow,
				 enum flow_offload_tuple_dir dir)
{
	if (flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
	    flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)
		dst_release(flow->tuplehash[dir].tuple.dst_cache);
}

int flow_offload_route_init(struct flow_offload *flow,
			    const struct nf_flow_route *route)
{
	int err;

	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
	if (err < 0)
		return err;

	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
	if (err < 0)
		goto err_route_reply;

	flow->type = NF_FLOW_OFFLOAD_ROUTE;

	return 0;

err_route_reply:
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);

	return err;
}
EXPORT_SYMBOL_GPL(flow_offload_route_init);

static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
	tcp->seen[0].td_maxwin = 0;
	tcp->seen[1].td_maxwin = 0;
}

static void flow_offload_fixup_ct(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	int l4num = nf_ct_protonum(ct);
	s32 timeout;

	if (l4num == IPPROTO_TCP) {
		struct nf_tcp_net *tn = nf_tcp_pernet(net);

		flow_offload_fixup_tcp(&ct->proto.tcp);

		timeout = tn->timeouts[ct->proto.tcp.state];
		timeout -= tn->offload_timeout;
	} else if (l4num == IPPROTO_UDP) {
		struct nf_udp_net *tn = nf_udp_pernet(net);

		timeout = tn->timeouts[UDP_CT_REPLIED];
		timeout -= tn->offload_timeout;
	} else {
		return;
	}

	if (timeout < 0)
		timeout = 0;

	if (nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout)
		WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
}

static void flow_offload_route_release(struct flow_offload *flow)
{
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_REPLY);
}

void flow_offload_free(struct flow_offload *flow)
{
	switch (flow->type) {
	case NF_FLOW_OFFLOAD_ROUTE:
		flow_offload_route_release(flow);
		break;
	default:
		break;
	}
	nf_ct_put(flow->ct);
	kfree_rcu(flow, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);

static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple *tuple = data;

	return jhash(tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple_rhash *tuplehash = data;

	return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
				 const void *ptr)
{
	const struct flow_offload_tuple *tuple = arg->key;
	const struct flow_offload_tuple_rhash *x = ptr;

	if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, __hash)))
		return 1;

	return 0;
}

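/* Both tuplehash entries of a flow live in the same rhashtable; entries
 * are hashed and compared over the tuple fields up to the __hash marker.
 */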
static const struct rhashtable_params nf_flow_offload_rhash_params = {
	.head_offset		= offsetof(struct flow_offload_tuple_rhash, node),
	.hashfn			= flow_offload_hash,
	.obj_hashfn		= flow_offload_hash_obj,
	.obj_cmpfn		= flow_offload_hash_cmp,
	.automatic_shrinking	= true,
};

unsigned long flow_offload_get_timeout(struct flow_offload *flow)
{
	unsigned long timeout = NF_FLOW_TIMEOUT;
	struct net *net = nf_ct_net(flow->ct);
	int l4num = nf_ct_protonum(flow->ct);

	if (l4num == IPPROTO_TCP) {
		struct nf_tcp_net *tn = nf_tcp_pernet(net);

		timeout = tn->offload_timeout;
	} else if (l4num == IPPROTO_UDP) {
		struct nf_udp_net *tn = nf_udp_pernet(net);

		timeout = tn->offload_timeout;
	}

	return timeout;
}

int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
	int err;

	flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[0].node,
				     nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[1].node,
				     nf_flow_offload_rhash_params);
	if (err < 0) {
		rhashtable_remove_fast(&flow_table->rhashtable,
				       &flow->tuplehash[0].node,
				       nf_flow_offload_rhash_params);
		return err;
	}

	nf_ct_offload_timeout(flow->ct);

	if (nf_flowtable_hw_offload(flow_table)) {
		__set_bit(NF_FLOW_HW, &flow->flags);
		nf_flow_offload_add(flow_table, flow);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);

void flow_offload_refresh(struct nf_flowtable *flow_table,
			  struct flow_offload *flow)
{
	u32 timeout;

	timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
	if (timeout - READ_ONCE(flow->timeout) > HZ)
		WRITE_ONCE(flow->timeout, timeout);
	else
		return;

	if (likely(!nf_flowtable_hw_offload(flow_table)))
		return;

	nf_flow_offload_add(flow_table, flow);
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);

static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
	return nf_flow_timeout_delta(flow->timeout) <= 0;
}

static void flow_offload_del(struct nf_flowtable *flow_table,
			     struct flow_offload *flow)
{
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
			       nf_flow_offload_rhash_params);
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
			       nf_flow_offload_rhash_params);
	flow_offload_free(flow);
}

void flow_offload_teardown(struct flow_offload *flow)
{
	clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);
	set_bit(NF_FLOW_TEARDOWN, &flow->flags);
	flow_offload_fixup_ct(flow->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);

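/* Minimal usage sketch: how a caller such as the nft flow offload
 * expression is expected to drive the helpers above. This function is
 * hypothetical and not wired up anywhere; the nf_flow_route is assumed
 * to have been prepared by the caller for both directions.
 */
static int __maybe_unused
example_offload_session(struct nf_flowtable *flow_table, struct nf_conn *ct,
			const struct nf_flow_route *route)
{
	struct flow_offload *flow;
	int err;

	flow = flow_offload_alloc(ct);	/* takes a reference on ct */
	if (!flow)
		return -ENOMEM;

	err = flow_offload_route_init(flow, route);
	if (err < 0)
		goto err_free;

	/* inserts both tuple directions, may kick off hardware offload */
	err = flow_offload_add(flow_table, flow);
	if (err < 0)
		goto err_free;

	return 0;

err_free:
	flow_offload_free(flow);	/* drops the ct reference */
	return err;
}
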
struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
		    struct flow_offload_tuple *tuple)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload *flow;
	int dir;

	tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
				      nf_flow_offload_rhash_params);
	if (!tuplehash)
		return NULL;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
		return NULL;

	if (unlikely(nf_ct_is_dying(flow->ct)))
		return NULL;

	return tuplehash;
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);

static int
nf_flow_table_iterate(struct nf_flowtable *flow_table,
		      void (*iter)(struct nf_flowtable *flowtable,
				   struct flow_offload *flow, void *data),
		      void *data)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct rhashtable_iter hti;
	struct flow_offload *flow;
	int err = 0;

	rhashtable_walk_enter(&flow_table->rhashtable, &hti);
	rhashtable_walk_start(&hti);

	while ((tuplehash = rhashtable_walk_next(&hti))) {
		if (IS_ERR(tuplehash)) {
			if (PTR_ERR(tuplehash) != -EAGAIN) {
				err = PTR_ERR(tuplehash);
				break;
			}
			continue;
		}
		if (tuplehash->tuple.dir)
			continue;

		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

		iter(flow_table, flow, data);
	}
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	return err;
}

static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
				    struct flow_offload *flow, void *data)
{
	if (nf_flow_has_expired(flow) ||
	    nf_ct_is_dying(flow->ct))
		flow_offload_teardown(flow);

	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
		if (test_bit(NF_FLOW_HW, &flow->flags)) {
			if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
				nf_flow_offload_del(flow_table, flow);
			else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags))
				flow_offload_del(flow_table, flow);
		} else {
			flow_offload_del(flow_table, flow);
		}
	} else if (test_bit(NF_FLOW_HW, &flow->flags)) {
		nf_flow_offload_stats(flow_table, flow);
	}
}

void nf_flow_table_gc_run(struct nf_flowtable *flow_table)
{
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
}

static void nf_flow_offload_work_gc(struct work_struct *work)
{
	struct nf_flowtable *flow_table;

	flow_table = container_of(work, struct nf_flowtable, gc_work.work);
	nf_flow_table_gc_run(flow_table);
	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}

static void nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
}

static void nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace2(&udph->check, skb, port,
					 new_port, false);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
			     u8 protocol, __be16 port, __be16 new_port)
{
	switch (protocol) {
	case IPPROTO_TCP:
		nf_flow_nat_port_tcp(skb, thoff, port, new_port);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_port_udp(skb, thoff, port, new_port);
		break;
	}
}

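/* SNAT port rewrite: packets in the original direction get their source
 * port replaced with the reply tuple's destination port; reply packets
 * get their destination port restored from the original tuple's source
 * port. The transport checksum is fixed up afterwards.
 */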
void nf_flow_snat_port(const struct flow_offload *flow,
		       struct sk_buff *skb, unsigned int thoff,
		       u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
		hdr->source = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
		hdr->dest = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_snat_port);

void nf_flow_dnat_port(const struct flow_offload *flow, struct sk_buff *skb,
		       unsigned int thoff, u8 protocol,
		       enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
		hdr->dest = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
		hdr->source = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);

int nf_flow_table_init(struct nf_flowtable *flowtable)
{
	int err;

	INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
	flow_block_init(&flowtable->flow_block);
	init_rwsem(&flowtable->flow_block_lock);

	err = rhashtable_init(&flowtable->rhashtable,
			      &nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	queue_delayed_work(system_power_efficient_wq,
			   &flowtable->gc_work, HZ);

	mutex_lock(&flowtable_lock);
	list_add(&flowtable->list, &flowtables);
	mutex_unlock(&flowtable_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_table_init);

static void nf_flow_table_do_cleanup(struct nf_flowtable *flow_table,
				     struct flow_offload *flow, void *data)
{
	struct net_device *dev = data;

	if (!dev) {
		flow_offload_teardown(flow);
		return;
	}

	if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
	    (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
	     flow->tuplehash[1].tuple.iifidx == dev->ifindex))
		flow_offload_teardown(flow);
}

void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
			      struct net_device *dev)
{
	nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
	flush_delayed_work(&flowtable->gc_work);
	nf_flow_table_offload_flush(flowtable);
}

void nf_flow_table_cleanup(struct net_device *dev)
{
	struct nf_flowtable *flowtable;

	mutex_lock(&flowtable_lock);
	list_for_each_entry(flowtable, &flowtables, list)
		nf_flow_table_gc_cleanup(flowtable, dev);
	mutex_unlock(&flowtable_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);

void nf_flow_table_free(struct nf_flowtable *flow_table)
{
	mutex_lock(&flowtable_lock);
	list_del(&flow_table->list);
	mutex_unlock(&flowtable_lock);

	cancel_delayed_work_sync(&flow_table->gc_work);
	nf_flow_table_offload_flush(flow_table);
	/* ... no more pending work after this stage ... */
	nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
	nf_flow_table_gc_run(flow_table);
	nf_flow_table_offload_flush_cleanup(flow_table);
	rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);

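/* Per-netns setup: percpu flowtable statistics and the procfs view. */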
static int nf_flow_table_init_net(struct net *net)
{
	net->ft.stat = alloc_percpu(struct nf_flow_table_stat);
	return net->ft.stat ? 0 : -ENOMEM;
}

static void nf_flow_table_fini_net(struct net *net)
{
	free_percpu(net->ft.stat);
}

static int nf_flow_table_pernet_init(struct net *net)
{
	int ret;

	ret = nf_flow_table_init_net(net);
	if (ret < 0)
		return ret;

	ret = nf_flow_table_init_proc(net);
	if (ret < 0)
		goto out_proc;

	return 0;

out_proc:
	nf_flow_table_fini_net(net);
	return ret;
}

static void nf_flow_table_pernet_exit(struct list_head *net_exit_list)
{
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		nf_flow_table_fini_proc(net);
		nf_flow_table_fini_net(net);
	}
}

static struct pernet_operations nf_flow_table_net_ops = {
	.init = nf_flow_table_pernet_init,
	.exit_batch = nf_flow_table_pernet_exit,
};

static int __init nf_flow_table_module_init(void)
{
	int ret;

	ret = register_pernet_subsys(&nf_flow_table_net_ops);
	if (ret < 0)
		return ret;

	ret = nf_flow_table_offload_init();
	if (ret)
		goto out_offload;

	return 0;

out_offload:
	unregister_pernet_subsys(&nf_flow_table_net_ops);
	return ret;
}

static void __exit nf_flow_table_module_exit(void)
{
	nf_flow_table_offload_exit();
	unregister_pernet_subsys(&nf_flow_table_net_ops);
}

module_init(nf_flow_table_module_init);
module_exit(nf_flow_table_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("Netfilter flow table module");