1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2 /* - 3 * net/sched/act_ct.c Connection Tracking action 4 * 5 * Authors: Paul Blakey <paulb@mellanox.com> 6 * Yossi Kuperman <yossiku@mellanox.com> 7 * Marcelo Ricardo Leitner <marcelo.leitner@gmail.com> 8 */ 9 10 #include <linux/module.h> 11 #include <linux/init.h> 12 #include <linux/kernel.h> 13 #include <linux/skbuff.h> 14 #include <linux/rtnetlink.h> 15 #include <linux/pkt_cls.h> 16 #include <linux/ip.h> 17 #include <linux/ipv6.h> 18 #include <linux/rhashtable.h> 19 #include <net/netlink.h> 20 #include <net/pkt_sched.h> 21 #include <net/pkt_cls.h> 22 #include <net/act_api.h> 23 #include <net/ip.h> 24 #include <net/ipv6_frag.h> 25 #include <uapi/linux/tc_act/tc_ct.h> 26 #include <net/tc_act/tc_ct.h> 27 #include <net/tc_wrapper.h> 28 29 #include <net/netfilter/nf_flow_table.h> 30 #include <net/netfilter/nf_conntrack.h> 31 #include <net/netfilter/nf_conntrack_core.h> 32 #include <net/netfilter/nf_conntrack_zones.h> 33 #include <net/netfilter/nf_conntrack_helper.h> 34 #include <net/netfilter/nf_conntrack_acct.h> 35 #include <net/netfilter/ipv6/nf_defrag_ipv6.h> 36 #include <net/netfilter/nf_conntrack_act_ct.h> 37 #include <net/netfilter/nf_conntrack_seqadj.h> 38 #include <uapi/linux/netfilter/nf_nat.h> 39 40 static struct workqueue_struct *act_ct_wq; 41 static struct rhashtable zones_ht; 42 static DEFINE_MUTEX(zones_mutex); 43 44 struct tcf_ct_flow_table { 45 struct rhash_head node; /* In zones tables */ 46 47 struct rcu_work rwork; 48 struct nf_flowtable nf_ft; 49 refcount_t ref; 50 u16 zone; 51 52 bool dying; 53 }; 54 55 static const struct rhashtable_params zones_params = { 56 .head_offset = offsetof(struct tcf_ct_flow_table, node), 57 .key_offset = offsetof(struct tcf_ct_flow_table, zone), 58 .key_len = sizeof_field(struct tcf_ct_flow_table, zone), 59 .automatic_shrinking = true, 60 }; 61 62 static struct flow_action_entry * 63 tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action) 64 { 65 int i = flow_action->num_entries++; 66 67 return &flow_action->entries[i]; 68 } 69 70 static void tcf_ct_add_mangle_action(struct flow_action *action, 71 enum flow_action_mangle_base htype, 72 u32 offset, 73 u32 mask, 74 u32 val) 75 { 76 struct flow_action_entry *entry; 77 78 entry = tcf_ct_flow_table_flow_action_get_next(action); 79 entry->id = FLOW_ACTION_MANGLE; 80 entry->mangle.htype = htype; 81 entry->mangle.mask = ~mask; 82 entry->mangle.offset = offset; 83 entry->mangle.val = val; 84 } 85 86 /* The following nat helper functions check if the inverted reverse tuple 87 * (target) is different then the current dir tuple - meaning nat for ports 88 * and/or ip is needed, and add the relevant mangle actions. 89 */ 90 static void 91 tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple, 92 struct nf_conntrack_tuple target, 93 struct flow_action *action) 94 { 95 if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3))) 96 tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4, 97 offsetof(struct iphdr, saddr), 98 0xFFFFFFFF, 99 be32_to_cpu(target.src.u3.ip)); 100 if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3))) 101 tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4, 102 offsetof(struct iphdr, daddr), 103 0xFFFFFFFF, 104 be32_to_cpu(target.dst.u3.ip)); 105 } 106 107 static void 108 tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action, 109 union nf_inet_addr *addr, 110 u32 offset) 111 { 112 int i; 113 114 for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++) 115 tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6, 116 i * sizeof(u32) + offset, 117 0xFFFFFFFF, be32_to_cpu(addr->ip6[i])); 118 } 119 120 static void 121 tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple, 122 struct nf_conntrack_tuple target, 123 struct flow_action *action) 124 { 125 if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3))) 126 tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3, 127 offsetof(struct ipv6hdr, 128 saddr)); 129 if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3))) 130 tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3, 131 offsetof(struct ipv6hdr, 132 daddr)); 133 } 134 135 static void 136 tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple, 137 struct nf_conntrack_tuple target, 138 struct flow_action *action) 139 { 140 __be16 target_src = target.src.u.tcp.port; 141 __be16 target_dst = target.dst.u.tcp.port; 142 143 if (target_src != tuple->src.u.tcp.port) 144 tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP, 145 offsetof(struct tcphdr, source), 146 0xFFFF, be16_to_cpu(target_src)); 147 if (target_dst != tuple->dst.u.tcp.port) 148 tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP, 149 offsetof(struct tcphdr, dest), 150 0xFFFF, be16_to_cpu(target_dst)); 151 } 152 153 static void 154 tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple, 155 struct nf_conntrack_tuple target, 156 struct flow_action *action) 157 { 158 __be16 target_src = target.src.u.udp.port; 159 __be16 target_dst = target.dst.u.udp.port; 160 161 if (target_src != tuple->src.u.udp.port) 162 tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP, 163 offsetof(struct udphdr, source), 164 0xFFFF, be16_to_cpu(target_src)); 165 if (target_dst != tuple->dst.u.udp.port) 166 tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP, 167 offsetof(struct udphdr, dest), 168 0xFFFF, be16_to_cpu(target_dst)); 169 } 170 171 static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct, 172 enum ip_conntrack_dir dir, 173 enum ip_conntrack_info ctinfo, 174 struct flow_action *action) 175 { 176 struct nf_conn_labels *ct_labels; 177 struct flow_action_entry *entry; 178 u32 *act_ct_labels; 179 180 entry = tcf_ct_flow_table_flow_action_get_next(action); 181 entry->id = FLOW_ACTION_CT_METADATA; 182 #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) 183 entry->ct_metadata.mark = READ_ONCE(ct->mark); 184 #endif 185 /* aligns with the CT reference on the SKB nf_ct_set */ 186 entry->ct_metadata.cookie = (unsigned long)ct | ctinfo; 187 entry->ct_metadata.orig_dir = dir == IP_CT_DIR_ORIGINAL; 188 189 act_ct_labels = entry->ct_metadata.labels; 190 ct_labels = nf_ct_labels_find(ct); 191 if (ct_labels) 192 memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE); 193 else 194 memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE); 195 } 196 197 static int tcf_ct_flow_table_add_action_nat(struct net *net, 198 struct nf_conn *ct, 199 enum ip_conntrack_dir dir, 200 struct flow_action *action) 201 { 202 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; 203 struct nf_conntrack_tuple target; 204 205 if (!(ct->status & IPS_NAT_MASK)) 206 return 0; 207 208 nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple); 209 210 switch (tuple->src.l3num) { 211 case NFPROTO_IPV4: 212 tcf_ct_flow_table_add_action_nat_ipv4(tuple, target, 213 action); 214 break; 215 case NFPROTO_IPV6: 216 tcf_ct_flow_table_add_action_nat_ipv6(tuple, target, 217 action); 218 break; 219 default: 220 return -EOPNOTSUPP; 221 } 222 223 switch (nf_ct_protonum(ct)) { 224 case IPPROTO_TCP: 225 tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action); 226 break; 227 case IPPROTO_UDP: 228 tcf_ct_flow_table_add_action_nat_udp(tuple, target, action); 229 break; 230 default: 231 return -EOPNOTSUPP; 232 } 233 234 return 0; 235 } 236 237 static int tcf_ct_flow_table_fill_actions(struct net *net, 238 struct flow_offload *flow, 239 enum flow_offload_tuple_dir tdir, 240 struct nf_flow_rule *flow_rule) 241 { 242 struct flow_action *action = &flow_rule->rule->action; 243 int num_entries = action->num_entries; 244 struct nf_conn *ct = flow->ct; 245 enum ip_conntrack_info ctinfo; 246 enum ip_conntrack_dir dir; 247 int i, err; 248 249 switch (tdir) { 250 case FLOW_OFFLOAD_DIR_ORIGINAL: 251 dir = IP_CT_DIR_ORIGINAL; 252 ctinfo = test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ? 253 IP_CT_ESTABLISHED : IP_CT_NEW; 254 if (ctinfo == IP_CT_ESTABLISHED) 255 set_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags); 256 break; 257 case FLOW_OFFLOAD_DIR_REPLY: 258 dir = IP_CT_DIR_REPLY; 259 ctinfo = IP_CT_ESTABLISHED_REPLY; 260 break; 261 default: 262 return -EOPNOTSUPP; 263 } 264 265 err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action); 266 if (err) 267 goto err_nat; 268 269 tcf_ct_flow_table_add_action_meta(ct, dir, ctinfo, action); 270 return 0; 271 272 err_nat: 273 /* Clear filled actions */ 274 for (i = num_entries; i < action->num_entries; i++) 275 memset(&action->entries[i], 0, sizeof(action->entries[i])); 276 action->num_entries = num_entries; 277 278 return err; 279 } 280 281 static struct nf_flowtable_type flowtable_ct = { 282 .action = tcf_ct_flow_table_fill_actions, 283 .owner = THIS_MODULE, 284 }; 285 286 static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params) 287 { 288 struct tcf_ct_flow_table *ct_ft; 289 int err = -ENOMEM; 290 291 mutex_lock(&zones_mutex); 292 ct_ft = rhashtable_lookup_fast(&zones_ht, ¶ms->zone, zones_params); 293 if (ct_ft && refcount_inc_not_zero(&ct_ft->ref)) 294 goto out_unlock; 295 296 ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL); 297 if (!ct_ft) 298 goto err_alloc; 299 refcount_set(&ct_ft->ref, 1); 300 301 ct_ft->zone = params->zone; 302 err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params); 303 if (err) 304 goto err_insert; 305 306 ct_ft->nf_ft.type = &flowtable_ct; 307 ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD | 308 NF_FLOWTABLE_COUNTER; 309 err = nf_flow_table_init(&ct_ft->nf_ft); 310 if (err) 311 goto err_init; 312 write_pnet(&ct_ft->nf_ft.net, net); 313 314 __module_get(THIS_MODULE); 315 out_unlock: 316 params->ct_ft = ct_ft; 317 params->nf_ft = &ct_ft->nf_ft; 318 mutex_unlock(&zones_mutex); 319 320 return 0; 321 322 err_init: 323 rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params); 324 err_insert: 325 kfree(ct_ft); 326 err_alloc: 327 mutex_unlock(&zones_mutex); 328 return err; 329 } 330 331 static void tcf_ct_flow_table_cleanup_work(struct work_struct *work) 332 { 333 struct flow_block_cb *block_cb, *tmp_cb; 334 struct tcf_ct_flow_table *ct_ft; 335 struct flow_block *block; 336 337 ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table, 338 rwork); 339 nf_flow_table_free(&ct_ft->nf_ft); 340 341 /* Remove any remaining callbacks before cleanup */ 342 block = &ct_ft->nf_ft.flow_block; 343 down_write(&ct_ft->nf_ft.flow_block_lock); 344 list_for_each_entry_safe(block_cb, tmp_cb, &block->cb_list, list) { 345 list_del(&block_cb->list); 346 flow_block_cb_free(block_cb); 347 } 348 up_write(&ct_ft->nf_ft.flow_block_lock); 349 kfree(ct_ft); 350 351 module_put(THIS_MODULE); 352 } 353 354 static void tcf_ct_flow_table_put(struct tcf_ct_flow_table *ct_ft) 355 { 356 if (refcount_dec_and_test(&ct_ft->ref)) { 357 rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params); 358 INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work); 359 queue_rcu_work(act_ct_wq, &ct_ft->rwork); 360 } 361 } 362 363 static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry, 364 struct nf_conn_act_ct_ext *act_ct_ext, u8 dir) 365 { 366 entry->tuplehash[dir].tuple.xmit_type = FLOW_OFFLOAD_XMIT_TC; 367 entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir]; 368 } 369 370 static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft, 371 struct nf_conn *ct, 372 bool tcp, bool bidirectional) 373 { 374 struct nf_conn_act_ct_ext *act_ct_ext; 375 struct flow_offload *entry; 376 int err; 377 378 if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status)) 379 return; 380 381 entry = flow_offload_alloc(ct); 382 if (!entry) { 383 WARN_ON_ONCE(1); 384 goto err_alloc; 385 } 386 387 if (tcp) { 388 ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL; 389 ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL; 390 } 391 if (bidirectional) 392 __set_bit(NF_FLOW_HW_BIDIRECTIONAL, &entry->flags); 393 394 act_ct_ext = nf_conn_act_ct_ext_find(ct); 395 if (act_ct_ext) { 396 tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL); 397 tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY); 398 } 399 400 err = flow_offload_add(&ct_ft->nf_ft, entry); 401 if (err) 402 goto err_add; 403 404 return; 405 406 err_add: 407 flow_offload_free(entry); 408 err_alloc: 409 clear_bit(IPS_OFFLOAD_BIT, &ct->status); 410 } 411 412 static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft, 413 struct nf_conn *ct, 414 enum ip_conntrack_info ctinfo) 415 { 416 bool tcp = false, bidirectional = true; 417 418 switch (nf_ct_protonum(ct)) { 419 case IPPROTO_TCP: 420 if ((ctinfo != IP_CT_ESTABLISHED && 421 ctinfo != IP_CT_ESTABLISHED_REPLY) || 422 !test_bit(IPS_ASSURED_BIT, &ct->status) || 423 ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED) 424 return; 425 426 tcp = true; 427 break; 428 case IPPROTO_UDP: 429 if (!nf_ct_is_confirmed(ct)) 430 return; 431 if (!test_bit(IPS_ASSURED_BIT, &ct->status)) 432 bidirectional = false; 433 break; 434 #ifdef CONFIG_NF_CT_PROTO_GRE 435 case IPPROTO_GRE: { 436 struct nf_conntrack_tuple *tuple; 437 438 if ((ctinfo != IP_CT_ESTABLISHED && 439 ctinfo != IP_CT_ESTABLISHED_REPLY) || 440 !test_bit(IPS_ASSURED_BIT, &ct->status) || 441 ct->status & IPS_NAT_MASK) 442 return; 443 444 tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; 445 /* No support for GRE v1 */ 446 if (tuple->src.u.gre.key || tuple->dst.u.gre.key) 447 return; 448 break; 449 } 450 #endif 451 default: 452 return; 453 } 454 455 if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) || 456 ct->status & IPS_SEQ_ADJUST) 457 return; 458 459 tcf_ct_flow_table_add(ct_ft, ct, tcp, bidirectional); 460 } 461 462 static bool 463 tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb, 464 struct flow_offload_tuple *tuple, 465 struct tcphdr **tcph) 466 { 467 struct flow_ports *ports; 468 unsigned int thoff; 469 struct iphdr *iph; 470 size_t hdrsize; 471 u8 ipproto; 472 473 if (!pskb_network_may_pull(skb, sizeof(*iph))) 474 return false; 475 476 iph = ip_hdr(skb); 477 thoff = iph->ihl * 4; 478 479 if (ip_is_fragment(iph) || 480 unlikely(thoff != sizeof(struct iphdr))) 481 return false; 482 483 ipproto = iph->protocol; 484 switch (ipproto) { 485 case IPPROTO_TCP: 486 hdrsize = sizeof(struct tcphdr); 487 break; 488 case IPPROTO_UDP: 489 hdrsize = sizeof(*ports); 490 break; 491 #ifdef CONFIG_NF_CT_PROTO_GRE 492 case IPPROTO_GRE: 493 hdrsize = sizeof(struct gre_base_hdr); 494 break; 495 #endif 496 default: 497 return false; 498 } 499 500 if (iph->ttl <= 1) 501 return false; 502 503 if (!pskb_network_may_pull(skb, thoff + hdrsize)) 504 return false; 505 506 switch (ipproto) { 507 case IPPROTO_TCP: 508 *tcph = (void *)(skb_network_header(skb) + thoff); 509 fallthrough; 510 case IPPROTO_UDP: 511 ports = (struct flow_ports *)(skb_network_header(skb) + thoff); 512 tuple->src_port = ports->source; 513 tuple->dst_port = ports->dest; 514 break; 515 case IPPROTO_GRE: { 516 struct gre_base_hdr *greh; 517 518 greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff); 519 if ((greh->flags & GRE_VERSION) != GRE_VERSION_0) 520 return false; 521 break; 522 } 523 } 524 525 iph = ip_hdr(skb); 526 527 tuple->src_v4.s_addr = iph->saddr; 528 tuple->dst_v4.s_addr = iph->daddr; 529 tuple->l3proto = AF_INET; 530 tuple->l4proto = ipproto; 531 532 return true; 533 } 534 535 static bool 536 tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb, 537 struct flow_offload_tuple *tuple, 538 struct tcphdr **tcph) 539 { 540 struct flow_ports *ports; 541 struct ipv6hdr *ip6h; 542 unsigned int thoff; 543 size_t hdrsize; 544 u8 nexthdr; 545 546 if (!pskb_network_may_pull(skb, sizeof(*ip6h))) 547 return false; 548 549 ip6h = ipv6_hdr(skb); 550 thoff = sizeof(*ip6h); 551 552 nexthdr = ip6h->nexthdr; 553 switch (nexthdr) { 554 case IPPROTO_TCP: 555 hdrsize = sizeof(struct tcphdr); 556 break; 557 case IPPROTO_UDP: 558 hdrsize = sizeof(*ports); 559 break; 560 #ifdef CONFIG_NF_CT_PROTO_GRE 561 case IPPROTO_GRE: 562 hdrsize = sizeof(struct gre_base_hdr); 563 break; 564 #endif 565 default: 566 return false; 567 } 568 569 if (ip6h->hop_limit <= 1) 570 return false; 571 572 if (!pskb_network_may_pull(skb, thoff + hdrsize)) 573 return false; 574 575 switch (nexthdr) { 576 case IPPROTO_TCP: 577 *tcph = (void *)(skb_network_header(skb) + thoff); 578 fallthrough; 579 case IPPROTO_UDP: 580 ports = (struct flow_ports *)(skb_network_header(skb) + thoff); 581 tuple->src_port = ports->source; 582 tuple->dst_port = ports->dest; 583 break; 584 case IPPROTO_GRE: { 585 struct gre_base_hdr *greh; 586 587 greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff); 588 if ((greh->flags & GRE_VERSION) != GRE_VERSION_0) 589 return false; 590 break; 591 } 592 } 593 594 ip6h = ipv6_hdr(skb); 595 596 tuple->src_v6 = ip6h->saddr; 597 tuple->dst_v6 = ip6h->daddr; 598 tuple->l3proto = AF_INET6; 599 tuple->l4proto = nexthdr; 600 601 return true; 602 } 603 604 static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p, 605 struct sk_buff *skb, 606 u8 family) 607 { 608 struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft; 609 struct flow_offload_tuple_rhash *tuplehash; 610 struct flow_offload_tuple tuple = {}; 611 enum ip_conntrack_info ctinfo; 612 struct tcphdr *tcph = NULL; 613 bool force_refresh = false; 614 struct flow_offload *flow; 615 struct nf_conn *ct; 616 u8 dir; 617 618 switch (family) { 619 case NFPROTO_IPV4: 620 if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph)) 621 return false; 622 break; 623 case NFPROTO_IPV6: 624 if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph)) 625 return false; 626 break; 627 default: 628 return false; 629 } 630 631 tuplehash = flow_offload_lookup(nf_ft, &tuple); 632 if (!tuplehash) 633 return false; 634 635 dir = tuplehash->tuple.dir; 636 flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]); 637 ct = flow->ct; 638 639 if (dir == FLOW_OFFLOAD_DIR_REPLY && 640 !test_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags)) { 641 /* Only offload reply direction after connection became 642 * assured. 643 */ 644 if (test_bit(IPS_ASSURED_BIT, &ct->status)) 645 set_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags); 646 else if (test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags)) 647 /* If flow_table flow has already been updated to the 648 * established state, then don't refresh. 649 */ 650 return false; 651 force_refresh = true; 652 } 653 654 if (tcph && (unlikely(tcph->fin || tcph->rst))) { 655 flow_offload_teardown(flow); 656 return false; 657 } 658 659 if (dir == FLOW_OFFLOAD_DIR_ORIGINAL) 660 ctinfo = test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ? 661 IP_CT_ESTABLISHED : IP_CT_NEW; 662 else 663 ctinfo = IP_CT_ESTABLISHED_REPLY; 664 665 flow_offload_refresh(nf_ft, flow, force_refresh); 666 if (!test_bit(IPS_ASSURED_BIT, &ct->status)) { 667 /* Process this flow in SW to allow promoting to ASSURED */ 668 return false; 669 } 670 671 nf_conntrack_get(&ct->ct_general); 672 nf_ct_set(skb, ct, ctinfo); 673 if (nf_ft->flags & NF_FLOWTABLE_COUNTER) 674 nf_ct_acct_update(ct, dir, skb->len); 675 676 return true; 677 } 678 679 static int tcf_ct_flow_tables_init(void) 680 { 681 return rhashtable_init(&zones_ht, &zones_params); 682 } 683 684 static void tcf_ct_flow_tables_uninit(void) 685 { 686 rhashtable_destroy(&zones_ht); 687 } 688 689 static struct tc_action_ops act_ct_ops; 690 691 struct tc_ct_action_net { 692 struct tc_action_net tn; /* Must be first */ 693 bool labels; 694 }; 695 696 /* Determine whether skb->_nfct is equal to the result of conntrack lookup. */ 697 static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb, 698 struct tcf_ct_params *p) 699 { 700 enum ip_conntrack_info ctinfo; 701 struct nf_conn *ct; 702 703 ct = nf_ct_get(skb, &ctinfo); 704 if (!ct) 705 return false; 706 if (!net_eq(net, read_pnet(&ct->ct_net))) 707 goto drop_ct; 708 if (nf_ct_zone(ct)->id != p->zone) 709 goto drop_ct; 710 if (p->helper) { 711 struct nf_conn_help *help; 712 713 help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER); 714 if (help && rcu_access_pointer(help->helper) != p->helper) 715 goto drop_ct; 716 } 717 718 /* Force conntrack entry direction. */ 719 if ((p->ct_action & TCA_CT_ACT_FORCE) && 720 CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) { 721 if (nf_ct_is_confirmed(ct)) 722 nf_ct_kill(ct); 723 724 goto drop_ct; 725 } 726 727 return true; 728 729 drop_ct: 730 nf_ct_put(ct); 731 nf_ct_set(skb, NULL, IP_CT_UNTRACKED); 732 733 return false; 734 } 735 736 static u8 tcf_ct_skb_nf_family(struct sk_buff *skb) 737 { 738 u8 family = NFPROTO_UNSPEC; 739 740 switch (skb_protocol(skb, true)) { 741 case htons(ETH_P_IP): 742 family = NFPROTO_IPV4; 743 break; 744 case htons(ETH_P_IPV6): 745 family = NFPROTO_IPV6; 746 break; 747 default: 748 break; 749 } 750 751 return family; 752 } 753 754 static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag) 755 { 756 unsigned int len; 757 758 len = skb_network_offset(skb) + sizeof(struct iphdr); 759 if (unlikely(skb->len < len)) 760 return -EINVAL; 761 if (unlikely(!pskb_may_pull(skb, len))) 762 return -ENOMEM; 763 764 *frag = ip_is_fragment(ip_hdr(skb)); 765 return 0; 766 } 767 768 static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag) 769 { 770 unsigned int flags = 0, len, payload_ofs = 0; 771 unsigned short frag_off; 772 int nexthdr; 773 774 len = skb_network_offset(skb) + sizeof(struct ipv6hdr); 775 if (unlikely(skb->len < len)) 776 return -EINVAL; 777 if (unlikely(!pskb_may_pull(skb, len))) 778 return -ENOMEM; 779 780 nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags); 781 if (unlikely(nexthdr < 0)) 782 return -EPROTO; 783 784 *frag = flags & IP6_FH_F_FRAG; 785 return 0; 786 } 787 788 static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb, 789 u8 family, u16 zone, bool *defrag) 790 { 791 enum ip_conntrack_info ctinfo; 792 struct nf_conn *ct; 793 int err = 0; 794 bool frag; 795 u8 proto; 796 u16 mru; 797 798 /* Previously seen (loopback)? Ignore. */ 799 ct = nf_ct_get(skb, &ctinfo); 800 if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED) 801 return 0; 802 803 if (family == NFPROTO_IPV4) 804 err = tcf_ct_ipv4_is_fragment(skb, &frag); 805 else 806 err = tcf_ct_ipv6_is_fragment(skb, &frag); 807 if (err || !frag) 808 return err; 809 810 skb_get(skb); 811 err = nf_ct_handle_fragments(net, skb, zone, family, &proto, &mru); 812 if (err) 813 return err; 814 815 *defrag = true; 816 tc_skb_cb(skb)->mru = mru; 817 818 return 0; 819 } 820 821 static void tcf_ct_params_free(struct tcf_ct_params *params) 822 { 823 if (params->helper) { 824 #if IS_ENABLED(CONFIG_NF_NAT) 825 if (params->ct_action & TCA_CT_ACT_NAT) 826 nf_nat_helper_put(params->helper); 827 #endif 828 nf_conntrack_helper_put(params->helper); 829 } 830 if (params->ct_ft) 831 tcf_ct_flow_table_put(params->ct_ft); 832 if (params->tmpl) 833 nf_ct_put(params->tmpl); 834 kfree(params); 835 } 836 837 static void tcf_ct_params_free_rcu(struct rcu_head *head) 838 { 839 struct tcf_ct_params *params; 840 841 params = container_of(head, struct tcf_ct_params, rcu); 842 tcf_ct_params_free(params); 843 } 844 845 static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask) 846 { 847 #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) 848 u32 new_mark; 849 850 if (!mask) 851 return; 852 853 new_mark = mark | (READ_ONCE(ct->mark) & ~(mask)); 854 if (READ_ONCE(ct->mark) != new_mark) { 855 WRITE_ONCE(ct->mark, new_mark); 856 if (nf_ct_is_confirmed(ct)) 857 nf_conntrack_event_cache(IPCT_MARK, ct); 858 } 859 #endif 860 } 861 862 static void tcf_ct_act_set_labels(struct nf_conn *ct, 863 u32 *labels, 864 u32 *labels_m) 865 { 866 #if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) 867 size_t labels_sz = sizeof_field(struct tcf_ct_params, labels); 868 869 if (!memchr_inv(labels_m, 0, labels_sz)) 870 return; 871 872 nf_connlabels_replace(ct, labels, labels_m, 4); 873 #endif 874 } 875 876 static int tcf_ct_act_nat(struct sk_buff *skb, 877 struct nf_conn *ct, 878 enum ip_conntrack_info ctinfo, 879 int ct_action, 880 struct nf_nat_range2 *range, 881 bool commit) 882 { 883 #if IS_ENABLED(CONFIG_NF_NAT) 884 int err, action = 0; 885 886 if (!(ct_action & TCA_CT_ACT_NAT)) 887 return NF_ACCEPT; 888 if (ct_action & TCA_CT_ACT_NAT_SRC) 889 action |= BIT(NF_NAT_MANIP_SRC); 890 if (ct_action & TCA_CT_ACT_NAT_DST) 891 action |= BIT(NF_NAT_MANIP_DST); 892 893 err = nf_ct_nat(skb, ct, ctinfo, &action, range, commit); 894 895 if (action & BIT(NF_NAT_MANIP_SRC)) 896 tc_skb_cb(skb)->post_ct_snat = 1; 897 if (action & BIT(NF_NAT_MANIP_DST)) 898 tc_skb_cb(skb)->post_ct_dnat = 1; 899 900 return err; 901 #else 902 return NF_ACCEPT; 903 #endif 904 } 905 906 TC_INDIRECT_SCOPE int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a, 907 struct tcf_result *res) 908 { 909 struct net *net = dev_net(skb->dev); 910 enum ip_conntrack_info ctinfo; 911 struct tcf_ct *c = to_ct(a); 912 struct nf_conn *tmpl = NULL; 913 struct nf_hook_state state; 914 bool cached, commit, clear; 915 int nh_ofs, err, retval; 916 struct tcf_ct_params *p; 917 bool add_helper = false; 918 bool skip_add = false; 919 bool defrag = false; 920 struct nf_conn *ct; 921 u8 family; 922 923 p = rcu_dereference_bh(c->params); 924 925 retval = READ_ONCE(c->tcf_action); 926 commit = p->ct_action & TCA_CT_ACT_COMMIT; 927 clear = p->ct_action & TCA_CT_ACT_CLEAR; 928 tmpl = p->tmpl; 929 930 tcf_lastuse_update(&c->tcf_tm); 931 tcf_action_update_bstats(&c->common, skb); 932 933 if (clear) { 934 tc_skb_cb(skb)->post_ct = false; 935 ct = nf_ct_get(skb, &ctinfo); 936 if (ct) { 937 nf_ct_put(ct); 938 nf_ct_set(skb, NULL, IP_CT_UNTRACKED); 939 } 940 941 goto out_clear; 942 } 943 944 family = tcf_ct_skb_nf_family(skb); 945 if (family == NFPROTO_UNSPEC) 946 goto drop; 947 948 /* The conntrack module expects to be working at L3. 949 * We also try to pull the IPv4/6 header to linear area 950 */ 951 nh_ofs = skb_network_offset(skb); 952 skb_pull_rcsum(skb, nh_ofs); 953 err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag); 954 if (err == -EINPROGRESS) { 955 retval = TC_ACT_STOLEN; 956 goto out_clear; 957 } 958 if (err) 959 goto drop; 960 961 err = nf_ct_skb_network_trim(skb, family); 962 if (err) 963 goto drop; 964 965 /* If we are recirculating packets to match on ct fields and 966 * committing with a separate ct action, then we don't need to 967 * actually run the packet through conntrack twice unless it's for a 968 * different zone. 969 */ 970 cached = tcf_ct_skb_nfct_cached(net, skb, p); 971 if (!cached) { 972 if (tcf_ct_flow_table_lookup(p, skb, family)) { 973 skip_add = true; 974 goto do_nat; 975 } 976 977 /* Associate skb with specified zone. */ 978 if (tmpl) { 979 nf_conntrack_put(skb_nfct(skb)); 980 nf_conntrack_get(&tmpl->ct_general); 981 nf_ct_set(skb, tmpl, IP_CT_NEW); 982 } 983 984 state.hook = NF_INET_PRE_ROUTING; 985 state.net = net; 986 state.pf = family; 987 err = nf_conntrack_in(skb, &state); 988 if (err != NF_ACCEPT) 989 goto out_push; 990 } 991 992 do_nat: 993 ct = nf_ct_get(skb, &ctinfo); 994 if (!ct) 995 goto out_push; 996 nf_ct_deliver_cached_events(ct); 997 nf_conn_act_ct_ext_fill(skb, ct, ctinfo); 998 999 err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit); 1000 if (err != NF_ACCEPT) 1001 goto drop; 1002 1003 if (!nf_ct_is_confirmed(ct) && commit && p->helper && !nfct_help(ct)) { 1004 err = __nf_ct_try_assign_helper(ct, p->tmpl, GFP_ATOMIC); 1005 if (err) 1006 goto drop; 1007 add_helper = true; 1008 if (p->ct_action & TCA_CT_ACT_NAT && !nfct_seqadj(ct)) { 1009 if (!nfct_seqadj_ext_add(ct)) 1010 goto drop; 1011 } 1012 } 1013 1014 if (nf_ct_is_confirmed(ct) ? ((!cached && !skip_add) || add_helper) : commit) { 1015 if (nf_ct_helper(skb, ct, ctinfo, family) != NF_ACCEPT) 1016 goto drop; 1017 } 1018 1019 if (commit) { 1020 tcf_ct_act_set_mark(ct, p->mark, p->mark_mask); 1021 tcf_ct_act_set_labels(ct, p->labels, p->labels_mask); 1022 1023 if (!nf_ct_is_confirmed(ct)) 1024 nf_conn_act_ct_ext_add(ct); 1025 1026 /* This will take care of sending queued events 1027 * even if the connection is already confirmed. 1028 */ 1029 if (nf_conntrack_confirm(skb) != NF_ACCEPT) 1030 goto drop; 1031 } 1032 1033 if (!skip_add) 1034 tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo); 1035 1036 out_push: 1037 skb_push_rcsum(skb, nh_ofs); 1038 1039 tc_skb_cb(skb)->post_ct = true; 1040 tc_skb_cb(skb)->zone = p->zone; 1041 out_clear: 1042 if (defrag) 1043 qdisc_skb_cb(skb)->pkt_len = skb->len; 1044 return retval; 1045 1046 drop: 1047 tcf_action_inc_drop_qstats(&c->common); 1048 return TC_ACT_SHOT; 1049 } 1050 1051 static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = { 1052 [TCA_CT_ACTION] = { .type = NLA_U16 }, 1053 [TCA_CT_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_ct)), 1054 [TCA_CT_ZONE] = { .type = NLA_U16 }, 1055 [TCA_CT_MARK] = { .type = NLA_U32 }, 1056 [TCA_CT_MARK_MASK] = { .type = NLA_U32 }, 1057 [TCA_CT_LABELS] = { .type = NLA_BINARY, 1058 .len = 128 / BITS_PER_BYTE }, 1059 [TCA_CT_LABELS_MASK] = { .type = NLA_BINARY, 1060 .len = 128 / BITS_PER_BYTE }, 1061 [TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 }, 1062 [TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 }, 1063 [TCA_CT_NAT_IPV6_MIN] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)), 1064 [TCA_CT_NAT_IPV6_MAX] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)), 1065 [TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 }, 1066 [TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 }, 1067 [TCA_CT_HELPER_NAME] = { .type = NLA_STRING, .len = NF_CT_HELPER_NAME_LEN }, 1068 [TCA_CT_HELPER_FAMILY] = { .type = NLA_U8 }, 1069 [TCA_CT_HELPER_PROTO] = { .type = NLA_U8 }, 1070 }; 1071 1072 static int tcf_ct_fill_params_nat(struct tcf_ct_params *p, 1073 struct tc_ct *parm, 1074 struct nlattr **tb, 1075 struct netlink_ext_ack *extack) 1076 { 1077 struct nf_nat_range2 *range; 1078 1079 if (!(p->ct_action & TCA_CT_ACT_NAT)) 1080 return 0; 1081 1082 if (!IS_ENABLED(CONFIG_NF_NAT)) { 1083 NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel"); 1084 return -EOPNOTSUPP; 1085 } 1086 1087 if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST))) 1088 return 0; 1089 1090 if ((p->ct_action & TCA_CT_ACT_NAT_SRC) && 1091 (p->ct_action & TCA_CT_ACT_NAT_DST)) { 1092 NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time"); 1093 return -EOPNOTSUPP; 1094 } 1095 1096 range = &p->range; 1097 if (tb[TCA_CT_NAT_IPV4_MIN]) { 1098 struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX]; 1099 1100 p->ipv4_range = true; 1101 range->flags |= NF_NAT_RANGE_MAP_IPS; 1102 range->min_addr.ip = 1103 nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]); 1104 1105 range->max_addr.ip = max_attr ? 1106 nla_get_in_addr(max_attr) : 1107 range->min_addr.ip; 1108 } else if (tb[TCA_CT_NAT_IPV6_MIN]) { 1109 struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX]; 1110 1111 p->ipv4_range = false; 1112 range->flags |= NF_NAT_RANGE_MAP_IPS; 1113 range->min_addr.in6 = 1114 nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]); 1115 1116 range->max_addr.in6 = max_attr ? 1117 nla_get_in6_addr(max_attr) : 1118 range->min_addr.in6; 1119 } 1120 1121 if (tb[TCA_CT_NAT_PORT_MIN]) { 1122 range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 1123 range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]); 1124 1125 range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ? 1126 nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) : 1127 range->min_proto.all; 1128 } 1129 1130 return 0; 1131 } 1132 1133 static void tcf_ct_set_key_val(struct nlattr **tb, 1134 void *val, int val_type, 1135 void *mask, int mask_type, 1136 int len) 1137 { 1138 if (!tb[val_type]) 1139 return; 1140 nla_memcpy(val, tb[val_type], len); 1141 1142 if (!mask) 1143 return; 1144 1145 if (mask_type == TCA_CT_UNSPEC || !tb[mask_type]) 1146 memset(mask, 0xff, len); 1147 else 1148 nla_memcpy(mask, tb[mask_type], len); 1149 } 1150 1151 static int tcf_ct_fill_params(struct net *net, 1152 struct tcf_ct_params *p, 1153 struct tc_ct *parm, 1154 struct nlattr **tb, 1155 struct netlink_ext_ack *extack) 1156 { 1157 struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id); 1158 struct nf_conntrack_zone zone; 1159 int err, family, proto, len; 1160 struct nf_conn *tmpl; 1161 char *name; 1162 1163 p->zone = NF_CT_DEFAULT_ZONE_ID; 1164 1165 tcf_ct_set_key_val(tb, 1166 &p->ct_action, TCA_CT_ACTION, 1167 NULL, TCA_CT_UNSPEC, 1168 sizeof(p->ct_action)); 1169 1170 if (p->ct_action & TCA_CT_ACT_CLEAR) 1171 return 0; 1172 1173 err = tcf_ct_fill_params_nat(p, parm, tb, extack); 1174 if (err) 1175 return err; 1176 1177 if (tb[TCA_CT_MARK]) { 1178 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) { 1179 NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled."); 1180 return -EOPNOTSUPP; 1181 } 1182 tcf_ct_set_key_val(tb, 1183 &p->mark, TCA_CT_MARK, 1184 &p->mark_mask, TCA_CT_MARK_MASK, 1185 sizeof(p->mark)); 1186 } 1187 1188 if (tb[TCA_CT_LABELS]) { 1189 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) { 1190 NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled."); 1191 return -EOPNOTSUPP; 1192 } 1193 1194 if (!tn->labels) { 1195 NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length"); 1196 return -EOPNOTSUPP; 1197 } 1198 tcf_ct_set_key_val(tb, 1199 p->labels, TCA_CT_LABELS, 1200 p->labels_mask, TCA_CT_LABELS_MASK, 1201 sizeof(p->labels)); 1202 } 1203 1204 if (tb[TCA_CT_ZONE]) { 1205 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) { 1206 NL_SET_ERR_MSG_MOD(extack, "Conntrack zones isn't enabled."); 1207 return -EOPNOTSUPP; 1208 } 1209 1210 tcf_ct_set_key_val(tb, 1211 &p->zone, TCA_CT_ZONE, 1212 NULL, TCA_CT_UNSPEC, 1213 sizeof(p->zone)); 1214 } 1215 1216 nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0); 1217 tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL); 1218 if (!tmpl) { 1219 NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template"); 1220 return -ENOMEM; 1221 } 1222 p->tmpl = tmpl; 1223 if (tb[TCA_CT_HELPER_NAME]) { 1224 name = nla_data(tb[TCA_CT_HELPER_NAME]); 1225 len = nla_len(tb[TCA_CT_HELPER_NAME]); 1226 if (len > 16 || name[len - 1] != '\0') { 1227 NL_SET_ERR_MSG_MOD(extack, "Failed to parse helper name."); 1228 err = -EINVAL; 1229 goto err; 1230 } 1231 family = tb[TCA_CT_HELPER_FAMILY] ? nla_get_u8(tb[TCA_CT_HELPER_FAMILY]) : AF_INET; 1232 proto = tb[TCA_CT_HELPER_PROTO] ? nla_get_u8(tb[TCA_CT_HELPER_PROTO]) : IPPROTO_TCP; 1233 err = nf_ct_add_helper(tmpl, name, family, proto, 1234 p->ct_action & TCA_CT_ACT_NAT, &p->helper); 1235 if (err) { 1236 NL_SET_ERR_MSG_MOD(extack, "Failed to add helper"); 1237 goto err; 1238 } 1239 } 1240 1241 if (p->ct_action & TCA_CT_ACT_COMMIT) 1242 __set_bit(IPS_CONFIRMED_BIT, &tmpl->status); 1243 return 0; 1244 err: 1245 nf_ct_put(p->tmpl); 1246 p->tmpl = NULL; 1247 return err; 1248 } 1249 1250 static int tcf_ct_init(struct net *net, struct nlattr *nla, 1251 struct nlattr *est, struct tc_action **a, 1252 struct tcf_proto *tp, u32 flags, 1253 struct netlink_ext_ack *extack) 1254 { 1255 struct tc_action_net *tn = net_generic(net, act_ct_ops.net_id); 1256 bool bind = flags & TCA_ACT_FLAGS_BIND; 1257 struct tcf_ct_params *params = NULL; 1258 struct nlattr *tb[TCA_CT_MAX + 1]; 1259 struct tcf_chain *goto_ch = NULL; 1260 struct tc_ct *parm; 1261 struct tcf_ct *c; 1262 int err, res = 0; 1263 u32 index; 1264 1265 if (!nla) { 1266 NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed"); 1267 return -EINVAL; 1268 } 1269 1270 err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack); 1271 if (err < 0) 1272 return err; 1273 1274 if (!tb[TCA_CT_PARMS]) { 1275 NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters"); 1276 return -EINVAL; 1277 } 1278 parm = nla_data(tb[TCA_CT_PARMS]); 1279 index = parm->index; 1280 err = tcf_idr_check_alloc(tn, &index, a, bind); 1281 if (err < 0) 1282 return err; 1283 1284 if (!err) { 1285 err = tcf_idr_create_from_flags(tn, index, est, a, 1286 &act_ct_ops, bind, flags); 1287 if (err) { 1288 tcf_idr_cleanup(tn, index); 1289 return err; 1290 } 1291 res = ACT_P_CREATED; 1292 } else { 1293 if (bind) 1294 return 0; 1295 1296 if (!(flags & TCA_ACT_FLAGS_REPLACE)) { 1297 tcf_idr_release(*a, bind); 1298 return -EEXIST; 1299 } 1300 } 1301 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); 1302 if (err < 0) 1303 goto cleanup; 1304 1305 c = to_ct(*a); 1306 1307 params = kzalloc(sizeof(*params), GFP_KERNEL); 1308 if (unlikely(!params)) { 1309 err = -ENOMEM; 1310 goto cleanup; 1311 } 1312 1313 err = tcf_ct_fill_params(net, params, parm, tb, extack); 1314 if (err) 1315 goto cleanup; 1316 1317 err = tcf_ct_flow_table_get(net, params); 1318 if (err) 1319 goto cleanup; 1320 1321 spin_lock_bh(&c->tcf_lock); 1322 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); 1323 params = rcu_replace_pointer(c->params, params, 1324 lockdep_is_held(&c->tcf_lock)); 1325 spin_unlock_bh(&c->tcf_lock); 1326 1327 if (goto_ch) 1328 tcf_chain_put_by_act(goto_ch); 1329 if (params) 1330 call_rcu(¶ms->rcu, tcf_ct_params_free_rcu); 1331 1332 return res; 1333 1334 cleanup: 1335 if (goto_ch) 1336 tcf_chain_put_by_act(goto_ch); 1337 if (params) 1338 tcf_ct_params_free(params); 1339 tcf_idr_release(*a, bind); 1340 return err; 1341 } 1342 1343 static void tcf_ct_cleanup(struct tc_action *a) 1344 { 1345 struct tcf_ct_params *params; 1346 struct tcf_ct *c = to_ct(a); 1347 1348 params = rcu_dereference_protected(c->params, 1); 1349 if (params) 1350 call_rcu(¶ms->rcu, tcf_ct_params_free_rcu); 1351 } 1352 1353 static int tcf_ct_dump_key_val(struct sk_buff *skb, 1354 void *val, int val_type, 1355 void *mask, int mask_type, 1356 int len) 1357 { 1358 int err; 1359 1360 if (mask && !memchr_inv(mask, 0, len)) 1361 return 0; 1362 1363 err = nla_put(skb, val_type, len, val); 1364 if (err) 1365 return err; 1366 1367 if (mask_type != TCA_CT_UNSPEC) { 1368 err = nla_put(skb, mask_type, len, mask); 1369 if (err) 1370 return err; 1371 } 1372 1373 return 0; 1374 } 1375 1376 static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p) 1377 { 1378 struct nf_nat_range2 *range = &p->range; 1379 1380 if (!(p->ct_action & TCA_CT_ACT_NAT)) 1381 return 0; 1382 1383 if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST))) 1384 return 0; 1385 1386 if (range->flags & NF_NAT_RANGE_MAP_IPS) { 1387 if (p->ipv4_range) { 1388 if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN, 1389 range->min_addr.ip)) 1390 return -1; 1391 if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX, 1392 range->max_addr.ip)) 1393 return -1; 1394 } else { 1395 if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN, 1396 &range->min_addr.in6)) 1397 return -1; 1398 if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX, 1399 &range->max_addr.in6)) 1400 return -1; 1401 } 1402 } 1403 1404 if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) { 1405 if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN, 1406 range->min_proto.all)) 1407 return -1; 1408 if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX, 1409 range->max_proto.all)) 1410 return -1; 1411 } 1412 1413 return 0; 1414 } 1415 1416 static int tcf_ct_dump_helper(struct sk_buff *skb, struct nf_conntrack_helper *helper) 1417 { 1418 if (!helper) 1419 return 0; 1420 1421 if (nla_put_string(skb, TCA_CT_HELPER_NAME, helper->name) || 1422 nla_put_u8(skb, TCA_CT_HELPER_FAMILY, helper->tuple.src.l3num) || 1423 nla_put_u8(skb, TCA_CT_HELPER_PROTO, helper->tuple.dst.protonum)) 1424 return -1; 1425 1426 return 0; 1427 } 1428 1429 static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a, 1430 int bind, int ref) 1431 { 1432 unsigned char *b = skb_tail_pointer(skb); 1433 struct tcf_ct *c = to_ct(a); 1434 struct tcf_ct_params *p; 1435 1436 struct tc_ct opt = { 1437 .index = c->tcf_index, 1438 .refcnt = refcount_read(&c->tcf_refcnt) - ref, 1439 .bindcnt = atomic_read(&c->tcf_bindcnt) - bind, 1440 }; 1441 struct tcf_t t; 1442 1443 spin_lock_bh(&c->tcf_lock); 1444 p = rcu_dereference_protected(c->params, 1445 lockdep_is_held(&c->tcf_lock)); 1446 opt.action = c->tcf_action; 1447 1448 if (tcf_ct_dump_key_val(skb, 1449 &p->ct_action, TCA_CT_ACTION, 1450 NULL, TCA_CT_UNSPEC, 1451 sizeof(p->ct_action))) 1452 goto nla_put_failure; 1453 1454 if (p->ct_action & TCA_CT_ACT_CLEAR) 1455 goto skip_dump; 1456 1457 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && 1458 tcf_ct_dump_key_val(skb, 1459 &p->mark, TCA_CT_MARK, 1460 &p->mark_mask, TCA_CT_MARK_MASK, 1461 sizeof(p->mark))) 1462 goto nla_put_failure; 1463 1464 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && 1465 tcf_ct_dump_key_val(skb, 1466 p->labels, TCA_CT_LABELS, 1467 p->labels_mask, TCA_CT_LABELS_MASK, 1468 sizeof(p->labels))) 1469 goto nla_put_failure; 1470 1471 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) && 1472 tcf_ct_dump_key_val(skb, 1473 &p->zone, TCA_CT_ZONE, 1474 NULL, TCA_CT_UNSPEC, 1475 sizeof(p->zone))) 1476 goto nla_put_failure; 1477 1478 if (tcf_ct_dump_nat(skb, p)) 1479 goto nla_put_failure; 1480 1481 if (tcf_ct_dump_helper(skb, p->helper)) 1482 goto nla_put_failure; 1483 1484 skip_dump: 1485 if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt)) 1486 goto nla_put_failure; 1487 1488 tcf_tm_dump(&t, &c->tcf_tm); 1489 if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD)) 1490 goto nla_put_failure; 1491 spin_unlock_bh(&c->tcf_lock); 1492 1493 return skb->len; 1494 nla_put_failure: 1495 spin_unlock_bh(&c->tcf_lock); 1496 nlmsg_trim(skb, b); 1497 return -1; 1498 } 1499 1500 static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets, 1501 u64 drops, u64 lastuse, bool hw) 1502 { 1503 struct tcf_ct *c = to_ct(a); 1504 1505 tcf_action_update_stats(a, bytes, packets, drops, hw); 1506 c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse); 1507 } 1508 1509 static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data, 1510 u32 *index_inc, bool bind, 1511 struct netlink_ext_ack *extack) 1512 { 1513 if (bind) { 1514 struct flow_action_entry *entry = entry_data; 1515 1516 entry->id = FLOW_ACTION_CT; 1517 entry->ct.action = tcf_ct_action(act); 1518 entry->ct.zone = tcf_ct_zone(act); 1519 entry->ct.flow_table = tcf_ct_ft(act); 1520 *index_inc = 1; 1521 } else { 1522 struct flow_offload_action *fl_action = entry_data; 1523 1524 fl_action->id = FLOW_ACTION_CT; 1525 } 1526 1527 return 0; 1528 } 1529 1530 static struct tc_action_ops act_ct_ops = { 1531 .kind = "ct", 1532 .id = TCA_ID_CT, 1533 .owner = THIS_MODULE, 1534 .act = tcf_ct_act, 1535 .dump = tcf_ct_dump, 1536 .init = tcf_ct_init, 1537 .cleanup = tcf_ct_cleanup, 1538 .stats_update = tcf_stats_update, 1539 .offload_act_setup = tcf_ct_offload_act_setup, 1540 .size = sizeof(struct tcf_ct), 1541 }; 1542 1543 static __net_init int ct_init_net(struct net *net) 1544 { 1545 unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8; 1546 struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id); 1547 1548 if (nf_connlabels_get(net, n_bits - 1)) { 1549 tn->labels = false; 1550 pr_err("act_ct: Failed to set connlabels length"); 1551 } else { 1552 tn->labels = true; 1553 } 1554 1555 return tc_action_net_init(net, &tn->tn, &act_ct_ops); 1556 } 1557 1558 static void __net_exit ct_exit_net(struct list_head *net_list) 1559 { 1560 struct net *net; 1561 1562 rtnl_lock(); 1563 list_for_each_entry(net, net_list, exit_list) { 1564 struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id); 1565 1566 if (tn->labels) 1567 nf_connlabels_put(net); 1568 } 1569 rtnl_unlock(); 1570 1571 tc_action_net_exit(net_list, act_ct_ops.net_id); 1572 } 1573 1574 static struct pernet_operations ct_net_ops = { 1575 .init = ct_init_net, 1576 .exit_batch = ct_exit_net, 1577 .id = &act_ct_ops.net_id, 1578 .size = sizeof(struct tc_ct_action_net), 1579 }; 1580 1581 static int __init ct_init_module(void) 1582 { 1583 int err; 1584 1585 act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0); 1586 if (!act_ct_wq) 1587 return -ENOMEM; 1588 1589 err = tcf_ct_flow_tables_init(); 1590 if (err) 1591 goto err_tbl_init; 1592 1593 err = tcf_register_action(&act_ct_ops, &ct_net_ops); 1594 if (err) 1595 goto err_register; 1596 1597 static_branch_inc(&tcf_frag_xmit_count); 1598 1599 return 0; 1600 1601 err_register: 1602 tcf_ct_flow_tables_uninit(); 1603 err_tbl_init: 1604 destroy_workqueue(act_ct_wq); 1605 return err; 1606 } 1607 1608 static void __exit ct_cleanup_module(void) 1609 { 1610 static_branch_dec(&tcf_frag_xmit_count); 1611 tcf_unregister_action(&act_ct_ops, &ct_net_ops); 1612 tcf_ct_flow_tables_uninit(); 1613 destroy_workqueue(act_ct_wq); 1614 } 1615 1616 module_init(ct_init_module); 1617 module_exit(ct_cleanup_module); 1618 MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>"); 1619 MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>"); 1620 MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>"); 1621 MODULE_DESCRIPTION("Connection tracking action"); 1622 MODULE_LICENSE("GPL v2"); 1623