// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* -
 * net/sched/act_ct.c  Connection Tracking action
 *
 * Authors:   Paul Blakey <paulb@mellanox.com>
 *            Yossi Kuperman <yossiku@mellanox.com>
 *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/ip.h>
#include <net/ipv6_frag.h>
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>

#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <uapi/linux/netfilter/nf_nat.h>

static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_SPINLOCK(zones_lock);

struct tcf_ct_flow_table {
	struct rhash_head node; /* In zones tables */

	struct rcu_work rwork;
	struct nf_flowtable nf_ft;
	u16 zone;
	u32 ref;

	bool dying;
};

static const struct rhashtable_params zones_params = {
	.head_offset = offsetof(struct tcf_ct_flow_table, node),
	.key_offset = offsetof(struct tcf_ct_flow_table, zone),
	.key_len = sizeof_field(struct tcf_ct_flow_table, zone),
	.automatic_shrinking = true,
};

static struct nf_flowtable_type flowtable_ct = {
	.owner = THIS_MODULE,
};

static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft;
	int err = -ENOMEM;

	spin_lock_bh(&zones_lock);
	ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
	if (ct_ft)
		goto take_ref;

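	/* No flow table exists for this zone yet: allocate and insert one.
	 * GFP_ATOMIC because zones_lock is held across lookup and insert.
	 */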
	ct_ft = kzalloc(sizeof(*ct_ft), GFP_ATOMIC);
	if (!ct_ft)
		goto err_alloc;

	ct_ft->zone = params->zone;
	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
	if (err)
		goto err_insert;

	ct_ft->nf_ft.type = &flowtable_ct;
	err = nf_flow_table_init(&ct_ft->nf_ft);
	if (err)
		goto err_init;

	__module_get(THIS_MODULE);
take_ref:
	params->ct_ft = ct_ft;
	ct_ft->ref++;
	spin_unlock_bh(&zones_lock);

	return 0;

err_init:
	rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
err_insert:
	kfree(ct_ft);
err_alloc:
	spin_unlock_bh(&zones_lock);
	return err;
}

static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
	struct tcf_ct_flow_table *ct_ft;

	ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
			     rwork);
	nf_flow_table_free(&ct_ft->nf_ft);
	kfree(ct_ft);

	module_put(THIS_MODULE);
}

static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft = params->ct_ft;

	spin_lock_bh(&zones_lock);
	if (--params->ct_ft->ref == 0) {
		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
	}
	spin_unlock_bh(&zones_lock);
}

static int tcf_ct_flow_tables_init(void)
{
	return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
	rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;
static unsigned int ct_net_id;

struct tc_ct_action_net {
	struct tc_action_net tn; /* Must be first */
	bool labels;
};

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
				   u16 zone_id, bool force)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;
	if (!net_eq(net, read_pnet(&ct->ct_net)))
		return false;
	if (nf_ct_zone(ct)->id != zone_id)
		return false;

	/* Force conntrack entry direction. */
	if (force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		if (nf_ct_is_confirmed(ct))
			nf_ct_kill(ct);

		nf_conntrack_put(&ct->ct_general);
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

		return false;
	}

	return true;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
{
	unsigned int len;
	int err;

	switch (family) {
	case NFPROTO_IPV4:
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case NFPROTO_IPV6:
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	err = pskb_trim_rcsum(skb, len);

	return err;
}

static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
	u8 family = NFPROTO_UNSPEC;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		family = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		family = NFPROTO_IPV6;
		break;
	default:
		break;
	}

	return family;
}

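/* Report in *frag whether the packet starting at the network header is an
 * IPv4 fragment. Returns 0 on success, or a negative errno if the header
 * cannot be pulled.
 */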
static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int len;

	len = skb_network_offset(skb) + sizeof(struct iphdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	*frag = ip_is_fragment(ip_hdr(skb));
	return 0;
}

static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int flags = 0, len, payload_ofs = 0;
	unsigned short frag_off;
	int nexthdr;

	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	*frag = flags & IP6_FH_F_FRAG;
	return 0;
}

static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
				   u8 family, u16 zone)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	int err = 0;
	bool frag;

	/* Previously seen (loopback)? Ignore. */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return 0;

	if (family == NFPROTO_IPV4)
		err = tcf_ct_ipv4_is_fragment(skb, &frag);
	else
		err = tcf_ct_ipv6_is_fragment(skb, &frag);
	if (err || !frag)
		return err;

	skb_get(skb);

	if (family == NFPROTO_IPV4) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		local_bh_disable();
		err = ip_defrag(net, skb, user);
		local_bh_enable();
		if (err && err != -EINPROGRESS)
			goto out_free;
	} else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err && err != -EINPROGRESS)
			goto out_free;
#else
		err = -EOPNOTSUPP;
		goto out_free;
#endif
	}

	skb_clear_hash(skb);
	skb->ignore_df = 1;
	return err;

out_free:
	kfree_skb(skb);
	return err;
}

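/* RCU callback that releases an action's parameters: drops the per-zone
 * flow table reference and the conntrack template before freeing.
 */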
static void tcf_ct_params_free(struct rcu_head *head)
{
	struct tcf_ct_params *params = container_of(head,
						     struct tcf_ct_params, rcu);

	tcf_ct_flow_table_put(params);

	if (params->tmpl)
		nf_conntrack_put(&params->tmpl->ct_general);
	kfree(params);
}

#if IS_ENABLED(CONFIG_NF_NAT)
/* Modelled after nf_nat_ipv[46]_fn().
 * range is only used for new, uninitialized NAT state.
 * Returns either NF_ACCEPT or NF_DROP.
 */
static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct nf_nat_range2 *range,
			  enum nf_nat_manip_type maniptype)
{
	int hooknum, err = NF_ACCEPT;

	/* See HOOK2MANIP(). */
	if (maniptype == NF_NAT_MANIP_SRC)
		hooknum = NF_INET_LOCAL_IN; /* Source NAT */
	else
		hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		if (skb->protocol == htons(ETH_P_IP) &&
		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   hooknum))
				err = NF_DROP;
			goto out;
		} else if (IS_ENABLED(CONFIG_IPV6) &&
			   skb->protocol == htons(ETH_P_IPV6)) {
			__be16 frag_off;
			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
			int hdrlen = ipv6_skip_exthdr(skb,
						      sizeof(struct ipv6hdr),
						      &nexthdr, &frag_off);

			if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
				if (!nf_nat_icmpv6_reply_translation(skb, ct,
								     ctinfo,
								     hooknum,
								     hdrlen))
					err = NF_DROP;
				goto out;
			}
		}
		/* Non-ICMP, fall thru to initialize if needed. */
		/* fall through */
	case IP_CT_NEW:
		/* Seen it before? This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			/* Initialize according to the NAT action. */
			err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
				/* Action is set up to establish a new
				 * mapping.
				 */
				? nf_nat_setup_info(ct, range, maniptype)
				: nf_nat_alloc_null_binding(ct, hooknum);
			if (err != NF_ACCEPT)
				goto out;
		}
		break;

	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		break;

	default:
		err = NF_DROP;
		goto out;
	}

	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
out:
	return err;
}
#endif /* CONFIG_NF_NAT */

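/* Apply the configured mark to the connection under the given mask, and
 * cache a conntrack event if a confirmed entry actually changed.
 */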
static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	if (!mask)
		return;

	new_mark = mark | (ct->mark & ~(mask));
	if (ct->mark != new_mark) {
		ct->mark = new_mark;
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
	}
#endif
}

static void tcf_ct_act_set_labels(struct nf_conn *ct,
				  u32 *labels,
				  u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
	size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);

	if (!memchr_inv(labels_m, 0, labels_sz))
		return;

	nf_connlabels_replace(ct, labels, labels_m, 4);
#endif
}

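/* Pick the NAT manipulation for this packet and run it through
 * ct_nat_execute(). Established connections reuse their existing binding;
 * new ones are set up from the action's configured range. Returns an
 * NF_* verdict.
 */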
static int tcf_ct_act_nat(struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  int ct_action,
			  struct nf_nat_range2 *range,
			  bool commit)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int err;
	enum nf_nat_manip_type maniptype;

	if (!(ct_action & TCA_CT_ACT_NAT))
		return NF_ACCEPT;

	/* Add NAT extension if not confirmed yet. */
	if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
		return NF_DROP;   /* Can't NAT. */

	if (ctinfo != IP_CT_NEW && (ct->status & IPS_NAT_MASK) &&
	    (ctinfo != IP_CT_RELATED || commit)) {
		/* NAT an established or related connection like before. */
		if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
			/* This is the REPLY direction for a connection
			 * for which NAT was applied in the forward
			 * direction.  Do the reverse NAT.
			 */
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
		else
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
	} else if (ct_action & TCA_CT_ACT_NAT_SRC) {
		maniptype = NF_NAT_MANIP_SRC;
	} else if (ct_action & TCA_CT_ACT_NAT_DST) {
		maniptype = NF_NAT_MANIP_DST;
	} else {
		return NF_ACCEPT;
	}

	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	if (err == NF_ACCEPT &&
	    ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
		if (maniptype == NF_NAT_MANIP_SRC)
			maniptype = NF_NAT_MANIP_DST;
		else
			maniptype = NF_NAT_MANIP_SRC;

		err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	}
	return err;
#else
	return NF_ACCEPT;
#endif
}

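/* Packet-processing entry point of the ct action: defragment, run the
 * packet through conntrack in the configured zone, optionally NAT and
 * commit, then restore the skb to its original offset.
 */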
static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
		      struct tcf_result *res)
{
	struct net *net = dev_net(skb->dev);
	bool cached, commit, clear, force;
	enum ip_conntrack_info ctinfo;
	struct tcf_ct *c = to_ct(a);
	struct nf_conn *tmpl = NULL;
	struct nf_hook_state state;
	int nh_ofs, err, retval;
	struct tcf_ct_params *p;
	struct nf_conn *ct;
	u8 family;

	p = rcu_dereference_bh(c->params);

	retval = READ_ONCE(c->tcf_action);
	commit = p->ct_action & TCA_CT_ACT_COMMIT;
	clear = p->ct_action & TCA_CT_ACT_CLEAR;
	force = p->ct_action & TCA_CT_ACT_FORCE;
	tmpl = p->tmpl;

	if (clear) {
		ct = nf_ct_get(skb, &ctinfo);
		if (ct) {
			nf_conntrack_put(&ct->ct_general);
			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		}

		goto out;
	}

	family = tcf_ct_skb_nf_family(skb);
	if (family == NFPROTO_UNSPEC)
		goto drop;

	/* The conntrack module expects to be working at L3.
	 * We also try to pull the IPv4/6 header to linear area
	 */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);
	err = tcf_ct_handle_fragments(net, skb, family, p->zone);
	if (err == -EINPROGRESS) {
		retval = TC_ACT_STOLEN;
		goto out;
	}
	if (err)
		goto drop;

	err = tcf_ct_skb_network_trim(skb, family);
	if (err)
		goto drop;

	/* If we are recirculating packets to match on ct fields and
	 * committing with a separate ct action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
	if (!cached) {
		/* Associate skb with specified zone. */
		if (tmpl) {
			ct = nf_ct_get(skb, &ctinfo);
			if (skb_nfct(skb))
				nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		state.hook = NF_INET_PRE_ROUTING;
		state.net = net;
		state.pf = family;
		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			goto out_push;
	}

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		goto out_push;
	nf_ct_deliver_cached_events(ct);

	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
	if (err != NF_ACCEPT)
		goto drop;

	if (commit) {
		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);

		/* This will take care of sending queued events
		 * even if the connection is already confirmed.
		 */
		nf_conntrack_confirm(skb);
	}

out_push:
	skb_push_rcsum(skb, nh_ofs);

out:
	tcf_action_update_bstats(&c->common, skb);
	return retval;

drop:
	tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_SHOT;
}

static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
	[TCA_CT_ACTION] = { .type = NLA_U16 },
	[TCA_CT_PARMS] = { .type = NLA_EXACT_LEN, .len = sizeof(struct tc_ct) },
	[TCA_CT_ZONE] = { .type = NLA_U16 },
	[TCA_CT_MARK] = { .type = NLA_U32 },
	[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
	[TCA_CT_LABELS] = { .type = NLA_BINARY,
			    .len = 128 / BITS_PER_BYTE },
	[TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
				 .len = 128 / BITS_PER_BYTE },
	[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV6_MIN] = { .type = NLA_EXACT_LEN,
				  .len = sizeof(struct in6_addr) },
	[TCA_CT_NAT_IPV6_MAX] = { .type = NLA_EXACT_LEN,
				  .len = sizeof(struct in6_addr) },
	[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
	[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
};

static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
				  struct tc_ct *parm,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct nf_nat_range2 *range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!IS_ENABLED(CONFIG_NF_NAT)) {
		NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
		return -EOPNOTSUPP;
	}

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
		NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
		return -EOPNOTSUPP;
	}

	range = &p->range;
	if (tb[TCA_CT_NAT_IPV4_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];

		p->ipv4_range = true;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.ip =
			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);

		range->max_addr.ip = max_attr ?
				     nla_get_in_addr(max_attr) :
				     range->min_addr.ip;
	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];

		p->ipv4_range = false;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.in6 =
			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);

		range->max_addr.in6 = max_attr ?
				      nla_get_in6_addr(max_attr) :
				      range->min_addr.in6;
	}

	if (tb[TCA_CT_NAT_PORT_MIN]) {
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);

		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
				       range->min_proto.all;
	}

	return 0;
}

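/* Copy an attribute value and its optional mask from netlink; a missing
 * mask attribute defaults to all ones.
 */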
static void tcf_ct_set_key_val(struct nlattr **tb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);

	if (!mask)
		return;

	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int tcf_ct_fill_params(struct net *net,
			      struct tcf_ct_params *p,
			      struct tc_ct *parm,
			      struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
	struct nf_conntrack_zone zone;
	struct nf_conn *tmpl;
	int err;

	p->zone = NF_CT_DEFAULT_ZONE_ID;

	tcf_ct_set_key_val(tb,
			   &p->ct_action, TCA_CT_ACTION,
			   NULL, TCA_CT_UNSPEC,
			   sizeof(p->ct_action));

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		return 0;

	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
	if (err)
		return err;

	if (tb[TCA_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   &p->mark, TCA_CT_MARK,
				   &p->mark_mask, TCA_CT_MARK_MASK,
				   sizeof(p->mark));
	}

	if (tb[TCA_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled.");
			return -EOPNOTSUPP;
		}

		if (!tn->labels) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   p->labels, TCA_CT_LABELS,
				   p->labels_mask, TCA_CT_LABELS_MASK,
				   sizeof(p->labels));
	}

	if (tb[TCA_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack zones isn't enabled.");
			return -EOPNOTSUPP;
		}

		tcf_ct_set_key_val(tb,
				   &p->zone, TCA_CT_ZONE,
				   NULL, TCA_CT_UNSPEC,
				   sizeof(p->zone));
	}

	if (p->zone == NF_CT_DEFAULT_ZONE_ID)
		return 0;

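	/* For a non-default zone, allocate a conntrack template carrying the
	 * zone; tcf_ct_act() attaches it to packets before nf_conntrack_in().
	 */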
	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
	if (!tmpl) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
		return -ENOMEM;
	}
	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	nf_conntrack_get(&tmpl->ct_general);
	p->tmpl = tmpl;

	return 0;
}

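/* Netlink init/replace handler: parse TCA_CT_* attributes, build a new
 * tcf_ct_params (including its per-zone flow table reference) and swap it
 * into the action under tcf_lock.
 */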
static int tcf_ct_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a,
		       int replace, int bind, bool rtnl_held,
		       struct tcf_proto *tp, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);
	struct tcf_ct_params *params = NULL;
	struct nlattr *tb[TCA_CT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_ct *parm;
	struct tcf_ct *c;
	int err, res = 0;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CT_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_CT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (!err) {
		err = tcf_idr_create_from_flags(tn, index, est, a,
						&act_ct_ops, bind, flags);
		if (err) {
			tcf_idr_cleanup(tn, index);
			return err;
		}
		res = ACT_P_CREATED;
	} else {
		if (bind)
			return 0;

		if (!replace) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto cleanup;

	c = to_ct(*a);

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (unlikely(!params)) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = tcf_ct_fill_params(net, params, parm, tb, extack);
	if (err)
		goto cleanup;

	err = tcf_ct_flow_table_get(params);
	if (err)
		goto cleanup;

	spin_lock_bh(&c->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params = rcu_replace_pointer(c->params, params,
				     lockdep_is_held(&c->tcf_lock));
	spin_unlock_bh(&c->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		kfree_rcu(params, rcu);
	if (res == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return res;

cleanup:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	kfree(params);
	tcf_idr_release(*a, bind);
	return err;
}

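/* Action teardown: free the parameters (and the references they hold)
 * after an RCU grace period.
 */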
static void tcf_ct_cleanup(struct tc_action *a)
{
	struct tcf_ct_params *params;
	struct tcf_ct *c = to_ct(a);

	params = rcu_dereference_protected(c->params, 1);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);
}

static int tcf_ct_dump_key_val(struct sk_buff *skb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	int err;

	if (mask && !memchr_inv(mask, 0, len))
		return 0;

	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;

	if (mask_type != TCA_CT_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}

	return 0;
}

static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
{
	struct nf_nat_range2 *range = &p->range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
		if (p->ipv4_range) {
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
					    range->min_addr.ip))
				return -1;
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
					    range->max_addr.ip))
				return -1;
		} else {
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
					     &range->min_addr.in6))
				return -1;
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
					     &range->max_addr.in6))
				return -1;
		}
	}

	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
				 range->min_proto.all))
			return -1;
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
				 range->max_proto.all))
			return -1;
	}

	return 0;
}

static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
			      int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ct *c = to_ct(a);
	struct tcf_ct_params *p;

	struct tc_ct opt = {
		.index   = c->tcf_index,
		.refcnt  = refcount_read(&c->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&c->tcf_lock);
	p = rcu_dereference_protected(c->params,
				      lockdep_is_held(&c->tcf_lock));
	opt.action = c->tcf_action;

	if (tcf_ct_dump_key_val(skb,
				&p->ct_action, TCA_CT_ACTION,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->ct_action)))
		goto nla_put_failure;

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		goto skip_dump;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    tcf_ct_dump_key_val(skb,
				&p->mark, TCA_CT_MARK,
				&p->mark_mask, TCA_CT_MARK_MASK,
				sizeof(p->mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    tcf_ct_dump_key_val(skb,
				p->labels, TCA_CT_LABELS,
				p->labels_mask, TCA_CT_LABELS_MASK,
				sizeof(p->labels)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    tcf_ct_dump_key_val(skb,
				&p->zone, TCA_CT_ZONE,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->zone)))
		goto nla_put_failure;

	if (tcf_ct_dump_nat(skb, p))
		goto nla_put_failure;

skip_dump:
	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &c->tcf_tm);
	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&c->tcf_lock);

	return skb->len;
nla_put_failure:
	spin_unlock_bh(&c->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_ct_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 const struct tc_action_ops *ops,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_idr_search(tn, a, index);
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
			     u64 lastuse, bool hw)
{
	struct tcf_ct *c = to_ct(a);

	tcf_action_update_stats(a, bytes, packets, false, hw);
	c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}

static struct tc_action_ops act_ct_ops = {
	.kind = "ct",
	.id = TCA_ID_CT,
	.owner = THIS_MODULE,
	.act = tcf_ct_act,
	.dump = tcf_ct_dump,
	.init = tcf_ct_init,
	.cleanup = tcf_ct_cleanup,
	.walk = tcf_ct_walker,
	.lookup = tcf_ct_search,
	.stats_update = tcf_stats_update,
	.size = sizeof(struct tcf_ct),
};

static __net_init int ct_init_net(struct net *net)
{
	unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

	if (nf_connlabels_get(net, n_bits - 1)) {
		tn->labels = false;
		pr_err("act_ct: Failed to set connlabels length\n");
	} else {
		tn->labels = true;
	}

	return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}

static void __net_exit ct_exit_net(struct list_head *net_list)
{
	struct net *net;

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

		if (tn->labels)
			nf_connlabels_put(net);
	}
	rtnl_unlock();

	tc_action_net_exit(net_list, ct_net_id);
}

static struct pernet_operations ct_net_ops = {
	.init = ct_init_net,
	.exit_batch = ct_exit_net,
	.id   = &ct_net_id,
	.size = sizeof(struct tc_ct_action_net),
};

static int __init ct_init_module(void)
{
	int err;

	act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
	if (!act_ct_wq)
		return -ENOMEM;

	err = tcf_ct_flow_tables_init();
	if (err)
		goto err_tbl_init;

	err = tcf_register_action(&act_ct_ops, &ct_net_ops);
	if (err)
		goto err_register;

	return 0;

	/* Unwind in reverse order of initialization. */
err_register:
	tcf_ct_flow_tables_uninit();
err_tbl_init:
	destroy_workqueue(act_ct_wq);
	return err;
}

static void __exit ct_cleanup_module(void)
{
	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
	tcf_ct_flow_tables_uninit();
	destroy_workqueue(act_ct_wq);
}

module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");