xref: /openbmc/linux/net/sched/act_ct.c (revision 56b5b1c7)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* -
3  * net/sched/act_ct.c  Connection Tracking action
4  *
5  * Authors:   Paul Blakey <paulb@mellanox.com>
6  *            Yossi Kuperman <yossiku@mellanox.com>
7  *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
8  */
9 
10 #include <linux/module.h>
11 #include <linux/init.h>
12 #include <linux/kernel.h>
13 #include <linux/skbuff.h>
14 #include <linux/rtnetlink.h>
15 #include <linux/pkt_cls.h>
16 #include <linux/ip.h>
17 #include <linux/ipv6.h>
18 #include <linux/rhashtable.h>
19 #include <net/netlink.h>
20 #include <net/pkt_sched.h>
21 #include <net/pkt_cls.h>
22 #include <net/act_api.h>
23 #include <net/ip.h>
24 #include <net/ipv6_frag.h>
25 #include <uapi/linux/tc_act/tc_ct.h>
26 #include <net/tc_act/tc_ct.h>
27 
28 #include <net/netfilter/nf_flow_table.h>
29 #include <net/netfilter/nf_conntrack.h>
30 #include <net/netfilter/nf_conntrack_core.h>
31 #include <net/netfilter/nf_conntrack_zones.h>
32 #include <net/netfilter/nf_conntrack_helper.h>
33 #include <net/netfilter/nf_conntrack_acct.h>
34 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
35 #include <net/netfilter/nf_conntrack_act_ct.h>
36 #include <uapi/linux/netfilter/nf_nat.h>
37 
38 static struct workqueue_struct *act_ct_wq;
39 static struct rhashtable zones_ht;
40 static DEFINE_MUTEX(zones_mutex);
41 
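/* Per-zone software flow table. All ct action instances that share a
 * conntrack zone share one of these, found via zones_ht and reference
 * counted so the underlying nf_flowtable is only set up once per zone.
 */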
42 struct tcf_ct_flow_table {
43 	struct rhash_head node; /* Node in the zones_ht table */
44 
45 	struct rcu_work rwork;
46 	struct nf_flowtable nf_ft;
47 	refcount_t ref;
48 	u16 zone;
49 
50 	bool dying;
51 };
52 
53 static const struct rhashtable_params zones_params = {
54 	.head_offset = offsetof(struct tcf_ct_flow_table, node),
55 	.key_offset = offsetof(struct tcf_ct_flow_table, zone),
56 	.key_len = sizeof_field(struct tcf_ct_flow_table, zone),
57 	.automatic_shrinking = true,
58 };
59 
60 static struct nf_ct_ext_type act_ct_extend __read_mostly = {
61 	.len		= sizeof(struct nf_conn_act_ct_ext),
62 	.align		= __alignof__(struct nf_conn_act_ct_ext),
63 	.id		= NF_CT_EXT_ACT_CT,
64 };
65 
66 static struct flow_action_entry *
67 tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
68 {
69 	int i = flow_action->num_entries++;
70 
71 	return &flow_action->entries[i];
72 }
73 
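/* Append a FLOW_ACTION_MANGLE entry to the offload rule. Callers pass a
 * mask of the bits to rewrite; it is stored inverted (~mask), i.e. in the
 * pedit-style convention where set bits are kept and clear bits rewritten.
 */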
74 static void tcf_ct_add_mangle_action(struct flow_action *action,
75 				     enum flow_action_mangle_base htype,
76 				     u32 offset,
77 				     u32 mask,
78 				     u32 val)
79 {
80 	struct flow_action_entry *entry;
81 
82 	entry = tcf_ct_flow_table_flow_action_get_next(action);
83 	entry->id = FLOW_ACTION_MANGLE;
84 	entry->mangle.htype = htype;
85 	entry->mangle.mask = ~mask;
86 	entry->mangle.offset = offset;
87 	entry->mangle.val = val;
88 }
89 
90 /* The following NAT helper functions check whether the inverted reverse
91  * tuple (target) differs from the current direction's tuple - meaning NAT
92  * of the ports and/or IP addresses is needed - and add the relevant mangle actions.
93  */
94 static void
95 tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
96 				      struct nf_conntrack_tuple target,
97 				      struct flow_action *action)
98 {
99 	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
100 		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
101 					 offsetof(struct iphdr, saddr),
102 					 0xFFFFFFFF,
103 					 be32_to_cpu(target.src.u3.ip));
104 	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
105 		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
106 					 offsetof(struct iphdr, daddr),
107 					 0xFFFFFFFF,
108 					 be32_to_cpu(target.dst.u3.ip));
109 }
110 
111 static void
112 tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
113 				   union nf_inet_addr *addr,
114 				   u32 offset)
115 {
116 	int i;
117 
118 	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
119 		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
120 					 i * sizeof(u32) + offset,
121 					 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
122 }
123 
124 static void
125 tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
126 				      struct nf_conntrack_tuple target,
127 				      struct flow_action *action)
128 {
129 	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
130 		tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
131 						   offsetof(struct ipv6hdr,
132 							    saddr));
133 	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
134 		tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
135 						   offsetof(struct ipv6hdr,
136 							    daddr));
137 }
138 
139 static void
140 tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
141 				     struct nf_conntrack_tuple target,
142 				     struct flow_action *action)
143 {
144 	__be16 target_src = target.src.u.tcp.port;
145 	__be16 target_dst = target.dst.u.tcp.port;
146 
147 	if (target_src != tuple->src.u.tcp.port)
148 		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
149 					 offsetof(struct tcphdr, source),
150 					 0xFFFF, be16_to_cpu(target_src));
151 	if (target_dst != tuple->dst.u.tcp.port)
152 		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
153 					 offsetof(struct tcphdr, dest),
154 					 0xFFFF, be16_to_cpu(target_dst));
155 }
156 
157 static void
158 tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
159 				     struct nf_conntrack_tuple target,
160 				     struct flow_action *action)
161 {
162 	__be16 target_src = target.src.u.udp.port;
163 	__be16 target_dst = target.dst.u.udp.port;
164 
165 	if (target_src != tuple->src.u.udp.port)
166 		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
167 					 offsetof(struct udphdr, source),
168 					 0xFFFF, be16_to_cpu(target_src));
169 	if (target_dst != tuple->dst.u.udp.port)
170 		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
171 					 offsetof(struct udphdr, dest),
172 					 0xFFFF, be16_to_cpu(target_dst));
173 }
174 
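/* Fill a FLOW_ACTION_CT_METADATA entry with the connection's mark and
 * labels, plus a cookie packing the ct pointer together with ctinfo so a
 * hardware hit can restore skb->_nfct just like nf_ct_set() does.
 */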
175 static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
176 					      enum ip_conntrack_dir dir,
177 					      struct flow_action *action)
178 {
179 	struct nf_conn_labels *ct_labels;
180 	struct flow_action_entry *entry;
181 	enum ip_conntrack_info ctinfo;
182 	u32 *act_ct_labels;
183 
184 	entry = tcf_ct_flow_table_flow_action_get_next(action);
185 	entry->id = FLOW_ACTION_CT_METADATA;
186 #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
187 	entry->ct_metadata.mark = ct->mark;
188 #endif
189 	ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
190 					     IP_CT_ESTABLISHED_REPLY;
191 	/* Aligns with the CT reference set on the skb by nf_ct_set(). */
192 	entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;
193 	entry->ct_metadata.orig_dir = dir == IP_CT_DIR_ORIGINAL;
194 
195 	act_ct_labels = entry->ct_metadata.labels;
196 	ct_labels = nf_ct_labels_find(ct);
197 	if (ct_labels)
198 		memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
199 	else
200 		memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
201 }
202 
203 static int tcf_ct_flow_table_add_action_nat(struct net *net,
204 					    struct nf_conn *ct,
205 					    enum ip_conntrack_dir dir,
206 					    struct flow_action *action)
207 {
208 	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
209 	struct nf_conntrack_tuple target;
210 
211 	if (!(ct->status & IPS_NAT_MASK))
212 		return 0;
213 
214 	nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);
215 
216 	switch (tuple->src.l3num) {
217 	case NFPROTO_IPV4:
218 		tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
219 						      action);
220 		break;
221 	case NFPROTO_IPV6:
222 		tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
223 						      action);
224 		break;
225 	default:
226 		return -EOPNOTSUPP;
227 	}
228 
229 	switch (nf_ct_protonum(ct)) {
230 	case IPPROTO_TCP:
231 		tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
232 		break;
233 	case IPPROTO_UDP:
234 		tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
235 		break;
236 	default:
237 		return -EOPNOTSUPP;
238 	}
239 
240 	return 0;
241 }
242 
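/* The .action callback of the act_ct flowtable type: translate one
 * direction of a flow_offload entry into NAT mangle actions followed by a
 * CT metadata action. On error, entries filled here are zeroed and the
 * action count is rolled back.
 */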
243 static int tcf_ct_flow_table_fill_actions(struct net *net,
244 					  const struct flow_offload *flow,
245 					  enum flow_offload_tuple_dir tdir,
246 					  struct nf_flow_rule *flow_rule)
247 {
248 	struct flow_action *action = &flow_rule->rule->action;
249 	int num_entries = action->num_entries;
250 	struct nf_conn *ct = flow->ct;
251 	enum ip_conntrack_dir dir;
252 	int i, err;
253 
254 	switch (tdir) {
255 	case FLOW_OFFLOAD_DIR_ORIGINAL:
256 		dir = IP_CT_DIR_ORIGINAL;
257 		break;
258 	case FLOW_OFFLOAD_DIR_REPLY:
259 		dir = IP_CT_DIR_REPLY;
260 		break;
261 	default:
262 		return -EOPNOTSUPP;
263 	}
264 
265 	err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
266 	if (err)
267 		goto err_nat;
268 
269 	tcf_ct_flow_table_add_action_meta(ct, dir, action);
270 	return 0;
271 
272 err_nat:
273 	/* Clear filled actions */
274 	for (i = num_entries; i < action->num_entries; i++)
275 		memset(&action->entries[i], 0, sizeof(action->entries[i]));
276 	action->num_entries = num_entries;
277 
278 	return err;
279 }
280 
281 static struct nf_flowtable_type flowtable_ct = {
282 	.action		= tcf_ct_flow_table_fill_actions,
283 	.owner		= THIS_MODULE,
284 };
285 
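/* Look up (or create) the flow table for params->zone. Creation is
 * serialized by zones_mutex, and refcount_inc_not_zero() protects against
 * reusing an entry whose last reference is concurrently being dropped.
 */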
286 static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
287 {
288 	struct tcf_ct_flow_table *ct_ft;
289 	int err = -ENOMEM;
290 
291 	mutex_lock(&zones_mutex);
292 	ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
293 	if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
294 		goto out_unlock;
295 
296 	ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
297 	if (!ct_ft)
298 		goto err_alloc;
299 	refcount_set(&ct_ft->ref, 1);
300 
301 	ct_ft->zone = params->zone;
302 	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
303 	if (err)
304 		goto err_insert;
305 
306 	ct_ft->nf_ft.type = &flowtable_ct;
307 	ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD |
308 			      NF_FLOWTABLE_COUNTER;
309 	err = nf_flow_table_init(&ct_ft->nf_ft);
310 	if (err)
311 		goto err_init;
312 
313 	__module_get(THIS_MODULE);
314 out_unlock:
315 	params->ct_ft = ct_ft;
316 	params->nf_ft = &ct_ft->nf_ft;
317 	mutex_unlock(&zones_mutex);
318 
319 	return 0;
320 
321 err_init:
322 	rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
323 err_insert:
324 	kfree(ct_ft);
325 err_alloc:
326 	mutex_unlock(&zones_mutex);
327 	return err;
328 }
329 
330 static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
331 {
332 	struct flow_block_cb *block_cb, *tmp_cb;
333 	struct tcf_ct_flow_table *ct_ft;
334 	struct flow_block *block;
335 
336 	ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
337 			     rwork);
338 	nf_flow_table_free(&ct_ft->nf_ft);
339 
340 	/* Remove any remaining callbacks before cleanup */
341 	block = &ct_ft->nf_ft.flow_block;
342 	down_write(&ct_ft->nf_ft.flow_block_lock);
343 	list_for_each_entry_safe(block_cb, tmp_cb, &block->cb_list, list) {
344 		list_del(&block_cb->list);
345 		flow_block_cb_free(block_cb);
346 	}
347 	up_write(&ct_ft->nf_ft.flow_block_lock);
348 	kfree(ct_ft);
349 
350 	module_put(THIS_MODULE);
351 }
352 
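/* Drop a reference to the zone's flow table. The final put removes it from
 * zones_ht and defers the teardown to act_ct_wq after an RCU grace period,
 * since nf_flow_table_free() can sleep.
 */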
353 static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
354 {
355 	struct tcf_ct_flow_table *ct_ft = params->ct_ft;
356 
357 	if (refcount_dec_and_test(&params->ct_ft->ref)) {
358 		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
359 		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
360 		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
361 	}
362 }
363 
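/* Offload a connection into the zone's flow table. The IPS_OFFLOAD_BIT
 * test-and-set makes this idempotent. For TCP, window tracking is set to
 * be liberal, since conntrack no longer sees every packet (and hence every
 * window update) once the flow is offloaded.
 */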
364 static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
365 				  struct nf_conn *ct,
366 				  bool tcp)
367 {
368 	struct nf_conn_act_ct_ext *act_ct_ext;
369 	struct flow_offload *entry;
370 	int err;
371 
372 	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
373 		return;
374 
375 	entry = flow_offload_alloc(ct);
376 	if (!entry) {
377 		WARN_ON_ONCE(1);
378 		goto err_alloc;
379 	}
380 
381 	if (tcp) {
382 		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
383 		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
384 	}
385 
386 	act_ct_ext = nf_conn_act_ct_ext_find(ct);
387 	if (act_ct_ext) {
388 		entry->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.iifidx =
389 			act_ct_ext->ifindex[IP_CT_DIR_ORIGINAL];
390 		entry->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.iifidx =
391 			act_ct_ext->ifindex[IP_CT_DIR_REPLY];
392 	}
393 
394 	err = flow_offload_add(&ct_ft->nf_ft, entry);
395 	if (err)
396 		goto err_add;
397 
398 	return;
399 
400 err_add:
401 	flow_offload_free(entry);
402 err_alloc:
403 	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
404 }
405 
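/* Decide whether a connection qualifies for flow table offload: it must be
 * established and assured, be TCP (in the ESTABLISHED state) or UDP, and
 * use neither a conntrack helper nor TCP sequence adjustment.
 */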
406 static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
407 					   struct nf_conn *ct,
408 					   enum ip_conntrack_info ctinfo)
409 {
410 	bool tcp = false;
411 
412 	if ((ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY) ||
413 	    !test_bit(IPS_ASSURED_BIT, &ct->status))
414 		return;
415 
416 	switch (nf_ct_protonum(ct)) {
417 	case IPPROTO_TCP:
418 		tcp = true;
419 		if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
420 			return;
421 		break;
422 	case IPPROTO_UDP:
423 		break;
424 	default:
425 		return;
426 	}
427 
428 	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
429 	    ct->status & IPS_SEQ_ADJUST)
430 		return;
431 
432 	tcf_ct_flow_table_add(ct_ft, ct, tcp);
433 }
434 
435 static bool
436 tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
437 				  struct flow_offload_tuple *tuple,
438 				  struct tcphdr **tcph)
439 {
440 	struct flow_ports *ports;
441 	unsigned int thoff;
442 	struct iphdr *iph;
443 
444 	if (!pskb_network_may_pull(skb, sizeof(*iph)))
445 		return false;
446 
447 	iph = ip_hdr(skb);
448 	thoff = iph->ihl * 4;
449 
450 	if (ip_is_fragment(iph) ||
451 	    unlikely(thoff != sizeof(struct iphdr)))
452 		return false;
453 
454 	if (iph->protocol != IPPROTO_TCP &&
455 	    iph->protocol != IPPROTO_UDP)
456 		return false;
457 
458 	if (iph->ttl <= 1)
459 		return false;
460 
461 	if (!pskb_network_may_pull(skb, iph->protocol == IPPROTO_TCP ?
462 					thoff + sizeof(struct tcphdr) :
463 					thoff + sizeof(*ports)))
464 		return false;
465 
466 	iph = ip_hdr(skb);
467 	if (iph->protocol == IPPROTO_TCP)
468 		*tcph = (void *)(skb_network_header(skb) + thoff);
469 
470 	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
471 	tuple->src_v4.s_addr = iph->saddr;
472 	tuple->dst_v4.s_addr = iph->daddr;
473 	tuple->src_port = ports->source;
474 	tuple->dst_port = ports->dest;
475 	tuple->l3proto = AF_INET;
476 	tuple->l4proto = iph->protocol;
477 
478 	return true;
479 }
480 
481 static bool
482 tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
483 				  struct flow_offload_tuple *tuple,
484 				  struct tcphdr **tcph)
485 {
486 	struct flow_ports *ports;
487 	struct ipv6hdr *ip6h;
488 	unsigned int thoff;
489 
490 	if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
491 		return false;
492 
493 	ip6h = ipv6_hdr(skb);
494 
495 	if (ip6h->nexthdr != IPPROTO_TCP &&
496 	    ip6h->nexthdr != IPPROTO_UDP)
497 		return false;
498 
499 	if (ip6h->hop_limit <= 1)
500 		return false;
501 
502 	thoff = sizeof(*ip6h);
503 	if (!pskb_network_may_pull(skb, ip6h->nexthdr == IPPROTO_TCP ?
504 					thoff + sizeof(struct tcphdr) :
505 					thoff + sizeof(*ports)))
506 		return false;
507 
508 	ip6h = ipv6_hdr(skb);
509 	if (ip6h->nexthdr == IPPROTO_TCP)
510 		*tcph = (void *)(skb_network_header(skb) + thoff);
511 
512 	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
513 	tuple->src_v6 = ip6h->saddr;
514 	tuple->dst_v6 = ip6h->daddr;
515 	tuple->src_port = ports->source;
516 	tuple->dst_port = ports->dest;
517 	tuple->l3proto = AF_INET6;
518 	tuple->l4proto = ip6h->nexthdr;
519 
520 	return true;
521 }
522 
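/* act_ct fast path: parse the packet into a tuple and look it up in the
 * zone's flow table. On a hit, skb->_nfct is set directly and the flow
 * timeout is refreshed, bypassing nf_conntrack_in(). TCP FIN/RST tear the
 * flow down so such connections fall back to the slow path.
 */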
523 static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
524 				     struct sk_buff *skb,
525 				     u8 family)
526 {
527 	struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
528 	struct flow_offload_tuple_rhash *tuplehash;
529 	struct flow_offload_tuple tuple = {};
530 	enum ip_conntrack_info ctinfo;
531 	struct tcphdr *tcph = NULL;
532 	struct flow_offload *flow;
533 	struct nf_conn *ct;
534 	u8 dir;
535 
536 	switch (family) {
537 	case NFPROTO_IPV4:
538 		if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
539 			return false;
540 		break;
541 	case NFPROTO_IPV6:
542 		if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
543 			return false;
544 		break;
545 	default:
546 		return false;
547 	}
548 
549 	tuplehash = flow_offload_lookup(nf_ft, &tuple);
550 	if (!tuplehash)
551 		return false;
552 
553 	dir = tuplehash->tuple.dir;
554 	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
555 	ct = flow->ct;
556 
557 	if (tcph && (unlikely(tcph->fin || tcph->rst))) {
558 		flow_offload_teardown(flow);
559 		return false;
560 	}
561 
562 	ctinfo = dir == FLOW_OFFLOAD_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
563 						    IP_CT_ESTABLISHED_REPLY;
564 
565 	flow_offload_refresh(nf_ft, flow);
566 	nf_conntrack_get(&ct->ct_general);
567 	nf_ct_set(skb, ct, ctinfo);
568 	if (nf_ft->flags & NF_FLOWTABLE_COUNTER)
569 		nf_ct_acct_update(ct, dir, skb->len);
570 
571 	return true;
572 }
573 
574 static int tcf_ct_flow_tables_init(void)
575 {
576 	return rhashtable_init(&zones_ht, &zones_params);
577 }
578 
579 static void tcf_ct_flow_tables_uninit(void)
580 {
581 	rhashtable_destroy(&zones_ht);
582 }
583 
584 static struct tc_action_ops act_ct_ops;
585 static unsigned int ct_net_id;
586 
587 struct tc_ct_action_net {
588 	struct tc_action_net tn; /* Must be first */
589 	bool labels;
590 };
591 
592 /* Determine whether skb->_nfct is equal to the result of a conntrack lookup. */
593 static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
594 				   u16 zone_id, bool force)
595 {
596 	enum ip_conntrack_info ctinfo;
597 	struct nf_conn *ct;
598 
599 	ct = nf_ct_get(skb, &ctinfo);
600 	if (!ct)
601 		return false;
602 	if (!net_eq(net, read_pnet(&ct->ct_net)))
603 		return false;
604 	if (nf_ct_zone(ct)->id != zone_id)
605 		return false;
606 
607 	/* Force conntrack entry direction. */
608 	if (force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
609 		if (nf_ct_is_confirmed(ct))
610 			nf_ct_kill(ct);
611 
612 		nf_ct_put(ct);
613 		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
614 
615 		return false;
616 	}
617 
618 	return true;
619 }
620 
621 /* Trim the skb to the length specified by the IP/IPv6 header,
622  * removing any trailing lower-layer padding. This prepares the skb
623  * for higher-layer processing that assumes skb->len excludes padding
624  * (such as nf_ip_checksum). The caller needs to pull the skb to the
625  * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
626  */
627 static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
628 {
629 	unsigned int len;
630 	int err;
631 
632 	switch (family) {
633 	case NFPROTO_IPV4:
634 		len = ntohs(ip_hdr(skb)->tot_len);
635 		break;
636 	case NFPROTO_IPV6:
637 		len = sizeof(struct ipv6hdr)
638 			+ ntohs(ipv6_hdr(skb)->payload_len);
639 		break;
640 	default:
641 		len = skb->len;
642 	}
643 
644 	err = pskb_trim_rcsum(skb, len);
645 
646 	return err;
647 }
648 
649 static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
650 {
651 	u8 family = NFPROTO_UNSPEC;
652 
653 	switch (skb_protocol(skb, true)) {
654 	case htons(ETH_P_IP):
655 		family = NFPROTO_IPV4;
656 		break;
657 	case htons(ETH_P_IPV6):
658 		family = NFPROTO_IPV6;
659 		break;
660 	default:
661 		break;
662 	}
663 
664 	return family;
665 }
666 
667 static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
668 {
669 	unsigned int len;
670 
671 	len = skb_network_offset(skb) + sizeof(struct iphdr);
672 	if (unlikely(skb->len < len))
673 		return -EINVAL;
674 	if (unlikely(!pskb_may_pull(skb, len)))
675 		return -ENOMEM;
676 
677 	*frag = ip_is_fragment(ip_hdr(skb));
678 	return 0;
679 }
680 
681 static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
682 {
683 	unsigned int flags = 0, len, payload_ofs = 0;
684 	unsigned short frag_off;
685 	int nexthdr;
686 
687 	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
688 	if (unlikely(skb->len < len))
689 		return -EINVAL;
690 	if (unlikely(!pskb_may_pull(skb, len)))
691 		return -ENOMEM;
692 
693 	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
694 	if (unlikely(nexthdr < 0))
695 		return -EPROTO;
696 
697 	*frag = flags & IP6_FH_F_FRAG;
698 	return 0;
699 }
700 
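/* Reassemble IP/IPv6 fragments in the given conntrack zone. Returns
 * -EINPROGRESS when the fragment was queued and consumed (the caller then
 * returns TC_ACT_STOLEN), and 0 once skb holds the reassembled packet, in
 * which case *defrag is set and the MRU is recorded in the tc skb cb.
 */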
701 static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
702 				   u8 family, u16 zone, bool *defrag)
703 {
704 	enum ip_conntrack_info ctinfo;
705 	struct nf_conn *ct;
706 	int err = 0;
707 	bool frag;
708 	u16 mru;
709 
710 	/* Previously seen (loopback)? Ignore. */
711 	ct = nf_ct_get(skb, &ctinfo);
712 	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
713 		return 0;
714 
715 	if (family == NFPROTO_IPV4)
716 		err = tcf_ct_ipv4_is_fragment(skb, &frag);
717 	else
718 		err = tcf_ct_ipv6_is_fragment(skb, &frag);
719 	if (err || !frag)
720 		return err;
721 
722 	skb_get(skb);
723 	mru = tc_skb_cb(skb)->mru;
724 
725 	if (family == NFPROTO_IPV4) {
726 		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
727 
728 		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
729 		local_bh_disable();
730 		err = ip_defrag(net, skb, user);
731 		local_bh_enable();
732 		if (err && err != -EINPROGRESS)
733 			return err;
734 
735 		if (!err) {
736 			*defrag = true;
737 			mru = IPCB(skb)->frag_max_size;
738 		}
739 	} else { /* NFPROTO_IPV6 */
740 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
741 		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
742 
743 		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
744 		err = nf_ct_frag6_gather(net, skb, user);
745 		if (err && err != -EINPROGRESS)
746 			goto out_free;
747 
748 		if (!err) {
749 			*defrag = true;
750 			mru = IP6CB(skb)->frag_max_size;
751 		}
752 #else
753 		err = -EOPNOTSUPP;
754 		goto out_free;
755 #endif
756 	}
757 
758 	if (err != -EINPROGRESS)
759 		tc_skb_cb(skb)->mru = mru;
760 	skb_clear_hash(skb);
761 	skb->ignore_df = 1;
762 	return err;
763 
764 out_free:
765 	kfree_skb(skb);
766 	return err;
767 }
768 
769 static void tcf_ct_params_free(struct rcu_head *head)
770 {
771 	struct tcf_ct_params *params = container_of(head,
772 						    struct tcf_ct_params, rcu);
773 
774 	tcf_ct_flow_table_put(params);
775 
776 	if (params->tmpl)
777 		nf_ct_put(params->tmpl);
778 	kfree(params);
779 }
780 
781 #if IS_ENABLED(CONFIG_NF_NAT)
782 /* Modelled after nf_nat_ipv[46]_fn().
783  * range is only used for new, uninitialized NAT state.
784  * Returns either NF_ACCEPT or NF_DROP.
785  */
786 static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
787 			  enum ip_conntrack_info ctinfo,
788 			  const struct nf_nat_range2 *range,
789 			  enum nf_nat_manip_type maniptype)
790 {
791 	__be16 proto = skb_protocol(skb, true);
792 	int hooknum, err = NF_ACCEPT;
793 
794 	/* See HOOK2MANIP(). */
795 	if (maniptype == NF_NAT_MANIP_SRC)
796 		hooknum = NF_INET_LOCAL_IN; /* Source NAT */
797 	else
798 		hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */
799 
800 	switch (ctinfo) {
801 	case IP_CT_RELATED:
802 	case IP_CT_RELATED_REPLY:
803 		if (proto == htons(ETH_P_IP) &&
804 		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
805 			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
806 							   hooknum))
807 				err = NF_DROP;
808 			goto out;
809 		} else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) {
810 			__be16 frag_off;
811 			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
812 			int hdrlen = ipv6_skip_exthdr(skb,
813 						      sizeof(struct ipv6hdr),
814 						      &nexthdr, &frag_off);
815 
816 			if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
817 				if (!nf_nat_icmpv6_reply_translation(skb, ct,
818 								     ctinfo,
819 								     hooknum,
820 								     hdrlen))
821 					err = NF_DROP;
822 				goto out;
823 			}
824 		}
825 		/* Non-ICMP, fall through to initialize if needed. */
826 		fallthrough;
827 	case IP_CT_NEW:
828 		/* Seen it before?  This can happen for loopback, retransmitted,
829 		 * or local packets.
830 		 */
831 		if (!nf_nat_initialized(ct, maniptype)) {
832 			/* Initialize according to the NAT action. */
833 			err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
834 				/* Action is set up to establish a new
835 				 * mapping.
836 				 */
837 				? nf_nat_setup_info(ct, range, maniptype)
838 				: nf_nat_alloc_null_binding(ct, hooknum);
839 			if (err != NF_ACCEPT)
840 				goto out;
841 		}
842 		break;
843 
844 	case IP_CT_ESTABLISHED:
845 	case IP_CT_ESTABLISHED_REPLY:
846 		break;
847 
848 	default:
849 		err = NF_DROP;
850 		goto out;
851 	}
852 
853 	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
854 	if (err == NF_ACCEPT) {
855 		if (maniptype == NF_NAT_MANIP_SRC)
856 			tc_skb_cb(skb)->post_ct_snat = 1;
857 		if (maniptype == NF_NAT_MANIP_DST)
858 			tc_skb_cb(skb)->post_ct_dnat = 1;
859 	}
860 out:
861 	return err;
862 }
863 #endif /* CONFIG_NF_NAT */
864 
865 static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
866 {
867 #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
868 	u32 new_mark;
869 
870 	if (!mask)
871 		return;
872 
873 	new_mark = mark | (ct->mark & ~(mask));
874 	if (ct->mark != new_mark) {
875 		ct->mark = new_mark;
876 		if (nf_ct_is_confirmed(ct))
877 			nf_conntrack_event_cache(IPCT_MARK, ct);
878 	}
879 #endif
880 }
881 
882 static void tcf_ct_act_set_labels(struct nf_conn *ct,
883 				  u32 *labels,
884 				  u32 *labels_m)
885 {
886 #if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
887 	size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);
888 
889 	if (!memchr_inv(labels_m, 0, labels_sz))
890 		return;
891 
892 	nf_connlabels_replace(ct, labels, labels_m, 4);
893 #endif
894 }
895 
896 static int tcf_ct_act_nat(struct sk_buff *skb,
897 			  struct nf_conn *ct,
898 			  enum ip_conntrack_info ctinfo,
899 			  int ct_action,
900 			  struct nf_nat_range2 *range,
901 			  bool commit)
902 {
903 #if IS_ENABLED(CONFIG_NF_NAT)
904 	int err;
905 	enum nf_nat_manip_type maniptype;
906 
907 	if (!(ct_action & TCA_CT_ACT_NAT))
908 		return NF_ACCEPT;
909 
910 	/* Add NAT extension if not confirmed yet. */
911 	if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
912 		return NF_DROP;   /* Can't NAT. */
913 
914 	if (ctinfo != IP_CT_NEW && (ct->status & IPS_NAT_MASK) &&
915 	    (ctinfo != IP_CT_RELATED || commit)) {
916 		/* NAT an established or related connection like before. */
917 		if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
918 			/* This is the REPLY direction for a connection
919 			 * for which NAT was applied in the forward
920 			 * direction.  Do the reverse NAT.
921 			 */
922 			maniptype = ct->status & IPS_SRC_NAT
923 				? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
924 		else
925 			maniptype = ct->status & IPS_SRC_NAT
926 				? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
927 	} else if (ct_action & TCA_CT_ACT_NAT_SRC) {
928 		maniptype = NF_NAT_MANIP_SRC;
929 	} else if (ct_action & TCA_CT_ACT_NAT_DST) {
930 		maniptype = NF_NAT_MANIP_DST;
931 	} else {
932 		return NF_ACCEPT;
933 	}
934 
935 	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
936 	if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
937 		if (ct->status & IPS_SRC_NAT) {
938 			if (maniptype == NF_NAT_MANIP_SRC)
939 				maniptype = NF_NAT_MANIP_DST;
940 			else
941 				maniptype = NF_NAT_MANIP_SRC;
942 
943 			err = ct_nat_execute(skb, ct, ctinfo, range,
944 					     maniptype);
945 		} else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
946 			err = ct_nat_execute(skb, ct, ctinfo, NULL,
947 					     NF_NAT_MANIP_SRC);
948 		}
949 	}
950 	return err;
951 #else
952 	return NF_ACCEPT;
953 #endif
954 }
955 
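/* Datapath entry point of the ct action. An illustrative setup from user
 * space (hypothetical devices and addresses, not taken from this file)
 * that exercises the commit + NAT path would be:
 *
 *   tc filter add dev eth0 ingress proto ip flower \
 *           action ct commit nat src addr 10.0.0.1 pipe \
 *           action mirred egress redirect dev eth1
 *
 * The action clears or establishes conntrack state, defragments if needed,
 * applies NAT, and may hand established flows to the flow table fast path.
 */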
956 static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
957 		      struct tcf_result *res)
958 {
959 	struct net *net = dev_net(skb->dev);
960 	bool cached, commit, clear, force;
961 	enum ip_conntrack_info ctinfo;
962 	struct tcf_ct *c = to_ct(a);
963 	struct nf_conn *tmpl = NULL;
964 	struct nf_hook_state state;
965 	int nh_ofs, err, retval;
966 	struct tcf_ct_params *p;
967 	bool skip_add = false;
968 	bool defrag = false;
969 	struct nf_conn *ct;
970 	u8 family;
971 
972 	p = rcu_dereference_bh(c->params);
973 
974 	retval = READ_ONCE(c->tcf_action);
975 	commit = p->ct_action & TCA_CT_ACT_COMMIT;
976 	clear = p->ct_action & TCA_CT_ACT_CLEAR;
977 	force = p->ct_action & TCA_CT_ACT_FORCE;
978 	tmpl = p->tmpl;
979 
980 	tcf_lastuse_update(&c->tcf_tm);
981 	tcf_action_update_bstats(&c->common, skb);
982 
983 	if (clear) {
984 		tc_skb_cb(skb)->post_ct = false;
985 		ct = nf_ct_get(skb, &ctinfo);
986 		if (ct) {
987 			nf_ct_put(ct);
988 			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
989 		}
990 
991 		goto out_clear;
992 	}
993 
994 	family = tcf_ct_skb_nf_family(skb);
995 	if (family == NFPROTO_UNSPEC)
996 		goto drop;
997 
998 	/* The conntrack module expects to be working at L3.
999 	 * We also try to pull the IPv4/6 header into the linear area.
1000 	 */
1001 	nh_ofs = skb_network_offset(skb);
1002 	skb_pull_rcsum(skb, nh_ofs);
1003 	err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
1004 	if (err == -EINPROGRESS) {
1005 		retval = TC_ACT_STOLEN;
1006 		goto out_clear;
1007 	}
1008 	if (err)
1009 		goto drop;
1010 
1011 	err = tcf_ct_skb_network_trim(skb, family);
1012 	if (err)
1013 		goto drop;
1014 
1015 	/* If we are recirculating packets to match on ct fields and
1016 	 * committing with a separate ct action, then we don't need to
1017 	 * actually run the packet through conntrack twice unless it's for a
1018 	 * different zone.
1019 	 */
1020 	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
1021 	if (!cached) {
1022 		if (tcf_ct_flow_table_lookup(p, skb, family)) {
1023 			skip_add = true;
1024 			goto do_nat;
1025 		}
1026 
1027 		/* Associate skb with specified zone. */
1028 		if (tmpl) {
1029 			nf_conntrack_put(skb_nfct(skb));
1030 			nf_conntrack_get(&tmpl->ct_general);
1031 			nf_ct_set(skb, tmpl, IP_CT_NEW);
1032 		}
1033 
1034 		state.hook = NF_INET_PRE_ROUTING;
1035 		state.net = net;
1036 		state.pf = family;
1037 		err = nf_conntrack_in(skb, &state);
1038 		if (err != NF_ACCEPT)
1039 			goto out_push;
1040 	}
1041 
1042 do_nat:
1043 	ct = nf_ct_get(skb, &ctinfo);
1044 	if (!ct)
1045 		goto out_push;
1046 	nf_ct_deliver_cached_events(ct);
1047 	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
1048 
1049 	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
1050 	if (err != NF_ACCEPT)
1051 		goto drop;
1052 
1053 	if (commit) {
1054 		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
1055 		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);
1056 
1057 		if (!nf_ct_is_confirmed(ct))
1058 			nf_conn_act_ct_ext_add(ct);
1059 
1060 		/* This will take care of sending queued events
1061 		 * even if the connection is already confirmed.
1062 		 */
1063 		if (nf_conntrack_confirm(skb) != NF_ACCEPT)
1064 			goto drop;
1065 	}
1066 
1067 	if (!skip_add)
1068 		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
1069 
1070 out_push:
1071 	skb_push_rcsum(skb, nh_ofs);
1072 
1073 	tc_skb_cb(skb)->post_ct = true;
1074 	tc_skb_cb(skb)->zone = p->zone;
1075 out_clear:
1076 	if (defrag)
1077 		qdisc_skb_cb(skb)->pkt_len = skb->len;
1078 	return retval;
1079 
1080 drop:
1081 	tcf_action_inc_drop_qstats(&c->common);
1082 	return TC_ACT_SHOT;
1083 }
1084 
1085 static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
1086 	[TCA_CT_ACTION] = { .type = NLA_U16 },
1087 	[TCA_CT_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_ct)),
1088 	[TCA_CT_ZONE] = { .type = NLA_U16 },
1089 	[TCA_CT_MARK] = { .type = NLA_U32 },
1090 	[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
1091 	[TCA_CT_LABELS] = { .type = NLA_BINARY,
1092 			    .len = 128 / BITS_PER_BYTE },
1093 	[TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
1094 				 .len = 128 / BITS_PER_BYTE },
1095 	[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
1096 	[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
1097 	[TCA_CT_NAT_IPV6_MIN] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
1098 	[TCA_CT_NAT_IPV6_MAX] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
1099 	[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
1100 	[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
1101 };
1102 
1103 static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
1104 				  struct tc_ct *parm,
1105 				  struct nlattr **tb,
1106 				  struct netlink_ext_ack *extack)
1107 {
1108 	struct nf_nat_range2 *range;
1109 
1110 	if (!(p->ct_action & TCA_CT_ACT_NAT))
1111 		return 0;
1112 
1113 	if (!IS_ENABLED(CONFIG_NF_NAT)) {
1114 		NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
1115 		return -EOPNOTSUPP;
1116 	}
1117 
1118 	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
1119 		return 0;
1120 
1121 	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
1122 	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
1123 		NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
1124 		return -EOPNOTSUPP;
1125 	}
1126 
1127 	range = &p->range;
1128 	if (tb[TCA_CT_NAT_IPV4_MIN]) {
1129 		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];
1130 
1131 		p->ipv4_range = true;
1132 		range->flags |= NF_NAT_RANGE_MAP_IPS;
1133 		range->min_addr.ip =
1134 			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);
1135 
1136 		range->max_addr.ip = max_attr ?
1137 				     nla_get_in_addr(max_attr) :
1138 				     range->min_addr.ip;
1139 	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
1140 		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];
1141 
1142 		p->ipv4_range = false;
1143 		range->flags |= NF_NAT_RANGE_MAP_IPS;
1144 		range->min_addr.in6 =
1145 			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);
1146 
1147 		range->max_addr.in6 = max_attr ?
1148 				      nla_get_in6_addr(max_attr) :
1149 				      range->min_addr.in6;
1150 	}
1151 
1152 	if (tb[TCA_CT_NAT_PORT_MIN]) {
1153 		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
1154 		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);
1155 
1156 		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
1157 				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
1158 				       range->min_proto.all;
1159 	}
1160 
1161 	return 0;
1162 }
1163 
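/* Copy a value (and optionally its mask) from netlink attributes. When the
 * mask attribute is absent, the mask defaults to all ones.
 */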
1164 static void tcf_ct_set_key_val(struct nlattr **tb,
1165 			       void *val, int val_type,
1166 			       void *mask, int mask_type,
1167 			       int len)
1168 {
1169 	if (!tb[val_type])
1170 		return;
1171 	nla_memcpy(val, tb[val_type], len);
1172 
1173 	if (!mask)
1174 		return;
1175 
1176 	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
1177 		memset(mask, 0xff, len);
1178 	else
1179 		nla_memcpy(mask, tb[mask_type], len);
1180 }
1181 
1182 static int tcf_ct_fill_params(struct net *net,
1183 			      struct tcf_ct_params *p,
1184 			      struct tc_ct *parm,
1185 			      struct nlattr **tb,
1186 			      struct netlink_ext_ack *extack)
1187 {
1188 	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
1189 	struct nf_conntrack_zone zone;
1190 	struct nf_conn *tmpl;
1191 	int err;
1192 
1193 	p->zone = NF_CT_DEFAULT_ZONE_ID;
1194 
1195 	tcf_ct_set_key_val(tb,
1196 			   &p->ct_action, TCA_CT_ACTION,
1197 			   NULL, TCA_CT_UNSPEC,
1198 			   sizeof(p->ct_action));
1199 
1200 	if (p->ct_action & TCA_CT_ACT_CLEAR)
1201 		return 0;
1202 
1203 	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
1204 	if (err)
1205 		return err;
1206 
1207 	if (tb[TCA_CT_MARK]) {
1208 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1209 			NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
1210 			return -EOPNOTSUPP;
1211 		}
1212 		tcf_ct_set_key_val(tb,
1213 				   &p->mark, TCA_CT_MARK,
1214 				   &p->mark_mask, TCA_CT_MARK_MASK,
1215 				   sizeof(p->mark));
1216 	}
1217 
1218 	if (tb[TCA_CT_LABELS]) {
1219 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1220 			NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled.");
1221 			return -EOPNOTSUPP;
1222 		}
1223 
1224 		if (!tn->labels) {
1225 			NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
1226 			return -EOPNOTSUPP;
1227 		}
1228 		tcf_ct_set_key_val(tb,
1229 				   p->labels, TCA_CT_LABELS,
1230 				   p->labels_mask, TCA_CT_LABELS_MASK,
1231 				   sizeof(p->labels));
1232 	}
1233 
1234 	if (tb[TCA_CT_ZONE]) {
1235 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1236 			NL_SET_ERR_MSG_MOD(extack, "Conntrack zones isn't enabled.");
1237 			return -EOPNOTSUPP;
1238 		}
1239 
1240 		tcf_ct_set_key_val(tb,
1241 				   &p->zone, TCA_CT_ZONE,
1242 				   NULL, TCA_CT_UNSPEC,
1243 				   sizeof(p->zone));
1244 	}
1245 
1246 	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
1247 	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
1248 	if (!tmpl) {
1249 		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
1250 		return -ENOMEM;
1251 	}
1252 	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
1253 	p->tmpl = tmpl;
1254 
1255 	return 0;
1256 }
1257 
1258 static int tcf_ct_init(struct net *net, struct nlattr *nla,
1259 		       struct nlattr *est, struct tc_action **a,
1260 		       struct tcf_proto *tp, u32 flags,
1261 		       struct netlink_ext_ack *extack)
1262 {
1263 	struct tc_action_net *tn = net_generic(net, ct_net_id);
1264 	bool bind = flags & TCA_ACT_FLAGS_BIND;
1265 	struct tcf_ct_params *params = NULL;
1266 	struct nlattr *tb[TCA_CT_MAX + 1];
1267 	struct tcf_chain *goto_ch = NULL;
1268 	struct tc_ct *parm;
1269 	struct tcf_ct *c;
1270 	int err, res = 0;
1271 	u32 index;
1272 
1273 	if (!nla) {
1274 		NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
1275 		return -EINVAL;
1276 	}
1277 
1278 	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
1279 	if (err < 0)
1280 		return err;
1281 
1282 	if (!tb[TCA_CT_PARMS]) {
1283 		NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
1284 		return -EINVAL;
1285 	}
1286 	parm = nla_data(tb[TCA_CT_PARMS]);
1287 	index = parm->index;
1288 	err = tcf_idr_check_alloc(tn, &index, a, bind);
1289 	if (err < 0)
1290 		return err;
1291 
1292 	if (!err) {
1293 		err = tcf_idr_create_from_flags(tn, index, est, a,
1294 						&act_ct_ops, bind, flags);
1295 		if (err) {
1296 			tcf_idr_cleanup(tn, index);
1297 			return err;
1298 		}
1299 		res = ACT_P_CREATED;
1300 	} else {
1301 		if (bind)
1302 			return 0;
1303 
1304 		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
1305 			tcf_idr_release(*a, bind);
1306 			return -EEXIST;
1307 		}
1308 	}
1309 	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
1310 	if (err < 0)
1311 		goto cleanup;
1312 
1313 	c = to_ct(*a);
1314 
1315 	params = kzalloc(sizeof(*params), GFP_KERNEL);
1316 	if (unlikely(!params)) {
1317 		err = -ENOMEM;
1318 		goto cleanup;
1319 	}
1320 
1321 	err = tcf_ct_fill_params(net, params, parm, tb, extack);
1322 	if (err)
1323 		goto cleanup;
1324 
1325 	err = tcf_ct_flow_table_get(params);
1326 	if (err)
1327 		goto cleanup;
1328 
1329 	spin_lock_bh(&c->tcf_lock);
1330 	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
1331 	params = rcu_replace_pointer(c->params, params,
1332 				     lockdep_is_held(&c->tcf_lock));
1333 	spin_unlock_bh(&c->tcf_lock);
1334 
1335 	if (goto_ch)
1336 		tcf_chain_put_by_act(goto_ch);
1337 	if (params)
1338 		call_rcu(&params->rcu, tcf_ct_params_free);
1339 
1340 	return res;
1341 
1342 cleanup:
1343 	if (goto_ch)
1344 		tcf_chain_put_by_act(goto_ch);
1345 	kfree(params);
1346 	tcf_idr_release(*a, bind);
1347 	return err;
1348 }
1349 
1350 static void tcf_ct_cleanup(struct tc_action *a)
1351 {
1352 	struct tcf_ct_params *params;
1353 	struct tcf_ct *c = to_ct(a);
1354 
1355 	params = rcu_dereference_protected(c->params, 1);
1356 	if (params)
1357 		call_rcu(&params->rcu, tcf_ct_params_free);
1358 }
1359 
1360 static int tcf_ct_dump_key_val(struct sk_buff *skb,
1361 			       void *val, int val_type,
1362 			       void *mask, int mask_type,
1363 			       int len)
1364 {
1365 	int err;
1366 
1367 	if (mask && !memchr_inv(mask, 0, len))
1368 		return 0;
1369 
1370 	err = nla_put(skb, val_type, len, val);
1371 	if (err)
1372 		return err;
1373 
1374 	if (mask_type != TCA_CT_UNSPEC) {
1375 		err = nla_put(skb, mask_type, len, mask);
1376 		if (err)
1377 			return err;
1378 	}
1379 
1380 	return 0;
1381 }
1382 
1383 static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
1384 {
1385 	struct nf_nat_range2 *range = &p->range;
1386 
1387 	if (!(p->ct_action & TCA_CT_ACT_NAT))
1388 		return 0;
1389 
1390 	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
1391 		return 0;
1392 
1393 	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
1394 		if (p->ipv4_range) {
1395 			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
1396 					    range->min_addr.ip))
1397 				return -1;
1398 			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
1399 					    range->max_addr.ip))
1400 				return -1;
1401 		} else {
1402 			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
1403 					     &range->min_addr.in6))
1404 				return -1;
1405 			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
1406 					     &range->max_addr.in6))
1407 				return -1;
1408 		}
1409 	}
1410 
1411 	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
1412 		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
1413 				 range->min_proto.all))
1414 			return -1;
1415 		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
1416 				 range->max_proto.all))
1417 			return -1;
1418 	}
1419 
1420 	return 0;
1421 }
1422 
1423 static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
1424 			      int bind, int ref)
1425 {
1426 	unsigned char *b = skb_tail_pointer(skb);
1427 	struct tcf_ct *c = to_ct(a);
1428 	struct tcf_ct_params *p;
1429 
1430 	struct tc_ct opt = {
1431 		.index   = c->tcf_index,
1432 		.refcnt  = refcount_read(&c->tcf_refcnt) - ref,
1433 		.bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
1434 	};
1435 	struct tcf_t t;
1436 
1437 	spin_lock_bh(&c->tcf_lock);
1438 	p = rcu_dereference_protected(c->params,
1439 				      lockdep_is_held(&c->tcf_lock));
1440 	opt.action = c->tcf_action;
1441 
1442 	if (tcf_ct_dump_key_val(skb,
1443 				&p->ct_action, TCA_CT_ACTION,
1444 				NULL, TCA_CT_UNSPEC,
1445 				sizeof(p->ct_action)))
1446 		goto nla_put_failure;
1447 
1448 	if (p->ct_action & TCA_CT_ACT_CLEAR)
1449 		goto skip_dump;
1450 
1451 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
1452 	    tcf_ct_dump_key_val(skb,
1453 				&p->mark, TCA_CT_MARK,
1454 				&p->mark_mask, TCA_CT_MARK_MASK,
1455 				sizeof(p->mark)))
1456 		goto nla_put_failure;
1457 
1458 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
1459 	    tcf_ct_dump_key_val(skb,
1460 				p->labels, TCA_CT_LABELS,
1461 				p->labels_mask, TCA_CT_LABELS_MASK,
1462 				sizeof(p->labels)))
1463 		goto nla_put_failure;
1464 
1465 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
1466 	    tcf_ct_dump_key_val(skb,
1467 				&p->zone, TCA_CT_ZONE,
1468 				NULL, TCA_CT_UNSPEC,
1469 				sizeof(p->zone)))
1470 		goto nla_put_failure;
1471 
1472 	if (tcf_ct_dump_nat(skb, p))
1473 		goto nla_put_failure;
1474 
1475 skip_dump:
1476 	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
1477 		goto nla_put_failure;
1478 
1479 	tcf_tm_dump(&t, &c->tcf_tm);
1480 	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
1481 		goto nla_put_failure;
1482 	spin_unlock_bh(&c->tcf_lock);
1483 
1484 	return skb->len;
1485 nla_put_failure:
1486 	spin_unlock_bh(&c->tcf_lock);
1487 	nlmsg_trim(skb, b);
1488 	return -1;
1489 }
1490 
1491 static int tcf_ct_walker(struct net *net, struct sk_buff *skb,
1492 			 struct netlink_callback *cb, int type,
1493 			 const struct tc_action_ops *ops,
1494 			 struct netlink_ext_ack *extack)
1495 {
1496 	struct tc_action_net *tn = net_generic(net, ct_net_id);
1497 
1498 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
1499 }
1500 
1501 static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index)
1502 {
1503 	struct tc_action_net *tn = net_generic(net, ct_net_id);
1504 
1505 	return tcf_idr_search(tn, a, index);
1506 }
1507 
1508 static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
1509 			     u64 drops, u64 lastuse, bool hw)
1510 {
1511 	struct tcf_ct *c = to_ct(a);
1512 
1513 	tcf_action_update_stats(a, bytes, packets, drops, hw);
1514 	c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
1515 }
1516 
1517 static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data,
1518 				    u32 *index_inc, bool bind)
1519 {
1520 	if (bind) {
1521 		struct flow_action_entry *entry = entry_data;
1522 
1523 		entry->id = FLOW_ACTION_CT;
1524 		entry->ct.action = tcf_ct_action(act);
1525 		entry->ct.zone = tcf_ct_zone(act);
1526 		entry->ct.flow_table = tcf_ct_ft(act);
1527 		*index_inc = 1;
1528 	} else {
1529 		struct flow_offload_action *fl_action = entry_data;
1530 
1531 		fl_action->id = FLOW_ACTION_CT;
1532 	}
1533 
1534 	return 0;
1535 }
1536 
1537 static struct tc_action_ops act_ct_ops = {
1538 	.kind		=	"ct",
1539 	.id		=	TCA_ID_CT,
1540 	.owner		=	THIS_MODULE,
1541 	.act		=	tcf_ct_act,
1542 	.dump		=	tcf_ct_dump,
1543 	.init		=	tcf_ct_init,
1544 	.cleanup	=	tcf_ct_cleanup,
1545 	.walk		=	tcf_ct_walker,
1546 	.lookup		=	tcf_ct_search,
1547 	.stats_update	=	tcf_stats_update,
1548 	.offload_act_setup =	tcf_ct_offload_act_setup,
1549 	.size		=	sizeof(struct tcf_ct),
1550 };
1551 
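/* Per-netns init: reserve all 128 connlabel bits up front; if that fails,
 * tn->labels stays false and ct actions that set labels are rejected in
 * tcf_ct_fill_params().
 */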
1552 static __net_init int ct_init_net(struct net *net)
1553 {
1554 	unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
1555 	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
1556 
1557 	if (nf_connlabels_get(net, n_bits - 1)) {
1558 		tn->labels = false;
1559 		pr_err("act_ct: Failed to set connlabels length\n");
1560 	} else {
1561 		tn->labels = true;
1562 	}
1563 
1564 	return tc_action_net_init(net, &tn->tn, &act_ct_ops);
1565 }
1566 
1567 static void __net_exit ct_exit_net(struct list_head *net_list)
1568 {
1569 	struct net *net;
1570 
1571 	rtnl_lock();
1572 	list_for_each_entry(net, net_list, exit_list) {
1573 		struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
1574 
1575 		if (tn->labels)
1576 			nf_connlabels_put(net);
1577 	}
1578 	rtnl_unlock();
1579 
1580 	tc_action_net_exit(net_list, ct_net_id);
1581 }
1582 
1583 static struct pernet_operations ct_net_ops = {
1584 	.init = ct_init_net,
1585 	.exit_batch = ct_exit_net,
1586 	.id   = &ct_net_id,
1587 	.size = sizeof(struct tc_ct_action_net),
1588 };
1589 
1590 static int __init ct_init_module(void)
1591 {
1592 	int err;
1593 
1594 	act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
1595 	if (!act_ct_wq)
1596 		return -ENOMEM;
1597 
1598 	err = tcf_ct_flow_tables_init();
1599 	if (err)
1600 		goto err_tbl_init;
1601 
1602 	err = tcf_register_action(&act_ct_ops, &ct_net_ops);
1603 	if (err)
1604 		goto err_register;
1605 
1606 	err = nf_ct_extend_register(&act_ct_extend);
1607 	if (err)
1608 		goto err_register_extend;
1609 
1610 	static_branch_inc(&tcf_frag_xmit_count);
1611 
1612 	return 0;
1613 
1614 err_register_extend:
1615 	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
1616 err_register:
1617 	tcf_ct_flow_tables_uninit();
1618 err_tbl_init:
1619 	destroy_workqueue(act_ct_wq);
1620 	return err;
1621 }
1622 
1623 static void __exit ct_cleanup_module(void)
1624 {
1625 	static_branch_dec(&tcf_frag_xmit_count);
1626 	nf_ct_extend_unregister(&act_ct_extend);
1627 	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
1628 	tcf_ct_flow_tables_uninit();
1629 	destroy_workqueue(act_ct_wq);
1630 }
1631 
1632 module_init(ct_init_module);
1633 module_exit(ct_cleanup_module);
1634 MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
1635 MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
1636 MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
1637 MODULE_DESCRIPTION("Connection tracking action");
1638 MODULE_LICENSE("GPL v2");
1639