// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* -
 * net/sched/act_ct.c  Connection Tracking action
 *
 * Authors:   Paul Blakey <paulb@mellanox.com>
 *            Yossi Kuperman <yossiku@mellanox.com>
 *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */
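
/* Illustrative usage (a sketch assuming standard iproute2 flower/ct
 * syntax, not normative documentation): send untracked packets through
 * conntrack in zone 1 and continue matching in chain 1:
 *
 *   tc filter add dev eth0 ingress proto ip flower ct_state -trk \
 *           action ct zone 1 pipe action goto chain 1
 */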

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/ip.h>
#include <net/ipv6_frag.h>
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>

#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <uapi/linux/netfilter/nf_nat.h>

static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);
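
/* Each conntrack zone has a single tcf_ct_flow_table, shared (and
 * refcounted) by all ct actions in that zone; zones_ht maps a zone id
 * to its table.
 */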
struct tcf_ct_flow_table {
	struct rhash_head node; /* In zones tables */

	struct rcu_work rwork;
	struct nf_flowtable nf_ft;
	refcount_t ref;
	u16 zone;

	bool dying;
};

static const struct rhashtable_params zones_params = {
	.head_offset = offsetof(struct tcf_ct_flow_table, node),
	.key_offset = offsetof(struct tcf_ct_flow_table, zone),
	.key_len = sizeof_field(struct tcf_ct_flow_table, zone),
	.automatic_shrinking = true,
};

static struct flow_action_entry *
tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
{
	int i = flow_action->num_entries++;

	return &flow_action->entries[i];
}

static void tcf_ct_add_mangle_action(struct flow_action *action,
				     enum flow_action_mangle_base htype,
				     u32 offset,
				     u32 mask,
				     u32 val)
{
	struct flow_action_entry *entry;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_MANGLE;
	entry->mangle.htype = htype;
	entry->mangle.mask = ~mask;
	entry->mangle.offset = offset;
	entry->mangle.val = val;
}

/* The following NAT helper functions check whether the inverted reverse
 * tuple (target) differs from the current direction's tuple - meaning NAT
 * for ports and/or IP addresses is needed - and add the relevant mangle
 * actions.
 */
static void
tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, saddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.src.u3.ip));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, daddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.dst.u3.ip));
}

static void
tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
				   union nf_inet_addr *addr,
				   u32 offset)
{
	int i;

	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
					 i * sizeof(u32) + offset,
					 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
}

static void
tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
						   offsetof(struct ipv6hdr,
							    saddr));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
						   offsetof(struct ipv6hdr,
							    daddr));
}

static void
tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.tcp.port;
	__be16 target_dst = target.dst.u.tcp.port;

	if (target_src != tuple->src.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void
tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.udp.port;
	__be16 target_dst = target.dst.u.udp.port;

	if (target_src != tuple->src.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

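/* Emit a CT_METADATA entry so that, for packets matched in hardware, the
 * conntrack state (mark, labels, direction and the ct cookie) can be
 * restored onto the skb.
 */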
static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
					      enum ip_conntrack_dir dir,
					      struct flow_action *action)
{
	struct nf_conn_labels *ct_labels;
	struct flow_action_entry *entry;
	enum ip_conntrack_info ctinfo;
	u32 *act_ct_labels;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_CT_METADATA;
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	entry->ct_metadata.mark = ct->mark;
#endif
	ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
					     IP_CT_ESTABLISHED_REPLY;
	/* Aligns with the CT reference set on the skb by nf_ct_set(). */
	entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;
	entry->ct_metadata.orig_dir = dir == IP_CT_DIR_ORIGINAL;

	act_ct_labels = entry->ct_metadata.labels;
	ct_labels = nf_ct_labels_find(ct);
	if (ct_labels)
		memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
	else
		memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
}

static int tcf_ct_flow_table_add_action_nat(struct net *net,
					    struct nf_conn *ct,
					    enum ip_conntrack_dir dir,
					    struct flow_action *action)
{
	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
	struct nf_conntrack_tuple target;

	if (!(ct->status & IPS_NAT_MASK))
		return 0;

	nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
						      action);
		break;
	case NFPROTO_IPV6:
		tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
						      action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
		break;
	case IPPROTO_UDP:
		tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

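/* The flowtable ->action() callback: translate one direction of an
 * offloaded flow into flow_action entries (NAT mangles plus CT metadata).
 * On error, any partially filled entries are cleared again.
 */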
static int tcf_ct_flow_table_fill_actions(struct net *net,
					  const struct flow_offload *flow,
					  enum flow_offload_tuple_dir tdir,
					  struct nf_flow_rule *flow_rule)
{
	struct flow_action *action = &flow_rule->rule->action;
	int num_entries = action->num_entries;
	struct nf_conn *ct = flow->ct;
	enum ip_conntrack_dir dir;
	int i, err;

	switch (tdir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		dir = IP_CT_DIR_ORIGINAL;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		dir = IP_CT_DIR_REPLY;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
	if (err)
		goto err_nat;

	tcf_ct_flow_table_add_action_meta(ct, dir, action);
	return 0;

err_nat:
	/* Clear filled actions */
	for (i = num_entries; i < action->num_entries; i++)
		memset(&action->entries[i], 0, sizeof(action->entries[i]));
	action->num_entries = num_entries;

	return err;
}

static struct nf_flowtable_type flowtable_ct = {
	.action		= tcf_ct_flow_table_fill_actions,
	.owner		= THIS_MODULE,
};

static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft;
	int err = -ENOMEM;

	mutex_lock(&zones_mutex);
	ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
	if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
		goto out_unlock;

	ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
	if (!ct_ft)
		goto err_alloc;
	refcount_set(&ct_ft->ref, 1);

	ct_ft->zone = params->zone;
	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
	if (err)
		goto err_insert;

	ct_ft->nf_ft.type = &flowtable_ct;
	ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD |
			      NF_FLOWTABLE_COUNTER;
	err = nf_flow_table_init(&ct_ft->nf_ft);
	if (err)
		goto err_init;

	__module_get(THIS_MODULE);
out_unlock:
	params->ct_ft = ct_ft;
	params->nf_ft = &ct_ft->nf_ft;
	mutex_unlock(&zones_mutex);

	return 0;

err_init:
	rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
err_insert:
	kfree(ct_ft);
err_alloc:
	mutex_unlock(&zones_mutex);
	return err;
}

static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
	struct tcf_ct_flow_table *ct_ft;

	ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
			     rwork);
	nf_flow_table_free(&ct_ft->nf_ft);
	kfree(ct_ft);

	module_put(THIS_MODULE);
}

static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft = params->ct_ft;

	if (refcount_dec_and_test(&params->ct_ft->ref)) {
		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
	}
}

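/* Offload an established connection to the zone's flow table.
 * IPS_OFFLOAD_BIT guards against inserting the same conntrack entry
 * twice; TCP window tracking is set to liberal mode because offloaded
 * packets bypass the usual conntrack state updates.
 */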
static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
				  struct nf_conn *ct,
				  bool tcp)
{
	struct flow_offload *entry;
	int err;

	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		return;

	entry = flow_offload_alloc(ct);
	if (!entry) {
		WARN_ON_ONCE(1);
		goto err_alloc;
	}

	if (tcp) {
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}

	err = flow_offload_add(&ct_ft->nf_ft, entry);
	if (err)
		goto err_add;

	return;

err_add:
	flow_offload_free(entry);
err_alloc:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
}

static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo)
{
	bool tcp = false;

	if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
		return;

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcp = true;
		if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
			return;
		break;
	case IPPROTO_UDP:
		break;
	default:
		return;
	}

	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & IPS_SEQ_ADJUST)
		return;

	tcf_ct_flow_table_add(ct_ft, ct, tcp);
}

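/* Parse an IPv4 packet into a flow tuple for a flowtable lookup.
 * Fragments, IP options, non-TCP/UDP protocols and packets whose TTL is
 * about to expire are left to the slow path.
 */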
static bool
tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return false;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(thoff != sizeof(struct iphdr)))
		return false;

	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return false;

	if (iph->ttl <= 1)
		return false;

	if (!pskb_network_may_pull(skb, iph->protocol == IPPROTO_TCP ?
					thoff + sizeof(struct tcphdr) :
					thoff + sizeof(*ports)))
		return false;

	iph = ip_hdr(skb);
	if (iph->protocol == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;

	return true;
}

static bool
tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
		return false;

	ip6h = ipv6_hdr(skb);

	if (ip6h->nexthdr != IPPROTO_TCP &&
	    ip6h->nexthdr != IPPROTO_UDP)
		return false;

	if (ip6h->hop_limit <= 1)
		return false;

	thoff = sizeof(*ip6h);
	if (!pskb_network_may_pull(skb, ip6h->nexthdr == IPPROTO_TCP ?
					thoff + sizeof(struct tcphdr) :
					thoff + sizeof(*ports)))
		return false;

	ip6h = ipv6_hdr(skb);
	if (ip6h->nexthdr == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = ip6h->nexthdr;

	return true;
}

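/* Fast path: look the packet up in the zone's flow table and, on a hit,
 * attach the conntrack entry to the skb without running a full
 * nf_conntrack_in() pass.
 */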
static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
				     struct sk_buff *skb,
				     u8 family)
{
	struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload_tuple tuple = {};
	enum ip_conntrack_info ctinfo;
	struct tcphdr *tcph = NULL;
	struct flow_offload *flow;
	struct nf_conn *ct;
	u8 dir;

	/* Previously seen or loopback */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return false;

	switch (family) {
	case NFPROTO_IPV4:
		if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
			return false;
		break;
	case NFPROTO_IPV6:
		if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
			return false;
		break;
	default:
		return false;
	}

	tuplehash = flow_offload_lookup(nf_ft, &tuple);
	if (!tuplehash)
		return false;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	ct = flow->ct;

	if (tcph && (unlikely(tcph->fin || tcph->rst))) {
		flow_offload_teardown(flow);
		return false;
	}

	ctinfo = dir == FLOW_OFFLOAD_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
						    IP_CT_ESTABLISHED_REPLY;

	flow_offload_refresh(nf_ft, flow);
	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);
	if (nf_ft->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(ct, dir, skb->len);

	return true;
}

static int tcf_ct_flow_tables_init(void)
{
	return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
	rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;
static unsigned int ct_net_id;

struct tc_ct_action_net {
	struct tc_action_net tn; /* Must be first */
	bool labels;
};

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
				   u16 zone_id, bool force)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;
	if (!net_eq(net, read_pnet(&ct->ct_net)))
		return false;
	if (nf_ct_zone(ct)->id != zone_id)
		return false;

	/* Force conntrack entry direction. */
	if (force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		if (nf_ct_is_confirmed(ct))
			nf_ct_kill(ct);

		nf_conntrack_put(&ct->ct_general);
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

		return false;
	}

	return true;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
{
	unsigned int len;
	int err;

	switch (family) {
	case NFPROTO_IPV4:
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case NFPROTO_IPV6:
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	err = pskb_trim_rcsum(skb, len);

	return err;
}

static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
	u8 family = NFPROTO_UNSPEC;

	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		family = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		family = NFPROTO_IPV6;
		break;
	default:
		break;
	}

	return family;
}

static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int len;

	len = skb_network_offset(skb) + sizeof(struct iphdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	*frag = ip_is_fragment(ip_hdr(skb));
	return 0;
}

static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int flags = 0, len, payload_ofs = 0;
	unsigned short frag_off;
	int nexthdr;

	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	*frag = flags & IP6_FH_F_FRAG;
	return 0;
}

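/* Defragment the packet if needed before handing it to conntrack. The
 * qdisc cb is saved and restored around defragmentation because
 * ip_defrag() and nf_ct_frag6_gather() reuse the skb control block.
 */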
static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
				   u8 family, u16 zone, bool *defrag)
{
	enum ip_conntrack_info ctinfo;
	struct qdisc_skb_cb cb;
	struct nf_conn *ct;
	int err = 0;
	bool frag;

	/* Previously seen (loopback)? Ignore. */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return 0;

	if (family == NFPROTO_IPV4)
		err = tcf_ct_ipv4_is_fragment(skb, &frag);
	else
		err = tcf_ct_ipv6_is_fragment(skb, &frag);
	if (err || !frag)
		return err;

	skb_get(skb);
	cb = *qdisc_skb_cb(skb);

	if (family == NFPROTO_IPV4) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		local_bh_disable();
		err = ip_defrag(net, skb, user);
		local_bh_enable();
		if (err && err != -EINPROGRESS)
			return err;

		if (!err) {
			*defrag = true;
			cb.mru = IPCB(skb)->frag_max_size;
		}
	} else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err && err != -EINPROGRESS)
			goto out_free;

		if (!err) {
			*defrag = true;
			cb.mru = IP6CB(skb)->frag_max_size;
		}
#else
		err = -EOPNOTSUPP;
		goto out_free;
#endif
	}

	*qdisc_skb_cb(skb) = cb;
	skb_clear_hash(skb);
	skb->ignore_df = 1;
	return err;

out_free:
	kfree_skb(skb);
	return err;
}

static void tcf_ct_params_free(struct rcu_head *head)
{
	struct tcf_ct_params *params = container_of(head,
						    struct tcf_ct_params, rcu);

	tcf_ct_flow_table_put(params);

	if (params->tmpl)
		nf_conntrack_put(&params->tmpl->ct_general);
	kfree(params);
}

#if IS_ENABLED(CONFIG_NF_NAT)
/* Modelled after nf_nat_ipv[46]_fn().
 * range is only used for new, uninitialized NAT state.
 * Returns either NF_ACCEPT or NF_DROP.
 */
static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct nf_nat_range2 *range,
			  enum nf_nat_manip_type maniptype)
{
	__be16 proto = skb_protocol(skb, true);
	int hooknum, err = NF_ACCEPT;

	/* See HOOK2MANIP(). */
	if (maniptype == NF_NAT_MANIP_SRC)
		hooknum = NF_INET_LOCAL_IN; /* Source NAT */
	else
		hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		if (proto == htons(ETH_P_IP) &&
		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   hooknum))
				err = NF_DROP;
			goto out;
		} else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) {
			__be16 frag_off;
			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
			int hdrlen = ipv6_skip_exthdr(skb,
						      sizeof(struct ipv6hdr),
						      &nexthdr, &frag_off);

			if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
				if (!nf_nat_icmpv6_reply_translation(skb, ct,
								     ctinfo,
								     hooknum,
								     hdrlen))
					err = NF_DROP;
				goto out;
			}
		}
		/* Non-ICMP, fall through to initialize if needed. */
		fallthrough;
	case IP_CT_NEW:
		/* Seen it before?  This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			/* Initialize according to the NAT action. */
			err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
				/* Action is set up to establish a new
				 * mapping.
				 */
				? nf_nat_setup_info(ct, range, maniptype)
				: nf_nat_alloc_null_binding(ct, hooknum);
			if (err != NF_ACCEPT)
				goto out;
		}
		break;

	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		break;

	default:
		err = NF_DROP;
		goto out;
	}

	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
out:
	return err;
}
#endif /* CONFIG_NF_NAT */

static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	if (!mask)
		return;

	new_mark = mark | (ct->mark & ~(mask));
	if (ct->mark != new_mark) {
		ct->mark = new_mark;
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
	}
#endif
}

static void tcf_ct_act_set_labels(struct nf_conn *ct,
				  u32 *labels,
				  u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
	size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);

	if (!memchr_inv(labels_m, 0, labels_sz))
		return;

	nf_connlabels_replace(ct, labels, labels_m, 4);
#endif
}

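/* Apply NAT as requested by ct_action. When a connection has both SNAT
 * and DNAT in effect, ct_nat_execute() is invoked a second time with the
 * other manip type so that both translations are performed.
 */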
static int tcf_ct_act_nat(struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  int ct_action,
			  struct nf_nat_range2 *range,
			  bool commit)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int err;
	enum nf_nat_manip_type maniptype;

	if (!(ct_action & TCA_CT_ACT_NAT))
		return NF_ACCEPT;

	/* Add NAT extension if not confirmed yet. */
	if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
		return NF_DROP;   /* Can't NAT. */

	if (ctinfo != IP_CT_NEW && (ct->status & IPS_NAT_MASK) &&
	    (ctinfo != IP_CT_RELATED || commit)) {
		/* NAT an established or related connection like before. */
		if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
			/* This is the REPLY direction for a connection
			 * for which NAT was applied in the forward
			 * direction.  Do the reverse NAT.
			 */
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
		else
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
	} else if (ct_action & TCA_CT_ACT_NAT_SRC) {
		maniptype = NF_NAT_MANIP_SRC;
	} else if (ct_action & TCA_CT_ACT_NAT_DST) {
		maniptype = NF_NAT_MANIP_DST;
	} else {
		return NF_ACCEPT;
	}

	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	if (err == NF_ACCEPT &&
	    ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
		if (maniptype == NF_NAT_MANIP_SRC)
			maniptype = NF_NAT_MANIP_DST;
		else
			maniptype = NF_NAT_MANIP_SRC;

		err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	}
	return err;
#else
	return NF_ACCEPT;
#endif
}

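/* Main action entry point: defragment, run conntrack (or take the
 * flowtable fast path), apply NAT, set mark/labels and confirm the
 * connection on commit, then optionally offload it to the flow table.
 */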
static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
		      struct tcf_result *res)
{
	struct net *net = dev_net(skb->dev);
	bool cached, commit, clear, force;
	enum ip_conntrack_info ctinfo;
	struct tcf_ct *c = to_ct(a);
	struct nf_conn *tmpl = NULL;
	struct nf_hook_state state;
	int nh_ofs, err, retval;
	struct tcf_ct_params *p;
	bool skip_add = false;
	bool defrag = false;
	struct nf_conn *ct;
	u8 family;

	p = rcu_dereference_bh(c->params);

	retval = READ_ONCE(c->tcf_action);
	commit = p->ct_action & TCA_CT_ACT_COMMIT;
	clear = p->ct_action & TCA_CT_ACT_CLEAR;
	force = p->ct_action & TCA_CT_ACT_FORCE;
	tmpl = p->tmpl;

	tcf_lastuse_update(&c->tcf_tm);

	if (clear) {
		qdisc_skb_cb(skb)->post_ct = false;
		ct = nf_ct_get(skb, &ctinfo);
		if (ct) {
			nf_conntrack_put(&ct->ct_general);
			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		}

		goto out_clear;
	}

	family = tcf_ct_skb_nf_family(skb);
	if (family == NFPROTO_UNSPEC)
		goto drop;

	/* The conntrack module expects to be working at L3.
	 * We also try to pull the IPv4/6 header into the linear area.
	 */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);
	err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
	if (err == -EINPROGRESS) {
		retval = TC_ACT_STOLEN;
		goto out;
	}
	if (err)
		goto drop;

	err = tcf_ct_skb_network_trim(skb, family);
	if (err)
		goto drop;

	/* If we are recirculating packets to match on ct fields and
	 * committing with a separate ct action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
	if (!cached) {
		if (!commit && tcf_ct_flow_table_lookup(p, skb, family)) {
			skip_add = true;
			goto do_nat;
		}

		/* Associate skb with specified zone. */
		if (tmpl) {
			ct = nf_ct_get(skb, &ctinfo);
			if (skb_nfct(skb))
				nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		state.hook = NF_INET_PRE_ROUTING;
		state.net = net;
		state.pf = family;
		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			goto out_push;
	}

do_nat:
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		goto out_push;
	nf_ct_deliver_cached_events(ct);

	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
	if (err != NF_ACCEPT)
		goto drop;

	if (commit) {
		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);

		/* This will take care of sending queued events
		 * even if the connection is already confirmed.
		 */
		nf_conntrack_confirm(skb);
	} else if (!skip_add) {
		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
	}

out_push:
	skb_push_rcsum(skb, nh_ofs);

out:
	qdisc_skb_cb(skb)->post_ct = true;
out_clear:
	tcf_action_update_bstats(&c->common, skb);
	if (defrag)
		qdisc_skb_cb(skb)->pkt_len = skb->len;
	return retval;

drop:
	tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_SHOT;
}

static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
	[TCA_CT_ACTION] = { .type = NLA_U16 },
	[TCA_CT_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_ct)),
	[TCA_CT_ZONE] = { .type = NLA_U16 },
	[TCA_CT_MARK] = { .type = NLA_U32 },
	[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
	[TCA_CT_LABELS] = { .type = NLA_BINARY,
			    .len = 128 / BITS_PER_BYTE },
	[TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
				 .len = 128 / BITS_PER_BYTE },
	[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV6_MIN] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_IPV6_MAX] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
	[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
};

static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
				  struct tc_ct *parm,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct nf_nat_range2 *range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!IS_ENABLED(CONFIG_NF_NAT)) {
		NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
		return -EOPNOTSUPP;
	}

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
		NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
		return -EOPNOTSUPP;
	}

	range = &p->range;
	if (tb[TCA_CT_NAT_IPV4_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];

		p->ipv4_range = true;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.ip =
			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);

		range->max_addr.ip = max_attr ?
				     nla_get_in_addr(max_attr) :
				     range->min_addr.ip;
	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];

		p->ipv4_range = false;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.in6 =
			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);

		range->max_addr.in6 = max_attr ?
				      nla_get_in6_addr(max_attr) :
				      range->min_addr.in6;
	}

	if (tb[TCA_CT_NAT_PORT_MIN]) {
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);

		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
				       range->min_proto.all;
	}

	return 0;
}

static void tcf_ct_set_key_val(struct nlattr **tb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);

	if (!mask)
		return;

	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int tcf_ct_fill_params(struct net *net,
			      struct tcf_ct_params *p,
			      struct tc_ct *parm,
			      struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
	struct nf_conntrack_zone zone;
	struct nf_conn *tmpl;
	int err;

	p->zone = NF_CT_DEFAULT_ZONE_ID;

	tcf_ct_set_key_val(tb,
			   &p->ct_action, TCA_CT_ACTION,
			   NULL, TCA_CT_UNSPEC,
			   sizeof(p->ct_action));

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		return 0;

	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
	if (err)
		return err;

	if (tb[TCA_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   &p->mark, TCA_CT_MARK,
				   &p->mark_mask, TCA_CT_MARK_MASK,
				   sizeof(p->mark));
	}

	if (tb[TCA_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled.");
			return -EOPNOTSUPP;
		}

		if (!tn->labels) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   p->labels, TCA_CT_LABELS,
				   p->labels_mask, TCA_CT_LABELS_MASK,
				   sizeof(p->labels));
	}

	if (tb[TCA_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack zones isn't enabled.");
			return -EOPNOTSUPP;
		}

		tcf_ct_set_key_val(tb,
				   &p->zone, TCA_CT_ZONE,
				   NULL, TCA_CT_UNSPEC,
				   sizeof(p->zone));
	}

	if (p->zone == NF_CT_DEFAULT_ZONE_ID)
		return 0;

	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
	if (!tmpl) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
		return -ENOMEM;
	}
	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	nf_conntrack_get(&tmpl->ct_general);
	p->tmpl = tmpl;

	return 0;
}

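/* Netlink init/replace handler: parse the ct attributes into a fresh
 * tcf_ct_params and swap it in under tcf_lock; the old params, if any,
 * are released via call_rcu().
 */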
static int tcf_ct_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a,
		       int replace, int bind, bool rtnl_held,
		       struct tcf_proto *tp, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);
	struct tcf_ct_params *params = NULL;
	struct nlattr *tb[TCA_CT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_ct *parm;
	struct tcf_ct *c;
	int err, res = 0;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CT_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_CT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (!err) {
		err = tcf_idr_create_from_flags(tn, index, est, a,
						&act_ct_ops, bind, flags);
		if (err) {
			tcf_idr_cleanup(tn, index);
			return err;
		}
		res = ACT_P_CREATED;
	} else {
		if (bind)
			return 0;

		if (!replace) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto cleanup;

	c = to_ct(*a);

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (unlikely(!params)) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = tcf_ct_fill_params(net, params, parm, tb, extack);
	if (err)
		goto cleanup;

	err = tcf_ct_flow_table_get(params);
	if (err)
		goto cleanup;

	spin_lock_bh(&c->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params = rcu_replace_pointer(c->params, params,
				     lockdep_is_held(&c->tcf_lock));
	spin_unlock_bh(&c->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);

	return res;

cleanup:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	kfree(params);
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_ct_cleanup(struct tc_action *a)
{
	struct tcf_ct_params *params;
	struct tcf_ct *c = to_ct(a);

	params = rcu_dereference_protected(c->params, 1);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);
}

static int tcf_ct_dump_key_val(struct sk_buff *skb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	int err;

	if (mask && !memchr_inv(mask, 0, len))
		return 0;

	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;

	if (mask_type != TCA_CT_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}

	return 0;
}

static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
{
	struct nf_nat_range2 *range = &p->range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
		if (p->ipv4_range) {
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
					    range->min_addr.ip))
				return -1;
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
					    range->max_addr.ip))
				return -1;
		} else {
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
					     &range->min_addr.in6))
				return -1;
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
					     &range->max_addr.in6))
				return -1;
		}
	}

	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
				 range->min_proto.all))
			return -1;
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
				 range->max_proto.all))
			return -1;
	}

	return 0;
}

static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
			      int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ct *c = to_ct(a);
	struct tcf_ct_params *p;

	struct tc_ct opt = {
		.index   = c->tcf_index,
		.refcnt  = refcount_read(&c->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&c->tcf_lock);
	p = rcu_dereference_protected(c->params,
				      lockdep_is_held(&c->tcf_lock));
	opt.action = c->tcf_action;

	if (tcf_ct_dump_key_val(skb,
				&p->ct_action, TCA_CT_ACTION,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->ct_action)))
		goto nla_put_failure;

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		goto skip_dump;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    tcf_ct_dump_key_val(skb,
				&p->mark, TCA_CT_MARK,
				&p->mark_mask, TCA_CT_MARK_MASK,
				sizeof(p->mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    tcf_ct_dump_key_val(skb,
				p->labels, TCA_CT_LABELS,
				p->labels_mask, TCA_CT_LABELS_MASK,
				sizeof(p->labels)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    tcf_ct_dump_key_val(skb,
				&p->zone, TCA_CT_ZONE,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->zone)))
		goto nla_put_failure;

	if (tcf_ct_dump_nat(skb, p))
		goto nla_put_failure;

skip_dump:
	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &c->tcf_tm);
	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&c->tcf_lock);

	return skb->len;
nla_put_failure:
	spin_unlock_bh(&c->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_ct_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 const struct tc_action_ops *ops,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_idr_search(tn, a, index);
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_ct *c = to_ct(a);

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}

static struct tc_action_ops act_ct_ops = {
	.kind		=	"ct",
	.id		=	TCA_ID_CT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_ct_act,
	.dump		=	tcf_ct_dump,
	.init		=	tcf_ct_init,
	.cleanup	=	tcf_ct_cleanup,
	.walk		=	tcf_ct_walker,
	.lookup		=	tcf_ct_search,
	.stats_update	=	tcf_stats_update,
	.size		=	sizeof(struct tcf_ct),
};

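/* Per-netns setup: reserve space for 128-bit connlabels. If that fails,
 * label rewriting is disabled for this netns via tn->labels.
 */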
static __net_init int ct_init_net(struct net *net)
{
	unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

	if (nf_connlabels_get(net, n_bits - 1)) {
		tn->labels = false;
		pr_err("act_ct: Failed to set connlabels length");
	} else {
		tn->labels = true;
	}

	return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}

static void __net_exit ct_exit_net(struct list_head *net_list)
{
	struct net *net;

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

		if (tn->labels)
			nf_connlabels_put(net);
	}
	rtnl_unlock();

	tc_action_net_exit(net_list, ct_net_id);
}

static struct pernet_operations ct_net_ops = {
	.init = ct_init_net,
	.exit_batch = ct_exit_net,
	.id   = &ct_net_id,
	.size = sizeof(struct tc_ct_action_net),
};

static int __init ct_init_module(void)
{
	int err;

	act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
	if (!act_ct_wq)
		return -ENOMEM;

	err = tcf_ct_flow_tables_init();
	if (err)
		goto err_tbl_init;

	err = tcf_register_action(&act_ct_ops, &ct_net_ops);
	if (err)
		goto err_register;

	static_branch_inc(&tcf_frag_xmit_count);

	return 0;

err_register:
	tcf_ct_flow_tables_uninit();
err_tbl_init:
	destroy_workqueue(act_ct_wq);
	return err;
}

static void __exit ct_cleanup_module(void)
{
	static_branch_dec(&tcf_frag_xmit_count);
	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
	tcf_ct_flow_tables_uninit();
	destroy_workqueue(act_ct_wq);
}

module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");