// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <uapi/linux/tc_act/tc_pedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/flow_offload.h>
#include <net/netfilter/nf_flow_table.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>
#include <linux/xarray.h>
#include <linux/if_macvlan.h>
#include <linux/debugfs.h>

#include "lib/fs_chains.h"
#include "en/tc_ct.h"
#include "en/tc/ct_fs.h"
#include "en/tc_priv.h"
#include "en/mod_hdr.h"
#include "en/mapping.h"
#include "en/tc/post_act.h"
#include "en.h"
#include "en_tc.h"
#include "en_rep.h"
#include "fs_core.h"

#define MLX5_CT_STATE_ESTABLISHED_BIT BIT(1)
#define MLX5_CT_STATE_TRK_BIT BIT(2)
#define MLX5_CT_STATE_NAT_BIT BIT(3)
#define MLX5_CT_STATE_REPLY_BIT BIT(4)
#define MLX5_CT_STATE_RELATED_BIT BIT(5)
#define MLX5_CT_STATE_INVALID_BIT BIT(6)
#define MLX5_CT_STATE_NEW_BIT BIT(7)

#define MLX5_CT_LABELS_BITS MLX5_REG_MAPPING_MBITS(LABELS_TO_REG)
#define MLX5_CT_LABELS_MASK MLX5_REG_MAPPING_MASK(LABELS_TO_REG)

/* Statically allocate modify actions for
 * ipv6 and port nat (5) + tuple fields (4) + nic mode zone restore (1) = 10.
 * This will be increased dynamically if needed (for the ipv6 snat + dnat).
 */
#define MLX5_CT_MIN_MOD_ACTS 10

#define ct_dbg(fmt, args...)\
	netdev_dbg(ct_priv->netdev, "ct_debug: " fmt "\n", ##args)

struct mlx5_tc_ct_debugfs {
	struct {
		atomic_t offloaded;
		atomic_t rx_dropped;
	} stats;

	struct dentry *root;
};

struct mlx5_tc_ct_priv {
	struct mlx5_core_dev *dev;
	struct mlx5e_priv *priv;
	const struct net_device *netdev;
	struct mod_hdr_tbl *mod_hdr_tbl;
	struct xarray tuple_ids;
	struct rhashtable zone_ht;
	struct rhashtable ct_tuples_ht;
	struct rhashtable ct_tuples_nat_ht;
	struct mlx5_flow_table *ct;
	struct mlx5_flow_table *ct_nat;
	struct mlx5e_post_act *post_act;
	struct mutex control_lock; /* guards parallel adds/dels */
	struct mapping_ctx *zone_mapping;
	struct mapping_ctx *labels_mapping;
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5_fs_chains *chains;
	struct mlx5_ct_fs *fs;
	struct mlx5_ct_fs_ops *fs_ops;
	spinlock_t ht_lock; /* protects ft entries */
	struct workqueue_struct *wq;

	struct mlx5_tc_ct_debugfs debugfs;
};

struct mlx5_ct_zone_rule {
	struct mlx5_ct_fs_rule *rule;
	struct mlx5e_mod_hdr_handle *mh;
	struct mlx5_flow_attr *attr;
	bool nat;
};

struct mlx5_tc_ct_pre {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *flow_grp;
	struct mlx5_flow_group *miss_grp;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_handle *miss_rule;
	struct mlx5_modify_hdr *modify_hdr;
};

struct mlx5_ct_ft {
	struct rhash_head node;
	u16 zone;
	u32 zone_restore_id;
	refcount_t refcount;
	struct nf_flowtable *nf_ft;
	struct mlx5_tc_ct_priv *ct_priv;
	struct rhashtable ct_entries_ht;
	struct mlx5_tc_ct_pre pre_ct;
	struct mlx5_tc_ct_pre pre_ct_nat;
};

struct mlx5_ct_tuple {
	u16 addr_type;
	__be16 n_proto;
	u8 ip_proto;
	struct {
		union {
			__be32 src_v4;
			struct in6_addr src_v6;
		};
		union {
			__be32 dst_v4;
			struct in6_addr dst_v6;
		};
	} ip;
	struct {
		__be16 src;
		__be16 dst;
	} port;

	u16 zone;
};

struct mlx5_ct_counter {
	struct mlx5_fc *counter;
	refcount_t refcount;
	bool is_shared;
};

enum {
	MLX5_CT_ENTRY_FLAG_VALID,
};

struct mlx5_ct_entry {
	struct rhash_head node;
	struct rhash_head tuple_node;
	struct rhash_head tuple_nat_node;
	struct mlx5_ct_counter *counter;
	unsigned long cookie;
	unsigned long restore_cookie;
	struct mlx5_ct_tuple tuple;
	struct mlx5_ct_tuple tuple_nat;
	struct mlx5_ct_zone_rule zone_rules[2];

	struct mlx5_tc_ct_priv *ct_priv;
	struct work_struct work;

	refcount_t refcnt;
	unsigned long flags;
};

static void
mlx5_tc_ct_entry_destroy_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
				 struct mlx5_flow_attr *attr,
				 struct mlx5e_mod_hdr_handle *mh);

static const struct rhashtable_params cts_ht_params = {
	.head_offset = offsetof(struct mlx5_ct_entry, node),
	.key_offset = offsetof(struct mlx5_ct_entry, cookie),
	.key_len = sizeof(((struct mlx5_ct_entry *)0)->cookie),
	.automatic_shrinking = true,
	.min_size = 16 * 1024,
};

static const struct rhashtable_params zone_params = {
	.head_offset = offsetof(struct mlx5_ct_ft, node),
	.key_offset = offsetof(struct mlx5_ct_ft, zone),
	.key_len = sizeof(((struct mlx5_ct_ft *)0)->zone),
	.automatic_shrinking = true,
};

static const struct rhashtable_params tuples_ht_params = {
	.head_offset = offsetof(struct mlx5_ct_entry, tuple_node),
	.key_offset = offsetof(struct mlx5_ct_entry, tuple),
	.key_len = sizeof(((struct mlx5_ct_entry *)0)->tuple),
	.automatic_shrinking = true,
	.min_size = 16 * 1024,
};

static const struct rhashtable_params tuples_nat_ht_params = {
	.head_offset = offsetof(struct mlx5_ct_entry, tuple_nat_node),
	.key_offset = offsetof(struct mlx5_ct_entry, tuple_nat),
	.key_len = sizeof(((struct mlx5_ct_entry *)0)->tuple_nat),
	.automatic_shrinking = true,
	.min_size = 16 * 1024,
};

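/* An entry is NAT'ed iff its tuple_nat_node was hashed into ct_tuples_nat_ht,
 * which leaves the rhash node's next pointer non-NULL.
 */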
static bool
mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
{
	return !!(entry->tuple_nat_node.next);
}

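/* Conntrack labels are 128 bits wide and don't fit in a register; map them to
 * a smaller id instead. An all-zero label uses id 0 and needs no mapping.
 */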
static int
mlx5_get_label_mapping(struct mlx5_tc_ct_priv *ct_priv,
		       u32 *labels, u32 *id)
{
	if (!memchr_inv(labels, 0, sizeof(u32) * 4)) {
		*id = 0;
		return 0;
	}

	if (mapping_add(ct_priv->labels_mapping, labels, id))
		return -EOPNOTSUPP;

	return 0;
}

static void
mlx5_put_label_mapping(struct mlx5_tc_ct_priv *ct_priv, u32 id)
{
	if (id)
		mapping_remove(ct_priv->labels_mapping, id);
}

static int
mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
{
	struct flow_match_control control;
	struct flow_match_basic basic;

	flow_rule_match_basic(rule, &basic);
	flow_rule_match_control(rule, &control);

	tuple->n_proto = basic.key->n_proto;
	tuple->ip_proto = basic.key->ip_proto;
	tuple->addr_type = control.key->addr_type;

	if (tuple->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		tuple->ip.src_v4 = match.key->src;
		tuple->ip.dst_v4 = match.key->dst;
	} else if (tuple->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		tuple->ip.src_v6 = match.key->src;
		tuple->ip.dst_v6 = match.key->dst;
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		switch (tuple->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
			tuple->port.src = match.key->src;
			tuple->port.dst = match.key->dst;
			break;
		default:
			return -EOPNOTSUPP;
		}
	} else {
		if (tuple->ip_proto != IPPROTO_GRE)
			return -EOPNOTSUPP;
	}

	return 0;
}

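/* Derive the post-NAT tuple by overlaying the rule's mangle actions onto the
 * tuple fields they rewrite.
 */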
static int
mlx5_tc_ct_rule_to_tuple_nat(struct mlx5_ct_tuple *tuple,
			     struct flow_rule *rule)
{
	struct flow_action *flow_action = &rule->action;
	struct flow_action_entry *act;
	u32 offset, val, ip6_offset;
	int i;

	flow_action_for_each(i, act, flow_action) {
		if (act->id != FLOW_ACTION_MANGLE)
			continue;

		offset = act->mangle.offset;
		val = act->mangle.val;
		switch (act->mangle.htype) {
		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
			if (offset == offsetof(struct iphdr, saddr))
				tuple->ip.src_v4 = cpu_to_be32(val);
			else if (offset == offsetof(struct iphdr, daddr))
				tuple->ip.dst_v4 = cpu_to_be32(val);
			else
				return -EOPNOTSUPP;
			break;

		case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
			ip6_offset = (offset - offsetof(struct ipv6hdr, saddr));
			ip6_offset /= 4;
			if (ip6_offset < 4)
				tuple->ip.src_v6.s6_addr32[ip6_offset] = cpu_to_be32(val);
			else if (ip6_offset < 8)
				tuple->ip.dst_v6.s6_addr32[ip6_offset - 4] = cpu_to_be32(val);
			else
				return -EOPNOTSUPP;
			break;

		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
			if (offset == offsetof(struct tcphdr, source))
				tuple->port.src = cpu_to_be16(val);
			else if (offset == offsetof(struct tcphdr, dest))
				tuple->port.dst = cpu_to_be16(val);
			else
				return -EOPNOTSUPP;
			break;

		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
			if (offset == offsetof(struct udphdr, source))
				tuple->port.src = cpu_to_be16(val);
			else if (offset == offsetof(struct udphdr, dest))
				tuple->port.dst = cpu_to_be16(val);
			else
				return -EOPNOTSUPP;
			break;

		default:
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

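/* Resolve the ingress device to a flow_source hint for steering, recursing
 * through vlan/macvlan upper devices to the underlying real device.
 */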
static int
mlx5_tc_ct_get_flow_source_match(struct mlx5_tc_ct_priv *ct_priv,
				 struct net_device *ndev)
{
	struct mlx5e_priv *other_priv = netdev_priv(ndev);
	struct mlx5_core_dev *mdev = ct_priv->dev;
	bool vf_rep, uplink_rep;

	vf_rep = mlx5e_eswitch_vf_rep(ndev) && mlx5_same_hw_devs(mdev, other_priv->mdev);
	uplink_rep = mlx5e_eswitch_uplink_rep(ndev) && mlx5_same_hw_devs(mdev, other_priv->mdev);

	if (vf_rep)
		return MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
	if (uplink_rep)
		return MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
	if (is_vlan_dev(ndev))
		return mlx5_tc_ct_get_flow_source_match(ct_priv, vlan_dev_real_dev(ndev));
	if (netif_is_macvlan(ndev))
		return mlx5_tc_ct_get_flow_source_match(ct_priv, macvlan_dev_real_dev(ndev));
	if (mlx5e_get_tc_tun(ndev) || netif_is_lag_master(ndev))
		return MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;

	return MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT;
}

static int
mlx5_tc_ct_set_tuple_match(struct mlx5_tc_ct_priv *ct_priv,
			   struct mlx5_flow_spec *spec,
			   struct flow_rule *rule)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		mlx5e_tc_set_ethertype(ct_priv->dev, &match, true, headers_c, headers_v);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 match.mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 match.key->ip_proto);

		ip_proto = match.key->ip_proto;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &match.mask->src, sizeof(match.mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &match.key->src, sizeof(match.key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &match.mask->dst, sizeof(match.mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &match.key->dst, sizeof(match.key->dst));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.mask->src, sizeof(match.mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.key->src, sizeof(match.key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.mask->dst, sizeof(match.mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.key->dst, sizeof(match.key->dst));
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(match.mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(match.key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(match.mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(match.key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(match.mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(match.key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(match.mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(match.key->dst));
			break;
		default:
			break;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp match;

		flow_rule_match_tcp(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
			 ntohs(match.mask->flags));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
			 ntohs(match.key->flags));
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
		struct flow_match_meta match;

		flow_rule_match_meta(rule, &match);

		if (match.key->ingress_ifindex & match.mask->ingress_ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(&init_net, match.key->ingress_ifindex);
			if (dev && MLX5_CAP_ESW_FLOWTABLE(ct_priv->dev, flow_source))
				spec->flow_context.flow_source =
					mlx5_tc_ct_get_flow_source_match(ct_priv, dev);

			dev_put(dev);
		}
	}

	return 0;
}

static void
mlx5_tc_ct_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
{
	if (entry->counter->is_shared &&
	    !refcount_dec_and_test(&entry->counter->refcount))
		return;

	mlx5_fc_destroy(ct_priv->dev, entry->counter->counter);
	kfree(entry->counter);
}

static void
mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
			  struct mlx5_ct_entry *entry,
			  bool nat)
{
	struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
	struct mlx5_flow_attr *attr = zone_rule->attr;

	ct_dbg("Deleting ct entry rule in zone %d", entry->tuple.zone);

	ct_priv->fs_ops->ct_rule_del(ct_priv->fs, zone_rule->rule);
	mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh);
	mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
	kfree(attr);
}

static void
mlx5_tc_ct_entry_del_rules(struct mlx5_tc_ct_priv *ct_priv,
			   struct mlx5_ct_entry *entry)
{
	mlx5_tc_ct_entry_del_rule(ct_priv, entry, true);
	mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);

	atomic_dec(&ct_priv->debugfs.stats.offloaded);
}

static struct flow_action_entry *
mlx5_tc_ct_get_ct_metadata_action(struct flow_rule *flow_rule)
{
	struct flow_action *flow_action = &flow_rule->action;
	struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, flow_action) {
		if (act->id == FLOW_ACTION_CT_METADATA)
			return act;
	}

	return NULL;
}

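/* Set the ct metadata (ct_state, mark, label mapping id, zone restore id) into
 * registers so it can be restored to the skb on a miss after hardware hit
 * this entry.
 */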
static int
mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
			       struct mlx5e_tc_mod_hdr_acts *mod_acts,
			       u8 ct_state,
			       u32 mark,
			       u32 labels_id,
			       u8 zone_restore_id)
{
	enum mlx5_flow_namespace_type ns = ct_priv->ns_type;
	struct mlx5_core_dev *dev = ct_priv->dev;
	int err;

	err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
					CTSTATE_TO_REG, ct_state);
	if (err)
		return err;

	err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
					MARK_TO_REG, mark);
	if (err)
		return err;

	err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
					LABELS_TO_REG, labels_id);
	if (err)
		return err;

	err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
					ZONE_RESTORE_TO_REG, zone_restore_id);
	if (err)
		return err;

	/* Make another copy of zone id in reg_b for
	 * NIC rx flows since we don't copy reg_c1 to
	 * reg_b upon miss.
	 */
	if (ns != MLX5_FLOW_NAMESPACE_FDB) {
		err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
						NIC_ZONE_RESTORE_TO_REG, zone_restore_id);
		if (err)
			return err;
	}
	return 0;
}

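/* Translate a single pedit/mangle action into one MLX5_ACTION_TYPE_SET
 * modify-header action on the matching packet field.
 */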
static int
mlx5_tc_ct_parse_mangle_to_mod_act(struct flow_action_entry *act,
				   char *modact)
{
	u32 offset = act->mangle.offset, field;

	switch (act->mangle.htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		MLX5_SET(set_action_in, modact, length, 0);
		if (offset == offsetof(struct iphdr, saddr))
			field = MLX5_ACTION_IN_FIELD_OUT_SIPV4;
		else if (offset == offsetof(struct iphdr, daddr))
			field = MLX5_ACTION_IN_FIELD_OUT_DIPV4;
		else
			return -EOPNOTSUPP;
		break;

	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		MLX5_SET(set_action_in, modact, length, 0);
		if (offset == offsetof(struct ipv6hdr, saddr) + 12)
			field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0;
		else if (offset == offsetof(struct ipv6hdr, saddr) + 8)
			field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32;
		else if (offset == offsetof(struct ipv6hdr, saddr) + 4)
			field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64;
		else if (offset == offsetof(struct ipv6hdr, saddr))
			field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96;
		else if (offset == offsetof(struct ipv6hdr, daddr) + 12)
			field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0;
		else if (offset == offsetof(struct ipv6hdr, daddr) + 8)
			field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32;
		else if (offset == offsetof(struct ipv6hdr, daddr) + 4)
			field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64;
		else if (offset == offsetof(struct ipv6hdr, daddr))
			field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96;
		else
			return -EOPNOTSUPP;
		break;

	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		MLX5_SET(set_action_in, modact, length, 16);
		if (offset == offsetof(struct tcphdr, source))
			field = MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT;
		else if (offset == offsetof(struct tcphdr, dest))
			field = MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT;
		else
			return -EOPNOTSUPP;
		break;

	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		MLX5_SET(set_action_in, modact, length, 16);
		if (offset == offsetof(struct udphdr, source))
			field = MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT;
		else if (offset == offsetof(struct udphdr, dest))
			field = MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT;
		else
			return -EOPNOTSUPP;
		break;

	default:
		return -EOPNOTSUPP;
	}

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, offset, 0);
	MLX5_SET(set_action_in, modact, field, field);
	MLX5_SET(set_action_in, modact, data, act->mangle.val);

	return 0;
}

static int
mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
			    struct flow_rule *flow_rule,
			    struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
	struct flow_action *flow_action = &flow_rule->action;
	struct mlx5_core_dev *mdev = ct_priv->dev;
	struct flow_action_entry *act;
	char *modact;
	int err, i;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_MANGLE: {
			modact = mlx5e_mod_hdr_alloc(mdev, ct_priv->ns_type, mod_acts);
			if (IS_ERR(modact))
				return PTR_ERR(modact);

			err = mlx5_tc_ct_parse_mangle_to_mod_act(act, modact);
			if (err)
				return err;

			mod_acts->num_actions++;
		}
		break;

		case FLOW_ACTION_CT_METADATA:
			/* Handled earlier */
			continue;
		default:
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

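/* Build the modify header for a ct entry rule. NAT rules that actually rewrite
 * headers get a dedicated modify header object; all other rules only set
 * registers and share modify headers through the mod_hdr table.
 */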
static int
mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
				struct mlx5_flow_attr *attr,
				struct flow_rule *flow_rule,
				struct mlx5e_mod_hdr_handle **mh,
				u8 zone_restore_id, bool nat_table, bool has_nat)
{
	DECLARE_MOD_HDR_ACTS_ACTIONS(actions_arr, MLX5_CT_MIN_MOD_ACTS);
	DECLARE_MOD_HDR_ACTS(mod_acts, actions_arr);
	struct flow_action_entry *meta;
	enum ip_conntrack_info ctinfo;
	u16 ct_state = 0;
	int err;

	meta = mlx5_tc_ct_get_ct_metadata_action(flow_rule);
	if (!meta)
		return -EOPNOTSUPP;
	ctinfo = meta->ct_metadata.cookie & NFCT_INFOMASK;

	err = mlx5_get_label_mapping(ct_priv, meta->ct_metadata.labels,
				     &attr->ct_attr.ct_labels_id);
	if (err)
		return -EOPNOTSUPP;
	if (nat_table) {
		if (has_nat) {
			err = mlx5_tc_ct_entry_create_nat(ct_priv, flow_rule, &mod_acts);
			if (err)
				goto err_mapping;
		}

		ct_state |= MLX5_CT_STATE_NAT_BIT;
	}

	ct_state |= MLX5_CT_STATE_TRK_BIT;
	ct_state |= ctinfo == IP_CT_NEW ? MLX5_CT_STATE_NEW_BIT : MLX5_CT_STATE_ESTABLISHED_BIT;
	ct_state |= meta->ct_metadata.orig_dir ? 0 : MLX5_CT_STATE_REPLY_BIT;
	err = mlx5_tc_ct_entry_set_registers(ct_priv, &mod_acts,
					     ct_state,
					     meta->ct_metadata.mark,
					     attr->ct_attr.ct_labels_id,
					     zone_restore_id);
	if (err)
		goto err_mapping;

	if (nat_table && has_nat) {
		attr->modify_hdr = mlx5_modify_header_alloc(ct_priv->dev, ct_priv->ns_type,
							    mod_acts.num_actions,
							    mod_acts.actions);
		if (IS_ERR(attr->modify_hdr)) {
			err = PTR_ERR(attr->modify_hdr);
			goto err_mapping;
		}

		*mh = NULL;
	} else {
		*mh = mlx5e_mod_hdr_attach(ct_priv->dev,
					   ct_priv->mod_hdr_tbl,
					   ct_priv->ns_type,
					   &mod_acts);
		if (IS_ERR(*mh)) {
			err = PTR_ERR(*mh);
			goto err_mapping;
		}
		attr->modify_hdr = mlx5e_mod_hdr_get(*mh);
	}

	mlx5e_mod_hdr_dealloc(&mod_acts);
	return 0;

err_mapping:
	mlx5e_mod_hdr_dealloc(&mod_acts);
	mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
	return err;
}

static void
mlx5_tc_ct_entry_destroy_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
				 struct mlx5_flow_attr *attr,
				 struct mlx5e_mod_hdr_handle *mh)
{
	if (mh)
		mlx5e_mod_hdr_detach(ct_priv->dev, ct_priv->mod_hdr_tbl, mh);
	else
		mlx5_modify_header_dealloc(ct_priv->dev, attr->modify_hdr);
}

static int
mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
			  struct flow_rule *flow_rule,
			  struct mlx5_ct_entry *entry,
			  bool nat, u8 zone_restore_id)
{
	struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
	struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
	struct mlx5_flow_spec *spec = NULL;
	struct mlx5_flow_attr *attr;
	int err;

	zone_rule->nat = nat;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
	if (!attr) {
		err = -ENOMEM;
		goto err_attr;
	}

	err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule,
					      &zone_rule->mh,
					      zone_restore_id,
					      nat,
					      mlx5_tc_ct_entry_has_nat(entry));
	if (err) {
		ct_dbg("Failed to create ct entry mod hdr");
		goto err_mod_hdr;
	}

	attr->action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
		       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
		       MLX5_FLOW_CONTEXT_ACTION_COUNT;
	attr->dest_chain = 0;
	attr->dest_ft = mlx5e_tc_post_act_get_ft(ct_priv->post_act);
	attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
	if (entry->tuple.ip_proto == IPPROTO_TCP ||
	    entry->tuple.ip_proto == IPPROTO_UDP)
		attr->outer_match_level = MLX5_MATCH_L4;
	else
		attr->outer_match_level = MLX5_MATCH_L3;
	attr->counter = entry->counter->counter;
	attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT;
	if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB)
		attr->esw_attr->in_mdev = priv->mdev;

	mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule);
	mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);

	zone_rule->rule = ct_priv->fs_ops->ct_rule_add(ct_priv->fs, spec, attr, flow_rule);
	if (IS_ERR(zone_rule->rule)) {
		err = PTR_ERR(zone_rule->rule);
		ct_dbg("Failed to add ct entry rule, nat: %d", nat);
		goto err_rule;
	}

	zone_rule->attr = attr;

	kvfree(spec);
	ct_dbg("Offloaded ct entry rule in zone %d", entry->tuple.zone);

	return 0;

err_rule:
	mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh);
	mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
err_mod_hdr:
	kfree(attr);
err_attr:
	kvfree(spec);
	return err;
}

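/* Replace an offloaded ct entry rule in place: create the new modify header
 * and rule first, then remove the old rule and release the old modify header
 * and label mapping, so the entry is never left without a rule.
 */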
static int
mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
			      struct flow_rule *flow_rule,
			      struct mlx5_ct_entry *entry,
			      bool nat, u8 zone_restore_id)
{
	struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
	struct mlx5_flow_attr *attr = zone_rule->attr, *old_attr;
	struct mlx5e_mod_hdr_handle *mh;
	struct mlx5_ct_fs_rule *rule;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	old_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
	if (!old_attr) {
		err = -ENOMEM;
		goto err_attr;
	}
	*old_attr = *attr;

	err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, &mh, zone_restore_id,
					      nat, mlx5_tc_ct_entry_has_nat(entry));
	if (err) {
		ct_dbg("Failed to create ct entry mod hdr");
		goto err_mod_hdr;
	}

	mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule);
	mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);

	rule = ct_priv->fs_ops->ct_rule_add(ct_priv->fs, spec, attr, flow_rule);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		ct_dbg("Failed to add replacement ct entry rule, nat: %d", nat);
		goto err_rule;
	}

	ct_priv->fs_ops->ct_rule_del(ct_priv->fs, zone_rule->rule);
	zone_rule->rule = rule;
	mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, old_attr, zone_rule->mh);
	zone_rule->mh = mh;
	mlx5_put_label_mapping(ct_priv, old_attr->ct_attr.ct_labels_id);

	kfree(old_attr);
	kvfree(spec);
	ct_dbg("Replaced ct entry rule in zone %d", entry->tuple.zone);

	return 0;

err_rule:
	mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, mh);
	mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
err_mod_hdr:
	kfree(old_attr);
err_attr:
	kvfree(spec);
	return err;
}

static bool
mlx5_tc_ct_entry_valid(struct mlx5_ct_entry *entry)
{
	return test_bit(MLX5_CT_ENTRY_FLAG_VALID, &entry->flags);
}

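/* Look up an entry by tuple, first in the plain tuples table and then in the
 * NAT tuples table, taking a reference only on valid (fully offloaded)
 * entries. Returns ERR_PTR(-EINVAL) if a matching entry exists but can't be
 * used, NULL if there is no match.
 */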
static struct mlx5_ct_entry *
mlx5_tc_ct_entry_get(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_tuple *tuple)
{
	struct mlx5_ct_entry *entry;

	entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, tuple,
				       tuples_ht_params);
	if (entry && mlx5_tc_ct_entry_valid(entry) &&
	    refcount_inc_not_zero(&entry->refcnt)) {
		return entry;
	} else if (!entry) {
		entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_nat_ht,
					       tuple, tuples_nat_ht_params);
		if (entry && mlx5_tc_ct_entry_valid(entry) &&
		    refcount_inc_not_zero(&entry->refcnt))
			return entry;
	}

	return entry ? ERR_PTR(-EINVAL) : NULL;
}

static void mlx5_tc_ct_entry_remove_from_tuples(struct mlx5_ct_entry *entry)
{
	struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv;

	rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
			       &entry->tuple_nat_node,
			       tuples_nat_ht_params);
	rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
			       tuples_ht_params);
}

static void mlx5_tc_ct_entry_del(struct mlx5_ct_entry *entry)
{
	struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv;

	mlx5_tc_ct_entry_del_rules(ct_priv, entry);

	spin_lock_bh(&ct_priv->ht_lock);
	mlx5_tc_ct_entry_remove_from_tuples(entry);
	spin_unlock_bh(&ct_priv->ht_lock);

	mlx5_tc_ct_counter_put(ct_priv, entry);
	kfree(entry);
}

static void
mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry)
{
	if (!refcount_dec_and_test(&entry->refcnt))
		return;

	mlx5_tc_ct_entry_del(entry);
}

static void mlx5_tc_ct_entry_del_work(struct work_struct *work)
{
	struct mlx5_ct_entry *entry = container_of(work, struct mlx5_ct_entry, work);

	mlx5_tc_ct_entry_del(entry);
}

static void
__mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry)
{
	if (!refcount_dec_and_test(&entry->refcnt))
		return;

	INIT_WORK(&entry->work, mlx5_tc_ct_entry_del_work);
	queue_work(entry->ct_priv->wq, &entry->work);
}

static struct mlx5_ct_counter *
mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv)
{
	struct mlx5_ct_counter *counter;
	int ret;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	counter->is_shared = false;
	counter->counter = mlx5_fc_create_ex(ct_priv->dev, true);
	if (IS_ERR(counter->counter)) {
		ct_dbg("Failed to create counter for ct entry");
		ret = PTR_ERR(counter->counter);
		kfree(counter);
		return ERR_PTR(ret);
	}

	return counter;
}

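/* Without per-flow accounting, both directions of a connection share one
 * counter: look up the reverse tuple's entry and reuse its counter, or fall
 * back to allocating a fresh shared counter if the reverse entry isn't
 * offloaded yet.
 */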
1029eed38eeeSOz Shlomo static struct mlx5_ct_counter *
mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv * ct_priv,struct mlx5_ct_entry * entry)10301edae233SOz Shlomo mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
10311edae233SOz Shlomo 			      struct mlx5_ct_entry *entry)
10321edae233SOz Shlomo {
10331edae233SOz Shlomo 	struct mlx5_ct_tuple rev_tuple = entry->tuple;
1034eed38eeeSOz Shlomo 	struct mlx5_ct_counter *shared_counter;
10351edae233SOz Shlomo 	struct mlx5_ct_entry *rev_entry;
10361edae233SOz Shlomo 
10371edae233SOz Shlomo 	/* get the reversed tuple */
10380164a9bdSYihao Han 	swap(rev_tuple.port.src, rev_tuple.port.dst);
10391edae233SOz Shlomo 
10401edae233SOz Shlomo 	if (rev_tuple.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
10411edae233SOz Shlomo 		__be32 tmp_addr = rev_tuple.ip.src_v4;
10421edae233SOz Shlomo 
10431edae233SOz Shlomo 		rev_tuple.ip.src_v4 = rev_tuple.ip.dst_v4;
10441edae233SOz Shlomo 		rev_tuple.ip.dst_v4 = tmp_addr;
10451edae233SOz Shlomo 	} else if (rev_tuple.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
10461edae233SOz Shlomo 		struct in6_addr tmp_addr = rev_tuple.ip.src_v6;
10471edae233SOz Shlomo 
10481edae233SOz Shlomo 		rev_tuple.ip.src_v6 = rev_tuple.ip.dst_v6;
10491edae233SOz Shlomo 		rev_tuple.ip.dst_v6 = tmp_addr;
10501edae233SOz Shlomo 	} else {
10511edae233SOz Shlomo 		return ERR_PTR(-EOPNOTSUPP);
10521edae233SOz Shlomo 	}
10531edae233SOz Shlomo 
10541edae233SOz Shlomo 	/* Use the same counter as the reverse direction */
1055a2173131SOz Shlomo 	spin_lock_bh(&ct_priv->ht_lock);
1056a2173131SOz Shlomo 	rev_entry = mlx5_tc_ct_entry_get(ct_priv, &rev_tuple);
1057a2173131SOz Shlomo 
1058a2173131SOz Shlomo 	if (IS_ERR(rev_entry)) {
1059a2173131SOz Shlomo 		spin_unlock_bh(&ct_priv->ht_lock);
1060a2173131SOz Shlomo 		goto create_counter;
10611edae233SOz Shlomo 	}
1062a2173131SOz Shlomo 
1063a2173131SOz Shlomo 	if (rev_entry && refcount_inc_not_zero(&rev_entry->counter->refcount)) {
106474097a0dSRoi Dayan 		ct_dbg("Using shared counter entry=0x%p rev=0x%p", entry, rev_entry);
1065a2173131SOz Shlomo 		shared_counter = rev_entry->counter;
1066a2173131SOz Shlomo 		spin_unlock_bh(&ct_priv->ht_lock);
1067a2173131SOz Shlomo 
1068a2173131SOz Shlomo 		mlx5_tc_ct_entry_put(rev_entry);
1069a2173131SOz Shlomo 		return shared_counter;
10701edae233SOz Shlomo 	}
1071a2173131SOz Shlomo 
1072a2173131SOz Shlomo 	spin_unlock_bh(&ct_priv->ht_lock);
1073a2173131SOz Shlomo 
1074a2173131SOz Shlomo create_counter:
10751edae233SOz Shlomo 
1076eed38eeeSOz Shlomo 	shared_counter = mlx5_tc_ct_counter_create(ct_priv);
1077902c0245SSaeed Mahameed 	if (IS_ERR(shared_counter))
1078902c0245SSaeed Mahameed 		return shared_counter;
10791edae233SOz Shlomo 
1080eed38eeeSOz Shlomo 	shared_counter->is_shared = true;
10811edae233SOz Shlomo 	refcount_set(&shared_counter->refcount, 1);
10821edae233SOz Shlomo 	return shared_counter;
10831edae233SOz Shlomo }
10841edae233SOz Shlomo 
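/* Offload both the original and the NAT rule for a ct entry. A dedicated
 * counter is used when conntrack accounting is enabled, otherwise the counter
 * is shared with the reverse-direction entry.
 */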
1085ac991b48SPaul Blakey static int
1086ac991b48SPaul Blakey mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
1087ac991b48SPaul Blakey 			   struct flow_rule *flow_rule,
10888f5b3c3eSPaul Blakey 			   struct mlx5_ct_entry *entry,
10898f5b3c3eSPaul Blakey 			   u8 zone_restore_id)
1090ac991b48SPaul Blakey {
1091ac991b48SPaul Blakey 	int err;
1092ac991b48SPaul Blakey 
1093eed38eeeSOz Shlomo 	if (nf_ct_acct_enabled(dev_net(ct_priv->netdev)))
1094eed38eeeSOz Shlomo 		entry->counter = mlx5_tc_ct_counter_create(ct_priv);
1095eed38eeeSOz Shlomo 	else
1096eed38eeeSOz Shlomo 		entry->counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
1097eed38eeeSOz Shlomo 
1098eed38eeeSOz Shlomo 	if (IS_ERR(entry->counter)) {
1099eed38eeeSOz Shlomo 		err = PTR_ERR(entry->counter);
1100ac991b48SPaul Blakey 		return err;
1101ac991b48SPaul Blakey 	}
1102ac991b48SPaul Blakey 
11038f5b3c3eSPaul Blakey 	err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false,
11048f5b3c3eSPaul Blakey 					zone_restore_id);
1105ac991b48SPaul Blakey 	if (err)
1106ac991b48SPaul Blakey 		goto err_orig;
1107ac991b48SPaul Blakey 
11088f5b3c3eSPaul Blakey 	err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true,
11098f5b3c3eSPaul Blakey 					zone_restore_id);
1110ac991b48SPaul Blakey 	if (err)
1111ac991b48SPaul Blakey 		goto err_nat;
1112ac991b48SPaul Blakey 
111377422a8fSSaeed Mahameed 	atomic_inc(&ct_priv->debugfs.stats.offloaded);
1114ac991b48SPaul Blakey 	return 0;
1115ac991b48SPaul Blakey 
1116ac991b48SPaul Blakey err_nat:
1117ac991b48SPaul Blakey 	mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
1118ac991b48SPaul Blakey err_orig:
1119eed38eeeSOz Shlomo 	mlx5_tc_ct_counter_put(ct_priv, entry);
1120ac991b48SPaul Blakey 	return err;
1121ac991b48SPaul Blakey }
1122ac991b48SPaul Blakey 
1123ac991b48SPaul Blakey static int
112494ceffb4SVlad Buslov mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv,
112594ceffb4SVlad Buslov 			       struct flow_rule *flow_rule,
112694ceffb4SVlad Buslov 			       struct mlx5_ct_entry *entry,
112794ceffb4SVlad Buslov 			       u8 zone_restore_id)
112894ceffb4SVlad Buslov {
112994ceffb4SVlad Buslov 	int err;
113094ceffb4SVlad Buslov 
113194ceffb4SVlad Buslov 	err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false,
113294ceffb4SVlad Buslov 					    zone_restore_id);
113394ceffb4SVlad Buslov 	if (err)
113494ceffb4SVlad Buslov 		return err;
113594ceffb4SVlad Buslov 
113694ceffb4SVlad Buslov 	err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true,
113794ceffb4SVlad Buslov 					    zone_restore_id);
113894ceffb4SVlad Buslov 	if (err)
113994ceffb4SVlad Buslov 		mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
114094ceffb4SVlad Buslov 	return err;
114194ceffb4SVlad Buslov }
114294ceffb4SVlad Buslov 
114394ceffb4SVlad Buslov static int
114494ceffb4SVlad Buslov mlx5_tc_ct_block_flow_offload_replace(struct mlx5_ct_ft *ft, struct flow_rule *flow_rule,
114594ceffb4SVlad Buslov 				      struct mlx5_ct_entry *entry, unsigned long cookie)
114694ceffb4SVlad Buslov {
114794ceffb4SVlad Buslov 	struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
114894ceffb4SVlad Buslov 	int err;
114994ceffb4SVlad Buslov 
115094ceffb4SVlad Buslov 	err = mlx5_tc_ct_entry_replace_rules(ct_priv, flow_rule, entry, ft->zone_restore_id);
115194ceffb4SVlad Buslov 	if (!err)
115294ceffb4SVlad Buslov 		return 0;
115394ceffb4SVlad Buslov 
115494ceffb4SVlad Buslov 	/* If failed to update the entry, then look it up again under ht_lock
115594ceffb4SVlad Buslov 	 * protection and properly delete it.
115694ceffb4SVlad Buslov 	 */
115794ceffb4SVlad Buslov 	spin_lock_bh(&ct_priv->ht_lock);
115894ceffb4SVlad Buslov 	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
115994ceffb4SVlad Buslov 	if (entry) {
116094ceffb4SVlad Buslov 		rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
116194ceffb4SVlad Buslov 		spin_unlock_bh(&ct_priv->ht_lock);
116294ceffb4SVlad Buslov 		mlx5_tc_ct_entry_put(entry);
116394ceffb4SVlad Buslov 	} else {
116494ceffb4SVlad Buslov 		spin_unlock_bh(&ct_priv->ht_lock);
116594ceffb4SVlad Buslov 	}
116694ceffb4SVlad Buslov 	return err;
116794ceffb4SVlad Buslov }
116894ceffb4SVlad Buslov 
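/* FLOW_CLS_REPLACE handler: update the rules of an already offloaded entry
 * when its ct metadata cookie changed, or allocate a new entry, insert it into
 * the entry/tuple hashtables and offload its original and NAT rules.
 */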
116994ceffb4SVlad Buslov static int
1170ac991b48SPaul Blakey mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
1171ac991b48SPaul Blakey 				  struct flow_cls_offload *flow)
1172ac991b48SPaul Blakey {
1173ac991b48SPaul Blakey 	struct flow_rule *flow_rule = flow_cls_offload_flow_rule(flow);
1174ac991b48SPaul Blakey 	struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
1175ac991b48SPaul Blakey 	struct flow_action_entry *meta_action;
1176ac991b48SPaul Blakey 	unsigned long cookie = flow->cookie;
1177ac991b48SPaul Blakey 	struct mlx5_ct_entry *entry;
1178ac991b48SPaul Blakey 	int err;
1179ac991b48SPaul Blakey 
1180ac991b48SPaul Blakey 	meta_action = mlx5_tc_ct_get_ct_metadata_action(flow_rule);
1181ac991b48SPaul Blakey 	if (!meta_action)
1182ac991b48SPaul Blakey 		return -EOPNOTSUPP;
1183ac991b48SPaul Blakey 
1184a2173131SOz Shlomo 	spin_lock_bh(&ct_priv->ht_lock);
1185a2173131SOz Shlomo 	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
1186a2173131SOz Shlomo 	if (entry && refcount_inc_not_zero(&entry->refcnt)) {
118794ceffb4SVlad Buslov 		if (entry->restore_cookie == meta_action->ct_metadata.cookie) {
1188a2173131SOz Shlomo 			spin_unlock_bh(&ct_priv->ht_lock);
1189a2173131SOz Shlomo 			mlx5_tc_ct_entry_put(entry);
1190a2173131SOz Shlomo 			return -EEXIST;
1191a2173131SOz Shlomo 		}
119294ceffb4SVlad Buslov 		entry->restore_cookie = meta_action->ct_metadata.cookie;
119394ceffb4SVlad Buslov 		spin_unlock_bh(&ct_priv->ht_lock);
119494ceffb4SVlad Buslov 
119594ceffb4SVlad Buslov 		err = mlx5_tc_ct_block_flow_offload_replace(ft, flow_rule, entry, cookie);
119694ceffb4SVlad Buslov 		mlx5_tc_ct_entry_put(entry);
119794ceffb4SVlad Buslov 		return err;
119894ceffb4SVlad Buslov 	}
1199a2173131SOz Shlomo 	spin_unlock_bh(&ct_priv->ht_lock);
1200ac991b48SPaul Blakey 
1201ac991b48SPaul Blakey 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1202ac991b48SPaul Blakey 	if (!entry)
1203ac991b48SPaul Blakey 		return -ENOMEM;
1204ac991b48SPaul Blakey 
1205bc562be9SPaul Blakey 	entry->tuple.zone = ft->zone;
1206ac991b48SPaul Blakey 	entry->cookie = flow->cookie;
12075c6b9460SPaul Blakey 	entry->restore_cookie = meta_action->ct_metadata.cookie;
1208a2173131SOz Shlomo 	refcount_set(&entry->refcnt, 2);
1209a2173131SOz Shlomo 	entry->ct_priv = ct_priv;
1210ac991b48SPaul Blakey 
1211bc562be9SPaul Blakey 	err = mlx5_tc_ct_rule_to_tuple(&entry->tuple, flow_rule);
1212bc562be9SPaul Blakey 	if (err)
1213bc562be9SPaul Blakey 		goto err_set;
1214bc562be9SPaul Blakey 
1215bc562be9SPaul Blakey 	memcpy(&entry->tuple_nat, &entry->tuple, sizeof(entry->tuple));
1216bc562be9SPaul Blakey 	err = mlx5_tc_ct_rule_to_tuple_nat(&entry->tuple_nat, flow_rule);
1217bc562be9SPaul Blakey 	if (err)
1218bc562be9SPaul Blakey 		goto err_set;
1219bc562be9SPaul Blakey 
1220a2173131SOz Shlomo 	spin_lock_bh(&ct_priv->ht_lock);
1221a2173131SOz Shlomo 
1222a2173131SOz Shlomo 	err = rhashtable_lookup_insert_fast(&ft->ct_entries_ht, &entry->node,
1223a2173131SOz Shlomo 					    cts_ht_params);
1224a2173131SOz Shlomo 	if (err)
1225a2173131SOz Shlomo 		goto err_entries;
1226a2173131SOz Shlomo 
1227a2173131SOz Shlomo 	err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_ht,
1228bc562be9SPaul Blakey 					    &entry->tuple_node,
1229bc562be9SPaul Blakey 					    tuples_ht_params);
1230bc562be9SPaul Blakey 	if (err)
1231bc562be9SPaul Blakey 		goto err_tuple;
1232bc562be9SPaul Blakey 
1233bc562be9SPaul Blakey 	if (memcmp(&entry->tuple, &entry->tuple_nat, sizeof(entry->tuple))) {
1234a2173131SOz Shlomo 		err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_nat_ht,
1235bc562be9SPaul Blakey 						    &entry->tuple_nat_node,
1236bc562be9SPaul Blakey 						    tuples_nat_ht_params);
1237bc562be9SPaul Blakey 		if (err)
1238bc562be9SPaul Blakey 			goto err_tuple_nat;
1239bc562be9SPaul Blakey 	}
1240a2173131SOz Shlomo 	spin_unlock_bh(&ct_priv->ht_lock);
1241bc562be9SPaul Blakey 
12428f5b3c3eSPaul Blakey 	err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry,
12438f5b3c3eSPaul Blakey 					 ft->zone_restore_id);
1244ac991b48SPaul Blakey 	if (err)
1245ac991b48SPaul Blakey 		goto err_rules;
1246ac991b48SPaul Blakey 
1247a2173131SOz Shlomo 	set_bit(MLX5_CT_ENTRY_FLAG_VALID, &entry->flags);
1248a2173131SOz Shlomo 	mlx5_tc_ct_entry_put(entry); /* this function reference */
1249ac991b48SPaul Blakey 
1250ac991b48SPaul Blakey 	return 0;
1251ac991b48SPaul Blakey 
1252ac991b48SPaul Blakey err_rules:
1253a2173131SOz Shlomo 	spin_lock_bh(&ct_priv->ht_lock);
1254e2194a17SPaul Blakey 	if (mlx5_tc_ct_entry_has_nat(entry))
1255bc562be9SPaul Blakey 		rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
1256bc562be9SPaul Blakey 				       &entry->tuple_nat_node, tuples_nat_ht_params);
1257bc562be9SPaul Blakey err_tuple_nat:
1258bc562be9SPaul Blakey 	rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
1259bc562be9SPaul Blakey 			       &entry->tuple_node,
1260bc562be9SPaul Blakey 			       tuples_ht_params);
1261bc562be9SPaul Blakey err_tuple:
1262a2173131SOz Shlomo 	rhashtable_remove_fast(&ft->ct_entries_ht,
1263a2173131SOz Shlomo 			       &entry->node,
1264a2173131SOz Shlomo 			       cts_ht_params);
1265a2173131SOz Shlomo err_entries:
1266a2173131SOz Shlomo 	spin_unlock_bh(&ct_priv->ht_lock);
1267bc562be9SPaul Blakey err_set:
1268ac991b48SPaul Blakey 	kfree(entry);
1269a2173131SOz Shlomo 	if (err != -EEXIST)
1270a2173131SOz Shlomo 		netdev_warn(ct_priv->netdev, "Failed to offload ct entry, err: %d\n", err);
1271ac991b48SPaul Blakey 	return err;
1272ac991b48SPaul Blakey }
1273ac991b48SPaul Blakey 
1274ac991b48SPaul Blakey static int
1275ac991b48SPaul Blakey mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft,
1276ac991b48SPaul Blakey 				  struct flow_cls_offload *flow)
1277ac991b48SPaul Blakey {
1278a2173131SOz Shlomo 	struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
1279ac991b48SPaul Blakey 	unsigned long cookie = flow->cookie;
1280ac991b48SPaul Blakey 	struct mlx5_ct_entry *entry;
1281ac991b48SPaul Blakey 
1282a2173131SOz Shlomo 	spin_lock_bh(&ct_priv->ht_lock);
1283a2173131SOz Shlomo 	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
1284a2173131SOz Shlomo 	if (!entry) {
1285a2173131SOz Shlomo 		spin_unlock_bh(&ct_priv->ht_lock);
1286ac991b48SPaul Blakey 		return -ENOENT;
1287a2173131SOz Shlomo 	}
1288ac991b48SPaul Blakey 
1289a2173131SOz Shlomo 	if (!mlx5_tc_ct_entry_valid(entry)) {
1290a2173131SOz Shlomo 		spin_unlock_bh(&ct_priv->ht_lock);
1291a2173131SOz Shlomo 		return -EINVAL;
1292a2173131SOz Shlomo 	}
1293a2173131SOz Shlomo 
1294a2173131SOz Shlomo 	rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
1295a2173131SOz Shlomo 	spin_unlock_bh(&ct_priv->ht_lock);
1296a2173131SOz Shlomo 
1297a2173131SOz Shlomo 	mlx5_tc_ct_entry_put(entry);
1298ac991b48SPaul Blakey 
1299ac991b48SPaul Blakey 	return 0;
1300ac991b48SPaul Blakey }
1301ac991b48SPaul Blakey 
1302ac991b48SPaul Blakey static int
1303ac991b48SPaul Blakey mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
1304ac991b48SPaul Blakey 				    struct flow_cls_offload *f)
1305ac991b48SPaul Blakey {
1306a2173131SOz Shlomo 	struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
1307ac991b48SPaul Blakey 	unsigned long cookie = f->cookie;
1308ac991b48SPaul Blakey 	struct mlx5_ct_entry *entry;
1309ac991b48SPaul Blakey 	u64 lastuse, packets, bytes;
1310ac991b48SPaul Blakey 
1311a2173131SOz Shlomo 	spin_lock_bh(&ct_priv->ht_lock);
1312a2173131SOz Shlomo 	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
1313a2173131SOz Shlomo 	if (!entry) {
1314a2173131SOz Shlomo 		spin_unlock_bh(&ct_priv->ht_lock);
1315ac991b48SPaul Blakey 		return -ENOENT;
1316a2173131SOz Shlomo 	}
1317a2173131SOz Shlomo 
1318a2173131SOz Shlomo 	if (!mlx5_tc_ct_entry_valid(entry) || !refcount_inc_not_zero(&entry->refcnt)) {
1319a2173131SOz Shlomo 		spin_unlock_bh(&ct_priv->ht_lock);
1320a2173131SOz Shlomo 		return -EINVAL;
1321a2173131SOz Shlomo 	}
1322a2173131SOz Shlomo 
1323a2173131SOz Shlomo 	spin_unlock_bh(&ct_priv->ht_lock);
1324ac991b48SPaul Blakey 
1325eed38eeeSOz Shlomo 	mlx5_fc_query_cached(entry->counter->counter, &bytes, &packets, &lastuse);
13264b61d3e8SPo Liu 	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
132793a129ebSJiri Pirko 			  FLOW_ACTION_HW_STATS_DELAYED);
1328ac991b48SPaul Blakey 
1329a2173131SOz Shlomo 	mlx5_tc_ct_entry_put(entry);
1330ac991b48SPaul Blakey 	return 0;
1331ac991b48SPaul Blakey }
1332ac991b48SPaul Blakey 
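/* Callback registered with the nf flowtable: dispatches add/del/stats requests
 * for conntrack entries of this zone's flowtable.
 */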
1333ac991b48SPaul Blakey static int
1334ac991b48SPaul Blakey mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data,
1335ac991b48SPaul Blakey 			      void *cb_priv)
1336ac991b48SPaul Blakey {
1337ac991b48SPaul Blakey 	struct flow_cls_offload *f = type_data;
1338ac991b48SPaul Blakey 	struct mlx5_ct_ft *ft = cb_priv;
1339ac991b48SPaul Blakey 
1340ac991b48SPaul Blakey 	if (type != TC_SETUP_CLSFLOWER)
1341ac991b48SPaul Blakey 		return -EOPNOTSUPP;
1342ac991b48SPaul Blakey 
1343ac991b48SPaul Blakey 	switch (f->command) {
1344ac991b48SPaul Blakey 	case FLOW_CLS_REPLACE:
1345ac991b48SPaul Blakey 		return mlx5_tc_ct_block_flow_offload_add(ft, f);
1346ac991b48SPaul Blakey 	case FLOW_CLS_DESTROY:
1347ac991b48SPaul Blakey 		return mlx5_tc_ct_block_flow_offload_del(ft, f);
1348ac991b48SPaul Blakey 	case FLOW_CLS_STATS:
1349ac991b48SPaul Blakey 		return mlx5_tc_ct_block_flow_offload_stats(ft, f);
1350ac991b48SPaul Blakey 	default:
1351ac991b48SPaul Blakey 		break;
1352e59b254cSZheng Bin 	}
1353ac991b48SPaul Blakey 
1354ac991b48SPaul Blakey 	return -EOPNOTSUPP;
1355ac991b48SPaul Blakey }
1356ac991b48SPaul Blakey 
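/* Dissect an skb into a ct tuple (zone plus 5-tuple). Only TCP/UDP/GRE over
 * IPv4/IPv6 is supported; returns false for anything else.
 */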
1357a8eb919bSPaul Blakey static bool
1358a8eb919bSPaul Blakey mlx5_tc_ct_skb_to_tuple(struct sk_buff *skb, struct mlx5_ct_tuple *tuple,
1359a8eb919bSPaul Blakey 			u16 zone)
1360a8eb919bSPaul Blakey {
1361a8eb919bSPaul Blakey 	struct flow_keys flow_keys;
1362a8eb919bSPaul Blakey 
1363a8eb919bSPaul Blakey 	skb_reset_network_header(skb);
13641918ace1SToshiaki Makita 	skb_flow_dissect_flow_keys(skb, &flow_keys, FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);
1365a8eb919bSPaul Blakey 
1366a8eb919bSPaul Blakey 	tuple->zone = zone;
1367a8eb919bSPaul Blakey 
1368a8eb919bSPaul Blakey 	if (flow_keys.basic.ip_proto != IPPROTO_TCP &&
13691918ace1SToshiaki Makita 	    flow_keys.basic.ip_proto != IPPROTO_UDP &&
13701918ace1SToshiaki Makita 	    flow_keys.basic.ip_proto != IPPROTO_GRE)
1371a8eb919bSPaul Blakey 		return false;
1372a8eb919bSPaul Blakey 
13731918ace1SToshiaki Makita 	if (flow_keys.basic.ip_proto == IPPROTO_TCP ||
13741918ace1SToshiaki Makita 	    flow_keys.basic.ip_proto == IPPROTO_UDP) {
1375a8eb919bSPaul Blakey 		tuple->port.src = flow_keys.ports.src;
1376a8eb919bSPaul Blakey 		tuple->port.dst = flow_keys.ports.dst;
13771918ace1SToshiaki Makita 	}
1378a8eb919bSPaul Blakey 	tuple->n_proto = flow_keys.basic.n_proto;
1379a8eb919bSPaul Blakey 	tuple->ip_proto = flow_keys.basic.ip_proto;
1380a8eb919bSPaul Blakey 
1381a8eb919bSPaul Blakey 	switch (flow_keys.basic.n_proto) {
1382a8eb919bSPaul Blakey 	case htons(ETH_P_IP):
1383a8eb919bSPaul Blakey 		tuple->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1384a8eb919bSPaul Blakey 		tuple->ip.src_v4 = flow_keys.addrs.v4addrs.src;
1385a8eb919bSPaul Blakey 		tuple->ip.dst_v4 = flow_keys.addrs.v4addrs.dst;
1386a8eb919bSPaul Blakey 		break;
1387a8eb919bSPaul Blakey 
1388a8eb919bSPaul Blakey 	case htons(ETH_P_IPV6):
1389a8eb919bSPaul Blakey 		tuple->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1390a8eb919bSPaul Blakey 		tuple->ip.src_v6 = flow_keys.addrs.v6addrs.src;
1391a8eb919bSPaul Blakey 		tuple->ip.dst_v6 = flow_keys.addrs.v6addrs.dst;
1392a8eb919bSPaul Blakey 		break;
1393a8eb919bSPaul Blakey 	default:
1394a8eb919bSPaul Blakey 		goto out;
1395a8eb919bSPaul Blakey 	}
1396a8eb919bSPaul Blakey 
1397a8eb919bSPaul Blakey 	return true;
1398a8eb919bSPaul Blakey 
1399a8eb919bSPaul Blakey out:
1400a8eb919bSPaul Blakey 	return false;
1401a8eb919bSPaul Blakey }
1402a8eb919bSPaul Blakey 
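/* Add a ct_state -trk (untracked) match to the spec. Fails if the spec
 * already matches on +trk.
 */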
140389fbdbaeSSaeed Mahameed int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
14047e36feebSPaul Blakey {
14057e36feebSPaul Blakey 	u32 ctstate = 0, ctstate_mask = 0;
14067e36feebSPaul Blakey 
14077e36feebSPaul Blakey 	mlx5e_tc_match_to_reg_get_match(spec, CTSTATE_TO_REG,
14087e36feebSPaul Blakey 					&ctstate, &ctstate_mask);
140996b5b458SDima Chumak 
141096b5b458SDima Chumak 	if ((ctstate & ctstate_mask) == MLX5_CT_STATE_TRK_BIT)
14117e36feebSPaul Blakey 		return -EOPNOTSUPP;
14127e36feebSPaul Blakey 
14137e36feebSPaul Blakey 	ctstate_mask |= MLX5_CT_STATE_TRK_BIT;
14147e36feebSPaul Blakey 	mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG,
14157e36feebSPaul Blakey 				    ctstate, ctstate_mask);
14167e36feebSPaul Blakey 
14177e36feebSPaul Blakey 	return 0;
14187e36feebSPaul Blakey }
14197e36feebSPaul Blakey 
1420aedd133dSAriel Levkovich void mlx5_tc_ct_match_del(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_attr)
14214c8594adSRoi Dayan {
1422aedd133dSAriel Levkovich 	if (!priv || !ct_attr->ct_labels_id)
14234c8594adSRoi Dayan 		return;
14244c8594adSRoi Dayan 
1425d24f847eSAriel Levkovich 	mlx5_put_label_mapping(priv, ct_attr->ct_labels_id);
14264c8594adSRoi Dayan }
14274c8594adSRoi Dayan 
14287e36feebSPaul Blakey int
1429aedd133dSAriel Levkovich mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
14304c3844d9SPaul Blakey 		     struct mlx5_flow_spec *spec,
14314c3844d9SPaul Blakey 		     struct flow_cls_offload *f,
143254b154ecSEli Britstein 		     struct mlx5_ct_attr *ct_attr,
14334c3844d9SPaul Blakey 		     struct netlink_ext_ack *extack)
14344c3844d9SPaul Blakey {
1435f869bcb0SVlad Buslov 	bool trk, est, untrk, unnew, unest, new, rpl, unrpl, rel, unrel, inv, uninv;
1436d37bd5e8SRoi Dayan 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
14374c3844d9SPaul Blakey 	struct flow_dissector_key_ct *mask, *key;
14384c3844d9SPaul Blakey 	u32 ctstate = 0, ctstate_mask = 0;
14394c3844d9SPaul Blakey 	u16 ct_state_on, ct_state_off;
14404c3844d9SPaul Blakey 	u16 ct_state, ct_state_mask;
14414c3844d9SPaul Blakey 	struct flow_match_ct match;
144254b154ecSEli Britstein 	u32 ct_labels[4];
14434c3844d9SPaul Blakey 
1444d37bd5e8SRoi Dayan 	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT))
14454c3844d9SPaul Blakey 		return 0;
14464c3844d9SPaul Blakey 
1447aedd133dSAriel Levkovich 	if (!priv) {
14484c3844d9SPaul Blakey 		NL_SET_ERR_MSG_MOD(extack,
14494c3844d9SPaul Blakey 				   "offload of ct matching isn't available");
14504c3844d9SPaul Blakey 		return -EOPNOTSUPP;
14514c3844d9SPaul Blakey 	}
14524c3844d9SPaul Blakey 
1453d37bd5e8SRoi Dayan 	flow_rule_match_ct(rule, &match);
14544c3844d9SPaul Blakey 
14554c3844d9SPaul Blakey 	key = match.key;
14564c3844d9SPaul Blakey 	mask = match.mask;
14574c3844d9SPaul Blakey 
14584c3844d9SPaul Blakey 	ct_state = key->ct_state;
14594c3844d9SPaul Blakey 	ct_state_mask = mask->ct_state;
14604c3844d9SPaul Blakey 
14614c3844d9SPaul Blakey 	if (ct_state_mask & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
14624c3844d9SPaul Blakey 			      TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
14636895cb3aSPaul Blakey 			      TCA_FLOWER_KEY_CT_FLAGS_NEW |
1464116c76c5SAriel Levkovich 			      TCA_FLOWER_KEY_CT_FLAGS_REPLY |
1465116c76c5SAriel Levkovich 			      TCA_FLOWER_KEY_CT_FLAGS_RELATED |
1466116c76c5SAriel Levkovich 			      TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
14674c3844d9SPaul Blakey 		NL_SET_ERR_MSG_MOD(extack,
14686895cb3aSPaul Blakey 				   "only ct_state trk, est, new and rpl are supported for offload");
14694c3844d9SPaul Blakey 		return -EOPNOTSUPP;
14704c3844d9SPaul Blakey 	}
14714c3844d9SPaul Blakey 
14724c3844d9SPaul Blakey 	ct_state_on = ct_state & ct_state_mask;
14734c3844d9SPaul Blakey 	ct_state_off = (ct_state & ct_state_mask) ^ ct_state_mask;
14744c3844d9SPaul Blakey 	trk = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
14754c3844d9SPaul Blakey 	new = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_NEW;
14764c3844d9SPaul Blakey 	est = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
14776895cb3aSPaul Blakey 	rpl = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_REPLY;
1478116c76c5SAriel Levkovich 	rel = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_RELATED;
1479116c76c5SAriel Levkovich 	inv = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_INVALID;
14804c3844d9SPaul Blakey 	untrk = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
1481f869bcb0SVlad Buslov 	unnew = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_NEW;
14824c3844d9SPaul Blakey 	unest = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
14836895cb3aSPaul Blakey 	unrpl = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_REPLY;
1484116c76c5SAriel Levkovich 	unrel = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_RELATED;
1485116c76c5SAriel Levkovich 	uninv = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_INVALID;
14864c3844d9SPaul Blakey 
14874c3844d9SPaul Blakey 	ctstate |= trk ? MLX5_CT_STATE_TRK_BIT : 0;
1488f869bcb0SVlad Buslov 	ctstate |= new ? MLX5_CT_STATE_NEW_BIT : 0;
14894c3844d9SPaul Blakey 	ctstate |= est ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
14906895cb3aSPaul Blakey 	ctstate |= rpl ? MLX5_CT_STATE_REPLY_BIT : 0;
14914c3844d9SPaul Blakey 	ctstate_mask |= (untrk || trk) ? MLX5_CT_STATE_TRK_BIT : 0;
1492f869bcb0SVlad Buslov 	ctstate_mask |= (unnew || new) ? MLX5_CT_STATE_NEW_BIT : 0;
14934c3844d9SPaul Blakey 	ctstate_mask |= (unest || est) ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
14946895cb3aSPaul Blakey 	ctstate_mask |= (unrpl || rpl) ? MLX5_CT_STATE_REPLY_BIT : 0;
1495116c76c5SAriel Levkovich 	ctstate_mask |= unrel ? MLX5_CT_STATE_RELATED_BIT : 0;
1496116c76c5SAriel Levkovich 	ctstate_mask |= uninv ? MLX5_CT_STATE_INVALID_BIT : 0;
1497116c76c5SAriel Levkovich 
1498116c76c5SAriel Levkovich 	if (rel) {
1499116c76c5SAriel Levkovich 		NL_SET_ERR_MSG_MOD(extack,
1500116c76c5SAriel Levkovich 				   "matching on ct_state +rel isn't supported");
1501116c76c5SAriel Levkovich 		return -EOPNOTSUPP;
1502116c76c5SAriel Levkovich 	}
1503116c76c5SAriel Levkovich 
1504116c76c5SAriel Levkovich 	if (inv) {
1505116c76c5SAriel Levkovich 		NL_SET_ERR_MSG_MOD(extack,
1506116c76c5SAriel Levkovich 				   "matching on ct_state +inv isn't supported");
1507116c76c5SAriel Levkovich 		return -EOPNOTSUPP;
1508116c76c5SAriel Levkovich 	}
15094c3844d9SPaul Blakey 
15104c3844d9SPaul Blakey 	if (mask->ct_zone)
15118f5b3c3eSPaul Blakey 		mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
15124c3844d9SPaul Blakey 					    key->ct_zone, MLX5_CT_ZONE_MASK);
15134c3844d9SPaul Blakey 	if (ctstate_mask)
15144c3844d9SPaul Blakey 		mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG,
15154c3844d9SPaul Blakey 					    ctstate, ctstate_mask);
15164c3844d9SPaul Blakey 	if (mask->ct_mark)
15174c3844d9SPaul Blakey 		mlx5e_tc_match_to_reg_match(spec, MARK_TO_REG,
15184c3844d9SPaul Blakey 					    key->ct_mark, mask->ct_mark);
151954b154ecSEli Britstein 	if (mask->ct_labels[0] || mask->ct_labels[1] || mask->ct_labels[2] ||
152054b154ecSEli Britstein 	    mask->ct_labels[3]) {
152154b154ecSEli Britstein 		ct_labels[0] = key->ct_labels[0] & mask->ct_labels[0];
152254b154ecSEli Britstein 		ct_labels[1] = key->ct_labels[1] & mask->ct_labels[1];
152354b154ecSEli Britstein 		ct_labels[2] = key->ct_labels[2] & mask->ct_labels[2];
152454b154ecSEli Britstein 		ct_labels[3] = key->ct_labels[3] & mask->ct_labels[3];
1525d24f847eSAriel Levkovich 		if (mlx5_get_label_mapping(priv, ct_labels, &ct_attr->ct_labels_id))
152654b154ecSEli Britstein 			return -EOPNOTSUPP;
152754b154ecSEli Britstein 		mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, ct_attr->ct_labels_id,
152854b154ecSEli Britstein 					    MLX5_CT_LABELS_MASK);
152954b154ecSEli Britstein 	}
15304c3844d9SPaul Blakey 
15314c3844d9SPaul Blakey 	return 0;
15324c3844d9SPaul Blakey }
15334c3844d9SPaul Blakey 
15344c3844d9SPaul Blakey int
1535aedd133dSAriel Levkovich mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
1536c620b772SAriel Levkovich 			struct mlx5_flow_attr *attr,
15374c3844d9SPaul Blakey 			const struct flow_action_entry *act,
15384c3844d9SPaul Blakey 			struct netlink_ext_ack *extack)
15394c3844d9SPaul Blakey {
1540aedd133dSAriel Levkovich 	if (!priv) {
15414c3844d9SPaul Blakey 		NL_SET_ERR_MSG_MOD(extack,
15424c3844d9SPaul Blakey 				   "offload of ct action isn't available");
15434c3844d9SPaul Blakey 		return -EOPNOTSUPP;
15444c3844d9SPaul Blakey 	}
15454c3844d9SPaul Blakey 
154608fe94ecSPaul Blakey 	attr->ct_attr.ct_action |= act->ct.action; /* So we can have clear + ct */
15474c3844d9SPaul Blakey 	attr->ct_attr.zone = act->ct.zone;
1548*f7a48511SYevgeny Kliteynik 	if (!(act->ct.action & TCA_CT_ACT_CLEAR))
1549ac991b48SPaul Blakey 		attr->ct_attr.nf_ft = act->ct.flow_table;
155067027828SPaul Blakey 	attr->ct_attr.act_miss_cookie = act->miss_cookie;
15514c3844d9SPaul Blakey 
15524c3844d9SPaul Blakey 	return 0;
15534c3844d9SPaul Blakey }
15544c3844d9SPaul Blakey 
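/* Populate a pre_ct table. Both rules set the zone register via the same
 * modify header: the first rule matches packets already tracked in this zone
 * (with the NAT bit for the NAT table) and forwards them to post_act; the
 * miss rule sends everything else to the ct (or ct_nat) table.
 */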
15559102d836SRoi Dayan static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
15569102d836SRoi Dayan 				  struct mlx5_tc_ct_pre *pre_ct,
15579102d836SRoi Dayan 				  bool nat)
15589102d836SRoi Dayan {
15599102d836SRoi Dayan 	struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
15609102d836SRoi Dayan 	struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {};
1561670c239aSAriel Levkovich 	struct mlx5_core_dev *dev = ct_priv->dev;
1562aedd133dSAriel Levkovich 	struct mlx5_flow_table *ft = pre_ct->ft;
15639102d836SRoi Dayan 	struct mlx5_flow_destination dest = {};
15649102d836SRoi Dayan 	struct mlx5_flow_act flow_act = {};
15659102d836SRoi Dayan 	struct mlx5_modify_hdr *mod_hdr;
15669102d836SRoi Dayan 	struct mlx5_flow_handle *rule;
15679102d836SRoi Dayan 	struct mlx5_flow_spec *spec;
15689102d836SRoi Dayan 	u32 ctstate;
15699102d836SRoi Dayan 	u16 zone;
15709102d836SRoi Dayan 	int err;
15719102d836SRoi Dayan 
15729102d836SRoi Dayan 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
15739102d836SRoi Dayan 	if (!spec)
15749102d836SRoi Dayan 		return -ENOMEM;
15759102d836SRoi Dayan 
15769102d836SRoi Dayan 	zone = ct_ft->zone & MLX5_CT_ZONE_MASK;
1577aedd133dSAriel Levkovich 	err = mlx5e_tc_match_to_reg_set(dev, &pre_mod_acts, ct_priv->ns_type,
1578aedd133dSAriel Levkovich 					ZONE_TO_REG, zone);
15799102d836SRoi Dayan 	if (err) {
15809102d836SRoi Dayan 		ct_dbg("Failed to set zone register mapping");
15819102d836SRoi Dayan 		goto err_mapping;
15829102d836SRoi Dayan 	}
15839102d836SRoi Dayan 
1584aedd133dSAriel Levkovich 	mod_hdr = mlx5_modify_header_alloc(dev, ct_priv->ns_type,
15859102d836SRoi Dayan 					   pre_mod_acts.num_actions,
15869102d836SRoi Dayan 					   pre_mod_acts.actions);
15879102d836SRoi Dayan 
15889102d836SRoi Dayan 	if (IS_ERR(mod_hdr)) {
15899102d836SRoi Dayan 		err = PTR_ERR(mod_hdr);
15909102d836SRoi Dayan 		ct_dbg("Failed to create pre ct mod hdr");
15919102d836SRoi Dayan 		goto err_mapping;
15929102d836SRoi Dayan 	}
15939102d836SRoi Dayan 	pre_ct->modify_hdr = mod_hdr;
15949102d836SRoi Dayan 
15959102d836SRoi Dayan 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
15969102d836SRoi Dayan 			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
15979102d836SRoi Dayan 	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
15989102d836SRoi Dayan 	flow_act.modify_hdr = mod_hdr;
15999102d836SRoi Dayan 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
16009102d836SRoi Dayan 
16019102d836SRoi Dayan 	/* add flow rule */
16029102d836SRoi Dayan 	mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
16039102d836SRoi Dayan 				    zone, MLX5_CT_ZONE_MASK);
16049102d836SRoi Dayan 	ctstate = MLX5_CT_STATE_TRK_BIT;
16059102d836SRoi Dayan 	if (nat)
16069102d836SRoi Dayan 		ctstate |= MLX5_CT_STATE_NAT_BIT;
16079102d836SRoi Dayan 	mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG, ctstate, ctstate);
16089102d836SRoi Dayan 
1609f0da4daaSChris Mi 	dest.ft = mlx5e_tc_post_act_get_ft(ct_priv->post_act);
1610aedd133dSAriel Levkovich 	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
16119102d836SRoi Dayan 	if (IS_ERR(rule)) {
16129102d836SRoi Dayan 		err = PTR_ERR(rule);
16139102d836SRoi Dayan 		ct_dbg("Failed to add pre ct flow rule zone %d", zone);
16149102d836SRoi Dayan 		goto err_flow_rule;
16159102d836SRoi Dayan 	}
16169102d836SRoi Dayan 	pre_ct->flow_rule = rule;
16179102d836SRoi Dayan 
16189102d836SRoi Dayan 	/* add miss rule */
16199102d836SRoi Dayan 	dest.ft = nat ? ct_priv->ct_nat : ct_priv->ct;
1620f822cf86SRoi Dayan 	rule = mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1);
16219102d836SRoi Dayan 	if (IS_ERR(rule)) {
16229102d836SRoi Dayan 		err = PTR_ERR(rule);
16239102d836SRoi Dayan 		ct_dbg("Failed to add pre ct miss rule zone %d", zone);
16249102d836SRoi Dayan 		goto err_miss_rule;
16259102d836SRoi Dayan 	}
16269102d836SRoi Dayan 	pre_ct->miss_rule = rule;
16279102d836SRoi Dayan 
16282c0e5cf5SPaul Blakey 	mlx5e_mod_hdr_dealloc(&pre_mod_acts);
16299102d836SRoi Dayan 	kvfree(spec);
16309102d836SRoi Dayan 	return 0;
16319102d836SRoi Dayan 
16329102d836SRoi Dayan err_miss_rule:
16339102d836SRoi Dayan 	mlx5_del_flow_rules(pre_ct->flow_rule);
16349102d836SRoi Dayan err_flow_rule:
16359102d836SRoi Dayan 	mlx5_modify_header_dealloc(dev, pre_ct->modify_hdr);
16369102d836SRoi Dayan err_mapping:
16372c0e5cf5SPaul Blakey 	mlx5e_mod_hdr_dealloc(&pre_mod_acts);
16389102d836SRoi Dayan 	kvfree(spec);
16399102d836SRoi Dayan 	return err;
16409102d836SRoi Dayan }
16419102d836SRoi Dayan 
16429102d836SRoi Dayan static void
16439102d836SRoi Dayan tc_ct_pre_ct_del_rules(struct mlx5_ct_ft *ct_ft,
16449102d836SRoi Dayan 		       struct mlx5_tc_ct_pre *pre_ct)
16459102d836SRoi Dayan {
16469102d836SRoi Dayan 	struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
1647670c239aSAriel Levkovich 	struct mlx5_core_dev *dev = ct_priv->dev;
16489102d836SRoi Dayan 
16499102d836SRoi Dayan 	mlx5_del_flow_rules(pre_ct->flow_rule);
16509102d836SRoi Dayan 	mlx5_del_flow_rules(pre_ct->miss_rule);
16519102d836SRoi Dayan 	mlx5_modify_header_dealloc(dev, pre_ct->modify_hdr);
16529102d836SRoi Dayan }
16539102d836SRoi Dayan 
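/* Create a two-entry pre_ct flow table: group 0 holds the rule matching
 * zone/ctstate in metadata reg_c_2, group 1 holds the miss rule.
 */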
16549102d836SRoi Dayan static int
16559102d836SRoi Dayan mlx5_tc_ct_alloc_pre_ct(struct mlx5_ct_ft *ct_ft,
16569102d836SRoi Dayan 			struct mlx5_tc_ct_pre *pre_ct,
16579102d836SRoi Dayan 			bool nat)
16589102d836SRoi Dayan {
16599102d836SRoi Dayan 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
16609102d836SRoi Dayan 	struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
1661670c239aSAriel Levkovich 	struct mlx5_core_dev *dev = ct_priv->dev;
16629102d836SRoi Dayan 	struct mlx5_flow_table_attr ft_attr = {};
16639102d836SRoi Dayan 	struct mlx5_flow_namespace *ns;
16649102d836SRoi Dayan 	struct mlx5_flow_table *ft;
16659102d836SRoi Dayan 	struct mlx5_flow_group *g;
16669102d836SRoi Dayan 	u32 metadata_reg_c_2_mask;
16679102d836SRoi Dayan 	u32 *flow_group_in;
16689102d836SRoi Dayan 	void *misc;
16699102d836SRoi Dayan 	int err;
16709102d836SRoi Dayan 
1671aedd133dSAriel Levkovich 	ns = mlx5_get_flow_namespace(dev, ct_priv->ns_type);
16729102d836SRoi Dayan 	if (!ns) {
16739102d836SRoi Dayan 		err = -EOPNOTSUPP;
1674aedd133dSAriel Levkovich 		ct_dbg("Failed to get flow namespace");
16759102d836SRoi Dayan 		return err;
16769102d836SRoi Dayan 	}
16779102d836SRoi Dayan 
16789102d836SRoi Dayan 	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
16799102d836SRoi Dayan 	if (!flow_group_in)
16809102d836SRoi Dayan 		return -ENOMEM;
16819102d836SRoi Dayan 
16829102d836SRoi Dayan 	ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
1683aedd133dSAriel Levkovich 	ft_attr.prio =  ct_priv->ns_type ==  MLX5_FLOW_NAMESPACE_FDB ?
1684aedd133dSAriel Levkovich 			FDB_TC_OFFLOAD : MLX5E_TC_PRIO;
16859102d836SRoi Dayan 	ft_attr.max_fte = 2;
16869102d836SRoi Dayan 	ft_attr.level = 1;
16879102d836SRoi Dayan 	ft = mlx5_create_flow_table(ns, &ft_attr);
16889102d836SRoi Dayan 	if (IS_ERR(ft)) {
16899102d836SRoi Dayan 		err = PTR_ERR(ft);
16909102d836SRoi Dayan 		ct_dbg("Failed to create pre ct table");
16919102d836SRoi Dayan 		goto out_free;
16929102d836SRoi Dayan 	}
1693aedd133dSAriel Levkovich 	pre_ct->ft = ft;
16949102d836SRoi Dayan 
16959102d836SRoi Dayan 	/* create flow group */
16969102d836SRoi Dayan 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
16979102d836SRoi Dayan 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
16989102d836SRoi Dayan 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
16999102d836SRoi Dayan 		 MLX5_MATCH_MISC_PARAMETERS_2);
17009102d836SRoi Dayan 
17019102d836SRoi Dayan 	misc = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
17029102d836SRoi Dayan 			    match_criteria.misc_parameters_2);
17039102d836SRoi Dayan 
17049102d836SRoi Dayan 	metadata_reg_c_2_mask = MLX5_CT_ZONE_MASK;
17059102d836SRoi Dayan 	metadata_reg_c_2_mask |= (MLX5_CT_STATE_TRK_BIT << 16);
17069102d836SRoi Dayan 	if (nat)
17079102d836SRoi Dayan 		metadata_reg_c_2_mask |= (MLX5_CT_STATE_NAT_BIT << 16);
17089102d836SRoi Dayan 
17099102d836SRoi Dayan 	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_2,
17109102d836SRoi Dayan 		 metadata_reg_c_2_mask);
17119102d836SRoi Dayan 
17129102d836SRoi Dayan 	g = mlx5_create_flow_group(ft, flow_group_in);
17139102d836SRoi Dayan 	if (IS_ERR(g)) {
17149102d836SRoi Dayan 		err = PTR_ERR(g);
17159102d836SRoi Dayan 		ct_dbg("Failed to create pre ct group");
17169102d836SRoi Dayan 		goto err_flow_grp;
17179102d836SRoi Dayan 	}
17189102d836SRoi Dayan 	pre_ct->flow_grp = g;
17199102d836SRoi Dayan 
17209102d836SRoi Dayan 	/* create miss group */
17219102d836SRoi Dayan 	memset(flow_group_in, 0, inlen);
17229102d836SRoi Dayan 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
17239102d836SRoi Dayan 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
17249102d836SRoi Dayan 	g = mlx5_create_flow_group(ft, flow_group_in);
17259102d836SRoi Dayan 	if (IS_ERR(g)) {
17269102d836SRoi Dayan 		err = PTR_ERR(g);
17279102d836SRoi Dayan 		ct_dbg("Failed to create pre ct miss group");
17289102d836SRoi Dayan 		goto err_miss_grp;
17299102d836SRoi Dayan 	}
17309102d836SRoi Dayan 	pre_ct->miss_grp = g;
17319102d836SRoi Dayan 
17329102d836SRoi Dayan 	err = tc_ct_pre_ct_add_rules(ct_ft, pre_ct, nat);
17339102d836SRoi Dayan 	if (err)
17349102d836SRoi Dayan 		goto err_add_rules;
17359102d836SRoi Dayan 
17369102d836SRoi Dayan 	kvfree(flow_group_in);
17379102d836SRoi Dayan 	return 0;
17389102d836SRoi Dayan 
17399102d836SRoi Dayan err_add_rules:
17409102d836SRoi Dayan 	mlx5_destroy_flow_group(pre_ct->miss_grp);
17419102d836SRoi Dayan err_miss_grp:
17429102d836SRoi Dayan 	mlx5_destroy_flow_group(pre_ct->flow_grp);
17439102d836SRoi Dayan err_flow_grp:
17449102d836SRoi Dayan 	mlx5_destroy_flow_table(ft);
17459102d836SRoi Dayan out_free:
17469102d836SRoi Dayan 	kvfree(flow_group_in);
17479102d836SRoi Dayan 	return err;
17489102d836SRoi Dayan }
17499102d836SRoi Dayan 
17509102d836SRoi Dayan static void
17519102d836SRoi Dayan mlx5_tc_ct_free_pre_ct(struct mlx5_ct_ft *ct_ft,
17529102d836SRoi Dayan 		       struct mlx5_tc_ct_pre *pre_ct)
17539102d836SRoi Dayan {
17549102d836SRoi Dayan 	tc_ct_pre_ct_del_rules(ct_ft, pre_ct);
17559102d836SRoi Dayan 	mlx5_destroy_flow_group(pre_ct->miss_grp);
17569102d836SRoi Dayan 	mlx5_destroy_flow_group(pre_ct->flow_grp);
1757aedd133dSAriel Levkovich 	mlx5_destroy_flow_table(pre_ct->ft);
17589102d836SRoi Dayan }
17599102d836SRoi Dayan 
17609102d836SRoi Dayan static int
17619102d836SRoi Dayan mlx5_tc_ct_alloc_pre_ct_tables(struct mlx5_ct_ft *ft)
17629102d836SRoi Dayan {
17639102d836SRoi Dayan 	int err;
17649102d836SRoi Dayan 
17659102d836SRoi Dayan 	err = mlx5_tc_ct_alloc_pre_ct(ft, &ft->pre_ct, false);
17669102d836SRoi Dayan 	if (err)
17679102d836SRoi Dayan 		return err;
17689102d836SRoi Dayan 
17699102d836SRoi Dayan 	err = mlx5_tc_ct_alloc_pre_ct(ft, &ft->pre_ct_nat, true);
17709102d836SRoi Dayan 	if (err)
17719102d836SRoi Dayan 		goto err_pre_ct_nat;
17729102d836SRoi Dayan 
17739102d836SRoi Dayan 	return 0;
17749102d836SRoi Dayan 
17759102d836SRoi Dayan err_pre_ct_nat:
17769102d836SRoi Dayan 	mlx5_tc_ct_free_pre_ct(ft, &ft->pre_ct);
17779102d836SRoi Dayan 	return err;
17789102d836SRoi Dayan }
17799102d836SRoi Dayan 
17809102d836SRoi Dayan static void
17819102d836SRoi Dayan mlx5_tc_ct_free_pre_ct_tables(struct mlx5_ct_ft *ft)
17829102d836SRoi Dayan {
17839102d836SRoi Dayan 	mlx5_tc_ct_free_pre_ct(ft, &ft->pre_ct_nat);
17849102d836SRoi Dayan 	mlx5_tc_ct_free_pre_ct(ft, &ft->pre_ct);
17859102d836SRoi Dayan }
17869102d836SRoi Dayan 
178776e68d95SRoi Dayan /* Give ct_entries_ht a lock class of its own to avoid a false lockdep
178876e68d95SRoi Dayan  * dependency warning: when the last flow of a group is deleted and the group
178976e68d95SRoi Dayan  * itself is then deleted, del_sw_flow_group() calls rhashtable_destroy() on
179076e68d95SRoi Dayan  * fg->ftes_hash, taking that table's ht->mutex, which is a different mutex
179176e68d95SRoi Dayan  * than the ht->mutex used here.
179276e68d95SRoi Dayan  */
179376e68d95SRoi Dayan static struct lock_class_key ct_entries_ht_lock_key;
179476e68d95SRoi Dayan 
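/* Get (or create) the per-zone ct flowtable object, allocate its pre_ct
 * tables and zone restore id, and register the nf flowtable offload callback.
 */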
1795ac991b48SPaul Blakey static struct mlx5_ct_ft *
1796ac991b48SPaul Blakey mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone,
1797ac991b48SPaul Blakey 		     struct nf_flowtable *nf_ft)
1798ac991b48SPaul Blakey {
1799ac991b48SPaul Blakey 	struct mlx5_ct_ft *ft;
1800ac991b48SPaul Blakey 	int err;
1801ac991b48SPaul Blakey 
1802ac991b48SPaul Blakey 	ft = rhashtable_lookup_fast(&ct_priv->zone_ht, &zone, zone_params);
1803ac991b48SPaul Blakey 	if (ft) {
1804ac991b48SPaul Blakey 		refcount_inc(&ft->refcount);
1805ac991b48SPaul Blakey 		return ft;
1806ac991b48SPaul Blakey 	}
1807ac991b48SPaul Blakey 
1808ac991b48SPaul Blakey 	ft = kzalloc(sizeof(*ft), GFP_KERNEL);
1809ac991b48SPaul Blakey 	if (!ft)
1810ac991b48SPaul Blakey 		return ERR_PTR(-ENOMEM);
1811ac991b48SPaul Blakey 
18128f5b3c3eSPaul Blakey 	err = mapping_add(ct_priv->zone_mapping, &zone, &ft->zone_restore_id);
18138f5b3c3eSPaul Blakey 	if (err)
18148f5b3c3eSPaul Blakey 		goto err_mapping;
18158f5b3c3eSPaul Blakey 
1816ac991b48SPaul Blakey 	ft->zone = zone;
1817ac991b48SPaul Blakey 	ft->nf_ft = nf_ft;
1818ac991b48SPaul Blakey 	ft->ct_priv = ct_priv;
1819ac991b48SPaul Blakey 	refcount_set(&ft->refcount, 1);
1820ac991b48SPaul Blakey 
18219102d836SRoi Dayan 	err = mlx5_tc_ct_alloc_pre_ct_tables(ft);
18229102d836SRoi Dayan 	if (err)
18239102d836SRoi Dayan 		goto err_alloc_pre_ct;
18249102d836SRoi Dayan 
1825ac991b48SPaul Blakey 	err = rhashtable_init(&ft->ct_entries_ht, &cts_ht_params);
1826ac991b48SPaul Blakey 	if (err)
1827ac991b48SPaul Blakey 		goto err_init;
1828ac991b48SPaul Blakey 
182976e68d95SRoi Dayan 	lockdep_set_class(&ft->ct_entries_ht.mutex, &ct_entries_ht_lock_key);
183076e68d95SRoi Dayan 
1831ac991b48SPaul Blakey 	err = rhashtable_insert_fast(&ct_priv->zone_ht, &ft->node,
1832ac991b48SPaul Blakey 				     zone_params);
1833ac991b48SPaul Blakey 	if (err)
1834ac991b48SPaul Blakey 		goto err_insert;
1835ac991b48SPaul Blakey 
1836ac991b48SPaul Blakey 	err = nf_flow_table_offload_add_cb(ft->nf_ft,
1837ac991b48SPaul Blakey 					   mlx5_tc_ct_block_flow_offload, ft);
1838ac991b48SPaul Blakey 	if (err)
1839ac991b48SPaul Blakey 		goto err_add_cb;
1840ac991b48SPaul Blakey 
1841ac991b48SPaul Blakey 	return ft;
1842ac991b48SPaul Blakey 
1843ac991b48SPaul Blakey err_add_cb:
1844ac991b48SPaul Blakey 	rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params);
1845ac991b48SPaul Blakey err_insert:
1846ac991b48SPaul Blakey 	rhashtable_destroy(&ft->ct_entries_ht);
1847ac991b48SPaul Blakey err_init:
18489102d836SRoi Dayan 	mlx5_tc_ct_free_pre_ct_tables(ft);
18499102d836SRoi Dayan err_alloc_pre_ct:
18508f5b3c3eSPaul Blakey 	mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id);
18518f5b3c3eSPaul Blakey err_mapping:
1852ac991b48SPaul Blakey 	kfree(ft);
1853ac991b48SPaul Blakey 	return ERR_PTR(err);
1854ac991b48SPaul Blakey }
1855ac991b48SPaul Blakey 
1856ac991b48SPaul Blakey static void
18579808dd0aSPaul Blakey mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
1858ac991b48SPaul Blakey {
18599808dd0aSPaul Blakey 	struct mlx5_ct_entry *entry = ptr;
1860ac991b48SPaul Blakey 
1861a2173131SOz Shlomo 	mlx5_tc_ct_entry_put(entry);
1862ac991b48SPaul Blakey }
1863ac991b48SPaul Blakey 
1864ac991b48SPaul Blakey static void
1865ac991b48SPaul Blakey mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
1866ac991b48SPaul Blakey {
1867ac991b48SPaul Blakey 	if (!refcount_dec_and_test(&ft->refcount))
1868ac991b48SPaul Blakey 		return;
1869ac991b48SPaul Blakey 
18706c4e8fa0SRoi Dayan 	flush_workqueue(ct_priv->wq);
1871ac991b48SPaul Blakey 	nf_flow_table_offload_del_cb(ft->nf_ft,
1872ac991b48SPaul Blakey 				     mlx5_tc_ct_block_flow_offload, ft);
1873ac991b48SPaul Blakey 	rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params);
18749808dd0aSPaul Blakey 	rhashtable_free_and_destroy(&ft->ct_entries_ht,
18759808dd0aSPaul Blakey 				    mlx5_tc_ct_flush_ft_entry,
18769808dd0aSPaul Blakey 				    ct_priv);
18779102d836SRoi Dayan 	mlx5_tc_ct_free_pre_ct_tables(ft);
18788f5b3c3eSPaul Blakey 	mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id);
1879ac991b48SPaul Blakey 	kfree(ft);
1880ac991b48SPaul Blakey }
1881ac991b48SPaul Blakey 
18824c3844d9SPaul Blakey /* We translate the tc filter with CT action to the following HW model:
18834c3844d9SPaul Blakey  *
188408fe94ecSPaul Blakey  *	+-----------------------+
188508fe94ecSPaul Blakey  *	+ rule (either original +
188608fe94ecSPaul Blakey  *	+ or post_act rule)     +
188708fe94ecSPaul Blakey  *	+-----------------------+
188867027828SPaul Blakey  *		 | set act_miss_cookie mapping
18899102d836SRoi Dayan  *		 | set fte_id
18909102d836SRoi Dayan  *		 | set tunnel_id
189108fe94ecSPaul Blakey  *		 | rest of actions before the CT action (for this orig/post_act rule)
189205bb74c2SOz Shlomo  *		 |
189305bb74c2SOz Shlomo  * +-------------+
189405bb74c2SOz Shlomo  * | Chain 0	 |
189505bb74c2SOz Shlomo  * | optimization|
189605bb74c2SOz Shlomo  * |		 v
189705bb74c2SOz Shlomo  * |	+---------------------+
189805bb74c2SOz Shlomo  * |	+ pre_ct/pre_ct_nat   +  if matches     +----------------------+
189905bb74c2SOz Shlomo  * |	+ zone+nat match      +---------------->+ post_act (see below) +
190005bb74c2SOz Shlomo  * |	+---------------------+  set zone       +----------------------+
190105bb74c2SOz Shlomo  * |		 |
190205bb74c2SOz Shlomo  * +-------------+ set zone
190305bb74c2SOz Shlomo  *		 |
19049102d836SRoi Dayan  *		 v
19059102d836SRoi Dayan  *	+--------------------+
19069102d836SRoi Dayan  *	+ CT (nat or no nat) +
19079102d836SRoi Dayan  *	+ tuple + zone match +
19089102d836SRoi Dayan  *	+--------------------+
19099102d836SRoi Dayan  *		 | set mark
191054b154ecSEli Britstein  *		 | set labels_id
19119102d836SRoi Dayan  *		 | set established
1912a8eb919bSPaul Blakey  *		 | set zone_restore
19139102d836SRoi Dayan  *		 | do nat (if needed)
19149102d836SRoi Dayan  *		 v
19159102d836SRoi Dayan  *	+--------------+
191608fe94ecSPaul Blakey  *	+ post_act     + rest of parsed filter's actions
19179102d836SRoi Dayan  *	+ fte_id match +------------------------>
19189102d836SRoi Dayan  *	+--------------+
191905bb74c2SOz Shlomo  *
19204c3844d9SPaul Blakey  */
192108fe94ecSPaul Blakey static int
1922aedd133dSAriel Levkovich __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
1923c620b772SAriel Levkovich 			  struct mlx5_flow_attr *attr)
19244c3844d9SPaul Blakey {
19254c3844d9SPaul Blakey 	bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
1926aedd133dSAriel Levkovich 	struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
192767027828SPaul Blakey 	int act_miss_mapping = 0, err;
1928ac991b48SPaul Blakey 	struct mlx5_ct_ft *ft;
192905bb74c2SOz Shlomo 	u16 zone;
19304c3844d9SPaul Blakey 
1931ac991b48SPaul Blakey 	/* Register for CT established events */
1932ac991b48SPaul Blakey 	ft = mlx5_tc_ct_add_ft_cb(ct_priv, attr->ct_attr.zone,
1933ac991b48SPaul Blakey 				  attr->ct_attr.nf_ft);
1934ac991b48SPaul Blakey 	if (IS_ERR(ft)) {
1935ac991b48SPaul Blakey 		err = PTR_ERR(ft);
1936ac991b48SPaul Blakey 		ct_dbg("Failed to register to ft callback");
1937ac991b48SPaul Blakey 		goto err_ft;
1938ac991b48SPaul Blakey 	}
193908fe94ecSPaul Blakey 	attr->ct_attr.ft = ft;
19404c3844d9SPaul Blakey 
194167027828SPaul Blakey 	err = mlx5e_tc_action_miss_mapping_get(ct_priv->priv, attr, attr->ct_attr.act_miss_cookie,
194267027828SPaul Blakey 					       &act_miss_mapping);
19434c3844d9SPaul Blakey 	if (err) {
194467027828SPaul Blakey 		ct_dbg("Failed to get register mapping for act miss");
194567027828SPaul Blakey 		goto err_get_act_miss;
19464c3844d9SPaul Blakey 	}
19474c3844d9SPaul Blakey 
194808fe94ecSPaul Blakey 	err = mlx5e_tc_match_to_reg_set(priv->mdev, &attr->parse_attr->mod_hdr_acts,
194908fe94ecSPaul Blakey 					ct_priv->ns_type, MAPPED_OBJ_TO_REG, act_miss_mapping);
19504c3844d9SPaul Blakey 	if (err) {
195167027828SPaul Blakey 		ct_dbg("Failed to set act miss register mapping");
19524c3844d9SPaul Blakey 		goto err_mapping;
19534c3844d9SPaul Blakey 	}
19544c3844d9SPaul Blakey 
195508fe94ecSPaul Blakey 	/* Chain 0 sets the zone and jumps to ct table
195605bb74c2SOz Shlomo 	 * Other chains jump to pre_ct table to align with act_ct cached logic
195705bb74c2SOz Shlomo 	 */
195805bb74c2SOz Shlomo 	if (!attr->chain) {
195905bb74c2SOz Shlomo 		zone = ft->zone & MLX5_CT_ZONE_MASK;
196008fe94ecSPaul Blakey 		err = mlx5e_tc_match_to_reg_set(priv->mdev, &attr->parse_attr->mod_hdr_acts,
196108fe94ecSPaul Blakey 						ct_priv->ns_type, ZONE_TO_REG, zone);
196205bb74c2SOz Shlomo 		if (err) {
196305bb74c2SOz Shlomo 			ct_dbg("Failed to set zone register mapping");
196405bb74c2SOz Shlomo 			goto err_mapping;
196505bb74c2SOz Shlomo 		}
196605bb74c2SOz Shlomo 
196708fe94ecSPaul Blakey 		attr->dest_ft = nat ? ct_priv->ct_nat : ct_priv->ct;
196805bb74c2SOz Shlomo 	} else {
196908fe94ecSPaul Blakey 		attr->dest_ft = nat ? ft->pre_ct_nat.ft : ft->pre_ct.ft;
197005bb74c2SOz Shlomo 	}
197105bb74c2SOz Shlomo 
197208fe94ecSPaul Blakey 	attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
197308fe94ecSPaul Blakey 	attr->ct_attr.act_miss_mapping = act_miss_mapping;
19744c3844d9SPaul Blakey 
197508fe94ecSPaul Blakey 	return 0;
19764c3844d9SPaul Blakey 
19774c3844d9SPaul Blakey err_mapping:
197867027828SPaul Blakey 	mlx5e_tc_action_miss_mapping_put(ct_priv->priv, attr, act_miss_mapping);
197967027828SPaul Blakey err_get_act_miss:
1980ac991b48SPaul Blakey 	mlx5_tc_ct_del_ft_cb(ct_priv, ft);
1981ac991b48SPaul Blakey err_ft:
19824c3844d9SPaul Blakey 	netdev_warn(priv->netdev, "Failed to offload ct flow, err %d\n", err);
198308fe94ecSPaul Blakey 	return err;
19844c3844d9SPaul Blakey }
19854c3844d9SPaul Blakey 
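/* Offload the ct/ct_clear action of a tc flow: ct_clear only zeroes the ct
 * registers via a modify header; an actual ct action is offloaded under the
 * control lock by __mlx5_tc_ct_flow_offload().
 */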
198608fe94ecSPaul Blakey int
198708fe94ecSPaul Blakey mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *attr)
19884c3844d9SPaul Blakey {
198908fe94ecSPaul Blakey 	int err;
19904c3844d9SPaul Blakey 
1991aedd133dSAriel Levkovich 	if (!priv)
199208fe94ecSPaul Blakey 		return -EOPNOTSUPP;
199308fe94ecSPaul Blakey 
1994*f7a48511SYevgeny Kliteynik 	if (attr->ct_attr.offloaded)
1995*f7a48511SYevgeny Kliteynik 		return 0;
1996*f7a48511SYevgeny Kliteynik 
199708fe94ecSPaul Blakey 	if (attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR) {
199808fe94ecSPaul Blakey 		err = mlx5_tc_ct_entry_set_registers(priv, &attr->parse_attr->mod_hdr_acts,
199908fe94ecSPaul Blakey 						     0, 0, 0, 0);
200008fe94ecSPaul Blakey 		if (err)
200108fe94ecSPaul Blakey 			return err;
200208fe94ecSPaul Blakey 
200308fe94ecSPaul Blakey 		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
200408fe94ecSPaul Blakey 	}
200508fe94ecSPaul Blakey 
2006*f7a48511SYevgeny Kliteynik 	if (!attr->ct_attr.nf_ft) { /* means only ct clear action, and not ct_clear,ct() */
2007*f7a48511SYevgeny Kliteynik 		attr->ct_attr.offloaded = true;
200808fe94ecSPaul Blakey 		return 0;
2009*f7a48511SYevgeny Kliteynik 	}
20104c3844d9SPaul Blakey 
2011aedd133dSAriel Levkovich 	mutex_lock(&priv->control_lock);
201208fe94ecSPaul Blakey 	err = __mlx5_tc_ct_flow_offload(priv, attr);
2013*f7a48511SYevgeny Kliteynik 	if (!err)
2014*f7a48511SYevgeny Kliteynik 		attr->ct_attr.offloaded = true;
2015aedd133dSAriel Levkovich 	mutex_unlock(&priv->control_lock);
20164c3844d9SPaul Blakey 
201708fe94ecSPaul Blakey 	return err;
20184c3844d9SPaul Blakey }
20194c3844d9SPaul Blakey 
20204c3844d9SPaul Blakey static void
20214c3844d9SPaul Blakey __mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv,
2022a572c0a7SRoi Dayan 			 struct mlx5_flow_attr *attr)
20234c3844d9SPaul Blakey {
202467027828SPaul Blakey 	mlx5e_tc_action_miss_mapping_put(ct_priv->priv, attr, attr->ct_attr.act_miss_mapping);
202508fe94ecSPaul Blakey 	mlx5_tc_ct_del_ft_cb(ct_priv, attr->ct_attr.ft);
20264c3844d9SPaul Blakey }
20274c3844d9SPaul Blakey 
20284c3844d9SPaul Blakey void
2029aedd133dSAriel Levkovich mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
2030c620b772SAriel Levkovich 		       struct mlx5_flow_attr *attr)
20314c3844d9SPaul Blakey {
2032*f7a48511SYevgeny Kliteynik 	if (!attr->ct_attr.offloaded) /* no ct action, return */
2033b100573aSChris Mi 		return;
203408fe94ecSPaul Blakey 	if (!attr->ct_attr.nf_ft) /* means only ct clear action, and not ct_clear,ct() */
20354c3844d9SPaul Blakey 		return;
20364c3844d9SPaul Blakey 
2037aedd133dSAriel Levkovich 	mutex_lock(&priv->control_lock);
203808fe94ecSPaul Blakey 	__mlx5_tc_ct_delete_flow(priv, attr);
2039aedd133dSAriel Levkovich 	mutex_unlock(&priv->control_lock);
20404c3844d9SPaul Blakey }
20414c3844d9SPaul Blakey 
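/* Select and initialize the ct flow steering provider: SMFS when the ct
 * tables live in the FDB namespace and the device steering mode is SMFS,
 * DMFS otherwise.
 */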
20424c3844d9SPaul Blakey static int
204376909000SPaul Blakey mlx5_tc_ct_fs_init(struct mlx5_tc_ct_priv *ct_priv)
204476909000SPaul Blakey {
20453ee61ebbSPaul Blakey 	struct mlx5_flow_table *post_ct = mlx5e_tc_post_act_get_ft(ct_priv->post_act);
204676909000SPaul Blakey 	struct mlx5_ct_fs_ops *fs_ops = mlx5_ct_fs_dmfs_ops_get();
20473ee61ebbSPaul Blakey 	int err;
204876909000SPaul Blakey 
20493ee61ebbSPaul Blakey 	if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
20503ee61ebbSPaul Blakey 	    ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) {
20513ee61ebbSPaul Blakey 		ct_dbg("Using SMFS ct flow steering provider");
20523ee61ebbSPaul Blakey 		fs_ops = mlx5_ct_fs_smfs_ops_get();
20533ee61ebbSPaul Blakey 	}
20543ee61ebbSPaul Blakey 
20553ee61ebbSPaul Blakey 	ct_priv->fs = kzalloc(sizeof(*ct_priv->fs) + fs_ops->priv_size, GFP_KERNEL);
205676909000SPaul Blakey 	if (!ct_priv->fs)
205776909000SPaul Blakey 		return -ENOMEM;
205876909000SPaul Blakey 
205976909000SPaul Blakey 	ct_priv->fs->netdev = ct_priv->netdev;
206076909000SPaul Blakey 	ct_priv->fs->dev = ct_priv->dev;
206176909000SPaul Blakey 	ct_priv->fs_ops = fs_ops;
206276909000SPaul Blakey 
20633ee61ebbSPaul Blakey 	err = ct_priv->fs_ops->init(ct_priv->fs, ct_priv->ct, ct_priv->ct_nat, post_ct);
20643ee61ebbSPaul Blakey 	if (err)
20653ee61ebbSPaul Blakey 		goto err_init;
20663ee61ebbSPaul Blakey 
206776909000SPaul Blakey 	return 0;
20683ee61ebbSPaul Blakey 
20693ee61ebbSPaul Blakey err_init:
20703ee61ebbSPaul Blakey 	kfree(ct_priv->fs);
20713ee61ebbSPaul Blakey 	return err;
207276909000SPaul Blakey }
207376909000SPaul Blakey 
207476909000SPaul Blakey static int
2075aedd133dSAriel Levkovich mlx5_tc_ct_init_check_esw_support(struct mlx5_eswitch *esw,
20764c3844d9SPaul Blakey 				  const char **err_msg)
20774c3844d9SPaul Blakey {
20784c3844d9SPaul Blakey 	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) {
20794c3844d9SPaul Blakey 		/* vlan workaround should be avoided for multi chain rules.
20804c3844d9SPaul Blakey 		 * This is just a sanity check as pop vlan action should
20814c3844d9SPaul Blakey 		 * be supported by any FW that supports ignore_flow_level
20824c3844d9SPaul Blakey 		 */
20834c3844d9SPaul Blakey 
20844c3844d9SPaul Blakey 		*err_msg = "firmware vlan actions support is missing";
20854c3844d9SPaul Blakey 		return -EOPNOTSUPP;
20864c3844d9SPaul Blakey 	}
20874c3844d9SPaul Blakey 
20884c3844d9SPaul Blakey 	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev,
20894c3844d9SPaul Blakey 				    fdb_modify_header_fwd_to_table)) {
20904c3844d9SPaul Blakey 		/* CT always writes to registers which are mod header actions.
20914c3844d9SPaul Blakey 		 * Therefore, mod header and goto is required
20924c3844d9SPaul Blakey 		 */
20934c3844d9SPaul Blakey 
20944c3844d9SPaul Blakey 		*err_msg = "firmware fwd and modify support is missing";
20954c3844d9SPaul Blakey 		return -EOPNOTSUPP;
20964c3844d9SPaul Blakey 	}
20974c3844d9SPaul Blakey 
20984c3844d9SPaul Blakey 	if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
20994c3844d9SPaul Blakey 		*err_msg = "register loopback isn't supported";
21004c3844d9SPaul Blakey 		return -EOPNOTSUPP;
21014c3844d9SPaul Blakey 	}
21024c3844d9SPaul Blakey 
21034c3844d9SPaul Blakey 	return 0;
21044c3844d9SPaul Blakey }
21054c3844d9SPaul Blakey 
2106aedd133dSAriel Levkovich static int
2107aedd133dSAriel Levkovich mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv,
2108aedd133dSAriel Levkovich 			      enum mlx5_flow_namespace_type ns_type,
2109ae2ee3beSPaul Blakey 			      struct mlx5e_post_act *post_act)
2110aedd133dSAriel Levkovich {
2111aedd133dSAriel Levkovich 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2112ae2ee3beSPaul Blakey 	const char *err_msg = NULL;
2113ae2ee3beSPaul Blakey 	int err = 0;
2114aedd133dSAriel Levkovich 
2115f0da4daaSChris Mi 	if (IS_ERR_OR_NULL(post_act)) {
2116ae2ee3beSPaul Blakey 		/* ignore_flow_level isn't supported by default for VFs, so post_act
2117ae2ee3beSPaul Blakey 		 * won't be available. Skip showing the error msg.
2118ae2ee3beSPaul Blakey 		 */
211922df2e93SRoi Dayan 		if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
2120ae2ee3beSPaul Blakey 			err_msg = "post action is missing";
2121ae2ee3beSPaul Blakey 		err = -EOPNOTSUPP;
2122ae2ee3beSPaul Blakey 		goto out_err;
2123f0da4daaSChris Mi 	}
2124f0da4daaSChris Mi 
2125aedd133dSAriel Levkovich 	if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
2126ae2ee3beSPaul Blakey 		err = mlx5_tc_ct_init_check_esw_support(esw, &err_msg);
2127ae2ee3beSPaul Blakey 
2128ae2ee3beSPaul Blakey out_err:
2129ae2ee3beSPaul Blakey 	if (err && err_msg)
2130ae2ee3beSPaul Blakey 		netdev_dbg(priv->netdev, "tc ct offload not supported, %s\n", err_msg);
2131ae2ee3beSPaul Blakey 	return err;
2132aedd133dSAriel Levkovich }
2133aedd133dSAriel Levkovich 
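/* Expose the CT offload stats counters under a "ct" directory in the
 * device's debugfs root: "offloaded" and "rx_dropped" (the latter is
 * incremented when CT state can't be restored on the miss path, see
 * mlx5e_tc_ct_restore_flow()).
 */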
213477422a8fSSaeed Mahameed static void
213577422a8fSSaeed Mahameed mlx5_ct_tc_create_dbgfs(struct mlx5_tc_ct_priv *ct_priv)
213677422a8fSSaeed Mahameed {
213777422a8fSSaeed Mahameed 	struct mlx5_tc_ct_debugfs *ct_dbgfs = &ct_priv->debugfs;
213877422a8fSSaeed Mahameed 
2139849190e3SChris Mi 	ct_dbgfs->root = debugfs_create_dir("ct", mlx5_debugfs_get_dev_root(ct_priv->dev));
214077422a8fSSaeed Mahameed 	debugfs_create_atomic_t("offloaded", 0400, ct_dbgfs->root,
214177422a8fSSaeed Mahameed 				&ct_dbgfs->stats.offloaded);
214277422a8fSSaeed Mahameed 	debugfs_create_atomic_t("rx_dropped", 0400, ct_dbgfs->root,
214377422a8fSSaeed Mahameed 				&ct_dbgfs->stats.rx_dropped);
214477422a8fSSaeed Mahameed }
214577422a8fSSaeed Mahameed 
214677422a8fSSaeed Mahameed static void
214777422a8fSSaeed Mahameed mlx5_ct_tc_remove_dbgfs(struct mlx5_tc_ct_priv *ct_priv)
214877422a8fSSaeed Mahameed {
214977422a8fSSaeed Mahameed 	debugfs_remove_recursive(ct_priv->debugfs.root);
215077422a8fSSaeed Mahameed }
215177422a8fSSaeed Mahameed 
2152211a5364SAriel Levkovich #define INIT_ERR_PREFIX "tc ct offload init failed"
21534c3844d9SPaul Blakey 
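/* Build the CT offload context for the given namespace (FDB or NIC kernel):
 * zone and ct-labels mappings, the ct and ct_nat global tables, the zone and
 * tuple hashtables, an ordered workqueue, the flow steering provider and the
 * debugfs counters. Returns NULL on any failure.
 */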
2154aedd133dSAriel Levkovich struct mlx5_tc_ct_priv *
2155aedd133dSAriel Levkovich mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
2156aedd133dSAriel Levkovich 		struct mod_hdr_tbl *mod_hdr,
2157f0da4daaSChris Mi 		enum mlx5_flow_namespace_type ns_type,
2158f0da4daaSChris Mi 		struct mlx5e_post_act *post_act)
21594c3844d9SPaul Blakey {
21604c3844d9SPaul Blakey 	struct mlx5_tc_ct_priv *ct_priv;
2161211a5364SAriel Levkovich 	struct mlx5_core_dev *dev;
21622198b932SRoi Dayan 	u64 mapping_id;
21634c3844d9SPaul Blakey 	int err;
21644c3844d9SPaul Blakey 
2165211a5364SAriel Levkovich 	dev = priv->mdev;
2166ae2ee3beSPaul Blakey 	err = mlx5_tc_ct_init_check_support(priv, ns_type, post_act);
2167ae2ee3beSPaul Blakey 	if (err)
21684c3844d9SPaul Blakey 		goto err_support;
21694c3844d9SPaul Blakey 
21704c3844d9SPaul Blakey 	ct_priv = kzalloc(sizeof(*ct_priv), GFP_KERNEL);
2171211a5364SAriel Levkovich 	if (!ct_priv)
21724c3844d9SPaul Blakey 		goto err_alloc;
21734c3844d9SPaul Blakey 
21742198b932SRoi Dayan 	mapping_id = mlx5_query_nic_system_image_guid(dev);
21752198b932SRoi Dayan 
21762198b932SRoi Dayan 	ct_priv->zone_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_ZONE,
21772198b932SRoi Dayan 						      sizeof(u16), 0, true);
21788f5b3c3eSPaul Blakey 	if (IS_ERR(ct_priv->zone_mapping)) {
21798f5b3c3eSPaul Blakey 		err = PTR_ERR(ct_priv->zone_mapping);
218054b154ecSEli Britstein 		goto err_mapping_zone;
218154b154ecSEli Britstein 	}
218254b154ecSEli Britstein 
21832198b932SRoi Dayan 	ct_priv->labels_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_LABELS,
21842198b932SRoi Dayan 							sizeof(u32) * 4, 0, true);
218554b154ecSEli Britstein 	if (IS_ERR(ct_priv->labels_mapping)) {
218654b154ecSEli Britstein 		err = PTR_ERR(ct_priv->labels_mapping);
218754b154ecSEli Britstein 		goto err_mapping_labels;
21888f5b3c3eSPaul Blakey 	}
21898f5b3c3eSPaul Blakey 
2190a2173131SOz Shlomo 	spin_lock_init(&ct_priv->ht_lock);
219167027828SPaul Blakey 	ct_priv->priv = priv;
2192aedd133dSAriel Levkovich 	ct_priv->ns_type = ns_type;
2193aedd133dSAriel Levkovich 	ct_priv->chains = chains;
2194aedd133dSAriel Levkovich 	ct_priv->netdev = priv->netdev;
2195670c239aSAriel Levkovich 	ct_priv->dev = priv->mdev;
2196aedd133dSAriel Levkovich 	ct_priv->mod_hdr_tbl = mod_hdr;
2197aedd133dSAriel Levkovich 	ct_priv->ct = mlx5_chains_create_global_table(chains);
21984c3844d9SPaul Blakey 	if (IS_ERR(ct_priv->ct)) {
21994c3844d9SPaul Blakey 		err = PTR_ERR(ct_priv->ct);
2200211a5364SAriel Levkovich 		mlx5_core_warn(dev,
2201211a5364SAriel Levkovich 			       "%s, failed to create ct table err: %d\n",
2202211a5364SAriel Levkovich 			       INIT_ERR_PREFIX, err);
22034c3844d9SPaul Blakey 		goto err_ct_tbl;
22044c3844d9SPaul Blakey 	}
22054c3844d9SPaul Blakey 
2206aedd133dSAriel Levkovich 	ct_priv->ct_nat = mlx5_chains_create_global_table(chains);
22074c3844d9SPaul Blakey 	if (IS_ERR(ct_priv->ct_nat)) {
22084c3844d9SPaul Blakey 		err = PTR_ERR(ct_priv->ct_nat);
2209211a5364SAriel Levkovich 		mlx5_core_warn(dev,
2210211a5364SAriel Levkovich 			       "%s, failed to create ct nat table err: %d\n",
2211211a5364SAriel Levkovich 			       INIT_ERR_PREFIX, err);
22124c3844d9SPaul Blakey 		goto err_ct_nat_tbl;
22134c3844d9SPaul Blakey 	}
22144c3844d9SPaul Blakey 
2215f0da4daaSChris Mi 	ct_priv->post_act = post_act;
22164c3844d9SPaul Blakey 	mutex_init(&ct_priv->control_lock);
2217d7cade51SMichelleJin 	if (rhashtable_init(&ct_priv->zone_ht, &zone_params))
2218d7cade51SMichelleJin 		goto err_ct_zone_ht;
2219d7cade51SMichelleJin 	if (rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params))
2220d7cade51SMichelleJin 		goto err_ct_tuples_ht;
2221d7cade51SMichelleJin 	if (rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params))
2222d7cade51SMichelleJin 		goto err_ct_tuples_nat_ht;
22234c3844d9SPaul Blakey 
22246c4e8fa0SRoi Dayan 	ct_priv->wq = alloc_ordered_workqueue("mlx5e_ct_priv_wq", 0);
22256c4e8fa0SRoi Dayan 	if (!ct_priv->wq) {
22266c4e8fa0SRoi Dayan 		err = -ENOMEM;
22276c4e8fa0SRoi Dayan 		goto err_wq;
22286c4e8fa0SRoi Dayan 	}
22296c4e8fa0SRoi Dayan 
223076909000SPaul Blakey 	err = mlx5_tc_ct_fs_init(ct_priv);
223176909000SPaul Blakey 	if (err)
223276909000SPaul Blakey 		goto err_init_fs;
223376909000SPaul Blakey 
223477422a8fSSaeed Mahameed 	mlx5_ct_tc_create_dbgfs(ct_priv);
2235aedd133dSAriel Levkovich 	return ct_priv;
22364c3844d9SPaul Blakey 
223776909000SPaul Blakey err_init_fs:
22386c4e8fa0SRoi Dayan 	destroy_workqueue(ct_priv->wq);
22396c4e8fa0SRoi Dayan err_wq:
224076909000SPaul Blakey 	rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
2241d7cade51SMichelleJin err_ct_tuples_nat_ht:
2242d7cade51SMichelleJin 	rhashtable_destroy(&ct_priv->ct_tuples_ht);
2243d7cade51SMichelleJin err_ct_tuples_ht:
2244d7cade51SMichelleJin 	rhashtable_destroy(&ct_priv->zone_ht);
2245d7cade51SMichelleJin err_ct_zone_ht:
224688594d83SPaul Blakey 	mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
22474c3844d9SPaul Blakey err_ct_nat_tbl:
2248aedd133dSAriel Levkovich 	mlx5_chains_destroy_global_table(chains, ct_priv->ct);
22494c3844d9SPaul Blakey err_ct_tbl:
225054b154ecSEli Britstein 	mapping_destroy(ct_priv->labels_mapping);
225154b154ecSEli Britstein err_mapping_labels:
22528f5b3c3eSPaul Blakey 	mapping_destroy(ct_priv->zone_mapping);
225354b154ecSEli Britstein err_mapping_zone:
22544c3844d9SPaul Blakey 	kfree(ct_priv);
22554c3844d9SPaul Blakey err_alloc:
22564c3844d9SPaul Blakey err_support:
22574c3844d9SPaul Blakey 
2258aedd133dSAriel Levkovich 	return NULL;
22594c3844d9SPaul Blakey }
22604c3844d9SPaul Blakey 
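/* Tear down the CT offload context created by mlx5_tc_ct_init(): the
 * workqueue, debugfs, the flow steering provider, the ct/ct_nat global
 * tables, the mappings and the hashtables. A NULL ct_priv is silently
 * ignored.
 */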
22614c3844d9SPaul Blakey void
2262aedd133dSAriel Levkovich mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
22634c3844d9SPaul Blakey {
2264ae430332SAriel Levkovich 	struct mlx5_fs_chains *chains;
22654c3844d9SPaul Blakey 
22664c3844d9SPaul Blakey 	if (!ct_priv)
22674c3844d9SPaul Blakey 		return;
22684c3844d9SPaul Blakey 
22696c4e8fa0SRoi Dayan 	destroy_workqueue(ct_priv->wq);
227077422a8fSSaeed Mahameed 	mlx5_ct_tc_remove_dbgfs(ct_priv);
2271aedd133dSAriel Levkovich 	chains = ct_priv->chains;
2272ae430332SAriel Levkovich 
22733ee61ebbSPaul Blakey 	ct_priv->fs_ops->destroy(ct_priv->fs);
227476909000SPaul Blakey 	kfree(ct_priv->fs);
227576909000SPaul Blakey 
2276ae430332SAriel Levkovich 	mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
2277ae430332SAriel Levkovich 	mlx5_chains_destroy_global_table(chains, ct_priv->ct);
22788f5b3c3eSPaul Blakey 	mapping_destroy(ct_priv->zone_mapping);
227954b154ecSEli Britstein 	mapping_destroy(ct_priv->labels_mapping);
22804c3844d9SPaul Blakey 
2281bc562be9SPaul Blakey 	rhashtable_destroy(&ct_priv->ct_tuples_ht);
2282bc562be9SPaul Blakey 	rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
2283ac991b48SPaul Blakey 	rhashtable_destroy(&ct_priv->zone_ht);
22844c3844d9SPaul Blakey 	mutex_destroy(&ct_priv->control_lock);
22854c3844d9SPaul Blakey 	kfree(ct_priv);
22864c3844d9SPaul Blakey }
22875c6b9460SPaul Blakey 
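/* Restore conntrack state onto an skb that missed in hardware: map the zone
 * restore id back to the CT zone, rebuild the tuple from the skb headers,
 * look up the offloaded CT entry and attach its restore cookie via
 * tcf_ct_flow_table_restore_skb(). Returns false and increments the
 * rx_dropped counter if the state can't be restored.
 */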
22885c6b9460SPaul Blakey bool
2289aedd133dSAriel Levkovich mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
22908f5b3c3eSPaul Blakey 			 struct sk_buff *skb, u8 zone_restore_id)
22915c6b9460SPaul Blakey {
2292a8eb919bSPaul Blakey 	struct mlx5_ct_tuple tuple = {};
22935c6b9460SPaul Blakey 	struct mlx5_ct_entry *entry;
22948f5b3c3eSPaul Blakey 	u16 zone;
22955c6b9460SPaul Blakey 
22968f5b3c3eSPaul Blakey 	if (!ct_priv || !zone_restore_id)
22975c6b9460SPaul Blakey 		return true;
22985c6b9460SPaul Blakey 
22998f5b3c3eSPaul Blakey 	if (mapping_find(ct_priv->zone_mapping, zone_restore_id, &zone))
230077422a8fSSaeed Mahameed 		goto out_inc_drop;
23018f5b3c3eSPaul Blakey 
23028f5b3c3eSPaul Blakey 	if (!mlx5_tc_ct_skb_to_tuple(skb, &tuple, zone))
230377422a8fSSaeed Mahameed 		goto out_inc_drop;
23045c6b9460SPaul Blakey 
2305a2173131SOz Shlomo 	spin_lock(&ct_priv->ht_lock);
2306a2173131SOz Shlomo 
2307a2173131SOz Shlomo 	entry = mlx5_tc_ct_entry_get(ct_priv, &tuple);
2308a2173131SOz Shlomo 	if (!entry) {
2309a2173131SOz Shlomo 		spin_unlock(&ct_priv->ht_lock);
231077422a8fSSaeed Mahameed 		goto out_inc_drop;
2311a2173131SOz Shlomo 	}
2312a2173131SOz Shlomo 
2313a2173131SOz Shlomo 	if (IS_ERR(entry)) {
2314a2173131SOz Shlomo 		spin_unlock(&ct_priv->ht_lock);
231577422a8fSSaeed Mahameed 		goto out_inc_drop;
2316a2173131SOz Shlomo 	}
2317a2173131SOz Shlomo 	spin_unlock(&ct_priv->ht_lock);
23185c6b9460SPaul Blakey 
2319a8eb919bSPaul Blakey 	tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie);
2320a2173131SOz Shlomo 	__mlx5_tc_ct_entry_put(entry);
2321a2173131SOz Shlomo 
23225c6b9460SPaul Blakey 	return true;
232377422a8fSSaeed Mahameed 
232477422a8fSSaeed Mahameed out_inc_drop:
232577422a8fSSaeed Mahameed 	atomic_inc(&ct_priv->debugfs.stats.rx_dropped);
232677422a8fSSaeed Mahameed 	return false;
23275c6b9460SPaul Blakey }