// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */

#include <linux/refcount.h>

#include "en_tc.h"
#include "en/tc_priv.h"
#include "en/tc_ct.h"
#include "en/tc/ct_fs.h"

#include "lib/smfs.h"

#define INIT_ERR_PREFIX "ct_fs_smfs init failed"
#define ct_dbg(fmt, args...)\
	netdev_dbg(fs->netdev, "ct_fs_smfs debug: " fmt "\n", ##args)
#define MLX5_CT_TCP_FLAGS_MASK cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16)

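/* A dr matcher shared by all offloaded tuples of the same type
 * (nat/plain, ipv4/ipv6, tcp/udp). Created lazily and refcounted;
 * @prio is unique within the owning table's sorted @used list.
 */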
struct mlx5_ct_fs_smfs_matcher {
	struct mlx5dr_matcher *dr_matcher;
	struct list_head list;
	int prio;
	refcount_t ref;
};

struct mlx5_ct_fs_smfs_matchers {
	struct mlx5_ct_fs_smfs_matcher smfs_matchers[4];
	struct list_head used;
};

struct mlx5_ct_fs_smfs {
	struct mlx5dr_table *ct_tbl, *ct_nat_tbl;
	struct mlx5_ct_fs_smfs_matchers matchers;
	struct mlx5_ct_fs_smfs_matchers matchers_nat;
	struct mlx5dr_action *fwd_action;
	struct mlx5_flow_table *ct_nat;
	struct mutex lock; /* Guards matchers */
};

struct mlx5_ct_fs_smfs_rule {
	struct mlx5_ct_fs_rule fs_rule;
	struct mlx5dr_rule *rule;
	struct mlx5dr_action *count_action;
	struct mlx5_ct_fs_smfs_matcher *smfs_matcher;
};

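/* Build the match criteria (mask) used by a shared matcher: IP version (or
 * ethertype when ip_version matching isn't supported), IP protocol, L3
 * addresses, L4 ports, the RST/FIN TCP flags for TCP, and the CT zone
 * mapped register.
 */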
static inline void
mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);

	if (likely(MLX5_CAP_FLOWTABLE_NIC_RX(fs->dev, ft_field_support.outer_ip_version)))
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
	else
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	if (likely(ipv4)) {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
				 src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	} else {
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xFF,
		       MLX5_FLD_SZ_BYTES(fte_match_set_lyr_2_4,
					 dst_ipv4_dst_ipv6.ipv6_layout.ipv6));
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xFF,
		       MLX5_FLD_SZ_BYTES(fte_match_set_lyr_2_4,
					 src_ipv4_src_ipv6.ipv6_layout.ipv6));
	}

	if (likely(tcp)) {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_sport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_dport);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
			 ntohs(MLX5_CT_TCP_FLAGS_MASK));
	} else {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_sport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_dport);
	}

	mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, 0, MLX5_CT_ZONE_MASK);
}

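/* Create a dr matcher on @tbl at @priority for one tuple type, matching on
 * outer headers and misc2 (mapped register) parameters.
 */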
static struct mlx5dr_matcher *
mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl, bool ipv4,
			       bool tcp, u32 priority)
{
	struct mlx5dr_matcher *dr_matcher;
	struct mlx5_flow_spec *spec;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;

	dr_matcher = mlx5_smfs_matcher_create(tbl, priority, spec);
	kvfree(spec);
	if (!dr_matcher)
		return ERR_PTR(-EINVAL);

	return dr_matcher;
}

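/* Get (or lazily create) the shared matcher for a (nat, ipv4, tcp) tuple
 * type. The fast path is a lockless refcount increment; creation is
 * serialized by fs_smfs->lock, and the new matcher takes the first free
 * priority in the sorted used list.
 */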
static struct mlx5_ct_fs_smfs_matcher *
mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp)
{
	struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
	struct mlx5_ct_fs_smfs_matcher *m, *smfs_matcher;
	struct mlx5_ct_fs_smfs_matchers *matchers;
	struct mlx5dr_matcher *dr_matcher;
	struct mlx5dr_table *tbl;
	struct list_head *prev;
	int prio;

	matchers = nat ? &fs_smfs->matchers_nat : &fs_smfs->matchers;
	smfs_matcher = &matchers->smfs_matchers[ipv4 * 2 + tcp];

	if (refcount_inc_not_zero(&smfs_matcher->ref))
		return smfs_matcher;

	mutex_lock(&fs_smfs->lock);

	/* Retry with the lock held, as another thread might have already created the
	 * relevant matcher while we were waiting for the lock.
	 */
	if (refcount_inc_not_zero(&smfs_matcher->ref))
		goto out_unlock;

	/* Find the next available priority in the sorted used list */
	prio = 0;
	prev = &matchers->used;
	list_for_each_entry(m, &matchers->used, list) {
		prev = &m->list;

		if (m->prio == prio)
			prio = m->prio + 1;
		else
			break;
	}

	tbl = nat ? fs_smfs->ct_nat_tbl : fs_smfs->ct_tbl;
	dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, prio);
	if (IS_ERR(dr_matcher)) {
		netdev_warn(fs->netdev,
			    "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d), err: %ld\n",
			    nat, ipv4, tcp, PTR_ERR(dr_matcher));

		smfs_matcher = ERR_CAST(dr_matcher);
		goto out_unlock;
	}

	smfs_matcher->dr_matcher = dr_matcher;
	smfs_matcher->prio = prio;
	list_add(&smfs_matcher->list, prev);
	refcount_set(&smfs_matcher->ref, 1);

out_unlock:
	mutex_unlock(&fs_smfs->lock);
	return smfs_matcher;
}

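/* Drop a matcher reference; the last put destroys the dr matcher and unlinks
 * it from the used list under fs_smfs->lock.
 */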
static void
mlx5_ct_fs_smfs_matcher_put(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_smfs_matcher *smfs_matcher)
{
	struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);

	if (!refcount_dec_and_mutex_lock(&smfs_matcher->ref, &fs_smfs->lock))
		return;

	mlx5_smfs_matcher_destroy(smfs_matcher->dr_matcher);
	list_del(&smfs_matcher->list);
	mutex_unlock(&fs_smfs->lock);
}

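/* Resolve the dr tables backing the ct, ct_nat and post_ct flow tables and
 * create the shared forward-to-post_ct action used by every offloaded tuple.
 */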
static int
mlx5_ct_fs_smfs_init(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
		     struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct)
{
	struct mlx5dr_table *ct_tbl, *ct_nat_tbl, *post_ct_tbl;
	struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);

	post_ct_tbl = mlx5_smfs_table_get_from_fs_ft(post_ct);
	ct_nat_tbl = mlx5_smfs_table_get_from_fs_ft(ct_nat);
	ct_tbl = mlx5_smfs_table_get_from_fs_ft(ct);
	fs_smfs->ct_nat = ct_nat;

	if (!ct_tbl || !ct_nat_tbl || !post_ct_tbl) {
		netdev_warn(fs->netdev, "ct_fs_smfs: failed to init, missing backing dr tables");
		return -EOPNOTSUPP;
	}

	ct_dbg("using smfs steering");

	fs_smfs->fwd_action = mlx5_smfs_action_create_dest_table(post_ct_tbl);
	if (!fs_smfs->fwd_action)
		return -EINVAL;

	fs_smfs->ct_tbl = ct_tbl;
	fs_smfs->ct_nat_tbl = ct_nat_tbl;
	mutex_init(&fs_smfs->lock);
	INIT_LIST_HEAD(&fs_smfs->matchers.used);
	INIT_LIST_HEAD(&fs_smfs->matchers_nat.used);

	return 0;
}

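/* Only the shared forward action needs freeing here; matchers are already
 * released once all offloaded tuples have been deleted.
 */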
static void
mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs)
{
	struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);

	mlx5_smfs_action_destroy(fs_smfs->fwd_action);
}

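/* smfs offload only handles plain 5-tuple matches: basic/control/meta keys
 * plus either IPv4 or IPv6 addresses, ports, and optionally TCP.
 */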
static inline bool
mlx5_tc_ct_valid_used_dissector_keys(const u32 used_keys)
{
#define DISSECTOR_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name)
	const u32 basic_keys = DISSECTOR_BIT(BASIC) | DISSECTOR_BIT(CONTROL) |
			       DISSECTOR_BIT(PORTS) | DISSECTOR_BIT(META);
	const u32 ipv4_tcp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS) | DISSECTOR_BIT(TCP);
	const u32 ipv4_udp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS);
	const u32 ipv6_tcp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS) | DISSECTOR_BIT(TCP);
	const u32 ipv6_udp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS);

	return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
		used_keys == ipv6_udp);
}

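/* Verify the conntrack tuple uses full masks on exactly the fields covered by
 * the precomputed matcher masks, so it can be offloaded via a shared matcher.
 */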
static bool
mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *flow_rule)
{
	struct flow_match_ipv4_addrs ipv4_addrs;
	struct flow_match_ipv6_addrs ipv6_addrs;
	struct flow_match_control control;
	struct flow_match_basic basic;
	struct flow_match_ports ports;
	struct flow_match_tcp tcp;

	if (!mlx5_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) {
		ct_dbg("rule uses unexpected dissectors (0x%08x)",
		       flow_rule->match.dissector->used_keys);
		return false;
	}

	flow_rule_match_basic(flow_rule, &basic);
	flow_rule_match_control(flow_rule, &control);
	flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
	flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
	flow_rule_match_ports(flow_rule, &ports);
	flow_rule_match_tcp(flow_rule, &tcp);

	if (basic.mask->n_proto != htons(0xFFFF) ||
	    (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
	    basic.mask->ip_proto != 0xFF ||
	    (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP)) {
		ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
		       ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
		       basic.key->ip_proto, basic.mask->ip_proto);
		return false;
	}

	if (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF)) {
		ct_dbg("rule uses ports match (src 0x%04x, dst 0x%04x)",
		       ports.mask->src, ports.mask->dst);
		return false;
	}

	if (basic.key->ip_proto == IPPROTO_TCP && tcp.mask->flags != MLX5_CT_TCP_FLAGS_MASK) {
		ct_dbg("rule uses unexpected tcp match (flags 0x%02x)", tcp.mask->flags);
		return false;
	}

	return true;
}

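/* Offload one conntrack tuple as a dr rule: count, apply the tuple's
 * modify-header action and forward to post_ct, using the shared matcher
 * selected by the nat/ip-version/protocol type derived from the flow spec.
 */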
static struct mlx5_ct_fs_rule *
mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
			    struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
{
	struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
	struct mlx5_ct_fs_smfs_matcher *smfs_matcher;
	struct mlx5_ct_fs_smfs_rule *smfs_rule;
	struct mlx5dr_action *actions[5];
	struct mlx5dr_rule *rule;
	int num_actions = 0, err;
	bool nat, tcp, ipv4;

	if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule))
		return ERR_PTR(-EOPNOTSUPP);

	smfs_rule = kzalloc(sizeof(*smfs_rule), GFP_KERNEL);
	if (!smfs_rule)
		return ERR_PTR(-ENOMEM);

	smfs_rule->count_action = mlx5_smfs_action_create_flow_counter(mlx5_fc_id(attr->counter));
	if (!smfs_rule->count_action) {
		err = -EINVAL;
		goto err_count;
	}

	actions[num_actions++] = smfs_rule->count_action;
	actions[num_actions++] = attr->modify_hdr->action.dr_action;
	actions[num_actions++] = fs_smfs->fwd_action;

	nat = (attr->ft == fs_smfs->ct_nat);
	ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4;
	tcp = MLX5_GET(fte_match_param, spec->match_value,
		       outer_headers.ip_protocol) == IPPROTO_TCP;

	smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp);
	if (IS_ERR(smfs_matcher)) {
		err = PTR_ERR(smfs_matcher);
		goto err_matcher;
	}

	rule = mlx5_smfs_rule_create(smfs_matcher->dr_matcher, spec, num_actions, actions,
				     MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT);
	if (!rule) {
		err = -EINVAL;
		goto err_create;
	}

	smfs_rule->rule = rule;
	smfs_rule->smfs_matcher = smfs_matcher;

	return &smfs_rule->fs_rule;

err_create:
	mlx5_ct_fs_smfs_matcher_put(fs, smfs_matcher);
err_matcher:
	mlx5_smfs_action_destroy(smfs_rule->count_action);
err_count:
	kfree(smfs_rule);
	return ERR_PTR(err);
}

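/* Delete an offloaded tuple and release its matcher and counter references. */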
static void
mlx5_ct_fs_smfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule)
{
	struct mlx5_ct_fs_smfs_rule *smfs_rule = container_of(fs_rule,
							      struct mlx5_ct_fs_smfs_rule,
							      fs_rule);

	mlx5_smfs_rule_destroy(smfs_rule->rule);
	mlx5_ct_fs_smfs_matcher_put(fs, smfs_rule->smfs_matcher);
	mlx5_smfs_action_destroy(smfs_rule->count_action);
	kfree(smfs_rule);
}

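/* Steering ops exposed to the CT layer; priv_size tells it how much private
 * storage (struct mlx5_ct_fs_smfs) to allocate for this backend.
 */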
static struct mlx5_ct_fs_ops fs_smfs_ops = {
	.ct_rule_add = mlx5_ct_fs_smfs_ct_rule_add,
	.ct_rule_del = mlx5_ct_fs_smfs_ct_rule_del,

	.init = mlx5_ct_fs_smfs_init,
	.destroy = mlx5_ct_fs_smfs_destroy,

	.priv_size = sizeof(struct mlx5_ct_fs_smfs),
};

struct mlx5_ct_fs_ops *
mlx5_ct_fs_smfs_ops_get(void)
{
	return &fs_smfs_ops;
}