// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */

#include <linux/refcount.h>

#include "en_tc.h"
#include "en/tc_priv.h"
#include "en/tc_ct.h"
#include "en/tc/ct_fs.h"

#include "lib/smfs.h"

#define INIT_ERR_PREFIX "ct_fs_smfs init failed"
#define ct_dbg(fmt, args...)\
	netdev_dbg(fs->netdev, "ct_fs_smfs debug: " fmt "\n", ##args)
#define MLX5_CT_TCP_FLAGS_MASK cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16)

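/* A shared, refcounted SMFS matcher. Matchers are created lazily per
 * (nat, ipv4, tcp, gre) combination and linked into a priority-sorted
 * "used" list, so that each live matcher holds a unique dr priority.
 */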
struct mlx5_ct_fs_smfs_matcher {
	struct mlx5dr_matcher *dr_matcher;
	struct list_head list;
	int prio;
	refcount_t ref;
};

struct mlx5_ct_fs_smfs_matchers {
	struct mlx5_ct_fs_smfs_matcher smfs_matchers[6];
	struct list_head used;
};

struct mlx5_ct_fs_smfs {
	struct mlx5dr_table *ct_tbl, *ct_nat_tbl;
	struct mlx5_ct_fs_smfs_matchers matchers;
	struct mlx5_ct_fs_smfs_matchers matchers_nat;
	struct mlx5dr_action *fwd_action;
	struct mlx5_flow_table *ct_nat;
	struct mutex lock; /* Guards matchers */
};

struct mlx5_ct_fs_smfs_rule {
	struct mlx5_ct_fs_rule fs_rule;
	struct mlx5dr_rule *rule;
	struct mlx5dr_action *count_action;
	struct mlx5_ct_fs_smfs_matcher *smfs_matcher;
};

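/* Fill the match criteria (mask) for a CT tuple: IP version (or ethertype
 * on devices that can't match on outer_ip_version), IP protocol, L3
 * addresses, L4 ports for TCP/UDP, the RST/FIN TCP flags, and the CT zone
 * register. Only the mask is set here; the per-connection match values come
 * from the flow spec at rule-insert time.
 */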
static inline void
mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp,
			  bool gre)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);

	if (likely(MLX5_CAP_FLOWTABLE_NIC_RX(fs->dev, ft_field_support.outer_ip_version)))
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
	else
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	if (likely(ipv4)) {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
				 src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	} else {
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xFF,
		       MLX5_FLD_SZ_BYTES(fte_match_set_lyr_2_4,
					 dst_ipv4_dst_ipv6.ipv6_layout.ipv6));
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xFF,
		       MLX5_FLD_SZ_BYTES(fte_match_set_lyr_2_4,
					 src_ipv4_src_ipv6.ipv6_layout.ipv6));
	}

	if (likely(tcp)) {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_sport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_dport);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
			 ntohs(MLX5_CT_TCP_FLAGS_MASK));
	} else if (!gre) {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_sport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_dport);
	}

	mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, 0, MLX5_CT_ZONE_MASK);
}

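/* Create a dr matcher at the given priority from a temporary flow spec that
 * carries only match criteria. The spec is freed right after creation; the
 * matcher keeps its own copy of the mask.
 */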
static struct mlx5dr_matcher *
mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl, bool ipv4,
			       bool tcp, bool gre, u32 priority)
{
	struct mlx5dr_matcher *dr_matcher;
	struct mlx5_flow_spec *spec;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp, gre);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;

	dr_matcher = mlx5_smfs_matcher_create(tbl, priority, spec);
	kvfree(spec);
	if (!dr_matcher)
		return ERR_PTR(-EINVAL);

	return dr_matcher;
}

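/* Get (or lazily create) the shared matcher for a (nat, ipv4, tcp, gre)
 * combination. The array index packs the three booleans so every supported
 * tuple maps to a distinct slot, e.g. ipv4/tcp -> 1 * 3 + 1 * 2 + 0 = 5 and
 * ipv6/udp -> 0. The fast path is a lockless refcount bump; first-time
 * creation is serialized by fs_smfs->lock.
 */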
static struct mlx5_ct_fs_smfs_matcher *
mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp, bool gre)
{
	struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
	struct mlx5_ct_fs_smfs_matcher *m, *smfs_matcher;
	struct mlx5_ct_fs_smfs_matchers *matchers;
	struct mlx5dr_matcher *dr_matcher;
	struct mlx5dr_table *tbl;
	struct list_head *prev;
	int prio;

	matchers = nat ? &fs_smfs->matchers_nat : &fs_smfs->matchers;
	smfs_matcher = &matchers->smfs_matchers[ipv4 * 3 + tcp * 2 + gre];

	if (refcount_inc_not_zero(&smfs_matcher->ref))
		return smfs_matcher;

	mutex_lock(&fs_smfs->lock);

	/* Retry with the lock held, as another thread might have already
	 * created the relevant matcher before we acquired the lock
	 */
	if (refcount_inc_not_zero(&smfs_matcher->ref))
		goto out_unlock;

	/* Find the next available priority in the sorted used list */
	prio = 0;
	prev = &matchers->used;
	list_for_each_entry(m, &matchers->used, list) {
		prev = &m->list;

		if (m->prio == prio)
			prio = m->prio + 1;
		else
			break;
	}

	tbl = nat ? fs_smfs->ct_nat_tbl : fs_smfs->ct_tbl;
	dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, gre, prio);
	if (IS_ERR(dr_matcher)) {
		netdev_warn(fs->netdev,
			    "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %ld\n",
			    nat, ipv4, tcp, gre, PTR_ERR(dr_matcher));

		smfs_matcher = ERR_CAST(dr_matcher);
		goto out_unlock;
	}

	smfs_matcher->dr_matcher = dr_matcher;
	smfs_matcher->prio = prio;
	list_add(&smfs_matcher->list, prev);
	refcount_set(&smfs_matcher->ref, 1);

out_unlock:
	mutex_unlock(&fs_smfs->lock);
	return smfs_matcher;
}

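/* Drop a reference to a shared matcher. refcount_dec_and_mutex_lock()
 * returns true, with the lock held, only for the last reference; in that
 * case destroy the dr matcher and unlink it from the used list, freeing its
 * priority for reuse.
 */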
static void
mlx5_ct_fs_smfs_matcher_put(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_smfs_matcher *smfs_matcher)
{
	struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);

	if (!refcount_dec_and_mutex_lock(&smfs_matcher->ref, &fs_smfs->lock))
		return;

	mlx5_smfs_matcher_destroy(smfs_matcher->dr_matcher);
	list_del(&smfs_matcher->list);
	mutex_unlock(&fs_smfs->lock);
}

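/* Resolve the dr tables backing the ct, ct_nat and post_ct flow tables and
 * create the forward action that sends matched packets to post_ct. Fails
 * with -EOPNOTSUPP when any of the tables has no software-managed backing
 * table.
 */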
static int
mlx5_ct_fs_smfs_init(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
		     struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct)
{
	struct mlx5dr_table *ct_tbl, *ct_nat_tbl, *post_ct_tbl;
	struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);

	post_ct_tbl = mlx5_smfs_table_get_from_fs_ft(post_ct);
	ct_nat_tbl = mlx5_smfs_table_get_from_fs_ft(ct_nat);
	ct_tbl = mlx5_smfs_table_get_from_fs_ft(ct);
	fs_smfs->ct_nat = ct_nat;

	if (!ct_tbl || !ct_nat_tbl || !post_ct_tbl) {
		netdev_warn(fs->netdev, "ct_fs_smfs: failed to init, missing backing dr tables\n");
		return -EOPNOTSUPP;
	}

	ct_dbg("using smfs steering");

	fs_smfs->fwd_action = mlx5_smfs_action_create_dest_table(post_ct_tbl);
	if (!fs_smfs->fwd_action)
		return -EINVAL;

	fs_smfs->ct_tbl = ct_tbl;
	fs_smfs->ct_nat_tbl = ct_nat_tbl;
	mutex_init(&fs_smfs->lock);
	INIT_LIST_HEAD(&fs_smfs->matchers.used);
	INIT_LIST_HEAD(&fs_smfs->matchers_nat.used);

	return 0;
}

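/* Release the forward action created at init time. All rules, and with them
 * the shared matchers and counter actions, are expected to have been removed
 * through ct_rule_del before this runs.
 */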
static void
mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs)
{
	struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);

	mlx5_smfs_action_destroy(fs_smfs->fwd_action);
}

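/* smfs supports only the exact dissector key sets below: the basic meta keys
 * plus one address family, with ports and TCP flags for TCP, ports alone for
 * UDP, and neither for GRE. Any other combination is rejected.
 */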
static inline bool
mlx5_tc_ct_valid_used_dissector_keys(const u64 used_keys)
{
#define DISS_BIT(name) BIT_ULL(FLOW_DISSECTOR_KEY_ ## name)
	const u64 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) |
			       DISS_BIT(META);
	const u64 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) |
			     DISS_BIT(PORTS) | DISS_BIT(TCP);
	const u64 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) |
			     DISS_BIT(PORTS) | DISS_BIT(TCP);
	const u64 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) |
			     DISS_BIT(PORTS);
	const u64 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) |
			     DISS_BIT(PORTS);
	const u64 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
	const u64 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);

	return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
		used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
}

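/* Check that the offloaded conntrack tuple is something the pre-built
 * matchers can express: a fully-masked n_proto/ip_proto, TCP/UDP/GRE only,
 * fully-masked ports for TCP and UDP, and exactly the RST|FIN flags mask
 * for TCP.
 */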
static bool
mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *flow_rule)
{
	struct flow_match_ipv4_addrs ipv4_addrs;
	struct flow_match_ipv6_addrs ipv6_addrs;
	struct flow_match_control control;
	struct flow_match_basic basic;
	struct flow_match_ports ports;
	struct flow_match_tcp tcp;

	if (!mlx5_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) {
		ct_dbg("rule uses unexpected dissectors (0x%016llx)",
		       flow_rule->match.dissector->used_keys);
		return false;
	}

	flow_rule_match_basic(flow_rule, &basic);
	flow_rule_match_control(flow_rule, &control);
	flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
	flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
	if (basic.key->ip_proto != IPPROTO_GRE)
		flow_rule_match_ports(flow_rule, &ports);
	if (basic.key->ip_proto == IPPROTO_TCP)
		flow_rule_match_tcp(flow_rule, &tcp);

	if (basic.mask->n_proto != htons(0xFFFF) ||
	    (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
	    basic.mask->ip_proto != 0xFF ||
	    (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP &&
	     basic.key->ip_proto != IPPROTO_GRE)) {
		ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
		       ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
		       basic.key->ip_proto, basic.mask->ip_proto);
		return false;
	}

	if (basic.key->ip_proto != IPPROTO_GRE &&
	    (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) {
		ct_dbg("rule uses partially-masked ports match (src 0x%04x, dst 0x%04x)",
		       ports.mask->src, ports.mask->dst);
		return false;
	}

	if (basic.key->ip_proto == IPPROTO_TCP && tcp.mask->flags != MLX5_CT_TCP_FLAGS_MASK) {
		ct_dbg("rule uses unexpected tcp match (flags 0x%02x)", tcp.mask->flags);
		return false;
	}

	return true;
}

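/* Insert one offloaded connection. The action list is fixed: count, the
 * rule's pre-allocated modify-header action, then forward to post_ct. The
 * tuple type (nat/ipv4/tcp/gre) is derived from the flow spec and selects
 * the shared matcher under which the dr rule is created.
 */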
static struct mlx5_ct_fs_rule *
mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
			    struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
{
	struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
	struct mlx5_ct_fs_smfs_matcher *smfs_matcher;
	struct mlx5_ct_fs_smfs_rule *smfs_rule;
	struct mlx5dr_action *actions[5];
	struct mlx5dr_rule *rule;
	int num_actions = 0, err;
	bool nat, tcp, ipv4, gre;

	if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule))
		return ERR_PTR(-EOPNOTSUPP);

	smfs_rule = kzalloc(sizeof(*smfs_rule), GFP_KERNEL);
	if (!smfs_rule)
		return ERR_PTR(-ENOMEM);

	smfs_rule->count_action = mlx5_smfs_action_create_flow_counter(mlx5_fc_id(attr->counter));
	if (!smfs_rule->count_action) {
		err = -EINVAL;
		goto err_count;
	}

	actions[num_actions++] = smfs_rule->count_action;
	actions[num_actions++] = attr->modify_hdr->action.dr_action;
	actions[num_actions++] = fs_smfs->fwd_action;

	nat = (attr->ft == fs_smfs->ct_nat);
	ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4;
	tcp = MLX5_GET(fte_match_param, spec->match_value,
		       outer_headers.ip_protocol) == IPPROTO_TCP;
	gre = MLX5_GET(fte_match_param, spec->match_value,
		       outer_headers.ip_protocol) == IPPROTO_GRE;

	smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp, gre);
	if (IS_ERR(smfs_matcher)) {
		err = PTR_ERR(smfs_matcher);
		goto err_matcher;
	}

	rule = mlx5_smfs_rule_create(smfs_matcher->dr_matcher, spec, num_actions, actions,
				     spec->flow_context.flow_source);
	if (!rule) {
		err = -EINVAL;
		goto err_create;
	}

	smfs_rule->rule = rule;
	smfs_rule->smfs_matcher = smfs_matcher;

	return &smfs_rule->fs_rule;

err_create:
	mlx5_ct_fs_smfs_matcher_put(fs, smfs_matcher);
err_matcher:
	mlx5_smfs_action_destroy(smfs_rule->count_action);
err_count:
	kfree(smfs_rule);
	return ERR_PTR(err);
}

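/* Remove an offloaded connection: destroy the dr rule and the per-rule
 * counter action, and drop the matcher reference taken at add time.
 */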
static void
mlx5_ct_fs_smfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule)
{
	struct mlx5_ct_fs_smfs_rule *smfs_rule = container_of(fs_rule,
							      struct mlx5_ct_fs_smfs_rule,
							      fs_rule);

	mlx5_smfs_rule_destroy(smfs_rule->rule);
	mlx5_ct_fs_smfs_matcher_put(fs, smfs_rule->smfs_matcher);
	mlx5_smfs_action_destroy(smfs_rule->count_action);
	kfree(smfs_rule);
}

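/* The ops vtable handed back to the common mlx5 CT layer; priv_size tells
 * the caller how much private storage to allocate behind struct mlx5_ct_fs,
 * which mlx5_ct_fs_priv() then returns as struct mlx5_ct_fs_smfs.
 */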
static struct mlx5_ct_fs_ops fs_smfs_ops = {
	.ct_rule_add = mlx5_ct_fs_smfs_ct_rule_add,
	.ct_rule_del = mlx5_ct_fs_smfs_ct_rule_del,

	.init = mlx5_ct_fs_smfs_init,
	.destroy = mlx5_ct_fs_smfs_destroy,

	.priv_size = sizeof(struct mlx5_ct_fs_smfs),
};

struct mlx5_ct_fs_ops *
mlx5_ct_fs_smfs_ops_get(void)
{
	return &fs_smfs_ops;
}