1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
3
4 #include <mlx5_core.h>
5 #include "en_accel/fs_tcp.h"
6 #include "fs_core.h"
7
/* Index of the per-L3-protocol accel TCP flow table. */
enum accel_fs_tcp_type {
	ACCEL_FS_IPV4_TCP,
	ACCEL_FS_IPV6_TCP,
	ACCEL_FS_TCP_NUM_TYPES,	/* keep last - array sizes below */
};
13
/* One flow table plus its catch-all default rule per L3 protocol. */
struct mlx5e_accel_fs_tcp {
	struct mlx5e_flow_table tables[ACCEL_FS_TCP_NUM_TYPES];
	struct mlx5_flow_handle *default_rules[ACCEL_FS_TCP_NUM_TYPES];
};
18
fs_accel2tt(enum accel_fs_tcp_type i)19 static enum mlx5_traffic_types fs_accel2tt(enum accel_fs_tcp_type i)
20 {
21 switch (i) {
22 case ACCEL_FS_IPV4_TCP:
23 return MLX5_TT_IPV4_TCP;
24 default: /* ACCEL_FS_IPV6_TCP */
25 return MLX5_TT_IPV6_TCP;
26 }
27 }
28
/* Fill @spec with an IPv4/TCP match (addresses and protocol, not ports)
 * taken from @sk.  Fields are written from the RX packet's point of view:
 * the packet's source address is the socket's peer (inet_daddr) and its
 * destination is the locally bound address (inet_rcv_saddr).
 */
static void accel_fs_tcp_set_ipv4_flow(struct mlx5_flow_spec *spec, struct sock *sk)
{
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_TCP);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);
	/* packet source = peer address */
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
	       &inet_sk(sk)->inet_daddr, 4);
	/* packet destination = local receive address */
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
	       &inet_sk(sk)->inet_rcv_saddr, 4);
	/* exact-match both addresses */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
}
46
47 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of accel_fs_tcp_set_ipv4_flow(): fill @spec with an
 * IPv6/TCP address match for @sk, again in packet direction (packet
 * source = peer sk_v6_daddr, packet destination = local inet6 saddr).
 * The 16-byte criteria masks are set with memset since the IPv6 layout
 * is an opaque byte array in the match param.
 */
static void accel_fs_tcp_set_ipv6_flow(struct mlx5_flow_spec *spec, struct sock *sk)
{
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_TCP);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
	       &sk->sk_v6_daddr, 16);
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
	       &inet6_sk(sk)->saddr, 16);
	memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
	       0xff, 16);
	memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
	       0xff, 16);
}
67 #endif
68
/* Remove a per-socket rule previously installed by mlx5e_accel_fs_add_sk(). */
void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}
73
/* Install a 5-tuple steering rule that directs @sk's received TCP traffic
 * to TIR @tirn, optionally tagging matching packets with @flow_tag.
 *
 * Returns the flow handle on success or an ERR_PTR() on failure; the
 * caller releases it with mlx5e_accel_fs_del_sk().
 */
struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
					       struct sock *sk, u32 tirn,
					       uint32_t flow_tag)
{
	struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
	struct mlx5_flow_destination dest = {};
	struct mlx5e_flow_table *ft = NULL;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	/* Pick the table and address match by socket family */
	switch (sk->sk_family) {
	case AF_INET:
		accel_fs_tcp_set_ipv4_flow(spec, sk);
		ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP];
		/* NOTE(review): ports are printed without ntohs(), i.e. in
		 * network byte order - confirm this is intended for debug.
		 */
		fs_dbg(fs, "%s flow is %pI4:%d -> %pI4:%d\n", __func__,
		       &inet_sk(sk)->inet_rcv_saddr,
		       inet_sk(sk)->inet_sport,
		       &inet_sk(sk)->inet_daddr,
		       inet_sk(sk)->inet_dport);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		/* A v4-mapped v6 socket carries IPv4 traffic: steer it
		 * through the IPv4 table unless the socket is v6-only.
		 */
		if (!ipv6_only_sock(sk) &&
		    ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
			accel_fs_tcp_set_ipv4_flow(spec, sk);
			ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP];
		} else {
			accel_fs_tcp_set_ipv6_flow(spec, sk);
			ft = &fs_tcp->tables[ACCEL_FS_IPV6_TCP];
		}
		break;
#endif
	default:
		break;
	}

	if (!ft) {	/* unsupported address family */
		flow = ERR_PTR(-EINVAL);
		goto out;
	}

	/* Match in packet direction: the packet's dport is our local sport,
	 * its sport is the peer's port (our dport) - the swap is intentional.
	 */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.tcp_dport);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.tcp_sport);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport,
		 ntohs(inet_sk(sk)->inet_sport));
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport,
		 ntohs(inet_sk(sk)->inet_dport));

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;
	/* Only carry a flow tag when the caller asked for a non-default one */
	if (flow_tag != MLX5_FS_DEFAULT_FLOW_TAG) {
		spec->flow_context.flow_tag = flow_tag;
		spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
	}

	flow = mlx5_add_flow_rules(ft->t, spec, &flow_act, &dest, 1);

	if (IS_ERR(flow))
		fs_err(fs, "mlx5_add_flow_rules() failed, flow is %ld\n", PTR_ERR(flow));

out:
	kvfree(spec);
	return flow;
}
147
accel_fs_tcp_add_default_rule(struct mlx5e_flow_steering * fs,enum accel_fs_tcp_type type)148 static int accel_fs_tcp_add_default_rule(struct mlx5e_flow_steering *fs,
149 enum accel_fs_tcp_type type)
150 {
151 struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
152 struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
153 struct mlx5e_flow_table *accel_fs_t;
154 struct mlx5_flow_destination dest;
155 MLX5_DECLARE_FLOW_ACT(flow_act);
156 struct mlx5_flow_handle *rule;
157 int err = 0;
158
159 accel_fs_t = &fs_tcp->tables[type];
160
161 dest = mlx5_ttc_get_default_dest(ttc, fs_accel2tt(type));
162 rule = mlx5_add_flow_rules(accel_fs_t->t, NULL, &flow_act, &dest, 1);
163 if (IS_ERR(rule)) {
164 err = PTR_ERR(rule);
165 fs_err(fs, "%s: add default rule failed, accel_fs type=%d, err %d\n",
166 __func__, type, err);
167 return err;
168 }
169
170 fs_tcp->default_rules[type] = rule;
171 return 0;
172 }
173
/* Two flow groups per table: a large 5-tuple group for per-socket rules
 * and a single-entry wildcard group for the default (catch-all) rule.
 */
#define MLX5E_ACCEL_FS_TCP_NUM_GROUPS		(2)
#define MLX5E_ACCEL_FS_TCP_GROUP1_SIZE		(BIT(16) - 1)
#define MLX5E_ACCEL_FS_TCP_GROUP2_SIZE		(BIT(0))
#define MLX5E_ACCEL_FS_TCP_TABLE_SIZE		(MLX5E_ACCEL_FS_TCP_GROUP1_SIZE +\
						 MLX5E_ACCEL_FS_TCP_GROUP2_SIZE)
/* Create the two flow groups of @ft, with match criteria chosen by @type.
 * On failure the already-created group (if any) is left for the caller to
 * destroy via mlx5e_destroy_flow_table().  Returns 0 or a negative errno.
 */
static int accel_fs_tcp_create_groups(struct mlx5e_flow_table *ft,
				      enum accel_fs_tcp_type type)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *outer_headers_c;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_ACCEL_FS_TCP_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in || !ft->g) {
		kfree(ft->g);
		ft->g = NULL;
		kvfree(in);
		return -ENOMEM;
	}

	/* Common criteria: protocol + IP version */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_version);

	/* Both types match on the TCP port pair */
	switch (type) {
	case ACCEL_FS_IPV4_TCP:
	case ACCEL_FS_IPV6_TCP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport);
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	/* Address criteria differ by IP version */
	switch (type) {
	case ACCEL_FS_IPV4_TCP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
				 src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
		break;
	case ACCEL_FS_IPV6_TCP:
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	/* Group 1: full 5-tuple match, holds the per-socket rules */
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_ACCEL_FS_TCP_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Default Flow Group: no criteria, single catch-all entry */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_ACCEL_FS_TCP_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;	/* don't leave ERR_PTR in the array */
out:
	kvfree(in);

	return err;
}
264
/* Create one accel TCP flow table (for @type), its groups, and its
 * default rule.  On any failure the table is destroyed again, so the
 * caller only ever sees a fully built table or an error.
 */
static int accel_fs_tcp_create_table(struct mlx5e_flow_steering *fs, enum accel_fs_tcp_type type)
{
	struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
	struct mlx5e_flow_table *ft = &accel_tcp->tables[type];
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_ACCEL_FS_TCP_TABLE_SIZE;
	ft_attr.level = MLX5E_ACCEL_FS_TCP_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;	/* keep destroy paths from touching ERR_PTR */
		return err;
	}

	fs_dbg(fs, "Created fs accel table id %u level %u\n",
	       ft->t->id, ft->t->level);

	err = accel_fs_tcp_create_groups(ft, type);
	if (err)
		goto err;

	err = accel_fs_tcp_add_default_rule(fs, type);
	if (err)
		goto err;

	return 0;
err:
	mlx5e_destroy_flow_table(ft);
	return err;
}
302
accel_fs_tcp_disable(struct mlx5e_flow_steering * fs)303 static int accel_fs_tcp_disable(struct mlx5e_flow_steering *fs)
304 {
305 struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
306 int err, i;
307
308 for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
309 /* Modify ttc rules destination to point back to the indir TIRs */
310 err = mlx5_ttc_fwd_default_dest(ttc, fs_accel2tt(i));
311 if (err) {
312 fs_err(fs,
313 "%s: modify ttc[%d] default destination failed, err(%d)\n",
314 __func__, fs_accel2tt(i), err);
315 return err;
316 }
317 }
318
319 return 0;
320 }
321
accel_fs_tcp_enable(struct mlx5e_flow_steering * fs)322 static int accel_fs_tcp_enable(struct mlx5e_flow_steering *fs)
323 {
324 struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
325 struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
326 struct mlx5_flow_destination dest = {};
327 int err, i;
328
329 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
330 for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
331 dest.ft = accel_tcp->tables[i].t;
332
333 /* Modify ttc rules destination to point on the accel_fs FTs */
334 err = mlx5_ttc_fwd_dest(ttc, fs_accel2tt(i), &dest);
335 if (err) {
336 fs_err(fs, "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
337 __func__, fs_accel2tt(i), err);
338 return err;
339 }
340 }
341 return 0;
342 }
343
accel_fs_tcp_destroy_table(struct mlx5e_flow_steering * fs,int i)344 static void accel_fs_tcp_destroy_table(struct mlx5e_flow_steering *fs, int i)
345 {
346 struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
347
348 if (IS_ERR_OR_NULL(fs_tcp->tables[i].t))
349 return;
350
351 mlx5_del_flow_rules(fs_tcp->default_rules[i]);
352 mlx5e_destroy_flow_table(&fs_tcp->tables[i]);
353 fs_tcp->tables[i].t = NULL;
354 }
355
/* Full teardown: detach the TTC from our tables first, then destroy the
 * tables, then free the context.  No-op if create never succeeded.
 */
void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
	int i;

	if (!accel_tcp)
		return;

	/* Re-point TTC rules away from the tables before destroying them */
	accel_fs_tcp_disable(fs);

	for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)
		accel_fs_tcp_destroy_table(fs, i);

	kfree(accel_tcp);
	mlx5e_fs_set_accel_tcp(fs, NULL);
}
372
/* Create the accel TCP steering context: one table per L3 protocol, then
 * redirect the TTC TCP rules into them.  Returns 0, -EOPNOTSUPP when the
 * device cannot match on outer IP version, or another negative errno.
 */
int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_accel_fs_tcp *accel_tcp;
	int i, err;

	if (!MLX5_CAP_FLOWTABLE_NIC_RX(mlx5e_fs_get_mdev(fs), ft_field_support.outer_ip_version))
		return -EOPNOTSUPP;

	accel_tcp = kzalloc(sizeof(*accel_tcp), GFP_KERNEL);
	if (!accel_tcp)
		return -ENOMEM;
	/* Publish before table creation - helpers look the context up via fs */
	mlx5e_fs_set_accel_tcp(fs, accel_tcp);

	for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
		err = accel_fs_tcp_create_table(fs, i);
		if (err)
			goto err_destroy_tables;
	}

	err = accel_fs_tcp_enable(fs);
	if (err)
		goto err_destroy_tables;

	return 0;

err_destroy_tables:
	/* Unwinds correctly from both failure points: after the loop i equals
	 * ACCEL_FS_TCP_NUM_TYPES, so --i walks every created table; a mid-loop
	 * failure skips the table that was never built.
	 */
	while (--i >= 0)
		accel_fs_tcp_destroy_table(fs, i);
	kfree(accel_tcp);
	mlx5e_fs_set_accel_tcp(fs, NULL);
	return err;
}
405