/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

#include "fs_core.h"
#include "fs_cmd.h"
#include "fs_ft_pool.h"
#include "mlx5_core.h"
#include "eswitch.h"

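/*
 * Stub implementations of the flow steering command interface. These do not
 * issue any firmware command: most of them simply report success so the
 * generic fs_core logic can run unchanged (update_fte is the exception and
 * returns -EOPNOTSUPP).
 */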
static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
					struct mlx5_flow_table *ft,
					u32 underlay_qpn,
					bool disconnect)
{
	return 0;
}

static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   unsigned int size,
					   struct mlx5_flow_table *next_ft)
{
	ft->max_fte = size ? roundup_pow_of_two(size) : 1;

	return 0;
}

static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft)
{
	return 0;
}

static int mlx5_cmd_stub_modify_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   struct mlx5_flow_table *next_ft)
{
	return 0;
}

static int mlx5_cmd_stub_create_flow_group(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   u32 *in,
					   struct mlx5_flow_group *fg)
{
	return 0;
}

static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft,
					    struct mlx5_flow_group *fg)
{
	return 0;
}

static int mlx5_cmd_stub_create_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    struct fs_fte *fte)
{
	return 0;
}

static int mlx5_cmd_stub_update_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    int modify_mask,
				    struct fs_fte *fte)
{
	return -EOPNOTSUPP;
}

static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte)
{
	return 0;
}

static int mlx5_cmd_stub_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					       struct mlx5_pkt_reformat_params *params,
					       enum mlx5_flow_namespace_type namespace,
					       struct mlx5_pkt_reformat *pkt_reformat)
{
	return 0;
}

static void mlx5_cmd_stub_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
						  struct mlx5_pkt_reformat *pkt_reformat)
{
}

static int mlx5_cmd_stub_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					     u8 namespace, u8 num_actions,
					     void *modify_actions,
					     struct mlx5_modify_hdr *modify_hdr)
{
	return 0;
}

static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
						struct mlx5_modify_hdr *modify_hdr)
{
}

static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
				  struct mlx5_flow_root_namespace *peer_ns)
{
	return 0;
}

static int mlx5_cmd_stub_create_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}

static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}

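/*
 * Program the FDB root flow table on the slave device of a shared-FDB LAG.
 * When @ft_id_valid is set, point the slave at @ft_id owned by @master
 * (referenced through the eswitch owner vhca_id); otherwise restore the
 * slave's own FDB root table.
 */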
static int mlx5_cmd_set_slave_root_fdb(struct mlx5_core_dev *master,
				       struct mlx5_core_dev *slave,
				       bool ft_id_valid,
				       u32 ft_id)
{
	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_namespace *ns;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type,
		 FS_FT_FDB);
	if (ft_id_valid) {
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id_valid, 1);
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(master, vhca_id));
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 ft_id);
	} else {
		ns = mlx5_get_flow_namespace(slave,
					     MLX5_FLOW_NAMESPACE_FDB);
		root = find_root(&ns->node);
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 root->root_ft->id);
	}

	return mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
}

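/*
 * Point the device root of @ft->type at @ft (or disconnect it when
 * @disconnect is set), and keep the shared-FDB peer in sync when this
 * device is the LAG master.
 */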
static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
				   struct mlx5_flow_table *ft, u32 underlay_qpn,
				   bool disconnect)
{
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	    underlay_qpn == 0)
		return 0;

	if (ft->type == FS_FT_FDB &&
	    mlx5_lag_is_shared_fdb(dev) &&
	    !mlx5_lag_is_master(dev))
		return 0;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);

	if (disconnect)
		MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
	else
		MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);

	MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
	MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
	MLX5_SET(set_flow_table_root_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	err = mlx5_cmd_exec_in(dev, set_flow_table_root, in);
	if (!err &&
	    ft->type == FS_FT_FDB &&
	    mlx5_lag_is_shared_fdb(dev) &&
	    mlx5_lag_is_master(dev)) {
		err = mlx5_cmd_set_slave_root_fdb(dev,
						  mlx5_lag_get_peer_mdev(dev),
						  !disconnect, (!disconnect) ?
						  ft->id : 0);
		if (err && !disconnect) {
			MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
			MLX5_SET(set_flow_table_root_in, in, table_id,
				 ns->root_ft->id);
			mlx5_cmd_exec_in(dev, set_flow_table_root, in);
		}
	}

	return err;
}

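/*
 * Create a flow table in firmware. The requested size is rounded up and
 * reserved from the flow table pool; the reservation is returned to the
 * pool if the command fails.
 */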
static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      unsigned int size,
				      struct mlx5_flow_table *next_ft)
{
	int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
	int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
	int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	if (size != POOL_NEXT_SIZE)
		size = roundup_pow_of_two(size);
	size = mlx5_ft_pool_get_avail_sz(dev, ft->type, size);
	if (!size)
		return -ENOSPC;

	MLX5_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);

	MLX5_SET(create_flow_table_in, in, table_type, ft->type);
	MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
	MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, size ? ilog2(size) : 0);
	MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
	MLX5_SET(create_flow_table_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
		 en_decap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
		 en_encap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.termination_table,
		 term);

	switch (ft->op_mod) {
	case FS_FT_OP_MOD_NORMAL:
		if (next_ft) {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_id, next_ft->id);
		} else {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ft->def_miss_action);
		}
		break;

	case FS_FT_OP_MOD_LAG_DEMUX:
		MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
		if (next_ft)
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id,
				 next_ft->id);
		break;
	}

	err = mlx5_cmd_exec_inout(dev, create_flow_table, in, out);
	if (!err) {
		ft->id = MLX5_GET(create_flow_table_out, out,
				  table_id);
		ft->max_fte = size;
	} else {
		mlx5_ft_pool_put_sz(ns->dev, size);
	}

	return err;
}

static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(destroy_flow_table_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
	MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
	MLX5_SET(destroy_flow_table_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	err = mlx5_cmd_exec_in(dev, destroy_flow_table, in);
	if (!err)
		mlx5_ft_pool_put_sz(ns->dev, ft->max_fte);

	return err;
}

static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_table *next_ft)
{
	u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(modify_flow_table_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
	MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
	MLX5_SET(modify_flow_table_in, in, table_id, ft->id);

	if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, 0);
		}
	} else {
		MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
		MLX5_SET(modify_flow_table_in, in, other_vport,
			 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_id,
				 next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ft->def_miss_action);
		}
	}

	return mlx5_cmd_exec_in(dev, modify_flow_table, in);
}

static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      u32 *in,
				      struct mlx5_flow_group *fg)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(create_flow_group_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_type, ft->type);
	MLX5_SET(create_flow_group_in, in, table_id, ft->id);
	MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
	MLX5_SET(create_flow_group_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out);
	if (!err)
		fg->id = MLX5_GET(create_flow_group_out, out,
				  group_id);
	return err;
}

static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft,
				       struct mlx5_flow_group *fg)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(destroy_flow_group_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_group_in, in, group_id, fg->id);
	MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
	MLX5_SET(destroy_flow_group_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	return mlx5_cmd_exec_in(dev, destroy_flow_group, in);
}

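/*
 * Decide whether @fte needs the extended destination format, i.e. it
 * forwards to more than one destination and at least one of them carries a
 * per-destination packet reformat. Fails if the firmware capability is
 * missing or the number of encap destinations exceeds it.
 */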
static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
				  struct fs_fte *fte, bool *extended_dest)
{
	int fw_log_max_fdb_encap_uplink =
		MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
	int num_fwd_destinations = 0;
	struct mlx5_flow_rule *dst;
	int num_encap = 0;

	*extended_dest = false;
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return 0;

	list_for_each_entry(dst, &fte->node.children, node.list) {
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
			continue;
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		    dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
			num_encap++;
		num_fwd_destinations++;
	}
	if (num_fwd_destinations > 1 && num_encap > 0)
		*extended_dest = true;

	if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
		mlx5_core_warn(dev, "FW does not support extended destination");
		return -EOPNOTSUPP;
	}
	if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
		mlx5_core_warn(dev, "FW does not support more than %d encaps",
			       1 << fw_log_max_fdb_encap_uplink);
		return -EOPNOTSUPP;
	}

	return 0;
}

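/*
 * Build and execute a SET_FLOW_TABLE_ENTRY command from @fte: flow context,
 * actions, push-VLAN headers, match value and the forward/counter
 * destination lists.
 */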
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
			    int opmod, int modify_mask,
			    struct mlx5_flow_table *ft,
			    unsigned int group_id,
			    struct fs_fte *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
	bool extended_dest = false;
	struct mlx5_flow_rule *dst;
	void *in_flow_context, *vlan;
	void *in_match_value;
	unsigned int inlen;
	int dst_cnt_size;
	void *in_dests;
	u32 *in;
	int err;

	if (mlx5_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id,   ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	MLX5_SET(set_fte_in, in, ignore_flow_level,
		 !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));

	MLX5_SET(set_fte_in, in, vport_number, ft->vport);
	MLX5_SET(set_fte_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag,
		 fte->flow_context.flow_tag);
	MLX5_SET(flow_context, in_flow_context, flow_source,
		 fte->flow_context.flow_source);

	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);
	if (extended_dest) {
		u32 action;

		action = fte->action.action &
			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		MLX5_SET(flow_context, in_flow_context, action, action);
	} else {
		MLX5_SET(flow_context, in_flow_context, action,
			 fte->action.action);
		if (fte->action.pkt_reformat)
			MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
				 fte->action.pkt_reformat->id);
	}
	if (fte->action.modify_hdr)
		MLX5_SET(flow_context, in_flow_context, modify_header_id,
			 fte->action.modify_hdr->id);

	MLX5_SET(flow_context, in_flow_context, ipsec_obj_id, fte->action.ipsec_obj_id);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, &fte->val, sizeof(fte->val));

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			unsigned int id, type = dst->dest_attr.type;

			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				id = dst->dest_attr.ft_num;
				type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = dst->dest_attr.ft->id;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				id = dst->dest_attr.vport.num;
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id_valid,
					 !!(dst->dest_attr.vport.flags &
					    MLX5_FLOW_DEST_VPORT_VHCA_ID));
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 dst->dest_attr.vport.vhca_id);
				if (extended_dest &&
				    dst->dest_attr.vport.pkt_reformat) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(dst->dest_attr.vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 dst->dest_attr.vport.pkt_reformat->id);
				}
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
				id = dst->dest_attr.sampler_id;
				break;
			default:
				id = dst->dest_attr.tir_num;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			if (dst->dest_attr.type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 dst->dest_attr.counter_id);
			in_dests += dst_cnt_size;
			list_size++;
		}
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}

static int mlx5_cmd_create_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct mlx5_flow_group *group,
			       struct fs_fte *fte)
{
	struct mlx5_core_dev *dev = ns->dev;
	unsigned int group_id = group->id;

	return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}

static int mlx5_cmd_update_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct mlx5_flow_group *fg,
			       int modify_mask,
			       struct fs_fte *fte)
{
	int opmod;
	struct mlx5_core_dev *dev = ns->dev;
	int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
						flow_table_properties_nic_receive.
						flow_modify_en);
	if (!atomic_mod_cap)
		return -EOPNOTSUPP;
	opmod = 1;

	return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, fg->id, fte);
}

static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct fs_fte *fte)
{
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_type, ft->type);
	MLX5_SET(delete_fte_in, in, table_id, ft->id);
	MLX5_SET(delete_fte_in, in, flow_index, fte->index);
	MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
	MLX5_SET(delete_fte_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	return mlx5_cmd_exec_in(dev, delete_fte, in);
}

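/*
 * Allocate a bulk of flow counters; @alloc_bitmask selects the bulk size
 * and @id returns the base counter id. A zero bitmask allocates a single
 * counter (see mlx5_cmd_fc_alloc() below).
 */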
int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
			   enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
			   u32 *id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {};
	int err;

	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);

	err = mlx5_cmd_exec_inout(dev, alloc_flow_counter, in, out);
	if (!err)
		*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return err;
}

int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
{
	return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
}

int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {};

	MLX5_SET(dealloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
	MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
	return mlx5_cmd_exec_in(dev, dealloc_flow_counter, in);
}

int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
		      u64 *packets, u64 *bytes)
{
	u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter)] = {};
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
	void *stats;
	int err = 0;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);
	return 0;
}

int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
{
	return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
}

int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
			   u32 *out)
{
	int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
	MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}

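/*
 * Allocate a packet reformat (encap/decap) context in firmware. The
 * reformat data size is bounded by the max_encap_header_size capability of
 * the relevant namespace.
 */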
static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					  struct mlx5_pkt_reformat_params *params,
					  enum mlx5_flow_namespace_type namespace,
					  struct mlx5_pkt_reformat *pkt_reformat)
{
	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	void *packet_reformat_context_in;
	int max_encap_size;
	void *reformat;
	int inlen;
	int err;
	u32 *in;

	if (namespace == MLX5_FLOW_NAMESPACE_FDB)
		max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
	else
		max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);

	if (params->size > max_encap_size) {
		mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
			       params->size, max_encap_size);
		return -EINVAL;
	}

	in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) +
		     params->size, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in,
						  in, packet_reformat_context);
	reformat = MLX5_ADDR_OF(packet_reformat_context_in,
				packet_reformat_context_in,
				reformat_data);
	inlen = reformat - (void *)in + params->size;

	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_data_size, params->size);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_type, params->type);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_param_0, params->param_0);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_param_1, params->param_1);
	if (params->data && params->size)
		memcpy(reformat, params->data, params->size);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out,
				    out, packet_reformat_id);
	kfree(in);
	return err;
}

static void mlx5_cmd_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
					     struct mlx5_pkt_reformat *pkt_reformat)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
		 pkt_reformat->id);

	mlx5_cmd_exec_in(dev, dealloc_packet_reformat_context, in);
}

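/*
 * Allocate a modify-header context holding @num_actions rewrite actions.
 * The namespace is mapped to a firmware table type and the action count is
 * checked against the matching max_modify_header_actions capability.
 */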
static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					u8 namespace, u8 num_actions,
					void *modify_actions,
					struct mlx5_modify_hdr *modify_hdr)
{
	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
	int max_actions, actions_size, inlen, err;
	struct mlx5_core_dev *dev = ns->dev;
	void *actions_in;
	u8 table_type;
	u32 *in;

	switch (namespace) {
	case MLX5_FLOW_NAMESPACE_FDB:
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
		table_type = FS_FT_FDB;
		break;
	case MLX5_FLOW_NAMESPACE_KERNEL:
	case MLX5_FLOW_NAMESPACE_BYPASS:
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
		table_type = FS_FT_NIC_RX;
		break;
	case MLX5_FLOW_NAMESPACE_EGRESS:
#ifdef CONFIG_MLX5_IPSEC
	case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
#endif
		max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
		table_type = FS_FT_NIC_TX;
		break;
	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
		max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions);
		table_type = FS_FT_ESW_INGRESS_ACL;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX:
		max_actions = MLX5_CAP_FLOWTABLE_RDMA_TX(dev, max_modify_header_actions);
		table_type = FS_FT_RDMA_TX;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (num_actions > max_actions) {
		mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
			       num_actions, max_actions);
		return -EOPNOTSUPP;
	}

	actions_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
	inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
	MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);

	actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
	memcpy(actions_in, modify_actions, actions_size);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
	kfree(in);
	return err;
}

static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_modify_hdr *modify_hdr)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(dealloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
		 modify_hdr->id);

	mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in);
}

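/*
 * Two command backends: mlx5_flow_cmds issues real firmware commands, while
 * mlx5_flow_cmd_stubs is the no-op fallback returned by
 * mlx5_fs_cmd_get_default() for table types not handled here.
 */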
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
	.create_flow_table = mlx5_cmd_create_flow_table,
	.destroy_flow_table = mlx5_cmd_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_modify_flow_table,
	.create_flow_group = mlx5_cmd_create_flow_group,
	.destroy_flow_group = mlx5_cmd_destroy_flow_group,
	.create_fte = mlx5_cmd_create_fte,
	.update_fte = mlx5_cmd_update_fte,
	.delete_fte = mlx5_cmd_delete_fte,
	.update_root_ft = mlx5_cmd_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_modify_header_dealloc,
	.set_peer = mlx5_cmd_stub_set_peer,
	.create_ns = mlx5_cmd_stub_create_ns,
	.destroy_ns = mlx5_cmd_stub_destroy_ns,
};

static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
	.create_flow_table = mlx5_cmd_stub_create_flow_table,
	.destroy_flow_table = mlx5_cmd_stub_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_stub_modify_flow_table,
	.create_flow_group = mlx5_cmd_stub_create_flow_group,
	.destroy_flow_group = mlx5_cmd_stub_destroy_flow_group,
	.create_fte = mlx5_cmd_stub_create_fte,
	.update_fte = mlx5_cmd_stub_update_fte,
	.delete_fte = mlx5_cmd_stub_delete_fte,
	.update_root_ft = mlx5_cmd_stub_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_stub_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_stub_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc,
	.set_peer = mlx5_cmd_stub_set_peer,
	.create_ns = mlx5_cmd_stub_create_ns,
	.destroy_ns = mlx5_cmd_stub_destroy_ns,
};

const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
{
	return &mlx5_flow_cmds;
}

static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void)
{
	return &mlx5_flow_cmd_stubs;
}

const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type)
{
	switch (type) {
	case FS_FT_NIC_RX:
	case FS_FT_ESW_EGRESS_ACL:
	case FS_FT_ESW_INGRESS_ACL:
	case FS_FT_FDB:
	case FS_FT_SNIFFER_RX:
	case FS_FT_SNIFFER_TX:
	case FS_FT_NIC_TX:
	case FS_FT_RDMA_RX:
	case FS_FT_RDMA_TX:
		return mlx5_fs_cmd_get_fw_cmds();
	default:
		return mlx5_fs_cmd_get_stub_cmds();
	}
}