/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

#include "fs_core.h"
#include "fs_cmd.h"
#include "mlx5_core.h"
#include "eswitch.h"

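/* No-op command callbacks. mlx5_fs_cmd_get_default() hands these out for
 * flow table types that are not backed by firmware commands, so callers can
 * run through the same flow steering code paths without issuing any device
 * commands.
 */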
static int mlx5_cmd_stub_update_root_ft(struct mlx5_core_dev *dev,
					struct mlx5_flow_table *ft,
					u32 underlay_qpn,
					bool disconnect)
{
	return 0;
}

static int mlx5_cmd_stub_create_flow_table(struct mlx5_core_dev *dev,
					   u16 vport,
					   enum fs_flow_table_op_mod op_mod,
					   enum fs_flow_table_type type,
					   unsigned int level,
					   unsigned int log_size,
					   struct mlx5_flow_table *next_ft,
					   unsigned int *table_id, u32 flags)
{
	return 0;
}

static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_core_dev *dev,
					    struct mlx5_flow_table *ft)
{
	return 0;
}

static int mlx5_cmd_stub_modify_flow_table(struct mlx5_core_dev *dev,
					   struct mlx5_flow_table *ft,
					   struct mlx5_flow_table *next_ft)
{
	return 0;
}

static int mlx5_cmd_stub_create_flow_group(struct mlx5_core_dev *dev,
					   struct mlx5_flow_table *ft,
					   u32 *in,
					   unsigned int *group_id)
{
	return 0;
}

static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_core_dev *dev,
					    struct mlx5_flow_table *ft,
					    unsigned int group_id)
{
	return 0;
}

static int mlx5_cmd_stub_create_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    struct fs_fte *fte)
{
	return 0;
}

static int mlx5_cmd_stub_update_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    unsigned int group_id,
				    int modify_mask,
				    struct fs_fte *fte)
{
	return -EOPNOTSUPP;
}

static int mlx5_cmd_stub_delete_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte)
{
	return 0;
}

static int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
				   struct mlx5_flow_table *ft, u32 underlay_qpn,
				   bool disconnect)
{
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};

	/* On IB ports the root table is tied to an underlay QP; nothing to
	 * do until a valid underlay QPN is supplied.
	 */
	if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	    underlay_qpn == 0)
		return 0;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);

	if (disconnect) {
		MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
		MLX5_SET(set_flow_table_root_in, in, table_id, 0);
	} else {
		MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
		MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
	}

	MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
	if (ft->vport) {
		MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
		MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
				      u16 vport,
				      enum fs_flow_table_op_mod op_mod,
				      enum fs_flow_table_type type,
				      unsigned int level,
				      unsigned int log_size,
				      struct mlx5_flow_table *next_ft,
				      unsigned int *table_id, u32 flags)
{
	int en_encap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
	int en_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)]   = {0};
	int err;

	MLX5_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);

	MLX5_SET(create_flow_table_in, in, table_type, type);
	MLX5_SET(create_flow_table_in, in, flow_table_context.level, level);
	MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
	if (vport) {
		MLX5_SET(create_flow_table_in, in, vport_number, vport);
		MLX5_SET(create_flow_table_in, in, other_vport, 1);
	}

	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
		 en_decap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
		 en_encap);

	switch (op_mod) {
	case FS_FT_OP_MOD_NORMAL:
		if (next_ft) {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action, 1);
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_id, next_ft->id);
		}
		break;

	case FS_FT_OP_MOD_LAG_DEMUX:
		MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
		if (next_ft)
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id,
				 next_ft->id);
		break;
	}

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*table_id = MLX5_GET(create_flow_table_out, out,
				     table_id);
	return err;
}

static int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
				       struct mlx5_flow_table *ft)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};

	MLX5_SET(destroy_flow_table_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
	MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
	if (ft->vport) {
		MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
		MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_table *next_ft)
{
	u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};

	MLX5_SET(modify_flow_table_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
	MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
	MLX5_SET(modify_flow_table_in, in, table_id, ft->id);

	if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, 0);
		}
	} else {
		if (ft->vport) {
			MLX5_SET(modify_flow_table_in, in, vport_number,
				 ft->vport);
			MLX5_SET(modify_flow_table_in, in, other_vport, 1);
		}
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action, 1);
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_id,
				 next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action, 0);
		}
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
				      struct mlx5_flow_table *ft,
				      u32 *in,
				      unsigned int *group_id)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	MLX5_SET(create_flow_group_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_type, ft->type);
	MLX5_SET(create_flow_group_in, in, table_id, ft->id);
	if (ft->vport) {
		MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
		MLX5_SET(create_flow_group_in, in, other_vport, 1);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (!err)
		*group_id = MLX5_GET(create_flow_group_out, out,
				     group_id);
	return err;
}

static int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
				       struct mlx5_flow_table *ft,
				       unsigned int group_id)
{
	u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)]   = {0};

	MLX5_SET(destroy_flow_group_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
	if (ft->vport) {
		MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
		MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

/* Decide whether the FTE must use the extended destination format: it is
 * needed when more than one forwarding destination is present and at least
 * one vport destination carries its own packet reformat ID.
 */
static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
				  struct fs_fte *fte, bool *extended_dest)
{
	int fw_log_max_fdb_encap_uplink =
		MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
	int num_fwd_destinations = 0;
	struct mlx5_flow_rule *dst;
	int num_encap = 0;

	*extended_dest = false;
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return 0;

	list_for_each_entry(dst, &fte->node.children, node.list) {
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
			continue;
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		    dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
			num_encap++;
		num_fwd_destinations++;
	}
	if (num_fwd_destinations > 1 && num_encap > 0)
		*extended_dest = true;

	if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
		mlx5_core_warn(dev, "FW does not support extended destination\n");
		return -EOPNOTSUPP;
	}
	if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
		mlx5_core_warn(dev, "FW does not support more than %d encaps\n",
			       1 << fw_log_max_fdb_encap_uplink);
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Build and execute SET_FLOW_TABLE_ENTRY: the flow context (action, flow
 * tag, VLAN push headers, match value), the forwarding destination list and
 * the flow counter list are all packed into a single command buffer.
 */
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
			    int opmod, int modify_mask,
			    struct mlx5_flow_table *ft,
			    unsigned int group_id,
			    struct fs_fte *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
	bool extended_dest = false;
	struct mlx5_flow_rule *dst;
	void *in_flow_context, *vlan;
	void *in_match_value;
	unsigned int inlen;
	int dst_cnt_size;
	void *in_dests;
	u32 *in;
	int err;

	if (mlx5_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id,   ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	if (ft->vport) {
		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
		MLX5_SET(set_fte_in, in, other_vport, 1);
	}

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag);
	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);
	if (extended_dest) {
		u32 action;

		/* With extended destinations the reformat ID lives in the
		 * destination entries, not in the flow context action.
		 */
		action = fte->action.action &
			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		MLX5_SET(flow_context, in_flow_context, action, action);
	} else {
		MLX5_SET(flow_context, in_flow_context, action,
			 fte->action.action);
		MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
			 fte->action.reformat_id);
	}
	MLX5_SET(flow_context, in_flow_context, modify_header_id,
		 fte->action.modify_id);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, &fte->val, sizeof(fte->val));

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			unsigned int id, type = dst->dest_attr.type;

			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				id = dst->dest_attr.ft_num;
				type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = dst->dest_attr.ft->id;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				id = dst->dest_attr.vport.num;
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id_valid,
					 !!(dst->dest_attr.vport.flags &
					    MLX5_FLOW_DEST_VPORT_VHCA_ID));
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 dst->dest_attr.vport.vhca_id);
				if (extended_dest) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(dst->dest_attr.vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 dst->dest_attr.vport.reformat_id);
				}
				break;
			default:
				id = dst->dest_attr.tir_num;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			if (dst->dest_attr.type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 dst->dest_attr.counter_id);
			in_dests += dst_cnt_size;
			list_size++;
		}
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}

static int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
			       struct mlx5_flow_table *ft,
			       struct mlx5_flow_group *group,
			       struct fs_fte *fte)
{
	unsigned int group_id = group->id;

	return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}

static int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
			       struct mlx5_flow_table *ft,
			       unsigned int group_id,
			       int modify_mask,
			       struct fs_fte *fte)
{
	int opmod;
	int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
						flow_table_properties_nic_receive.
						flow_modify_en);
	if (!atomic_mod_cap)
		return -EOPNOTSUPP;
	opmod = 1;

	return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
}

static int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
			       struct mlx5_flow_table *ft,
			       struct fs_fte *fte)
{
	u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)]   = {0};

	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_type, ft->type);
	MLX5_SET(delete_fte_in, in, table_id, ft->id);
	MLX5_SET(delete_fte_in, in, flow_index, fte->index);
	if (ft->vport) {
		MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
		MLX5_SET(delete_fte_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
{
	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
	int err;

	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return err;
}

int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};

	MLX5_SET(dealloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
	MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
		      u64 *packets, u64 *bytes)
{
	u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter)]   = {0};
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
	void *stats;
	int err = 0;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);
	return 0;
}
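
/* Illustrative usage sketch (not taken from this file): a single flow
 * counter is typically allocated, queried and freed with the helpers above.
 * Error handling is omitted for brevity.
 *
 *	u32 counter_id;
 *	u64 packets, bytes;
 *
 *	err = mlx5_cmd_fc_alloc(dev, &counter_id);
 *	...
 *	err = mlx5_cmd_fc_query(dev, counter_id, &packets, &bytes);
 *	...
 *	mlx5_cmd_fc_free(dev, counter_id);
 */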

/* Scratch buffer for a bulk flow counter query covering the counter range
 * [id, id + num): the firmware reply is kept in out[] and individual results
 * are read back with mlx5_cmd_fc_bulk_get().
 */
struct mlx5_cmd_fc_bulk {
	u32 id;
	int num;
	int outlen;
	u32 out[];
};

struct mlx5_cmd_fc_bulk *
mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num)
{
	struct mlx5_cmd_fc_bulk *b;
	int outlen =
		MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter) * num;

	b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
	if (!b)
		return NULL;

	b->id = id;
	b->num = num;
	b->outlen = outlen;

	return b;
}

void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
{
	kfree(b);
}

int
mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
{
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
	MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
	return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
}

void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
			  struct mlx5_cmd_fc_bulk *b, u32 id,
			  u64 *packets, u64 *bytes)
{
	int index = id - b->id;
	void *stats;

	if (index < 0 || index >= b->num) {
		mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
			       id, b->id, b->id + b->num - 1);
		return;
	}

	stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
			     flow_statistics[index]);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);
}
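
/* Illustrative usage sketch (not taken from this file): the bulk interface
 * reads a contiguous range of counters with one QUERY_FLOW_COUNTER command
 * and then picks individual results out of the returned buffer. first_id,
 * num and i are placeholders; error handling is omitted.
 *
 *	struct mlx5_cmd_fc_bulk *b;
 *
 *	b = mlx5_cmd_fc_bulk_alloc(dev, first_id, num);
 *	if (!b)
 *		return -ENOMEM;
 *	err = mlx5_cmd_fc_bulk_query(dev, b);
 *	if (!err)
 *		mlx5_cmd_fc_bulk_get(dev, b, first_id + i, &packets, &bytes);
 *	mlx5_cmd_fc_bulk_free(b);
 */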

int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
			       int reformat_type,
			       size_t size,
			       void *reformat_data,
			       enum mlx5_flow_namespace_type namespace,
			       u32 *packet_reformat_id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)];
	void *packet_reformat_context_in;
	int max_encap_size;
	void *reformat;
	int inlen;
	int err;
	u32 *in;

	if (namespace == MLX5_FLOW_NAMESPACE_FDB)
		max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
	else
		max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);

	if (size > max_encap_size) {
		mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
			       size, max_encap_size);
		return -EINVAL;
	}

	in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) + size,
		     GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in,
						  in, packet_reformat_context);
	reformat = MLX5_ADDR_OF(packet_reformat_context_in,
				packet_reformat_context_in,
				reformat_data);
	inlen = reformat - (void *)in + size;

	memset(in, 0, inlen);
	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_data_size, size);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_type, reformat_type);
	memcpy(reformat, reformat_data, size);

	memset(out, 0, sizeof(out));
	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	*packet_reformat_id = MLX5_GET(alloc_packet_reformat_context_out,
				       out, packet_reformat_id);
	kfree(in);
	return err;
}
EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
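
/* Illustrative usage sketch (not taken from this file): a steering user that
 * needs an encap action allocates a reformat context once and references it
 * from flow entries via fte->action.reformat_id. encap_header and
 * encap_header_size are placeholders; error handling is omitted.
 *
 *	u32 reformat_id;
 *
 *	err = mlx5_packet_reformat_alloc(dev, MLX5_REFORMAT_TYPE_L2_TO_VXLAN,
 *					 encap_header_size, encap_header,
 *					 MLX5_FLOW_NAMESPACE_FDB,
 *					 &reformat_id);
 *	...
 *	mlx5_packet_reformat_dealloc(dev, reformat_id);
 */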

void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
				  u32 packet_reformat_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)];
	u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)];

	memset(in, 0, sizeof(in));
	MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
		 packet_reformat_id);

	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);

int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
			     u8 namespace, u8 num_actions,
			     void *modify_actions, u32 *modify_header_id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)];
	int max_actions, actions_size, inlen, err;
	void *actions_in;
	u8 table_type;
	u32 *in;

	switch (namespace) {
	case MLX5_FLOW_NAMESPACE_FDB:
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
		table_type = FS_FT_FDB;
		break;
	case MLX5_FLOW_NAMESPACE_KERNEL:
	case MLX5_FLOW_NAMESPACE_BYPASS:
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
		table_type = FS_FT_NIC_RX;
		break;
	case MLX5_FLOW_NAMESPACE_EGRESS:
		max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
		table_type = FS_FT_NIC_TX;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (num_actions > max_actions) {
		mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
			       num_actions, max_actions);
		return -EOPNOTSUPP;
	}

	actions_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) * num_actions;
	inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
	MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);

	actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
	memcpy(actions_in, modify_actions, actions_size);

	memset(out, 0, sizeof(out));
	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	*modify_header_id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
	kfree(in);
	return err;
}
EXPORT_SYMBOL(mlx5_modify_header_alloc);
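
/* Illustrative usage sketch (not taken from this file): callers build an
 * array of set_action_in/add_action_in entries and hand it to
 * mlx5_modify_header_alloc(); the returned id is carried in
 * fte->action.modify_id. The field and value below are arbitrary examples;
 * error handling is omitted.
 *
 *	u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
 *	u32 modify_header_id;
 *
 *	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
 *	MLX5_SET(set_action_in, action, field,
 *		 MLX5_ACTION_IN_FIELD_OUT_IP_TTL);
 *	MLX5_SET(set_action_in, action, data, 64);
 *	err = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL, 1,
 *				       action, &modify_header_id);
 *	...
 *	mlx5_modify_header_dealloc(dev, modify_header_id);
 */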

void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)];
	u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)];

	memset(in, 0, sizeof(in));
	MLX5_SET(dealloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
		 modify_header_id);

	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_modify_header_dealloc);

static const struct mlx5_flow_cmds mlx5_flow_cmds = {
	.create_flow_table = mlx5_cmd_create_flow_table,
	.destroy_flow_table = mlx5_cmd_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_modify_flow_table,
	.create_flow_group = mlx5_cmd_create_flow_group,
	.destroy_flow_group = mlx5_cmd_destroy_flow_group,
	.create_fte = mlx5_cmd_create_fte,
	.update_fte = mlx5_cmd_update_fte,
	.delete_fte = mlx5_cmd_delete_fte,
	.update_root_ft = mlx5_cmd_update_root_ft,
};

static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
	.create_flow_table = mlx5_cmd_stub_create_flow_table,
	.destroy_flow_table = mlx5_cmd_stub_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_stub_modify_flow_table,
	.create_flow_group = mlx5_cmd_stub_create_flow_group,
	.destroy_flow_group = mlx5_cmd_stub_destroy_flow_group,
	.create_fte = mlx5_cmd_stub_create_fte,
	.update_fte = mlx5_cmd_stub_update_fte,
	.delete_fte = mlx5_cmd_stub_delete_fte,
	.update_root_ft = mlx5_cmd_stub_update_root_ft,
};

static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
{
	return &mlx5_flow_cmds;
}

static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void)
{
	return &mlx5_flow_cmd_stubs;
}

const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type)
{
	switch (type) {
	case FS_FT_NIC_RX:
	case FS_FT_ESW_EGRESS_ACL:
	case FS_FT_ESW_INGRESS_ACL:
	case FS_FT_FDB:
	case FS_FT_SNIFFER_RX:
	case FS_FT_SNIFFER_TX:
	case FS_FT_NIC_TX:
		return mlx5_fs_cmd_get_fw_cmds();
	default:
		return mlx5_fs_cmd_get_stub_cmds();
	}
}
861