1 /*
2  * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/mlx5/driver.h>
34 #include <linux/mlx5/device.h>
35 #include <linux/mlx5/mlx5_ifc.h>
36 
37 #include "fs_core.h"
38 #include "fs_cmd.h"
39 #include "mlx5_core.h"
40 #include "eswitch.h"
41 
/* No-op update_root_ft for namespaces with no firmware backing;
 * always reports success.
 */
static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
					struct mlx5_flow_table *ft,
					u32 underlay_qpn,
					bool disconnect)
{
	return 0;
}
49 
/* No-op create_flow_table stub; software-only tables need no FW command. */
static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   unsigned int log_size,
					   struct mlx5_flow_table *next_ft)
{
	return 0;
}
57 
/* No-op destroy_flow_table stub. */
static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft)
{
	return 0;
}
63 
/* No-op modify_flow_table stub. */
static int mlx5_cmd_stub_modify_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   struct mlx5_flow_table *next_ft)
{
	return 0;
}
70 
/* No-op create_flow_group stub. */
static int mlx5_cmd_stub_create_flow_group(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   u32 *in,
					   struct mlx5_flow_group *fg)
{
	return 0;
}
78 
/* No-op destroy_flow_group stub. */
static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft,
					    struct mlx5_flow_group *fg)
{
	return 0;
}
85 
/* No-op create_fte stub. */
static int mlx5_cmd_stub_create_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    struct fs_fte *fte)
{
	return 0;
}
93 
/* update_fte stub: unlike the other stubs this fails, so callers never
 * believe an entry modification succeeded when nothing was programmed.
 */
static int mlx5_cmd_stub_update_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    int modify_mask,
				    struct fs_fte *fte)
{
	return -EOPNOTSUPP;
}
102 
/* No-op delete_fte stub. */
static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte)
{
	return 0;
}
109 
110 static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
111 				   struct mlx5_flow_table *ft, u32 underlay_qpn,
112 				   bool disconnect)
113 {
114 	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)]   = {0};
115 	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
116 	struct mlx5_core_dev *dev = ns->dev;
117 
118 	if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
119 	    underlay_qpn == 0)
120 		return 0;
121 
122 	MLX5_SET(set_flow_table_root_in, in, opcode,
123 		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
124 	MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
125 
126 	if (disconnect) {
127 		MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
128 		MLX5_SET(set_flow_table_root_in, in, table_id, 0);
129 	} else {
130 		MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
131 		MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
132 	}
133 
134 	MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
135 	if (ft->vport) {
136 		MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
137 		MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
138 	}
139 
140 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
141 }
142 
/* Create @ft in firmware via CREATE_FLOW_TABLE and store the assigned
 * table id in @ft->id on success.
 *
 * @log_size: log2 of the number of entries the table can hold.
 * @next_ft:  table to forward to on miss (NORMAL mode) or the LAG
 *            demux next table; may be NULL.
 */
static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      unsigned int log_size,
				      struct mlx5_flow_table *next_ft)
{
	int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
	int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)]   = {0};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);

	MLX5_SET(create_flow_table_in, in, table_type, ft->type);
	MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
	MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
	/* A non-zero vport means the table belongs to another vport. */
	if (ft->vport) {
		MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
		MLX5_SET(create_flow_table_in, in, other_vport, 1);
	}

	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
		 en_decap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
		 en_encap);

	switch (ft->op_mod) {
	case FS_FT_OP_MOD_NORMAL:
		/* Miss behavior: forward to @next_ft if one is chained,
		 * otherwise fall back to the namespace default action.
		 */
		if (next_ft) {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_id, next_ft->id);
		} else {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ns->def_miss_action);
		}
		break;

	case FS_FT_OP_MOD_LAG_DEMUX:
		/* op_mod 1 selects LAG demux table creation. */
		MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
		if (next_ft)
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id,
				 next_ft->id);
		break;
	}

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		ft->id = MLX5_GET(create_flow_table_out, out,
				  table_id);
	return err;
}
201 
202 static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
203 				       struct mlx5_flow_table *ft)
204 {
205 	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)]   = {0};
206 	u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
207 	struct mlx5_core_dev *dev = ns->dev;
208 
209 	MLX5_SET(destroy_flow_table_in, in, opcode,
210 		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
211 	MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
212 	MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
213 	if (ft->vport) {
214 		MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
215 		MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
216 	}
217 
218 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
219 }
220 
/* Re-chain @ft via MODIFY_FLOW_TABLE.
 *
 * For LAG demux tables only the lag_master_next_table_id field is
 * modified; for all other tables the miss action/table is updated to
 * point at @next_ft (or the namespace default action when @next_ft is
 * NULL).
 */
static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_table *next_ft)
{
	u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(modify_flow_table_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
	MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
	MLX5_SET(modify_flow_table_in, in, table_id, ft->id);

	if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
		/* next table id 0 detaches the demux chain */
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, 0);
		}
	} else {
		if (ft->vport) {
			MLX5_SET(modify_flow_table_in, in, vport_number,
				 ft->vport);
			MLX5_SET(modify_flow_table_in, in, other_vport, 1);
		}
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
		/* Miss behavior: forward to @next_ft when chained, else
		 * the namespace default action.
		 */
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_id,
				 next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ns->def_miss_action);
		}
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
268 
269 static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
270 				      struct mlx5_flow_table *ft,
271 				      u32 *in,
272 				      struct mlx5_flow_group *fg)
273 {
274 	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
275 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
276 	struct mlx5_core_dev *dev = ns->dev;
277 	int err;
278 
279 	MLX5_SET(create_flow_group_in, in, opcode,
280 		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
281 	MLX5_SET(create_flow_group_in, in, table_type, ft->type);
282 	MLX5_SET(create_flow_group_in, in, table_id, ft->id);
283 	if (ft->vport) {
284 		MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
285 		MLX5_SET(create_flow_group_in, in, other_vport, 1);
286 	}
287 
288 	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
289 	if (!err)
290 		fg->id = MLX5_GET(create_flow_group_out, out,
291 				  group_id);
292 	return err;
293 }
294 
295 static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
296 				       struct mlx5_flow_table *ft,
297 				       struct mlx5_flow_group *fg)
298 {
299 	u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
300 	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)]   = {0};
301 	struct mlx5_core_dev *dev = ns->dev;
302 
303 	MLX5_SET(destroy_flow_group_in, in, opcode,
304 		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
305 	MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
306 	MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
307 	MLX5_SET(destroy_flow_group_in, in, group_id, fg->id);
308 	if (ft->vport) {
309 		MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
310 		MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
311 	}
312 
313 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
314 }
315 
316 static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
317 				  struct fs_fte *fte, bool *extended_dest)
318 {
319 	int fw_log_max_fdb_encap_uplink =
320 		MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
321 	int num_fwd_destinations = 0;
322 	struct mlx5_flow_rule *dst;
323 	int num_encap = 0;
324 
325 	*extended_dest = false;
326 	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
327 		return 0;
328 
329 	list_for_each_entry(dst, &fte->node.children, node.list) {
330 		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
331 			continue;
332 		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
333 		    dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
334 			num_encap++;
335 		num_fwd_destinations++;
336 	}
337 	if (num_fwd_destinations > 1 && num_encap > 0)
338 		*extended_dest = true;
339 
340 	if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
341 		mlx5_core_warn(dev, "FW does not support extended destination");
342 		return -EOPNOTSUPP;
343 	}
344 	if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
345 		mlx5_core_warn(dev, "FW does not support more than %d encaps",
346 			       1 << fw_log_max_fdb_encap_uplink);
347 		return -EOPNOTSUPP;
348 	}
349 
350 	return 0;
351 }
/* Build and execute SET_FLOW_TABLE_ENTRY for @fte.
 *
 * @opmod:       0 creates a new entry, 1 modifies an existing one.
 * @modify_mask: which entry fields to modify (only meaningful with
 *               opmod 1).
 * @group_id:    flow group the entry belongs to.
 *
 * The command buffer is sized for all of @fte's destinations
 * (fte->dests_size entries at dst_cnt_size each), so the destination
 * and counter serialization loops below always write within bounds.
 * Returns 0 on success or a negative errno.
 */
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
			    int opmod, int modify_mask,
			    struct mlx5_flow_table *ft,
			    unsigned group_id,
			    struct fs_fte *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
	bool extended_dest = false;
	struct mlx5_flow_rule *dst;
	void *in_flow_context, *vlan;
	void *in_match_value;
	unsigned int inlen;
	int dst_cnt_size;
	void *in_dests;
	u32 *in;
	int err;

	/* The destination format decides the per-destination entry size. */
	if (mlx5_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id,   ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	if (ft->vport) {
		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
		MLX5_SET(set_fte_in, in, other_vport, 1);
	}

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag);
	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);
	if (extended_dest) {
		/* With extended destinations the reformat action moves to
		 * the per-destination entries, so strip it from the
		 * flow-context action mask.
		 */
		u32 action;

		action = fte->action.action &
			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		MLX5_SET(flow_context, in_flow_context, action, action);
	} else {
		MLX5_SET(flow_context, in_flow_context, action,
			 fte->action.action);
		MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
			 fte->action.reformat_id);
	}
	MLX5_SET(flow_context, in_flow_context, modify_header_id,
		 fte->action.modify_id);

	/* Up to two push-VLAN headers. */
	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, &fte->val, sizeof(fte->val));

	/* Serialize forward destinations; counter children are skipped
	 * here and emitted in the separate counter list below.
	 */
	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			unsigned int id, type = dst->dest_attr.type;

			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				/* table referenced by number: rewrite to the
				 * plain flow-table destination type
				 */
				id = dst->dest_attr.ft_num;
				type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = dst->dest_attr.ft->id;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				id = dst->dest_attr.vport.num;
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id_valid,
					 !!(dst->dest_attr.vport.flags &
					    MLX5_FLOW_DEST_VPORT_VHCA_ID));
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 dst->dest_attr.vport.vhca_id);
				if (extended_dest) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(dst->dest_attr.vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 dst->dest_attr.vport.reformat_id);
				}
				break;
			default:
				id = dst->dest_attr.tir_num;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	/* Counter list follows the forward destinations at the same
	 * dst_cnt_size stride; reject lists beyond the device limit.
	 */
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			if (dst->dest_attr.type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 dst->dest_attr.counter_id);
			in_dests += dst_cnt_size;
			list_size++;
		}
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}
512 
513 static int mlx5_cmd_create_fte(struct mlx5_flow_root_namespace *ns,
514 			       struct mlx5_flow_table *ft,
515 			       struct mlx5_flow_group *group,
516 			       struct fs_fte *fte)
517 {
518 	struct mlx5_core_dev *dev = ns->dev;
519 	unsigned int group_id = group->id;
520 
521 	return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
522 }
523 
524 static int mlx5_cmd_update_fte(struct mlx5_flow_root_namespace *ns,
525 			       struct mlx5_flow_table *ft,
526 			       struct mlx5_flow_group *fg,
527 			       int modify_mask,
528 			       struct fs_fte *fte)
529 {
530 	int opmod;
531 	struct mlx5_core_dev *dev = ns->dev;
532 	int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
533 						flow_table_properties_nic_receive.
534 						flow_modify_en);
535 	if (!atomic_mod_cap)
536 		return -EOPNOTSUPP;
537 	opmod = 1;
538 
539 	return	mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, fg->id, fte);
540 }
541 
542 static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
543 			       struct mlx5_flow_table *ft,
544 			       struct fs_fte *fte)
545 {
546 	u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
547 	u32 in[MLX5_ST_SZ_DW(delete_fte_in)]   = {0};
548 	struct mlx5_core_dev *dev = ns->dev;
549 
550 	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
551 	MLX5_SET(delete_fte_in, in, table_type, ft->type);
552 	MLX5_SET(delete_fte_in, in, table_id, ft->id);
553 	MLX5_SET(delete_fte_in, in, flow_index, fte->index);
554 	if (ft->vport) {
555 		MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
556 		MLX5_SET(delete_fte_in, in, other_vport, 1);
557 	}
558 
559 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
560 }
561 
562 int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
563 {
564 	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)]   = {0};
565 	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
566 	int err;
567 
568 	MLX5_SET(alloc_flow_counter_in, in, opcode,
569 		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
570 
571 	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
572 	if (!err)
573 		*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
574 	return err;
575 }
576 
577 int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
578 {
579 	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)]   = {0};
580 	u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};
581 
582 	MLX5_SET(dealloc_flow_counter_in, in, opcode,
583 		 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
584 	MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
585 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
586 }
587 
588 int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
589 		      u64 *packets, u64 *bytes)
590 {
591 	u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
592 		MLX5_ST_SZ_BYTES(traffic_counter)]   = {0};
593 	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
594 	void *stats;
595 	int err = 0;
596 
597 	MLX5_SET(query_flow_counter_in, in, opcode,
598 		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
599 	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
600 	MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
601 	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
602 	if (err)
603 		return err;
604 
605 	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
606 	*packets = MLX5_GET64(traffic_counter, stats, packets);
607 	*bytes = MLX5_GET64(traffic_counter, stats, octets);
608 	return 0;
609 }
610 
611 struct mlx5_cmd_fc_bulk {
612 	u32 id;
613 	int num;
614 	int outlen;
615 	u32 out[0];
616 };
617 
618 struct mlx5_cmd_fc_bulk *
619 mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num)
620 {
621 	struct mlx5_cmd_fc_bulk *b;
622 	int outlen =
623 		MLX5_ST_SZ_BYTES(query_flow_counter_out) +
624 		MLX5_ST_SZ_BYTES(traffic_counter) * num;
625 
626 	b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
627 	if (!b)
628 		return NULL;
629 
630 	b->id = id;
631 	b->num = num;
632 	b->outlen = outlen;
633 
634 	return b;
635 }
636 
/* Free a buffer obtained from mlx5_cmd_fc_bulk_alloc(). */
void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
{
	kfree(b);
}
641 
642 int
643 mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
644 {
645 	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
646 
647 	MLX5_SET(query_flow_counter_in, in, opcode,
648 		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
649 	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
650 	MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
651 	MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
652 	return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
653 }
654 
655 void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
656 			  struct mlx5_cmd_fc_bulk *b, u32 id,
657 			  u64 *packets, u64 *bytes)
658 {
659 	int index = id - b->id;
660 	void *stats;
661 
662 	if (index < 0 || index >= b->num) {
663 		mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
664 			       id, b->id, b->id + b->num - 1);
665 		return;
666 	}
667 
668 	stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
669 			     flow_statistics[index]);
670 	*packets = MLX5_GET64(traffic_counter, stats, packets);
671 	*bytes = MLX5_GET64(traffic_counter, stats, octets);
672 }
673 
674 int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
675 			       int reformat_type,
676 			       size_t size,
677 			       void *reformat_data,
678 			       enum mlx5_flow_namespace_type namespace,
679 			       u32 *packet_reformat_id)
680 {
681 	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)];
682 	void *packet_reformat_context_in;
683 	int max_encap_size;
684 	void *reformat;
685 	int inlen;
686 	int err;
687 	u32 *in;
688 
689 	if (namespace == MLX5_FLOW_NAMESPACE_FDB)
690 		max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
691 	else
692 		max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);
693 
694 	if (size > max_encap_size) {
695 		mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
696 			       size, max_encap_size);
697 		return -EINVAL;
698 	}
699 
700 	in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) + size,
701 		     GFP_KERNEL);
702 	if (!in)
703 		return -ENOMEM;
704 
705 	packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in,
706 						  in, packet_reformat_context);
707 	reformat = MLX5_ADDR_OF(packet_reformat_context_in,
708 				packet_reformat_context_in,
709 				reformat_data);
710 	inlen = reformat - (void *)in  + size;
711 
712 	memset(in, 0, inlen);
713 	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
714 		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
715 	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
716 		 reformat_data_size, size);
717 	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
718 		 reformat_type, reformat_type);
719 	memcpy(reformat, reformat_data, size);
720 
721 	memset(out, 0, sizeof(out));
722 	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
723 
724 	*packet_reformat_id = MLX5_GET(alloc_packet_reformat_context_out,
725 				       out, packet_reformat_id);
726 	kfree(in);
727 	return err;
728 }
729 EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
730 
731 void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
732 				  u32 packet_reformat_id)
733 {
734 	u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)];
735 	u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)];
736 
737 	memset(in, 0, sizeof(in));
738 	MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
739 		 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
740 	MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
741 		 packet_reformat_id);
742 
743 	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
744 }
745 EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
746 
747 int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
748 			     u8 namespace, u8 num_actions,
749 			     void *modify_actions, u32 *modify_header_id)
750 {
751 	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)];
752 	int max_actions, actions_size, inlen, err;
753 	void *actions_in;
754 	u8 table_type;
755 	u32 *in;
756 
757 	switch (namespace) {
758 	case MLX5_FLOW_NAMESPACE_FDB:
759 		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
760 		table_type = FS_FT_FDB;
761 		break;
762 	case MLX5_FLOW_NAMESPACE_KERNEL:
763 	case MLX5_FLOW_NAMESPACE_BYPASS:
764 		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
765 		table_type = FS_FT_NIC_RX;
766 		break;
767 	case MLX5_FLOW_NAMESPACE_EGRESS:
768 		max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
769 		table_type = FS_FT_NIC_TX;
770 		break;
771 	default:
772 		return -EOPNOTSUPP;
773 	}
774 
775 	if (num_actions > max_actions) {
776 		mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
777 			       num_actions, max_actions);
778 		return -EOPNOTSUPP;
779 	}
780 
781 	actions_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) * num_actions;
782 	inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;
783 
784 	in = kzalloc(inlen, GFP_KERNEL);
785 	if (!in)
786 		return -ENOMEM;
787 
788 	MLX5_SET(alloc_modify_header_context_in, in, opcode,
789 		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
790 	MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
791 	MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);
792 
793 	actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
794 	memcpy(actions_in, modify_actions, actions_size);
795 
796 	memset(out, 0, sizeof(out));
797 	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
798 
799 	*modify_header_id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
800 	kfree(in);
801 	return err;
802 }
803 EXPORT_SYMBOL(mlx5_modify_header_alloc);
804 
805 void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
806 {
807 	u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)];
808 	u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)];
809 
810 	memset(in, 0, sizeof(in));
811 	MLX5_SET(dealloc_modify_header_context_in, in, opcode,
812 		 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
813 	MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
814 		 modify_header_id);
815 
816 	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
817 }
818 EXPORT_SYMBOL(mlx5_modify_header_dealloc);
819 
/* Command set for flow table types that are programmed into device FW. */
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
	.create_flow_table = mlx5_cmd_create_flow_table,
	.destroy_flow_table = mlx5_cmd_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_modify_flow_table,
	.create_flow_group = mlx5_cmd_create_flow_group,
	.destroy_flow_group = mlx5_cmd_destroy_flow_group,
	.create_fte = mlx5_cmd_create_fte,
	.update_fte = mlx5_cmd_update_fte,
	.delete_fte = mlx5_cmd_delete_fte,
	.update_root_ft = mlx5_cmd_update_root_ft,
};
831 
/* No-op command set for flow table types with no firmware backing. */
static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
	.create_flow_table = mlx5_cmd_stub_create_flow_table,
	.destroy_flow_table = mlx5_cmd_stub_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_stub_modify_flow_table,
	.create_flow_group = mlx5_cmd_stub_create_flow_group,
	.destroy_flow_group = mlx5_cmd_stub_destroy_flow_group,
	.create_fte = mlx5_cmd_stub_create_fte,
	.update_fte = mlx5_cmd_stub_update_fte,
	.delete_fte = mlx5_cmd_stub_delete_fte,
	.update_root_ft = mlx5_cmd_stub_update_root_ft,
};
843 
/* Accessor for the firmware-backed command set. */
static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
{
	return &mlx5_flow_cmds;
}
848 
/* Accessor for the no-op (stub) command set. */
static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void)
{
	return &mlx5_flow_cmd_stubs;
}
853 
/* Select the command set for a flow table type: firmware-backed types
 * get the real commands, anything else gets the no-op stubs.
 */
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type)
{
	switch (type) {
	case FS_FT_NIC_RX:
	case FS_FT_ESW_EGRESS_ACL:
	case FS_FT_ESW_INGRESS_ACL:
	case FS_FT_FDB:
	case FS_FT_SNIFFER_RX:
	case FS_FT_SNIFFER_TX:
	case FS_FT_NIC_TX:
	case FS_FT_RDMA_RX:
		return mlx5_fs_cmd_get_fw_cmds();
	default:
		return mlx5_fs_cmd_get_stub_cmds();
	}
}
870