1 /*
2  * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/mlx5/driver.h>
34 #include <linux/mlx5/device.h>
35 #include <linux/mlx5/mlx5_ifc.h>
36 
37 #include "fs_core.h"
38 #include "fs_cmd.h"
39 #include "mlx5_core.h"
40 #include "eswitch.h"
41 
42 int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
43 			    struct mlx5_flow_table *ft)
44 {
45 	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)]   = {0};
46 	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
47 
48 	MLX5_SET(set_flow_table_root_in, in, opcode,
49 		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
50 	MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
51 	MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
52 	if (ft->vport) {
53 		MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
54 		MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
55 	}
56 
57 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
58 }
59 
60 int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
61 			       u16 vport,
62 			       enum fs_flow_table_op_mod op_mod,
63 			       enum fs_flow_table_type type, unsigned int level,
64 			       unsigned int log_size, struct mlx5_flow_table
65 			       *next_ft, unsigned int *table_id, u32 flags)
66 {
67 	int en_encap_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN);
68 	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
69 	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)]   = {0};
70 	int err;
71 
72 	MLX5_SET(create_flow_table_in, in, opcode,
73 		 MLX5_CMD_OP_CREATE_FLOW_TABLE);
74 
75 	MLX5_SET(create_flow_table_in, in, table_type, type);
76 	MLX5_SET(create_flow_table_in, in, level, level);
77 	MLX5_SET(create_flow_table_in, in, log_size, log_size);
78 	if (vport) {
79 		MLX5_SET(create_flow_table_in, in, vport_number, vport);
80 		MLX5_SET(create_flow_table_in, in, other_vport, 1);
81 	}
82 
83 	MLX5_SET(create_flow_table_in, in, decap_en, en_encap_decap);
84 	MLX5_SET(create_flow_table_in, in, encap_en, en_encap_decap);
85 
86 	switch (op_mod) {
87 	case FS_FT_OP_MOD_NORMAL:
88 		if (next_ft) {
89 			MLX5_SET(create_flow_table_in, in, table_miss_mode, 1);
90 			MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id);
91 		}
92 		break;
93 
94 	case FS_FT_OP_MOD_LAG_DEMUX:
95 		MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
96 		if (next_ft)
97 			MLX5_SET(create_flow_table_in, in, lag_master_next_table_id,
98 				 next_ft->id);
99 		break;
100 	}
101 
102 	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
103 	if (!err)
104 		*table_id = MLX5_GET(create_flow_table_out, out,
105 				     table_id);
106 	return err;
107 }
108 
109 int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
110 				struct mlx5_flow_table *ft)
111 {
112 	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)]   = {0};
113 	u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
114 
115 	MLX5_SET(destroy_flow_table_in, in, opcode,
116 		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
117 	MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
118 	MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
119 	if (ft->vport) {
120 		MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
121 		MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
122 	}
123 
124 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
125 }
126 
127 int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
128 			       struct mlx5_flow_table *ft,
129 			       struct mlx5_flow_table *next_ft)
130 {
131 	u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)]   = {0};
132 	u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
133 
134 	MLX5_SET(modify_flow_table_in, in, opcode,
135 		 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
136 	MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
137 	MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
138 
139 	if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
140 		MLX5_SET(modify_flow_table_in, in, modify_field_select,
141 			 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
142 		if (next_ft) {
143 			MLX5_SET(modify_flow_table_in, in,
144 				 lag_master_next_table_id, next_ft->id);
145 		} else {
146 			MLX5_SET(modify_flow_table_in, in,
147 				 lag_master_next_table_id, 0);
148 		}
149 	} else {
150 		if (ft->vport) {
151 			MLX5_SET(modify_flow_table_in, in, vport_number,
152 				 ft->vport);
153 			MLX5_SET(modify_flow_table_in, in, other_vport, 1);
154 		}
155 		MLX5_SET(modify_flow_table_in, in, modify_field_select,
156 			 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
157 		if (next_ft) {
158 			MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1);
159 			MLX5_SET(modify_flow_table_in, in, table_miss_id,
160 				 next_ft->id);
161 		} else {
162 			MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0);
163 		}
164 	}
165 
166 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
167 }
168 
169 int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
170 			       struct mlx5_flow_table *ft,
171 			       u32 *in,
172 			       unsigned int *group_id)
173 {
174 	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
175 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
176 	int err;
177 
178 	MLX5_SET(create_flow_group_in, in, opcode,
179 		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
180 	MLX5_SET(create_flow_group_in, in, table_type, ft->type);
181 	MLX5_SET(create_flow_group_in, in, table_id, ft->id);
182 	if (ft->vport) {
183 		MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
184 		MLX5_SET(create_flow_group_in, in, other_vport, 1);
185 	}
186 
187 	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
188 	if (!err)
189 		*group_id = MLX5_GET(create_flow_group_out, out,
190 				     group_id);
191 	return err;
192 }
193 
194 int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
195 				struct mlx5_flow_table *ft,
196 				unsigned int group_id)
197 {
198 	u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
199 	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)]   = {0};
200 
201 	MLX5_SET(destroy_flow_group_in, in, opcode,
202 		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
203 	MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
204 	MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
205 	MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
206 	if (ft->vport) {
207 		MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
208 		MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
209 	}
210 
211 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
212 }
213 
/* Write one flow table entry (FTE) to the device.
 *
 * @opmod:       0 to create a new entry, non-zero to modify an existing one
 * @modify_mask: which FTE fields to update (modify path; 0 on create)
 * @ft:          table the entry belongs to
 * @group_id:    flow group the entry is placed in
 * @fte:         software FTE: match value, action, flow tag and the list
 *               of destinations/counters hanging off fte->node.children
 *
 * The input mailbox is variable-length: the fixed set_fte_in header is
 * followed by one dest_format_struct per destination. Forwarding
 * destinations and flow counters share that trailing array (the formats
 * are union'd in the firmware interface), so the two loops below walk
 * fte->node.children twice, each skipping the other's entry type, and
 * advance @in_dests by the same struct stride.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
			    int opmod, int modify_mask,
			    struct mlx5_flow_table *ft,
			    unsigned group_id,
			    struct fs_fte *fte)
{
	/* Header plus one destination slot per child of the FTE; counter
	 * children also consume one slot each (same stride, see above).
	 */
	unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
		fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
	struct mlx5_flow_rule *dst;
	void *in_flow_context;
	void *in_match_value;
	void *in_dests;
	u32 *in;
	int err;

	/* vzalloc-backed: inlen can exceed a page for large dest lists */
	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id,   ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	if (ft->vport) {
		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
		MLX5_SET(set_fte_in, in, other_vport, 1);
	}

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);
	MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
	MLX5_SET(flow_context, in_flow_context, action, fte->action);
	MLX5_SET(flow_context, in_flow_context, encap_id, fte->encap_id);
	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	/* Pass 1: forwarding destinations (tables/TIRs/vports), counters
	 * are skipped here and handled in the second pass.
	 */
	if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			unsigned int id;

			if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 dst->dest_attr.type);
			if (dst->dest_attr.type ==
			    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
				id = dst->dest_attr.ft->id;
			} else {
				/* TIR/vport destinations share this field */
				id = dst->dest_attr.tir_num;
			}
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	/* Pass 2: flow counters, appended after the forwarding entries.
	 * Stride is dest_format_struct on purpose: flow_counter_list is
	 * the union'd view of the same destination slot.
	 */
	if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			if (dst->dest_attr.type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 dst->dest_attr.counter->id);
			in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	kvfree(in);
	return err;
}
305 
/* Create a new flow table entry: op_mod 0, no modify mask. */
int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
			struct mlx5_flow_table *ft,
			unsigned group_id,
			struct fs_fte *fte)
{
	return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}
313 
314 int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
315 			struct mlx5_flow_table *ft,
316 			unsigned group_id,
317 			int modify_mask,
318 			struct fs_fte *fte)
319 {
320 	int opmod;
321 	int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
322 						flow_table_properties_nic_receive.
323 						flow_modify_en);
324 	if (!atomic_mod_cap)
325 		return -EOPNOTSUPP;
326 	opmod = 1;
327 
328 	return	mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
329 }
330 
331 int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
332 			struct mlx5_flow_table *ft,
333 			unsigned int index)
334 {
335 	u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
336 	u32 in[MLX5_ST_SZ_DW(delete_fte_in)]   = {0};
337 
338 	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
339 	MLX5_SET(delete_fte_in, in, table_type, ft->type);
340 	MLX5_SET(delete_fte_in, in, table_id, ft->id);
341 	MLX5_SET(delete_fte_in, in, flow_index, index);
342 	if (ft->vport) {
343 		MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
344 		MLX5_SET(delete_fte_in, in, other_vport, 1);
345 	}
346 
347 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
348 }
349 
350 int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id)
351 {
352 	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)]   = {0};
353 	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
354 	int err;
355 
356 	MLX5_SET(alloc_flow_counter_in, in, opcode,
357 		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
358 
359 	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
360 	if (!err)
361 		*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
362 	return err;
363 }
364 
365 int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id)
366 {
367 	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)]   = {0};
368 	u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};
369 
370 	MLX5_SET(dealloc_flow_counter_in, in, opcode,
371 		 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
372 	MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
373 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
374 }
375 
/* Query a single flow counter and return its packet and octet totals.
 *
 * @id:      flow counter id to read
 * @packets: out, accumulated packet count
 * @bytes:   out, accumulated octet count
 *
 * Returns 0 on success or a negative errno from the command interface.
 */
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
		      u64 *packets, u64 *bytes)
{
	/* NOTE(review): this is a u32 array sized with *byte* macros, so
	 * the buffer is 4x larger than the mailbox needs — harmless, but
	 * confirm whether the byte sizing is intentional.
	 */
	u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter)]   = {0};
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
	void *stats;
	int err = 0;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	/* Single-counter query: statistics are at the start of the array */
	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);
	return 0;
}
398 
399 struct mlx5_cmd_fc_bulk {
400 	u16 id;
401 	int num;
402 	int outlen;
403 	u32 out[0];
404 };
405 
406 struct mlx5_cmd_fc_bulk *
407 mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num)
408 {
409 	struct mlx5_cmd_fc_bulk *b;
410 	int outlen =
411 		MLX5_ST_SZ_BYTES(query_flow_counter_out) +
412 		MLX5_ST_SZ_BYTES(traffic_counter) * num;
413 
414 	b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
415 	if (!b)
416 		return NULL;
417 
418 	b->id = id;
419 	b->num = num;
420 	b->outlen = outlen;
421 
422 	return b;
423 }
424 
/* Free a bulk-query buffer from mlx5_cmd_fc_bulk_alloc(); NULL is a no-op. */
void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
{
	kfree(b);
}
429 
430 int
431 mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
432 {
433 	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
434 
435 	MLX5_SET(query_flow_counter_in, in, opcode,
436 		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
437 	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
438 	MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
439 	MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
440 	return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
441 }
442 
443 void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
444 			  struct mlx5_cmd_fc_bulk *b, u16 id,
445 			  u64 *packets, u64 *bytes)
446 {
447 	int index = id - b->id;
448 	void *stats;
449 
450 	if (index < 0 || index >= b->num) {
451 		mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
452 			       id, b->id, b->id + b->num - 1);
453 		return;
454 	}
455 
456 	stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
457 			     flow_statistics[index]);
458 	*packets = MLX5_GET64(traffic_counter, stats, packets);
459 	*bytes = MLX5_GET64(traffic_counter, stats, octets);
460 }
461 
462 int mlx5_encap_alloc(struct mlx5_core_dev *dev,
463 		     int header_type,
464 		     size_t size,
465 		     void *encap_header,
466 		     u32 *encap_id)
467 {
468 	int max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
469 	u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)];
470 	void *encap_header_in;
471 	void *header;
472 	int inlen;
473 	int err;
474 	u32 *in;
475 
476 	if (size > max_encap_size) {
477 		mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
478 			       size, max_encap_size);
479 		return -EINVAL;
480 	}
481 
482 	in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + size,
483 		     GFP_KERNEL);
484 	if (!in)
485 		return -ENOMEM;
486 
487 	encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in, encap_header);
488 	header = MLX5_ADDR_OF(encap_header_in, encap_header_in, encap_header);
489 	inlen = header - (void *)in  + size;
490 
491 	memset(in, 0, inlen);
492 	MLX5_SET(alloc_encap_header_in, in, opcode,
493 		 MLX5_CMD_OP_ALLOC_ENCAP_HEADER);
494 	MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size);
495 	MLX5_SET(encap_header_in, encap_header_in, header_type, header_type);
496 	memcpy(header, encap_header, size);
497 
498 	memset(out, 0, sizeof(out));
499 	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
500 
501 	*encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id);
502 	kfree(in);
503 	return err;
504 }
505 
506 void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id)
507 {
508 	u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)];
509 	u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)];
510 
511 	memset(in, 0, sizeof(in));
512 	MLX5_SET(dealloc_encap_header_in, in, opcode,
513 		 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
514 	MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id);
515 
516 	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
517 }
518