/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "fs_core.h"

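/* Reserved vport number used by this driver to address the uplink
 * (physical port), both as an FDB forwarding destination and as the
 * source_port match value of the promiscuous rx rule.
 */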
#define UPLINK_VPORT 0xFFFF

enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD  = 1,
	MLX5_ACTION_DEL  = 2,
};

/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node     node;
	u8                     action;
	u32                    vport;
	struct mlx5_flow_handle *flow_rule;
	bool mpfs; /* UC MAC was added to MPFS */
	/* A flag indicating that mac was added due to mc promiscuous vport */
	bool mc_promisc;
};

enum {
	UC_ADDR_CHANGE = BIT(0),
	MC_ADDR_CHANGE = BIT(1),
	PROMISC_CHANGE = BIT(3),
};

/* Vport context events */
#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
			    MC_ADDR_CHANGE | \
			    PROMISC_CHANGE)

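/* Arm the NIC vport context so FW raises a change event when the vport's
 * UC/MC address lists or promiscuous mode change.  Arming is one-shot:
 * the handler re-arms after processing each event (see
 * esw_vport_change_handle_locked below).
 */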
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
					u32 events_mask)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

	if (events_mask & UC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_uc_address_change, 1);
	if (events_mask & MC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_mc_address_change, 1);
	if (events_mask & PROMISC_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_promisc_change, 1);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

/* E-Switch vport context HW commands */
static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
					void *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};

	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}

static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
				  u16 vlan, u8 qos, u8 set_flags)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -EOPNOTSUPP;

	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
		  vport, vlan, qos, set_flags);

	if (set_flags & SET_VLAN_STRIP)
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);

	if (set_flags & SET_VLAN_INSERT) {
		/* Insert only if no VLAN is already present in the packet */
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_insert, 1);

		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, qos);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_strip, 1);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_insert, 1);

	return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
}

/* E-Switch FDB */
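/* Install a forwarding rule in the legacy FDB.  A non-zero mac_c mask
 * enables exact matching on the outer destination MAC; an rx_rule
 * additionally matches misc source_port == UPLINK_VPORT so it only
 * catches traffic arriving from the wire.
 */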
static struct mlx5_flow_handle *
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
			    MLX5_MATCH_OUTER_HEADERS);
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_spec *spec;
	void *mv_misc = NULL;
	void *mc_misc = NULL;
	u8 *dmac_v = NULL;
	u8 *dmac_c = NULL;

	if (rx_rule)
		match_header |= MLX5_MATCH_MISC_PARAMETERS;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			      outer_headers.dmac_47_16);
	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			      outer_headers.dmac_47_16);

	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
		ether_addr_copy(dmac_v, mac_v);
		ether_addr_copy(dmac_c, mac_c);
	}

	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
		mv_misc  = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					misc_parameters);
		mc_misc  = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					misc_parameters);
		MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;

	esw_debug(esw->dev,
		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
		  dmac_v, dmac_c, vport);
	spec->match_criteria_enable = match_header;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule =
		mlx5_add_flow_rules(esw->fdb_table.fdb, spec,
				    &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev,
			 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
		flow_rule = NULL;
	}

	kvfree(spec);
	return flow_rule;
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
{
	u8 mac_c[ETH_ALEN];

	eth_broadcast_addr(mac_c);
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	mac_c[0] = 0x01;
	mac_v[0] = 0x01;
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}

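/* Legacy FDB layout: one group of exact-match UC/MC entries at indices
 * 0 .. table_size-3, followed by a single allmulti entry (table_size-2,
 * matching only the multicast bit of the DMAC) and a single promisc
 * catch-all entry (table_size-1, matching only on source_port).
 */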
static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_flow_group *g;
	void *match_criteria;
	int table_size;
	u32 *flow_group_in;
	u8 *dmac;
	int err = 0;

	esw_debug(dev, "Create FDB log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	ft_attr.max_fte = table_size;
	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
		goto out;
	}
	esw->fdb_table.fdb = fdb;

	/* Addresses group : Full match unicast/multicast addresses */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	/* Preserve 2 entries for allmulti and promisc rules */
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
	eth_broadcast_addr(dmac);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.addr_grp = g;

	/* Allmulti group : One rule that forwards any mcast traffic */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
	eth_zero_addr(dmac);
	dmac[0] = 0x01;
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.allmulti_grp = g;

	/* Promiscuous group :
	 * One rule that forwards all unmatched traffic from previous groups
	 */
	eth_zero_addr(dmac);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.promisc_grp = g;

out:
	if (err) {
		if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.allmulti_grp)) {
			mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
			esw->fdb_table.legacy.allmulti_grp = NULL;
		}
		if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.addr_grp)) {
			mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
			esw->fdb_table.legacy.addr_grp = NULL;
		}
		if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
			mlx5_destroy_flow_table(esw->fdb_table.fdb);
			esw->fdb_table.fdb = NULL;
		}
	}

	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy FDB Table\n");
	mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
	mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
	mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
	esw->fdb_table.fdb = NULL;
	esw->fdb_table.legacy.addr_grp = NULL;
	esw->fdb_table.legacy.allmulti_grp = NULL;
	esw->fdb_table.legacy.promisc_grp = NULL;
}

/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);

static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;
	int err;

	/* Skip mlx5_mpfs_add_mac for PFs,
	 * it is already done by the PF netdev in mlx5e_execute_l2_action
	 */
	if (!vport)
		goto fdb_add;

	err = mlx5_mpfs_add_mac(esw->dev, mac);
	if (err) {
		esw_warn(esw->dev,
			 "Failed to add L2 table mac(%pM) for vport(%d), err(%d)\n",
			 mac, vport, err);
		return err;
	}
	vaddr->mpfs = true;

fdb_add:
	/* SRIOV is enabled: Forward UC MAC to vport */
	if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY)
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
		  vport, mac, vaddr->flow_rule);

	return 0;
}

static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;
	int err = 0;

	/* Skip mlx5_mpfs_del_mac for PFs,
	 * it is already done by the PF netdev in mlx5e_execute_l2_action
	 */
	if (!vport || !vaddr->mpfs)
		goto fdb_del;

	err = mlx5_mpfs_del_mac(esw->dev, mac);
	if (err)
		esw_warn(esw->dev,
			 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n",
			 mac, vport, err);
	vaddr->mpfs = false;

fdb_del:
	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	return 0;
}

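/* Propagate an MC address change to every multicast promiscuous vport:
 * on ADD, mirror the new MC MAC into each allmulti vport's mc_list and
 * install a forwarding rule for it; on DEL, remove both again.  The
 * originating vport itself is skipped.
 */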
static void update_allmulti_vports(struct mlx5_eswitch *esw,
				   struct vport_addr *vaddr,
				   struct esw_mc_addr *esw_mc)
{
	u8 *mac = vaddr->node.addr;
	u32 vport_idx = 0;

	for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) {
		struct mlx5_vport *vport = &esw->vports[vport_idx];
		struct hlist_head *vport_hash = vport->mc_list;
		struct vport_addr *iter_vaddr =
					l2addr_hash_find(vport_hash,
							 mac,
							 struct vport_addr);
		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
		    vaddr->vport == vport_idx)
			continue;
		switch (vaddr->action) {
		case MLX5_ACTION_ADD:
			if (iter_vaddr)
				continue;
			iter_vaddr = l2addr_hash_add(vport_hash, mac,
						     struct vport_addr,
						     GFP_KERNEL);
			if (!iter_vaddr) {
				esw_warn(esw->dev,
					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
					 mac, vport_idx);
				continue;
			}
			iter_vaddr->vport = vport_idx;
			iter_vaddr->flow_rule =
					esw_fdb_set_vport_rule(esw,
							       mac,
							       vport_idx);
			iter_vaddr->mc_promisc = true;
			break;
		case MLX5_ACTION_DEL:
			if (!iter_vaddr)
				continue;
			mlx5_del_flow_rules(iter_vaddr->flow_rule);
			l2addr_hash_del(iter_vaddr);
			break;
		}
	}
}

static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
		esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);

	/* Add this multicast mac to all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

add:
	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't increment the multicast ref count
	 */
	if (!vaddr->mc_promisc)
		esw_mc->refcnt++;

	/* Forward MC MAC to vport */
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}

static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)\n",
			 mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't decrement the multicast ref count.
	 */
	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
		return 0;

	/* Remove this multicast mac from all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

	if (esw_mc->uplink_rule)
		mlx5_del_flow_rules(esw_mc->uplink_rule);

	l2addr_hash_del(esw_mc);
	return 0;
}

/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
				      u32 vport_num, int list_type)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ? vport->uc_list : vport->mc_list;
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		switch (addr->action) {
		case MLX5_ACTION_ADD:
			vport_addr_add(esw, addr);
			addr->action = MLX5_ACTION_NONE;
			break;
		case MLX5_ACTION_DEL:
			vport_addr_del(esw, addr);
			l2addr_hash_del(addr);
			break;
		}
	}
}

/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
				       u32 vport_num, int list_type)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	u8 (*mac_list)[ETH_ALEN];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int size;
	int err;
	int hi;
	int i;

	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
		       MLX5_MAX_MC_PER_VPORT(esw->dev);

	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
	if (!mac_list)
		return;

	hash = is_uc ? vport->uc_list : vport->mc_list;

	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}

	if (!vport->enabled)
		goto out;

	err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
					    mac_list, &size);
	if (err)
		goto out;
	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
		  vport_num, is_uc ? "UC" : "MC", size);

	for (i = 0; i < size; i++) {
		if (is_uc && !is_valid_ether_addr(mac_list[i]))
			continue;

		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
			continue;

		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
		if (addr) {
			addr->action = MLX5_ACTION_NONE;
			/* If this mac was previously added because of allmulti
			 * promiscuous rx mode, it's now converted to the
			 * original vport mac.
			 */
			if (addr->mc_promisc) {
				struct esw_mc_addr *esw_mc =
					l2addr_hash_find(esw->mc_table,
							 mac_list[i],
							 struct esw_mc_addr);
				if (!esw_mc) {
					esw_warn(esw->dev,
						 "Failed to find MAC(%pM) in mcast DB\n",
						 mac_list[i]);
					continue;
				}
				esw_mc->refcnt++;
				addr->mc_promisc = false;
			}
			continue;
		}

		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add MAC(%pM) to vport[%d] DB\n",
				 mac_list[i], vport_num);
			continue;
		}
		addr->vport = vport_num;
		addr->action = MLX5_ACTION_ADD;
	}
out:
	kfree(mac_list);
}

/* Sync this vport's MC list with the e-switch multicast table, so that
 * every MC address known to the e-switch is mirrored on this multicast
 * promiscuous vport.
 * Must be called after esw_update_vport_addr_list.
 */
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	hash = vport->mc_list;

	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
		u8 *mac = node->addr;

		addr = l2addr_hash_find(hash, mac, struct vport_addr);
		if (addr) {
			if (addr->action == MLX5_ACTION_DEL)
				addr->action = MLX5_ACTION_NONE;
			continue;
		}
		addr = l2addr_hash_add(hash, mac, struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
				 mac, vport_num);
			continue;
		}
		addr->vport = vport_num;
		addr->action = MLX5_ACTION_ADD;
		addr->mc_promisc = true;
	}
}

/* Apply vport rx mode to HW FDB table */
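/* Note: IS_ERR_OR_NULL(rule) != requested_state below means the installed
 * state already matches the requested one, so no reprogramming is needed.
 */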
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
				    bool promisc, bool mc_promisc)
{
	struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;
	struct mlx5_vport *vport = &esw->vports[vport_num];

	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
		goto promisc;

	if (mc_promisc) {
		vport->allmulti_rule =
				esw_fdb_set_vport_allmulti_rule(esw, vport_num);
		if (!allmulti_addr->uplink_rule)
			allmulti_addr->uplink_rule =
				esw_fdb_set_vport_allmulti_rule(esw,
								UPLINK_VPORT);
		allmulti_addr->refcnt++;
	} else if (vport->allmulti_rule) {
		mlx5_del_flow_rules(vport->allmulti_rule);
		vport->allmulti_rule = NULL;

		if (--allmulti_addr->refcnt > 0)
			goto promisc;

		if (allmulti_addr->uplink_rule)
			mlx5_del_flow_rules(allmulti_addr->uplink_rule);
		allmulti_addr->uplink_rule = NULL;
	}

promisc:
	if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
		return;

	if (promisc) {
		vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
								     vport_num);
	} else if (vport->promisc_rule) {
		mlx5_del_flow_rules(vport->promisc_rule);
		vport->promisc_rule = NULL;
	}
}

/* Sync vport rx mode from vport context */
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	int promisc_all = 0;
	int promisc_uc = 0;
	int promisc_mc = 0;
	int err;

	err = mlx5_query_nic_vport_promisc(esw->dev,
					   vport_num,
					   &promisc_uc,
					   &promisc_mc,
					   &promisc_all);
	if (err)
		return;
	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
		  vport_num, promisc_all, promisc_mc);

	if (!vport->info.trusted || !vport->enabled) {
		promisc_uc = 0;
		promisc_mc = 0;
		promisc_all = 0;
	}

	esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
				(promisc_all || promisc_mc));
}

static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = vport->dev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

	if (vport->enabled_events & UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NVPRT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NVPRT_LIST_TYPE_UC);
	}

	if (vport->enabled_events & MC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NVPRT_LIST_TYPE_MC);
	}

	if (vport->enabled_events & PROMISC_CHANGE) {
		esw_update_vport_rx_mode(esw, vport->vport);
		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
			esw_update_vport_mc_promisc(esw, vport->vport);
	}

	if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NVPRT_LIST_TYPE_MC);
	}

	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
	if (vport->enabled)
		arm_vport_context_events_cmd(dev, vport->vport,
					     vport->enabled_events);
}

static void esw_vport_change_handler(struct work_struct *work)
{
	struct mlx5_vport *vport =
		container_of(work, struct mlx5_vport, vport_change_handler);
	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;

	mutex_lock(&esw->state_lock);
	esw_vport_change_handle_locked(vport);
	mutex_unlock(&esw->state_lock);
}

static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
				       struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *vlan_grp = NULL;
	struct mlx5_flow_group *drop_grp = NULL;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	void *match_criteria;
	u32 *flow_group_in;
	/* The egress acl table contains 2 rules:
	 * 1) Allow traffic with vlan_tag=vst_vlan_id
	 * 2) Drop all other traffic.
	 */
	int table_size = 2;
	int err = 0;

	if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		return -EOPNOTSUPP;

	if (!IS_ERR_OR_NULL(vport->egress.acl))
		return 0;

	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
						    vport->vport);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
	if (IS_ERR(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(vlan_grp)) {
		err = PTR_ERR(vlan_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
	drop_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(drop_grp)) {
		err = PTR_ERR(drop_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	vport->egress.acl = acl;
	vport->egress.drop_grp = drop_grp;
	vport->egress.allowed_vlans_grp = vlan_grp;
out:
	kvfree(flow_group_in);
	if (err && !IS_ERR_OR_NULL(vlan_grp))
		mlx5_destroy_flow_group(vlan_grp);
	if (err && !IS_ERR_OR_NULL(acl))
		mlx5_destroy_flow_table(acl);
	return err;
}

static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
					   struct mlx5_vport *vport)
{
	if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
		mlx5_del_flow_rules(vport->egress.allowed_vlan);

	if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
		mlx5_del_flow_rules(vport->egress.drop_rule);

	vport->egress.allowed_vlan = NULL;
	vport->egress.drop_rule = NULL;
}

static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
					 struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->egress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);

	esw_vport_cleanup_egress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
	mlx5_destroy_flow_group(vport->egress.drop_grp);
	mlx5_destroy_flow_table(vport->egress.acl);
	vport->egress.allowed_vlans_grp = NULL;
	vport->egress.drop_grp = NULL;
	vport->egress.acl = NULL;
}

static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	/* The ingress acl table contains 4 groups
	 * (2 active rules at the same time -
	 *      1 allow rule from one of the first 3 groups.
	 *      1 drop rule from the last group):
	 * 1) Allow untagged traffic with smac=original mac.
	 * 2) Allow untagged traffic.
	 * 3) Allow traffic with smac=original mac.
	 * 4) Drop all other traffic.
	 */
	int table_size = 4;
	int err = 0;

	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
		return -EOPNOTSUPP;

	if (!IS_ERR_OR_NULL(vport->ingress.acl))
		return 0;

	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
						    vport->vport);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
	if (IS_ERR(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.acl = acl;

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.allow_untagged_spoofchk_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.allow_untagged_only_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.allow_spoofchk_only_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.drop_grp = g;

out:
	if (err) {
		if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
			mlx5_destroy_flow_group(
					vport->ingress.allow_spoofchk_only_grp);
		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
			mlx5_destroy_flow_group(
					vport->ingress.allow_untagged_only_grp);
		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
			mlx5_destroy_flow_group(
				vport->ingress.allow_untagged_spoofchk_grp);
		if (!IS_ERR_OR_NULL(vport->ingress.acl))
			mlx5_destroy_flow_table(vport->ingress.acl);
	}

	kvfree(flow_group_in);
	return err;
}

static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
					    struct mlx5_vport *vport)
{
	if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
		mlx5_del_flow_rules(vport->ingress.drop_rule);

	if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
		mlx5_del_flow_rules(vport->ingress.allow_rule);

	vport->ingress.drop_rule = NULL;
	vport->ingress.allow_rule = NULL;
}

static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
					  struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->ingress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);

	esw_vport_cleanup_ingress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
	mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
	mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
	mlx5_destroy_flow_group(vport->ingress.drop_grp);
	mlx5_destroy_flow_table(vport->ingress.acl);
	vport->ingress.acl = NULL;
	vport->ingress.drop_grp = NULL;
	vport->ingress.allow_spoofchk_only_grp = NULL;
	vport->ingress.allow_untagged_only_grp = NULL;
	vport->ingress.allow_untagged_spoofchk_grp = NULL;
}

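/* Program the ingress ACL of a legacy-mode vport: one ALLOW rule that
 * accepts only untagged traffic (when VST is configured) and/or traffic
 * whose SMAC equals the administratively set vport MAC (when spoofchk is
 * on), plus one catch-all DROP rule, optionally backed by a flow counter
 * for drop statistics.
 */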
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	struct mlx5_fc *counter = vport->ingress.drop_counter;
	struct mlx5_flow_destination drop_ctr_dst = {0};
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int dest_num = 0;
	int err = 0;
	u8 *smac_v;

	if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
		mlx5_core_warn(esw->dev,
			       "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
			       vport->vport);
		return -EPERM;
	}

	esw_vport_cleanup_ingress_rules(esw, vport);

	if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
		esw_vport_disable_ingress_acl(esw, vport);
		return 0;
	}

	err = esw_vport_enable_ingress_acl(esw, vport);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "failed to enable ingress acl (%d) on vport[%d]\n",
			       err, vport->vport);
		return err;
	}

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->info.vlan, vport->info.qos);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	if (vport->info.vlan || vport->info.qos)
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);

	if (vport->info.spoofchk) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
		smac_v = MLX5_ADDR_OF(fte_match_param,
				      spec->match_value,
				      outer_headers.smac_47_16);
		ether_addr_copy(smac_v, vport->info.mac);
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	vport->ingress.allow_rule =
		mlx5_add_flow_rules(vport->ingress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
		goto out;
	}

	memset(spec, 0, sizeof(*spec));
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;

	/* Attach drop flow counter */
	if (counter) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		drop_ctr_dst.counter = counter;
		dst = &drop_ctr_dst;
		dest_num++;
	}
	vport->ingress.drop_rule =
		mlx5_add_flow_rules(vport->ingress.acl, spec,
				    &flow_act, dst, dest_num);
	if (IS_ERR(vport->ingress.drop_rule)) {
		err = PTR_ERR(vport->ingress.drop_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress drop rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.drop_rule = NULL;
		goto out;
	}

out:
	if (err)
		esw_vport_cleanup_ingress_rules(esw, vport);
	kvfree(spec);
	return err;
}

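/* Program the egress ACL of a legacy-mode vport: one ALLOW rule for frames
 * tagged with the configured VST VLAN id, followed by a catch-all DROP
 * rule, again optionally backed by a drop counter.
 */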
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
				   struct mlx5_vport *vport)
{
	struct mlx5_fc *counter = vport->egress.drop_counter;
	struct mlx5_flow_destination drop_ctr_dst = {0};
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int dest_num = 0;
	int err = 0;

	esw_vport_cleanup_egress_rules(esw, vport);

	if (!vport->info.vlan && !vport->info.qos) {
		esw_vport_disable_egress_acl(esw, vport);
		return 0;
	}

	err = esw_vport_enable_egress_acl(esw, vport);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "failed to enable egress acl (%d) on vport[%d]\n",
			       err, vport->vport);
		return err;
	}

	esw_debug(esw->dev,
		  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->info.vlan, vport->info.qos);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	/* Allowed vlan rule */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	vport->egress.allowed_vlan =
		mlx5_add_flow_rules(vport->egress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->egress.allowed_vlan)) {
		err = PTR_ERR(vport->egress.allowed_vlan);
		esw_warn(esw->dev,
			 "vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
			 vport->vport, err);
		vport->egress.allowed_vlan = NULL;
		goto out;
	}

	/* Drop others rule (star rule) */
	memset(spec, 0, sizeof(*spec));
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;

	/* Attach egress drop flow counter */
	if (counter) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		drop_ctr_dst.counter = counter;
		dst = &drop_ctr_dst;
		dest_num++;
	}
	vport->egress.drop_rule =
		mlx5_add_flow_rules(vport->egress.acl, spec,
				    &flow_act, dst, dest_num);
	if (IS_ERR(vport->egress.drop_rule)) {
		err = PTR_ERR(vport->egress.drop_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure egress drop rule failed, err(%d)\n",
			 vport->vport, err);
		vport->egress.drop_rule = NULL;
	}
out:
	kvfree(spec);
	return err;
}

/* Vport QoS management */
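/* Create the root transmit scheduling arbiter (TSAR) of the e-switch
 * scheduling hierarchy.  Per-vport scheduling elements are later attached
 * under it to enforce max rate limits and min bandwidth (bw_share)
 * guarantees.
 */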
static int esw_create_tsar(struct mlx5_eswitch *esw)
{
	u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	struct mlx5_core_dev *dev = esw->dev;
	int err;

	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
		return 0;

	if (esw->qos.enabled)
		return -EEXIST;

	err = mlx5_create_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 tsar_ctx,
						 &esw->qos.root_tsar_id);
	if (err) {
		esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
		return err;
	}

	esw->qos.enabled = true;
	return 0;
}

static void esw_destroy_tsar(struct mlx5_eswitch *esw)
{
	int err;

	if (!esw->qos.enabled)
		return;

	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
						  SCHEDULING_HIERARCHY_E_SWITCH,
						  esw->qos.root_tsar_id);
	if (err)
		esw_warn(esw->dev, "E-Switch destroy TSAR failed (%d)\n", err);

	esw->qos.enabled = false;
}

static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
				u32 initial_max_rate, u32 initial_bw_share)
{
	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	struct mlx5_vport *vport = &esw->vports[vport_num];
	struct mlx5_core_dev *dev = esw->dev;
	void *vport_elem;
	int err = 0;

	if (!esw->qos.enabled || !MLX5_CAP_GEN(dev, qos) ||
	    !MLX5_CAP_QOS(dev, esw_scheduling))
		return 0;

	if (vport->qos.enabled)
		return -EEXIST;

	MLX5_SET(scheduling_context, sched_ctx, element_type,
		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
				  element_attributes);
	MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
		 esw->qos.root_tsar_id);
	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
		 initial_max_rate);
	MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share);

	err = mlx5_create_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 sched_ctx,
						 &vport->qos.esw_tsar_ix);
	if (err) {
		esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
			 vport_num, err);
		return err;
	}

	vport->qos.enabled = true;
	return 0;
}

static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	int err = 0;

	if (!vport->qos.enabled)
		return;

	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
						  SCHEDULING_HIERARCHY_E_SWITCH,
						  vport->qos.esw_tsar_ix);
	if (err)
		esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
			 vport_num, err);

	vport->qos.enabled = false;
}

static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
				u32 max_rate, u32 bw_share)
{
	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	struct mlx5_vport *vport = &esw->vports[vport_num];
	struct mlx5_core_dev *dev = esw->dev;
	void *vport_elem;
	u32 bitmask = 0;
	int err = 0;

	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
		return -EOPNOTSUPP;

	if (!vport->qos.enabled)
		return -EIO;

	MLX5_SET(scheduling_context, sched_ctx, element_type,
		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
				  element_attributes);
	MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
		 esw->qos.root_tsar_id);
	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
		 max_rate);
	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;

	err = mlx5_modify_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 sched_ctx,
						 vport->qos.esw_tsar_ix,
						 bitmask);
	if (err) {
		esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
			 vport_num, err);
		return err;
	}

	return 0;
}

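/* Derive an IB node GUID from the vport MAC by expanding the 48-bit MAC
 * to 64 bits: the first three MAC octets, then 0xff 0xfe, then the last
 * three octets.  This resembles the EUI-64 expansion, but note that the
 * universal/local bit is deliberately left unflipped here.
 */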
static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
{
	((u8 *)node_guid)[7] = mac[0];
	((u8 *)node_guid)[6] = mac[1];
	((u8 *)node_guid)[5] = mac[2];
	((u8 *)node_guid)[4] = 0xff;
	((u8 *)node_guid)[3] = 0xfe;
	((u8 *)node_guid)[2] = mac[3];
	((u8 *)node_guid)[1] = mac[4];
	((u8 *)node_guid)[0] = mac[5];
}

static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
				 struct mlx5_vport *vport)
{
	int vport_num = vport->vport;

	if (!vport_num)
		return;

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
				      vport_num,
				      vport->info.link_state);
	mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, vport->info.mac);
	mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, vport->info.node_guid);
	modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
			       (vport->info.vlan || vport->info.qos));

	/* Only legacy mode needs ACLs */
	if (esw->mode == SRIOV_LEGACY) {
		esw_vport_ingress_config(esw, vport);
		esw_vport_egress_config(esw, vport);
	}
}

static void esw_vport_create_drop_counters(struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = vport->dev;

	if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) {
		vport->ingress.drop_counter = mlx5_fc_create(dev, false);
		if (IS_ERR(vport->ingress.drop_counter)) {
			esw_warn(dev,
				 "vport[%d] configure ingress drop rule counter failed\n",
				 vport->vport);
			vport->ingress.drop_counter = NULL;
		}
	}

	if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) {
		vport->egress.drop_counter = mlx5_fc_create(dev, false);
		if (IS_ERR(vport->egress.drop_counter)) {
			esw_warn(dev,
				 "vport[%d] configure egress drop rule counter failed\n",
				 vport->vport);
			vport->egress.drop_counter = NULL;
		}
	}
}

static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = vport->dev;

	if (vport->ingress.drop_counter)
		mlx5_fc_destroy(dev, vport->ingress.drop_counter);
	if (vport->egress.drop_counter)
		mlx5_fc_destroy(dev, vport->egress.drop_counter);
}

static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
			     int enable_events)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];

	mutex_lock(&esw->state_lock);
	WARN_ON(vport->enabled);

	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);

	/* Create steering drop counters for ingress and egress ACLs */
	if (vport_num && esw->mode == SRIOV_LEGACY)
		esw_vport_create_drop_counters(vport);

	/* Restore old vport configuration */
	esw_apply_vport_conf(esw, vport);

	/* Attach vport to the eswitch rate limiter */
	if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate,
				 vport->qos.bw_share))
		esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter\n", vport_num);

	/* Sync with current vport context */
	vport->enabled_events = enable_events;
	vport->enabled = true;

	/* Only the PF is trusted by default */
	if (!vport_num)
		vport->info.trusted = true;

	esw_vport_change_handle_locked(vport);

	esw->enabled_vports++;
	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
	mutex_unlock(&esw->state_lock);
}

static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];

	if (!vport->enabled)
		return;

	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
	/* Mark this vport as disabled to discard new events */
	vport->enabled = false;

	synchronize_irq(pci_irq_vector(esw->dev->pdev, MLX5_EQ_VEC_ASYNC));
	/* Wait for current already scheduled events to complete */
	flush_workqueue(esw->work_queue);
	/* Disable events from this vport */
	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
	mutex_lock(&esw->state_lock);
	/* We don't assume VFs will cleanup after themselves.
	 * Calling vport change handler while vport is disabled will cleanup
	 * the vport resources.
	 */
	esw_vport_change_handle_locked(vport);
	vport->enabled_events = 0;
	esw_vport_disable_qos(esw, vport_num);
	if (vport_num && esw->mode == SRIOV_LEGACY) {
		mlx5_modify_vport_admin_state(esw->dev,
					      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
					      vport_num,
					      MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
		esw_vport_disable_egress_acl(esw, vport);
		esw_vport_disable_ingress_acl(esw, vport);
		esw_vport_destroy_drop_counters(vport);
	}
	esw->enabled_vports--;
	mutex_unlock(&esw->state_lock);
}

/* Public E-Switch API */
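/* The e-switch is only driven from a function that manages vports,
 * essentially a PF with the vport_group_manager capability (see the
 * MLX5_VPORT_MANAGER macro); everywhere else these entry points are
 * no-ops.
 */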
#define ESW_ALLOWED(esw) ((esw) && MLX5_VPORT_MANAGER((esw)->dev))

int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
{
	int err;
	int i, enabled_events;

	if (!ESW_ALLOWED(esw))
		return 0;

	if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
		esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
		return -EOPNOTSUPP;
	}

	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
		esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");

	if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
		esw_warn(esw->dev, "E-Switch egress ACL is not supported by FW\n");

	esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
	esw->mode = mode;

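	/* Vport 0 is the PF and VF vports start at 1, hence the nvfs + 1
	 * vports handled below.
	 */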
	if (mode == SRIOV_LEGACY) {
		err = esw_create_legacy_fdb_table(esw, nvfs + 1);
	} else {
		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);

		err = esw_offloads_init(esw, nvfs + 1);
	}

	if (err)
		goto abort;

	err = esw_create_tsar(esw);
	if (err)
		esw_warn(esw->dev, "Failed to create eswitch TSAR\n");

	/* Don't enable vport events when in SRIOV_OFFLOADS mode, since:
	 * 1. L2 table (MPFS) is programmed by PF/VF representors netdevs set_rx_mode
	 * 2. FDB/Eswitch is programmed by user space tools
	 */
	enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : 0;
	for (i = 0; i <= nvfs; i++)
		esw_enable_vport(esw, i, enabled_events);

	esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
		 esw->enabled_vports);
	return 0;

abort:
	esw->mode = SRIOV_NONE;

	if (mode == SRIOV_OFFLOADS)
		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);

	return err;
}
1657 
1658 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
1659 {
1660 	struct esw_mc_addr *mc_promisc;
1661 	int old_mode;
1662 	int nvports;
1663 	int i;
1664 
1665 	if (!ESW_ALLOWED(esw) || esw->mode == SRIOV_NONE)
1666 		return;
1667 
1668 	esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
1669 		 esw->enabled_vports, esw->mode);
1670 
1671 	mc_promisc = &esw->mc_promisc;
1672 	nvports = esw->enabled_vports;
1673 
1674 	for (i = 0; i < esw->total_vports; i++)
1675 		esw_disable_vport(esw, i);
1676 
1677 	if (mc_promisc->uplink_rule)
1678 		mlx5_del_flow_rules(mc_promisc->uplink_rule);
1679 
1680 	esw_destroy_tsar(esw);
1681 
1682 	if (esw->mode == SRIOV_LEGACY)
1683 		esw_destroy_legacy_fdb_table(esw);
1684 	else if (esw->mode == SRIOV_OFFLOADS)
1685 		esw_offloads_cleanup(esw, nvports);
1686 
1687 	old_mode = esw->mode;
1688 	esw->mode = SRIOV_NONE;
1689 
1690 	if (old_mode == SRIOV_OFFLOADS)
1691 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
1692 }
1693 
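/* Allocate and initialize the per-device E-Switch context: the vport
 * change-handler workqueue, the vport array and the offloads
 * representors. SRIOV itself is enabled later, on demand.
 */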
1694 int mlx5_eswitch_init(struct mlx5_core_dev *dev)
1695 {
1696 	int total_vports = MLX5_TOTAL_VPORTS(dev);
1697 	struct mlx5_eswitch *esw;
1698 	int vport_num;
1699 	int err;
1700 
1701 	if (!MLX5_VPORT_MANAGER(dev))
1702 		return 0;
1703 
1704 	esw_info(dev,
1705 		 "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
1706 		 total_vports,
1707 		 MLX5_MAX_UC_PER_VPORT(dev),
1708 		 MLX5_MAX_MC_PER_VPORT(dev));
1709 
1710 	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
1711 	if (!esw)
1712 		return -ENOMEM;
1713 
1714 	esw->dev = dev;
1715 
1716 	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
1717 	if (!esw->work_queue) {
1718 		err = -ENOMEM;
1719 		goto abort;
1720 	}
1721 
1722 	esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
1723 			      GFP_KERNEL);
1724 	if (!esw->vports) {
1725 		err = -ENOMEM;
1726 		goto abort;
1727 	}
1728 
1729 	err = esw_offloads_init_reps(esw);
1730 	if (err)
1731 		goto abort;
1732 
1733 	hash_init(esw->offloads.encap_tbl);
1734 	hash_init(esw->offloads.mod_hdr_tbl);
1735 	mutex_init(&esw->state_lock);
1736 
1737 	for (vport_num = 0; vport_num < total_vports; vport_num++) {
1738 		struct mlx5_vport *vport = &esw->vports[vport_num];
1739 
1740 		vport->vport = vport_num;
1741 		vport->info.link_state = MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
1742 		vport->dev = dev;
1743 		INIT_WORK(&vport->vport_change_handler,
1744 			  esw_vport_change_handler);
1745 	}
1746 
1747 	esw->total_vports = total_vports;
1748 	esw->enabled_vports = 0;
1749 	esw->mode = SRIOV_NONE;
1750 	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
1751 	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
1752 	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
1753 		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
1754 	else
1755 		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
1756 
1757 	dev->priv.eswitch = esw;
1758 	return 0;
1759 abort:
1760 	if (esw->work_queue)
1761 		destroy_workqueue(esw->work_queue);
1762 	esw_offloads_cleanup_reps(esw);
1763 	kfree(esw->vports);
1764 	kfree(esw);
1765 	return err;
1766 }
1767 
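/* Counterpart of mlx5_eswitch_init(): release the workqueue, the
 * representors and the vport array.
 */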
1768 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
1769 {
1770 	if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
1771 		return;
1772 
1773 	esw_info(esw->dev, "cleanup\n");
1774 
1775 	esw->dev->priv.eswitch = NULL;
1776 	destroy_workqueue(esw->work_queue);
1777 	esw_offloads_cleanup_reps(esw);
1778 	kfree(esw->vports);
1779 	kfree(esw);
1780 }
1781 
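/* Handle a NIC vport change EQE from the async event queue. Called
 * from the EQ handling path, so the real work is deferred to the
 * E-Switch workqueue via the per-vport change handler.
 */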
1782 void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
1783 {
1784 	struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change;
1785 	u16 vport_num = be16_to_cpu(vc_eqe->vport_num);
1786 	struct mlx5_vport *vport;
1787 
1788 	if (!esw) {
1789 		pr_warn("MLX5 E-Switch: vport %d got an event while eswitch is not initialized\n",
1790 			vport_num);
1791 		return;
1792 	}
1793 
1794 	vport = &esw->vports[vport_num];
1795 	if (vport->enabled)
1796 		queue_work(esw->work_queue, &vport->vport_change_handler);
1797 }
1798 
1799 /* Vport Administration */
1800 #define LEGAL_VPORT(esw, vport) ((vport) >= 0 && (vport) < (esw)->total_vports)
1801 
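/* Set the administrative MAC of a vport (the ndo_set_vf_mac path).
 * The node GUID is derived from the MAC and programmed as well so that
 * RDMA_CM keeps working on the VF; in legacy mode the ingress ACL is
 * refreshed to enforce the new address.
 */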
1802 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
1803 			       int vport, u8 mac[ETH_ALEN])
1804 {
1805 	struct mlx5_vport *evport;
1806 	u64 node_guid;
1807 	int err = 0;
1808 
1809 	if (!ESW_ALLOWED(esw))
1810 		return -EPERM;
1811 	if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
1812 		return -EINVAL;
1813 
1814 	mutex_lock(&esw->state_lock);
1815 	evport = &esw->vports[vport];
1816 
1817 	if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
1818 		mlx5_core_warn(esw->dev,
1819 			       "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
1820 			       vport);
1821 		err = -EPERM;
1822 		goto unlock;
1823 	}
1824 
1825 	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
1826 	if (err) {
1827 		mlx5_core_warn(esw->dev,
1828 			       "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
1829 			       vport, err);
1830 		goto unlock;
1831 	}
1832 
1833 	node_guid_gen_from_mac(&node_guid, mac);
1834 	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
1835 	if (err)
1836 		mlx5_core_warn(esw->dev,
1837 			       "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
1838 			       vport, err);
1839 
1840 	ether_addr_copy(evport->info.mac, mac);
1841 	evport->info.node_guid = node_guid;
1842 	if (evport->enabled && esw->mode == SRIOV_LEGACY)
1843 		err = esw_vport_ingress_config(esw, evport);
1844 
1845 unlock:
1846 	mutex_unlock(&esw->state_lock);
1847 	return err;
1848 }
1849 
1850 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
1851 				 int vport, int link_state)
1852 {
1853 	struct mlx5_vport *evport;
1854 	int err = 0;
1855 
1856 	if (!ESW_ALLOWED(esw))
1857 		return -EPERM;
1858 	if (!LEGAL_VPORT(esw, vport))
1859 		return -EINVAL;
1860 
1861 	mutex_lock(&esw->state_lock);
1862 	evport = &esw->vports[vport];
1863 
1864 	err = mlx5_modify_vport_admin_state(esw->dev,
1865 					    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
1866 					    vport, link_state);
1867 	if (err) {
1868 		mlx5_core_warn(esw->dev,
1869 			       "Failed to set vport %d link state, err = %d",
1870 			       vport, err);
1871 		goto unlock;
1872 	}
1873 
1874 	evport->info.link_state = link_state;
1875 
1876 unlock:
1877 	mutex_unlock(&esw->state_lock);
1878 	return err;
1879 }
1880 
1881 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
1882 				  int vport, struct ifla_vf_info *ivi)
1883 {
1884 	struct mlx5_vport *evport;
1885 
1886 	if (!ESW_ALLOWED(esw))
1887 		return -EPERM;
1888 	if (!LEGAL_VPORT(esw, vport))
1889 		return -EINVAL;
1890 
1891 	evport = &esw->vports[vport];
1892 
1893 	memset(ivi, 0, sizeof(*ivi));
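	/* vport 0 is the PF; VF indices reported to userspace are zero-based */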
1894 	ivi->vf = vport - 1;
1895 
1896 	mutex_lock(&esw->state_lock);
1897 	ether_addr_copy(ivi->mac, evport->info.mac);
1898 	ivi->linkstate = evport->info.link_state;
1899 	ivi->vlan = evport->info.vlan;
1900 	ivi->qos = evport->info.qos;
1901 	ivi->spoofchk = evport->info.spoofchk;
1902 	ivi->trusted = evport->info.trusted;
1903 	ivi->min_tx_rate = evport->info.min_rate;
1904 	ivi->max_tx_rate = evport->info.max_rate;
1905 	mutex_unlock(&esw->state_lock);
1906 
1907 	return 0;
1908 }
1909 
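/* Program CVLAN strip/insert for a vport and, in legacy mode, rebuild
 * the ingress/egress ACLs that enforce the VLAN.
 */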
1910 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
1911 				  int vport, u16 vlan, u8 qos, u8 set_flags)
1912 {
1913 	struct mlx5_vport *evport;
1914 	int err = 0;
1915 
1916 	if (!ESW_ALLOWED(esw))
1917 		return -EPERM;
1918 	if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
1919 		return -EINVAL;
1920 
1921 	mutex_lock(&esw->state_lock);
1922 	evport = &esw->vports[vport];
1923 
1924 	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
1925 	if (err)
1926 		goto unlock;
1927 
1928 	evport->info.vlan = vlan;
1929 	evport->info.qos = qos;
1930 	if (evport->enabled && esw->mode == SRIOV_LEGACY) {
1931 		err = esw_vport_ingress_config(esw, evport);
1932 		if (err)
1933 			goto unlock;
1934 		err = esw_vport_egress_config(esw, evport);
1935 	}
1936 
1937 unlock:
1938 	mutex_unlock(&esw->state_lock);
1939 	return err;
1940 }
1941 
1942 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
1943 				int vport, u16 vlan, u8 qos)
1944 {
1945 	u8 set_flags = 0;
1946 
1947 	if (vlan || qos)
1948 		set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
1949 
1950 	return __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
1951 }
1952 
1953 int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
1954 				    int vport, bool spoofchk)
1955 {
1956 	struct mlx5_vport *evport;
1957 	bool pschk;
1958 	int err = 0;
1959 
1960 	if (!ESW_ALLOWED(esw))
1961 		return -EPERM;
1962 	if (!LEGAL_VPORT(esw, vport))
1963 		return -EINVAL;
1964 
1965 	mutex_lock(&esw->state_lock);
1966 	evport = &esw->vports[vport];
1967 	pschk = evport->info.spoofchk;
1968 	evport->info.spoofchk = spoofchk;
1969 	if (evport->enabled && esw->mode == SRIOV_LEGACY)
1970 		err = esw_vport_ingress_config(esw, evport);
1971 	if (err)
1972 		evport->info.spoofchk = pschk;
1973 	mutex_unlock(&esw->state_lock);
1974 
1975 	return err;
1976 }
1977 
1978 int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
1979 				 int vport, bool setting)
1980 {
1981 	struct mlx5_vport *evport;
1982 
1983 	if (!ESW_ALLOWED(esw))
1984 		return -EPERM;
1985 	if (!LEGAL_VPORT(esw, vport))
1986 		return -EINVAL;
1987 
1988 	mutex_lock(&esw->state_lock);
1989 	evport = &esw->vports[vport];
1990 	evport->info.trusted = setting;
1991 	if (evport->enabled)
1992 		esw_vport_change_handle_locked(evport);
1993 	mutex_unlock(&esw->state_lock);
1994 
1995 	return 0;
1996 }
1997 
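/* Compute the divider that maps the largest configured min_rate to the
 * firmware's maximum TSAR bw_share; every other vport's guarantee is
 * scaled relative to it in normalize_vports_min_rate().
 */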
1998 static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
1999 {
2000 	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
2001 	struct mlx5_vport *evport;
2002 	u32 max_guarantee = 0;
2003 	int i;
2004 
2005 	for (i = 0; i < esw->total_vports; i++) {
2006 		evport = &esw->vports[i];
2007 		if (!evport->enabled || evport->info.min_rate < max_guarantee)
2008 			continue;
2009 		max_guarantee = evport->info.min_rate;
2010 	}
2011 
2012 	return max_t(u32, max_guarantee / fw_max_bw_share, 1);
2013 }
2014 
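/* Recompute bw_share for each enabled vport from its min_rate and the
 * common divider, pushing the new value to firmware only when it
 * actually changed.
 */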
2015 static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
2016 {
2017 	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
2018 	struct mlx5_vport *evport;
2019 	u32 vport_max_rate;
2020 	u32 vport_min_rate;
2021 	u32 bw_share;
2022 	int err;
2023 	int i;
2024 
2025 	for (i = 0; i < esw->total_vports; i++) {
2026 		evport = &esw->vports[i];
2027 		if (!evport->enabled)
2028 			continue;
2029 		vport_min_rate = evport->info.min_rate;
2030 		vport_max_rate = evport->info.max_rate;
2031 		bw_share = MLX5_MIN_BW_SHARE;
2032 
2033 		if (vport_min_rate)
2034 			bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate,
2035 							 divider,
2036 							 fw_max_bw_share);
2037 
2038 		if (bw_share == evport->qos.bw_share)
2039 			continue;
2040 
2041 		err = esw_vport_qos_config(esw, i, vport_max_rate,
2042 					   bw_share);
2043 		if (!err)
2044 			evport->qos.bw_share = bw_share;
2045 		else
2046 			return err;
2047 	}
2048 
2049 	return 0;
2050 }
2051 
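/* Set the guaranteed (min) and limit (max) TX rates of a vport (the
 * ndo_set_vf_rate path). Changing a min_rate requires renormalizing
 * the bw_share of all enabled vports against the new divider.
 */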
2052 int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
2053 				u32 max_rate, u32 min_rate)
2054 {
2055 	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
2056 	bool min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
2057 					fw_max_bw_share >= MLX5_MIN_BW_SHARE;
2058 	bool max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
2059 	struct mlx5_vport *evport;
2060 	u32 previous_min_rate;
2061 	u32 divider;
2062 	int err = 0;
2063 
2064 	if (!ESW_ALLOWED(esw))
2065 		return -EPERM;
2066 	if (!LEGAL_VPORT(esw, vport))
2067 		return -EINVAL;
2068 	if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
2069 		return -EOPNOTSUPP;
2070 
2071 	mutex_lock(&esw->state_lock);
2072 	evport = &esw->vports[vport];
2073 
2074 	if (min_rate == evport->info.min_rate)
2075 		goto set_max_rate;
2076 
2077 	previous_min_rate = evport->info.min_rate;
2078 	evport->info.min_rate = min_rate;
2079 	divider = calculate_vports_min_rate_divider(esw);
2080 	err = normalize_vports_min_rate(esw, divider);
2081 	if (err) {
2082 		evport->info.min_rate = previous_min_rate;
2083 		goto unlock;
2084 	}
2085 
2086 set_max_rate:
2087 	if (max_rate == evport->info.max_rate)
2088 		goto unlock;
2089 
2090 	err = esw_vport_qos_config(esw, vport, max_rate, evport->qos.bw_share);
2091 	if (!err)
2092 		evport->info.max_rate = max_rate;
2093 
2094 unlock:
2095 	mutex_unlock(&esw->state_lock);
2096 	return err;
2097 }
2098 
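/* Collect drop statistics for a legacy mode vport from the ACL drop
 * flow counters and, where the firmware supports it, from the
 * vport-down discard counters.
 */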
2099 static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
2100 					       int vport_idx,
2101 					       struct mlx5_vport_drop_stats *stats)
2102 {
2103 	struct mlx5_eswitch *esw = dev->priv.eswitch;
2104 	struct mlx5_vport *vport = &esw->vports[vport_idx];
2105 	u64 rx_discard_vport_down, tx_discard_vport_down;
2106 	u64 bytes = 0;
2107 	u16 idx = 0;
2108 	int err = 0;
2109 
2110 	if (!vport->enabled || esw->mode != SRIOV_LEGACY)
2111 		return 0;
2112 
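	/* ACL direction is from the E-Switch's point of view, so drops on
	 * the egress ACL are packets the VF failed to receive (rx_dropped)
	 * and drops on the ingress ACL are packets the VF failed to send
	 * (tx_dropped).
	 */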
2113 	if (vport->egress.drop_counter) {
2114 		idx = vport->egress.drop_counter->id;
2115 		mlx5_fc_query(dev, idx, &stats->rx_dropped, &bytes);
2116 	}
2117 
2118 	if (vport->ingress.drop_counter) {
2119 		idx = vport->ingress.drop_counter->id;
2120 		mlx5_fc_query(dev, idx, &stats->tx_dropped, &bytes);
2121 	}
2122 
2123 	if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
2124 	    !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
2125 		return 0;
2126 
2127 	err = mlx5_query_vport_down_stats(dev, vport_idx,
2128 					  &rx_discard_vport_down,
2129 					  &tx_discard_vport_down);
2130 	if (err)
2131 		return err;
2132 
2133 	if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
2134 		stats->rx_dropped += rx_discard_vport_down;
2135 	if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
2136 		stats->tx_dropped += tx_discard_vport_down;
2137 
2138 	return 0;
2139 }
2140 
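/* Fill ifla_vf_stats for a vport (the ndo_get_vf_stats path) from the
 * firmware QUERY_VPORT_COUNTER command plus the driver-maintained drop
 * statistics.
 */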
2141 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
2142 				 int vport,
2143 				 struct ifla_vf_stats *vf_stats)
2144 {
2145 	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
2146 	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
2147 	struct mlx5_vport_drop_stats stats = {0};
2148 	int err = 0;
2149 	u32 *out;
2150 
2151 	if (!ESW_ALLOWED(esw))
2152 		return -EPERM;
2153 	if (!LEGAL_VPORT(esw, vport))
2154 		return -EINVAL;
2155 
2156 	out = kvzalloc(outlen, GFP_KERNEL);
2157 	if (!out)
2158 		return -ENOMEM;
2159 
2160 	MLX5_SET(query_vport_counter_in, in, opcode,
2161 		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
2162 	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
2163 	MLX5_SET(query_vport_counter_in, in, vport_number, vport);
2164 	if (vport)
2165 		MLX5_SET(query_vport_counter_in, in, other_vport, 1);
2166 
2168 	err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
2169 	if (err)
2170 		goto free_out;
2171 
2172 	#define MLX5_GET_CTR(p, x) \
2173 		MLX5_GET64(query_vport_counter_out, p, x)
2174 
2175 	memset(vf_stats, 0, sizeof(*vf_stats));
2176 	vf_stats->rx_packets =
2177 		MLX5_GET_CTR(out, received_eth_unicast.packets) +
2178 		MLX5_GET_CTR(out, received_eth_multicast.packets) +
2179 		MLX5_GET_CTR(out, received_eth_broadcast.packets);
2180 
2181 	vf_stats->rx_bytes =
2182 		MLX5_GET_CTR(out, received_eth_unicast.octets) +
2183 		MLX5_GET_CTR(out, received_eth_multicast.octets) +
2184 		MLX5_GET_CTR(out, received_eth_broadcast.octets);
2185 
2186 	vf_stats->tx_packets =
2187 		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
2188 		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
2189 		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
2190 
2191 	vf_stats->tx_bytes =
2192 		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
2193 		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
2194 		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
2195 
2196 	vf_stats->multicast =
2197 		MLX5_GET_CTR(out, received_eth_multicast.packets);
2198 
2199 	vf_stats->broadcast =
2200 		MLX5_GET_CTR(out, received_eth_broadcast.packets);
2201 
2202 	err = mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
2203 	if (err)
2204 		goto free_out;
2205 	vf_stats->rx_dropped = stats.rx_dropped;
2206 	vf_stats->tx_dropped = stats.tx_dropped;
2207 
2208 free_out:
2209 	kvfree(out);
2210 	return err;
2211 }
2212 
2213 u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
2214 {
2215 	return esw->mode;
2216 }
2217 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
2218