/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

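/* Resolve the FID a given VID maps to on this port. For vPorts (VLAN
 * upper devices) the FID is derived from the vPort's vFID; otherwise
 * the VID itself is used, falling back to the port's PVID when the
 * VID is zero.
 */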
static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 vid)
{
	u16 fid = vid;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

		fid = mlxsw_sp_vfid_to_fid(vfid);
	}

	if (!fid)
		fid = mlxsw_sp_port->pvid;

	return fid;
}

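/* switchdev operations carry the device they originated on. When that
 * device is a VLAN device on top of the port, translate the operation
 * to the matching vPort, so the rest of the code acts on the correct
 * entity.
 */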
static struct mlxsw_sp_port *
mlxsw_sp_port_orig_get(struct net_device *dev,
		       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid;

	if (!is_vlan_dev(dev))
		return mlxsw_sp_port;

	vid = vlan_dev_vlan_id(dev);
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	WARN_ON(!mlxsw_sp_vport);

	return mlxsw_sp_vport;
}

static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

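/* Program the port's per-VID STP state via the SPMS register. The
 * device does not distinguish between the LISTENING, DISABLED and
 * BLOCKING bridge states; all three are mapped to DISCARDING.
 */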
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	} else {
		for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
			mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}

static bool mlxsw_sp_vfid_is_vport_br(u16 vfid)
{
	return vfid >= MLXSW_SP_VFID_PORT_MAX;
}

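/* Set or clear the port's membership in the unicast (UC) and
 * broadcast/multicast (BM) flood tables via the SFTR register. For
 * vPorts the FID-based table type is used and, for vFIDs that are not
 * associated with a vPort bridge, the CPU port is updated instead of
 * the local port.
 */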
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 idx_begin, u16 idx_end, bool set,
				     bool only_uc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	enum mlxsw_flood_table_type table_type;
	u16 range = idx_end - idx_begin + 1;
	char *sftr_pl;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
		if (mlxsw_sp_vfid_is_vport_br(idx_begin))
			local_port = mlxsw_sp_port->local_port;
		else
			local_port = MLXSW_PORT_CPU_PORT;
	} else {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
	}

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto buffer_out;

	/* Flooding control allows one to decide whether a given port will
	 * flood unicast traffic for which there is no FDB entry.
	 */
	if (only_uc)
		goto buffer_out;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto err_flood_bm_set;
	goto buffer_out;

err_flood_bm_set:
	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, !set);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
buffer_out:
	kfree(sftr_pl);
	return err;
}

static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);

		return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
						 set, true);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
						true);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
	netdev_err(dev, "Failed to configure unicast flooding\n");
	return err;
}

int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
			     bool set, bool only_uc)
{
	/* In case of vFIDs, index into the flooding table is relative to
	 * the start of the vFIDs range.
	 */
	return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
					 only_uc);
}

static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	bool set;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		set = !mlxsw_sp_port->uc_flood;
		err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
		if (err)
			return err;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

	return 0;
}

static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}

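/* The bridge passes the ageing time in clock_t units. Convert it to
 * seconds and validate it against the device's limits during the
 * prepare phase, so that the commit phase cannot fail.
 */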
static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans)) {
		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
			return -ERANGE;
		return 0;
	}

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	/* SWITCHDEV_TRANS_PREPARE phase */
	if (!vlan_enabled && mlxsw_sp->master_bridge.dev == orig_dev) {
		netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
		return -EINVAL;
	}

	return 0;
}

static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err) {
			netdev_err(dev, "Failed to disallow untagged traffic\n");
			return err;
		}
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to set PVID\n");
			return err;
		}

		/* Only allow untagged traffic if it is not already allowed. */
		if (!mlxsw_sp_port->pvid) {
			err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
							       true);
			if (err) {
				netdev_err(dev, "Failed to allow untagged traffic\n");
				goto err_port_allow_untagged_set;
			}
		}
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];
	int err;

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
	if (err)
		return err;

	set_bit(fid, mlxsw_sp->active_fids);
	return 0;
}

static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	clear_bit(fid, mlxsw_sp->active_fids);

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
			    fid, fid);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (!list_empty(&mlxsw_sp_port->vports_list))
		mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	else
		mt = MLXSW_REG_SVFA_MT_VID_TO_FID;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
}

static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (list_empty(&mlxsw_sp_port->vports_list))
		return 0;

	mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
}

static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
				  u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_add_vid(dev, 0, vid);
		if (err)
			goto err_port_add_vid;
	}
	return 0;

err_port_add_vid:
	for (vid--; vid >= vid_begin; vid--)
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	return err;
}

static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool is_member,
				     bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					     is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

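/* Add a range of VIDs to the port. The ordering matters: FIDs are
 * created and mapped first, flooding and VLAN membership are
 * configured next, then the PVID is updated if needed, and only once
 * the hardware operations succeeded are the software activity bits
 * and the STP state set. Each step is unwound in reverse on failure.
 */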
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid, old_pvid;
	enum mlxsw_reg_svfa_mt mt;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then packets ingressing through the port with
	 * the specified VIDs will be directed to CPU.
	 */
	if (!mlxsw_sp_port->bridged)
		return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);

	for (vid = vid_begin; vid <= vid_end; vid++) {
		if (!test_bit(vid, mlxsw_sp->active_fids)) {
			err = mlxsw_sp_fid_create(mlxsw_sp, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=%d\n",
					   vid);
				return err;
			}

			/* When creating a FID, we set a VID to FID mapping
			 * regardless of the port's mode.
			 */
			mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
			err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
							   true, vid, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
					   vid);
				goto err_port_vid_to_fid_set;
			}
		}
	}

	/* Set FID mapping according to port's mode */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to map FID=%d\n", vid);
			last_visited_vid = --vid;
			goto err_port_fid_map;
		}
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					true, false);
	if (err) {
		netdev_err(dev, "Failed to configure flooding\n");
		goto err_port_flood_set;
	}

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					true, flag_untagged);
	if (err) {
		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
			   vid_end);
		goto err_port_vlans_set;
	}

	old_pvid = mlxsw_sp_port->pvid;
	if (flag_pvid && old_pvid != vid_begin) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
		if (err) {
			netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
			goto err_port_pvid_set;
		}
	} else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
		if (err) {
			netdev_err(dev, "Unable to del PVID\n");
			goto err_port_pvid_set;
		}
	}

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		set_bit(vid, mlxsw_sp_port->active_vlans);
		if (flag_untagged)
			set_bit(vid, mlxsw_sp_port->untagged_vlans);
		else
			clear_bit(vid, mlxsw_sp_port->untagged_vlans);
	}

	/* STP state change must be done after we set active VLANs */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					  mlxsw_sp_port->stp_state);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	return 0;

err_port_vid_to_fid_set:
	mlxsw_sp_fid_destroy(mlxsw_sp, vid);
	return err;

err_port_stp_state_set:
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);
	if (old_pvid != mlxsw_sp_port->pvid)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_vlans_set:
	__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_flood_set:
	last_visited_vid = vid_end;
err_port_fid_map:
	for (vid = last_visited_vid; vid >= vid_begin; vid--)
		mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
	return err;
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 flag_untagged, flag_pvid);
}

static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
			MLXSW_REG_SFD_OP_WRITE_REMOVE;
}

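/* Write or remove a unicast FDB record via the SFD register. As the
 * record policy names suggest, dynamic entries are subject to ageing
 * by the device, while static entries (used for entries installed by
 * user space) are not.
 */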
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
			      local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid, true, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   true, false);
}

static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid, bool adding)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}

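/* Add or remove the port in the port bitmap of a multicast group
 * (MID) via the SMID register. When the group is first used, the bits
 * of all other existing ports are explicitly cleared, so the group
 * starts out from a known state.
 */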
static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
				  bool add, bool clear_all_ports)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
	if (clear_all_ports) {
		for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
			if (mlxsw_sp->ports[i])
				mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
					      const unsigned char *addr,
					      u16 vid)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
		if (ether_addr_equal(mid->addr, addr) && mid->vid == vid)
			return mid;
	}
	return NULL;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
						const unsigned char *addr,
						u16 vid)
{
	struct mlxsw_sp_mid *mid;
	u16 mid_idx;

	mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return NULL;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
	ether_addr_copy(mid->addr, addr);
	mid->vid = vid;
	mid->mid = mid_idx;
	mid->ref_count = 0;
	list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);

	return mid;
}

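/* Drop a reference to a MID entry. Returns 1 when the last reference
 * was dropped and the entry was freed, telling the caller to also
 * remove the corresponding multicast FDB record from the device.
 */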
static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_mid *mid)
{
	if (--mid->ref_count == 0) {
		list_del(&mid->list);
		clear_bit(mid->mid, mlxsw_sp->br_mids.mapped);
		kfree(mid);
		return 1;
	}
	return 0;
}

static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	mid->ref_count++;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
				     mid->ref_count == 1);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	if (mid->ref_count == 1) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
					   true);
		if (err) {
			netdev_err(dev, "Unable to set MC SFD\n");
			goto err_out;
		}
	}

	return 0;

err_out:
	__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
	return err;
}

static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
				   u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_kill_vid(dev, 0, vid);
		if (err)
			return err;
	}

	return 0;
}

static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool init)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, pvid;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then prevent packets ingressing through the
	 * port with the specified VIDs from being trapped to CPU.
	 */
	if (!init && !mlxsw_sp_port->bridged)
		return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
			   vid_end);
		return err;
	}

	if (init)
		goto out;

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
		if (err) {
			netdev_err(dev, "Unable to del PVID %d\n", pvid);
			return err;
		}
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Failed to clear flooding\n");
		return err;
	}

	for (vid = vid_begin; vid <= vid_end; vid++) {
		/* Remove FID mapping in case of Virtual mode */
		err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to unmap FID=%d\n", vid);
			return err;
		}
	}

out:
	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}

static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end, false);
}

void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		__mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false);
}

static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid,
					       false, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   false, false);
}

static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	u16 mid_idx;
	int err = 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
	if (err)
		netdev_err(dev, "Unable to remove port from SMID\n");

	mid_idx = mid->mid;
	if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
					   false);
		if (err)
			netdev_err(dev, "Unable to remove MC SFD\n");
	}

	return err;
}

static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
						   u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (mlxsw_sp_port)
			return mlxsw_sp_port;
	}
	return NULL;
}

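/* Dump the device's FDB via an SFD query session. Even if the dump
 * callback fails mid-way, the query is run to completion so that the
 * session in firmware is properly finished; the first error is stored
 * and returned once the dump is done.
 */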
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb,
				  struct net_device *orig_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *tmp;
	u16 vport_fid = 0;
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 fid;
	u8 local_port;
	u16 lag_id;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 vfid;

		vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
		vport_fid = mlxsw_sp_vfid_to_fid(vfid);
	}

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
				mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
							    mac, &fid, &lag_id);
				tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
				if (tmp && tmp->local_port ==
				    mlxsw_sp_port->local_port) {
					/* LAG records can only point to LAG
					 * devices or VLAN devices on top.
					 */
					if (!netif_is_lag_master(orig_dev) &&
					    !is_vlan_dev(orig_dev))
						continue;
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}

static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vlan->flags = 0;
		vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		return cb(&vlan->obj);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}
	return err;
}

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb,
					     obj->orig_dev);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};

static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
					char *mac, u16 vid,
					struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info;
	unsigned long notifier_type;

	if (learning_sync) {
		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, dev, &info.info);
	}
}

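/* Process a learned / aged-out MAC notification for a physical port.
 * Records pointing to a vFID are re-attributed to the matching vPort.
 * If no matching port is found, or learning is disabled on it, the
 * entry is removed from the device instead of being added.
 */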
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
								  vfid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

	adding = adding && mlxsw_sp_port->learning;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
				    adding, mac, vid, mlxsw_sp_port->dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
								  vfid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}

		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		dev = mlxsw_sp_vport->dev;
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
		vid = fid;
	}

	adding = adding && mlxsw_sp_port->learning;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
				    vid, dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}

static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
			       msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}

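/* Delayed work that polls the device for FDB notifications via the
 * SFN register and processes each record. It runs under RTNL,
 * presumably because processing may emit switchdev FDB notifications
 * toward the bridge, and re-arms itself at the configured learning
 * interval.
 */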
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	rtnl_lock();
	do {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			break;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

	} while (num_rec);
	rtnl_unlock();

	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}

static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}

static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
{
	u16 fid;

	for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
		mlxsw_sp_fid_destroy(mlxsw_sp, fid);
}

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
}

int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* Allow only untagged packets to ingress and tag them internally
	 * with VID 1.
	 */
	mlxsw_sp_port->pvid = 1;
	err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1,
					true);
	if (err) {
		netdev_err(dev, "Unable to init VLANs\n");
		return err;
	}

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	err = mlxsw_sp_port_add_vid(dev, 0, 1);
	if (err)
		netdev_err(dev, "Failed to configure default vFID\n");

	return err;
}

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}