/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

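/* switchdev attribute "get" handler. The switch ID reported for
 * SWITCHDEV_ATTR_ID_PORT_PARENT_ID is the ASIC's base MAC, which is
 * shared by all ports of the same device, so the bridge can tell that
 * they belong to a single physical switch.
 */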
static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

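/* Program the per-VLAN STP state of the port via the SPMS register.
 * Note that the mapping below is not one-to-one: BR_STATE_DISABLED is
 * programmed as forwarding (a disabled port is presumably blocked by
 * other means, such as its administrative state), and
 * BR_STATE_LISTENING is programmed as learning, the closest state the
 * device offers.
 */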
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}

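/* Enable or disable flooding for a range of FIDs. Two flood tables are
 * involved: the unicast (UC) table, which controls flooding of unknown
 * unicast traffic, and the BM table, which appears to cover
 * broadcast/multicast. When 'only_uc' is set, only the former is
 * touched.
 */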
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 fid_begin, u16 fid_end, bool set,
				     bool only_uc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 range = fid_end - fid_begin + 1;
	char *sftr_pl;
	int err;

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid_begin,
			    MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
			    mlxsw_sp_port->local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto buffer_out;

	/* Flooding control allows one to decide whether a given port will
	 * flood unicast traffic for which there is no FDB entry.
	 */
	if (only_uc)
		goto buffer_out;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid_begin,
			    MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
			    mlxsw_sp_port->local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

buffer_out:
	kfree(sftr_pl);
	return err;
}

static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
						true);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
	netdev_err(dev, "Failed to configure unicast flooding\n");
	return err;
}

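/* Apply bridge port flags. Only a change in BR_FLOOD requires touching
 * the hardware here; BR_LEARNING and BR_LEARNING_SYNC are merely
 * cached and consulted later, when FDB notifications are processed.
 */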
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	bool set;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		set = !mlxsw_sp_port->uc_flood;
		err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
		if (err)
			return err;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

	return 0;
}

static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}

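/* The bridge hands us the ageing time in clock_t units; convert it to
 * seconds, the granularity the SFDAT register seems to work in, before
 * programming it.
 */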
static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];
	int err;

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
	if (err)
		return err;

	set_bit(fid, mlxsw_sp->active_fids);
	return 0;
}

static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	clear_bit(fid, mlxsw_sp->active_fids);

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
			    fid, fid);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

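/* Map a VID to its identically numbered FID on this port. When the
 * port has vPorts on top of it (nr_vfids != 0) it operates in Virtual
 * mode and needs an explicit {Port, VID} to FID mapping; otherwise the
 * global VID to FID mapping set at FID creation time suffices.
 */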
static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (mlxsw_sp_port->nr_vfids)
		mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	else
		mt = MLXSW_REG_SVFA_MT_VID_TO_FID;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
}

static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (!mlxsw_sp_port->nr_vfids)
		return 0;

	mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
}

static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
				  u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_add_vid(dev, 0, vid);
		if (err)
			goto err_port_add_vid;
	}
	return 0;

err_port_add_vid:
	/* Roll back the VIDs added so far; written so that the unsigned
	 * 'vid' cannot underflow when vid_begin is 0.
	 */
	while (vid-- != vid_begin)
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	return err;
}

static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	enum mlxsw_reg_svfa_mt mt;
	u16 vid, vid_e;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then packets ingressing through the port with
	 * the specified VIDs will be directed to the CPU.
	 */
	if (!mlxsw_sp_port->bridged)
		return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);

	for (vid = vid_begin; vid <= vid_end; vid++) {
		if (!test_bit(vid, mlxsw_sp->active_fids)) {
			err = mlxsw_sp_fid_create(mlxsw_sp, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=%d\n",
					   vid);
				return err;
			}

			/* When creating a FID, we set a VID to FID mapping
			 * regardless of the port's mode.
			 */
			mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
			err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
							   true, vid, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
					   vid);
				return err;
			}
		}

		/* Set FID mapping according to port's mode */
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to map FID=%d\n", vid);
			return err;
		}
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					true, false);
	if (err) {
		netdev_err(dev, "Failed to configure flooding\n");
		return err;
	}

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, true,
					     flag_untagged);
		if (err) {
			netdev_err(mlxsw_sp_port->dev, "Unable to add VIDs %d-%d\n",
				   vid, vid_e);
			return err;
		}
	}

	vid = vid_begin;
	if (flag_pvid && mlxsw_sp_port->pvid != vid) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(mlxsw_sp_port->dev, "Unable to set PVID %d\n",
				   vid);
			return err;
		}
		mlxsw_sp_port->pvid = vid;
	}

	/* Change activity bits only if the HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		set_bit(vid, mlxsw_sp_port->active_vlans);

	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					   mlxsw_sp_port->stp_state);
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool untagged_flag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid_flag = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 untagged_flag, pvid_flag);
}

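/* Write or remove a single unicast FDB record via the SFD register.
 * Dynamic entries are subject to ageing, whereas static ones persist
 * until explicitly removed.
 */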
static int mlxsw_sp_port_fdb_op(struct mlxsw_sp_port *mlxsw_sp_port,
				const char *mac, u16 vid, bool adding,
				bool dynamic)
{
	enum mlxsw_reg_sfd_rec_policy policy;
	enum mlxsw_reg_sfd_op op;
	char *sfd_pl;
	int err;

	if (!vid)
		vid = mlxsw_sp_port->pvid;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	policy = dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			   MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
	op = adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
		      MLXSW_REG_SFD_OP_WRITE_REMOVE;
	mlxsw_reg_sfd_pack(sfd_pl, op, 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy,
			      mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP,
			      mlxsw_sp_port->local_port);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sfd),
			      sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid,
				    true, false);
}

static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
				   u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_kill_vid(dev, 0, vid);
		if (err)
			return err;
	}

	return 0;
}

static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool init)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, vid_e;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then prevent packets ingressing through the
	 * port with the specified VIDs from being trapped to the CPU.
	 */
	if (!init && !mlxsw_sp_port->bridged)
		return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);
		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, false,
					     false);
		if (err) {
			netdev_err(mlxsw_sp_port->dev, "Unable to del VIDs %d-%d\n",
				   vid, vid_e);
			return err;
		}
	}

	if ((mlxsw_sp_port->pvid >= vid_begin) &&
	    (mlxsw_sp_port->pvid <= vid_end)) {
		/* Default VLAN is always 1 */
		mlxsw_sp_port->pvid = 1;
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port,
					     mlxsw_sp_port->pvid);
		if (err) {
			netdev_err(mlxsw_sp_port->dev, "Unable to set default PVID %d\n",
				   mlxsw_sp_port->pvid);
			return err;
		}
	}

	if (init)
		goto out;

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Failed to clear flooding\n");
		return err;
	}

	for (vid = vid_begin; vid <= vid_end; vid++) {
		/* Remove FID mapping in case of Virtual mode */
		err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to unmap FID=%d\n", vid);
			return err;
		}
	}

out:
	/* Change activity bits only if the HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}

597 
598 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
599 				   const struct switchdev_obj_port_vlan *vlan)
600 {
601 	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
602 					 vlan->vid_begin, vlan->vid_end, false);
603 }
604 
605 static int
606 mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
607 			     const struct switchdev_obj_port_fdb *fdb)
608 {
609 	return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid,
610 				    false, false);
611 }
612 
613 static int mlxsw_sp_port_obj_del(struct net_device *dev,
614 				 const struct switchdev_obj *obj)
615 {
616 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
617 	int err = 0;
618 
619 	switch (obj->id) {
620 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
621 		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
622 					      SWITCHDEV_OBJ_PORT_VLAN(obj));
623 		break;
624 	case SWITCHDEV_OBJ_ID_PORT_FDB:
625 		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
626 						   SWITCHDEV_OBJ_PORT_FDB(obj));
627 		break;
628 	default:
629 		err = -EOPNOTSUPP;
630 		break;
631 	}
632 
633 	return err;
634 }
635 
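/* Dump the hardware FDB using an SFD query-dump session, invoking the
 * callback for every unicast record that belongs to this port. Even if
 * a callback fails, the dump is run to completion (see below) and only
 * the first error is reported to the caller.
 */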
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb)
{
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 vid;
	u8 local_port;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp_port->mlxsw_sp->core,
				      MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &vid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					fdb->vid = vid;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}

static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}
	return err;
}

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};

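/* Handle a single learned / aged-out MAC notification. The entry is
 * written to (or removed from) the FDB according to the port's
 * learning setting and, when learning_sync is also enabled, reflected
 * into the bridge's software FDB through the switchdev notifier chain.
 */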
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &vid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		return;
	}

	err = mlxsw_sp_port_fdb_op(mlxsw_sp_port, mac, vid,
				   adding && mlxsw_sp_port->learning, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (mlxsw_sp_port->learning && mlxsw_sp_port->learning_sync) {
		struct switchdev_notifier_fdb_info info;
		unsigned long notifier_type;

		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, mlxsw_sp_port->dev,
					 &info.info);
	}
}

static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	}
}

static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
			      msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}

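/* Delayed work that polls the device for FDB notifications via the SFN
 * register. The register is queried repeatedly until it returns no
 * records, and the work then re-arms itself at the configured learning
 * interval.
 */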
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	do {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			break;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

	} while (num_rec);

	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}

static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}

static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
{
	u16 fid;

	for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
		mlxsw_sp_fid_destroy(mlxsw_sp, fid);
}

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
}

int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* Allow only untagged packets to ingress and tag them internally
	 * with VID 1.
	 */
	mlxsw_sp_port->pvid = 1;
	err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1,
					true);
	if (err) {
		netdev_err(dev, "Unable to init VLANs\n");
		return err;
	}

	/* Add an implicit VLAN interface on the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	err = mlxsw_sp_port_add_vid(dev, 0, 1);
	if (err)
		netdev_err(dev, "Failed to configure default vFID\n");

	return err;
}

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}