// SPDX-License-Identifier: GPL-2.0+

#include <linux/if_bridge.h>
#include <net/switchdev.h>

#include "lan966x_main.h"

static struct notifier_block lan966x_netdevice_nb __read_mostly;

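/* Recompute the IPv4/IPv6 multicast flood mask (PGID_MCIPV4/PGID_MCIPV6) of
 * a port. While mcast snooping is disabled the port follows the generic
 * mcast flood mask (PGID_MC); with snooping enabled IP multicast flooding
 * is turned off for the port.
 */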
static void lan966x_port_set_mcast_ip_flood(struct lan966x_port *port,
					    u32 pgid_ip)
{
	struct lan966x *lan966x = port->lan966x;
	u32 flood_mask_ip;

	flood_mask_ip = lan_rd(lan966x, ANA_PGID(pgid_ip));
	flood_mask_ip = ANA_PGID_PGID_GET(flood_mask_ip);

	/* If mcast snooping is not enabled, use the mcast flood mask to
	 * decide whether to enable multicast flooding on this port.
	 */
	if (!port->mcast_ena) {
		u32 flood_mask;

		flood_mask = lan_rd(lan966x, ANA_PGID(PGID_MC));
		flood_mask = ANA_PGID_PGID_GET(flood_mask);

		if (flood_mask & BIT(port->chip_port))
			flood_mask_ip |= BIT(port->chip_port);
		else
			flood_mask_ip &= ~BIT(port->chip_port);
	} else {
		flood_mask_ip &= ~BIT(port->chip_port);
	}

	lan_rmw(ANA_PGID_PGID_SET(flood_mask_ip),
		ANA_PGID_PGID,
		lan966x, ANA_PGID(pgid_ip));
}

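/* Update the non-IP multicast flood mask (PGID_MC) for a port and, while
 * mcast snooping is disabled, propagate the setting to the IPv4/IPv6
 * multicast flood masks as well.
 */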
static void lan966x_port_set_mcast_flood(struct lan966x_port *port,
					 bool enabled)
{
	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_MC));

	val = ANA_PGID_PGID_GET(val);
	if (enabled)
		val |= BIT(port->chip_port);
	else
		val &= ~BIT(port->chip_port);

	lan_rmw(ANA_PGID_PGID_SET(val),
		ANA_PGID_PGID,
		port->lan966x, ANA_PGID(PGID_MC));

	if (!port->mcast_ena) {
		lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
		lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
	}
}

static void lan966x_port_set_ucast_flood(struct lan966x_port *port,
					 bool enabled)
{
	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_UC));

	val = ANA_PGID_PGID_GET(val);
	if (enabled)
		val |= BIT(port->chip_port);
	else
		val &= ~BIT(port->chip_port);

	lan_rmw(ANA_PGID_PGID_SET(val),
		ANA_PGID_PGID,
		port->lan966x, ANA_PGID(PGID_UC));
}

static void lan966x_port_set_bcast_flood(struct lan966x_port *port,
					 bool enabled)
{
	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_BC));

	val = ANA_PGID_PGID_GET(val);
	if (enabled)
		val |= BIT(port->chip_port);
	else
		val &= ~BIT(port->chip_port);

	lan_rmw(ANA_PGID_PGID_SET(val),
		ANA_PGID_PGID,
		port->lan966x, ANA_PGID(PGID_BC));
}

static void lan966x_port_set_learning(struct lan966x_port *port, bool enabled)
{
	lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(enabled),
		ANA_PORT_CFG_LEARN_ENA,
		port->lan966x, ANA_PORT_CFG(port->chip_port));

	port->learn_ena = enabled;
}

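/* Apply the bridge port flags whose bit is set in flags.mask:
 * BR_MCAST_FLOOD, BR_FLOOD, BR_BCAST_FLOOD and BR_LEARNING.
 */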
static void lan966x_port_bridge_flags(struct lan966x_port *port,
				      struct switchdev_brport_flags flags)
{
	if (flags.mask & BR_MCAST_FLOOD)
		lan966x_port_set_mcast_flood(port,
					     !!(flags.val & BR_MCAST_FLOOD));

	if (flags.mask & BR_FLOOD)
		lan966x_port_set_ucast_flood(port,
					     !!(flags.val & BR_FLOOD));

	if (flags.mask & BR_BCAST_FLOOD)
		lan966x_port_set_bcast_flood(port,
					     !!(flags.val & BR_BCAST_FLOOD));

	if (flags.mask & BR_LEARNING)
		lan966x_port_set_learning(port,
					  !!(flags.val & BR_LEARNING));
}

static int lan966x_port_pre_bridge_flags(struct lan966x_port *port,
					 struct switchdev_brport_flags flags)
{
	if (flags.mask & ~(BR_MCAST_FLOOD | BR_FLOOD | BR_BCAST_FLOOD |
			   BR_LEARNING))
		return -EINVAL;

	return 0;
}

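/* Rebuild the per-port source masks (PGID_SRC). A port in forwarding state
 * may reach every other forwarding port except itself and the other members
 * of its own bond; the CPU port is always reachable.
 */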
void lan966x_update_fwd_mask(struct lan966x *lan966x)
{
	int i;

	for (i = 0; i < lan966x->num_phys_ports; i++) {
		struct lan966x_port *port = lan966x->ports[i];
		unsigned long mask = 0;

		if (port && lan966x->bridge_fwd_mask & BIT(i)) {
			mask = lan966x->bridge_fwd_mask & ~BIT(i);

			if (port->bond)
				mask &= ~lan966x_lag_get_mask(lan966x,
							      port->bond);
		}

		mask |= BIT(CPU_PORT);

		lan_wr(ANA_PGID_PGID_SET(mask),
		       lan966x, ANA_PGID(PGID_SRC + i));
	}
}

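/* Update the bridge forwarding mask and hardware learning according to the
 * new STP state, then refresh the per-port source masks.
 */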
void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
{
	struct lan966x *lan966x = port->lan966x;
	bool learn_ena = false;

	if ((state == BR_STATE_FORWARDING || state == BR_STATE_LEARNING) &&
	    port->learn_ena)
		learn_ena = true;

	if (state == BR_STATE_FORWARDING)
		lan966x->bridge_fwd_mask |= BIT(port->chip_port);
	else
		lan966x->bridge_fwd_mask &= ~BIT(port->chip_port);

	lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(learn_ena),
		ANA_PORT_CFG_LEARN_ENA,
		lan966x, ANA_PORT_CFG(port->chip_port));

	lan966x_update_fwd_mask(lan966x);
}

void lan966x_port_ageing_set(struct lan966x_port *port,
			     unsigned long ageing_clock_t)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	lan966x_mac_set_ageing(port->lan966x, ageing_time);
}

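/* Enable or disable multicast snooping on a port: restore or clear the MDB
 * entries, enable or disable redirection of IGMP/MLD control frames to the
 * CPU and recompute the IP multicast flood masks.
 */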
static void lan966x_port_mc_set(struct lan966x_port *port, bool mcast_ena)
{
	struct lan966x *lan966x = port->lan966x;

	port->mcast_ena = mcast_ena;
	if (mcast_ena)
		lan966x_mdb_restore_entries(lan966x);
	else
		lan966x_mdb_clear_entries(lan966x);

	lan_rmw(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_SET(mcast_ena) |
		ANA_CPU_FWD_CFG_MLD_REDIR_ENA_SET(mcast_ena) |
		ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_SET(mcast_ena),
		ANA_CPU_FWD_CFG_IGMP_REDIR_ENA |
		ANA_CPU_FWD_CFG_MLD_REDIR_ENA |
		ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA,
		lan966x, ANA_CPU_FWD_CFG(port->chip_port));

	lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
	lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
}

static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
				 const struct switchdev_attr *attr,
				 struct netlink_ext_ack *extack)
{
	struct lan966x_port *port = netdev_priv(dev);
	int err = 0;

	if (ctx && ctx != port)
		return 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		lan966x_port_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		err = lan966x_port_pre_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		lan966x_port_stp_state_set(port, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		lan966x_port_ageing_set(port, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		lan966x_vlan_port_set_vlan_aware(port, attr->u.vlan_filtering);
		lan966x_vlan_port_apply(port);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		lan966x_port_mc_set(port, !attr->u.mc_disabled);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

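/* Attach a port to a bridge. All ports of a switch instance must join the
 * same bridge; on success the bridge port is offloaded and learning and
 * flooding are enabled on it.
 */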
static int lan966x_port_bridge_join(struct lan966x_port *port,
				    struct net_device *brport_dev,
				    struct net_device *bridge,
				    struct netlink_ext_ack *extack)
{
	struct switchdev_brport_flags flags = {0};
	struct lan966x *lan966x = port->lan966x;
	struct net_device *dev = port->dev;
	int err;

	if (!lan966x->bridge_mask) {
		lan966x->bridge = bridge;
	} else {
		if (lan966x->bridge != bridge) {
			NL_SET_ERR_MSG_MOD(extack, "Not allowed to add port to a different bridge");
			return -ENODEV;
		}
	}

	err = switchdev_bridge_port_offload(brport_dev, dev, port,
					    &lan966x_switchdev_nb,
					    &lan966x_switchdev_blocking_nb,
					    false, extack);
	if (err)
		return err;

	lan966x->bridge_mask |= BIT(port->chip_port);

	flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	flags.val = flags.mask;
	lan966x_port_bridge_flags(port, flags);

	return 0;
}

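/* Detach a port from its bridge: disable learning, keep flooding enabled,
 * and return the port to VLAN-unaware host mode with the host PVID.
 */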
static void lan966x_port_bridge_leave(struct lan966x_port *port,
				      struct net_device *bridge)
{
	struct switchdev_brport_flags flags = {0};
	struct lan966x *lan966x = port->lan966x;

	flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	flags.val = flags.mask & ~BR_LEARNING;
	lan966x_port_bridge_flags(port, flags);

	lan966x->bridge_mask &= ~BIT(port->chip_port);

	if (!lan966x->bridge_mask)
		lan966x->bridge = NULL;

	/* Set the port back to host mode */
	lan966x_vlan_port_set_vlan_aware(port, false);
	lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
	lan966x_vlan_port_apply(port);
}

int lan966x_port_changeupper(struct net_device *dev,
			     struct net_device *brport_dev,
			     struct netdev_notifier_changeupper_info *info)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct netlink_ext_ack *extack;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking)
			err = lan966x_port_bridge_join(port, brport_dev,
						       info->upper_dev,
						       extack);
		else
			lan966x_port_bridge_leave(port, info->upper_dev);
	}

	if (netif_is_lag_master(info->upper_dev)) {
		if (info->linking)
			err = lan966x_lag_port_join(port, info->upper_dev,
						    info->upper_dev,
						    extack);
		else
			lan966x_lag_port_leave(port, info->upper_dev);
	}

	return err;
}

int lan966x_port_prechangeupper(struct net_device *dev,
				struct net_device *brport_dev,
				struct netdev_notifier_changeupper_info *info)
{
	struct lan966x_port *port = netdev_priv(dev);
	int err = NOTIFY_DONE;

	if (netif_is_bridge_master(info->upper_dev) && !info->linking) {
		switchdev_bridge_port_unoffload(port->dev, port, NULL, NULL);
		lan966x_fdb_flush_workqueue(port->lan966x);
	}

	if (netif_is_lag_master(info->upper_dev)) {
		err = lan966x_lag_port_prechangeupper(dev, info);
		if (err || info->linking)
			return err;

		switchdev_bridge_port_unoffload(brport_dev, port, NULL, NULL);
		lan966x_fdb_flush_workqueue(port->lan966x);
	}

	return err;
}

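/* Walk the lower devices of a bridge or bond and reject configurations that
 * would mix ports of different lan966x switch instances, or combine lan966x
 * ports with foreign interfaces, under the same upper device.
 */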
static int lan966x_foreign_bridging_check(struct net_device *upper,
					  bool *has_foreign,
					  bool *seen_lan966x,
					  struct netlink_ext_ack *extack)
{
	struct lan966x *lan966x = NULL;
	struct net_device *dev;
	struct list_head *iter;

	if (!netif_is_bridge_master(upper) &&
	    !netif_is_lag_master(upper))
		return 0;

	netdev_for_each_lower_dev(upper, dev, iter) {
		if (lan966x_netdevice_check(dev)) {
			struct lan966x_port *port = netdev_priv(dev);

			if (lan966x) {
				/* Upper already has at least one port of a
				 * lan966x switch inside it, check that it's
				 * the same instance of the driver.
				 */
				if (port->lan966x != lan966x) {
					NL_SET_ERR_MSG_MOD(extack,
							   "Bridging between multiple lan966x switches disallowed");
					return -EINVAL;
				}
			} else {
				/* This is the first lan966x port inside this
				 * upper device
				 */
				lan966x = port->lan966x;
				*seen_lan966x = true;
			}
		} else if (netif_is_lag_master(dev)) {
			/* Allow bond interfaces that contain only lan966x
			 * devices
			 */
			if (lan966x_foreign_bridging_check(dev, has_foreign,
							   seen_lan966x,
							   extack))
				return -EINVAL;
		} else {
			*has_foreign = true;
		}

		if (*seen_lan966x && *has_foreign) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Bridging lan966x ports with foreign interfaces disallowed");
			return -EINVAL;
		}
	}

	return 0;
}

static int lan966x_bridge_check(struct net_device *dev,
				struct netdev_notifier_changeupper_info *info)
{
	bool has_foreign = false;
	bool seen_lan966x = false;

	return lan966x_foreign_bridging_check(info->upper_dev,
					      &has_foreign,
					      &seen_lan966x,
					      info->info.extack);
}

static int lan966x_netdevice_port_event(struct net_device *dev,
					struct notifier_block *nb,
					unsigned long event, void *ptr)
{
	int err = 0;

	if (!lan966x_netdevice_check(dev)) {
		switch (event) {
		case NETDEV_CHANGEUPPER:
		case NETDEV_PRECHANGEUPPER:
			err = lan966x_bridge_check(dev, ptr);
			if (err)
				return err;

			if (netif_is_lag_master(dev)) {
				if (event == NETDEV_CHANGEUPPER)
					err = lan966x_lag_netdev_changeupper(dev,
									     ptr);
				else
					err = lan966x_lag_netdev_prechangeupper(dev,
										ptr);

				return err;
			}
			break;
		default:
			return 0;
		}

		return 0;
	}

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		err = lan966x_port_prechangeupper(dev, dev, ptr);
		break;
	case NETDEV_CHANGEUPPER:
		err = lan966x_bridge_check(dev, ptr);
		if (err)
			return err;

		err = lan966x_port_changeupper(dev, dev, ptr);
		break;
	case NETDEV_CHANGELOWERSTATE:
		err = lan966x_lag_port_changelowerstate(dev, ptr);
		break;
	}

	return err;
}

static int lan966x_netdevice_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int ret;

	ret = lan966x_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(ret);
}

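/* Tell the switchdev FDB helpers whether a device is foreign to this switch:
 * the offloaded bridge itself and bonds that contain lan966x ports are not
 * considered foreign.
 */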
static bool lan966x_foreign_dev_check(const struct net_device *dev,
				      const struct net_device *foreign_dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	int i;

	if (netif_is_bridge_master(foreign_dev))
		if (lan966x->bridge == foreign_dev)
			return false;

	if (netif_is_lag_master(foreign_dev))
		for (i = 0; i < lan966x->num_phys_ports; ++i)
			if (lan966x->ports[i] &&
			    lan966x->ports[i]->bond == foreign_dev)
				return false;

	return true;
}

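/* Atomic switchdev notifier: handles port attribute changes and FDB add/del
 * events for lan966x ports.
 */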
static int lan966x_switchdev_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     lan966x_netdevice_check,
						     lan966x_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
							   lan966x_netdevice_check,
							   lan966x_foreign_dev_check,
							   lan966x_handle_fdb);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

static int lan966x_handle_port_vlan_add(struct lan966x_port *port,
					const struct switchdev_obj *obj)
{
	const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct lan966x *lan966x = port->lan966x;

	if (!netif_is_bridge_master(obj->orig_dev))
		lan966x_vlan_port_add_vlan(port, v->vid,
					   v->flags & BRIDGE_VLAN_INFO_PVID,
					   v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
	else
		lan966x_vlan_cpu_add_vlan(lan966x, v->vid);

	return 0;
}

static int lan966x_handle_port_obj_add(struct net_device *dev, const void *ctx,
				       const struct switchdev_obj *obj,
				       struct netlink_ext_ack *extack)
{
	struct lan966x_port *port = netdev_priv(dev);
	int err;

	if (ctx && ctx != port)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = lan966x_handle_port_vlan_add(port, obj);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = lan966x_handle_port_mdb_add(port, obj);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int lan966x_handle_port_vlan_del(struct lan966x_port *port,
					const struct switchdev_obj *obj)
{
	const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct lan966x *lan966x = port->lan966x;

	if (!netif_is_bridge_master(obj->orig_dev))
		lan966x_vlan_port_del_vlan(port, v->vid);
	else
		lan966x_vlan_cpu_del_vlan(lan966x, v->vid);

	return 0;
}

static int lan966x_handle_port_obj_del(struct net_device *dev, const void *ctx,
				       const struct switchdev_obj *obj)
{
	struct lan966x_port *port = netdev_priv(dev);
	int err;

	if (ctx && ctx != port)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = lan966x_handle_port_vlan_del(port, obj);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = lan966x_handle_port_mdb_del(port, obj);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

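/* Blocking switchdev notifier: handles VLAN and MDB object add/del and port
 * attribute changes for lan966x ports.
 */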
static int lan966x_switchdev_blocking_event(struct notifier_block *nb,
					    unsigned long event,
					    void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    lan966x_netdevice_check,
						    lan966x_handle_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del(dev, ptr,
						    lan966x_netdevice_check,
						    lan966x_handle_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     lan966x_netdevice_check,
						     lan966x_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

static struct notifier_block lan966x_netdevice_nb __read_mostly = {
	.notifier_call = lan966x_netdevice_event,
};

struct notifier_block lan966x_switchdev_nb __read_mostly = {
	.notifier_call = lan966x_switchdev_event,
};

struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = {
	.notifier_call = lan966x_switchdev_blocking_event,
};

void lan966x_register_notifier_blocks(void)
{
	register_netdevice_notifier(&lan966x_netdevice_nb);
	register_switchdev_notifier(&lan966x_switchdev_nb);
	register_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
}

void lan966x_unregister_notifier_blocks(void)
{
	unregister_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
	unregister_switchdev_notifier(&lan966x_switchdev_nb);
	unregister_netdevice_notifier(&lan966x_netdevice_nb);
}