xref: /openbmc/linux/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c (revision fa5d824ce5dd8306c66f45c34fd78536e6ce2488)
1 // SPDX-License-Identifier: GPL-2.0+
2 
3 #include <linux/if_bridge.h>
4 #include <net/switchdev.h>
5 
6 #include "lan966x_main.h"
7 
8 static struct notifier_block lan966x_netdevice_nb __read_mostly;
9 static struct notifier_block lan966x_switchdev_nb __read_mostly;
10 static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly;
11 
12 static void lan966x_port_set_mcast_ip_flood(struct lan966x_port *port,
13 					    u32 pgid_ip)
14 {
15 	struct lan966x *lan966x = port->lan966x;
16 	u32 flood_mask_ip;
17 
18 	flood_mask_ip = lan_rd(lan966x, ANA_PGID(pgid_ip));
19 	flood_mask_ip = ANA_PGID_PGID_GET(flood_mask_ip);
20 
21 	/* If mcast snooping is not enabled then use mcast flood mask
22 	 * to decide to enable multicast flooding or not.
23 	 */
24 	if (!port->mcast_ena) {
25 		u32 flood_mask;
26 
27 		flood_mask = lan_rd(lan966x, ANA_PGID(PGID_MC));
28 		flood_mask = ANA_PGID_PGID_GET(flood_mask);
29 
30 		if (flood_mask & BIT(port->chip_port))
31 			flood_mask_ip |= BIT(port->chip_port);
32 		else
33 			flood_mask_ip &= ~BIT(port->chip_port);
34 	} else {
35 		flood_mask_ip &= ~BIT(port->chip_port);
36 	}
37 
38 	lan_rmw(ANA_PGID_PGID_SET(flood_mask_ip),
39 		ANA_PGID_PGID,
40 		lan966x, ANA_PGID(pgid_ip));
41 }
42 
43 static void lan966x_port_set_mcast_flood(struct lan966x_port *port,
44 					 bool enabled)
45 {
46 	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_MC));
47 
48 	val = ANA_PGID_PGID_GET(val);
49 	if (enabled)
50 		val |= BIT(port->chip_port);
51 	else
52 		val &= ~BIT(port->chip_port);
53 
54 	lan_rmw(ANA_PGID_PGID_SET(val),
55 		ANA_PGID_PGID,
56 		port->lan966x, ANA_PGID(PGID_MC));
57 
58 	if (!port->mcast_ena) {
59 		lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
60 		lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
61 	}
62 }
63 
64 static void lan966x_port_set_ucast_flood(struct lan966x_port *port,
65 					 bool enabled)
66 {
67 	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_UC));
68 
69 	val = ANA_PGID_PGID_GET(val);
70 	if (enabled)
71 		val |= BIT(port->chip_port);
72 	else
73 		val &= ~BIT(port->chip_port);
74 
75 	lan_rmw(ANA_PGID_PGID_SET(val),
76 		ANA_PGID_PGID,
77 		port->lan966x, ANA_PGID(PGID_UC));
78 }
79 
80 static void lan966x_port_set_bcast_flood(struct lan966x_port *port,
81 					 bool enabled)
82 {
83 	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_BC));
84 
85 	val = ANA_PGID_PGID_GET(val);
86 	if (enabled)
87 		val |= BIT(port->chip_port);
88 	else
89 		val &= ~BIT(port->chip_port);
90 
91 	lan_rmw(ANA_PGID_PGID_SET(val),
92 		ANA_PGID_PGID,
93 		port->lan966x, ANA_PGID(PGID_BC));
94 }
95 
96 static void lan966x_port_set_learning(struct lan966x_port *port, bool enabled)
97 {
98 	lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(enabled),
99 		ANA_PORT_CFG_LEARN_ENA,
100 		port->lan966x, ANA_PORT_CFG(port->chip_port));
101 
102 	port->learn_ena = enabled;
103 }
104 
105 static void lan966x_port_bridge_flags(struct lan966x_port *port,
106 				      struct switchdev_brport_flags flags)
107 {
108 	if (flags.mask & BR_MCAST_FLOOD)
109 		lan966x_port_set_mcast_flood(port,
110 					     !!(flags.val & BR_MCAST_FLOOD));
111 
112 	if (flags.mask & BR_FLOOD)
113 		lan966x_port_set_ucast_flood(port,
114 					     !!(flags.val & BR_FLOOD));
115 
116 	if (flags.mask & BR_BCAST_FLOOD)
117 		lan966x_port_set_bcast_flood(port,
118 					     !!(flags.val & BR_BCAST_FLOOD));
119 
120 	if (flags.mask & BR_LEARNING)
121 		lan966x_port_set_learning(port,
122 					  !!(flags.val & BR_LEARNING));
123 }
124 
125 static int lan966x_port_pre_bridge_flags(struct lan966x_port *port,
126 					 struct switchdev_brport_flags flags)
127 {
128 	if (flags.mask & ~(BR_MCAST_FLOOD | BR_FLOOD | BR_BCAST_FLOOD |
129 			   BR_LEARNING))
130 		return -EINVAL;
131 
132 	return 0;
133 }
134 
135 static void lan966x_update_fwd_mask(struct lan966x *lan966x)
136 {
137 	int i;
138 
139 	for (i = 0; i < lan966x->num_phys_ports; i++) {
140 		struct lan966x_port *port = lan966x->ports[i];
141 		unsigned long mask = 0;
142 
143 		if (port && lan966x->bridge_fwd_mask & BIT(i))
144 			mask = lan966x->bridge_fwd_mask & ~BIT(i);
145 
146 		mask |= BIT(CPU_PORT);
147 
148 		lan_wr(ANA_PGID_PGID_SET(mask),
149 		       lan966x, ANA_PGID(PGID_SRC + i));
150 	}
151 }
152 
153 static void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
154 {
155 	struct lan966x *lan966x = port->lan966x;
156 	bool learn_ena = false;
157 
158 	if ((state == BR_STATE_FORWARDING || state == BR_STATE_LEARNING) &&
159 	    port->learn_ena)
160 		learn_ena = true;
161 
162 	if (state == BR_STATE_FORWARDING)
163 		lan966x->bridge_fwd_mask |= BIT(port->chip_port);
164 	else
165 		lan966x->bridge_fwd_mask &= ~BIT(port->chip_port);
166 
167 	lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(learn_ena),
168 		ANA_PORT_CFG_LEARN_ENA,
169 		lan966x, ANA_PORT_CFG(port->chip_port));
170 
171 	lan966x_update_fwd_mask(lan966x);
172 }
173 
174 static void lan966x_port_ageing_set(struct lan966x_port *port,
175 				    unsigned long ageing_clock_t)
176 {
177 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
178 	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
179 
180 	lan966x_mac_set_ageing(port->lan966x, ageing_time);
181 }
182 
183 static void lan966x_port_mc_set(struct lan966x_port *port, bool mcast_ena)
184 {
185 	struct lan966x *lan966x = port->lan966x;
186 
187 	port->mcast_ena = mcast_ena;
188 	if (mcast_ena)
189 		lan966x_mdb_restore_entries(lan966x);
190 	else
191 		lan966x_mdb_clear_entries(lan966x);
192 
193 	lan_rmw(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_SET(mcast_ena) |
194 		ANA_CPU_FWD_CFG_MLD_REDIR_ENA_SET(mcast_ena) |
195 		ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_SET(mcast_ena),
196 		ANA_CPU_FWD_CFG_IGMP_REDIR_ENA |
197 		ANA_CPU_FWD_CFG_MLD_REDIR_ENA |
198 		ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA,
199 		lan966x, ANA_CPU_FWD_CFG(port->chip_port));
200 
201 	lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
202 	lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
203 }
204 
205 static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
206 				 const struct switchdev_attr *attr,
207 				 struct netlink_ext_ack *extack)
208 {
209 	struct lan966x_port *port = netdev_priv(dev);
210 	int err = 0;
211 
212 	if (ctx && ctx != port)
213 		return 0;
214 
215 	switch (attr->id) {
216 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
217 		lan966x_port_bridge_flags(port, attr->u.brport_flags);
218 		break;
219 	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
220 		err = lan966x_port_pre_bridge_flags(port, attr->u.brport_flags);
221 		break;
222 	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
223 		lan966x_port_stp_state_set(port, attr->u.stp_state);
224 		break;
225 	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
226 		lan966x_port_ageing_set(port, attr->u.ageing_time);
227 		break;
228 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
229 		lan966x_vlan_port_set_vlan_aware(port, attr->u.vlan_filtering);
230 		lan966x_vlan_port_apply(port);
231 		break;
232 	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
233 		lan966x_port_mc_set(port, !attr->u.mc_disabled);
234 		break;
235 	default:
236 		err = -EOPNOTSUPP;
237 		break;
238 	}
239 
240 	return err;
241 }
242 
243 static int lan966x_port_bridge_join(struct lan966x_port *port,
244 				    struct net_device *bridge,
245 				    struct netlink_ext_ack *extack)
246 {
247 	struct switchdev_brport_flags flags = {0};
248 	struct lan966x *lan966x = port->lan966x;
249 	struct net_device *dev = port->dev;
250 	int err;
251 
252 	if (!lan966x->bridge_mask) {
253 		lan966x->bridge = bridge;
254 	} else {
255 		if (lan966x->bridge != bridge) {
256 			NL_SET_ERR_MSG_MOD(extack, "Not allow to add port to different bridge");
257 			return -ENODEV;
258 		}
259 	}
260 
261 	err = switchdev_bridge_port_offload(dev, dev, port,
262 					    &lan966x_switchdev_nb,
263 					    &lan966x_switchdev_blocking_nb,
264 					    false, extack);
265 	if (err)
266 		return err;
267 
268 	lan966x->bridge_mask |= BIT(port->chip_port);
269 
270 	flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
271 	flags.val = flags.mask;
272 	lan966x_port_bridge_flags(port, flags);
273 
274 	return 0;
275 }
276 
277 static void lan966x_port_bridge_leave(struct lan966x_port *port,
278 				      struct net_device *bridge)
279 {
280 	struct switchdev_brport_flags flags = {0};
281 	struct lan966x *lan966x = port->lan966x;
282 
283 	flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
284 	flags.val = flags.mask & ~BR_LEARNING;
285 	lan966x_port_bridge_flags(port, flags);
286 
287 	lan966x->bridge_mask &= ~BIT(port->chip_port);
288 
289 	if (!lan966x->bridge_mask)
290 		lan966x->bridge = NULL;
291 
292 	/* Set the port back to host mode */
293 	lan966x_vlan_port_set_vlan_aware(port, false);
294 	lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
295 	lan966x_vlan_port_apply(port);
296 }
297 
298 static int lan966x_port_changeupper(struct net_device *dev,
299 				    struct netdev_notifier_changeupper_info *info)
300 {
301 	struct lan966x_port *port = netdev_priv(dev);
302 	struct netlink_ext_ack *extack;
303 	int err = 0;
304 
305 	extack = netdev_notifier_info_to_extack(&info->info);
306 
307 	if (netif_is_bridge_master(info->upper_dev)) {
308 		if (info->linking)
309 			err = lan966x_port_bridge_join(port, info->upper_dev,
310 						       extack);
311 		else
312 			lan966x_port_bridge_leave(port, info->upper_dev);
313 	}
314 
315 	return err;
316 }
317 
318 static int lan966x_port_prechangeupper(struct net_device *dev,
319 				       struct netdev_notifier_changeupper_info *info)
320 {
321 	struct lan966x_port *port = netdev_priv(dev);
322 
323 	if (netif_is_bridge_master(info->upper_dev) && !info->linking)
324 		switchdev_bridge_port_unoffload(port->dev, port,
325 						&lan966x_switchdev_nb,
326 						&lan966x_switchdev_blocking_nb);
327 
328 	return NOTIFY_DONE;
329 }
330 
331 static int lan966x_foreign_bridging_check(struct net_device *bridge,
332 					  struct netlink_ext_ack *extack)
333 {
334 	struct lan966x *lan966x = NULL;
335 	bool has_foreign = false;
336 	struct net_device *dev;
337 	struct list_head *iter;
338 
339 	if (!netif_is_bridge_master(bridge))
340 		return 0;
341 
342 	netdev_for_each_lower_dev(bridge, dev, iter) {
343 		if (lan966x_netdevice_check(dev)) {
344 			struct lan966x_port *port = netdev_priv(dev);
345 
346 			if (lan966x) {
347 				/* Bridge already has at least one port of a
348 				 * lan966x switch inside it, check that it's
349 				 * the same instance of the driver.
350 				 */
351 				if (port->lan966x != lan966x) {
352 					NL_SET_ERR_MSG_MOD(extack,
353 							   "Bridging between multiple lan966x switches disallowed");
354 					return -EINVAL;
355 				}
356 			} else {
357 				/* This is the first lan966x port inside this
358 				 * bridge
359 				 */
360 				lan966x = port->lan966x;
361 			}
362 		} else {
363 			has_foreign = true;
364 		}
365 
366 		if (lan966x && has_foreign) {
367 			NL_SET_ERR_MSG_MOD(extack,
368 					   "Bridging lan966x ports with foreign interfaces disallowed");
369 			return -EINVAL;
370 		}
371 	}
372 
373 	return 0;
374 }
375 
376 static int lan966x_bridge_check(struct net_device *dev,
377 				struct netdev_notifier_changeupper_info *info)
378 {
379 	return lan966x_foreign_bridging_check(info->upper_dev,
380 					      info->info.extack);
381 }
382 
383 static int lan966x_netdevice_port_event(struct net_device *dev,
384 					struct notifier_block *nb,
385 					unsigned long event, void *ptr)
386 {
387 	int err = 0;
388 
389 	if (!lan966x_netdevice_check(dev)) {
390 		if (event == NETDEV_CHANGEUPPER)
391 			return lan966x_bridge_check(dev, ptr);
392 		return 0;
393 	}
394 
395 	switch (event) {
396 	case NETDEV_PRECHANGEUPPER:
397 		err = lan966x_port_prechangeupper(dev, ptr);
398 		break;
399 	case NETDEV_CHANGEUPPER:
400 		err = lan966x_bridge_check(dev, ptr);
401 		if (err)
402 			return err;
403 
404 		err = lan966x_port_changeupper(dev, ptr);
405 		break;
406 	}
407 
408 	return err;
409 }
410 
static int lan966x_netdevice_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err;

	/* Translate the errno-style result into a notifier return code */
	err = lan966x_netdevice_port_event(dev, nb, event, ptr);
	return notifier_from_errno(err);
}
421 
422 static bool lan966x_foreign_dev_check(const struct net_device *dev,
423 				      const struct net_device *foreign_dev)
424 {
425 	struct lan966x_port *port = netdev_priv(dev);
426 	struct lan966x *lan966x = port->lan966x;
427 
428 	if (netif_is_bridge_master(foreign_dev))
429 		if (lan966x->bridge != foreign_dev)
430 			return true;
431 
432 	return false;
433 }
434 
435 static int lan966x_switchdev_event(struct notifier_block *nb,
436 				   unsigned long event, void *ptr)
437 {
438 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
439 	int err;
440 
441 	switch (event) {
442 	case SWITCHDEV_PORT_ATTR_SET:
443 		err = switchdev_handle_port_attr_set(dev, ptr,
444 						     lan966x_netdevice_check,
445 						     lan966x_port_attr_set);
446 		return notifier_from_errno(err);
447 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
448 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
449 		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
450 							   lan966x_netdevice_check,
451 							   lan966x_foreign_dev_check,
452 							   lan966x_handle_fdb,
453 							   NULL);
454 		return notifier_from_errno(err);
455 	}
456 
457 	return NOTIFY_DONE;
458 }
459 
static int lan966x_handle_port_vlan_add(struct lan966x_port *port,
					const struct switchdev_obj *obj)
{
	const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct lan966x *lan966x = port->lan966x;

	/* When adding a port to a vlan, we get a callback for the port but
	 * also for the bridge. When we get the callback for the bridge just
	 * bail out. Then when the bridge itself is added to the vlan, we get
	 * a callback here with BRIDGE_VLAN_INFO_BRENTRY set in the flags.
	 * In that case the CPU port is added to the vlan, so broadcast frames
	 * and unicast frames with the dmac of the bridge are forwarded to
	 * the CPU.
	 */
	if (netif_is_bridge_master(obj->orig_dev) &&
	    !(v->flags & BRIDGE_VLAN_INFO_BRENTRY))
		return 0;

	if (!netif_is_bridge_master(obj->orig_dev))
		lan966x_vlan_port_add_vlan(port, v->vid,
					   v->flags & BRIDGE_VLAN_INFO_PVID,
					   v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
	else
		lan966x_vlan_cpu_add_vlan(lan966x, v->vid);

	return 0;
}
487 
488 static int lan966x_handle_port_obj_add(struct net_device *dev, const void *ctx,
489 				       const struct switchdev_obj *obj,
490 				       struct netlink_ext_ack *extack)
491 {
492 	struct lan966x_port *port = netdev_priv(dev);
493 	int err;
494 
495 	if (ctx && ctx != port)
496 		return 0;
497 
498 	switch (obj->id) {
499 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
500 		err = lan966x_handle_port_vlan_add(port, obj);
501 		break;
502 	case SWITCHDEV_OBJ_ID_PORT_MDB:
503 	case SWITCHDEV_OBJ_ID_HOST_MDB:
504 		err = lan966x_handle_port_mdb_add(port, obj);
505 		break;
506 	default:
507 		err = -EOPNOTSUPP;
508 		break;
509 	}
510 
511 	return err;
512 }
513 
514 static int lan966x_handle_port_vlan_del(struct lan966x_port *port,
515 					const struct switchdev_obj *obj)
516 {
517 	const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
518 	struct lan966x *lan966x = port->lan966x;
519 
520 	if (!netif_is_bridge_master(obj->orig_dev))
521 		lan966x_vlan_port_del_vlan(port, v->vid);
522 	else
523 		lan966x_vlan_cpu_del_vlan(lan966x, v->vid);
524 
525 	return 0;
526 }
527 
528 static int lan966x_handle_port_obj_del(struct net_device *dev, const void *ctx,
529 				       const struct switchdev_obj *obj)
530 {
531 	struct lan966x_port *port = netdev_priv(dev);
532 	int err;
533 
534 	if (ctx && ctx != port)
535 		return 0;
536 
537 	switch (obj->id) {
538 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
539 		err = lan966x_handle_port_vlan_del(port, obj);
540 		break;
541 	case SWITCHDEV_OBJ_ID_PORT_MDB:
542 	case SWITCHDEV_OBJ_ID_HOST_MDB:
543 		err = lan966x_handle_port_mdb_del(port, obj);
544 		break;
545 	default:
546 		err = -EOPNOTSUPP;
547 		break;
548 	}
549 
550 	return err;
551 }
552 
553 static int lan966x_switchdev_blocking_event(struct notifier_block *nb,
554 					    unsigned long event,
555 					    void *ptr)
556 {
557 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
558 	int err;
559 
560 	switch (event) {
561 	case SWITCHDEV_PORT_OBJ_ADD:
562 		err = switchdev_handle_port_obj_add(dev, ptr,
563 						    lan966x_netdevice_check,
564 						    lan966x_handle_port_obj_add);
565 		return notifier_from_errno(err);
566 	case SWITCHDEV_PORT_OBJ_DEL:
567 		err = switchdev_handle_port_obj_del(dev, ptr,
568 						    lan966x_netdevice_check,
569 						    lan966x_handle_port_obj_del);
570 		return notifier_from_errno(err);
571 	case SWITCHDEV_PORT_ATTR_SET:
572 		err = switchdev_handle_port_attr_set(dev, ptr,
573 						     lan966x_netdevice_check,
574 						     lan966x_port_attr_set);
575 		return notifier_from_errno(err);
576 	}
577 
578 	return NOTIFY_DONE;
579 }
580 
/* Notifier definitions; forward-declared at the top of the file so the
 * bridge offload helpers can reference them before they are defined.
 */
static struct notifier_block lan966x_netdevice_nb __read_mostly = {
	.notifier_call = lan966x_netdevice_event,
};

static struct notifier_block lan966x_switchdev_nb __read_mostly = {
	.notifier_call = lan966x_switchdev_event,
};

static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = {
	.notifier_call = lan966x_switchdev_blocking_event,
};
592 
/* Hook the driver into the netdevice and switchdev notifier chains.
 * NOTE(review): the registration return values are ignored; a failure
 * would silently leave the driver without events — consider checking
 * and propagating them (would require changing the void interface).
 */
void lan966x_register_notifier_blocks(void)
{
	register_netdevice_notifier(&lan966x_netdevice_nb);
	register_switchdev_notifier(&lan966x_switchdev_nb);
	register_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
}
599 
/* Detach from the notifier chains, in reverse order of registration */
void lan966x_unregister_notifier_blocks(void)
{
	unregister_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
	unregister_switchdev_notifier(&lan966x_switchdev_nb);
	unregister_netdevice_notifier(&lan966x_netdevice_nb);
}
606