// SPDX-License-Identifier: GPL-2.0+

#include <linux/if_bridge.h>
#include <net/switchdev.h>

#include "lan966x_main.h"

static struct notifier_block lan966x_netdevice_nb __read_mostly;

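/* Keep the port's bit in an IP multicast flood mask (PGID_MCIPV4/PGID_MCIPV6)
 * in sync with its multicast configuration: follow PGID_MC while multicast
 * snooping is disabled, otherwise clear the port so that only snooped groups
 * are forwarded.
 */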
static void lan966x_port_set_mcast_ip_flood(struct lan966x_port *port,
					    u32 pgid_ip)
{
	struct lan966x *lan966x = port->lan966x;
	u32 flood_mask_ip;

	flood_mask_ip = lan_rd(lan966x, ANA_PGID(pgid_ip));
	flood_mask_ip = ANA_PGID_PGID_GET(flood_mask_ip);

	/* If mcast snooping is not enabled, then use the mcast flood mask
	 * to decide whether to enable multicast flooding or not.
	 */
	if (!port->mcast_ena) {
		u32 flood_mask;

		flood_mask = lan_rd(lan966x, ANA_PGID(PGID_MC));
		flood_mask = ANA_PGID_PGID_GET(flood_mask);

		if (flood_mask & BIT(port->chip_port))
			flood_mask_ip |= BIT(port->chip_port);
		else
			flood_mask_ip &= ~BIT(port->chip_port);
	} else {
		flood_mask_ip &= ~BIT(port->chip_port);
	}

	lan_rmw(ANA_PGID_PGID_SET(flood_mask_ip),
		ANA_PGID_PGID,
		lan966x, ANA_PGID(pgid_ip));
}

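/* Update the port's bit in the L2 multicast flood mask (PGID_MC) and, while
 * multicast snooping is disabled, propagate the same setting to the IPv4 and
 * IPv6 multicast flood masks.
 */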
static void lan966x_port_set_mcast_flood(struct lan966x_port *port,
					 bool enabled)
{
	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_MC));

	val = ANA_PGID_PGID_GET(val);
	if (enabled)
		val |= BIT(port->chip_port);
	else
		val &= ~BIT(port->chip_port);

	lan_rmw(ANA_PGID_PGID_SET(val),
		ANA_PGID_PGID,
		port->lan966x, ANA_PGID(PGID_MC));

	if (!port->mcast_ena) {
		lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
		lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
	}
}

static void lan966x_port_set_ucast_flood(struct lan966x_port *port,
					 bool enabled)
{
	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_UC));

	val = ANA_PGID_PGID_GET(val);
	if (enabled)
		val |= BIT(port->chip_port);
	else
		val &= ~BIT(port->chip_port);

	lan_rmw(ANA_PGID_PGID_SET(val),
		ANA_PGID_PGID,
		port->lan966x, ANA_PGID(PGID_UC));
}

static void lan966x_port_set_bcast_flood(struct lan966x_port *port,
					 bool enabled)
{
	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_BC));

	val = ANA_PGID_PGID_GET(val);
	if (enabled)
		val |= BIT(port->chip_port);
	else
		val &= ~BIT(port->chip_port);

	lan_rmw(ANA_PGID_PGID_SET(val),
		ANA_PGID_PGID,
		port->lan966x, ANA_PGID(PGID_BC));
}

static void lan966x_port_set_learning(struct lan966x_port *port, bool enabled)
{
	lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(enabled),
		ANA_PORT_CFG_LEARN_ENA,
		port->lan966x, ANA_PORT_CFG(port->chip_port));

	port->learn_ena = enabled;
}

static void lan966x_port_bridge_flags(struct lan966x_port *port,
				      struct switchdev_brport_flags flags)
{
	if (flags.mask & BR_MCAST_FLOOD)
		lan966x_port_set_mcast_flood(port,
					     !!(flags.val & BR_MCAST_FLOOD));

	if (flags.mask & BR_FLOOD)
		lan966x_port_set_ucast_flood(port,
					     !!(flags.val & BR_FLOOD));

	if (flags.mask & BR_BCAST_FLOOD)
		lan966x_port_set_bcast_flood(port,
					     !!(flags.val & BR_BCAST_FLOOD));

	if (flags.mask & BR_LEARNING)
		lan966x_port_set_learning(port,
					  !!(flags.val & BR_LEARNING));
}

static int lan966x_port_pre_bridge_flags(struct lan966x_port *port,
					 struct switchdev_brport_flags flags)
{
	if (flags.mask & ~(BR_MCAST_FLOOD | BR_FLOOD | BR_BCAST_FLOOD |
			   BR_LEARNING))
		return -EINVAL;

	return 0;
}

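/* Recompute the PGID_SRC entry of each port from the bridge forwarding mask:
 * a forwarding port gets the mask of the other forwarding ports, a
 * non-forwarding port gets an empty mask, and the CPU port is always
 * included.
 */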
static void lan966x_update_fwd_mask(struct lan966x *lan966x)
{
	int i;

	for (i = 0; i < lan966x->num_phys_ports; i++) {
		struct lan966x_port *port = lan966x->ports[i];
		unsigned long mask = 0;

		if (port && lan966x->bridge_fwd_mask & BIT(i))
			mask = lan966x->bridge_fwd_mask & ~BIT(i);

		mask |= BIT(CPU_PORT);

		lan_wr(ANA_PGID_PGID_SET(mask),
		       lan966x, ANA_PGID(PGID_SRC + i));
	}
}

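/* Apply the bridge STP state: hardware learning stays enabled only in the
 * LEARNING/FORWARDING states (and only if the port has learning configured),
 * and only FORWARDING ports are part of the bridge forwarding mask.
 */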
static void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
{
	struct lan966x *lan966x = port->lan966x;
	bool learn_ena = false;

	if ((state == BR_STATE_FORWARDING || state == BR_STATE_LEARNING) &&
	    port->learn_ena)
		learn_ena = true;

	if (state == BR_STATE_FORWARDING)
		lan966x->bridge_fwd_mask |= BIT(port->chip_port);
	else
		lan966x->bridge_fwd_mask &= ~BIT(port->chip_port);

	lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(learn_ena),
		ANA_PORT_CFG_LEARN_ENA,
		lan966x, ANA_PORT_CFG(port->chip_port));

	lan966x_update_fwd_mask(lan966x);
}

static void lan966x_port_ageing_set(struct lan966x_port *port,
				    unsigned long ageing_clock_t)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	lan966x_mac_set_ageing(port->lan966x, ageing_time);
}

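/* Enable or disable multicast snooping on the port: redirect IGMP/MLD control
 * frames to the CPU while snooping is enabled, restore or clear the MDB
 * entries and update the IP multicast flood masks accordingly.
 */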
static void lan966x_port_mc_set(struct lan966x_port *port, bool mcast_ena)
{
	struct lan966x *lan966x = port->lan966x;

	port->mcast_ena = mcast_ena;
	if (mcast_ena)
		lan966x_mdb_restore_entries(lan966x);
	else
		lan966x_mdb_clear_entries(lan966x);

	lan_rmw(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_SET(mcast_ena) |
		ANA_CPU_FWD_CFG_MLD_REDIR_ENA_SET(mcast_ena) |
		ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_SET(mcast_ena),
		ANA_CPU_FWD_CFG_IGMP_REDIR_ENA |
		ANA_CPU_FWD_CFG_MLD_REDIR_ENA |
		ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA,
		lan966x, ANA_CPU_FWD_CFG(port->chip_port));

	lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
	lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
}

static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
				 const struct switchdev_attr *attr,
				 struct netlink_ext_ack *extack)
{
	struct lan966x_port *port = netdev_priv(dev);
	int err = 0;

	if (ctx && ctx != port)
		return 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		lan966x_port_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		err = lan966x_port_pre_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		lan966x_port_stp_state_set(port, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		lan966x_port_ageing_set(port, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		lan966x_vlan_port_set_vlan_aware(port, attr->u.vlan_filtering);
		lan966x_vlan_port_apply(port);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		lan966x_port_mc_set(port, !attr->u.mc_disabled);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

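/* Called when the port is added under a bridge: only a single bridge is
 * supported per switch instance, so joining a second bridge is rejected. On
 * success the standard flood and learning flags are enabled on the port.
 */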
static int lan966x_port_bridge_join(struct lan966x_port *port,
				    struct net_device *bridge,
				    struct netlink_ext_ack *extack)
{
	struct switchdev_brport_flags flags = {0};
	struct lan966x *lan966x = port->lan966x;
	struct net_device *dev = port->dev;
	int err;

	if (!lan966x->bridge_mask) {
		lan966x->bridge = bridge;
	} else {
		if (lan966x->bridge != bridge) {
			NL_SET_ERR_MSG_MOD(extack, "Adding port to a different bridge is not allowed");
			return -ENODEV;
		}
	}

	err = switchdev_bridge_port_offload(dev, dev, port,
					    &lan966x_switchdev_nb,
					    &lan966x_switchdev_blocking_nb,
					    false, extack);
	if (err)
		return err;

	lan966x->bridge_mask |= BIT(port->chip_port);

	flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	flags.val = flags.mask;
	lan966x_port_bridge_flags(port, flags);

	return 0;
}

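/* Called when the port leaves the bridge: disable learning, re-enable the
 * flood flags and put the port back into VLAN-unaware host mode.
 */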
static void lan966x_port_bridge_leave(struct lan966x_port *port,
				      struct net_device *bridge)
{
	struct switchdev_brport_flags flags = {0};
	struct lan966x *lan966x = port->lan966x;

	flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	flags.val = flags.mask & ~BR_LEARNING;
	lan966x_port_bridge_flags(port, flags);

	lan966x->bridge_mask &= ~BIT(port->chip_port);

	if (!lan966x->bridge_mask)
		lan966x->bridge = NULL;

	/* Set the port back to host mode */
	lan966x_vlan_port_set_vlan_aware(port, false);
	lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
	lan966x_vlan_port_apply(port);
}

static int lan966x_port_changeupper(struct net_device *dev,
				    struct netdev_notifier_changeupper_info *info)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct netlink_ext_ack *extack;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking)
			err = lan966x_port_bridge_join(port, info->upper_dev,
						       extack);
		else
			lan966x_port_bridge_leave(port, info->upper_dev);
	}

	return err;
}

static int lan966x_port_prechangeupper(struct net_device *dev,
				       struct netdev_notifier_changeupper_info *info)
{
	struct lan966x_port *port = netdev_priv(dev);

	if (netif_is_bridge_master(info->upper_dev) && !info->linking) {
		switchdev_bridge_port_unoffload(port->dev, port, NULL, NULL);
		lan966x_fdb_flush_workqueue(port->lan966x);
	}

	return NOTIFY_DONE;
}

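/* Walk the lower devices of a bridge or LAG upper and reject configurations
 * that would bridge lan966x ports with ports of another lan966x instance or
 * with foreign (non-lan966x) interfaces.
 */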
static int lan966x_foreign_bridging_check(struct net_device *upper,
					  bool *has_foreign,
					  bool *seen_lan966x,
					  struct netlink_ext_ack *extack)
{
	struct lan966x *lan966x = NULL;
	struct net_device *dev;
	struct list_head *iter;

	if (!netif_is_bridge_master(upper) &&
	    !netif_is_lag_master(upper))
		return 0;

	netdev_for_each_lower_dev(upper, dev, iter) {
		if (lan966x_netdevice_check(dev)) {
			struct lan966x_port *port = netdev_priv(dev);

			if (lan966x) {
				/* The upper already has at least one port of a
				 * lan966x switch inside it; check that it is
				 * the same instance of the driver.
				 */
				if (port->lan966x != lan966x) {
					NL_SET_ERR_MSG_MOD(extack,
							   "Bridging between multiple lan966x switches disallowed");
					return -EINVAL;
				}
			} else {
				/* This is the first lan966x port inside this
				 * upper device.
				 */
				lan966x = port->lan966x;
				*seen_lan966x = true;
			}
		} else if (netif_is_lag_master(dev)) {
			/* Allow bond interfaces that contain only lan966x
			 * devices.
			 */
			if (lan966x_foreign_bridging_check(dev, has_foreign,
							   seen_lan966x,
							   extack))
				return -EINVAL;
		} else {
			*has_foreign = true;
		}

		if (*seen_lan966x && *has_foreign) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Bridging lan966x ports with foreign interfaces disallowed");
			return -EINVAL;
		}
	}

	return 0;
}

static int lan966x_bridge_check(struct net_device *dev,
				struct netdev_notifier_changeupper_info *info)
{
	bool has_foreign = false;
	bool seen_lan966x = false;

	return lan966x_foreign_bridging_check(info->upper_dev,
					      &has_foreign,
					      &seen_lan966x,
					      info->info.extack);
}

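/* Netdevice events are also received for foreign interfaces; those are only
 * checked for illegal bridging with lan966x ports, while events on lan966x
 * ports are handled in full.
 */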
static int lan966x_netdevice_port_event(struct net_device *dev,
					struct notifier_block *nb,
					unsigned long event, void *ptr)
{
	int err = 0;

	if (!lan966x_netdevice_check(dev)) {
		if (event == NETDEV_CHANGEUPPER)
			return lan966x_bridge_check(dev, ptr);
		return 0;
	}

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		err = lan966x_port_prechangeupper(dev, ptr);
		break;
	case NETDEV_CHANGEUPPER:
		err = lan966x_bridge_check(dev, ptr);
		if (err)
			return err;

		err = lan966x_port_changeupper(dev, ptr);
		break;
	}

	return err;
}

static int lan966x_netdevice_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int ret;

	ret = lan966x_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(ret);
}

/* We don't offload uppers such as LAG as bridge ports, so every device except
 * the bridge itself is foreign.
 */
static bool lan966x_foreign_dev_check(const struct net_device *dev,
				      const struct net_device *foreign_dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;

	if (netif_is_bridge_master(foreign_dev))
		if (lan966x->bridge == foreign_dev)
			return false;

	return true;
}

static int lan966x_switchdev_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     lan966x_netdevice_check,
						     lan966x_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
							   lan966x_netdevice_check,
							   lan966x_foreign_dev_check,
							   lan966x_handle_fdb);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

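/* VLANs notified against the bridge itself (orig_dev) are programmed on the
 * CPU port, while VLANs notified against a port netdevice are programmed on
 * that port.
 */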
static int lan966x_handle_port_vlan_add(struct lan966x_port *port,
					const struct switchdev_obj *obj)
{
	const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct lan966x *lan966x = port->lan966x;

	if (!netif_is_bridge_master(obj->orig_dev))
		lan966x_vlan_port_add_vlan(port, v->vid,
					   v->flags & BRIDGE_VLAN_INFO_PVID,
					   v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
	else
		lan966x_vlan_cpu_add_vlan(lan966x, v->vid);

	return 0;
}

static int lan966x_handle_port_obj_add(struct net_device *dev, const void *ctx,
				       const struct switchdev_obj *obj,
				       struct netlink_ext_ack *extack)
{
	struct lan966x_port *port = netdev_priv(dev);
	int err;

	if (ctx && ctx != port)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = lan966x_handle_port_vlan_add(port, obj);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = lan966x_handle_port_mdb_add(port, obj);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int lan966x_handle_port_vlan_del(struct lan966x_port *port,
					const struct switchdev_obj *obj)
{
	const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct lan966x *lan966x = port->lan966x;

	if (!netif_is_bridge_master(obj->orig_dev))
		lan966x_vlan_port_del_vlan(port, v->vid);
	else
		lan966x_vlan_cpu_del_vlan(lan966x, v->vid);

	return 0;
}

static int lan966x_handle_port_obj_del(struct net_device *dev, const void *ctx,
				       const struct switchdev_obj *obj)
{
	struct lan966x_port *port = netdev_priv(dev);
	int err;

	if (ctx && ctx != port)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = lan966x_handle_port_vlan_del(port, obj);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = lan966x_handle_port_mdb_del(port, obj);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int lan966x_switchdev_blocking_event(struct notifier_block *nb,
					    unsigned long event,
					    void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    lan966x_netdevice_check,
						    lan966x_handle_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del(dev, ptr,
						    lan966x_netdevice_check,
						    lan966x_handle_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     lan966x_netdevice_check,
						     lan966x_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

static struct notifier_block lan966x_netdevice_nb __read_mostly = {
	.notifier_call = lan966x_netdevice_event,
};

struct notifier_block lan966x_switchdev_nb __read_mostly = {
	.notifier_call = lan966x_switchdev_event,
};

struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = {
	.notifier_call = lan966x_switchdev_blocking_event,
};

void lan966x_register_notifier_blocks(void)
{
	register_netdevice_notifier(&lan966x_netdevice_nb);
	register_switchdev_notifier(&lan966x_switchdev_nb);
	register_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
}

void lan966x_unregister_notifier_blocks(void)
{
	unregister_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
	unregister_switchdev_notifier(&lan966x_switchdev_nb);
	unregister_netdevice_notifier(&lan966x_netdevice_nb);
}