// SPDX-License-Identifier: GPL-2.0+

#include <linux/if_bridge.h>
#include <net/switchdev.h>

#include "lan966x_main.h"

static struct notifier_block lan966x_netdevice_nb __read_mostly;
static struct notifier_block lan966x_switchdev_nb __read_mostly;
static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly;

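/* Update the IP multicast flood mask (PGID_MCIPV4/PGID_MCIPV6) for a port.
 * While multicast snooping is disabled, the port follows the generic
 * multicast flood mask (PGID_MC); once snooping is enabled, the port is
 * removed from IP multicast flooding so forwarding is driven by the
 * installed MDB entries.
 */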
static void lan966x_port_set_mcast_ip_flood(struct lan966x_port *port,
					    u32 pgid_ip)
{
	struct lan966x *lan966x = port->lan966x;
	u32 flood_mask_ip;

	flood_mask_ip = lan_rd(lan966x, ANA_PGID(pgid_ip));
	flood_mask_ip = ANA_PGID_PGID_GET(flood_mask_ip);

	/* If multicast snooping is not enabled, use the multicast flood
	 * mask to decide whether multicast flooding should be enabled.
	 */
	if (!port->mcast_ena) {
		u32 flood_mask;

		flood_mask = lan_rd(lan966x, ANA_PGID(PGID_MC));
		flood_mask = ANA_PGID_PGID_GET(flood_mask);

		if (flood_mask & BIT(port->chip_port))
			flood_mask_ip |= BIT(port->chip_port);
		else
			flood_mask_ip &= ~BIT(port->chip_port);
	} else {
		flood_mask_ip &= ~BIT(port->chip_port);
	}

	lan_rmw(ANA_PGID_PGID_SET(flood_mask_ip),
		ANA_PGID_PGID,
		lan966x, ANA_PGID(pgid_ip));
}

static void lan966x_port_set_mcast_flood(struct lan966x_port *port,
					 bool enabled)
{
	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_MC));

	val = ANA_PGID_PGID_GET(val);
	if (enabled)
		val |= BIT(port->chip_port);
	else
		val &= ~BIT(port->chip_port);

	lan_rmw(ANA_PGID_PGID_SET(val),
		ANA_PGID_PGID,
		port->lan966x, ANA_PGID(PGID_MC));

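	/* While snooping is disabled, keep the IPv4/IPv6 multicast flood
	 * masks in sync with the generic multicast flood mask.
	 */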
	if (!port->mcast_ena) {
		lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
		lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
	}
}

static void lan966x_port_set_ucast_flood(struct lan966x_port *port,
					 bool enabled)
{
	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_UC));

	val = ANA_PGID_PGID_GET(val);
	if (enabled)
		val |= BIT(port->chip_port);
	else
		val &= ~BIT(port->chip_port);

	lan_rmw(ANA_PGID_PGID_SET(val),
		ANA_PGID_PGID,
		port->lan966x, ANA_PGID(PGID_UC));
}

static void lan966x_port_set_bcast_flood(struct lan966x_port *port,
					 bool enabled)
{
	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_BC));

	val = ANA_PGID_PGID_GET(val);
	if (enabled)
		val |= BIT(port->chip_port);
	else
		val &= ~BIT(port->chip_port);

	lan_rmw(ANA_PGID_PGID_SET(val),
		ANA_PGID_PGID,
		port->lan966x, ANA_PGID(PGID_BC));
}

static void lan966x_port_set_learning(struct lan966x_port *port, bool enabled)
{
	lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(enabled),
		ANA_PORT_CFG_LEARN_ENA,
		port->lan966x, ANA_PORT_CFG(port->chip_port));

	port->learn_ena = enabled;
}

static void lan966x_port_bridge_flags(struct lan966x_port *port,
				      struct switchdev_brport_flags flags)
{
	if (flags.mask & BR_MCAST_FLOOD)
		lan966x_port_set_mcast_flood(port,
					     !!(flags.val & BR_MCAST_FLOOD));

	if (flags.mask & BR_FLOOD)
		lan966x_port_set_ucast_flood(port,
					     !!(flags.val & BR_FLOOD));

	if (flags.mask & BR_BCAST_FLOOD)
		lan966x_port_set_bcast_flood(port,
					     !!(flags.val & BR_BCAST_FLOOD));

	if (flags.mask & BR_LEARNING)
		lan966x_port_set_learning(port,
					  !!(flags.val & BR_LEARNING));
}

static int lan966x_port_pre_bridge_flags(struct lan966x_port *port,
					 struct switchdev_brport_flags flags)
{
	if (flags.mask & ~(BR_MCAST_FLOOD | BR_FLOOD | BR_BCAST_FLOOD |
			   BR_LEARNING))
		return -EINVAL;

	return 0;
}

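/* Program the per-port source masks (PGID_SRC): a port in the bridge
 * forwarding mask may reach every other forwarding port except itself,
 * while a standalone port may only reach the CPU port.
 */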
static void lan966x_update_fwd_mask(struct lan966x *lan966x)
{
	int i;

	for (i = 0; i < lan966x->num_phys_ports; i++) {
		struct lan966x_port *port = lan966x->ports[i];
		unsigned long mask = 0;

		if (port && lan966x->bridge_fwd_mask & BIT(i))
			mask = lan966x->bridge_fwd_mask & ~BIT(i);

		mask |= BIT(CPU_PORT);

		lan_wr(ANA_PGID_PGID_SET(mask),
		       lan966x, ANA_PGID(PGID_SRC + i));
	}
}

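/* Apply the STP state: hardware learning is only allowed in the LEARNING
 * and FORWARDING states (and only if the bridge requested it via
 * BR_LEARNING), and only FORWARDING ports take part in the bridge
 * forwarding mask.
 */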
static void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
{
	struct lan966x *lan966x = port->lan966x;
	bool learn_ena = false;

	if ((state == BR_STATE_FORWARDING || state == BR_STATE_LEARNING) &&
	    port->learn_ena)
		learn_ena = true;

	if (state == BR_STATE_FORWARDING)
		lan966x->bridge_fwd_mask |= BIT(port->chip_port);
	else
		lan966x->bridge_fwd_mask &= ~BIT(port->chip_port);

	lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(learn_ena),
		ANA_PORT_CFG_LEARN_ENA,
		lan966x, ANA_PORT_CFG(port->chip_port));

	lan966x_update_fwd_mask(lan966x);
}

static void lan966x_port_ageing_set(struct lan966x_port *port,
				    unsigned long ageing_clock_t)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	lan966x_mac_set_ageing(port->lan966x, ageing_time);
}

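/* Enable or disable multicast snooping on a port: redirect IGMP/MLD
 * control frames to the CPU, restore or clear the MDB entries in
 * hardware and recompute the IP multicast flood masks.
 */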
static void lan966x_port_mc_set(struct lan966x_port *port, bool mcast_ena)
{
	struct lan966x *lan966x = port->lan966x;

	port->mcast_ena = mcast_ena;
	if (mcast_ena)
		lan966x_mdb_restore_entries(lan966x);
	else
		lan966x_mdb_clear_entries(lan966x);

	lan_rmw(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_SET(mcast_ena) |
		ANA_CPU_FWD_CFG_MLD_REDIR_ENA_SET(mcast_ena) |
		ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_SET(mcast_ena),
		ANA_CPU_FWD_CFG_IGMP_REDIR_ENA |
		ANA_CPU_FWD_CFG_MLD_REDIR_ENA |
		ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA,
		lan966x, ANA_CPU_FWD_CFG(port->chip_port));

	lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
	lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
}

static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
				 const struct switchdev_attr *attr,
				 struct netlink_ext_ack *extack)
{
	struct lan966x_port *port = netdev_priv(dev);
	int err = 0;

	if (ctx && ctx != port)
		return 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		lan966x_port_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		err = lan966x_port_pre_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		lan966x_port_stp_state_set(port, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		lan966x_port_ageing_set(port, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		lan966x_vlan_port_set_vlan_aware(port, attr->u.vlan_filtering);
		lan966x_vlan_port_apply(port);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		lan966x_port_mc_set(port, !attr->u.mc_disabled);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

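/* Offload a port to the bridge. Only a single bridge per switch instance
 * is supported; joining enables learning and flooding on the port.
 */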
static int lan966x_port_bridge_join(struct lan966x_port *port,
				    struct net_device *bridge,
				    struct netlink_ext_ack *extack)
{
	struct switchdev_brport_flags flags = {0};
	struct lan966x *lan966x = port->lan966x;
	struct net_device *dev = port->dev;
	int err;

	if (!lan966x->bridge_mask) {
		lan966x->bridge = bridge;
	} else {
		if (lan966x->bridge != bridge) {
			NL_SET_ERR_MSG_MOD(extack, "Not allowed to add port to a different bridge");
			return -ENODEV;
		}
	}

	err = switchdev_bridge_port_offload(dev, dev, port,
					    &lan966x_switchdev_nb,
					    &lan966x_switchdev_blocking_nb,
					    false, extack);
	if (err)
		return err;

	lan966x->bridge_mask |= BIT(port->chip_port);

	flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	flags.val = flags.mask;
	lan966x_port_bridge_flags(port, flags);

	return 0;
}

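/* Undo the bridge offload: keep flooding enabled, disable learning and
 * return the port to its standalone (host mode) VLAN configuration.
 */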
static void lan966x_port_bridge_leave(struct lan966x_port *port,
				      struct net_device *bridge)
{
	struct switchdev_brport_flags flags = {0};
	struct lan966x *lan966x = port->lan966x;

	flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	flags.val = flags.mask & ~BR_LEARNING;
	lan966x_port_bridge_flags(port, flags);

	lan966x->bridge_mask &= ~BIT(port->chip_port);

	if (!lan966x->bridge_mask)
		lan966x->bridge = NULL;

	/* Set the port back to host mode */
	lan966x_vlan_port_set_vlan_aware(port, false);
	lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
	lan966x_vlan_port_apply(port);
}

static int lan966x_port_changeupper(struct net_device *dev,
				    struct netdev_notifier_changeupper_info *info)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct netlink_ext_ack *extack;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking)
			err = lan966x_port_bridge_join(port, info->upper_dev,
						       extack);
		else
			lan966x_port_bridge_leave(port, info->upper_dev);
	}

	return err;
}

static int lan966x_port_prechangeupper(struct net_device *dev,
				       struct netdev_notifier_changeupper_info *info)
{
	struct lan966x_port *port = netdev_priv(dev);

	if (netif_is_bridge_master(info->upper_dev) && !info->linking)
		switchdev_bridge_port_unoffload(port->dev, port,
						NULL, NULL);

	return NOTIFY_DONE;
}

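/* Reject bridge configurations that cannot be offloaded: mixing ports of
 * different lan966x switches or bridging lan966x ports with foreign
 * (non-lan966x) interfaces.
 */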
static int lan966x_foreign_bridging_check(struct net_device *bridge,
					  struct netlink_ext_ack *extack)
{
	struct lan966x *lan966x = NULL;
	bool has_foreign = false;
	struct net_device *dev;
	struct list_head *iter;

	if (!netif_is_bridge_master(bridge))
		return 0;

	netdev_for_each_lower_dev(bridge, dev, iter) {
		if (lan966x_netdevice_check(dev)) {
			struct lan966x_port *port = netdev_priv(dev);

			if (lan966x) {
				/* The bridge already contains at least one
				 * lan966x port; check that it belongs to the
				 * same switch instance.
				 */
				if (port->lan966x != lan966x) {
					NL_SET_ERR_MSG_MOD(extack,
							   "Bridging between multiple lan966x switches disallowed");
					return -EINVAL;
				}
			} else {
				/* This is the first lan966x port inside this
				 * bridge
				 */
				lan966x = port->lan966x;
			}
		} else {
			has_foreign = true;
		}

		if (lan966x && has_foreign) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Bridging lan966x ports with foreign interfaces disallowed");
			return -EINVAL;
		}
	}

	return 0;
}

static int lan966x_bridge_check(struct net_device *dev,
				struct netdev_notifier_changeupper_info *info)
{
	return lan966x_foreign_bridging_check(info->upper_dev,
					      info->info.extack);
}

static int lan966x_netdevice_port_event(struct net_device *dev,
					struct notifier_block *nb,
					unsigned long event, void *ptr)
{
	int err = 0;

	if (!lan966x_netdevice_check(dev)) {
		if (event == NETDEV_CHANGEUPPER)
			return lan966x_bridge_check(dev, ptr);
		return 0;
	}

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		err = lan966x_port_prechangeupper(dev, ptr);
		break;
	case NETDEV_CHANGEUPPER:
		err = lan966x_bridge_check(dev, ptr);
		if (err)
			return err;

		err = lan966x_port_changeupper(dev, ptr);
		break;
	}

	return err;
}

static int lan966x_netdevice_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int ret;

	ret = lan966x_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(ret);
}

/* We don't offload uppers such as LAG as bridge ports, so every device except
 * the bridge itself is foreign.
 */
static bool lan966x_foreign_dev_check(const struct net_device *dev,
				      const struct net_device *foreign_dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;

	if (netif_is_bridge_master(foreign_dev))
		if (lan966x->bridge == foreign_dev)
			return false;

	return true;
}

static int lan966x_switchdev_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     lan966x_netdevice_check,
						     lan966x_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
							   lan966x_netdevice_check,
							   lan966x_foreign_dev_check,
							   lan966x_handle_fdb);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

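/* VLANs notified on the bridge device itself (orig_dev is the bridge) are
 * handled for the CPU port; all others are per-port VLANs.
 */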
static int lan966x_handle_port_vlan_add(struct lan966x_port *port,
					const struct switchdev_obj *obj)
{
	const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct lan966x *lan966x = port->lan966x;

	if (!netif_is_bridge_master(obj->orig_dev))
		lan966x_vlan_port_add_vlan(port, v->vid,
					   v->flags & BRIDGE_VLAN_INFO_PVID,
					   v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
	else
		lan966x_vlan_cpu_add_vlan(lan966x, v->vid);

	return 0;
}

static int lan966x_handle_port_obj_add(struct net_device *dev, const void *ctx,
				       const struct switchdev_obj *obj,
				       struct netlink_ext_ack *extack)
{
	struct lan966x_port *port = netdev_priv(dev);
	int err;

	if (ctx && ctx != port)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = lan966x_handle_port_vlan_add(port, obj);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = lan966x_handle_port_mdb_add(port, obj);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int lan966x_handle_port_vlan_del(struct lan966x_port *port,
					const struct switchdev_obj *obj)
{
	const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct lan966x *lan966x = port->lan966x;

	if (!netif_is_bridge_master(obj->orig_dev))
		lan966x_vlan_port_del_vlan(port, v->vid);
	else
		lan966x_vlan_cpu_del_vlan(lan966x, v->vid);

	return 0;
}

static int lan966x_handle_port_obj_del(struct net_device *dev, const void *ctx,
				       const struct switchdev_obj *obj)
{
	struct lan966x_port *port = netdev_priv(dev);
	int err;

	if (ctx && ctx != port)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = lan966x_handle_port_vlan_del(port, obj);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = lan966x_handle_port_mdb_del(port, obj);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int lan966x_switchdev_blocking_event(struct notifier_block *nb,
					    unsigned long event,
					    void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    lan966x_netdevice_check,
						    lan966x_handle_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del(dev, ptr,
						    lan966x_netdevice_check,
						    lan966x_handle_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     lan966x_netdevice_check,
						     lan966x_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

static struct notifier_block lan966x_netdevice_nb __read_mostly = {
	.notifier_call = lan966x_netdevice_event,
};

static struct notifier_block lan966x_switchdev_nb __read_mostly = {
	.notifier_call = lan966x_switchdev_event,
};

static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = {
	.notifier_call = lan966x_switchdev_blocking_event,
};

void lan966x_register_notifier_blocks(void)
{
	register_netdevice_notifier(&lan966x_netdevice_nb);
	register_switchdev_notifier(&lan966x_switchdev_nb);
	register_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
}

void lan966x_unregister_notifier_blocks(void)
{
	unregister_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
	unregister_switchdev_notifier(&lan966x_switchdev_nb);
	unregister_netdevice_notifier(&lan966x_netdevice_nb);
}