// SPDX-License-Identifier: GPL-2.0+

#include <linux/if_bridge.h>
#include <net/switchdev.h>

#include "lan966x_main.h"

static struct notifier_block lan966x_netdevice_nb __read_mostly;
static struct notifier_block lan966x_switchdev_nb __read_mostly;
static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly;

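/* The helpers below add or remove the port's bit in the PGID port masks that
 * control flooding of multicast (PGID_MC), unknown unicast (PGID_UC) and
 * broadcast (PGID_BC) traffic.
 */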
static void lan966x_port_set_mcast_flood(struct lan966x_port *port,
					 bool enabled)
{
	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_MC));

	val = ANA_PGID_PGID_GET(val);
	if (enabled)
		val |= BIT(port->chip_port);
	else
		val &= ~BIT(port->chip_port);

	lan_rmw(ANA_PGID_PGID_SET(val),
		ANA_PGID_PGID,
		port->lan966x, ANA_PGID(PGID_MC));
}

static void lan966x_port_set_ucast_flood(struct lan966x_port *port,
					 bool enabled)
{
	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_UC));

	val = ANA_PGID_PGID_GET(val);
	if (enabled)
		val |= BIT(port->chip_port);
	else
		val &= ~BIT(port->chip_port);

	lan_rmw(ANA_PGID_PGID_SET(val),
		ANA_PGID_PGID,
		port->lan966x, ANA_PGID(PGID_UC));
}

static void lan966x_port_set_bcast_flood(struct lan966x_port *port,
					 bool enabled)
{
	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_BC));

	val = ANA_PGID_PGID_GET(val);
	if (enabled)
		val |= BIT(port->chip_port);
	else
		val &= ~BIT(port->chip_port);

	lan_rmw(ANA_PGID_PGID_SET(val),
		ANA_PGID_PGID,
		port->lan966x, ANA_PGID(PGID_BC));
}

static void lan966x_port_set_learning(struct lan966x_port *port, bool enabled)
{
	lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(enabled),
		ANA_PORT_CFG_LEARN_ENA,
		port->lan966x, ANA_PORT_CFG(port->chip_port));

	port->learn_ena = enabled;
}

static void lan966x_port_bridge_flags(struct lan966x_port *port,
				      struct switchdev_brport_flags flags)
{
	if (flags.mask & BR_MCAST_FLOOD)
		lan966x_port_set_mcast_flood(port,
					     !!(flags.val & BR_MCAST_FLOOD));

	if (flags.mask & BR_FLOOD)
		lan966x_port_set_ucast_flood(port,
					     !!(flags.val & BR_FLOOD));

	if (flags.mask & BR_BCAST_FLOOD)
		lan966x_port_set_bcast_flood(port,
					     !!(flags.val & BR_BCAST_FLOOD));

	if (flags.mask & BR_LEARNING)
		lan966x_port_set_learning(port,
					  !!(flags.val & BR_LEARNING));
}

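/* Only the flood and learning flags are offloaded; refuse any other bridge
 * port flag.
 */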
static int lan966x_port_pre_bridge_flags(struct lan966x_port *port,
					 struct switchdev_brport_flags flags)
{
	if (flags.mask & ~(BR_MCAST_FLOOD | BR_FLOOD | BR_BCAST_FLOOD |
			   BR_LEARNING))
		return -EINVAL;

	return 0;
}

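/* Program each port's source mask (PGID_SRC + port) with the other ports it
 * may forward to: the remaining bridged ports if the port itself is bridged,
 * otherwise nothing. The CPU port is always a member.
 */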
static void lan966x_update_fwd_mask(struct lan966x *lan966x)
{
	int i;

	for (i = 0; i < lan966x->num_phys_ports; i++) {
		struct lan966x_port *port = lan966x->ports[i];
		unsigned long mask = 0;

		if (port && lan966x->bridge_fwd_mask & BIT(i))
			mask = lan966x->bridge_fwd_mask & ~BIT(i);

		mask |= BIT(CPU_PORT);

		lan_wr(ANA_PGID_PGID_SET(mask),
		       lan966x, ANA_PGID(PGID_SRC + i));
	}
}

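/* Learning stays enabled only in the LEARNING and FORWARDING states, and only
 * if the bridge has learning enabled on this port. The port is part of the
 * forwarding mask only while in the FORWARDING state.
 */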
static void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
{
	struct lan966x *lan966x = port->lan966x;
	bool learn_ena = false;

	if ((state == BR_STATE_FORWARDING || state == BR_STATE_LEARNING) &&
	    port->learn_ena)
		learn_ena = true;

	if (state == BR_STATE_FORWARDING)
		lan966x->bridge_fwd_mask |= BIT(port->chip_port);
	else
		lan966x->bridge_fwd_mask &= ~BIT(port->chip_port);

	lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(learn_ena),
		ANA_PORT_CFG_LEARN_ENA,
		lan966x, ANA_PORT_CFG(port->chip_port));

	lan966x_update_fwd_mask(lan966x);
}

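/* The bridge passes the ageing time in clock_t units; convert it to seconds
 * before programming the MAC table.
 */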
static void lan966x_port_ageing_set(struct lan966x_port *port,
				    unsigned long ageing_clock_t)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	lan966x_mac_set_ageing(port->lan966x, ageing_time);
}

static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
				 const struct switchdev_attr *attr,
				 struct netlink_ext_ack *extack)
{
	struct lan966x_port *port = netdev_priv(dev);
	int err = 0;

	if (ctx && ctx != port)
		return 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		lan966x_port_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		err = lan966x_port_pre_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		lan966x_port_stp_state_set(port, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		lan966x_port_ageing_set(port, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		lan966x_vlan_port_set_vlan_aware(port, attr->u.vlan_filtering);
		lan966x_vlan_port_apply(port);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

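/* All ports must join the same bridge; the first port to join records the
 * bridge device. Joining enables learning and flooding on the port, matching
 * the bridge's default per-port flags.
 */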
static int lan966x_port_bridge_join(struct lan966x_port *port,
				    struct net_device *bridge,
				    struct netlink_ext_ack *extack)
{
	struct switchdev_brport_flags flags = {0};
	struct lan966x *lan966x = port->lan966x;
	struct net_device *dev = port->dev;
	int err;

	if (!lan966x->bridge_mask) {
		lan966x->bridge = bridge;
	} else {
		if (lan966x->bridge != bridge) {
			NL_SET_ERR_MSG_MOD(extack, "Not allowed to add port to a different bridge");
			return -ENODEV;
		}
	}

	err = switchdev_bridge_port_offload(dev, dev, port,
					    &lan966x_switchdev_nb,
					    &lan966x_switchdev_blocking_nb,
					    false, extack);
	if (err)
		return err;

	lan966x->bridge_mask |= BIT(port->chip_port);

	flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	flags.val = flags.mask;
	lan966x_port_bridge_flags(port, flags);

	return 0;
}

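/* Restore standalone defaults: keep flooding enabled, disable learning and
 * return the port to VLAN-unaware host mode with the host PVID.
 */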
static void lan966x_port_bridge_leave(struct lan966x_port *port,
				      struct net_device *bridge)
{
	struct switchdev_brport_flags flags = {0};
	struct lan966x *lan966x = port->lan966x;

	flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	flags.val = flags.mask & ~BR_LEARNING;
	lan966x_port_bridge_flags(port, flags);

	lan966x->bridge_mask &= ~BIT(port->chip_port);

	if (!lan966x->bridge_mask)
		lan966x->bridge = NULL;

	/* Set the port back to host mode */
	lan966x_vlan_port_set_vlan_aware(port, false);
	lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
	lan966x_vlan_port_apply(port);
}

static int lan966x_port_changeupper(struct net_device *dev,
				    struct netdev_notifier_changeupper_info *info)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct netlink_ext_ack *extack;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking)
			err = lan966x_port_bridge_join(port, info->upper_dev,
						       extack);
		else
			lan966x_port_bridge_leave(port, info->upper_dev);
	}

	return err;
}

static int lan966x_port_prechangeupper(struct net_device *dev,
				       struct netdev_notifier_changeupper_info *info)
{
	struct lan966x_port *port = netdev_priv(dev);

	if (netif_is_bridge_master(info->upper_dev) && !info->linking)
		switchdev_bridge_port_unoffload(port->dev, port,
						&lan966x_switchdev_nb,
						&lan966x_switchdev_blocking_nb);

	return NOTIFY_DONE;
}

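/* Walk the bridge's lower devices and refuse topologies that mix ports from
 * different lan966x instances, or that mix lan966x ports with foreign
 * interfaces, since those cannot be offloaded.
 */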
static int lan966x_foreign_bridging_check(struct net_device *bridge,
					  struct netlink_ext_ack *extack)
{
	struct lan966x *lan966x = NULL;
	bool has_foreign = false;
	struct net_device *dev;
	struct list_head *iter;

	if (!netif_is_bridge_master(bridge))
		return 0;

	netdev_for_each_lower_dev(bridge, dev, iter) {
		if (lan966x_netdevice_check(dev)) {
			struct lan966x_port *port = netdev_priv(dev);

			if (lan966x) {
				/* Bridge already has at least one port of a
				 * lan966x switch inside it, check that it's
				 * the same instance of the driver.
				 */
				if (port->lan966x != lan966x) {
					NL_SET_ERR_MSG_MOD(extack,
							   "Bridging between multiple lan966x switches disallowed");
					return -EINVAL;
				}
			} else {
				/* This is the first lan966x port inside this
				 * bridge
				 */
				lan966x = port->lan966x;
			}
		} else {
			has_foreign = true;
		}

		if (lan966x && has_foreign) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Bridging lan966x ports with foreign interfaces disallowed");
			return -EINVAL;
		}
	}

	return 0;
}

static int lan966x_bridge_check(struct net_device *dev,
				struct netdev_notifier_changeupper_info *info)
{
	return lan966x_foreign_bridging_check(info->upper_dev,
					      info->info.extack);
}

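/* CHANGEUPPER events are also checked for foreign netdevices, so that a
 * foreign interface joining a bridge which contains lan966x ports is
 * rejected as well.
 */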
static int lan966x_netdevice_port_event(struct net_device *dev,
					struct notifier_block *nb,
					unsigned long event, void *ptr)
{
	int err = 0;

	if (!lan966x_netdevice_check(dev)) {
		if (event == NETDEV_CHANGEUPPER)
			return lan966x_bridge_check(dev, ptr);
		return 0;
	}

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		err = lan966x_port_prechangeupper(dev, ptr);
		break;
	case NETDEV_CHANGEUPPER:
		err = lan966x_bridge_check(dev, ptr);
		if (err)
			return err;

		err = lan966x_port_changeupper(dev, ptr);
		break;
	}

	return err;
}

static int lan966x_netdevice_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int ret;

	ret = lan966x_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(ret);
}

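/* Tell the FDB event helper which devices are foreign to this switch: any
 * bridge other than the one offloaded by this lan966x instance is treated as
 * foreign.
 */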
static bool lan966x_foreign_dev_check(const struct net_device *dev,
				      const struct net_device *foreign_dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;

	if (netif_is_bridge_master(foreign_dev))
		if (lan966x->bridge != foreign_dev)
			return true;

	return false;
}

static int lan966x_switchdev_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     lan966x_netdevice_check,
						     lan966x_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
							   lan966x_netdevice_check,
							   lan966x_foreign_dev_check,
							   lan966x_handle_fdb,
							   NULL);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

static int lan966x_handle_port_vlan_add(struct lan966x_port *port,
					const struct switchdev_obj *obj)
{
	const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct lan966x *lan966x = port->lan966x;

	/* When adding a port to a vlan, we get a callback for the port but
	 * also for the bridge. When we get the callback for the bridge, just
	 * bail out. Later, when the bridge itself is added to the vlan, we
	 * get another callback here, this time with BRIDGE_VLAN_INFO_BRENTRY
	 * set in the flags. That means the CPU port is added to the vlan, so
	 * broadcast frames and unicast frames with the dmac of the bridge
	 * should be forwarded to the CPU.
	 */
	if (netif_is_bridge_master(obj->orig_dev) &&
	    !(v->flags & BRIDGE_VLAN_INFO_BRENTRY))
		return 0;

	if (!netif_is_bridge_master(obj->orig_dev))
		lan966x_vlan_port_add_vlan(port, v->vid,
					   v->flags & BRIDGE_VLAN_INFO_PVID,
					   v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
	else
		lan966x_vlan_cpu_add_vlan(lan966x, v->vid);

	return 0;
}

static int lan966x_handle_port_obj_add(struct net_device *dev, const void *ctx,
				       const struct switchdev_obj *obj,
				       struct netlink_ext_ack *extack)
{
	struct lan966x_port *port = netdev_priv(dev);
	int err;

	if (ctx && ctx != port)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = lan966x_handle_port_vlan_add(port, obj);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = lan966x_handle_port_mdb_add(port, obj);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int lan966x_handle_port_vlan_del(struct lan966x_port *port,
					const struct switchdev_obj *obj)
{
	const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct lan966x *lan966x = port->lan966x;

	if (!netif_is_bridge_master(obj->orig_dev))
		lan966x_vlan_port_del_vlan(port, v->vid);
	else
		lan966x_vlan_cpu_del_vlan(lan966x, v->vid);

	return 0;
}

static int lan966x_handle_port_obj_del(struct net_device *dev, const void *ctx,
				       const struct switchdev_obj *obj)
{
	struct lan966x_port *port = netdev_priv(dev);
	int err;

	if (ctx && ctx != port)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = lan966x_handle_port_vlan_del(port, obj);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = lan966x_handle_port_mdb_del(port, obj);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int lan966x_switchdev_blocking_event(struct notifier_block *nb,
					    unsigned long event,
					    void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    lan966x_netdevice_check,
						    lan966x_handle_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del(dev, ptr,
						    lan966x_netdevice_check,
						    lan966x_handle_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     lan966x_netdevice_check,
						     lan966x_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

static struct notifier_block lan966x_netdevice_nb __read_mostly = {
	.notifier_call = lan966x_netdevice_event,
};

static struct notifier_block lan966x_switchdev_nb __read_mostly = {
	.notifier_call = lan966x_switchdev_event,
};

static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = {
	.notifier_call = lan966x_switchdev_blocking_event,
};

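/* Register the netdevice and switchdev notifiers; unregistration happens in
 * the reverse order of registration.
 */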
void lan966x_register_notifier_blocks(void)
{
	register_netdevice_notifier(&lan966x_netdevice_nb);
	register_switchdev_notifier(&lan966x_switchdev_nb);
	register_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
}

void lan966x_unregister_notifier_blocks(void)
{
	unregister_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
	unregister_switchdev_notifier(&lan966x_switchdev_nb);
	unregister_netdevice_notifier(&lan966x_netdevice_nb);
}