// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/if_bridge.h>
#include <net/switchdev.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

static struct workqueue_struct *sparx5_owq;

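/* Deferred FDB event, processed from the ordered workqueue with rtnl held */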
struct sparx5_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	struct sparx5 *sparx5;
	unsigned long event;
};

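/* Only unicast, multicast and broadcast flooding can be offloaded */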
static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port,
					     struct switchdev_brport_flags flags)
{
	if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD))
		return -EINVAL;

	return 0;
}

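/* Apply the requested flood flags to the corresponding PGID port masks */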
static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
					  struct switchdev_brport_flags flags)
{
	int pgid;

	if (flags.mask & BR_MCAST_FLOOD)
		for (pgid = PGID_MC_FLOOD; pgid <= PGID_IPV6_MC_CTRL; pgid++)
			sparx5_pgid_update_mask(port, pgid,
						!!(flags.val & BR_MCAST_FLOOD));
	if (flags.mask & BR_FLOOD)
		sparx5_pgid_update_mask(port, PGID_UC_FLOOD,
					!!(flags.val & BR_FLOOD));
	if (flags.mask & BR_BCAST_FLOOD)
		sparx5_pgid_update_mask(port, PGID_BCAST,
					!!(flags.val & BR_BCAST_FLOOD));
}

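/* Map the bridge STP state onto the hardware forward and learn port masks */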
static void sparx5_attr_stp_state_set(struct sparx5_port *port,
				      u8 state)
{
	struct sparx5 *sparx5 = port->sparx5;

	if (!test_bit(port->portno, sparx5->bridge_mask)) {
		netdev_err(port->ndev,
			   "Controlling non-bridged port %d?\n", port->portno);
		return;
	}

	switch (state) {
	case BR_STATE_FORWARDING:
		set_bit(port->portno, sparx5->bridge_fwd_mask);
		fallthrough;
	case BR_STATE_LEARNING:
		set_bit(port->portno, sparx5->bridge_lrn_mask);
		break;

	default:
		/* All other states are treated as blocking */
		clear_bit(port->portno, sparx5->bridge_fwd_mask);
		clear_bit(port->portno, sparx5->bridge_lrn_mask);
		break;
	}

	/* Apply the bridge_fwd_mask to all the ports */
	sparx5_update_fwd(sparx5);
}

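/* The bridge ageing time arrives in clock_t units; hardware expects msecs */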
static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
					unsigned long ageing_clock_t)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies);

	sparx5_set_ageing(port->sparx5, ageing_time);
}

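/* Dispatch switchdev port attribute changes to the handlers above */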
static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
				const struct switchdev_attr *attr,
				struct netlink_ext_ack *extack)
{
	struct sparx5_port *port = netdev_priv(dev);

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		return sparx5_port_attr_pre_bridge_flags(port,
							 attr->u.brport_flags);
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		sparx5_attr_stp_state_set(port, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		/* Use PVID 1 when the default_pvid is 0, to avoid
		 * collisions with non-bridged ports.
		 */
		if (port->pvid == 0)
			port->pvid = 1;
		port->vlan_aware = attr->u.vlan_filtering;
		sparx5_vlan_port_apply(port->sparx5, port);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

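/* Attach a port to the hardware bridge; only a single bridge is supported */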
static int sparx5_port_bridge_join(struct sparx5_port *port,
				   struct net_device *bridge,
				   struct netlink_ext_ack *extack)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct net_device *ndev = port->ndev;
	int err;

	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		/* First bridged port */
		sparx5->hw_bridge_dev = bridge;
	else if (sparx5->hw_bridge_dev != bridge)
		/* Adding the port to a second bridge is not supported */
		return -ENODEV;

	set_bit(port->portno, sparx5->bridge_mask);

	err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
					    false, extack);
	if (err)
		goto err_switchdev_offload;

	/* Remove standalone port entry */
	sparx5_mact_forget(sparx5, ndev->dev_addr, 0);

	/* The port enters bridge mode, so multicast frames no longer need to
	 * be copied to the CPU unless the bridge requests them.
	 */
	__dev_mc_unsync(ndev, sparx5_mc_unsync);

	return 0;

err_switchdev_offload:
	clear_bit(port->portno, sparx5->bridge_mask);
	return err;
}

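/* Detach a port from the hardware bridge and restore standalone mode */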
static void sparx5_port_bridge_leave(struct sparx5_port *port,
				     struct net_device *bridge)
{
	struct sparx5 *sparx5 = port->sparx5;

	switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL);

	clear_bit(port->portno, sparx5->bridge_mask);
	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		sparx5->hw_bridge_dev = NULL;

	/* Clear bridge vlan settings before updating the port settings */
	port->vlan_aware = 0;
	port->pvid = NULL_VID;
	port->vid = NULL_VID;

	/* Forward frames to CPU */
	sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, 0);

	/* The port returns to host mode, so restore the multicast list */
	__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
}

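/* Handle a port being linked to or unlinked from a bridge master */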
static int sparx5_port_changeupper(struct net_device *dev,
				   struct netdev_notifier_changeupper_info *info)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct netlink_ext_ack *extack;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking)
			err = sparx5_port_bridge_join(port, info->upper_dev,
						      extack);
		else
			sparx5_port_bridge_leave(port, info->upper_dev);

		sparx5_vlan_port_apply(port->sparx5, port);
	}

	return err;
}

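/* Learn or forget the port MAC address as a CPU-directed MAC table entry */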
static int sparx5_port_add_addr(struct net_device *dev, bool up)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;
	u16 vid = port->pvid;

	if (up)
		sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, vid);
	else
		sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);

	return 0;
}

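/* Handle netdev events for the ports owned by this driver */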
static int sparx5_netdevice_port_event(struct net_device *dev,
				       struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	int err = 0;

	if (!sparx5_netdevice_check(dev))
		return 0;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = sparx5_port_changeupper(dev, ptr);
		break;
	case NETDEV_PRE_UP:
		err = sparx5_port_add_addr(dev, true);
		break;
	case NETDEV_DOWN:
		err = sparx5_port_add_addr(dev, false);
		break;
	}

	return err;
}

static int sparx5_netdevice_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int ret;

	ret = sparx5_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(ret);
}

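/* Deferred work: program the FDB add/del event into the MAC table */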
static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct sparx5_switchdev_event_work *switchdev_work =
		container_of(work, struct sparx5_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct sparx5_port *port;
	struct sparx5 *sparx5;
	bool host_addr;
	u16 vid;

	rtnl_lock();
	sparx5 = switchdev_work->sparx5;
	if (!sparx5_netdevice_check(dev)) {
		host_addr = true;
	} else {
		host_addr = false;
		port = netdev_priv(dev);
	}

	fdb_info = &switchdev_work->fdb_info;

	/* Use PVID 1 when the default_pvid is 0, to avoid
	 * collisions with non-bridged ports.
	 */
	if (fdb_info->vid == 0)
		vid = 1;
	else
		vid = fdb_info->vid;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (host_addr)
			sparx5_add_mact_entry(sparx5, dev, PGID_CPU,
					      fdb_info->addr, vid);
		else
			sparx5_add_mact_entry(sparx5, port->ndev, port->portno,
					      fdb_info->addr, vid);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		sparx5_del_mact_entry(sparx5, fdb_info->addr, vid);
		break;
	}

	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}

static void sparx5_schedule_work(struct work_struct *work)
{
	queue_work(sparx5_owq, work);
}

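/* Atomic switchdev notifier: FDB events are copied and deferred to the
 * ordered workqueue, where rtnl_lock() can be taken.
 */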
static int sparx5_switchdev_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct sparx5_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct sparx5 *spx5;
	int err;

	spx5 = container_of(nb, struct sparx5, switchdev_nb);

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
		if (!switchdev_work)
			return NOTIFY_BAD;

		switchdev_work->dev = dev;
		switchdev_work->event = event;
		switchdev_work->sparx5 = spx5;

		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  sparx5_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		dev_hold(dev);

		sparx5_schedule_work(&switchdev_work->work);
		break;
	}

	return NOTIFY_DONE;
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

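/* Add a VLAN to a port, or learn the broadcast address when the VLAN is
 * added on the bridge device itself.
 */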
static int sparx5_handle_port_vlan_add(struct net_device *dev,
				       struct notifier_block *nb,
				       const struct switchdev_obj_port_vlan *v)
{
	struct sparx5_port *port = netdev_priv(dev);

	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		/* Flood broadcast to CPU */
		sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
				  v->vid);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	return sparx5_vlan_vid_add(port, v->vid,
				   v->flags & BRIDGE_VLAN_INFO_PVID,
				   v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
}

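/* Add a port to a multicast group; a PGID entry is allocated the first time
 * a group is seen, while host (bridge) entries go directly to the CPU port.
 */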
static int sparx5_handle_port_mdb_add(struct net_device *dev,
				      struct notifier_block *nb,
				      const struct switchdev_obj_port_mdb *v)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *spx5 = port->sparx5;
	u16 pgid_idx, vid;
	u32 mact_entry;
	int res, err;

	if (netif_is_bridge_master(v->obj.orig_dev)) {
		sparx5_mact_learn(spx5, PGID_CPU, v->addr, v->vid);
		return 0;
	}

	/* When the bridge is VLAN unaware, the VLAN tag is not parsed and we
	 * receive vid 0. Fall back to bridge vid 1.
	 */
	if (!br_vlan_enabled(spx5->hw_bridge_dev))
		vid = 1;
	else
		vid = v->vid;

	res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);

	if (res == 0) {
		pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);

		/* MC_IDX starts after the port masks in the PGID table */
		pgid_idx += SPX5_PORTS;
		sparx5_pgid_update_mask(port, pgid_idx, true);
	} else {
		err = sparx5_pgid_alloc_mcast(spx5, &pgid_idx);
		if (err) {
			netdev_warn(dev, "multicast pgid table full\n");
			return err;
		}
		sparx5_pgid_update_mask(port, pgid_idx, true);
		err = sparx5_mact_learn(spx5, pgid_idx, v->addr, vid);
		if (err) {
			netdev_warn(dev, "could not learn mac address %pM\n",
				    v->addr);
			sparx5_pgid_update_mask(port, pgid_idx, false);
			return err;
		}
	}

	return 0;
}

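/* Drop a multicast MAC table entry and release its PGID entry */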
static int sparx5_mdb_del_entry(struct net_device *dev,
				struct sparx5 *spx5,
				const unsigned char mac[ETH_ALEN],
				const u16 vid,
				u16 pgid_idx)
{
	int err;

	err = sparx5_mact_forget(spx5, mac, vid);
	if (err) {
		netdev_warn(dev, "could not forget mac address %pM\n", mac);
		return err;
	}
	err = sparx5_pgid_free(spx5, pgid_idx);
	if (err) {
		netdev_err(dev, "attempted to free already freed pgid\n");
		return err;
	}
	return 0;
}

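/* Remove a port from a multicast group; drop the group when it has no ports
 * left in its PGID mask.
 */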
static int sparx5_handle_port_mdb_del(struct net_device *dev,
				      struct notifier_block *nb,
				      const struct switchdev_obj_port_mdb *v)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *spx5 = port->sparx5;
	u32 mact_entry, pgid_entry[3];
	u16 pgid_idx, vid;
	int res, err;

	if (netif_is_bridge_master(v->obj.orig_dev)) {
		sparx5_mact_forget(spx5, v->addr, v->vid);
		return 0;
	}

	if (!br_vlan_enabled(spx5->hw_bridge_dev))
		vid = 1;
	else
		vid = v->vid;

	res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);

	if (res == 0) {
		pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);

		/* MC_IDX starts after the port masks in the PGID table */
		pgid_idx += SPX5_PORTS;
		sparx5_pgid_update_mask(port, pgid_idx, false);

		sparx5_pgid_read_mask(spx5, pgid_idx, pgid_entry);
		if (bitmap_empty((unsigned long *)pgid_entry, SPX5_PORTS)) {
			/* No ports are in the MC group. Remove the entry */
			err = sparx5_mdb_del_entry(dev, spx5, v->addr, vid,
						   pgid_idx);
			if (err)
				return err;
		}
	}

	return 0;
}

static int sparx5_handle_port_obj_add(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_add(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = sparx5_handle_port_mdb_add(dev, nb,
						 SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

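/* Delete a VLAN from a port, or forget the broadcast address when the VLAN
 * is deleted on the bridge device itself.
 */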
static int sparx5_handle_port_vlan_del(struct net_device *dev,
				       struct notifier_block *nb,
				       u16 vid)
{
	struct sparx5_port *port = netdev_priv(dev);

	/* Is this the bridge device itself? */
	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		sparx5_mact_forget(sparx5, dev->broadcast, vid);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	return sparx5_vlan_vid_del(port, vid);
}

static int sparx5_handle_port_obj_del(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_del(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = sparx5_handle_port_mdb_del(dev, nb,
						 SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

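/* Blocking switchdev notifier: VLAN/MDB object add/del and attribute set */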
static int sparx5_switchdev_blocking_event(struct notifier_block *nb,
					   unsigned long event,
					   void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = sparx5_handle_port_obj_add(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = sparx5_handle_port_obj_del(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

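/* Register the netdevice and switchdev notifiers and the ordered workqueue
 * used to defer FDB updates.
 */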
int sparx5_register_notifier_blocks(struct sparx5 *s5)
{
	int err;

	s5->netdevice_nb.notifier_call = sparx5_netdevice_event;
	err = register_netdevice_notifier(&s5->netdevice_nb);
	if (err)
		return err;

	s5->switchdev_nb.notifier_call = sparx5_switchdev_event;
	err = register_switchdev_notifier(&s5->switchdev_nb);
	if (err)
		goto err_switchdev_nb;

	s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event;
	err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	if (err)
		goto err_switchdev_blocking_nb;

	sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0);
	if (!sparx5_owq) {
		err = -ENOMEM;
		goto err_alloc_owq;
	}

	return 0;

err_alloc_owq:
	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&s5->switchdev_nb);
err_switchdev_nb:
	unregister_netdevice_notifier(&s5->netdevice_nb);

	return err;
}

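/* Tear everything down in the reverse order of registration */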
void sparx5_unregister_notifier_blocks(struct sparx5 *s5)
{
	destroy_workqueue(sparx5_owq);

	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	unregister_switchdev_notifier(&s5->switchdev_nb);
	unregister_netdevice_notifier(&s5->netdevice_nb);
}