// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/if_bridge.h>
#include <net/switchdev.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

static struct workqueue_struct *sparx5_owq;

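/* FDB events arrive in atomic context (see sparx5_switchdev_event()), so
 * they are copied into this work item and handled later on the ordered
 * workqueue sparx5_owq, where it is safe to take the rtnl lock and access
 * the MAC table.
 */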
struct sparx5_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	struct sparx5 *sparx5;
	unsigned long event;
};

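/* Reject bridge port flags that cannot be offloaded; only the unicast,
 * multicast and broadcast flood controls are backed by hardware here.
 */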
static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port,
					     struct switchdev_brport_flags flags)
{
	if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD))
		return -EINVAL;

	return 0;
}

static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
					  struct switchdev_brport_flags flags)
{
	int pgid;

	if (flags.mask & BR_MCAST_FLOOD) {
		for (pgid = PGID_MC_FLOOD; pgid <= PGID_IPV6_MC_CTRL; pgid++)
			sparx5_pgid_update_mask(port, pgid,
						!!(flags.val & BR_MCAST_FLOOD));
	}
	if (flags.mask & BR_FLOOD)
		sparx5_pgid_update_mask(port, PGID_UC_FLOOD,
					!!(flags.val & BR_FLOOD));
	if (flags.mask & BR_BCAST_FLOOD)
		sparx5_pgid_update_mask(port, PGID_BCAST,
					!!(flags.val & BR_BCAST_FLOOD));
}

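/* Map the STP state onto the forwarding and learning port masks:
 * FORWARDING sets both bits, LEARNING only the learning bit, and any
 * other state clears both, which blocks the port.
 */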
static void sparx5_attr_stp_state_set(struct sparx5_port *port, u8 state)
{
	struct sparx5 *sparx5 = port->sparx5;

	if (!test_bit(port->portno, sparx5->bridge_mask)) {
		netdev_err(port->ndev,
			   "Controlling non-bridged port %d?\n", port->portno);
		return;
	}

	switch (state) {
	case BR_STATE_FORWARDING:
		set_bit(port->portno, sparx5->bridge_fwd_mask);
		fallthrough;
	case BR_STATE_LEARNING:
		set_bit(port->portno, sparx5->bridge_lrn_mask);
		break;

	default:
		/* All other states are treated as blocking */
		clear_bit(port->portno, sparx5->bridge_fwd_mask);
		clear_bit(port->portno, sparx5->bridge_lrn_mask);
		break;
	}

	/* Apply the updated forwarding and learning masks to all ports */
	sparx5_update_fwd(sparx5);
}

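/* The bridge hands over the ageing time in clock_t units; convert it to
 * milliseconds before programming the hardware.
 */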
static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
					unsigned long ageing_clock_t)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies);

	sparx5_set_ageing(port->sparx5, ageing_time);
}

static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
				const struct switchdev_attr *attr,
				struct netlink_ext_ack *extack)
{
	struct sparx5_port *port = netdev_priv(dev);

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		return sparx5_port_attr_pre_bridge_flags(port,
							 attr->u.brport_flags);
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		sparx5_attr_stp_state_set(port, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		/* Use PVID 1 when the default_pvid is 0, to avoid
		 * collisions with non-bridged ports.
		 */
		if (port->pvid == 0)
			port->pvid = 1;
		port->vlan_aware = attr->u.vlan_filtering;
		sparx5_vlan_port_apply(port->sparx5, port);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

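/* Only a single hardware bridge is supported: the first bridged port
 * selects the bridge device, and joining any other bridge is refused.
 */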
static int sparx5_port_bridge_join(struct sparx5_port *port,
				   struct net_device *bridge,
				   struct netlink_ext_ack *extack)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct net_device *ndev = port->ndev;
	int err;

	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		/* First bridged port */
		sparx5->hw_bridge_dev = bridge;
	else if (sparx5->hw_bridge_dev != bridge)
		/* Adding the port to a second bridge is not supported */
		return -ENODEV;

	set_bit(port->portno, sparx5->bridge_mask);

	err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
					    false, extack);
	if (err)
		goto err_switchdev_offload;

	/* Remove the standalone port entry */
	sparx5_mact_forget(sparx5, ndev->dev_addr, 0);

	/* The port enters bridge mode, so multicast frames no longer need
	 * to be copied to the CPU unless the bridge requests them.
	 */
	__dev_mc_unsync(ndev, sparx5_mc_unsync);

	return 0;

err_switchdev_offload:
	clear_bit(port->portno, sparx5->bridge_mask);
	return err;
}

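/* Undo the bridge offload: drop the port from the bridge mask, reset its
 * VLAN state and let the CPU see its traffic again.
 */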
static void sparx5_port_bridge_leave(struct sparx5_port *port,
				     struct net_device *bridge)
{
	struct sparx5 *sparx5 = port->sparx5;

	switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL);

	clear_bit(port->portno, sparx5->bridge_mask);
	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		sparx5->hw_bridge_dev = NULL;

	/* Clear the bridge VLAN settings before updating the port settings */
	port->vlan_aware = 0;
	port->pvid = NULL_VID;
	port->vid = NULL_VID;

	/* Forward frames to the CPU */
	sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, 0);

	/* The port returns to host mode, so restore the multicast list */
	__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
}

static int sparx5_port_changeupper(struct net_device *dev,
				   struct netdev_notifier_changeupper_info *info)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct netlink_ext_ack *extack;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking)
			err = sparx5_port_bridge_join(port, info->upper_dev,
						      extack);
		else
			sparx5_port_bridge_leave(port, info->upper_dev);

		sparx5_vlan_port_apply(port->sparx5, port);
	}

	return err;
}

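/* Keep the MAC table in sync with the port address: learn it towards the
 * CPU port group when the interface comes up and forget it on down.
 */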
static int sparx5_port_add_addr(struct net_device *dev, bool up)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;
	u16 vid = port->pvid;

	if (up)
		sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, vid);
	else
		sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);

	return 0;
}

static int sparx5_netdevice_port_event(struct net_device *dev,
				       struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	int err = 0;

	if (!sparx5_netdevice_check(dev))
		return 0;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = sparx5_port_changeupper(dev, ptr);
		break;
	case NETDEV_PRE_UP:
		err = sparx5_port_add_addr(dev, true);
		break;
	case NETDEV_DOWN:
		err = sparx5_port_add_addr(dev, false);
		break;
	}

	return err;
}

static int sparx5_netdevice_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int ret;

	ret = sparx5_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(ret);
}

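/* Deferred FDB work. A non-sparx5 device here means the address belongs
 * to the host (e.g. the bridge device itself) and is learned towards the
 * CPU; otherwise it is learned on the originating front-panel port.
 */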
static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct sparx5_switchdev_event_work *switchdev_work =
		container_of(work, struct sparx5_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct sparx5_port *port;
	struct sparx5 *sparx5;
	bool host_addr;
	u16 vid;

	rtnl_lock();
	sparx5 = switchdev_work->sparx5;
	if (!sparx5_netdevice_check(dev)) {
		host_addr = true;
	} else {
		host_addr = false;
		port = netdev_priv(dev);
	}

	fdb_info = &switchdev_work->fdb_info;

	/* Use PVID 1 when the default_pvid is 0, to avoid
	 * collisions with non-bridged ports.
	 */
	if (fdb_info->vid == 0)
		vid = 1;
	else
		vid = fdb_info->vid;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (host_addr)
			sparx5_add_mact_entry(sparx5, dev, PGID_CPU,
					      fdb_info->addr, vid);
		else
			sparx5_add_mact_entry(sparx5, port->ndev, port->portno,
					      fdb_info->addr, vid);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		sparx5_del_mact_entry(sparx5, fdb_info->addr, vid);
		break;
	}

	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}

static void sparx5_schedule_work(struct work_struct *work)
{
	queue_work(sparx5_owq, work);
}

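/* Atomic switchdev notifier. Port attribute changes are handled in place;
 * FDB add/del events are copied (including the MAC address, which the
 * notifier info only points to) and deferred to the ordered workqueue.
 */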
static int sparx5_switchdev_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct sparx5_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct sparx5 *spx5;
	int err;

	spx5 = container_of(nb, struct sparx5, switchdev_nb);

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
		if (!switchdev_work)
			return NOTIFY_BAD;

		switchdev_work->dev = dev;
		switchdev_work->event = event;
		switchdev_work->sparx5 = spx5;

		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  sparx5_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		dev_hold(dev);

		sparx5_schedule_work(&switchdev_work->work);
		break;
	}

	return NOTIFY_DONE;
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

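/* A VLAN added on the bridge device itself only needs the per-VLAN
 * broadcast address learned towards the CPU; VLANs on member ports are
 * programmed into the port's VLAN configuration.
 */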
static int sparx5_handle_port_vlan_add(struct net_device *dev,
				       struct notifier_block *nb,
				       const struct switchdev_obj_port_vlan *v)
{
	struct sparx5_port *port = netdev_priv(dev);

	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		/* Flood broadcast to the CPU */
		sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast, v->vid);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	return sparx5_vlan_vid_add(port, v->vid,
				   v->flags & BRIDGE_VLAN_INFO_PVID,
				   v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
}

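/* The PGID (port group ID) table starts with one destination mask per
 * port; multicast entries are allocated above SPX5_PORTS. If the group
 * already has a MAC table entry, just add the port to its mask, otherwise
 * allocate a new PGID and learn the group address onto it.
 */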
static int sparx5_handle_port_mdb_add(struct net_device *dev,
				      struct notifier_block *nb,
				      const struct switchdev_obj_port_mdb *v)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *spx5 = port->sparx5;
	u16 pgid_idx, vid;
	u32 mact_entry;
	int res, err;

	/* When the bridge is VLAN unaware, the VLAN value is not parsed and
	 * we receive vid 0. Fall back to bridge VID 1.
	 */
	if (!br_vlan_enabled(spx5->hw_bridge_dev))
		vid = 1;
	else
		vid = v->vid;

	res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);
	if (res == 0) {
		pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);

		/* MC_IDX starts after the port masks in the PGID table */
		pgid_idx += SPX5_PORTS;
		sparx5_pgid_update_mask(port, pgid_idx, true);
	} else {
		err = sparx5_pgid_alloc_mcast(spx5, &pgid_idx);
		if (err) {
			netdev_warn(dev, "multicast pgid table full\n");
			return err;
		}
		sparx5_pgid_update_mask(port, pgid_idx, true);
		err = sparx5_mact_learn(spx5, pgid_idx, v->addr, vid);
		if (err) {
			netdev_warn(dev, "could not learn mac address %pM\n",
				    v->addr);
			sparx5_pgid_update_mask(port, pgid_idx, false);
			return err;
		}
	}

	return 0;
}

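/* Remove a multicast group completely: forget its MAC table entry and
 * release the PGID it occupied.
 */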
static int sparx5_mdb_del_entry(struct net_device *dev,
				struct sparx5 *spx5,
				const unsigned char mac[ETH_ALEN],
				const u16 vid,
				u16 pgid_idx)
{
	int err;

	err = sparx5_mact_forget(spx5, mac, vid);
	if (err) {
		netdev_warn(dev, "could not forget mac address %pM\n", mac);
		return err;
	}
	err = sparx5_pgid_free(spx5, pgid_idx);
	if (err) {
		netdev_err(dev, "attempted to free already freed pgid\n");
		return err;
	}
	return 0;
}

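/* Drop the port from the group's PGID mask; when the mask becomes empty,
 * no port is left in the group and the whole entry is removed.
 */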
static int sparx5_handle_port_mdb_del(struct net_device *dev,
				      struct notifier_block *nb,
				      const struct switchdev_obj_port_mdb *v)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *spx5 = port->sparx5;
	u32 mact_entry, pgid_entry[3];
	u16 pgid_idx, vid;
	int res, err;

	if (!br_vlan_enabled(spx5->hw_bridge_dev))
		vid = 1;
	else
		vid = v->vid;

	res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);
	if (res == 0) {
		pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);

		/* MC_IDX starts after the port masks in the PGID table */
		pgid_idx += SPX5_PORTS;
		sparx5_pgid_update_mask(port, pgid_idx, false);

		sparx5_pgid_read_mask(spx5, pgid_idx, pgid_entry);
		if (bitmap_empty((unsigned long *)pgid_entry, SPX5_PORTS)) {
			/* No ports are left in the MC group. Remove entry */
			err = sparx5_mdb_del_entry(dev, spx5, v->addr, vid,
						   pgid_idx);
			if (err)
				return err;
		}
	}

	return 0;
}

static int sparx5_handle_port_obj_add(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_add(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = sparx5_handle_port_mdb_add(dev, nb,
						 SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

static int sparx5_handle_port_vlan_del(struct net_device *dev,
				       struct notifier_block *nb,
				       u16 vid)
{
	struct sparx5_port *port = netdev_priv(dev);

	/* Deleting a VLAN on the bridge device itself? */
	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		sparx5_mact_forget(sparx5, dev->broadcast, vid);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	return sparx5_vlan_vid_del(port, vid);
}

static int sparx5_handle_port_obj_del(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_del(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = sparx5_handle_port_mdb_del(dev, nb,
						 SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

static int sparx5_switchdev_blocking_event(struct notifier_block *nb,
					   unsigned long event,
					   void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = sparx5_handle_port_obj_add(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = sparx5_handle_port_obj_del(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

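/* Register the netdevice, switchdev and blocking switchdev notifiers and
 * create the ordered workqueue used for deferred FDB events. Unwinding on
 * failure happens in reverse order of registration.
 */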
int sparx5_register_notifier_blocks(struct sparx5 *s5)
{
	int err;

	s5->netdevice_nb.notifier_call = sparx5_netdevice_event;
	err = register_netdevice_notifier(&s5->netdevice_nb);
	if (err)
		return err;

	s5->switchdev_nb.notifier_call = sparx5_switchdev_event;
	err = register_switchdev_notifier(&s5->switchdev_nb);
	if (err)
		goto err_switchdev_nb;

	s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event;
	err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	if (err)
		goto err_switchdev_blocking_nb;

	sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0);
	if (!sparx5_owq) {
		/* The blocking notifier is already registered at this point
		 * and must be unwound as well.
		 */
		err = -ENOMEM;
		goto err_switchdev_owq;
	}

	return 0;

err_switchdev_owq:
	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&s5->switchdev_nb);
err_switchdev_nb:
	unregister_netdevice_notifier(&s5->netdevice_nb);

	return err;
}

void sparx5_unregister_notifier_blocks(struct sparx5 *s5)
{
	destroy_workqueue(sparx5_owq);

	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	unregister_switchdev_notifier(&s5->switchdev_nb);
	unregister_netdevice_notifier(&s5->netdevice_nb);
}