/* xref: /openbmc/linux/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c (revision d434ee9dee6dc75984897f183df773427a68a1ff) */
1 // SPDX-License-Identifier: GPL-2.0+
2 /* Microchip Sparx5 Switch driver
3  *
4  * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
5  */
6 
7 #include <linux/if_bridge.h>
8 #include <net/switchdev.h>
9 
10 #include "sparx5_main_regs.h"
11 #include "sparx5_main.h"
12 
/* Ordered workqueue used to defer FDB notifier events to process context */
static struct workqueue_struct *sparx5_owq;

/* Deferred-work container for one FDB add/del switchdev notification.
 * Allocated in atomic context by sparx5_switchdev_event() and freed by
 * sparx5_switchdev_bridge_fdb_event_work() once processed.
 */
struct sparx5_switchdev_event_work {
	struct work_struct work;			/* queued on sparx5_owq */
	struct switchdev_notifier_fdb_info fdb_info;	/* copy of payload; addr is an owned clone */
	struct net_device *dev;				/* target netdev, reference held via dev_hold() */
	unsigned long event;				/* SWITCHDEV_FDB_{ADD,DEL}_TO_DEVICE */
};
21 
22 static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port,
23 					     struct switchdev_brport_flags flags)
24 {
25 	if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD))
26 		return -EINVAL;
27 
28 	return 0;
29 }
30 
31 static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
32 					  struct switchdev_brport_flags flags)
33 {
34 	int pgid;
35 
36 	if (flags.mask & BR_MCAST_FLOOD)
37 		for (pgid = PGID_MC_FLOOD; pgid <= PGID_IPV6_MC_CTRL; pgid++)
38 			sparx5_pgid_update_mask(port, pgid, !!(flags.val & BR_MCAST_FLOOD));
39 	if (flags.mask & BR_FLOOD)
40 		sparx5_pgid_update_mask(port, PGID_UC_FLOOD, !!(flags.val & BR_FLOOD));
41 	if (flags.mask & BR_BCAST_FLOOD)
42 		sparx5_pgid_update_mask(port, PGID_BCAST, !!(flags.val & BR_BCAST_FLOOD));
43 }
44 
45 static void sparx5_attr_stp_state_set(struct sparx5_port *port,
46 				      u8 state)
47 {
48 	struct sparx5 *sparx5 = port->sparx5;
49 
50 	if (!test_bit(port->portno, sparx5->bridge_mask)) {
51 		netdev_err(port->ndev,
52 			   "Controlling non-bridged port %d?\n", port->portno);
53 		return;
54 	}
55 
56 	switch (state) {
57 	case BR_STATE_FORWARDING:
58 		set_bit(port->portno, sparx5->bridge_fwd_mask);
59 		fallthrough;
60 	case BR_STATE_LEARNING:
61 		set_bit(port->portno, sparx5->bridge_lrn_mask);
62 		break;
63 
64 	default:
65 		/* All other states treated as blocking */
66 		clear_bit(port->portno, sparx5->bridge_fwd_mask);
67 		clear_bit(port->portno, sparx5->bridge_lrn_mask);
68 		break;
69 	}
70 
71 	/* apply the bridge_fwd_mask to all the ports */
72 	sparx5_update_fwd(sparx5);
73 }
74 
75 static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
76 					unsigned long ageing_clock_t)
77 {
78 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
79 	u32 ageing_time = jiffies_to_msecs(ageing_jiffies);
80 
81 	sparx5_set_ageing(port->sparx5, ageing_time);
82 }
83 
84 static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
85 				const struct switchdev_attr *attr,
86 				struct netlink_ext_ack *extack)
87 {
88 	struct sparx5_port *port = netdev_priv(dev);
89 
90 	switch (attr->id) {
91 	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
92 		return sparx5_port_attr_pre_bridge_flags(port,
93 							 attr->u.brport_flags);
94 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
95 		sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
96 		break;
97 	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
98 		sparx5_attr_stp_state_set(port, attr->u.stp_state);
99 		break;
100 	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
101 		sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
102 		break;
103 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
104 		port->vlan_aware = attr->u.vlan_filtering;
105 		sparx5_vlan_port_apply(port->sparx5, port);
106 		break;
107 	default:
108 		return -EOPNOTSUPP;
109 	}
110 
111 	return 0;
112 }
113 
114 static int sparx5_port_bridge_join(struct sparx5_port *port,
115 				   struct net_device *bridge,
116 				   struct netlink_ext_ack *extack)
117 {
118 	struct sparx5 *sparx5 = port->sparx5;
119 	struct net_device *ndev = port->ndev;
120 	int err;
121 
122 	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
123 		/* First bridged port */
124 		sparx5->hw_bridge_dev = bridge;
125 	else
126 		if (sparx5->hw_bridge_dev != bridge)
127 			/* This is adding the port to a second bridge, this is
128 			 * unsupported
129 			 */
130 			return -ENODEV;
131 
132 	set_bit(port->portno, sparx5->bridge_mask);
133 
134 	err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
135 					    false, extack);
136 	if (err)
137 		goto err_switchdev_offload;
138 
139 	/* Port enters in bridge mode therefor don't need to copy to CPU
140 	 * frames for multicast in case the bridge is not requesting them
141 	 */
142 	__dev_mc_unsync(ndev, sparx5_mc_unsync);
143 
144 	return 0;
145 
146 err_switchdev_offload:
147 	clear_bit(port->portno, sparx5->bridge_mask);
148 	return err;
149 }
150 
151 static void sparx5_port_bridge_leave(struct sparx5_port *port,
152 				     struct net_device *bridge)
153 {
154 	struct sparx5 *sparx5 = port->sparx5;
155 
156 	switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL);
157 
158 	clear_bit(port->portno, sparx5->bridge_mask);
159 	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
160 		sparx5->hw_bridge_dev = NULL;
161 
162 	/* Clear bridge vlan settings before updating the port settings */
163 	port->vlan_aware = 0;
164 	port->pvid = NULL_VID;
165 	port->vid = NULL_VID;
166 
167 	/* Port enters in host more therefore restore mc list */
168 	__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
169 }
170 
171 static int sparx5_port_changeupper(struct net_device *dev,
172 				   struct netdev_notifier_changeupper_info *info)
173 {
174 	struct sparx5_port *port = netdev_priv(dev);
175 	struct netlink_ext_ack *extack;
176 	int err = 0;
177 
178 	extack = netdev_notifier_info_to_extack(&info->info);
179 
180 	if (netif_is_bridge_master(info->upper_dev)) {
181 		if (info->linking)
182 			err = sparx5_port_bridge_join(port, info->upper_dev,
183 						      extack);
184 		else
185 			sparx5_port_bridge_leave(port, info->upper_dev);
186 
187 		sparx5_vlan_port_apply(port->sparx5, port);
188 	}
189 
190 	return err;
191 }
192 
193 static int sparx5_port_add_addr(struct net_device *dev, bool up)
194 {
195 	struct sparx5_port *port = netdev_priv(dev);
196 	struct sparx5 *sparx5 = port->sparx5;
197 	u16 vid = port->pvid;
198 
199 	if (up)
200 		sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, vid);
201 	else
202 		sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);
203 
204 	return 0;
205 }
206 
207 static int sparx5_netdevice_port_event(struct net_device *dev,
208 				       struct notifier_block *nb,
209 				       unsigned long event, void *ptr)
210 {
211 	int err = 0;
212 
213 	if (!sparx5_netdevice_check(dev))
214 		return 0;
215 
216 	switch (event) {
217 	case NETDEV_CHANGEUPPER:
218 		err = sparx5_port_changeupper(dev, ptr);
219 		break;
220 	case NETDEV_PRE_UP:
221 		err = sparx5_port_add_addr(dev, true);
222 		break;
223 	case NETDEV_DOWN:
224 		err = sparx5_port_add_addr(dev, false);
225 		break;
226 	}
227 
228 	return err;
229 }
230 
/* Netdev notifier entry point: translate the handler's errno into a
 * notifier return code.
 */
static int sparx5_netdevice_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = sparx5_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(err);
}
241 
/* Process a deferred FDB add/del event in process context.
 * Runs on sparx5_owq; takes rtnl_lock to serialize with bridge and port
 * reconfiguration. Always frees the work item and the cloned MAC address,
 * and drops the netdev reference taken in sparx5_switchdev_event().
 */
static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct sparx5_switchdev_event_work *switchdev_work =
		container_of(work, struct sparx5_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct sparx5_port *port;
	struct sparx5 *sparx5;

	rtnl_lock();
	if (!sparx5_netdevice_check(dev))
		goto out;

	port = netdev_priv(dev);
	sparx5 = port->sparx5;

	fdb_info = &switchdev_work->fdb_info;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		/* Only offload entries explicitly added by the user */
		if (!fdb_info->added_by_user)
			break;
		sparx5_add_mact_entry(sparx5, port, fdb_info->addr,
				      fdb_info->vid);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (!fdb_info->added_by_user)
			break;
		sparx5_del_mact_entry(sparx5, fdb_info->addr, fdb_info->vid);
		break;
	}

out:
	rtnl_unlock();
	/* Release everything allocated/held by sparx5_switchdev_event() */
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}
280 
/* Defer switchdev work to the driver's ordered workqueue so it runs in
 * process context (the notifier may fire in atomic context)
 */
static void sparx5_schedule_work(struct work_struct *work)
{
	queue_work(sparx5_owq, work);
}
285 
/* Atomic switchdev notifier: handle port attribute sets inline and defer
 * FDB add/del events to process context via sparx5_owq.
 */
static int sparx5_switchdev_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct sparx5_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fallthrough;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		/* May run in atomic context, hence GFP_ATOMIC */
		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
		if (!switchdev_work)
			return NOTIFY_BAD;

		switchdev_work->dev = dev;
		switchdev_work->event = event;

		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  sparx5_switchdev_bridge_fdb_event_work);
		/* Snapshot the notifier payload; its addr pointer is only
		 * valid for the duration of this call, so clone the MAC
		 * into an owned buffer below
		 */
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Keep dev alive until the work item runs; released there */
		dev_hold(dev);

		sparx5_schedule_work(&switchdev_work->work);
		break;
	}

	return NOTIFY_DONE;
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
335 
336 static void sparx5_sync_port_dev_addr(struct sparx5 *sparx5,
337 				      struct sparx5_port *port,
338 				      u16 vid, bool add)
339 {
340 	if (!port ||
341 	    !test_bit(port->portno, sparx5->bridge_mask))
342 		return; /* Skip null/host interfaces */
343 
344 	/* Bridge connects to vid? */
345 	if (add) {
346 		/* Add port MAC address from the VLAN */
347 		sparx5_mact_learn(sparx5, PGID_CPU,
348 				  port->ndev->dev_addr, vid);
349 	} else {
350 		/* Control port addr visibility depending on
351 		 * port VLAN connectivity.
352 		 */
353 		if (test_bit(port->portno, sparx5->vlan_mask[vid]))
354 			sparx5_mact_learn(sparx5, PGID_CPU,
355 					  port->ndev->dev_addr, vid);
356 		else
357 			sparx5_mact_forget(sparx5,
358 					   port->ndev->dev_addr, vid);
359 	}
360 }
361 
362 static void sparx5_sync_bridge_dev_addr(struct net_device *dev,
363 					struct sparx5 *sparx5,
364 					u16 vid, bool add)
365 {
366 	int i;
367 
368 	/* First, handle bridge address'es */
369 	if (add) {
370 		sparx5_mact_learn(sparx5, PGID_CPU, dev->dev_addr,
371 				  vid);
372 		sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
373 				  vid);
374 	} else {
375 		sparx5_mact_forget(sparx5, dev->dev_addr, vid);
376 		sparx5_mact_forget(sparx5, dev->broadcast, vid);
377 	}
378 
379 	/* Now look at bridged ports */
380 	for (i = 0; i < SPX5_PORTS; i++)
381 		sparx5_sync_port_dev_addr(sparx5, sparx5->ports[i], vid, add);
382 }
383 
384 static int sparx5_handle_port_vlan_add(struct net_device *dev,
385 				       struct notifier_block *nb,
386 				       const struct switchdev_obj_port_vlan *v)
387 {
388 	struct sparx5_port *port = netdev_priv(dev);
389 
390 	if (netif_is_bridge_master(dev)) {
391 		struct sparx5 *sparx5 =
392 			container_of(nb, struct sparx5,
393 				     switchdev_blocking_nb);
394 
395 		sparx5_sync_bridge_dev_addr(dev, sparx5, v->vid, true);
396 		return 0;
397 	}
398 
399 	if (!sparx5_netdevice_check(dev))
400 		return -EOPNOTSUPP;
401 
402 	return sparx5_vlan_vid_add(port, v->vid,
403 				  v->flags & BRIDGE_VLAN_INFO_PVID,
404 				  v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
405 }
406 
407 static int sparx5_handle_port_obj_add(struct net_device *dev,
408 				      struct notifier_block *nb,
409 				      struct switchdev_notifier_port_obj_info *info)
410 {
411 	const struct switchdev_obj *obj = info->obj;
412 	int err;
413 
414 	switch (obj->id) {
415 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
416 		err = sparx5_handle_port_vlan_add(dev, nb,
417 						  SWITCHDEV_OBJ_PORT_VLAN(obj));
418 		break;
419 	default:
420 		err = -EOPNOTSUPP;
421 		break;
422 	}
423 
424 	info->handled = true;
425 	return err;
426 }
427 
428 static int sparx5_handle_port_vlan_del(struct net_device *dev,
429 				       struct notifier_block *nb,
430 				       u16 vid)
431 {
432 	struct sparx5_port *port = netdev_priv(dev);
433 	int ret;
434 
435 	/* Master bridge? */
436 	if (netif_is_bridge_master(dev)) {
437 		struct sparx5 *sparx5 =
438 			container_of(nb, struct sparx5,
439 				     switchdev_blocking_nb);
440 
441 		sparx5_sync_bridge_dev_addr(dev, sparx5, vid, false);
442 		return 0;
443 	}
444 
445 	if (!sparx5_netdevice_check(dev))
446 		return -EOPNOTSUPP;
447 
448 	ret = sparx5_vlan_vid_del(port, vid);
449 	if (ret)
450 		return ret;
451 
452 	/* Delete the port MAC address with the matching VLAN information */
453 	sparx5_mact_forget(port->sparx5, port->ndev->dev_addr, vid);
454 
455 	return 0;
456 }
457 
458 static int sparx5_handle_port_obj_del(struct net_device *dev,
459 				      struct notifier_block *nb,
460 				      struct switchdev_notifier_port_obj_info *info)
461 {
462 	const struct switchdev_obj *obj = info->obj;
463 	int err;
464 
465 	switch (obj->id) {
466 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
467 		err = sparx5_handle_port_vlan_del(dev, nb,
468 						  SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
469 		break;
470 	default:
471 		err = -EOPNOTSUPP;
472 		break;
473 	}
474 
475 	info->handled = true;
476 	return err;
477 }
478 
479 static int sparx5_switchdev_blocking_event(struct notifier_block *nb,
480 					   unsigned long event,
481 					   void *ptr)
482 {
483 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
484 	int err;
485 
486 	switch (event) {
487 	case SWITCHDEV_PORT_OBJ_ADD:
488 		err = sparx5_handle_port_obj_add(dev, nb, ptr);
489 		return notifier_from_errno(err);
490 	case SWITCHDEV_PORT_OBJ_DEL:
491 		err = sparx5_handle_port_obj_del(dev, nb, ptr);
492 		return notifier_from_errno(err);
493 	case SWITCHDEV_PORT_ATTR_SET:
494 		err = switchdev_handle_port_attr_set(dev, ptr,
495 						     sparx5_netdevice_check,
496 						     sparx5_port_attr_set);
497 		return notifier_from_errno(err);
498 	}
499 
500 	return NOTIFY_DONE;
501 }
502 
503 int sparx5_register_notifier_blocks(struct sparx5 *s5)
504 {
505 	int err;
506 
507 	s5->netdevice_nb.notifier_call = sparx5_netdevice_event;
508 	err = register_netdevice_notifier(&s5->netdevice_nb);
509 	if (err)
510 		return err;
511 
512 	s5->switchdev_nb.notifier_call = sparx5_switchdev_event;
513 	err = register_switchdev_notifier(&s5->switchdev_nb);
514 	if (err)
515 		goto err_switchdev_nb;
516 
517 	s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event;
518 	err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
519 	if (err)
520 		goto err_switchdev_blocking_nb;
521 
522 	sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0);
523 	if (!sparx5_owq) {
524 		err = -ENOMEM;
525 		goto err_switchdev_blocking_nb;
526 	}
527 
528 	return 0;
529 
530 err_switchdev_blocking_nb:
531 	unregister_switchdev_notifier(&s5->switchdev_nb);
532 err_switchdev_nb:
533 	unregister_netdevice_notifier(&s5->netdevice_nb);
534 
535 	return err;
536 }
537 
/* Tear down what sparx5_register_notifier_blocks() set up.
 * The workqueue is destroyed first (destroy_workqueue() drains pending
 * work), then the notifiers are unregistered in reverse registration
 * order.
 */
void sparx5_unregister_notifier_blocks(struct sparx5 *s5)
{
	destroy_workqueue(sparx5_owq);

	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	unregister_switchdev_notifier(&s5->switchdev_nb);
	unregister_netdevice_notifier(&s5->netdevice_nb);
}
546