// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/if_bridge.h>
#include <net/switchdev.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

static struct workqueue_struct *sparx5_owq;

struct sparx5_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	unsigned long event;
};

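/* Apply bridge port flag changes to the switch core. Only BR_MCAST_FLOOD
 * is offloaded: it controls the port's membership of the multicast flood
 * mask (PGID_MC_FLOOD).
 */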
static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
					  struct switchdev_brport_flags flags)
{
	if (flags.mask & BR_MCAST_FLOOD)
		/* Honour flags.val so the flag can be cleared as well as set */
		sparx5_pgid_update_mask(port, PGID_MC_FLOOD,
					!!(flags.val & BR_MCAST_FLOOD));
}

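/* Translate a bridge STP state into the per-port forward and learn bits
 * and push the result to the switch core. Any state other than
 * forwarding or learning is treated as blocking.
 */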
static void sparx5_attr_stp_state_set(struct sparx5_port *port,
				      u8 state)
{
	struct sparx5 *sparx5 = port->sparx5;

	if (!test_bit(port->portno, sparx5->bridge_mask)) {
		netdev_err(port->ndev,
			   "Controlling non-bridged port %d?\n", port->portno);
		return;
	}

	switch (state) {
	case BR_STATE_FORWARDING:
		set_bit(port->portno, sparx5->bridge_fwd_mask);
		fallthrough;
	case BR_STATE_LEARNING:
		set_bit(port->portno, sparx5->bridge_lrn_mask);
		break;

	default:
		/* All other states treated as blocking */
		clear_bit(port->portno, sparx5->bridge_fwd_mask);
		clear_bit(port->portno, sparx5->bridge_lrn_mask);
		break;
	}

	/* apply the bridge_fwd_mask to all the ports */
	sparx5_update_fwd(sparx5);
}

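/* Convert the bridge ageing time from clock_t to milliseconds before
 * handing it to the switch core.
 */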
static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
					unsigned long ageing_clock_t)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies);

	sparx5_set_ageing(port->sparx5, ageing_time);
}

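/* Dispatch switchdev port attribute changes (bridge flags, STP state,
 * ageing time, VLAN filtering) to the handlers above.
 */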
static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
				const struct switchdev_attr *attr,
				struct netlink_ext_ack *extack)
{
	struct sparx5_port *port = netdev_priv(dev);

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		sparx5_attr_stp_state_set(port, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		port->vlan_aware = attr->u.vlan_filtering;
		sparx5_vlan_port_apply(port->sparx5, port);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

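/* Attach a port to the (single) hardware-offloaded bridge. The first
 * port to join selects the bridge; joining a second bridge is rejected.
 */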
static int sparx5_port_bridge_join(struct sparx5_port *port,
				   struct net_device *bridge,
				   struct netlink_ext_ack *extack)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct net_device *ndev = port->ndev;
	int err;

	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		/* First bridged port */
		sparx5->hw_bridge_dev = bridge;
	else if (sparx5->hw_bridge_dev != bridge)
		/* Adding a port to a second bridge is not supported */
		return -ENODEV;

	set_bit(port->portno, sparx5->bridge_mask);

	err = switchdev_bridge_port_offload(ndev, ndev, extack);
	if (err)
		goto err_switchdev_offload;

	/* The port is now in bridge mode, so multicast frames need not be
	 * copied to the CPU unless the bridge requests them
	 */
	__dev_mc_unsync(ndev, sparx5_mc_unsync);

	return 0;

err_switchdev_offload:
	clear_bit(port->portno, sparx5->bridge_mask);
	return err;
}

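/* Detach a port from the hardware bridge: drop the offload, clear the
 * bridge VLAN state and restore host-mode multicast handling.
 */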
static void sparx5_port_bridge_leave(struct sparx5_port *port,
				     struct net_device *bridge)
{
	struct sparx5 *sparx5 = port->sparx5;

	switchdev_bridge_port_unoffload(port->ndev);

	clear_bit(port->portno, sparx5->bridge_mask);
	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		sparx5->hw_bridge_dev = NULL;

	/* Clear bridge vlan settings before updating the port settings */
	port->vlan_aware = 0;
	port->pvid = NULL_VID;
	port->vid = NULL_VID;

	/* The port returns to host mode, therefore restore the mc list */
	__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
}

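/* Handle NETDEV_CHANGEUPPER: join or leave the bridge and re-apply the
 * port VLAN configuration afterwards.
 */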
static int sparx5_port_changeupper(struct net_device *dev,
				   struct netdev_notifier_changeupper_info *info)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct netlink_ext_ack *extack;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking)
			err = sparx5_port_bridge_join(port, info->upper_dev,
						      extack);
		else
			sparx5_port_bridge_leave(port, info->upper_dev);

		sparx5_vlan_port_apply(port->sparx5, port);
	}

	return err;
}

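/* Learn (on up) or forget (on down) the port MAC address on the CPU
 * port group for the port's PVID.
 */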
static int sparx5_port_add_addr(struct net_device *dev, bool up)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;
	u16 vid = port->pvid;

	if (up)
		sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, vid);
	else
		sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);

	return 0;
}

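/* Per-port netdevice event handler; only acts on sparx5 ports. */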
static int sparx5_netdevice_port_event(struct net_device *dev,
				       struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	int err = 0;

	if (!sparx5_netdevice_check(dev))
		return 0;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = sparx5_port_changeupper(dev, ptr);
		break;
	case NETDEV_PRE_UP:
		err = sparx5_port_add_addr(dev, true);
		break;
	case NETDEV_DOWN:
		err = sparx5_port_add_addr(dev, false);
		break;
	}

	return err;
}

static int sparx5_netdevice_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int ret = 0;

	ret = sparx5_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(ret);
}

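/* Deferred handler for FDB add/del notifications, run on the ordered
 * workqueue. Updates the MAC table under rtnl_lock and then releases
 * the resources taken in sparx5_switchdev_event().
 */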
static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct sparx5_switchdev_event_work *switchdev_work =
		container_of(work, struct sparx5_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct sparx5_port *port;
	struct sparx5 *sparx5;

	rtnl_lock();
	if (!sparx5_netdevice_check(dev))
		goto out;

	port = netdev_priv(dev);
	sparx5 = port->sparx5;

	fdb_info = &switchdev_work->fdb_info;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (!fdb_info->added_by_user)
			break;
		sparx5_add_mact_entry(sparx5, port, fdb_info->addr,
				      fdb_info->vid);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (!fdb_info->added_by_user)
			break;
		sparx5_del_mact_entry(sparx5, fdb_info->addr, fdb_info->vid);
		break;
	}

out:
	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}

static void sparx5_schedule_work(struct work_struct *work)
{
	queue_work(sparx5_owq, work);
}

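/* Atomic switchdev notifier: handle port attribute changes directly and
 * defer FDB add/del events to the ordered workqueue, copying the FDB
 * info and holding the device until the work item has run.
 */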
static int sparx5_switchdev_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct sparx5_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fallthrough;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
		if (!switchdev_work)
			return NOTIFY_BAD;

		switchdev_work->dev = dev;
		switchdev_work->event = event;

		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  sparx5_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		dev_hold(dev);

		sparx5_schedule_work(&switchdev_work->work);
		break;
	}

	return NOTIFY_DONE;
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

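/* Keep a bridged port's MAC address in sync with its VLAN membership:
 * learn it towards the CPU while the port is in the VLAN, forget it
 * otherwise.
 */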
static void sparx5_sync_port_dev_addr(struct sparx5 *sparx5,
				      struct sparx5_port *port,
				      u16 vid, bool add)
{
	if (!port ||
	    !test_bit(port->portno, sparx5->bridge_mask))
		return; /* Skip null/host interfaces */

	/* Bridge connects to vid? */
	if (add) {
		/* Add the port MAC address to the VLAN */
		sparx5_mact_learn(sparx5, PGID_CPU,
				  port->ndev->dev_addr, vid);
	} else {
		/* Control port addr visibility depending on
		 * port VLAN connectivity.
		 */
		if (test_bit(port->portno, sparx5->vlan_mask[vid]))
			sparx5_mact_learn(sparx5, PGID_CPU,
					  port->ndev->dev_addr, vid);
		else
			sparx5_mact_forget(sparx5,
					   port->ndev->dev_addr, vid);
	}
}

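/* Sync the bridge device's own unicast and broadcast addresses for a
 * VLAN, then update every bridged port's address for that VLAN.
 */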
static void sparx5_sync_bridge_dev_addr(struct net_device *dev,
					struct sparx5 *sparx5,
					u16 vid, bool add)
{
	int i;

	/* First, handle the bridge addresses */
	if (add) {
		sparx5_mact_learn(sparx5, PGID_CPU, dev->dev_addr,
				  vid);
		sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
				  vid);
	} else {
		sparx5_mact_forget(sparx5, dev->dev_addr, vid);
		sparx5_mact_forget(sparx5, dev->broadcast, vid);
	}

	/* Now look at bridged ports */
	for (i = 0; i < SPX5_PORTS; i++)
		sparx5_sync_port_dev_addr(sparx5, sparx5->ports[i], vid, add);
}

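/* Offload a VLAN add: for the bridge device itself, sync the bridge
 * addresses for bridge VLAN entries; for a sparx5 port, program the
 * VLAN with its PVID and untagged flags.
 */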
static int sparx5_handle_port_vlan_add(struct net_device *dev,
				       struct notifier_block *nb,
				       const struct switchdev_obj_port_vlan *v)
{
	struct sparx5_port *port;

	if (netif_is_bridge_master(dev)) {
		if (v->flags & BRIDGE_VLAN_INFO_BRENTRY) {
			struct sparx5 *sparx5 =
				container_of(nb, struct sparx5,
					     switchdev_blocking_nb);

			sparx5_sync_bridge_dev_addr(dev, sparx5, v->vid, true);
		}
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	port = netdev_priv(dev);
	return sparx5_vlan_vid_add(port, v->vid,
				   v->flags & BRIDGE_VLAN_INFO_PVID,
				   v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
}

static int sparx5_handle_port_obj_add(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_add(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

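/* Offload a VLAN delete: sync bridge addresses for the bridge device,
 * or remove the VLAN and the matching port MAC entry for a sparx5 port.
 */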
static int sparx5_handle_port_vlan_del(struct net_device *dev,
				       struct notifier_block *nb,
				       u16 vid)
{
	struct sparx5_port *port;
	int ret;

	/* Master bridge? */
	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		sparx5_sync_bridge_dev_addr(dev, sparx5, vid, false);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	port = netdev_priv(dev);
	ret = sparx5_vlan_vid_del(port, vid);
	if (ret)
		return ret;

	/* Delete the port MAC address with the matching VLAN information */
	sparx5_mact_forget(port->sparx5, port->ndev->dev_addr, vid);

	return 0;
}

static int sparx5_handle_port_obj_del(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_del(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

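/* Blocking switchdev notifier: port object add/del and attribute set. */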
static int sparx5_switchdev_blocking_event(struct notifier_block *nb,
					   unsigned long event,
					   void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = sparx5_handle_port_obj_add(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = sparx5_handle_port_obj_del(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

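/* Register the netdevice, switchdev and blocking switchdev notifiers
 * and allocate the ordered workqueue used for deferred FDB events.
 * Unwinds in reverse order on failure.
 */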
int sparx5_register_notifier_blocks(struct sparx5 *s5)
{
	int err;

	s5->netdevice_nb.notifier_call = sparx5_netdevice_event;
	err = register_netdevice_notifier(&s5->netdevice_nb);
	if (err)
		return err;

	s5->switchdev_nb.notifier_call = sparx5_switchdev_event;
	err = register_switchdev_notifier(&s5->switchdev_nb);
	if (err)
		goto err_switchdev_nb;

	s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event;
	err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	if (err)
		goto err_switchdev_blocking_nb;

	sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0);
	if (!sparx5_owq) {
		err = -ENOMEM;
		goto err_owq;
	}

	return 0;

err_owq:
	/* The blocking notifier must also be unwound on this path */
	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&s5->switchdev_nb);
err_switchdev_nb:
	unregister_netdevice_notifier(&s5->netdevice_nb);

	return err;
}

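/* Tear down the workqueue and notifiers set up by
 * sparx5_register_notifier_blocks().
 */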
void sparx5_unregister_notifier_blocks(struct sparx5 *s5)
{
	destroy_workqueue(sparx5_owq);

	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	unregister_switchdev_notifier(&s5->switchdev_nb);
	unregister_netdevice_notifier(&s5->netdevice_nb);
}