1 // SPDX-License-Identifier: GPL-2.0+
2 /* Microchip Sparx5 Switch driver
3  *
4  * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
5  */
6 
7 #include <linux/if_bridge.h>
8 #include <net/switchdev.h>
9 
10 #include "sparx5_main_regs.h"
11 #include "sparx5_main.h"
12 
/* Ordered workqueue used to defer FDB events to process context */
static struct workqueue_struct *sparx5_owq;

/* Deferred FDB event: carries a private copy of the switchdev notifier
 * data; processed by sparx5_switchdev_bridge_fdb_event_work(), which
 * frees fdb_info.addr, the work item and the held dev reference.
 */
struct sparx5_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	unsigned long event;
};
21 
22 static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
23 					  struct switchdev_brport_flags flags)
24 {
25 	if (flags.mask & BR_MCAST_FLOOD)
26 		sparx5_pgid_update_mask(port, PGID_MC_FLOOD, true);
27 }
28 
/* Apply an STP state change by updating the bridge forward/learning
 * port bitmaps and re-applying them to hardware.
 */
static void sparx5_attr_stp_state_set(struct sparx5_port *port,
				      u8 state)
{
	struct sparx5 *sparx5 = port->sparx5;

	/* STP state only makes sense for ports that joined the bridge */
	if (!test_bit(port->portno, sparx5->bridge_mask)) {
		netdev_err(port->ndev,
			   "Controlling non-bridged port %d?\n", port->portno);
		return;
	}

	switch (state) {
	case BR_STATE_FORWARDING:
		set_bit(port->portno, sparx5->bridge_fwd_mask);
		fallthrough;
	case BR_STATE_LEARNING:
		/* NOTE(review): a FORWARDING -> LEARNING transition leaves
		 * the fwd bit set here - confirm that transition cannot
		 * occur or that it is harmless.
		 */
		set_bit(port->portno, sparx5->bridge_lrn_mask);
		break;

	default:
		/* All other states treated as blocking */
		clear_bit(port->portno, sparx5->bridge_fwd_mask);
		clear_bit(port->portno, sparx5->bridge_lrn_mask);
		break;
	}

	/* apply the bridge_fwd_mask to all the ports */
	sparx5_update_fwd(sparx5);
}
58 
59 static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
60 					unsigned long ageing_clock_t)
61 {
62 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
63 	u32 ageing_time = jiffies_to_msecs(ageing_jiffies);
64 
65 	sparx5_set_ageing(port->sparx5, ageing_time);
66 }
67 
68 static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
69 				const struct switchdev_attr *attr,
70 				struct netlink_ext_ack *extack)
71 {
72 	struct sparx5_port *port = netdev_priv(dev);
73 
74 	switch (attr->id) {
75 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
76 		sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
77 		break;
78 	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
79 		sparx5_attr_stp_state_set(port, attr->u.stp_state);
80 		break;
81 	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
82 		sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
83 		break;
84 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
85 		port->vlan_aware = attr->u.vlan_filtering;
86 		sparx5_vlan_port_apply(port->sparx5, port);
87 		break;
88 	default:
89 		return -EOPNOTSUPP;
90 	}
91 
92 	return 0;
93 }
94 
95 static int sparx5_port_bridge_join(struct sparx5_port *port,
96 				   struct net_device *bridge)
97 {
98 	struct sparx5 *sparx5 = port->sparx5;
99 
100 	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
101 		/* First bridged port */
102 		sparx5->hw_bridge_dev = bridge;
103 	else
104 		if (sparx5->hw_bridge_dev != bridge)
105 			/* This is adding the port to a second bridge, this is
106 			 * unsupported
107 			 */
108 			return -ENODEV;
109 
110 	set_bit(port->portno, sparx5->bridge_mask);
111 
112 	/* Port enters in bridge mode therefor don't need to copy to CPU
113 	 * frames for multicast in case the bridge is not requesting them
114 	 */
115 	__dev_mc_unsync(port->ndev, sparx5_mc_unsync);
116 
117 	return 0;
118 }
119 
120 static void sparx5_port_bridge_leave(struct sparx5_port *port,
121 				     struct net_device *bridge)
122 {
123 	struct sparx5 *sparx5 = port->sparx5;
124 
125 	clear_bit(port->portno, sparx5->bridge_mask);
126 	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
127 		sparx5->hw_bridge_dev = NULL;
128 
129 	/* Clear bridge vlan settings before updating the port settings */
130 	port->vlan_aware = 0;
131 	port->pvid = NULL_VID;
132 	port->vid = NULL_VID;
133 
134 	/* Port enters in host more therefore restore mc list */
135 	__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
136 }
137 
138 static int sparx5_port_changeupper(struct net_device *dev,
139 				   struct netdev_notifier_changeupper_info *info)
140 {
141 	struct sparx5_port *port = netdev_priv(dev);
142 	int err = 0;
143 
144 	if (netif_is_bridge_master(info->upper_dev)) {
145 		if (info->linking)
146 			err = sparx5_port_bridge_join(port, info->upper_dev);
147 		else
148 			sparx5_port_bridge_leave(port, info->upper_dev);
149 
150 		sparx5_vlan_port_apply(port->sparx5, port);
151 	}
152 
153 	return err;
154 }
155 
156 static int sparx5_port_add_addr(struct net_device *dev, bool up)
157 {
158 	struct sparx5_port *port = netdev_priv(dev);
159 	struct sparx5 *sparx5 = port->sparx5;
160 	u16 vid = port->pvid;
161 
162 	if (up)
163 		sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, vid);
164 	else
165 		sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);
166 
167 	return 0;
168 }
169 
170 static int sparx5_netdevice_port_event(struct net_device *dev,
171 				       struct notifier_block *nb,
172 				       unsigned long event, void *ptr)
173 {
174 	int err = 0;
175 
176 	if (!sparx5_netdevice_check(dev))
177 		return 0;
178 
179 	switch (event) {
180 	case NETDEV_CHANGEUPPER:
181 		err = sparx5_port_changeupper(dev, ptr);
182 		break;
183 	case NETDEV_PRE_UP:
184 		err = sparx5_port_add_addr(dev, true);
185 		break;
186 	case NETDEV_DOWN:
187 		err = sparx5_port_add_addr(dev, false);
188 		break;
189 	}
190 
191 	return err;
192 }
193 
/* Netdevice notifier entry point: translate the port-event result into
 * a notifier return value.
 */
static int sparx5_netdevice_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	return notifier_from_errno(sparx5_netdevice_port_event(dev, nb,
							       event, ptr));
}
204 
/* Deferred FDB work item: runs on the ordered workqueue with a private
 * copy of the notifier's fdb_info (see sparx5_switchdev_event()).
 * Owns and releases fdb_info.addr, the work item itself and the held
 * device reference.
 */
static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct sparx5_switchdev_event_work *switchdev_work =
		container_of(work, struct sparx5_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct sparx5_port *port;
	struct sparx5 *sparx5;

	/* rtnl keeps the device/port state stable while we program it */
	rtnl_lock();
	if (!sparx5_netdevice_check(dev))
		goto out;

	port = netdev_priv(dev);
	sparx5 = port->sparx5;

	fdb_info = &switchdev_work->fdb_info;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		/* Only entries explicitly added by user space are offloaded
		 * here; dynamically learned ones are handled by hardware.
		 */
		if (!fdb_info->added_by_user)
			break;
		sparx5_add_mact_entry(sparx5, port, fdb_info->addr,
				      fdb_info->vid);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (!fdb_info->added_by_user)
			break;
		sparx5_del_mact_entry(sparx5, fdb_info->addr, fdb_info->vid);
		break;
	}

out:
	rtnl_unlock();
	/* Release everything taken when the work was scheduled */
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}
243 
/* Queue deferred switchdev work on the driver's ordered workqueue so
 * FDB events are processed in the order they were received.
 */
static void sparx5_schedule_work(struct work_struct *work)
{
	queue_work(sparx5_owq, work);
}
248 
/* Atomic switchdev notifier handler.
 *
 * Port attribute changes are handled synchronously.  FDB add/del events
 * are deep-copied into a work item and deferred to the ordered
 * workqueue, since the notifier data is only valid for the duration of
 * this call and MAC table updates are done in process context.
 */
static int sparx5_switchdev_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct sparx5_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fallthrough;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		/* GFP_ATOMIC: this notifier may run in atomic context */
		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
		if (!switchdev_work)
			return NOTIFY_BAD;

		switchdev_work->dev = dev;
		switchdev_work->event = event;

		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  sparx5_switchdev_bridge_fdb_event_work);
		/* Copy the fdb_info, then give it its own copy of the MAC
		 * address - the original belongs to the notifier caller.
		 */
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Reference is dropped by the work function */
		dev_hold(dev);

		sparx5_schedule_work(&switchdev_work->work);
		break;
	}

	return NOTIFY_DONE;
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
298 
299 static void sparx5_sync_port_dev_addr(struct sparx5 *sparx5,
300 				      struct sparx5_port *port,
301 				      u16 vid, bool add)
302 {
303 	if (!port ||
304 	    !test_bit(port->portno, sparx5->bridge_mask))
305 		return; /* Skip null/host interfaces */
306 
307 	/* Bridge connects to vid? */
308 	if (add) {
309 		/* Add port MAC address from the VLAN */
310 		sparx5_mact_learn(sparx5, PGID_CPU,
311 				  port->ndev->dev_addr, vid);
312 	} else {
313 		/* Control port addr visibility depending on
314 		 * port VLAN connectivity.
315 		 */
316 		if (test_bit(port->portno, sparx5->vlan_mask[vid]))
317 			sparx5_mact_learn(sparx5, PGID_CPU,
318 					  port->ndev->dev_addr, vid);
319 		else
320 			sparx5_mact_forget(sparx5,
321 					   port->ndev->dev_addr, vid);
322 	}
323 }
324 
325 static void sparx5_sync_bridge_dev_addr(struct net_device *dev,
326 					struct sparx5 *sparx5,
327 					u16 vid, bool add)
328 {
329 	int i;
330 
331 	/* First, handle bridge address'es */
332 	if (add) {
333 		sparx5_mact_learn(sparx5, PGID_CPU, dev->dev_addr,
334 				  vid);
335 		sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
336 				  vid);
337 	} else {
338 		sparx5_mact_forget(sparx5, dev->dev_addr, vid);
339 		sparx5_mact_forget(sparx5, dev->broadcast, vid);
340 	}
341 
342 	/* Now look at bridged ports */
343 	for (i = 0; i < SPX5_PORTS; i++)
344 		sparx5_sync_port_dev_addr(sparx5, sparx5->ports[i], vid, add);
345 }
346 
347 static int sparx5_handle_port_vlan_add(struct net_device *dev,
348 				       struct notifier_block *nb,
349 				       const struct switchdev_obj_port_vlan *v)
350 {
351 	struct sparx5_port *port = netdev_priv(dev);
352 
353 	if (netif_is_bridge_master(dev)) {
354 		if (v->flags & BRIDGE_VLAN_INFO_BRENTRY) {
355 			struct sparx5 *sparx5 =
356 				container_of(nb, struct sparx5,
357 					     switchdev_blocking_nb);
358 
359 			sparx5_sync_bridge_dev_addr(dev, sparx5, v->vid, true);
360 		}
361 		return 0;
362 	}
363 
364 	if (!sparx5_netdevice_check(dev))
365 		return -EOPNOTSUPP;
366 
367 	return sparx5_vlan_vid_add(port, v->vid,
368 				  v->flags & BRIDGE_VLAN_INFO_PVID,
369 				  v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
370 }
371 
372 static int sparx5_handle_port_obj_add(struct net_device *dev,
373 				      struct notifier_block *nb,
374 				      struct switchdev_notifier_port_obj_info *info)
375 {
376 	const struct switchdev_obj *obj = info->obj;
377 	int err;
378 
379 	switch (obj->id) {
380 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
381 		err = sparx5_handle_port_vlan_add(dev, nb,
382 						  SWITCHDEV_OBJ_PORT_VLAN(obj));
383 		break;
384 	default:
385 		err = -EOPNOTSUPP;
386 		break;
387 	}
388 
389 	info->handled = true;
390 	return err;
391 }
392 
393 static int sparx5_handle_port_vlan_del(struct net_device *dev,
394 				       struct notifier_block *nb,
395 				       u16 vid)
396 {
397 	struct sparx5_port *port = netdev_priv(dev);
398 	int ret;
399 
400 	/* Master bridge? */
401 	if (netif_is_bridge_master(dev)) {
402 		struct sparx5 *sparx5 =
403 			container_of(nb, struct sparx5,
404 				     switchdev_blocking_nb);
405 
406 		sparx5_sync_bridge_dev_addr(dev, sparx5, vid, false);
407 		return 0;
408 	}
409 
410 	if (!sparx5_netdevice_check(dev))
411 		return -EOPNOTSUPP;
412 
413 	ret = sparx5_vlan_vid_del(port, vid);
414 	if (ret)
415 		return ret;
416 
417 	/* Delete the port MAC address with the matching VLAN information */
418 	sparx5_mact_forget(port->sparx5, port->ndev->dev_addr, vid);
419 
420 	return 0;
421 }
422 
423 static int sparx5_handle_port_obj_del(struct net_device *dev,
424 				      struct notifier_block *nb,
425 				      struct switchdev_notifier_port_obj_info *info)
426 {
427 	const struct switchdev_obj *obj = info->obj;
428 	int err;
429 
430 	switch (obj->id) {
431 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
432 		err = sparx5_handle_port_vlan_del(dev, nb,
433 						  SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
434 		break;
435 	default:
436 		err = -EOPNOTSUPP;
437 		break;
438 	}
439 
440 	info->handled = true;
441 	return err;
442 }
443 
444 static int sparx5_switchdev_blocking_event(struct notifier_block *nb,
445 					   unsigned long event,
446 					   void *ptr)
447 {
448 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
449 	int err;
450 
451 	switch (event) {
452 	case SWITCHDEV_PORT_OBJ_ADD:
453 		err = sparx5_handle_port_obj_add(dev, nb, ptr);
454 		return notifier_from_errno(err);
455 	case SWITCHDEV_PORT_OBJ_DEL:
456 		err = sparx5_handle_port_obj_del(dev, nb, ptr);
457 		return notifier_from_errno(err);
458 	case SWITCHDEV_PORT_ATTR_SET:
459 		err = switchdev_handle_port_attr_set(dev, ptr,
460 						     sparx5_netdevice_check,
461 						     sparx5_port_attr_set);
462 		return notifier_from_errno(err);
463 	}
464 
465 	return NOTIFY_DONE;
466 }
467 
468 int sparx5_register_notifier_blocks(struct sparx5 *s5)
469 {
470 	int err;
471 
472 	s5->netdevice_nb.notifier_call = sparx5_netdevice_event;
473 	err = register_netdevice_notifier(&s5->netdevice_nb);
474 	if (err)
475 		return err;
476 
477 	s5->switchdev_nb.notifier_call = sparx5_switchdev_event;
478 	err = register_switchdev_notifier(&s5->switchdev_nb);
479 	if (err)
480 		goto err_switchdev_nb;
481 
482 	s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event;
483 	err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
484 	if (err)
485 		goto err_switchdev_blocking_nb;
486 
487 	sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0);
488 	if (!sparx5_owq) {
489 		err = -ENOMEM;
490 		goto err_switchdev_blocking_nb;
491 	}
492 
493 	return 0;
494 
495 err_switchdev_blocking_nb:
496 	unregister_switchdev_notifier(&s5->switchdev_nb);
497 err_switchdev_nb:
498 	unregister_netdevice_notifier(&s5->netdevice_nb);
499 
500 	return err;
501 }
502 
503 void sparx5_unregister_notifier_blocks(struct sparx5 *s5)
504 {
505 	destroy_workqueue(sparx5_owq);
506 
507 	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
508 	unregister_switchdev_notifier(&s5->switchdev_nb);
509 	unregister_netdevice_notifier(&s5->netdevice_nb);
510 }
511