// SPDX-License-Identifier: GPL-2.0
/* Texas Instruments K3 AM65 Ethernet Switchdev Driver
 *
 * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com/
 *
 */

#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <net/switchdev.h>

#include "am65-cpsw-nuss.h"
#include "am65-cpsw-switchdev.h"
#include "cpsw_ale.h"

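/* Deferred FDB add/del request: filled in the atomic switchdev notifier
 * and processed later in am65_cpsw_switchdev_event_work().
 */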
struct am65_cpsw_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct am65_cpsw_port *port;
	unsigned long event;
};

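/* Map a bridge STP port state onto the corresponding ALE port state and
 * program it for this port.
 */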
static int am65_cpsw_port_stp_state_set(struct am65_cpsw_port *port, u8 state)
{
	struct am65_cpsw_common *cpsw = port->common;
	u8 cpsw_state;
	int ret = 0;

	switch (state) {
	case BR_STATE_FORWARDING:
		cpsw_state = ALE_PORT_STATE_FORWARD;
		break;
	case BR_STATE_LEARNING:
		cpsw_state = ALE_PORT_STATE_LEARN;
		break;
	case BR_STATE_DISABLED:
		cpsw_state = ALE_PORT_STATE_DISABLE;
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		cpsw_state = ALE_PORT_STATE_BLOCK;
		break;
	default:
		return -EOPNOTSUPP;
	}

	ret = cpsw_ale_control_set(cpsw->ale, port->port_id,
				   ALE_PORT_STATE, cpsw_state);
	netdev_dbg(port->ndev, "ale state: %u\n", cpsw_state);

	return ret;
}

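/* Apply offloadable bridge port flags; BR_MCAST_FLOOD controls whether
 * unregistered multicast is flooded to this port.
 */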
static int am65_cpsw_port_attr_br_flags_set(struct am65_cpsw_port *port,
					    struct net_device *orig_dev,
					    struct switchdev_brport_flags flags)
{
	struct am65_cpsw_common *cpsw = port->common;

	if (flags.mask & BR_MCAST_FLOOD) {
		bool unreg_mcast_add = false;

		if (flags.val & BR_MCAST_FLOOD)
			unreg_mcast_add = true;

		netdev_dbg(port->ndev, "BR_MCAST_FLOOD: %d port %u\n",
			   unreg_mcast_add, port->port_id);

		cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(port->port_id),
					 unreg_mcast_add);
	}

	return 0;
}

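/* Only BR_LEARNING and BR_MCAST_FLOOD are supported bridge port flags;
 * any other flag in the mask is rejected.
 */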
static int am65_cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
						struct switchdev_brport_flags flags)
{
	if (flags.mask & ~(BR_LEARNING | BR_MCAST_FLOOD))
		return -EINVAL;

	return 0;
}

static int am65_cpsw_port_attr_set(struct net_device *ndev,
				   const struct switchdev_attr *attr,
				   struct netlink_ext_ack *extack)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int ret;

	netdev_dbg(ndev, "attr: id %u port: %u\n", attr->id, port->port_id);

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		ret = am65_cpsw_port_attr_br_flags_pre_set(ndev,
							   attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		ret = am65_cpsw_port_stp_state_set(port, attr->u.stp_state);
		netdev_dbg(ndev, "stp state: %u\n", attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		ret = am65_cpsw_port_attr_br_flags_set(port, attr->orig_dev,
						       attr->u.brport_flags);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

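/* The port VLAN (PVID) lives in the per-port VLAN register; a port_id
 * of 0 selects the host port registers. Only the low 12 bits carry the
 * VLAN ID.
 */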
static u16 am65_cpsw_get_pvid(struct am65_cpsw_port *port)
{
	struct am65_cpsw_common *cpsw = port->common;
	struct am65_cpsw_host *host_p = am65_common_get_host(cpsw);
	u32 pvid;

	if (port->port_id)
		pvid = readl(port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
	else
		pvid = readl(host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);

	pvid = pvid & 0xfff;

	return pvid;
}

static void am65_cpsw_set_pvid(struct am65_cpsw_port *port, u16 vid, bool cfi, u32 cos)
{
	struct am65_cpsw_common *cpsw = port->common;
	struct am65_cpsw_host *host_p = am65_common_get_host(cpsw);
	u32 pvid;

	pvid = vid;
	pvid |= cfi ? BIT(12) : 0;
	pvid |= (cos & 0x7) << 13;

	if (port->port_id)
		writel(pvid, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
	else
		writel(pvid, host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
}

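/* Add (or modify) a VLAN ALE entry for this port or, when the request
 * originated on the bridge device itself, for the host (CPU) port.
 * The registered/unregistered multicast and untagged egress masks are
 * derived from the interface flags, and the PVID is programmed when
 * requested.
 */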
static int am65_cpsw_port_vlan_add(struct am65_cpsw_port *port, bool untag, bool pvid,
				   u16 vid, struct net_device *orig_dev)
{
	bool cpu_port = netif_is_bridge_master(orig_dev);
	struct am65_cpsw_common *cpsw = port->common;
	int unreg_mcast_mask = 0;
	int reg_mcast_mask = 0;
	int untag_mask = 0;
	int port_mask;
	int ret = 0;
	u32 flags;

	if (cpu_port) {
		port_mask = BIT(HOST_PORT_NUM);
		flags = orig_dev->flags;
		unreg_mcast_mask = port_mask;
	} else {
		port_mask = BIT(port->port_id);
		flags = port->ndev->flags;
	}

	if (flags & IFF_MULTICAST)
		reg_mcast_mask = port_mask;

	if (untag)
		untag_mask = port_mask;

	ret = cpsw_ale_vlan_add_modify(cpsw->ale, vid, port_mask, untag_mask,
				       reg_mcast_mask, unreg_mcast_mask);
	if (ret) {
		netdev_err(port->ndev, "Unable to add vlan\n");
		return ret;
	}

	if (cpu_port)
		cpsw_ale_add_ucast(cpsw->ale, port->slave.mac_addr,
				   HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, vid);
	if (!pvid)
		return ret;

	am65_cpsw_set_pvid(port, vid, 0, 0);

	netdev_dbg(port->ndev, "VID add: %s: vid:%u ports:%X\n",
		   port->ndev->name, vid, port_mask);

	return ret;
}

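/* Remove the VLAN ALE entry for this port (or the host port) together
 * with the related unicast/multicast entries, and clear the PVID if it
 * matched the deleted VLAN.
 */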
static int am65_cpsw_port_vlan_del(struct am65_cpsw_port *port, u16 vid,
				   struct net_device *orig_dev)
{
	bool cpu_port = netif_is_bridge_master(orig_dev);
	struct am65_cpsw_common *cpsw = port->common;
	int port_mask;
	int ret = 0;

	if (cpu_port)
		port_mask = BIT(HOST_PORT_NUM);
	else
		port_mask = BIT(port->port_id);

	ret = cpsw_ale_del_vlan(cpsw->ale, vid, port_mask);
	if (ret != 0)
		return ret;

	/* We don't care about the return value here; an error is returned
	 * only if the unicast entry is not present.
	 */
	if (cpu_port)
		cpsw_ale_del_ucast(cpsw->ale, port->slave.mac_addr,
				   HOST_PORT_NUM, ALE_VLAN, vid);

	if (vid == am65_cpsw_get_pvid(port))
		am65_cpsw_set_pvid(port, 0, 0, 0);

	/* We don't care about the return value here; an error is returned
	 * only if the multicast entry is not present.
	 */
	cpsw_ale_del_mcast(cpsw->ale, port->ndev->broadcast, port_mask,
			   ALE_VLAN, vid);
	netdev_dbg(port->ndev, "VID del: %s: vid:%u ports:%X\n",
		   port->ndev->name, vid, port_mask);

	return ret;
}

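/* Bridge-level VLANs (BRIDGE_VLAN_INFO_BRENTRY) are the only ones
 * programmed for the host port; plain member VLANs on the bridge
 * device are ignored here.
 */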
static int am65_cpsw_port_vlans_add(struct am65_cpsw_port *port,
				    const struct switchdev_obj_port_vlan *vlan)
{
	bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	bool cpu_port = netif_is_bridge_master(orig_dev);
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	netdev_dbg(port->ndev, "VID add: %s: vid:%u flags:%X\n",
		   port->ndev->name, vlan->vid, vlan->flags);

	if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
		return 0;

	return am65_cpsw_port_vlan_add(port, untag, pvid, vlan->vid, orig_dev);
}

static int am65_cpsw_port_vlans_del(struct am65_cpsw_port *port,
				    const struct switchdev_obj_port_vlan *vlan)
{
	return am65_cpsw_port_vlan_del(port, vlan->vid, vlan->obj.orig_dev);
}

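/* Add an ALE multicast entry for the MDB address on this port, or on
 * the host port when the object targets the bridge device.
 */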
static int am65_cpsw_port_mdb_add(struct am65_cpsw_port *port,
				  struct switchdev_obj_port_mdb *mdb)
{
	struct net_device *orig_dev = mdb->obj.orig_dev;
	bool cpu_port = netif_is_bridge_master(orig_dev);
	struct am65_cpsw_common *cpsw = port->common;
	int port_mask;
	int err;

	if (cpu_port)
		port_mask = BIT(HOST_PORT_NUM);
	else
		port_mask = BIT(port->port_id);

	err = cpsw_ale_add_mcast(cpsw->ale, mdb->addr, port_mask,
				 ALE_VLAN, mdb->vid, 0);
	netdev_dbg(port->ndev, "MDB add: %s: vid %u:%pM  ports: %X\n",
		   port->ndev->name, mdb->vid, mdb->addr, port_mask);

	return err;
}

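/* Remove the ALE multicast entry added by am65_cpsw_port_mdb_add(). */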
static int am65_cpsw_port_mdb_del(struct am65_cpsw_port *port,
				  struct switchdev_obj_port_mdb *mdb)
{
	struct net_device *orig_dev = mdb->obj.orig_dev;
	bool cpu_port = netif_is_bridge_master(orig_dev);
	struct am65_cpsw_common *cpsw = port->common;
	int del_mask;

	if (cpu_port)
		del_mask = BIT(HOST_PORT_NUM);
	else
		del_mask = BIT(port->port_id);

	/* Ignore the error; an error code is returned only when the entry
	 * has already been removed.
	 */
	cpsw_ale_del_mcast(cpsw->ale, mdb->addr, del_mask,
			   ALE_VLAN, mdb->vid);
	netdev_dbg(port->ndev, "MDB del: %s: vid %u:%pM  ports: %X\n",
		   port->ndev->name, mdb->vid, mdb->addr, del_mask);

	return 0;
}

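/* Blocking switchdev object handlers: dispatch VLAN and (host) MDB
 * add/del requests to the helpers above.
 */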
static int am65_cpsw_port_obj_add(struct net_device *ndev,
				  const struct switchdev_obj *obj,
				  struct netlink_ext_ack *extack)
{
	struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int err = 0;

	netdev_dbg(ndev, "obj_add: id %u port: %u\n", obj->id, port->port_id);

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = am65_cpsw_port_vlans_add(port, vlan);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = am65_cpsw_port_mdb_add(port, mdb);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int am65_cpsw_port_obj_del(struct net_device *ndev,
				  const struct switchdev_obj *obj)
{
	struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int err = 0;

	netdev_dbg(ndev, "obj_del: id %u port: %u\n", obj->id, port->port_id);

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = am65_cpsw_port_vlans_del(port, vlan);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = am65_cpsw_port_mdb_del(port, mdb);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

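/* Tell the bridge that an FDB entry has been offloaded to hardware so
 * the entry can be flagged as offloaded in the software FDB.
 */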
static void am65_cpsw_fdb_offload_notify(struct net_device *ndev,
					 struct switchdev_notifier_fdb_info *rcv)
{
	struct switchdev_notifier_fdb_info info = {};

	info.addr = rcv->addr;
	info.vid = rcv->vid;
	info.offloaded = true;
	call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
				 ndev, &info.info, NULL);
}

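/* Worker for deferred FDB events: programs (or removes) the ALE unicast
 * entry under rtnl and, for additions, acknowledges the offload back to
 * the bridge. Entries matching the port MAC address are redirected to
 * the host port.
 */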
static void am65_cpsw_switchdev_event_work(struct work_struct *work)
{
	struct am65_cpsw_switchdev_event_work *switchdev_work =
		container_of(work, struct am65_cpsw_switchdev_event_work, work);
	struct am65_cpsw_port *port = switchdev_work->port;
	struct switchdev_notifier_fdb_info *fdb;
	struct am65_cpsw_common *cpsw = port->common;
	int port_id = port->port_id;

	rtnl_lock();
	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb = &switchdev_work->fdb_info;

		netdev_dbg(port->ndev, "cpsw_fdb_add: MACID = %pM vid = %u flags = %u %u -- port %d\n",
			   fdb->addr, fdb->vid, fdb->added_by_user,
			   fdb->offloaded, port_id);

		if (!fdb->added_by_user)
			break;
		if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
			port_id = HOST_PORT_NUM;

		cpsw_ale_add_ucast(cpsw->ale, (u8 *)fdb->addr, port_id,
				   fdb->vid ? ALE_VLAN : 0, fdb->vid);
		am65_cpsw_fdb_offload_notify(port->ndev, fdb);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb = &switchdev_work->fdb_info;

		netdev_dbg(port->ndev, "cpsw_fdb_del: MACID = %pM vid = %u flags = %u %u -- port %d\n",
			   fdb->addr, fdb->vid, fdb->added_by_user,
			   fdb->offloaded, port_id);

		if (!fdb->added_by_user)
			break;
		if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
			port_id = HOST_PORT_NUM;

		cpsw_ale_del_ucast(cpsw->ale, (u8 *)fdb->addr, port_id,
				   fdb->vid ? ALE_VLAN : 0, fdb->vid);
		break;
	default:
		break;
	}
	rtnl_unlock();

	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(port->ndev);
}

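/* Atomic switchdev notifier: attribute sets are handled in place, while
 * FDB add/del events are copied and deferred to a workqueue, since the
 * work item needs to take the RTNL lock.
 */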
/* called under rcu_read_lock() */
static int am65_cpsw_switchdev_event(struct notifier_block *unused,
				     unsigned long event, void *ptr)
{
	struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
	struct am65_cpsw_switchdev_event_work *switchdev_work;
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct switchdev_notifier_fdb_info *fdb_info = ptr;
	int err;

	if (event == SWITCHDEV_PORT_ATTR_SET) {
		err = switchdev_handle_port_attr_set(ndev, ptr,
						     am65_cpsw_port_dev_check,
						     am65_cpsw_port_attr_set);
		return notifier_from_errno(err);
	}

	if (!am65_cpsw_port_dev_check(ndev))
		return NOTIFY_DONE;

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (WARN_ON(!switchdev_work))
		return NOTIFY_BAD;

	INIT_WORK(&switchdev_work->work, am65_cpsw_switchdev_event_work);
	switchdev_work->port = port;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;
		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		dev_hold(ndev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	queue_work(system_long_wq, &switchdev_work->work);

	return NOTIFY_DONE;

err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

static struct notifier_block cpsw_switchdev_notifier = {
	.notifier_call = am65_cpsw_switchdev_event,
};

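/* Blocking switchdev notifier: routes object add/del and attribute set
 * events to the port handlers for am65-cpsw netdevs only.
 */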
static int am65_cpsw_switchdev_blocking_event(struct notifier_block *unused,
					      unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    am65_cpsw_port_dev_check,
						    am65_cpsw_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del(dev, ptr,
						    am65_cpsw_port_dev_check,
						    am65_cpsw_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     am65_cpsw_port_dev_check,
						     am65_cpsw_port_attr_set);
		return notifier_from_errno(err);
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block cpsw_switchdev_bl_notifier = {
	.notifier_call = am65_cpsw_switchdev_blocking_event,
};

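/* Register both the atomic and the blocking switchdev notifiers; if the
 * blocking one fails to register, the atomic notifier is unregistered
 * again.
 */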
int am65_cpsw_switchdev_register_notifiers(struct am65_cpsw_common *cpsw)
{
	int ret = 0;

	ret = register_switchdev_notifier(&cpsw_switchdev_notifier);
	if (ret) {
		dev_err(cpsw->dev, "register switchdev notifier fail ret:%d\n",
			ret);
		return ret;
	}

	ret = register_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
	if (ret) {
		dev_err(cpsw->dev, "register switchdev blocking notifier fail ret:%d\n",
			ret);
		unregister_switchdev_notifier(&cpsw_switchdev_notifier);
	}

	return ret;
}

void am65_cpsw_switchdev_unregister_notifiers(struct am65_cpsw_common *cpsw)
{
	unregister_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
	unregister_switchdev_notifier(&cpsw_switchdev_notifier);
}