/* SPDX-License-Identifier: GPL-2.0 */
/* Texas Instruments K3 AM65 Ethernet Switchdev Driver
 *
 * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com/
 *
 */

#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <net/switchdev.h>

#include "am65-cpsw-nuss.h"
#include "am65-cpsw-switchdev.h"
#include "cpsw_ale.h"

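/* Deferred FDB add/del request, queued from the atomic switchdev notifier
 * and processed in am65_cpsw_switchdev_event_work().
 */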
struct am65_cpsw_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct am65_cpsw_port *port;
	unsigned long event;
};

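/* Map a bridge STP state to the corresponding ALE port state and program
 * it for this port. Unsupported states are rejected with -EOPNOTSUPP.
 */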
static int am65_cpsw_port_stp_state_set(struct am65_cpsw_port *port, u8 state)
{
	struct am65_cpsw_common *cpsw = port->common;
	u8 cpsw_state;
	int ret = 0;

	switch (state) {
	case BR_STATE_FORWARDING:
		cpsw_state = ALE_PORT_STATE_FORWARD;
		break;
	case BR_STATE_LEARNING:
		cpsw_state = ALE_PORT_STATE_LEARN;
		break;
	case BR_STATE_DISABLED:
		cpsw_state = ALE_PORT_STATE_DISABLE;
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		cpsw_state = ALE_PORT_STATE_BLOCK;
		break;
	default:
		return -EOPNOTSUPP;
	}

	ret = cpsw_ale_control_set(cpsw->ale, port->port_id,
				   ALE_PORT_STATE, cpsw_state);
	netdev_dbg(port->ndev, "ale state: %u\n", cpsw_state);

	return ret;
}

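/* Apply offloadable bridge port flags: BR_MCAST_FLOOD toggles flooding of
 * unregistered multicast to this port via the ALE unreg-mcast port mask.
 */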
static int am65_cpsw_port_attr_br_flags_set(struct am65_cpsw_port *port,
					    struct net_device *orig_dev,
					    struct switchdev_brport_flags flags)
{
	struct am65_cpsw_common *cpsw = port->common;

	if (flags.mask & BR_MCAST_FLOOD) {
		bool unreg_mcast_add = false;

		if (flags.val & BR_MCAST_FLOOD)
			unreg_mcast_add = true;

		netdev_dbg(port->ndev, "BR_MCAST_FLOOD: %d port %u\n",
			   unreg_mcast_add, port->port_id);

		cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(port->port_id),
					 unreg_mcast_add);
	}

	return 0;
}

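/* Reject any bridge port flag other than BR_LEARNING and BR_MCAST_FLOOD,
 * since only these can be offloaded to the hardware.
 */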
static int am65_cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
						struct switchdev_brport_flags flags)
{
	if (flags.mask & ~(BR_LEARNING | BR_MCAST_FLOOD))
		return -EINVAL;

	return 0;
}

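/* switchdev port attribute handler: dispatch STP state and bridge port
 * flag changes to the helpers above.
 */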
static int am65_cpsw_port_attr_set(struct net_device *ndev, const void *ctx,
				   const struct switchdev_attr *attr,
				   struct netlink_ext_ack *extack)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int ret;

	netdev_dbg(ndev, "attr: id %u port: %u\n", attr->id, port->port_id);

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		ret = am65_cpsw_port_attr_br_flags_pre_set(ndev,
							   attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		ret = am65_cpsw_port_stp_state_set(port, attr->u.stp_state);
		netdev_dbg(ndev, "stp state: %u\n", attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		ret = am65_cpsw_port_attr_br_flags_set(port, attr->orig_dev,
						       attr->u.brport_flags);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

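/* Read the current PVID (VID field, bits 11:0) from the port VLAN register
 * of the given external port, or of the host port for port_id 0.
 */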
static u16 am65_cpsw_get_pvid(struct am65_cpsw_port *port)
{
	struct am65_cpsw_common *cpsw = port->common;
	struct am65_cpsw_host *host_p = am65_common_get_host(cpsw);
	u32 pvid;

	if (port->port_id)
		pvid = readl(port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
	else
		pvid = readl(host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);

	pvid = pvid & 0xfff;

	return pvid;
}

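/* Program the port VLAN register: VID in bits 11:0, CFI in bit 12 and
 * CoS/priority in bits 15:13.
 */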
static void am65_cpsw_set_pvid(struct am65_cpsw_port *port, u16 vid, bool cfi, u32 cos)
{
	struct am65_cpsw_common *cpsw = port->common;
	struct am65_cpsw_host *host_p = am65_common_get_host(cpsw);
	u32 pvid;

	pvid = vid;
	pvid |= cfi ? BIT(12) : 0;
	pvid |= (cos & 0x7) << 13;

	if (port->port_id)
		writel(pvid, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
	else
		writel(pvid, host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
}

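/* Add (or modify) a VLAN in the ALE for either the CPU/host port or the
 * external port, set up the untagged-egress and multicast flood masks,
 * and optionally make the VLAN the port's PVID.
 */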
static int am65_cpsw_port_vlan_add(struct am65_cpsw_port *port, bool untag, bool pvid,
				   u16 vid, struct net_device *orig_dev)
{
	bool cpu_port = netif_is_bridge_master(orig_dev);
	struct am65_cpsw_common *cpsw = port->common;
	int unreg_mcast_mask = 0;
	int reg_mcast_mask = 0;
	int untag_mask = 0;
	int port_mask;
	int ret = 0;
	u32 flags;

	if (cpu_port) {
		port_mask = BIT(HOST_PORT_NUM);
		flags = orig_dev->flags;
		unreg_mcast_mask = port_mask;
	} else {
		port_mask = BIT(port->port_id);
		flags = port->ndev->flags;
	}

	if (flags & IFF_MULTICAST)
		reg_mcast_mask = port_mask;

	if (untag)
		untag_mask = port_mask;

	ret = cpsw_ale_vlan_add_modify(cpsw->ale, vid, port_mask, untag_mask,
				       reg_mcast_mask, unreg_mcast_mask);
	if (ret) {
		netdev_err(port->ndev, "Unable to add vlan\n");
		return ret;
	}

	if (cpu_port)
		cpsw_ale_add_ucast(cpsw->ale, port->slave.mac_addr,
				   HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, vid);
	if (!pvid)
		return ret;

	am65_cpsw_set_pvid(port, vid, 0, 0);

	netdev_dbg(port->ndev, "VID add: %s: vid:%u ports:%X\n",
		   port->ndev->name, vid, port_mask);

	return ret;
}

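/* Remove the port from a VLAN in the ALE and drop the related unicast and
 * broadcast entries. If the VLAN was the port's PVID, clear the PVID too.
 */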
static int am65_cpsw_port_vlan_del(struct am65_cpsw_port *port, u16 vid,
				   struct net_device *orig_dev)
{
	bool cpu_port = netif_is_bridge_master(orig_dev);
	struct am65_cpsw_common *cpsw = port->common;
	int port_mask;
	int ret = 0;

	if (cpu_port)
		port_mask = BIT(HOST_PORT_NUM);
	else
		port_mask = BIT(port->port_id);

	ret = cpsw_ale_del_vlan(cpsw->ale, vid, port_mask);
	if (ret != 0)
		return ret;

	/* We don't care for the return value here, error is returned only if
	 * the unicast entry is not present
	 */
	if (cpu_port)
		cpsw_ale_del_ucast(cpsw->ale, port->slave.mac_addr,
				   HOST_PORT_NUM, ALE_VLAN, vid);

	if (vid == am65_cpsw_get_pvid(port))
		am65_cpsw_set_pvid(port, 0, 0, 0);

	/* We don't care for the return value here, error is returned only if
	 * the multicast entry is not present
	 */
	cpsw_ale_del_mcast(cpsw->ale, port->ndev->broadcast, port_mask,
			   ALE_VLAN, vid);
	netdev_dbg(port->ndev, "VID del: %s: vid:%u ports:%X\n",
		   port->ndev->name, vid, port_mask);

	return ret;
}

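/* SWITCHDEV_OBJ_ID_PORT_VLAN add handler: decode the bridge VLAN flags and
 * program the VLAN through am65_cpsw_port_vlan_add().
 */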
static int am65_cpsw_port_vlans_add(struct am65_cpsw_port *port,
				    const struct switchdev_obj_port_vlan *vlan)
{
	bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	netdev_dbg(port->ndev, "VID add: %s: vid:%u flags:%X\n",
		   port->ndev->name, vlan->vid, vlan->flags);

	return am65_cpsw_port_vlan_add(port, untag, pvid, vlan->vid, orig_dev);
}

static int am65_cpsw_port_vlans_del(struct am65_cpsw_port *port,
				    const struct switchdev_obj_port_vlan *vlan)

{
	return am65_cpsw_port_vlan_del(port, vlan->vid, vlan->obj.orig_dev);
}

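/* Add a multicast (MDB) entry to the ALE for the host port or the external
 * port, scoped to the entry's VLAN.
 */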
static int am65_cpsw_port_mdb_add(struct am65_cpsw_port *port,
				  struct switchdev_obj_port_mdb *mdb)

{
	struct net_device *orig_dev = mdb->obj.orig_dev;
	bool cpu_port = netif_is_bridge_master(orig_dev);
	struct am65_cpsw_common *cpsw = port->common;
	int port_mask;
	int err;

	if (cpu_port)
		port_mask = BIT(HOST_PORT_NUM);
	else
		port_mask = BIT(port->port_id);

	err = cpsw_ale_add_mcast(cpsw->ale, mdb->addr, port_mask,
				 ALE_VLAN, mdb->vid, 0);
	netdev_dbg(port->ndev, "MDB add: %s: vid %u:%pM  ports: %X\n",
		   port->ndev->name, mdb->vid, mdb->addr, port_mask);

	return err;
}

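/* Remove a multicast (MDB) entry from the ALE for the host port or the
 * external port.
 */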
static int am65_cpsw_port_mdb_del(struct am65_cpsw_port *port,
				  struct switchdev_obj_port_mdb *mdb)

{
	struct net_device *orig_dev = mdb->obj.orig_dev;
	bool cpu_port = netif_is_bridge_master(orig_dev);
	struct am65_cpsw_common *cpsw = port->common;
	int del_mask;

	if (cpu_port)
		del_mask = BIT(HOST_PORT_NUM);
	else
		del_mask = BIT(port->port_id);

	/* Ignore error as error code is returned only when entry is already removed */
	cpsw_ale_del_mcast(cpsw->ale, mdb->addr, del_mask,
			   ALE_VLAN, mdb->vid);
	netdev_dbg(port->ndev, "MDB del: %s: vid %u:%pM  ports: %X\n",
		   port->ndev->name, mdb->vid, mdb->addr, del_mask);

	return 0;
}

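/* switchdev object add handler: VLAN and (host) MDB objects are offloaded,
 * everything else is reported as unsupported.
 */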
static int am65_cpsw_port_obj_add(struct net_device *ndev, const void *ctx,
				  const struct switchdev_obj *obj,
				  struct netlink_ext_ack *extack)
{
	struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int err = 0;

	netdev_dbg(ndev, "obj_add: id %u port: %u\n", obj->id, port->port_id);

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = am65_cpsw_port_vlans_add(port, vlan);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = am65_cpsw_port_mdb_add(port, mdb);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int am65_cpsw_port_obj_del(struct net_device *ndev, const void *ctx,
				  const struct switchdev_obj *obj)
{
	struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int err = 0;

	netdev_dbg(ndev, "obj_del: id %u port: %u\n", obj->id, port->port_id);

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = am65_cpsw_port_vlans_del(port, vlan);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = am65_cpsw_port_mdb_del(port, mdb);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

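/* Tell the bridge that an FDB entry has been offloaded to hardware by
 * re-injecting it as a SWITCHDEV_FDB_OFFLOADED notification.
 */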
static void am65_cpsw_fdb_offload_notify(struct net_device *ndev,
					 struct switchdev_notifier_fdb_info *rcv)
{
	struct switchdev_notifier_fdb_info info = {};

	info.addr = rcv->addr;
	info.vid = rcv->vid;
	info.offloaded = true;
	call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
				 ndev, &info.info, NULL);
}

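/* Deferred FDB work: add or delete a unicast ALE entry for user-added FDB
 * entries. Entries matching the port's own MAC address are installed on
 * the host port instead of the external port.
 */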
static void am65_cpsw_switchdev_event_work(struct work_struct *work)
{
	struct am65_cpsw_switchdev_event_work *switchdev_work =
		container_of(work, struct am65_cpsw_switchdev_event_work, work);
	struct am65_cpsw_port *port = switchdev_work->port;
	struct switchdev_notifier_fdb_info *fdb;
	struct am65_cpsw_common *cpsw = port->common;
	int port_id = port->port_id;

	rtnl_lock();
	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb = &switchdev_work->fdb_info;

		netdev_dbg(port->ndev, "cpsw_fdb_add: MACID = %pM vid = %u flags = %u %u -- port %d\n",
			   fdb->addr, fdb->vid, fdb->added_by_user,
			   fdb->offloaded, port_id);

		if (!fdb->added_by_user || fdb->is_local)
			break;
		if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
			port_id = HOST_PORT_NUM;

		cpsw_ale_add_ucast(cpsw->ale, (u8 *)fdb->addr, port_id,
				   fdb->vid ? ALE_VLAN : 0, fdb->vid);
		am65_cpsw_fdb_offload_notify(port->ndev, fdb);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb = &switchdev_work->fdb_info;

		netdev_dbg(port->ndev, "cpsw_fdb_del: MACID = %pM vid = %u flags = %u %u -- port %d\n",
			   fdb->addr, fdb->vid, fdb->added_by_user,
			   fdb->offloaded, port_id);

		if (!fdb->added_by_user || fdb->is_local)
			break;
		if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
			port_id = HOST_PORT_NUM;

		cpsw_ale_del_ucast(cpsw->ale, (u8 *)fdb->addr, port_id,
				   fdb->vid ? ALE_VLAN : 0, fdb->vid);
		break;
	default:
		break;
	}
	rtnl_unlock();

	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(port->ndev);
}

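/* Atomic switchdev notifier: PORT_ATTR_SET is handled in place, FDB add/del
 * events are copied into a work item and deferred to system_long_wq.
 */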
/* called under rcu_read_lock() */
static int am65_cpsw_switchdev_event(struct notifier_block *unused,
				     unsigned long event, void *ptr)
{
	struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
	struct am65_cpsw_switchdev_event_work *switchdev_work;
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct switchdev_notifier_fdb_info *fdb_info = ptr;
	int err;

	if (event == SWITCHDEV_PORT_ATTR_SET) {
		err = switchdev_handle_port_attr_set(ndev, ptr,
						     am65_cpsw_port_dev_check,
						     am65_cpsw_port_attr_set);
		return notifier_from_errno(err);
	}

	if (!am65_cpsw_port_dev_check(ndev))
		return NOTIFY_DONE;

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (WARN_ON(!switchdev_work))
		return NOTIFY_BAD;

	INIT_WORK(&switchdev_work->work, am65_cpsw_switchdev_event_work);
	switchdev_work->port = port;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;
		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		dev_hold(ndev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	queue_work(system_long_wq, &switchdev_work->work);

	return NOTIFY_DONE;

err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

static struct notifier_block cpsw_switchdev_notifier = {
	.notifier_call = am65_cpsw_switchdev_event,
};

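/* Blocking switchdev notifier: forward object add/del and attribute set
 * requests to the per-port handlers.
 */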
static int am65_cpsw_switchdev_blocking_event(struct notifier_block *unused,
					      unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    am65_cpsw_port_dev_check,
						    am65_cpsw_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del(dev, ptr,
						    am65_cpsw_port_dev_check,
						    am65_cpsw_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     am65_cpsw_port_dev_check,
						     am65_cpsw_port_attr_set);
		return notifier_from_errno(err);
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block cpsw_switchdev_bl_notifier = {
	.notifier_call = am65_cpsw_switchdev_blocking_event,
};

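/* Register the atomic and blocking switchdev notifiers; if the blocking
 * registration fails, roll back the atomic one.
 */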
int am65_cpsw_switchdev_register_notifiers(struct am65_cpsw_common *cpsw)
{
	int ret = 0;

	ret = register_switchdev_notifier(&cpsw_switchdev_notifier);
	if (ret) {
		dev_err(cpsw->dev, "register switchdev notifier fail ret:%d\n",
			ret);
		return ret;
	}

	ret = register_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
	if (ret) {
		dev_err(cpsw->dev, "register switchdev blocking notifier ret:%d\n",
			ret);
		unregister_switchdev_notifier(&cpsw_switchdev_notifier);
	}

	return ret;
}

void am65_cpsw_switchdev_unregister_notifiers(struct am65_cpsw_common *cpsw)
{
	unregister_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
	unregister_switchdev_notifier(&cpsw_switchdev_notifier);
}