// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/dsa.c - Hardware switch handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 */

#include <linux/device.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <linux/ptp_classify.h>
#include <net/dst_metadata.h>

#include "dsa_priv.h"
#include "slave.h"

static LIST_HEAD(dsa_tag_drivers_list);
static DEFINE_MUTEX(dsa_tag_drivers_lock);

static void dsa_tag_driver_register(struct dsa_tag_driver *dsa_tag_driver,
				    struct module *owner)
{
	dsa_tag_driver->owner = owner;

	mutex_lock(&dsa_tag_drivers_lock);
	list_add_tail(&dsa_tag_driver->list, &dsa_tag_drivers_list);
	mutex_unlock(&dsa_tag_drivers_lock);
}

void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
			      unsigned int count, struct module *owner)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		dsa_tag_driver_register(dsa_tag_driver_array[i], owner);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_register);

static void dsa_tag_driver_unregister(struct dsa_tag_driver *dsa_tag_driver)
{
	mutex_lock(&dsa_tag_drivers_lock);
	list_del(&dsa_tag_driver->list);
	mutex_unlock(&dsa_tag_drivers_lock);
}

void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
				unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		dsa_tag_driver_unregister(dsa_tag_driver_array[i]);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_unregister);

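/* Tagging protocol drivers do not normally call dsa_tag_drivers_register()
 * by hand; the module_dsa_tag_driver() / module_dsa_tag_drivers() helper
 * macros generate the registration boilerplate for them. A minimal sketch
 * of a tagger module (all "foo" names are hypothetical):
 *
 *	static const struct dsa_device_ops foo_netdev_ops = {
 *		.name	= "foo",
 *		.proto	= DSA_TAG_PROTO_FOO,
 *		.xmit	= foo_tag_xmit,
 *		.rcv	= foo_tag_rcv,
 *		.needed_headroom = FOO_HDR_LEN,
 *	};
 *
 *	MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_FOO);
 *	module_dsa_tag_driver(foo_netdev_ops);
 */
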
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops)
{
	return ops->name;
}

/* Takes a reference on the module owning the tagger, so callers must
 * release it with dsa_tag_driver_put() once done with the returned ops.
 */
const struct dsa_device_ops *dsa_tag_driver_get_by_name(const char *name)
{
	const struct dsa_device_ops *ops = ERR_PTR(-ENOPROTOOPT);
	struct dsa_tag_driver *dsa_tag_driver;

	request_module("%s%s", DSA_TAG_DRIVER_ALIAS, name);

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		const struct dsa_device_ops *tmp = dsa_tag_driver->ops;

		if (strcmp(name, tmp->name))
			continue;

		if (!try_module_get(dsa_tag_driver->owner))
			break;

		ops = tmp;
		break;
	}
	mutex_unlock(&dsa_tag_drivers_lock);

	return ops;
}

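/* A sketch of the expected calling pattern, with "foo" standing in for a
 * hypothetical tagger name:
 *
 *	const struct dsa_device_ops *ops;
 *
 *	ops = dsa_tag_driver_get_by_name("foo");
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	... use ops ...
 *
 *	dsa_tag_driver_put(ops);
 */
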
const struct dsa_device_ops *dsa_tag_driver_get_by_id(int tag_protocol)
{
	struct dsa_tag_driver *dsa_tag_driver;
	const struct dsa_device_ops *ops;
	bool found = false;

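	/* Taggers advertise module aliases derived from their protocol ID
	 * (see MODULE_ALIAS_DSA_TAG_DRIVER()), so this can autoload the
	 * module implementing @tag_protocol.
	 */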
	request_module("%sid-%d", DSA_TAG_DRIVER_ALIAS, tag_protocol);

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		ops = dsa_tag_driver->ops;
		if (ops->proto == tag_protocol) {
			found = true;
			break;
		}
	}

	if (found) {
		if (!try_module_get(dsa_tag_driver->owner))
			ops = ERR_PTR(-ENOPROTOOPT);
	} else {
		ops = ERR_PTR(-ENOPROTOOPT);
	}

	mutex_unlock(&dsa_tag_drivers_lock);

	return ops;
}

void dsa_tag_driver_put(const struct dsa_device_ops *ops)
{
	struct dsa_tag_driver *dsa_tag_driver;

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		if (dsa_tag_driver->ops == ops) {
			module_put(dsa_tag_driver->owner);
			break;
		}
	}
	mutex_unlock(&dsa_tag_drivers_lock);
}

static int dev_is_class(struct device *dev, void *class)
{
	if (dev->class != NULL && !strcmp(dev->class->name, class))
		return 1;

	return 0;
}

static struct device *dev_find_class(struct device *parent, char *class)
{
	if (dev_is_class(parent, class)) {
		get_device(parent);
		return parent;
	}

	return device_find_child(parent, class, dev_is_class);
}

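/* Find the "net" class device at @dev or among its children, and return
 * the corresponding net_device with a reference held (via dev_hold()).
 * Callers are expected to balance this with dev_put(). A hypothetical
 * caller:
 *
 *	struct net_device *master = dsa_dev_to_net_device(dev);
 *
 *	if (master) {
 *		... use master ...
 *		dev_put(master);
 *	}
 */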
struct net_device *dsa_dev_to_net_device(struct device *dev)
{
	struct device *d;

	d = dev_find_class(dev, "net");
	if (d != NULL) {
		struct net_device *nd;

		nd = to_net_dev(d);
		dev_hold(nd);
		put_device(d);

		return nd;
	}

	return NULL;
}

/* Determine if we should defer delivery of the skb until we have an rx
 * timestamp.
 *
 * Called from dsa_switch_rcv(). For now, this only works if tagging is
 * enabled on the switch. Normally the MAC driver would retrieve the hardware
 * timestamp when it reads the packet out of the hardware. However, in a DSA
 * switch the driver owning the interface to which the packet is delivered is
 * never notified unless we do so here. If the driver's port_rxtstamp() op
 * returns true, it has taken ownership of the skb and will deliver it,
 * timestamped, at a later point.
 */
static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p,
				       struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;
	unsigned int type;

	if (skb_headroom(skb) < ETH_HLEN)
		return false;

	__skb_push(skb, ETH_HLEN);

	type = ptp_classify_raw(skb);

	__skb_pull(skb, ETH_HLEN);

	if (type == PTP_CLASS_NONE)
		return false;

	if (likely(ds->ops->port_rxtstamp))
		return ds->ops->port_rxtstamp(ds, p->dp->index, skb, type);

	return false;
}

static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
			  struct packet_type *pt, struct net_device *unused)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct sk_buff *nskb = NULL;
	struct dsa_slave_priv *p;

	if (unlikely(!cpu_dp)) {
		kfree_skb(skb);
		return 0;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return 0;

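	/* Some masters untag in hardware and report the source port out of
	 * band through a METADATA_HW_PORT_MUX dst rather than through a tag
	 * in the frame itself.
	 */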
	if (md_dst && md_dst->type == METADATA_HW_PORT_MUX) {
		unsigned int port = md_dst->u.port_info.port_id;

		skb_dst_drop(skb);
		if (!skb_has_extensions(skb))
			skb->slow_gro = 0;

		skb->dev = dsa_master_find_slave(dev, 0, port);
		if (likely(skb->dev)) {
			dsa_default_offload_fwd_mark(skb);
			nskb = skb;
		}
	} else {
		nskb = cpu_dp->rcv(skb, dev);
	}

	if (!nskb) {
		kfree_skb(skb);
		return 0;
	}

	skb = nskb;
	skb_push(skb, ETH_HLEN);
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (unlikely(!dsa_slave_dev_check(skb->dev))) {
		/* Packet is to be injected directly on an upper
		 * device, e.g. a team/bond, so skip all DSA-port
		 * specific actions.
		 */
		netif_rx(skb);
		return 0;
	}

	p = netdev_priv(skb->dev);

	if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
		nskb = dsa_untag_bridge_pvid(skb);
		if (!nskb) {
			kfree_skb(skb);
			return 0;
		}
		skb = nskb;
	}

	dev_sw_netstats_rx_add(skb->dev, skb->len);

	if (dsa_skb_defer_rx_timestamp(p, skb))
		return 0;

	gro_cells_receive(&p->gcells, skb);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static bool dsa_port_is_initialized(const struct dsa_port *dp)
{
	return dp->type == DSA_PORT_TYPE_USER && dp->slave;
}

int dsa_switch_suspend(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int ret = 0;

	/* Suspend slave network devices */
	dsa_switch_for_each_port(dp, ds) {
		if (!dsa_port_is_initialized(dp))
			continue;

		ret = dsa_slave_suspend(dp->slave);
		if (ret)
			return ret;
	}

	if (ds->ops->suspend)
		ret = ds->ops->suspend(ds);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_switch_suspend);

int dsa_switch_resume(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int ret = 0;

	if (ds->ops->resume)
		ret = ds->ops->resume(ds);

	if (ret)
		return ret;

	/* Resume slave network devices */
	dsa_switch_for_each_port(dp, ds) {
		if (!dsa_port_is_initialized(dp))
			continue;

		ret = dsa_slave_resume(dp->slave);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dsa_switch_resume);
#endif

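/* eth_type_trans() rewrites skb->protocol to ETH_P_XDSA for every frame
 * received on a DSA master (see netdev_uses_dsa()), which is what steers
 * master traffic into dsa_switch_rcv() below.
 */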
static struct packet_type dsa_pack_type __read_mostly = {
	.type	= cpu_to_be16(ETH_P_XDSA),
	.func	= dsa_switch_rcv,
};

static struct workqueue_struct *dsa_owq;

bool dsa_schedule_work(struct work_struct *work)
{
	return queue_work(dsa_owq, work);
}

void dsa_flush_workqueue(void)
{
	flush_workqueue(dsa_owq);
}
EXPORT_SYMBOL_GPL(dsa_flush_workqueue);

struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
{
	if (!netdev || !dsa_slave_dev_check(netdev))
		return ERR_PTR(-ENODEV);

	return dsa_slave_to_port(netdev);
}
EXPORT_SYMBOL_GPL(dsa_port_from_netdev);

bool dsa_db_equal(const struct dsa_db *a, const struct dsa_db *b)
{
	if (a->type != b->type)
		return false;

	switch (a->type) {
	case DSA_DB_PORT:
		return a->dp == b->dp;
	case DSA_DB_LAG:
		return a->lag.dev == b->lag.dev;
	case DSA_DB_BRIDGE:
		return a->bridge.num == b->bridge.num;
	default:
		WARN_ON(1);
		return false;
	}
}

bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;

	lockdep_assert_held(&dp->addr_lists_lock);

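	/* A match in "another" database is an entry with the same MAC
	 * address, VID and database type, but installed on behalf of a
	 * different port, LAG or bridge than @db.
	 */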
	list_for_each_entry(a, &dp->fdbs, list) {
		if (!ether_addr_equal(a->addr, addr) || a->vid != vid)
			continue;

		if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(dsa_fdb_present_in_other_db);

bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;

	lockdep_assert_held(&dp->addr_lists_lock);

	list_for_each_entry(a, &dp->mdbs, list) {
		if (!ether_addr_equal(a->addr, mdb->addr) || a->vid != mdb->vid)
			continue;

		if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db);

static int __init dsa_init_module(void)
{
	int rc;

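	/* The workqueue is ordered on purpose: deferred operations such as
	 * switchdev FDB events must run in the order in which they were
	 * queued.
	 */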
	dsa_owq = alloc_ordered_workqueue("dsa_ordered",
					  WQ_MEM_RECLAIM);
	if (!dsa_owq)
		return -ENOMEM;

	rc = dsa_slave_register_notifier();
	if (rc)
		goto register_notifier_fail;

	dev_add_pack(&dsa_pack_type);

	rc = rtnl_link_register(&dsa_link_ops);
	if (rc)
		goto netlink_register_fail;

	return 0;

netlink_register_fail:
	dsa_slave_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
register_notifier_fail:
	destroy_workqueue(dsa_owq);

	return rc;
}
module_init(dsa_init_module);

static void __exit dsa_cleanup_module(void)
{
	rtnl_link_unregister(&dsa_link_ops);

	dsa_slave_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
	destroy_workqueue(dsa_owq);
}
module_exit(dsa_cleanup_module);

MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");