xref: /openbmc/linux/net/dsa/dsa.c (revision b664e06d)
/*
 * net/dsa/dsa.c - Hardware switch handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/device.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <linux/phy_fixed.h>
#include <linux/ptp_classify.h>
#include <linux/etherdevice.h>

#include "dsa_priv.h"

static LIST_HEAD(dsa_tag_drivers_list);
static DEFINE_MUTEX(dsa_tag_drivers_lock);

static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	/* Just return the original SKB */
	return skb;
}

static const struct dsa_device_ops none_ops = {
	.name	= "none",
	.proto	= DSA_TAG_PROTO_NONE,
	.xmit	= dsa_slave_notag_xmit,
	.rcv	= NULL,
};

DSA_TAG_DRIVER(none_ops);

static void dsa_tag_driver_register(struct dsa_tag_driver *dsa_tag_driver,
				    struct module *owner)
{
	dsa_tag_driver->owner = owner;

	mutex_lock(&dsa_tag_drivers_lock);
	list_add_tail(&dsa_tag_driver->list, &dsa_tag_drivers_list);
	mutex_unlock(&dsa_tag_drivers_lock);
}

void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
			      unsigned int count, struct module *owner)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		dsa_tag_driver_register(dsa_tag_driver_array[i], owner);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_register);

static void dsa_tag_driver_unregister(struct dsa_tag_driver *dsa_tag_driver)
{
	mutex_lock(&dsa_tag_drivers_lock);
	list_del(&dsa_tag_driver->list);
	mutex_unlock(&dsa_tag_drivers_lock);
}

void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
				unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		dsa_tag_driver_unregister(dsa_tag_driver_array[i]);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_unregister);

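/* Usage sketch (illustrative, with hypothetical names): a tagging protocol
 * module does not normally call dsa_tag_drivers_register() directly, but
 * goes through the module_dsa_tag_driver() helper in <net/dsa.h>, which
 * expands to DSA_TAG_DRIVER() plus the register/unregister calls above in
 * the module init/exit paths. Here "foo", foo_tag_xmit, foo_tag_rcv and
 * DSA_TAG_PROTO_FOO are placeholders:
 *
 *	static const struct dsa_device_ops foo_netdev_ops = {
 *		.name	= "foo",
 *		.proto	= DSA_TAG_PROTO_FOO,
 *		.xmit	= foo_tag_xmit,
 *		.rcv	= foo_tag_rcv,
 *	};
 *
 *	MODULE_LICENSE("GPL");
 *	MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_FOO);
 *
 *	module_dsa_tag_driver(foo_netdev_ops);
 *
 * The MODULE_ALIAS_DSA_TAG_DRIVER() alias is what lets the request_module()
 * call in dsa_tag_driver_get() below autoload the tagger from its protocol
 * number.
 */
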
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops)
{
	return ops->name;
}

const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol)
{
	struct dsa_tag_driver *dsa_tag_driver;
	const struct dsa_device_ops *ops;
	char module_name[128];
	bool found = false;

	snprintf(module_name, sizeof(module_name), "%s%d", DSA_TAG_DRIVER_ALIAS,
		 tag_protocol);

	request_module(module_name);

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		ops = dsa_tag_driver->ops;
		if (ops->proto == tag_protocol) {
			found = true;
			break;
		}
	}

	if (found) {
		if (!try_module_get(dsa_tag_driver->owner))
			ops = ERR_PTR(-ENOPROTOOPT);
	} else {
		ops = ERR_PTR(-ENOPROTOOPT);
	}

	mutex_unlock(&dsa_tag_drivers_lock);

	return ops;
}

void dsa_tag_driver_put(const struct dsa_device_ops *ops)
{
	struct dsa_tag_driver *dsa_tag_driver;

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		if (dsa_tag_driver->ops == ops) {
			module_put(dsa_tag_driver->owner);
			break;
		}
	}
	mutex_unlock(&dsa_tag_drivers_lock);
}

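/* Usage sketch (illustrative): dsa_tag_driver_get() and dsa_tag_driver_put()
 * form a get/put pair; the core takes a reference on the tagger module when
 * a CPU port is bound to a protocol and drops it again on teardown. The
 * return value is either valid ops or an ERR_PTR():
 *
 *	const struct dsa_device_ops *tag_ops;
 *
 *	tag_ops = dsa_tag_driver_get(tag_protocol);
 *	if (IS_ERR(tag_ops))
 *		return PTR_ERR(tag_ops);	// typically -ENOPROTOOPT
 *
 *	// ... tag_ops->xmit() / tag_ops->rcv() are used via the CPU port ...
 *
 *	dsa_tag_driver_put(tag_ops);		// drops the module reference
 */
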
static int dev_is_class(struct device *dev, void *class)
{
	if (dev->class != NULL && !strcmp(dev->class->name, class))
		return 1;

	return 0;
}

static struct device *dev_find_class(struct device *parent, char *class)
{
	if (dev_is_class(parent, class)) {
		get_device(parent);
		return parent;
	}

	return device_find_child(parent, class, dev_is_class);
}

struct net_device *dsa_dev_to_net_device(struct device *dev)
{
	struct device *d;

	d = dev_find_class(dev, "net");
	if (d != NULL) {
		struct net_device *nd;

		nd = to_net_dev(d);
		dev_hold(nd);
		put_device(d);

		return nd;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_dev_to_net_device);

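/* Usage sketch (illustrative): dsa_dev_to_net_device() resolves a generic
 * struct device (typically the DSA master given through platform data) to
 * its net_device and takes a reference on it with dev_hold(), so callers
 * must balance it with dev_put() once they are done with the pointer.
 * "pdata_dev" below is hypothetical:
 *
 *	struct net_device *master;
 *
 *	master = dsa_dev_to_net_device(pdata_dev);
 *	if (!master)
 *		return -EPROBE_DEFER;	// master device not probed yet
 *
 *	// ... remember the master for the CPU port ...
 *
 *	dev_put(master);
 */
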
/* Determine if we should defer delivery of skb until we have an RX timestamp.
 *
 * Called from dsa_switch_rcv. For now, this will only work if tagging is
 * enabled on the switch. Normally the MAC driver would retrieve the hardware
 * timestamp when it reads the packet out of the hardware. However in a DSA
 * switch, the DSA driver owning the interface to which the packet is
 * delivered is never notified unless we do so here.
 */
static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p,
				       struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;
	unsigned int type;

	if (skb_headroom(skb) < ETH_HLEN)
		return false;

	__skb_push(skb, ETH_HLEN);

	type = ptp_classify_raw(skb);

	__skb_pull(skb, ETH_HLEN);

	if (type == PTP_CLASS_NONE)
		return false;

	if (likely(ds->ops->port_rxtstamp))
		return ds->ops->port_rxtstamp(ds, p->dp->index, skb, type);

	return false;
}

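/* Driver-side sketch (illustrative, hypothetical "foo" driver): the contract
 * for .port_rxtstamp() is to return false when the skb should be delivered
 * immediately without a hardware RX timestamp, or true when the driver takes
 * ownership, later fills in skb_hwtstamps(skb)->hwtstamp and hands the skb
 * back to the stack (for example with netif_rx()):
 *
 *	static bool foo_port_rxtstamp(struct dsa_switch *ds, int port,
 *				      struct sk_buff *skb, unsigned int type)
 *	{
 *		struct foo_priv *priv = ds->priv;
 *
 *		if (!priv->rx_tstamp_enabled[port])
 *			return false;
 *
 *		// A worker reads the timestamp out of the hardware, sets
 *		// skb_hwtstamps(skb) and calls netif_rx() on the queued skb.
 *		skb_queue_tail(&priv->rx_tstamp_queue[port], skb);
 *		schedule_work(&priv->rx_tstamp_work);
 *
 *		return true;
 *	}
 */
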
static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
			  struct packet_type *pt, struct net_device *unused)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct sk_buff *nskb = NULL;
	struct pcpu_sw_netstats *s;
	struct dsa_slave_priv *p;

	if (unlikely(!cpu_dp)) {
		kfree_skb(skb);
		return 0;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return 0;

	nskb = cpu_dp->rcv(skb, dev, pt);
	if (!nskb) {
		kfree_skb(skb);
		return 0;
	}

	skb = nskb;
	p = netdev_priv(skb->dev);
	skb_push(skb, ETH_HLEN);
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, skb->dev);

	s = this_cpu_ptr(p->stats64);
	u64_stats_update_begin(&s->syncp);
	s->rx_packets++;
	s->rx_bytes += skb->len;
	u64_stats_update_end(&s->syncp);

	if (dsa_skb_defer_rx_timestamp(p, skb))
		return 0;

	netif_receive_skb(skb);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
{
	return dsa_is_user_port(ds, p) && ds->ports[p].slave;
}

int dsa_switch_suspend(struct dsa_switch *ds)
{
	int i, ret = 0;

	/* Suspend slave network devices */
	for (i = 0; i < ds->num_ports; i++) {
		if (!dsa_is_port_initialized(ds, i))
			continue;

		ret = dsa_slave_suspend(ds->ports[i].slave);
		if (ret)
			return ret;
	}

	if (ds->ops->suspend)
		ret = ds->ops->suspend(ds);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_switch_suspend);

int dsa_switch_resume(struct dsa_switch *ds)
{
	int i, ret = 0;

	if (ds->ops->resume)
		ret = ds->ops->resume(ds);

	if (ret)
		return ret;

	/* Resume slave network devices */
	for (i = 0; i < ds->num_ports; i++) {
		if (!dsa_is_port_initialized(ds, i))
			continue;

		ret = dsa_slave_resume(ds->ports[i].slave);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dsa_switch_resume);
#endif

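/* Usage sketch (illustrative, hypothetical "foo" driver): switch drivers are
 * expected to call dsa_switch_suspend()/dsa_switch_resume() from their own
 * dev_pm_ops so the slave netdevs are quiesced before the hardware goes down
 * and reopened afterwards. Assuming the driver keeps its struct dsa_switch
 * reachable from drvdata:
 *
 *	static int __maybe_unused foo_suspend(struct device *dev)
 *	{
 *		struct dsa_switch *ds = dev_get_drvdata(dev);
 *
 *		return dsa_switch_suspend(ds);
 *	}
 *
 *	static int __maybe_unused foo_resume(struct device *dev)
 *	{
 *		struct dsa_switch *ds = dev_get_drvdata(dev);
 *
 *		return dsa_switch_resume(ds);
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 */
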
static struct packet_type dsa_pack_type __read_mostly = {
	.type	= cpu_to_be16(ETH_P_XDSA),
	.func	= dsa_switch_rcv,
};

static struct workqueue_struct *dsa_owq;

bool dsa_schedule_work(struct work_struct *work)
{
	return queue_work(dsa_owq, work);
}

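/* Usage sketch (illustrative, hypothetical names): dsa_schedule_work() queues
 * onto the ordered "dsa_ordered" workqueue above; the core uses it to push
 * operations that must not run in atomic context (such as switchdev events
 * delivered under rcu_read_lock) out to sleepable context:
 *
 *	struct foo_deferred_work {
 *		struct work_struct work;
 *		struct dsa_port *dp;
 *	};
 *
 *	static void foo_deferred_fn(struct work_struct *work)
 *	{
 *		struct foo_deferred_work *w =
 *			container_of(work, struct foo_deferred_work, work);
 *
 *		// sleepable hardware access through w->dp goes here
 *		kfree(w);
 *	}
 *
 * and from atomic context, after INIT_WORK(&w->work, foo_deferred_fn):
 *
 *	dsa_schedule_work(&w->work);
 */
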
static ATOMIC_NOTIFIER_HEAD(dsa_notif_chain);

int register_dsa_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&dsa_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_dsa_notifier);

int unregister_dsa_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&dsa_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_dsa_notifier);

int call_dsa_notifiers(unsigned long val, struct net_device *dev,
		       struct dsa_notifier_info *info)
{
	info->dev = dev;
	return atomic_notifier_call_chain(&dsa_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_dsa_notifiers);

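/* Usage sketch (illustrative, hypothetical names): this chain lets non-DSA
 * code, typically the master Ethernet driver, learn about slave ports coming
 * and going. The slave code calls call_dsa_notifiers() with events such as
 * DSA_PORT_REGISTER, and ->dev in the dsa_notifier_info points at the slave
 * netdev:
 *
 *	static int foo_dsa_notifier_cb(struct notifier_block *nb,
 *				       unsigned long event, void *ptr)
 *	{
 *		struct dsa_notifier_info *info = ptr;
 *
 *		if (event != DSA_PORT_REGISTER)
 *			return NOTIFY_DONE;
 *
 *		netdev_info(info->dev, "new DSA slave port\n");
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_dsa_nb = {
 *		.notifier_call = foo_dsa_notifier_cb,
 *	};
 *
 * registered once with register_dsa_notifier(&foo_dsa_nb) and removed with
 * unregister_dsa_notifier(&foo_dsa_nb).
 */
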
static int __init dsa_init_module(void)
{
	int rc;

	dsa_owq = alloc_ordered_workqueue("dsa_ordered",
					  WQ_MEM_RECLAIM);
	if (!dsa_owq)
		return -ENOMEM;

	rc = dsa_slave_register_notifier();
	if (rc)
		goto register_notifier_fail;

	dev_add_pack(&dsa_pack_type);

	dsa_tag_driver_register(&DSA_TAG_DRIVER_NAME(none_ops),
				THIS_MODULE);

	return 0;

register_notifier_fail:
	destroy_workqueue(dsa_owq);

	return rc;
}
module_init(dsa_init_module);

static void __exit dsa_cleanup_module(void)
{
	dsa_tag_driver_unregister(&DSA_TAG_DRIVER_NAME(none_ops));

	dsa_slave_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
	destroy_workqueue(dsa_owq);
}
module_exit(dsa_cleanup_module);

MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");