xref: /openbmc/linux/net/dsa/dsa.c (revision 53809828)
/*
 * net/dsa/dsa.c - Hardware switch handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/device.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <linux/phy_fixed.h>
#include <linux/ptp_classify.h>
#include <linux/etherdevice.h>

#include "dsa_priv.h"

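/* Fallback "tagger" used when the switch inserts no tag at all: frames are
 * transmitted unmodified, and since untagged frames cannot be demultiplexed
 * back to their source port, ->rcv is left NULL.
 */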
static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	/* Just return the original SKB */
	return skb;
}

static const struct dsa_device_ops none_ops = {
	.xmit	= dsa_slave_notag_xmit,
	.rcv	= NULL,
};

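/* Table mapping every tagging protocol to its tag manipulation ops. Entries
 * are only populated for taggers that were enabled in Kconfig; the rest stay
 * NULL and are rejected by dsa_resolve_tag_protocol().
 */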
const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = {
#ifdef CONFIG_NET_DSA_TAG_BRCM
	[DSA_TAG_PROTO_BRCM] = &brcm_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_BRCM_PREPEND
	[DSA_TAG_PROTO_BRCM_PREPEND] = &brcm_prepend_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_DSA
	[DSA_TAG_PROTO_DSA] = &dsa_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_EDSA
	[DSA_TAG_PROTO_EDSA] = &edsa_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_GSWIP
	[DSA_TAG_PROTO_GSWIP] = &gswip_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_KSZ
	[DSA_TAG_PROTO_KSZ] = &ksz_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_LAN9303
	[DSA_TAG_PROTO_LAN9303] = &lan9303_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_MTK
	[DSA_TAG_PROTO_MTK] = &mtk_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_QCA
	[DSA_TAG_PROTO_QCA] = &qca_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_TRAILER
	[DSA_TAG_PROTO_TRAILER] = &trailer_netdev_ops,
#endif
	[DSA_TAG_PROTO_NONE] = &none_ops,
};

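/* Return a human-readable name for the tagging protocol implemented by @ops,
 * falling back to "none" when @ops does not match any known tagger.
 */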
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops)
{
	const char *protocol_name[DSA_TAG_LAST] = {
#ifdef CONFIG_NET_DSA_TAG_BRCM
		[DSA_TAG_PROTO_BRCM] = "brcm",
#endif
#ifdef CONFIG_NET_DSA_TAG_BRCM_PREPEND
		[DSA_TAG_PROTO_BRCM_PREPEND] = "brcm-prepend",
#endif
#ifdef CONFIG_NET_DSA_TAG_DSA
		[DSA_TAG_PROTO_DSA] = "dsa",
#endif
#ifdef CONFIG_NET_DSA_TAG_EDSA
		[DSA_TAG_PROTO_EDSA] = "edsa",
#endif
#ifdef CONFIG_NET_DSA_TAG_GSWIP
		[DSA_TAG_PROTO_GSWIP] = "gswip",
#endif
#ifdef CONFIG_NET_DSA_TAG_KSZ
		[DSA_TAG_PROTO_KSZ] = "ksz",
#endif
#ifdef CONFIG_NET_DSA_TAG_LAN9303
		[DSA_TAG_PROTO_LAN9303] = "lan9303",
#endif
#ifdef CONFIG_NET_DSA_TAG_MTK
		[DSA_TAG_PROTO_MTK] = "mtk",
#endif
#ifdef CONFIG_NET_DSA_TAG_QCA
		[DSA_TAG_PROTO_QCA] = "qca",
#endif
#ifdef CONFIG_NET_DSA_TAG_TRAILER
		[DSA_TAG_PROTO_TRAILER] = "trailer",
#endif
		[DSA_TAG_PROTO_NONE] = "none",
	};
	unsigned int i;

	BUILD_BUG_ON(ARRAY_SIZE(protocol_name) != DSA_TAG_LAST);

	for (i = 0; i < ARRAY_SIZE(dsa_device_ops); i++)
		if (ops == dsa_device_ops[i])
			return protocol_name[i];

	return protocol_name[DSA_TAG_PROTO_NONE];
}

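/* Translate a DSA_TAG_PROTO_* value into its device ops. Returns -EINVAL for
 * out-of-range protocols and -ENOPROTOOPT when the tagger is known but was
 * not compiled in.
 */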
const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol)
{
	const struct dsa_device_ops *ops;

	if (tag_protocol >= DSA_TAG_LAST)
		return ERR_PTR(-EINVAL);
	ops = dsa_device_ops[tag_protocol];

	if (!ops)
		return ERR_PTR(-ENOPROTOOPT);

	return ops;
}

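/* device_find_child() match callback: return 1 if @dev belongs to the device
 * class named by @class.
 */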
static int dev_is_class(struct device *dev, void *class)
{
	if (dev->class != NULL && !strcmp(dev->class->name, class))
		return 1;

	return 0;
}

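/* Return @parent itself if it is of class @class, otherwise search its
 * children. A reference is taken on whichever device is returned.
 */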
static struct device *dev_find_class(struct device *parent, char *class)
{
	if (dev_is_class(parent, class)) {
		get_device(parent);
		return parent;
	}

	return device_find_child(parent, class, dev_is_class);
}

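/* Find the net_device registered under @dev (or @dev itself). On success the
 * net_device is returned with a reference held via dev_hold(); the caller is
 * responsible for dropping it with dev_put().
 */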
struct net_device *dsa_dev_to_net_device(struct device *dev)
{
	struct device *d;

	d = dev_find_class(dev, "net");
	if (d != NULL) {
		struct net_device *nd;

		nd = to_net_dev(d);
		dev_hold(nd);
		put_device(d);

		return nd;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_dev_to_net_device);

/* Determine if we should defer delivery of skb until we have a rx timestamp.
 *
 * Called from dsa_switch_rcv. For now, this will only work if tagging is
 * enabled on the switch. Normally the MAC driver would retrieve the hardware
 * timestamp when it reads the packet out of the hardware. However in a DSA
 * switch, the DSA driver owning the interface to which the packet is
 * delivered is never notified unless we do so here.
 */
static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p,
				       struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;
	unsigned int type;

	if (skb_headroom(skb) < ETH_HLEN)
		return false;

	__skb_push(skb, ETH_HLEN);

	type = ptp_classify_raw(skb);

	__skb_pull(skb, ETH_HLEN);

	if (type == PTP_CLASS_NONE)
		return false;

	if (likely(ds->ops->port_rxtstamp))
		return ds->ops->port_rxtstamp(ds, p->dp->index, skb, type);

	return false;
}

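/* Receive handler for ETH_P_XDSA frames arriving on a DSA master device. The
 * CPU port's tagger (->rcv) decodes and strips the switch tag and rebinds the
 * skb to the slave net_device of the port it came in on; frames that cannot
 * be decoded are dropped.
 */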
static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
			  struct packet_type *pt, struct net_device *unused)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct sk_buff *nskb = NULL;
	struct pcpu_sw_netstats *s;
	struct dsa_slave_priv *p;

	if (unlikely(!cpu_dp)) {
		kfree_skb(skb);
		return 0;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return 0;

	nskb = cpu_dp->rcv(skb, dev, pt);
	if (!nskb) {
		kfree_skb(skb);
		return 0;
	}

	skb = nskb;
	p = netdev_priv(skb->dev);
	skb_push(skb, ETH_HLEN);
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, skb->dev);

	s = this_cpu_ptr(p->stats64);
	u64_stats_update_begin(&s->syncp);
	s->rx_packets++;
	s->rx_bytes += skb->len;
	u64_stats_update_end(&s->syncp);

	if (dsa_skb_defer_rx_timestamp(p, skb))
		return 0;

	netif_receive_skb(skb);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
{
	return dsa_is_user_port(ds, p) && ds->ports[p].slave;
}

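/* Suspend every initialized user port's slave device first, then let the
 * switch driver suspend the switch itself.
 */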
int dsa_switch_suspend(struct dsa_switch *ds)
{
	int i, ret = 0;

	/* Suspend slave network devices */
	for (i = 0; i < ds->num_ports; i++) {
		if (!dsa_is_port_initialized(ds, i))
			continue;

		ret = dsa_slave_suspend(ds->ports[i].slave);
		if (ret)
			return ret;
	}

	if (ds->ops->suspend)
		ret = ds->ops->suspend(ds);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_switch_suspend);

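/* Resume in the opposite order: the switch driver first, then the slave
 * devices of all initialized user ports.
 */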
int dsa_switch_resume(struct dsa_switch *ds)
{
	int i, ret = 0;

	if (ds->ops->resume)
		ret = ds->ops->resume(ds);

	if (ret)
		return ret;

	/* Resume slave network devices */
	for (i = 0; i < ds->num_ports; i++) {
		if (!dsa_is_port_initialized(ds, i))
			continue;

		ret = dsa_slave_resume(ds->ports[i].slave);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dsa_switch_resume);
#endif

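/* eth_type_trans() rewrites the protocol of frames received on a DSA master
 * to ETH_P_XDSA, so this single packet_type funnels all switch-tagged traffic
 * into dsa_switch_rcv().
 */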
static struct packet_type dsa_pack_type __read_mostly = {
	.type	= cpu_to_be16(ETH_P_XDSA),
	.func	= dsa_switch_rcv,
};

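/* Single ordered workqueue shared by all of DSA, so that deferred operations
 * (for example switchdev work that must leave atomic context) execute in the
 * order they were queued.
 */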
static struct workqueue_struct *dsa_owq;

bool dsa_schedule_work(struct work_struct *work)
{
	return queue_work(dsa_owq, work);
}

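/* Atomic notifier chain through which other drivers can be told about DSA
 * events on a given net_device; call_dsa_notifiers() records the device in
 * info->dev before running the chain.
 */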
static ATOMIC_NOTIFIER_HEAD(dsa_notif_chain);

int register_dsa_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&dsa_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_dsa_notifier);

int unregister_dsa_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&dsa_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_dsa_notifier);

int call_dsa_notifiers(unsigned long val, struct net_device *dev,
		       struct dsa_notifier_info *info)
{
	info->dev = dev;
	return atomic_notifier_call_chain(&dsa_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_dsa_notifiers);

static int __init dsa_init_module(void)
{
	int rc;

	dsa_owq = alloc_ordered_workqueue("dsa_ordered",
					  WQ_MEM_RECLAIM);
	if (!dsa_owq)
		return -ENOMEM;

	rc = dsa_slave_register_notifier();
	if (rc)
		goto register_notifier_fail;

	rc = dsa_legacy_register();
	if (rc)
		goto legacy_register_fail;

	dev_add_pack(&dsa_pack_type);

	return 0;

legacy_register_fail:
	dsa_slave_unregister_notifier();
register_notifier_fail:
	destroy_workqueue(dsa_owq);

	return rc;
}
module_init(dsa_init_module);

static void __exit dsa_cleanup_module(void)
{
	dsa_slave_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
	dsa_legacy_unregister();
	destroy_workqueue(dsa_owq);
}
module_exit(dsa_cleanup_module);

MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");