xref: /openbmc/linux/net/dsa/master.c (revision 71de0a05)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a master device, switching frames via its switch fabric CPU port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <net/dsa.h>

#include "dsa.h"
#include "master.h"
#include "port.h"
#include "tag.h"

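/* The functions below wrap the master's original ethtool operations so that a
 * single ethtool request reports the master NIC's own data first, followed by
 * the data of the switch CPU port attached to it. The original operations are
 * saved in cpu_dp->orig_ethtool_ops by dsa_master_ethtool_setup().
 */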
static int dsa_master_get_regs_len(struct net_device *dev)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int port = cpu_dp->index;
	int ret = 0;
	int len;

	if (ops->get_regs_len) {
		len = ops->get_regs_len(dev);
		if (len < 0)
			return len;
		ret += len;
	}

	ret += sizeof(struct ethtool_drvinfo);
	ret += sizeof(struct ethtool_regs);

	if (ds->ops->get_regs_len) {
		len = ds->ops->get_regs_len(ds, port);
		if (len < 0)
			return len;
		ret += len;
	}

	return ret;
}

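/* Register dump layout: the master's own registers (if any) come first,
 * followed by a struct ethtool_drvinfo marker with driver name "dsa", a
 * struct ethtool_regs header for the CPU port, and finally the switch
 * registers of the CPU port itself.
 */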
static void dsa_master_get_regs(struct net_device *dev,
				struct ethtool_regs *regs, void *data)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	struct ethtool_drvinfo *cpu_info;
	struct ethtool_regs *cpu_regs;
	int port = cpu_dp->index;
	int len;

	if (ops->get_regs_len && ops->get_regs) {
		len = ops->get_regs_len(dev);
		if (len < 0)
			return;
		regs->len = len;
		ops->get_regs(dev, regs, data);
		data += regs->len;
	}

	cpu_info = (struct ethtool_drvinfo *)data;
	strscpy(cpu_info->driver, "dsa", sizeof(cpu_info->driver));
	data += sizeof(*cpu_info);
	cpu_regs = (struct ethtool_regs *)data;
	data += sizeof(*cpu_regs);

	if (ds->ops->get_regs_len && ds->ops->get_regs) {
		len = ds->ops->get_regs_len(ds, port);
		if (len < 0)
			return;
		cpu_regs->len = len;
		ds->ops->get_regs(ds, port, cpu_regs, data);
	}
}

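/* Statistics layout: the master driver's own counters first, with the switch
 * driver's counters for the CPU port appended right after them.
 */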
static void dsa_master_get_ethtool_stats(struct net_device *dev,
					 struct ethtool_stats *stats,
					 uint64_t *data)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int port = cpu_dp->index;
	int count = 0;

	if (ops->get_sset_count && ops->get_ethtool_stats) {
		count = ops->get_sset_count(dev, ETH_SS_STATS);
		ops->get_ethtool_stats(dev, stats, data);
	}

	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, port, data + count);
}

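/* PHY statistics come from the generic phylib helpers when the master has an
 * attached PHY and its driver does not provide PHY statistics of its own;
 * otherwise the master driver's callbacks are used. The switch driver's PHY
 * statistics for the CPU port are appended after them.
 */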
static void dsa_master_get_ethtool_phy_stats(struct net_device *dev,
					     struct ethtool_stats *stats,
					     uint64_t *data)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int port = cpu_dp->index;
	int count = 0;

	if (dev->phydev && !ops->get_ethtool_phy_stats) {
		count = phy_ethtool_get_sset_count(dev->phydev);
		if (count >= 0)
			phy_ethtool_get_stats(dev->phydev, stats, data);
	} else if (ops->get_sset_count && ops->get_ethtool_phy_stats) {
		count = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
		ops->get_ethtool_phy_stats(dev, stats, data);
	}

	if (count < 0)
		count = 0;

	if (ds->ops->get_ethtool_phy_stats)
		ds->ops->get_ethtool_phy_stats(ds, port, data + count);
}

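/* The string set count is the sum of the master's count and the switch CPU
 * port's count, so that the strings and statistics arrays stay in sync.
 */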
static int dsa_master_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int count = 0;

	if (sset == ETH_SS_PHY_STATS && dev->phydev &&
	    !ops->get_ethtool_phy_stats)
		count = phy_ethtool_get_sset_count(dev->phydev);
	else if (ops->get_sset_count)
		count = ops->get_sset_count(dev, sset);

	if (count < 0)
		count = 0;

	if (ds->ops->get_sset_count)
		count += ds->ops->get_sset_count(ds, cpu_dp->index, sset);

	return count;
}

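/* Switch-provided strings are prefixed with "p%.2d_" so they can be told
 * apart from the master's own strings. For example (stat names are
 * switch-driver specific), a counter the switch driver reports as "in_pkts"
 * on CPU port 2 would show up in "ethtool -S" output as "p02_in_pkts".
 */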
static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
				   uint8_t *data)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int port = cpu_dp->index;
	int len = ETH_GSTRING_LEN;
	int mcount = 0, count, i;
	uint8_t pfx[4];
	uint8_t *ndata;

	snprintf(pfx, sizeof(pfx), "p%.2d", port);
	/* We do not want a trailing NUL, since this is a prefix */
	pfx[sizeof(pfx) - 1] = '_';

	if (stringset == ETH_SS_PHY_STATS && dev->phydev &&
	    !ops->get_ethtool_phy_stats) {
		mcount = phy_ethtool_get_sset_count(dev->phydev);
		if (mcount < 0)
			mcount = 0;
		else
			phy_ethtool_get_strings(dev->phydev, data);
	} else if (ops->get_sset_count && ops->get_strings) {
		mcount = ops->get_sset_count(dev, stringset);
		if (mcount < 0)
			mcount = 0;
		ops->get_strings(dev, stringset, data);
	}

	if (ds->ops->get_strings) {
		ndata = data + mcount * len;
		/* This function copies ETH_GSTRING_LEN bytes per string; the
		 * output is then mangled to prepend the CPU port prefix
		 * constructed earlier.
		 */
		ds->ops->get_strings(ds, port, stringset, ndata);
		count = ds->ops->get_sset_count(ds, port, stringset);
		if (count < 0)
			return;
		for (i = 0; i < count; i++) {
			memmove(ndata + (i * len + sizeof(pfx)),
				ndata + i * len, len - sizeof(pfx));
			memcpy(ndata + i * len, pfx, sizeof(pfx));
		}
	}
}

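/* Hardware timestamping ioctls are refused on the master (with -EBUSY) when
 * any switch in the tree can timestamp the requested packets, so that PTP is
 * handled by the switch ports rather than by the master NIC.
 */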
static int dsa_master_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct dsa_switch *ds = cpu_dp->ds;
	struct dsa_switch_tree *dst;
	int err = -EOPNOTSUPP;
	struct dsa_port *dp;

	dst = ds->dst;

	switch (cmd) {
	case SIOCGHWTSTAMP:
	case SIOCSHWTSTAMP:
		/* Deny PTP operations on the master if there is at least one
		 * switch in the tree that is PTP capable.
		 */
		list_for_each_entry(dp, &dst->ports, list)
			if (dsa_port_supports_hwtstamp(dp, ifr))
				return -EBUSY;
		break;
	}

	if (dev->netdev_ops->ndo_eth_ioctl)
		err = dev->netdev_ops->ndo_eth_ioctl(dev, ifr, cmd);

	return err;
}

static const struct dsa_netdevice_ops dsa_netdev_ops = {
	.ndo_eth_ioctl = dsa_master_ioctl,
};

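/* Install the DSA ethtool shim: clone the master's original ethtool_ops,
 * override the statistics/registers/strings callbacks with the wrappers
 * above, and remember the original ops for teardown. LAG masters keep their
 * original ethtool_ops untouched.
 */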
static int dsa_master_ethtool_setup(struct net_device *dev)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct dsa_switch *ds = cpu_dp->ds;
	struct ethtool_ops *ops;

	if (netif_is_lag_master(dev))
		return 0;

	ops = devm_kzalloc(ds->dev, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	cpu_dp->orig_ethtool_ops = dev->ethtool_ops;
	if (cpu_dp->orig_ethtool_ops)
		memcpy(ops, cpu_dp->orig_ethtool_ops, sizeof(*ops));

	ops->get_regs_len = dsa_master_get_regs_len;
	ops->get_regs = dsa_master_get_regs;
	ops->get_sset_count = dsa_master_get_sset_count;
	ops->get_ethtool_stats = dsa_master_get_ethtool_stats;
	ops->get_strings = dsa_master_get_strings;
	ops->get_ethtool_phy_stats = dsa_master_get_ethtool_phy_stats;

	dev->ethtool_ops = ops;

	return 0;
}

static void dsa_master_ethtool_teardown(struct net_device *dev)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;

	if (netif_is_lag_master(dev))
		return;

	dev->ethtool_ops = cpu_dp->orig_ethtool_ops;
	cpu_dp->orig_ethtool_ops = NULL;
}

static void dsa_netdev_ops_set(struct net_device *dev,
			       const struct dsa_netdevice_ops *ops)
{
	if (netif_is_lag_master(dev))
		return;

	dev->dsa_ptr->netdev_ops = ops;
}

/* Keep the master always promiscuous if the tagging protocol requires it
 * (it garbles the MAC DA) or if the master doesn't support unicast filtering,
 * in which case it would revert to promiscuous mode as soon as we call
 * dev_uc_add() on it anyway.
 */
static void dsa_master_set_promiscuity(struct net_device *dev, int inc)
{
	const struct dsa_device_ops *ops = dev->dsa_ptr->tag_ops;

	if ((dev->priv_flags & IFF_UNICAST_FLT) && !ops->promisc_on_master)
		return;

	ASSERT_RTNL();

	dev_set_promiscuity(dev, inc);
}

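/* The "tagging" attribute is exposed in the master's sysfs "dsa" group.
 * Reading it returns the name of the tagging protocol currently in use;
 * writing a tagger name switches the whole tree to that protocol. For
 * example (interface and tagger names depend on the system):
 *
 *   cat /sys/class/net/eth0/dsa/tagging
 *   echo ocelot > /sys/class/net/eth0/dsa/tagging
 */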
static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *dev = to_net_dev(d);
	struct dsa_port *cpu_dp = dev->dsa_ptr;

	return sysfs_emit(buf, "%s\n",
		       dsa_tag_protocol_to_str(cpu_dp->tag_ops));
}

static ssize_t tagging_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	const struct dsa_device_ops *new_tag_ops, *old_tag_ops;
	const char *end = strchrnul(buf, '\n'), *name;
	struct net_device *dev = to_net_dev(d);
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	size_t len = end - buf;
	int err;

	/* Empty string passed */
	if (!len)
		return -ENOPROTOOPT;

	name = kstrndup(buf, len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	old_tag_ops = cpu_dp->tag_ops;
	new_tag_ops = dsa_tag_driver_get_by_name(name);
	kfree(name);
	/* Bad tagger name? */
	if (IS_ERR(new_tag_ops))
		return PTR_ERR(new_tag_ops);

	if (new_tag_ops == old_tag_ops)
		/* Drop the temporarily held duplicate reference, since
		 * the DSA switch tree uses this tagger.
		 */
		goto out;

	err = dsa_tree_change_tag_proto(cpu_dp->ds->dst, new_tag_ops,
					old_tag_ops);
	if (err) {
		/* On failure the old tagger is restored, so we don't need the
		 * driver for the new one.
		 */
		dsa_tag_driver_put(new_tag_ops);
		return err;
	}

	/* On success we no longer need the module for the old tagging
	 * protocol.
	 */
out:
	dsa_tag_driver_put(old_tag_ops);
	return count;
}
static DEVICE_ATTR_RW(tagging);

static struct attribute *dsa_slave_attrs[] = {
	&dev_attr_tagging.attr,
	NULL
};

static const struct attribute_group dsa_group = {
	.name	= "dsa",
	.attrs	= dsa_slave_attrs,
};

static void dsa_master_reset_mtu(struct net_device *dev)
{
	int err;

	err = dev_set_mtu(dev, ETH_DATA_LEN);
	if (err)
		netdev_dbg(dev,
			   "Unable to reset MTU to exclude DSA overheads\n");
}

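/* Bind a master netdevice to its CPU port: create a device link to the
 * switch, raise the master MTU to make room for the tagging overhead,
 * publish dev->dsa_ptr so the receive path starts treating frames as tagged,
 * force promiscuity when the tagger needs it, and install the ethtool shim,
 * the netdev_ops hook and the sysfs group.
 */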
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
{
	const struct dsa_device_ops *tag_ops = cpu_dp->tag_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	struct device_link *consumer_link;
	int mtu, ret;

	mtu = ETH_DATA_LEN + dsa_tag_protocol_overhead(tag_ops);

	/* The DSA master must use SET_NETDEV_DEV for this to work. */
	if (!netif_is_lag_master(dev)) {
		consumer_link = device_link_add(ds->dev, dev->dev.parent,
						DL_FLAG_AUTOREMOVE_CONSUMER);
		if (!consumer_link)
			netdev_err(dev,
				   "Failed to create a device link to DSA switch %s\n",
				   dev_name(ds->dev));
	}

	/* The switch driver may not implement ->port_change_mtu(), in which
	 * case dsa_slave_change_mtu() will not update the master MTU either,
	 * so we need to do that here.
	 */
	ret = dev_set_mtu(dev, mtu);
	if (ret)
		netdev_warn(dev, "error %d setting MTU to %d to include DSA overhead\n",
			    ret, mtu);

	/* If we use a tagging format that doesn't have an ethertype
	 * field, make sure that all packets from this point on get
	 * sent to the tag format's receive function.
	 */
	wmb();

	dev->dsa_ptr = cpu_dp;

	dsa_master_set_promiscuity(dev, 1);

	ret = dsa_master_ethtool_setup(dev);
	if (ret)
		goto out_err_reset_promisc;

	dsa_netdev_ops_set(dev, &dsa_netdev_ops);

	ret = sysfs_create_group(&dev->dev.kobj, &dsa_group);
	if (ret)
		goto out_err_ndo_teardown;

	return ret;

out_err_ndo_teardown:
	dsa_netdev_ops_set(dev, NULL);
	dsa_master_ethtool_teardown(dev);
out_err_reset_promisc:
	dsa_master_set_promiscuity(dev, -1);
	return ret;
}

void dsa_master_teardown(struct net_device *dev)
{
	sysfs_remove_group(&dev->dev.kobj, &dsa_group);
	dsa_netdev_ops_set(dev, NULL);
	dsa_master_ethtool_teardown(dev);
	dsa_master_reset_mtu(dev);
	dsa_master_set_promiscuity(dev, -1);

	dev->dsa_ptr = NULL;

	/* If we used a tagging format that doesn't have an ethertype
	 * field, make sure that all packets from this point get sent
	 * without the tag and go through the regular receive path.
	 */
	wmb();
}

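/* A LAG device acting as a DSA master is set up only once, when the first
 * CPU port joins it; subsequent CPU ports simply join the LAG.
 */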
int dsa_master_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp,
			 struct netdev_lag_upper_info *uinfo,
			 struct netlink_ext_ack *extack)
{
	bool master_setup = false;
	int err;

	if (!netdev_uses_dsa(lag_dev)) {
		err = dsa_master_setup(lag_dev, cpu_dp);
		if (err)
			return err;

		master_setup = true;
	}

	err = dsa_port_lag_join(cpu_dp, lag_dev, uinfo, extack);
	if (err) {
		NL_SET_ERR_MSG_WEAK_MOD(extack, "CPU port failed to join LAG");
		goto out_master_teardown;
	}

	return 0;

out_master_teardown:
	if (master_setup)
		dsa_master_teardown(lag_dev);
	return err;
}

/* Tear down a master if there isn't any other user port on it,
 * optionally also destroying LAG information.
 */
void dsa_master_lag_teardown(struct net_device *lag_dev,
			     struct dsa_port *cpu_dp)
{
	struct net_device *upper;
	struct list_head *iter;

	dsa_port_lag_leave(cpu_dp, lag_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper, iter)
		if (dsa_slave_dev_check(upper))
			return;

	dsa_master_teardown(lag_dev);
}
496