// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a master device, switching frames via its switch fabric CPU port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <net/dsa.h>

#include "dsa.h"
#include "master.h"
#include "port.h"
#include "tag.h"

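/* The register dump exposed for a DSA master is a composite blob: the
 * master's own dump (if it provides one), then a struct ethtool_drvinfo
 * marker with driver "dsa", a struct ethtool_regs header, and finally the
 * register dump of the switch's CPU port. This helper reports its total size.
 */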
static int dsa_master_get_regs_len(struct net_device *dev)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int port = cpu_dp->index;
	int ret = 0;
	int len;

	if (ops->get_regs_len) {
		len = ops->get_regs_len(dev);
		if (len < 0)
			return len;
		ret += len;
	}

	ret += sizeof(struct ethtool_drvinfo);
	ret += sizeof(struct ethtool_regs);

	if (ds->ops->get_regs_len) {
		len = ds->ops->get_regs_len(ds, port);
		if (len < 0)
			return len;
		ret += len;
	}

	return ret;
}

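/* Fill in the composite register dump whose size is reported by
 * dsa_master_get_regs_len() above.
 */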
static void dsa_master_get_regs(struct net_device *dev,
				struct ethtool_regs *regs, void *data)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	struct ethtool_drvinfo *cpu_info;
	struct ethtool_regs *cpu_regs;
	int port = cpu_dp->index;
	int len;

	if (ops->get_regs_len && ops->get_regs) {
		len = ops->get_regs_len(dev);
		if (len < 0)
			return;
		regs->len = len;
		ops->get_regs(dev, regs, data);
		data += regs->len;
	}

	cpu_info = (struct ethtool_drvinfo *)data;
	strscpy(cpu_info->driver, "dsa", sizeof(cpu_info->driver));
	data += sizeof(*cpu_info);
	cpu_regs = (struct ethtool_regs *)data;
	data += sizeof(*cpu_regs);

	if (ds->ops->get_regs_len && ds->ops->get_regs) {
		len = ds->ops->get_regs_len(ds, port);
		if (len < 0)
			return;
		cpu_regs->len = len;
		ds->ops->get_regs(ds, port, cpu_regs, data);
	}
}

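/* Report the master's own ethtool statistics first, followed by the switch
 * driver's statistics for the CPU port.
 */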
static void dsa_master_get_ethtool_stats(struct net_device *dev,
					 struct ethtool_stats *stats,
					 uint64_t *data)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int port = cpu_dp->index;
	int count = 0;

	if (ops->get_sset_count && ops->get_ethtool_stats) {
		count = ops->get_sset_count(dev, ETH_SS_STATS);
		ops->get_ethtool_stats(dev, stats, data);
	}

	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, port, data + count);
}

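/* Same layout for PHY statistics: the master's (or its PHY's) counters come
 * first, then the switch driver's PHY counters for the CPU port.
 */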
static void dsa_master_get_ethtool_phy_stats(struct net_device *dev,
					     struct ethtool_stats *stats,
					     uint64_t *data)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int port = cpu_dp->index;
	int count = 0;

	if (dev->phydev && !ops->get_ethtool_phy_stats) {
		count = phy_ethtool_get_sset_count(dev->phydev);
		if (count >= 0)
			phy_ethtool_get_stats(dev->phydev, stats, data);
	} else if (ops->get_sset_count && ops->get_ethtool_phy_stats) {
		count = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
		ops->get_ethtool_phy_stats(dev, stats, data);
	}

	if (count < 0)
		count = 0;

	if (ds->ops->get_ethtool_phy_stats)
		ds->ops->get_ethtool_phy_stats(ds, port, data + count);
}

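/* The string set count seen through the master is the sum of the master's
 * own count and the switch driver's count for the CPU port.
 */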
static int dsa_master_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int count = 0;

	if (sset == ETH_SS_PHY_STATS && dev->phydev &&
	    !ops->get_ethtool_phy_stats)
		count = phy_ethtool_get_sset_count(dev->phydev);
	else if (ops->get_sset_count)
		count = ops->get_sset_count(dev, sset);

	if (count < 0)
		count = 0;

	if (ds->ops->get_sset_count)
		count += ds->ops->get_sset_count(ds, cpu_dp->index, sset);

	return count;
}

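/* Append the switch driver's stat strings after the master's own, prefixing
 * them with "pXX_" (XX being the CPU port index) so they can be told apart.
 * For example (counter name is illustrative), a switch counter "tx_packets"
 * on CPU port 5 would show up as "p05_tx_packets" in the master's
 * "ethtool -S" output.
 */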
static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
				   uint8_t *data)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int port = cpu_dp->index;
	int len = ETH_GSTRING_LEN;
	int mcount = 0, count, i;
	uint8_t pfx[4];
	uint8_t *ndata;

	snprintf(pfx, sizeof(pfx), "p%.2d", port);
	/* We do not want to be NUL-terminated, since this is a prefix */
	pfx[sizeof(pfx) - 1] = '_';

	if (stringset == ETH_SS_PHY_STATS && dev->phydev &&
	    !ops->get_ethtool_phy_stats) {
		mcount = phy_ethtool_get_sset_count(dev->phydev);
		if (mcount < 0)
			mcount = 0;
		else
			phy_ethtool_get_strings(dev->phydev, data);
	} else if (ops->get_sset_count && ops->get_strings) {
		mcount = ops->get_sset_count(dev, stringset);
		if (mcount < 0)
			mcount = 0;
		ops->get_strings(dev, stringset, data);
	}

	if (ds->ops->get_strings) {
		ndata = data + mcount * len;
		/* This function copies ETH_GSTRING_LEN bytes per string; we
		 * mangle the output afterwards to prepend the CPU port prefix
		 * constructed above.
		 */
		ds->ops->get_strings(ds, port, stringset, ndata);
		count = ds->ops->get_sset_count(ds, port, stringset);
		if (count < 0)
			return;
		for (i = 0; i < count; i++) {
			memmove(ndata + (i * len + sizeof(pfx)),
				ndata + i * len, len - sizeof(pfx));
			memcpy(ndata + i * len, pfx, sizeof(pfx));
		}
	}
}

/* Deny PTP operations on master if there is at least one switch in the tree
 * that is PTP capable.
 */
int __dsa_master_hwtstamp_validate(struct net_device *dev,
				   const struct kernel_hwtstamp_config *config,
				   struct netlink_ext_ack *extack)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct dsa_switch *ds = cpu_dp->ds;
	struct dsa_switch_tree *dst;
	struct dsa_port *dp;

	dst = ds->dst;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_supports_hwtstamp(dp)) {
			NL_SET_ERR_MSG(extack,
				       "HW timestamping not allowed on DSA master when switch supports the operation");
			return -EBUSY;
		}
	}

	return 0;
}

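/* Overlay the master's ethtool_ops with the DSA wrappers above, keeping a
 * pointer to the original ops in cpu_dp->orig_ethtool_ops so they can still
 * be called and later restored by dsa_master_ethtool_teardown().
 */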
static int dsa_master_ethtool_setup(struct net_device *dev)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct dsa_switch *ds = cpu_dp->ds;
	struct ethtool_ops *ops;

	if (netif_is_lag_master(dev))
		return 0;

	ops = devm_kzalloc(ds->dev, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	cpu_dp->orig_ethtool_ops = dev->ethtool_ops;
	if (cpu_dp->orig_ethtool_ops)
		memcpy(ops, cpu_dp->orig_ethtool_ops, sizeof(*ops));

	ops->get_regs_len = dsa_master_get_regs_len;
	ops->get_regs = dsa_master_get_regs;
	ops->get_sset_count = dsa_master_get_sset_count;
	ops->get_ethtool_stats = dsa_master_get_ethtool_stats;
	ops->get_strings = dsa_master_get_strings;
	ops->get_ethtool_phy_stats = dsa_master_get_ethtool_phy_stats;

	dev->ethtool_ops = ops;

	return 0;
}

static void dsa_master_ethtool_teardown(struct net_device *dev)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;

	if (netif_is_lag_master(dev))
		return;

	dev->ethtool_ops = cpu_dp->orig_ethtool_ops;
	cpu_dp->orig_ethtool_ops = NULL;
}

/* Keep the master always promiscuous if the tagging protocol requires that
 * (garbles MAC DA) or if it doesn't support unicast filtering, in which case
 * it would revert to promiscuous mode as soon as we call dev_uc_add() on it
 * anyway.
 */
static void dsa_master_set_promiscuity(struct net_device *dev, int inc)
{
	const struct dsa_device_ops *ops = dev->dsa_ptr->tag_ops;

	if ((dev->priv_flags & IFF_UNICAST_FLT) && !ops->promisc_on_master)
		return;

	ASSERT_RTNL();

	dev_set_promiscuity(dev, inc);
}

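/* "tagging" sysfs attribute, exposed under the "dsa" group attached to the
 * master net device. Reading it returns the name of the tagging protocol in
 * use; writing a tagger name switches the whole switch tree to that protocol.
 * Illustrative usage (interface and tagger name depend on the system):
 *
 *   cat /sys/class/net/eth0/dsa/tagging
 *   echo edsa > /sys/class/net/eth0/dsa/tagging
 */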
static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *dev = to_net_dev(d);
	struct dsa_port *cpu_dp = dev->dsa_ptr;

	return sysfs_emit(buf, "%s\n",
			  dsa_tag_protocol_to_str(cpu_dp->tag_ops));
}

static ssize_t tagging_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	const struct dsa_device_ops *new_tag_ops, *old_tag_ops;
	const char *end = strchrnul(buf, '\n'), *name;
	struct net_device *dev = to_net_dev(d);
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	size_t len = end - buf;
	int err;

	/* Empty string passed */
	if (!len)
		return -ENOPROTOOPT;

	name = kstrndup(buf, len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	old_tag_ops = cpu_dp->tag_ops;
	new_tag_ops = dsa_tag_driver_get_by_name(name);
	kfree(name);
	/* Bad tagger name? */
	if (IS_ERR(new_tag_ops))
		return PTR_ERR(new_tag_ops);

	if (new_tag_ops == old_tag_ops)
		/* Drop the temporarily held duplicate reference, since
		 * the DSA switch tree uses this tagger.
		 */
		goto out;

	err = dsa_tree_change_tag_proto(cpu_dp->ds->dst, new_tag_ops,
					old_tag_ops);
	if (err) {
		/* On failure the old tagger is restored, so we don't need the
		 * driver for the new one.
		 */
		dsa_tag_driver_put(new_tag_ops);
		return err;
	}

	/* On success we no longer need the module for the old tagging protocol
	 */
out:
	dsa_tag_driver_put(old_tag_ops);
	return count;
}
static DEVICE_ATTR_RW(tagging);

static struct attribute *dsa_slave_attrs[] = {
	&dev_attr_tagging.attr,
	NULL
};

static const struct attribute_group dsa_group = {
	.name	= "dsa",
	.attrs	= dsa_slave_attrs,
};

static void dsa_master_reset_mtu(struct net_device *dev)
{
	int err;

	err = dev_set_mtu(dev, ETH_DATA_LEN);
	if (err)
		netdev_dbg(dev,
			   "Unable to reset MTU to exclude DSA overheads\n");
}

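/* Turn a regular net device into a DSA master: bump its MTU to accommodate
 * the tagging overhead, mark it promiscuous if the tagger needs that, install
 * the ethtool wrappers and create the "dsa" sysfs group. Assigning
 * dev->dsa_ptr is what makes the stack treat the device as a DSA master and
 * steer received frames to the tagging protocol's receive function (see the
 * wmb() comment below).
 */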
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
{
	const struct dsa_device_ops *tag_ops = cpu_dp->tag_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	struct device_link *consumer_link;
	int mtu, ret;

	mtu = ETH_DATA_LEN + dsa_tag_protocol_overhead(tag_ops);

	/* The DSA master must use SET_NETDEV_DEV for this to work. */
	if (!netif_is_lag_master(dev)) {
		consumer_link = device_link_add(ds->dev, dev->dev.parent,
						DL_FLAG_AUTOREMOVE_CONSUMER);
		if (!consumer_link)
			netdev_err(dev,
				   "Failed to create a device link to DSA switch %s\n",
				   dev_name(ds->dev));
	}

	/* The switch driver may not implement ->port_change_mtu(), in which
	 * case dsa_slave_change_mtu() will not update the master MTU either,
	 * so we need to do that here.
	 */
	ret = dev_set_mtu(dev, mtu);
	if (ret)
		netdev_warn(dev, "error %d setting MTU to %d to include DSA overhead\n",
			    ret, mtu);

	/* If we use a tagging format that doesn't have an ethertype
	 * field, make sure that all packets from this point on get
	 * sent to the tag format's receive function.
	 */
	wmb();

	dev->dsa_ptr = cpu_dp;

	dsa_master_set_promiscuity(dev, 1);

	ret = dsa_master_ethtool_setup(dev);
	if (ret)
		goto out_err_reset_promisc;

	ret = sysfs_create_group(&dev->dev.kobj, &dsa_group);
	if (ret)
		goto out_err_ethtool_teardown;

	return ret;

out_err_ethtool_teardown:
	dsa_master_ethtool_teardown(dev);
out_err_reset_promisc:
	dsa_master_set_promiscuity(dev, -1);
	return ret;
}

void dsa_master_teardown(struct net_device *dev)
{
	sysfs_remove_group(&dev->dev.kobj, &dsa_group);
	dsa_master_ethtool_teardown(dev);
	dsa_master_reset_mtu(dev);
	dsa_master_set_promiscuity(dev, -1);

	dev->dsa_ptr = NULL;

	/* If we used a tagging format that doesn't have an ethertype
	 * field, make sure that all packets from this point get sent
	 * without the tag and go through the regular receive path.
	 */
	wmb();
}

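/* Set up a LAG net device (e.g. a bonding or team device) as a DSA master:
 * first turn the LAG device itself into a master if it isn't one already,
 * then let the CPU port join the LAG.
 */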
int dsa_master_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp,
			 struct netdev_lag_upper_info *uinfo,
			 struct netlink_ext_ack *extack)
{
	bool master_setup = false;
	int err;

	if (!netdev_uses_dsa(lag_dev)) {
		err = dsa_master_setup(lag_dev, cpu_dp);
		if (err)
			return err;

		master_setup = true;
	}

	err = dsa_port_lag_join(cpu_dp, lag_dev, uinfo, extack);
	if (err) {
		NL_SET_ERR_MSG_WEAK_MOD(extack, "CPU port failed to join LAG");
		goto out_master_teardown;
	}

	return 0;

out_master_teardown:
	if (master_setup)
		dsa_master_teardown(lag_dev);
	return err;
}

/* Tear down a master if there isn't any other user port on it,
 * optionally also destroying LAG information.
 */
void dsa_master_lag_teardown(struct net_device *lag_dev,
			     struct dsa_port *cpu_dp)
{
	struct net_device *upper;
	struct list_head *iter;

	dsa_port_lag_leave(cpu_dp, lag_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper, iter)
		if (dsa_slave_dev_check(upper))
			return;

	dsa_master_teardown(lag_dev);
}