// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a master device, switching frames via its switch fabric CPU port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include "dsa_priv.h"

static int dsa_master_get_regs_len(struct net_device *dev)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int port = cpu_dp->index;
	int ret = 0;
	int len;

	if (ops->get_regs_len) {
		len = ops->get_regs_len(dev);
		if (len < 0)
			return len;
		ret += len;
	}

	ret += sizeof(struct ethtool_drvinfo);
	ret += sizeof(struct ethtool_regs);

	if (ds->ops->get_regs_len) {
		len = ds->ops->get_regs_len(ds, port);
		if (len < 0)
			return len;
		ret += len;
	}

	return ret;
}

static void dsa_master_get_regs(struct net_device *dev,
				struct ethtool_regs *regs, void *data)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	struct ethtool_drvinfo *cpu_info;
	struct ethtool_regs *cpu_regs;
	int port = cpu_dp->index;
	int len;

	if (ops->get_regs_len && ops->get_regs) {
		len = ops->get_regs_len(dev);
		if (len < 0)
			return;
		regs->len = len;
		ops->get_regs(dev, regs, data);
		data += regs->len;
	}

	cpu_info = (struct ethtool_drvinfo *)data;
	strscpy(cpu_info->driver, "dsa", sizeof(cpu_info->driver));
	data += sizeof(*cpu_info);
	cpu_regs = (struct ethtool_regs *)data;
	data += sizeof(*cpu_regs);

	if (ds->ops->get_regs_len && ds->ops->get_regs) {
		len = ds->ops->get_regs_len(ds, port);
		if (len < 0)
			return;
		cpu_regs->len = len;
		ds->ops->get_regs(ds, port, cpu_regs, data);
	}
}
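
/* The ethtool getters below report the master's own statistics followed by
 * those of the CPU port behind it: get_sset_count() sums both contributions,
 * get_strings() appends the switch's strings with a "p%02d_" port prefix, and
 * the stats getters write the switch's values right after the master's, at
 * offset 'count'.
 */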
static void dsa_master_get_ethtool_stats(struct net_device *dev,
					 struct ethtool_stats *stats,
					 uint64_t *data)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int port = cpu_dp->index;
	int count = 0;

	if (ops->get_sset_count && ops->get_ethtool_stats) {
		count = ops->get_sset_count(dev, ETH_SS_STATS);
		ops->get_ethtool_stats(dev, stats, data);
	}

	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, port, data + count);
}

static void dsa_master_get_ethtool_phy_stats(struct net_device *dev,
					     struct ethtool_stats *stats,
					     uint64_t *data)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int port = cpu_dp->index;
	int count = 0;

	if (dev->phydev && !ops->get_ethtool_phy_stats) {
		count = phy_ethtool_get_sset_count(dev->phydev);
		if (count >= 0)
			phy_ethtool_get_stats(dev->phydev, stats, data);
	} else if (ops->get_sset_count && ops->get_ethtool_phy_stats) {
		count = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
		ops->get_ethtool_phy_stats(dev, stats, data);
	}

	if (count < 0)
		count = 0;

	if (ds->ops->get_ethtool_phy_stats)
		ds->ops->get_ethtool_phy_stats(ds, port, data + count);
}

static int dsa_master_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int count = 0;

	if (sset == ETH_SS_PHY_STATS && dev->phydev &&
	    !ops->get_ethtool_phy_stats)
		count = phy_ethtool_get_sset_count(dev->phydev);
	else if (ops->get_sset_count)
		count = ops->get_sset_count(dev, sset);

	if (count < 0)
		count = 0;

	if (ds->ops->get_sset_count)
		count += ds->ops->get_sset_count(ds, cpu_dp->index, sset);

	return count;
}

static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
				   uint8_t *data)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int port = cpu_dp->index;
	int len = ETH_GSTRING_LEN;
	int mcount = 0, count, i;
	uint8_t pfx[4];
	uint8_t *ndata;

	snprintf(pfx, sizeof(pfx), "p%.2d", port);
	/* We do not want to be NULL-terminated, since this is a prefix */
	pfx[sizeof(pfx) - 1] = '_';

	if (stringset == ETH_SS_PHY_STATS && dev->phydev &&
	    !ops->get_ethtool_phy_stats) {
		mcount = phy_ethtool_get_sset_count(dev->phydev);
		if (mcount < 0)
			mcount = 0;
		else
			phy_ethtool_get_strings(dev->phydev, data);
	} else if (ops->get_sset_count && ops->get_strings) {
		mcount = ops->get_sset_count(dev, stringset);
		if (mcount < 0)
			mcount = 0;
		ops->get_strings(dev, stringset, data);
	}

	if (ds->ops->get_strings) {
		ndata = data + mcount * len;
		/* This function copies ETH_GSTRING_LEN bytes, we will mangle
		 * the output after to prepend our CPU port prefix we
		 * constructed earlier
		 */
		ds->ops->get_strings(ds, port, stringset, ndata);
		count = ds->ops->get_sset_count(ds, port, stringset);
		if (count < 0)
			return;
		for (i = 0; i < count; i++) {
			memmove(ndata + (i * len + sizeof(pfx)),
				ndata + i * len, len - sizeof(pfx));
			memcpy(ndata + i * len, pfx, sizeof(pfx));
		}
	}
}
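
/* Hardware timestamping ioctls are refused on the master as long as at least
 * one switch in the tree implements port_hwtstamp_get() or
 * port_hwtstamp_set(), so that timestamping is done on the switch ports
 * instead; every other ioctl is passed through to the master's own
 * ndo_eth_ioctl().
 */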
static int dsa_master_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct dsa_switch *ds = cpu_dp->ds;
	struct dsa_switch_tree *dst;
	int err = -EOPNOTSUPP;
	struct dsa_port *dp;

	dst = ds->dst;

	switch (cmd) {
	case SIOCGHWTSTAMP:
	case SIOCSHWTSTAMP:
		/* Deny PTP operations on master if there is at least one
		 * switch in the tree that is PTP capable.
		 */
		list_for_each_entry(dp, &dst->ports, list)
			if (dp->ds->ops->port_hwtstamp_get ||
			    dp->ds->ops->port_hwtstamp_set)
				return -EBUSY;
		break;
	}

	if (dev->netdev_ops->ndo_eth_ioctl)
		err = dev->netdev_ops->ndo_eth_ioctl(dev, ifr, cmd);

	return err;
}

static const struct dsa_netdevice_ops dsa_netdev_ops = {
	.ndo_eth_ioctl = dsa_master_ioctl,
};

static int dsa_master_ethtool_setup(struct net_device *dev)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct dsa_switch *ds = cpu_dp->ds;
	struct ethtool_ops *ops;

	if (netif_is_lag_master(dev))
		return 0;

	ops = devm_kzalloc(ds->dev, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	cpu_dp->orig_ethtool_ops = dev->ethtool_ops;
	if (cpu_dp->orig_ethtool_ops)
		memcpy(ops, cpu_dp->orig_ethtool_ops, sizeof(*ops));

	ops->get_regs_len = dsa_master_get_regs_len;
	ops->get_regs = dsa_master_get_regs;
	ops->get_sset_count = dsa_master_get_sset_count;
	ops->get_ethtool_stats = dsa_master_get_ethtool_stats;
	ops->get_strings = dsa_master_get_strings;
	ops->get_ethtool_phy_stats = dsa_master_get_ethtool_phy_stats;

	dev->ethtool_ops = ops;

	return 0;
}

static void dsa_master_ethtool_teardown(struct net_device *dev)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;

	if (netif_is_lag_master(dev))
		return;

	dev->ethtool_ops = cpu_dp->orig_ethtool_ops;
	cpu_dp->orig_ethtool_ops = NULL;
}

static void dsa_netdev_ops_set(struct net_device *dev,
			       const struct dsa_netdevice_ops *ops)
{
	if (netif_is_lag_master(dev))
		return;

	dev->dsa_ptr->netdev_ops = ops;
}

/* Keep the master always promiscuous if the tagging protocol requires that
 * (garbles MAC DA) or if it doesn't support unicast filtering, in which case
 * it would revert to promiscuous mode as soon as we call dev_uc_add() on it
 * anyway.
 */
static void dsa_master_set_promiscuity(struct net_device *dev, int inc)
{
	const struct dsa_device_ops *ops = dev->dsa_ptr->tag_ops;

	if ((dev->priv_flags & IFF_UNICAST_FLT) && !ops->promisc_on_master)
		return;

	ASSERT_RTNL();

	dev_set_promiscuity(dev, inc);
}
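
/* The "tagging" attribute lives in the master's "dsa" sysfs group (typically
 * /sys/class/net/<master>/dsa/tagging): reading it returns the name of the
 * tagging protocol currently in use, and writing a tagger name to it asks the
 * whole switch tree to migrate to that protocol.
 */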
static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *dev = to_net_dev(d);
	struct dsa_port *cpu_dp = dev->dsa_ptr;

	return sprintf(buf, "%s\n",
		       dsa_tag_protocol_to_str(cpu_dp->tag_ops));
}

static ssize_t tagging_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	const struct dsa_device_ops *new_tag_ops, *old_tag_ops;
	struct net_device *dev = to_net_dev(d);
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	int err;

	old_tag_ops = cpu_dp->tag_ops;
	new_tag_ops = dsa_find_tagger_by_name(buf);
	/* Bad tagger name, or module is not loaded? */
	if (IS_ERR(new_tag_ops))
		return PTR_ERR(new_tag_ops);

	if (new_tag_ops == old_tag_ops)
		/* Drop the temporarily held duplicate reference, since
		 * the DSA switch tree uses this tagger.
		 */
		goto out;

	err = dsa_tree_change_tag_proto(cpu_dp->ds->dst, new_tag_ops,
					old_tag_ops);
	if (err) {
		/* On failure the old tagger is restored, so we don't need the
		 * driver for the new one.
		 */
		dsa_tag_driver_put(new_tag_ops);
		return err;
	}

	/* On success we no longer need the module for the old tagging protocol
	 */
out:
	dsa_tag_driver_put(old_tag_ops);
	return count;
}
static DEVICE_ATTR_RW(tagging);

static struct attribute *dsa_slave_attrs[] = {
	&dev_attr_tagging.attr,
	NULL
};

static const struct attribute_group dsa_group = {
	.name	= "dsa",
	.attrs	= dsa_slave_attrs,
};

static void dsa_master_reset_mtu(struct net_device *dev)
{
	int err;

	err = dev_set_mtu(dev, ETH_DATA_LEN);
	if (err)
		netdev_dbg(dev,
			   "Unable to reset MTU to exclude DSA overheads\n");
}
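
/* Bind a master netdevice to the CPU port it is wired to: register a device
 * link between the switch and the master's parent device, grow the master's
 * MTU by the tag protocol overhead, publish the CPU port through dev->dsa_ptr
 * so that received frames are steered to the tagger, make the master
 * promiscuous if the tagger needs that, and overlay the DSA ethtool ops,
 * the DSA ndo_eth_ioctl handler and the "dsa" sysfs group on top of it.
 */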
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
{
	const struct dsa_device_ops *tag_ops = cpu_dp->tag_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	struct device_link *consumer_link;
	int mtu, ret;

	mtu = ETH_DATA_LEN + dsa_tag_protocol_overhead(tag_ops);

	/* The DSA master must use SET_NETDEV_DEV for this to work. */
	if (!netif_is_lag_master(dev)) {
		consumer_link = device_link_add(ds->dev, dev->dev.parent,
						DL_FLAG_AUTOREMOVE_CONSUMER);
		if (!consumer_link)
			netdev_err(dev,
				   "Failed to create a device link to DSA switch %s\n",
				   dev_name(ds->dev));
	}

	/* The switch driver may not implement ->port_change_mtu(), in which
	 * case dsa_slave_change_mtu() will not update the master MTU either,
	 * so we need to do that here.
	 */
	ret = dev_set_mtu(dev, mtu);
	if (ret)
		netdev_warn(dev, "error %d setting MTU to %d to include DSA overhead\n",
			    ret, mtu);

	/* If we use a tagging format that doesn't have an ethertype
	 * field, make sure that all packets from this point on get
	 * sent to the tag format's receive function.
	 */
	wmb();

	dev->dsa_ptr = cpu_dp;

	dsa_master_set_promiscuity(dev, 1);

	ret = dsa_master_ethtool_setup(dev);
	if (ret)
		goto out_err_reset_promisc;

	dsa_netdev_ops_set(dev, &dsa_netdev_ops);

	ret = sysfs_create_group(&dev->dev.kobj, &dsa_group);
	if (ret)
		goto out_err_ndo_teardown;

	return ret;

out_err_ndo_teardown:
	dsa_netdev_ops_set(dev, NULL);
	dsa_master_ethtool_teardown(dev);
out_err_reset_promisc:
	dsa_master_set_promiscuity(dev, -1);
	return ret;
}

void dsa_master_teardown(struct net_device *dev)
{
	sysfs_remove_group(&dev->dev.kobj, &dsa_group);
	dsa_netdev_ops_set(dev, NULL);
	dsa_master_ethtool_teardown(dev);
	dsa_master_reset_mtu(dev);
	dsa_master_set_promiscuity(dev, -1);

	dev->dsa_ptr = NULL;

	/* If we used a tagging format that doesn't have an ethertype
	 * field, make sure that all packets from this point get sent
	 * without the tag and go through the regular receive path.
	 */
	wmb();
}

int dsa_master_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp,
			 struct netdev_lag_upper_info *uinfo,
			 struct netlink_ext_ack *extack)
{
	bool master_setup = false;
	int err;

	if (!netdev_uses_dsa(lag_dev)) {
		err = dsa_master_setup(lag_dev, cpu_dp);
		if (err)
			return err;

		master_setup = true;
	}

	err = dsa_port_lag_join(cpu_dp, lag_dev, uinfo, extack);
	if (err) {
		if (extack && !extack->_msg)
			NL_SET_ERR_MSG_MOD(extack,
					   "CPU port failed to join LAG");
		goto out_master_teardown;
	}

	return 0;

out_master_teardown:
	if (master_setup)
		dsa_master_teardown(lag_dev);
	return err;
}

/* Tear down a master if there isn't any other user port on it,
 * optionally also destroying LAG information.
 */
void dsa_master_lag_teardown(struct net_device *lag_dev,
			     struct dsa_port *cpu_dp)
{
	struct net_device *upper;
	struct list_head *iter;

	dsa_port_lag_leave(cpu_dp, lag_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper, iter)
		if (dsa_slave_dev_check(upper))
			return;

	dsa_master_teardown(lag_dev);
}