/*
 * net/dsa/slave.c - Slave device handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/netpoll.h>
#include <linux/ptp_classify.h>

#include "dsa_priv.h"

static bool dsa_slave_dev_check(struct net_device *dev);

/* slave mii_bus handling ***************************************************/
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_read(ds, addr, reg);

	return 0xffff;
}

static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_write(ds, addr, reg, val);

	return 0;
}

void dsa_slave_mii_bus_init(struct dsa_switch *ds)
{
	ds->slave_mii_bus->priv = (void *)ds;
	ds->slave_mii_bus->name = "dsa slave smi";
	ds->slave_mii_bus->read = dsa_slave_phy_read;
	ds->slave_mii_bus->write = dsa_slave_phy_write;
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
		 ds->dst->index, ds->index);
	ds->slave_mii_bus->parent = ds->dev;
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}


/* slave device handling ****************************************************/
static int dsa_slave_get_iflink(const struct net_device *dev)
{
	return dsa_slave_to_master(dev)->ifindex;
}

static int dsa_slave_open(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (!(master->flags & IFF_UP))
		return -ENETDOWN;

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
		err = dev_uc_add(master, dev->dev_addr);
		if (err < 0)
			goto out;
	}

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(master, 1);
		if (err < 0)
			goto del_unicast;
	}
	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(master, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	err = dsa_port_enable(dp, dev->phydev);
	if (err)
		goto clear_promisc;

	phylink_start(dp->pl);

	return 0;

clear_promisc:
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
del_unicast:
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);
out:
	return err;
}

static int dsa_slave_close(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	phylink_stop(dp->pl);

	dsa_port_disable(dp, dev->phydev);

	dev_mc_unsync(master, dev);
	dev_uc_unsync(master, dev);
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	return 0;
}

static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *master = dsa_slave_to_master(dev);

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1);
	if (change & IFF_PROMISC)
		dev_set_promiscuity(master, dev->flags & IFF_PROMISC ? 1 : -1);
}

static void dsa_slave_set_rx_mode(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);

	dev_mc_sync(master, dev);
	dev_uc_sync(master, dev);
}

static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			return err;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}

struct dsa_slave_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			   bool is_static, void *data)
{
	struct dsa_slave_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = NTF_SELF;
	ndm->ndm_type = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

static int
dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
		   struct net_device *dev, struct net_device *filter_dev,
		   int *idx)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_dump_ctx dump = {
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
	*idx = dump.idx;

	return err;
}

static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	int port = p->dp->index;

	/* Pass through to switch driver if it supports timestamping */
	switch (cmd) {
	case SIOCGHWTSTAMP:
		if (ds->ops->port_hwtstamp_get)
			return ds->ops->port_hwtstamp_get(ds, port, ifr);
		break;
	case SIOCSHWTSTAMP:
		if (ds->ops->port_hwtstamp_set)
			return ds->ops->port_hwtstamp_set(ds, port, ifr);
		break;
	}

	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}

static int dsa_slave_port_attr_set(struct net_device *dev,
				   const struct switchdev_attr *attr,
				   struct switchdev_trans *trans)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int ret;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		ret = dsa_port_set_state(dp, attr->u.stp_state, trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
					      trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		ret = dsa_port_ageing_time(dp, attr->u.ageing_time, trans);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static int dsa_slave_port_obj_add(struct net_device *dev,
				  const struct switchdev_obj *obj,
				  struct switchdev_trans *trans)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	/* For the prepare phase, ensure the full set of changes is feasible in
	 * one go in order to signal a failure properly. If an operation is not
	 * supported, return -EOPNOTSUPP.
	 */

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans);
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		/* DSA can directly translate this to a normal MDB add,
		 * but on the CPU port.
		 */
		err = dsa_port_mdb_add(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj),
				       trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dsa_port_vlan_add(dp, SWITCHDEV_OBJ_PORT_VLAN(obj),
					trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_slave_port_obj_del(struct net_device *dev,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		/* DSA can directly translate this to a normal MDB del,
		 * but on the CPU port.
		 */
		err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dsa_port_vlan_del(dp, SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_slave_port_attr_get(struct net_device *dev,
				   struct switchdev_attr *attr)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(dst->index);
		memcpy(&attr->u.ppid.id, &dst->index, attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
		attr->u.brport_flags_support = 0;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
						     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_slave_priv *p = netdev_priv(dev);

	if (p->netpoll)
		netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
#endif
	return NETDEV_TX_OK;
}

static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
				 struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;
	struct sk_buff *clone;
	unsigned int type;

	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE)
		return;

	if (!ds->ops->port_txtstamp)
		return;

	clone = skb_clone_sk(skb);
	if (!clone)
		return;

	if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type))
		return;

	kfree_skb(clone);
}

static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct pcpu_sw_netstats *s;
	struct sk_buff *nskb;

	s = this_cpu_ptr(p->stats64);
	u64_stats_update_begin(&s->syncp);
	s->tx_packets++;
	s->tx_bytes += skb->len;
	u64_stats_update_end(&s->syncp);

	/* Identify PTP protocol packets, clone them, and pass them to the
	 * switch driver
	 */
	dsa_skb_tx_timestamp(p, skb);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* SKBs for netpoll still need to be mangled with the protocol-specific
	 * tag to be successfully transmitted
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_slave_netpoll_send_skb(dev, nskb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	nskb->dev = dsa_slave_to_master(dev);
	dev_queue_xmit(nskb);

	return NETDEV_TX_OK;
}

/* ethtool operations *******************************************************/

static void dsa_slave_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}

static int dsa_slave_get_regs_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs_len)
		return ds->ops->get_regs_len(ds, dp->index);

	return -EOPNOTSUPP;
}

static void
dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs)
		ds->ops->get_regs(ds, dp->index, regs, _p);
}

static int dsa_slave_nway_reset(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_nway_reset(dp->pl);
}

static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->cd && ds->cd->eeprom_len)
		return ds->cd->eeprom_len;

	if (ds->ops->get_eeprom_len)
		return ds->ops->get_eeprom_len(ds);

	return 0;
}

static int dsa_slave_get_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eeprom)
		return ds->ops->get_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static int dsa_slave_set_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->set_eeprom)
		return ds->ops->set_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static void dsa_slave_get_strings(struct net_device *dev,
				  uint32_t stringset, uint8_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strncpy(data, "tx_packets", len);
		strncpy(data + len, "tx_bytes", len);
		strncpy(data + 2 * len, "rx_packets", len);
		strncpy(data + 3 * len, "rx_bytes", len);
		if (ds->ops->get_strings)
			ds->ops->get_strings(ds, dp->index, stringset,
					     data + 4 * len);
	}
}

static void dsa_slave_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats,
					uint64_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(p->stats64, i);
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = s->tx_packets;
			tx_bytes = s->tx_bytes;
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}

static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (sset == ETH_SS_STATS) {
		int count;

		count = 4;
		if (ds->ops->get_sset_count)
			count += ds->ops->get_sset_count(ds, dp->index, sset);

		return count;
	}

	return -EOPNOTSUPP;
}

static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	phylink_ethtool_get_wol(dp->pl, w);

	if (ds->ops->get_wol)
		ds->ops->get_wol(ds, dp->index, w);
}

static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret = -EOPNOTSUPP;

	phylink_ethtool_set_wol(dp->pl, w);

	if (ds->ops->set_wol)
		ret = ds->ops->set_wol(ds, dp->index, w);

	return ret;
}

static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev && !dp->pl)
		return -ENODEV;

	if (!ds->ops->set_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->set_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_set_eee(dp->pl, e);
}

static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev && !dp->pl)
		return -ENODEV;

	if (!ds->ops->get_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->get_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_get_eee(dp->pl, e);
}

static int dsa_slave_get_link_ksettings(struct net_device *dev,
					struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_get(dp->pl, cmd);
}

static int dsa_slave_set_link_ksettings(struct net_device *dev,
					const struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_set(dp->pl, cmd);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_slave_netpoll_setup(struct net_device *dev,
				   struct netpoll_info *ni)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	if (!netpoll)
		return -ENOMEM;

	err = __netpoll_setup(netpoll, master);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	p->netpoll = netpoll;
out:
	return err;
}

static void dsa_slave_netpoll_cleanup(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll = p->netpoll;

	if (!netpoll)
		return;

	p->netpoll = NULL;

	__netpoll_free(netpoll);
}

static void dsa_slave_poll_controller(struct net_device *dev)
{
}
#endif

static int dsa_slave_get_phys_port_name(struct net_device *dev,
					char *name, size_t len)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	if (snprintf(name, len, "p%d", dp->index) >= len)
		return -EINVAL;

	return 0;
}

static struct dsa_mall_tc_entry *
dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int dsa_slave_add_cls_matchall(struct net_device *dev,
				      struct tc_cls_matchall_offload *cls,
				      bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	__be16 protocol = cls->common.protocol;
	struct dsa_switch *ds = dp->ds;
	struct net_device *to_dev;
	const struct tc_action *a;
	struct dsa_port *to_dp;
	int err = -EOPNOTSUPP;

	if (!ds->ops->port_mirror_add)
		return err;

	if (!tcf_exts_has_one_action(cls->exts))
		return err;

	a = tcf_exts_first_action(cls->exts);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct dsa_mall_mirror_tc_entry *mirror;

		to_dev = tcf_mirred_dev(a);
		if (!to_dev)
			return -EINVAL;

		if (!dsa_slave_dev_check(to_dev))
			return -EOPNOTSUPP;

		mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
		if (!mall_tc_entry)
			return -ENOMEM;

		mall_tc_entry->cookie = cls->cookie;
		mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;

		to_dp = dsa_slave_to_port(to_dev);

		mirror->to_local_port = to_dp->index;
		mirror->ingress = ingress;

		err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
		if (err) {
			kfree(mall_tc_entry);
			return err;
		}

		list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
	}

	return 0;
}

static void dsa_slave_del_cls_matchall(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mirror_del)
		return;

	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
	if (!mall_tc_entry)
		return;

	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case DSA_PORT_MALL_MIRROR:
		ds->ops->port_mirror_del(ds, dp->index, &mall_tc_entry->mirror);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
					   struct tc_cls_matchall_offload *cls,
					   bool ingress)
{
	if (cls->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dsa_slave_add_cls_matchall(dev, cls, ingress);
	case TC_CLSMATCHALL_DESTROY:
		dsa_slave_del_cls_matchall(dev, cls);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				       void *cb_priv, bool ingress)
{
	struct net_device *dev = cb_priv;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
}

static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
}

static int dsa_slave_setup_tc_block(struct net_device *dev,
				    struct tc_block_offload *f)
{
	tc_setup_cb_t *cb;

	if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		cb = dsa_slave_setup_tc_block_cb_ig;
	else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		cb = dsa_slave_setup_tc_block_cb_eg;
	else
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, cb, dev, dev, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, cb, dev);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return dsa_slave_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static void dsa_slave_get_stats64(struct net_device *dev,
				  struct rtnl_link_stats64 *stats)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	netdev_stats_to_stats64(stats, &dev->stats);
	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(p->stats64, i);
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = s->tx_packets;
			tx_bytes = s->tx_bytes;
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));

		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
	}
}

static int dsa_slave_get_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}

static int dsa_slave_set_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->set_rxnfc(ds, dp->index, nfc);
}

static int dsa_slave_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *ts)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;

	if (!ds->ops->get_ts_info)
		return -EOPNOTSUPP;

	return ds->ops->get_ts_info(ds, p->dp->index, ts);
}

static const struct ethtool_ops dsa_slave_ethtool_ops = {
	.get_drvinfo = dsa_slave_get_drvinfo,
	.get_regs_len = dsa_slave_get_regs_len,
	.get_regs = dsa_slave_get_regs,
	.nway_reset = dsa_slave_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = dsa_slave_get_eeprom_len,
	.get_eeprom = dsa_slave_get_eeprom,
	.set_eeprom = dsa_slave_set_eeprom,
	.get_strings = dsa_slave_get_strings,
	.get_ethtool_stats = dsa_slave_get_ethtool_stats,
	.get_sset_count = dsa_slave_get_sset_count,
	.set_wol = dsa_slave_set_wol,
	.get_wol = dsa_slave_get_wol,
	.set_eee = dsa_slave_set_eee,
	.get_eee = dsa_slave_get_eee,
	.get_link_ksettings = dsa_slave_get_link_ksettings,
	.set_link_ksettings = dsa_slave_set_link_ksettings,
	.get_rxnfc = dsa_slave_get_rxnfc,
	.set_rxnfc = dsa_slave_set_rxnfc,
	.get_ts_info = dsa_slave_get_ts_info,
};

/* legacy way, bypassing the bridge *****************************************/
int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
		       struct net_device *dev,
		       const unsigned char *addr, u16 vid,
		       u16 flags)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dsa_port_fdb_add(dp, addr, vid);
}

int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
		       struct net_device *dev,
		       const unsigned char *addr, u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dsa_port_fdb_del(dp, addr, vid);
}

static const struct net_device_ops dsa_slave_netdev_ops = {
	.ndo_open = dsa_slave_open,
	.ndo_stop = dsa_slave_close,
	.ndo_start_xmit = dsa_slave_xmit,
	.ndo_change_rx_flags = dsa_slave_change_rx_flags,
	.ndo_set_rx_mode = dsa_slave_set_rx_mode,
	.ndo_set_mac_address = dsa_slave_set_mac_address,
	.ndo_fdb_add = dsa_legacy_fdb_add,
	.ndo_fdb_del = dsa_legacy_fdb_del,
	.ndo_fdb_dump = dsa_slave_fdb_dump,
	.ndo_do_ioctl = dsa_slave_ioctl,
	.ndo_get_iflink = dsa_slave_get_iflink,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup = dsa_slave_netpoll_setup,
	.ndo_netpoll_cleanup = dsa_slave_netpoll_cleanup,
	.ndo_poll_controller = dsa_slave_poll_controller,
#endif
	.ndo_get_phys_port_name = dsa_slave_get_phys_port_name,
	.ndo_setup_tc = dsa_slave_setup_tc,
	.ndo_get_stats64 = dsa_slave_get_stats64,
};

static const struct switchdev_ops dsa_slave_switchdev_ops = {
	.switchdev_port_attr_get = dsa_slave_port_attr_get,
	.switchdev_port_attr_set = dsa_slave_port_attr_set,
};

static struct device_type dsa_type = {
	.name = "dsa",
};

static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *dev = to_net_dev(d);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return sprintf(buf, "%s\n",
		       dsa_tag_protocol_to_str(dp->cpu_dp->tag_ops));
}
static DEVICE_ATTR_RO(tagging);

static struct attribute *dsa_slave_attrs[] = {
	&dev_attr_tagging.attr,
	NULL
};

static const struct attribute_group dsa_group = {
	.name = "dsa",
	.attrs = dsa_slave_attrs,
};

static void dsa_slave_phylink_validate(struct net_device *dev,
				       unsigned long *supported,
				       struct phylink_link_state *state)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_validate)
		return;

	ds->ops->phylink_validate(ds, dp->index, supported, state);
}

static int dsa_slave_phylink_mac_link_state(struct net_device *dev,
					    struct phylink_link_state *state)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	/* Only called for SGMII and 802.3z */
	if (!ds->ops->phylink_mac_link_state)
		return -EOPNOTSUPP;

	return ds->ops->phylink_mac_link_state(ds, dp->index, state);
}

static void dsa_slave_phylink_mac_config(struct net_device *dev,
					 unsigned int mode,
					 const struct phylink_link_state *state)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_config)
		return;

	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}

static void dsa_slave_phylink_mac_an_restart(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_an_restart)
		return;

	ds->ops->phylink_mac_an_restart(ds, dp->index);
}

static void dsa_slave_phylink_mac_link_down(struct net_device *dev,
					    unsigned int mode,
					    phy_interface_t interface)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_down) {
		if (ds->ops->adjust_link && dev->phydev)
			ds->ops->adjust_link(ds, dp->index, dev->phydev);
		return;
	}

	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}

static void dsa_slave_phylink_mac_link_up(struct net_device *dev,
					  unsigned int mode,
					  phy_interface_t interface,
					  struct phy_device *phydev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_up) {
		if (ds->ops->adjust_link && dev->phydev)
			ds->ops->adjust_link(ds, dp->index, dev->phydev);
		return;
	}

	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev);
}

static const struct phylink_mac_ops dsa_slave_phylink_mac_ops = {
	.validate = dsa_slave_phylink_validate,
	.mac_link_state = dsa_slave_phylink_mac_link_state,
	.mac_config = dsa_slave_phylink_mac_config,
	.mac_an_restart = dsa_slave_phylink_mac_an_restart,
	.mac_link_down = dsa_slave_phylink_mac_link_down,
	.mac_link_up = dsa_slave_phylink_mac_link_up,
};

void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
{
	const struct dsa_port *dp = dsa_to_port(ds, port);

	phylink_mac_change(dp->pl, up);
}
EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);

static void dsa_slave_phylink_fixed_state(struct net_device *dev,
					  struct phylink_link_state *state)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	/* No need to check that this operation is valid, the callback would
	 * not be called if it was not.
	 */
	ds->ops->phylink_fixed_state(ds, dp->index, state);
}

/* slave device setup *******************************************************/
static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_switch *ds = dp->ds;

	slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
	if (!slave_dev->phydev) {
		netdev_err(slave_dev, "no phy at %d\n", addr);
		return -ENODEV;
	}

	return phylink_connect_phy(dp->pl, slave_dev->phydev);
}

static int dsa_slave_phy_setup(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct device_node *port_dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	u32 phy_flags = 0;
	int mode, ret;

	mode = of_get_phy_mode(port_dn);
	if (mode < 0)
		mode = PHY_INTERFACE_MODE_NA;

	dp->pl = phylink_create(slave_dev, of_fwnode_handle(port_dn), mode,
				&dsa_slave_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		netdev_err(slave_dev,
			   "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	/* Register only if the switch provides such a callback, since this
	 * callback takes precedence over polling the link GPIO in PHYLINK
	 * (see phylink_get_fixed_state).
	 */
	if (ds->ops->phylink_fixed_state)
		phylink_fixed_state_cb(dp->pl, dsa_slave_phylink_fixed_state);

	if (ds->ops->get_phy_flags)
		phy_flags = ds->ops->get_phy_flags(ds, dp->index);

	ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
	if (ret == -ENODEV) {
		/* We could not connect to a designated PHY or SFP, so use the
		 * switch internal MDIO bus instead
		 */
		ret = dsa_slave_phy_connect(slave_dev, dp->index);
		if (ret) {
			netdev_err(slave_dev,
				   "failed to connect to port %d: %d\n",
				   dp->index, ret);
			phylink_destroy(dp->pl);
			return ret;
		}
	}

	return 0;
}

static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
					    struct netdev_queue *txq,
					    void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock,
			  &dsa_slave_netdev_xmit_lock_key);
}

int dsa_slave_suspend(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);

	if (!netif_running(slave_dev))
		return 0;

	netif_device_detach(slave_dev);

	rtnl_lock();
	phylink_stop(dp->pl);
	rtnl_unlock();

	return 0;
}

int dsa_slave_resume(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);

	if (!netif_running(slave_dev))
		return 0;

	netif_device_attach(slave_dev);

	rtnl_lock();
	phylink_start(dp->pl);
	rtnl_unlock();

	return 0;
}

static void dsa_slave_notify(struct net_device *dev, unsigned long val)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_notifier_register_info rinfo = {
		.switch_number = dp->ds->index,
		.port_number = dp->index,
		.master = master,
		.info.dev = dev,
	};

	call_dsa_notifiers(val, dev, &rinfo.info);
}

int dsa_slave_create(struct dsa_port *port)
{
	const struct dsa_port *cpu_dp = port->cpu_dp;
	struct net_device *master = cpu_dp->master;
	struct dsa_switch *ds = port->ds;
	const char *name = port->name;
	struct net_device *slave_dev;
	struct dsa_slave_priv *p;
	int ret;

	if (!ds->num_tx_queues)
		ds->num_tx_queues = 1;

	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
				     NET_NAME_UNKNOWN, ether_setup,
				     ds->num_tx_queues, 1);
	if (slave_dev == NULL)
		return -ENOMEM;

	slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
	slave_dev->hw_features |= NETIF_F_HW_TC;
	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
	eth_hw_addr_inherit(slave_dev, master);
	slave_dev->priv_flags |= IFF_NO_QUEUE;
	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
	slave_dev->switchdev_ops = &dsa_slave_switchdev_ops;
	slave_dev->min_mtu = 0;
	slave_dev->max_mtu = ETH_MAX_MTU;
	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);

	netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
				 NULL);

	SET_NETDEV_DEV(slave_dev, port->ds->dev);
	slave_dev->dev.of_node = port->dn;
	slave_dev->vlan_features = master->vlan_features;

	p = netdev_priv(slave_dev);
	p->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!p->stats64) {
		free_netdev(slave_dev);
		return -ENOMEM;
	}
	p->dp = port;
	INIT_LIST_HEAD(&p->mall_tc_list);
	p->xmit = cpu_dp->tag_ops->xmit;
	port->slave = slave_dev;

	netif_carrier_off(slave_dev);

	ret = dsa_slave_phy_setup(slave_dev);
	if (ret) {
		netdev_err(master, "error %d setting up slave phy\n", ret);
		goto out_free;
	}

	dsa_slave_notify(slave_dev, DSA_PORT_REGISTER);

	ret = register_netdev(slave_dev);
	if (ret) {
		netdev_err(master, "error %d registering interface %s\n",
			   ret, slave_dev->name);
		goto out_phy;
	}

	ret = sysfs_create_group(&slave_dev->dev.kobj, &dsa_group);
	if (ret)
		goto out_unreg;

	return 0;

out_unreg:
	unregister_netdev(slave_dev);
out_phy:
	rtnl_lock();
	phylink_disconnect_phy(p->dp->pl);
	rtnl_unlock();
	phylink_destroy(p->dp->pl);
out_free:
	free_percpu(p->stats64);
	free_netdev(slave_dev);
	port->slave = NULL;
	return ret;
}

void dsa_slave_destroy(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_slave_priv *p = netdev_priv(slave_dev);

	netif_carrier_off(slave_dev);
	rtnl_lock();
	phylink_disconnect_phy(dp->pl);
	rtnl_unlock();

	dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
	sysfs_remove_group(&slave_dev->dev.kobj, &dsa_group);
	unregister_netdev(slave_dev);
	phylink_destroy(dp->pl);
	free_percpu(p->stats64);
	free_netdev(slave_dev);
}

static bool dsa_slave_dev_check(struct net_device *dev)
{
	return dev->netdev_ops == &dsa_slave_netdev_ops;
}

static int dsa_slave_changeupper(struct net_device *dev,
				 struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err = NOTIFY_DONE;

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_bridge_join(dp, info->upper_dev);
			err = notifier_from_errno(err);
		} else {
			dsa_port_bridge_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	}

	return err;
}

static int dsa_slave_netdevice_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!dsa_slave_dev_check(dev))
		return NOTIFY_DONE;

	if (event == NETDEV_CHANGEUPPER)
		return dsa_slave_changeupper(dev, ptr);

	return NOTIFY_DONE;
}

struct dsa_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	unsigned long event;
};

static void dsa_slave_switchdev_event_work(struct work_struct *work)
{
	struct dsa_switchdev_event_work *switchdev_work =
		container_of(work, struct dsa_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	rtnl_lock();
	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		if (!fdb_info->added_by_user)
			break;

		err = dsa_port_fdb_add(dp, fdb_info->addr, fdb_info->vid);
		if (err) {
			netdev_dbg(dev, "fdb add failed err=%d\n", err);
			break;
		}
		fdb_info->offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
					 &fdb_info->info);
		break;

	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		if (!fdb_info->added_by_user)
			break;

		err = dsa_port_fdb_del(dp, fdb_info->addr, fdb_info->vid);
		if (err) {
			netdev_dbg(dev, "fdb del failed err=%d\n", err);
			dev_close(dev);
		}
		break;
	}
	rtnl_unlock();

	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}

static int
dsa_slave_switchdev_fdb_work_init(struct dsa_switchdev_event_work *
				  switchdev_work,
				  const struct switchdev_notifier_fdb_info *
				  fdb_info)
{
	memcpy(&switchdev_work->fdb_info, fdb_info,
	       sizeof(switchdev_work->fdb_info));
	switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
	if (!switchdev_work->fdb_info.addr)
		return -ENOMEM;
	ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
			fdb_info->addr);
	return 0;
}

/* Called under rcu_read_lock() */
static int dsa_slave_switchdev_event(struct notifier_block *unused,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct dsa_switchdev_event_work *switchdev_work;

	if (!dsa_slave_dev_check(dev))
		return NOTIFY_DONE;

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&switchdev_work->work,
		  dsa_slave_switchdev_event_work);
	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (dsa_slave_switchdev_fdb_work_init(switchdev_work, ptr))
			goto err_fdb_work_init;
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	dsa_schedule_work(&switchdev_work->work);
	return NOTIFY_OK;

err_fdb_work_init:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

static int
dsa_slave_switchdev_port_obj_event(unsigned long event,
			struct net_device *netdev,
			struct switchdev_notifier_port_obj_info *port_obj_info)
{
	int err = -EOPNOTSUPP;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = dsa_slave_port_obj_add(netdev, port_obj_info->obj,
					     port_obj_info->trans);
		break;
	case SWITCHDEV_PORT_OBJ_DEL:
		err = dsa_slave_port_obj_del(netdev, port_obj_info->obj);
		break;
	}

	port_obj_info->handled = true;
	return notifier_from_errno(err);
}

static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
					      unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);

	if (!dsa_slave_dev_check(dev))
		return NOTIFY_DONE;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD: /* fall through */
	case SWITCHDEV_PORT_OBJ_DEL:
		return dsa_slave_switchdev_port_obj_event(event, dev, ptr);
	}

	return NOTIFY_DONE;
}

static struct notifier_block dsa_slave_nb __read_mostly = {
	.notifier_call = dsa_slave_netdevice_event,
};

static struct notifier_block dsa_slave_switchdev_notifier = {
	.notifier_call = dsa_slave_switchdev_event,
};

static struct notifier_block dsa_slave_switchdev_blocking_notifier = {
	.notifier_call = dsa_slave_switchdev_blocking_event,
};

int dsa_slave_register_notifier(void)
{
	struct notifier_block *nb;
	int err;

	err = register_netdevice_notifier(&dsa_slave_nb);
	if (err)
		return err;

	err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		goto err_switchdev_nb;

	nb = &dsa_slave_switchdev_blocking_notifier;
	err = register_switchdev_blocking_notifier(nb);
	if (err)
		goto err_switchdev_blocking_nb;

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
err_switchdev_nb:
	unregister_netdevice_notifier(&dsa_slave_nb);
	return err;
}

void dsa_slave_unregister_notifier(void)
{
	struct notifier_block *nb;
	int err;

	nb = &dsa_slave_switchdev_blocking_notifier;
	err = unregister_switchdev_blocking_notifier(nb);
	if (err)
		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);

	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);

	err = unregister_netdevice_notifier(&dsa_slave_nb);
	if (err)
		pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
}