/*
 * net/dsa/slave.c - Slave device handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/netpoll.h>

#include "dsa_priv.h"

static bool dsa_slave_dev_check(struct net_device *dev);

/* slave mii_bus handling ***************************************************/
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_read(ds, addr, reg);

	return 0xffff;
}

static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_write(ds, addr, reg, val);

	return 0;
}

void dsa_slave_mii_bus_init(struct dsa_switch *ds)
{
	ds->slave_mii_bus->priv = (void *)ds;
	ds->slave_mii_bus->name = "dsa slave smi";
	ds->slave_mii_bus->read = dsa_slave_phy_read;
	ds->slave_mii_bus->write = dsa_slave_phy_write;
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
		 ds->dst->tree, ds->index);
	ds->slave_mii_bus->parent = ds->dev;
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}


/* slave device handling ****************************************************/
static int dsa_slave_get_iflink(const struct net_device *dev)
{
	return dsa_slave_to_master(dev)->ifindex;
}

static int dsa_slave_open(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (!(master->flags & IFF_UP))
		return -ENETDOWN;

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
		err = dev_uc_add(master, dev->dev_addr);
		if (err < 0)
			goto out;
	}

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(master, 1);
		if (err < 0)
			goto del_unicast;
	}
	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(master, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	err = dsa_port_enable(dp, dev->phydev);
	if (err)
		goto clear_promisc;

	if (dev->phydev)
		phy_start(dev->phydev);

	return 0;

clear_promisc:
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
del_unicast:
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);
out:
	return err;
}

static int dsa_slave_close(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	if (dev->phydev)
		phy_stop(dev->phydev);

	dsa_port_disable(dp, dev->phydev);

	dev_mc_unsync(master, dev);
	dev_uc_unsync(master, dev);
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	return 0;
}
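
/* The slave interface does all of its I/O through the CPU port's master
 * device: allmulti/promiscuous counts, multicast/unicast address lists and
 * secondary MAC addresses are reflected onto the master so that frames
 * destined to a slave port are accepted there.
 */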
static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *master = dsa_slave_to_master(dev);

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1);
	if (change & IFF_PROMISC)
		dev_set_promiscuity(master, dev->flags & IFF_PROMISC ? 1 : -1);
}

static void dsa_slave_set_rx_mode(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);

	dev_mc_sync(master, dev);
	dev_uc_sync(master, dev);
}

static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			return err;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}

struct dsa_slave_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};
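
/* Callback for dsa_port_fdb_dump(): emit one RTM_NEWNEIGH message per FDB
 * entry into the netlink dump buffer, honouring the resume index stored in
 * cb->args[2].
 */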
static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			   bool is_static, void *data)
{
	struct dsa_slave_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = NTF_SELF;
	ndm->ndm_type = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

static int
dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
		   struct net_device *dev, struct net_device *filter_dev,
		   int *idx)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_dump_ctx dump = {
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
	*idx = dump.idx;

	return err;
}

static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (!dev->phydev)
		return -ENODEV;

	return phy_mii_ioctl(dev->phydev, ifr, cmd);
}
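
/* switchdev attribute and object handlers. Attributes cover STP state, VLAN
 * filtering and ageing time; objects cover MDB and VLAN entries. Additions
 * go through the two-phase prepare/commit transaction carried in "trans".
 */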
static int dsa_slave_port_attr_set(struct net_device *dev,
				   const struct switchdev_attr *attr,
				   struct switchdev_trans *trans)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int ret;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		ret = dsa_port_set_state(dp, attr->u.stp_state, trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
					      trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		ret = dsa_port_ageing_time(dp, attr->u.ageing_time, trans);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static int dsa_slave_port_obj_add(struct net_device *dev,
				  const struct switchdev_obj *obj,
				  struct switchdev_trans *trans)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	/* For the prepare phase, ensure the full set of changes is feasible in
	 * one go in order to signal a failure properly. If an operation is not
	 * supported, return -EOPNOTSUPP.
	 */

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dsa_port_vlan_add(dp, SWITCHDEV_OBJ_PORT_VLAN(obj),
					trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_slave_port_obj_del(struct net_device *dev,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dsa_port_vlan_del(dp, SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_slave_port_attr_get(struct net_device *dev,
				   struct switchdev_attr *attr)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(ds->index);
		memcpy(&attr->u.ppid.id, &ds->index, attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
		attr->u.brport_flags_support = 0;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
						     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_slave_priv *p = netdev_priv(dev);

	if (p->netpoll)
		netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
#endif
	return NETDEV_TX_OK;
}
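
/* Update the software TX counters, let the tagging protocol rewrite the SKB
 * (p->xmit may reallocate it), then queue the tagged frame for transmission
 * on the CPU port's master interface.
 */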
static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct pcpu_sw_netstats *s;
	struct sk_buff *nskb;

	s = this_cpu_ptr(p->stats64);
	u64_stats_update_begin(&s->syncp);
	s->tx_packets++;
	s->tx_bytes += skb->len;
	u64_stats_update_end(&s->syncp);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* SKBs for netpoll still need to be mangled with the protocol-specific
	 * tag to be successfully transmitted
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_slave_netpoll_send_skb(dev, nskb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	nskb->dev = dsa_slave_to_master(dev);
	dev_queue_xmit(nskb);

	return NETDEV_TX_OK;
}

/* ethtool operations *******************************************************/

static void dsa_slave_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}

static int dsa_slave_get_regs_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs_len)
		return ds->ops->get_regs_len(ds, dp->index);

	return -EOPNOTSUPP;
}

static void
dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs)
		ds->ops->get_regs(ds, dp->index, regs, _p);
}

static u32 dsa_slave_get_link(struct net_device *dev)
{
	if (!dev->phydev)
		return -ENODEV;

	genphy_update_link(dev->phydev);

	return dev->phydev->link;
}

static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->cd && ds->cd->eeprom_len)
		return ds->cd->eeprom_len;

	if (ds->ops->get_eeprom_len)
		return ds->ops->get_eeprom_len(ds);

	return 0;
}

static int dsa_slave_get_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eeprom)
		return ds->ops->get_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static int dsa_slave_set_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->set_eeprom)
		return ds->ops->set_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static void dsa_slave_get_strings(struct net_device *dev,
				  uint32_t stringset, uint8_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strncpy(data, "tx_packets", len);
		strncpy(data + len, "tx_bytes", len);
		strncpy(data + 2 * len, "rx_packets", len);
		strncpy(data + 3 * len, "rx_bytes", len);
		if (ds->ops->get_strings)
			ds->ops->get_strings(ds, dp->index, data + 4 * len);
	}
}
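
/* The first four ethtool statistics slots are the per-CPU software tx/rx
 * packet and byte counters; any remaining slots are filled in by the switch
 * driver's get_ethtool_stats() operation.
 */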
static void dsa_slave_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats,
					uint64_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(p->stats64, i);
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = s->tx_packets;
			tx_bytes = s->tx_bytes;
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}

static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (sset == ETH_SS_STATS) {
		int count;

		count = 4;
		if (ds->ops->get_sset_count)
			count += ds->ops->get_sset_count(ds);

		return count;
	}

	return -EOPNOTSUPP;
}

static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_wol)
		ds->ops->get_wol(ds, dp->index, w);
}

static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret = -EOPNOTSUPP;

	if (ds->ops->set_wol)
		ret = ds->ops->set_wol(ds, dp->index, w);

	return ret;
}

static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev)
		return -ENODEV;

	if (!ds->ops->set_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->set_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	if (e->eee_enabled) {
		ret = phy_init_eee(dev->phydev, 0);
		if (ret)
			return ret;
	}

	return phy_ethtool_set_eee(dev->phydev, e);
}

static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev)
		return -ENODEV;

	if (!ds->ops->get_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->get_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phy_ethtool_get_eee(dev->phydev, e);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_slave_netpoll_setup(struct net_device *dev,
				   struct netpoll_info *ni)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	if (!netpoll)
		return -ENOMEM;

	err = __netpoll_setup(netpoll, master);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	p->netpoll = netpoll;
out:
	return err;
}

static void dsa_slave_netpoll_cleanup(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll = p->netpoll;

	if (!netpoll)
		return;

	p->netpoll = NULL;

	__netpoll_free_async(netpoll);
}

static void dsa_slave_poll_controller(struct net_device *dev)
{
}
#endif

static int dsa_slave_get_phys_port_name(struct net_device *dev,
					char *name, size_t len)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	if (snprintf(name, len, "p%d", dp->index) >= len)
		return -EINVAL;

	return 0;
}
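
/* tc matchall offload: only "mirred egress mirror" actions whose target is
 * another DSA slave port can be offloaded, using the driver's
 * port_mirror_add()/port_mirror_del() operations.
 */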
static struct dsa_mall_tc_entry *
dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int dsa_slave_add_cls_matchall(struct net_device *dev,
				      struct tc_cls_matchall_offload *cls,
				      bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	__be16 protocol = cls->common.protocol;
	struct net *net = dev_net(dev);
	struct dsa_switch *ds = dp->ds;
	struct net_device *to_dev;
	const struct tc_action *a;
	struct dsa_port *to_dp;
	int err = -EOPNOTSUPP;
	LIST_HEAD(actions);
	int ifindex;

	if (!ds->ops->port_mirror_add)
		return err;

	if (!tcf_exts_has_one_action(cls->exts))
		return err;

	tcf_exts_to_list(cls->exts, &actions);
	a = list_first_entry(&actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct dsa_mall_mirror_tc_entry *mirror;

		ifindex = tcf_mirred_ifindex(a);
		to_dev = __dev_get_by_index(net, ifindex);
		if (!to_dev)
			return -EINVAL;

		if (!dsa_slave_dev_check(to_dev))
			return -EOPNOTSUPP;

		mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
		if (!mall_tc_entry)
			return -ENOMEM;

		mall_tc_entry->cookie = cls->cookie;
		mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;

		to_dp = dsa_slave_to_port(to_dev);

		mirror->to_local_port = to_dp->index;
		mirror->ingress = ingress;

		err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
		if (err) {
			kfree(mall_tc_entry);
			return err;
		}

		list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
	}

	return 0;
}

static void dsa_slave_del_cls_matchall(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mirror_del)
		return;

	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
	if (!mall_tc_entry)
		return;

	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case DSA_PORT_MALL_MIRROR:
		ds->ops->port_mirror_del(ds, dp->index, &mall_tc_entry->mirror);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
					   struct tc_cls_matchall_offload *cls,
					   bool ingress)
{
	if (cls->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dsa_slave_add_cls_matchall(dev, cls, ingress);
	case TC_CLSMATCHALL_DESTROY:
		dsa_slave_del_cls_matchall(dev, cls);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
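
/* tc block callbacks: a separate callback is registered per direction so
 * that the matchall handler above knows whether the rule was attached to
 * the ingress or the egress qdisc of the slave interface.
 */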
static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				       void *cb_priv, bool ingress)
{
	struct net_device *dev = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
}

static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
}

static int dsa_slave_setup_tc_block(struct net_device *dev,
				    struct tc_block_offload *f)
{
	tc_setup_cb_t *cb;

	if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		cb = dsa_slave_setup_tc_block_cb_ig;
	else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		cb = dsa_slave_setup_tc_block_cb_eg;
	else
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, cb, dev, dev);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, cb, dev);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return dsa_slave_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
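
/* Fold the per-CPU software counters updated in the transmit and receive
 * hot paths into the standard rtnl link statistics.
 */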
static void dsa_slave_get_stats64(struct net_device *dev,
				  struct rtnl_link_stats64 *stats)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	netdev_stats_to_stats64(stats, &dev->stats);
	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(p->stats64, i);
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = s->tx_packets;
			tx_bytes = s->tx_bytes;
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));

		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
	}
}

static int dsa_slave_get_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}

static int dsa_slave_set_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->set_rxnfc(ds, dp->index, nfc);
}

static const struct ethtool_ops dsa_slave_ethtool_ops = {
	.get_drvinfo = dsa_slave_get_drvinfo,
	.get_regs_len = dsa_slave_get_regs_len,
	.get_regs = dsa_slave_get_regs,
	.nway_reset = phy_ethtool_nway_reset,
	.get_link = dsa_slave_get_link,
	.get_eeprom_len = dsa_slave_get_eeprom_len,
	.get_eeprom = dsa_slave_get_eeprom,
	.set_eeprom = dsa_slave_set_eeprom,
	.get_strings = dsa_slave_get_strings,
	.get_ethtool_stats = dsa_slave_get_ethtool_stats,
	.get_sset_count = dsa_slave_get_sset_count,
	.set_wol = dsa_slave_set_wol,
	.get_wol = dsa_slave_get_wol,
	.set_eee = dsa_slave_set_eee,
	.get_eee = dsa_slave_get_eee,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	.get_rxnfc = dsa_slave_get_rxnfc,
	.set_rxnfc = dsa_slave_set_rxnfc,
};

static const struct net_device_ops dsa_slave_netdev_ops = {
	.ndo_open = dsa_slave_open,
	.ndo_stop = dsa_slave_close,
	.ndo_start_xmit = dsa_slave_xmit,
	.ndo_change_rx_flags = dsa_slave_change_rx_flags,
	.ndo_set_rx_mode = dsa_slave_set_rx_mode,
	.ndo_set_mac_address = dsa_slave_set_mac_address,
	.ndo_fdb_add = dsa_legacy_fdb_add,
	.ndo_fdb_del = dsa_legacy_fdb_del,
	.ndo_fdb_dump = dsa_slave_fdb_dump,
	.ndo_do_ioctl = dsa_slave_ioctl,
	.ndo_get_iflink = dsa_slave_get_iflink,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup = dsa_slave_netpoll_setup,
	.ndo_netpoll_cleanup = dsa_slave_netpoll_cleanup,
	.ndo_poll_controller = dsa_slave_poll_controller,
#endif
	.ndo_get_phys_port_name = dsa_slave_get_phys_port_name,
	.ndo_setup_tc = dsa_slave_setup_tc,
	.ndo_get_stats64 = dsa_slave_get_stats64,
};

static const struct switchdev_ops dsa_slave_switchdev_ops = {
	.switchdev_port_attr_get = dsa_slave_port_attr_get,
	.switchdev_port_attr_set = dsa_slave_port_attr_set,
	.switchdev_port_obj_add = dsa_slave_port_obj_add,
	.switchdev_port_obj_del = dsa_slave_port_obj_del,
};

static struct device_type dsa_type = {
	.name = "dsa",
};

static void dsa_slave_adjust_link(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = dp->ds;
	unsigned int status_changed = 0;

	if (p->old_link != dev->phydev->link) {
		status_changed = 1;
		p->old_link = dev->phydev->link;
	}

	if (p->old_duplex != dev->phydev->duplex) {
		status_changed = 1;
		p->old_duplex = dev->phydev->duplex;
	}

	if (p->old_pause != dev->phydev->pause) {
		status_changed = 1;
		p->old_pause = dev->phydev->pause;
	}

	if (ds->ops->adjust_link && status_changed)
		ds->ops->adjust_link(ds, dp->index, dev->phydev);

	if (status_changed)
		phy_print_status(dev->phydev);
}

static int dsa_slave_fixed_link_update(struct net_device *dev,
				       struct fixed_phy_status *status)
{
	struct dsa_switch *ds;
	struct dsa_port *dp;

	if (dev) {
		dp = dsa_slave_to_port(dev);
		ds = dp->ds;
		if (ds->ops->fixed_link_update)
			ds->ops->fixed_link_update(ds, dp->index, status);
	}

	return 0;
}

/* slave device setup *******************************************************/
static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_slave_priv *p = netdev_priv(slave_dev);
	struct dsa_switch *ds = dp->ds;

	slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
	if (!slave_dev->phydev) {
		netdev_err(slave_dev, "no phy at %d\n", addr);
		return -ENODEV;
	}

	/* Use already configured phy mode */
	if (p->phy_interface == PHY_INTERFACE_MODE_NA)
		p->phy_interface = slave_dev->phydev->interface;

	return phy_connect_direct(slave_dev, slave_dev->phydev,
				  dsa_slave_adjust_link, p->phy_interface);
}
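
/* Attach the slave to its PHY as described in the device tree: try the
 * "phy-handle" reference first, then a fixed-link node, and finally fall
 * back to the switch's internal MDIO bus at the port's own address.
 */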
static int dsa_slave_phy_setup(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_slave_priv *p = netdev_priv(slave_dev);
	struct device_node *port_dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	struct device_node *phy_dn;
	bool phy_is_fixed = false;
	u32 phy_flags = 0;
	int mode, ret;

	mode = of_get_phy_mode(port_dn);
	if (mode < 0)
		mode = PHY_INTERFACE_MODE_NA;
	p->phy_interface = mode;

	phy_dn = of_parse_phandle(port_dn, "phy-handle", 0);
	if (!phy_dn && of_phy_is_fixed_link(port_dn)) {
		/* In the case of a fixed PHY, the DT node associated
		 * with the fixed PHY is the port DT node
		 */
		ret = of_phy_register_fixed_link(port_dn);
		if (ret) {
			netdev_err(slave_dev, "failed to register fixed PHY: %d\n", ret);
			return ret;
		}
		phy_is_fixed = true;
		phy_dn = of_node_get(port_dn);
	}

	if (ds->ops->get_phy_flags)
		phy_flags = ds->ops->get_phy_flags(ds, dp->index);

	if (phy_dn) {
		slave_dev->phydev = of_phy_connect(slave_dev, phy_dn,
						   dsa_slave_adjust_link,
						   phy_flags,
						   p->phy_interface);
		of_node_put(phy_dn);
	}

	if (slave_dev->phydev && phy_is_fixed)
		fixed_phy_set_link_update(slave_dev->phydev,
					  dsa_slave_fixed_link_update);

	/* We could not connect to a designated PHY, so use the switch internal
	 * MDIO bus instead
	 */
	if (!slave_dev->phydev) {
		ret = dsa_slave_phy_connect(slave_dev, dp->index);
		if (ret) {
			netdev_err(slave_dev, "failed to connect to port %d: %d\n",
				   dp->index, ret);
			if (phy_is_fixed)
				of_phy_deregister_fixed_link(port_dn);
			return ret;
		}
	}

	phy_attached_info(slave_dev->phydev);

	return 0;
}

static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
					    struct netdev_queue *txq,
					    void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock,
			  &dsa_slave_netdev_xmit_lock_key);
}

int dsa_slave_suspend(struct net_device *slave_dev)
{
	struct dsa_slave_priv *p = netdev_priv(slave_dev);

	netif_device_detach(slave_dev);

	if (slave_dev->phydev) {
		phy_stop(slave_dev->phydev);
		p->old_pause = -1;
		p->old_link = -1;
		p->old_duplex = -1;
		phy_suspend(slave_dev->phydev);
	}

	return 0;
}

int dsa_slave_resume(struct net_device *slave_dev)
{
	netif_device_attach(slave_dev);

	if (slave_dev->phydev) {
		phy_resume(slave_dev->phydev);
		phy_start(slave_dev->phydev);
	}

	return 0;
}

static void dsa_slave_notify(struct net_device *dev, unsigned long val)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_notifier_register_info rinfo = {
		.switch_number = dp->ds->index,
		.port_number = dp->index,
		.master = master,
		.info.dev = dev,
	};

	call_dsa_notifiers(val, dev, &rinfo.info);
}
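
/* Create and register the slave network device for a switch port. The netdev
 * is backed by the CPU port's master interface, inherits its MAC address and
 * VLAN features, and transmits through the tree's tagging protocol.
 */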
int dsa_slave_create(struct dsa_port *port, const char *name)
{
	struct dsa_port *cpu_dp = port->cpu_dp;
	struct net_device *master = cpu_dp->master;
	struct dsa_switch *ds = port->ds;
	struct net_device *slave_dev;
	struct dsa_slave_priv *p;
	int ret;

	if (!ds->num_tx_queues)
		ds->num_tx_queues = 1;

	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
				     NET_NAME_UNKNOWN, ether_setup,
				     ds->num_tx_queues, 1);
	if (slave_dev == NULL)
		return -ENOMEM;

	slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
	slave_dev->hw_features |= NETIF_F_HW_TC;
	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
	eth_hw_addr_inherit(slave_dev, master);
	slave_dev->priv_flags |= IFF_NO_QUEUE;
	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
	slave_dev->switchdev_ops = &dsa_slave_switchdev_ops;
	slave_dev->min_mtu = 0;
	slave_dev->max_mtu = ETH_MAX_MTU;
	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);

	netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
				 NULL);

	SET_NETDEV_DEV(slave_dev, port->ds->dev);
	slave_dev->dev.of_node = port->dn;
	slave_dev->vlan_features = master->vlan_features;

	p = netdev_priv(slave_dev);
	p->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!p->stats64) {
		free_netdev(slave_dev);
		return -ENOMEM;
	}
	p->dp = port;
	INIT_LIST_HEAD(&p->mall_tc_list);
	p->xmit = cpu_dp->tag_ops->xmit;

	p->old_pause = -1;
	p->old_link = -1;
	p->old_duplex = -1;

	port->slave = slave_dev;

	netif_carrier_off(slave_dev);

	ret = dsa_slave_phy_setup(slave_dev);
	if (ret) {
		netdev_err(master, "error %d setting up slave phy\n", ret);
		goto out_free;
	}

	dsa_slave_notify(slave_dev, DSA_PORT_REGISTER);

	ret = register_netdev(slave_dev);
	if (ret) {
		netdev_err(master, "error %d registering interface %s\n",
			   ret, slave_dev->name);
		goto out_phy;
	}

	return 0;

out_phy:
	phy_disconnect(slave_dev->phydev);
	if (of_phy_is_fixed_link(port->dn))
		of_phy_deregister_fixed_link(port->dn);
out_free:
	free_percpu(p->stats64);
	free_netdev(slave_dev);
	port->slave = NULL;
	return ret;
}

void dsa_slave_destroy(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_slave_priv *p = netdev_priv(slave_dev);
	struct device_node *port_dn = dp->dn;

	netif_carrier_off(slave_dev);
	if (slave_dev->phydev) {
		phy_disconnect(slave_dev->phydev);

		if (of_phy_is_fixed_link(port_dn))
			of_phy_deregister_fixed_link(port_dn);
	}
	dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
	unregister_netdev(slave_dev);
	free_percpu(p->stats64);
	free_netdev(slave_dev);
}

static bool dsa_slave_dev_check(struct net_device *dev)
{
	return dev->netdev_ops == &dsa_slave_netdev_ops;
}

static int dsa_slave_changeupper(struct net_device *dev,
				 struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err = NOTIFY_DONE;

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_bridge_join(dp, info->upper_dev);
			err = notifier_from_errno(err);
		} else {
			dsa_port_bridge_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	}

	return err;
}

static int dsa_slave_netdevice_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!dsa_slave_dev_check(dev))
		return NOTIFY_DONE;

	if (event == NETDEV_CHANGEUPPER)
		return dsa_slave_changeupper(dev, ptr);

	return NOTIFY_DONE;
}
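
/* SWITCHDEV_FDB_ADD/DEL_TO_DEVICE notifications arrive in atomic context, so
 * they are copied into a work item and the actual dsa_port_fdb_add()/del()
 * calls run later from the DSA workqueue under the rtnl lock.
 */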
struct dsa_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	unsigned long event;
};

static void dsa_slave_switchdev_event_work(struct work_struct *work)
{
	struct dsa_switchdev_event_work *switchdev_work =
		container_of(work, struct dsa_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	rtnl_lock();
	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		err = dsa_port_fdb_add(dp, fdb_info->addr, fdb_info->vid);
		if (err) {
			netdev_dbg(dev, "fdb add failed err=%d\n", err);
			break;
		}
		call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
					 &fdb_info->info);
		break;

	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		err = dsa_port_fdb_del(dp, fdb_info->addr, fdb_info->vid);
		if (err) {
			netdev_dbg(dev, "fdb del failed err=%d\n", err);
			dev_close(dev);
		}
		break;
	}
	rtnl_unlock();

	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}

static int
dsa_slave_switchdev_fdb_work_init(struct dsa_switchdev_event_work *
				  switchdev_work,
				  const struct switchdev_notifier_fdb_info *
				  fdb_info)
{
	memcpy(&switchdev_work->fdb_info, fdb_info,
	       sizeof(switchdev_work->fdb_info));
	switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
	if (!switchdev_work->fdb_info.addr)
		return -ENOMEM;
	ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
			fdb_info->addr);
	return 0;
}

/* Called under rcu_read_lock() */
static int dsa_slave_switchdev_event(struct notifier_block *unused,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct dsa_switchdev_event_work *switchdev_work;

	if (!dsa_slave_dev_check(dev))
		return NOTIFY_DONE;

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&switchdev_work->work,
		  dsa_slave_switchdev_event_work);
	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (dsa_slave_switchdev_fdb_work_init(switchdev_work,
						      ptr))
			goto err_fdb_work_init;
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	dsa_schedule_work(&switchdev_work->work);
	return NOTIFY_OK;

err_fdb_work_init:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

static struct notifier_block dsa_slave_nb __read_mostly = {
	.notifier_call = dsa_slave_netdevice_event,
};

static struct notifier_block dsa_slave_switchdev_notifier = {
	.notifier_call = dsa_slave_switchdev_event,
};
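
/* Register the netdevice and switchdev notifiers that feed the handlers
 * above; dsa_slave_unregister_notifier() undoes both.
 */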
int dsa_slave_register_notifier(void)
{
	int err;

	err = register_netdevice_notifier(&dsa_slave_nb);
	if (err)
		return err;

	err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		goto err_switchdev_nb;

	return 0;

err_switchdev_nb:
	unregister_netdevice_notifier(&dsa_slave_nb);
	return err;
}

void dsa_slave_unregister_notifier(void)
{
	int err;

	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);

	err = unregister_netdevice_notifier(&dsa_slave_nb);
	if (err)
		pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
}