// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/slave.c - Slave device handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/selftests.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/if_hsr.h>
#include <net/dcbnl.h>
#include <linux/netpoll.h>

#include "dsa_priv.h"
#include "port.h"

static void dsa_slave_standalone_event_work(struct work_struct *work)
{
	struct dsa_standalone_event_work *standalone_work =
		container_of(work, struct dsa_standalone_event_work, work);
	const unsigned char *addr = standalone_work->addr;
	struct net_device *dev = standalone_work->dev;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_mdb mdb;
	struct dsa_switch *ds = dp->ds;
	u16 vid = standalone_work->vid;
	int err;

	switch (standalone_work->event) {
	case DSA_UC_ADD:
		err = dsa_port_standalone_host_fdb_add(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to fdb: %d\n",
				dp->index, addr, vid, err);
			break;
		}
		break;

	case DSA_UC_DEL:
		err = dsa_port_standalone_host_fdb_del(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from fdb: %d\n",
				dp->index, addr, vid, err);
		}

		break;
	case DSA_MC_ADD:
		ether_addr_copy(mdb.addr, addr);
		mdb.vid = vid;

		err = dsa_port_standalone_host_mdb_add(dp, &mdb);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to mdb: %d\n",
				dp->index, addr, vid, err);
			break;
		}
		break;
	case DSA_MC_DEL:
		ether_addr_copy(mdb.addr, addr);
		mdb.vid = vid;

		err = dsa_port_standalone_host_mdb_del(dp, &mdb);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from mdb: %d\n",
				dp->index, addr, vid, err);
		}

		break;
	}

	kfree(standalone_work);
}

static int dsa_slave_schedule_standalone_work(struct net_device *dev,
					      enum dsa_standalone_event event,
					      const unsigned char *addr,
					      u16 vid)
{
	struct dsa_standalone_event_work *standalone_work;

	standalone_work = kzalloc(sizeof(*standalone_work), GFP_ATOMIC);
	if (!standalone_work)
		return -ENOMEM;

	INIT_WORK(&standalone_work->work, dsa_slave_standalone_event_work);
	standalone_work->event = event;
	standalone_work->dev = dev;

	ether_addr_copy(standalone_work->addr, addr);
	standalone_work->vid = vid;

	dsa_schedule_work(&standalone_work->work);

	return 0;
}

static int dsa_slave_sync_uc(struct net_device *dev,
			     const unsigned char *addr)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	dev_uc_add(master, addr);

	if (!dsa_switch_supports_uc_filtering(dp->ds))
		return 0;

	return dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD, addr, 0);
}

static int dsa_slave_unsync_uc(struct net_device *dev,
			       const unsigned char *addr)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	dev_uc_del(master, addr);

	if (!dsa_switch_supports_uc_filtering(dp->ds))
		return 0;

	return dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL, addr, 0);
}

static int dsa_slave_sync_mc(struct net_device *dev,
			     const unsigned char *addr)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	dev_mc_add(master, addr);

	if (!dsa_switch_supports_mc_filtering(dp->ds))
		return 0;

	return dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, addr, 0);
}

static int dsa_slave_unsync_mc(struct net_device *dev,
			       const unsigned char *addr)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	dev_mc_del(master, addr);

	if (!dsa_switch_supports_mc_filtering(dp->ds))
		return 0;

	return dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, addr, 0);
}

void dsa_slave_sync_ha(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(dev);

	netdev_for_each_synced_mc_addr(ha, dev)
		dsa_slave_sync_mc(dev, ha->addr);

	netdev_for_each_synced_uc_addr(ha, dev)
		dsa_slave_sync_uc(dev, ha->addr);

	netif_addr_unlock_bh(dev);

	if (dsa_switch_supports_uc_filtering(ds) ||
	    dsa_switch_supports_mc_filtering(ds))
		dsa_flush_workqueue();
}

void dsa_slave_unsync_ha(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(dev);

	netdev_for_each_synced_uc_addr(ha, dev)
		dsa_slave_unsync_uc(dev, ha->addr);

	netdev_for_each_synced_mc_addr(ha, dev)
		dsa_slave_unsync_mc(dev, ha->addr);

	netif_addr_unlock_bh(dev);

	if (dsa_switch_supports_uc_filtering(ds) ||
	    dsa_switch_supports_mc_filtering(ds))
		dsa_flush_workqueue();
}

/* slave mii_bus handling ***************************************************/
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_read(ds, addr, reg);

	return 0xffff;
}

static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_write(ds, addr, reg, val);

	return 0;
}

void dsa_slave_mii_bus_init(struct dsa_switch *ds)
{
	ds->slave_mii_bus->priv = (void *)ds;
	ds->slave_mii_bus->name = "dsa slave smi";
	ds->slave_mii_bus->read = dsa_slave_phy_read;
	ds->slave_mii_bus->write = dsa_slave_phy_write;
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
		 ds->dst->index, ds->index);
	ds->slave_mii_bus->parent = ds->dev;
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}


/* slave device handling ****************************************************/
static int dsa_slave_get_iflink(const struct net_device *dev)
{
	return dsa_slave_to_master(dev)->ifindex;
}

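/* Bring up a user port: open the DSA master first, install the port's MAC
 * address as a standalone host FDB entry when the switch does unicast
 * filtering, mirror it into the master's UC list when it differs from the
 * master's own address, and finally enable the port itself.
 */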
static int dsa_slave_open(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int err;

	err = dev_open(master, NULL);
	if (err < 0) {
		netdev_err(dev, "failed to open master %s\n", master->name);
		goto out;
	}

	if (dsa_switch_supports_uc_filtering(ds)) {
		err = dsa_port_standalone_host_fdb_add(dp, dev->dev_addr, 0);
		if (err)
			goto out;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
		err = dev_uc_add(master, dev->dev_addr);
		if (err < 0)
			goto del_host_addr;
	}

	err = dsa_port_enable_rt(dp, dev->phydev);
	if (err)
		goto del_unicast;

	return 0;

del_unicast:
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);
del_host_addr:
	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
out:
	return err;
}

static int dsa_slave_close(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	dsa_port_disable_rt(dp);

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);

	return 0;
}

static void dsa_slave_manage_host_flood(struct net_device *dev)
{
	bool mc = dev->flags & (IFF_PROMISC | IFF_ALLMULTI);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	bool uc = dev->flags & IFF_PROMISC;

	dsa_port_set_host_flood(dp, uc, mc);
}

static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(master,
				 dev->flags & IFF_ALLMULTI ? 1 : -1);
	if (change & IFF_PROMISC)
		dev_set_promiscuity(master,
				    dev->flags & IFF_PROMISC ? 1 : -1);

	if (dsa_switch_supports_uc_filtering(ds) &&
	    dsa_switch_supports_mc_filtering(ds))
		dsa_slave_manage_host_flood(dev);
}

static void dsa_slave_set_rx_mode(struct net_device *dev)
{
	__dev_mc_sync(dev, dsa_slave_sync_mc, dsa_slave_unsync_mc);
	__dev_uc_sync(dev, dsa_slave_sync_uc, dsa_slave_unsync_uc);
}

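/* Change the port MAC address. While the interface is up, the new address is
 * installed (standalone host FDB entry and master UC list) before the old one
 * is removed; when the port is down nothing is synced yet, so only the netdev
 * address itself is updated.
 */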
static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* If the port is down, the address isn't synced yet to hardware or
	 * to the DSA master, so there is nothing to change.
	 */
	if (!(dev->flags & IFF_UP))
		goto out_change_dev_addr;

	if (dsa_switch_supports_uc_filtering(ds)) {
		err = dsa_port_standalone_host_fdb_add(dp, addr->sa_data, 0);
		if (err)
			return err;
	}

	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			goto del_unicast;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);

out_change_dev_addr:
	eth_hw_addr_set(dev, addr->sa_data);

	return 0;

del_unicast:
	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, addr->sa_data, 0);

	return err;
}

struct dsa_slave_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			   bool is_static, void *data)
{
	struct dsa_slave_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = NTF_SELF;
	ndm->ndm_type = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

static int
dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
		   struct net_device *dev, struct net_device *filter_dev,
		   int *idx)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_dump_ctx dump = {
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
	*idx = dump.idx;

	return err;
}

static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	int port = p->dp->index;

	/* Pass through to switch driver if it supports timestamping */
	switch (cmd) {
	case SIOCGHWTSTAMP:
		if (ds->ops->port_hwtstamp_get)
			return ds->ops->port_hwtstamp_get(ds, port, ifr);
		break;
	case SIOCSHWTSTAMP:
		if (ds->ops->port_hwtstamp_set)
			return ds->ops->port_hwtstamp_set(ds, port, ifr);
		break;
	}

	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}

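/* switchdev attribute offload. Attributes are only acted upon when this port
 * offloads the bridge port or bridge device they originated from; everything
 * else returns -EOPNOTSUPP.
 */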
static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
				   const struct switchdev_attr *attr,
				   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int ret;

	if (ctx && ctx != dp)
		return 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_set_state(dp, attr->u.stp_state, true);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MST_STATE:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_set_mst_state(dp, &attr->u.mst_state, extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
					      extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MST:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_mst_enable(dp, attr->u.mst, extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
						extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
		break;
	case SWITCHDEV_ATTR_ID_VLAN_MSTI:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_vlan_msti(dp, &attr->u.vlan_msti);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/* Must be called under rcu_read_lock() */
static int
dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
				      const struct switchdev_obj_port_vlan *vlan)
{
	struct net_device *upper_dev;
	struct list_head *iter;

	netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
		u16 vid;

		if (!is_vlan_dev(upper_dev))
			continue;

		vid = vlan_dev_vlan_id(upper_dev);
		if (vid == vlan->vid)
			return -EBUSY;
	}

	return 0;
}

static int dsa_slave_vlan_add(struct net_device *dev,
			      const struct switchdev_obj *obj,
			      struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;
	int err;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
	 * the same VID.
	 */
	if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) {
		rcu_read_lock();
		err = dsa_slave_vlan_check_for_8021q_uppers(dev, vlan);
		rcu_read_unlock();
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Port already has a VLAN upper with this VID");
			return err;
		}
	}

	return dsa_port_vlan_add(dp, vlan, extack);
}

/* Offload a VLAN installed on the bridge or on a foreign interface by
 * installing it as a VLAN towards the CPU port.
 */
static int dsa_slave_host_vlan_add(struct net_device *dev,
				   const struct switchdev_obj *obj,
				   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan;

	/* Do nothing if this is a software bridge */
	if (!dp->bridge)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Even though drivers often handle CPU membership in special ways,
	 * it doesn't make sense to program a PVID, so clear this flag.
	 */
	vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;

	return dsa_port_host_vlan_add(dp, &vlan, extack);
}

static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
				  const struct switchdev_obj *obj,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_slave_vlan_add(dev, obj, extack);
		else
			err = dsa_slave_host_vlan_add(dev, obj, extack);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_slave_vlan_del(struct net_device *dev,
			      const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	return dsa_port_vlan_del(dp, vlan);
}

static int dsa_slave_host_vlan_del(struct net_device *dev,
				   const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;

	/* Do nothing if this is a software bridge */
	if (!dp->bridge)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	return dsa_port_host_vlan_del(dp, vlan);
}

static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_slave_vlan_del(dev, obj);
		else
			err = dsa_slave_host_vlan_del(dev, obj);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
						     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_slave_priv *p = netdev_priv(dev);

	return netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
	return NETDEV_TX_OK;
#endif
}

static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
				 struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return;

	if (!ds->ops->port_txtstamp)
		return;

	ds->ops->port_txtstamp(ds, p->dp->index, skb);
}

netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
{
	/* SKBs for netpoll still need to be mangled with the protocol-specific
	 * tag to be successfully transmitted
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_slave_netpoll_send_skb(dev, skb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	skb->dev = dsa_slave_to_master(dev);
	dev_queue_xmit(skb);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);

static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
{
	int needed_headroom = dev->needed_headroom;
	int needed_tailroom = dev->needed_tailroom;

	/* For tail taggers, we need to pad short frames ourselves, to ensure
	 * that the tail tag does not fail at its role of being at the end of
	 * the packet, once the master interface pads the frame. Account for
	 * that pad length here, and pad later.
	 */
	if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
		needed_tailroom += ETH_ZLEN - skb->len;
	/* skb_headroom() returns unsigned int... */
	needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);

	if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
		/* No reallocation needed, yay! */
		return 0;

	return pskb_expand_head(skb, needed_headroom, needed_tailroom,
				GFP_ATOMIC);
}

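/* Transmit path for user ports: account the frame in the software counters,
 * clear skb->cb for the tagger, request a TX timestamp if applicable, make
 * sure there is enough head/tailroom for the tag (padding short frames for
 * tail taggers), then hand the skb to the tagging protocol's xmit and queue
 * the result on the DSA master.
 */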
static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct sk_buff *nskb;

	dev_sw_netstats_tx_add(dev, 1, skb->len);

	memset(skb->cb, 0, sizeof(skb->cb));

	/* Handle tx timestamp if any */
	dsa_skb_tx_timestamp(p, skb);

	if (dsa_realloc_skb(skb, dev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* needed_tailroom should still be 'warm' in the cache line from
	 * dsa_realloc_skb(), which has also ensured that padding is safe.
	 */
	if (dev->needed_tailroom)
		eth_skb_pad(skb);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	return dsa_enqueue_skb(nskb, dev);
}

/* ethtool operations *******************************************************/

static void dsa_slave_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
	strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}

static int dsa_slave_get_regs_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs_len)
		return ds->ops->get_regs_len(ds, dp->index);

	return -EOPNOTSUPP;
}

static void
dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs)
		ds->ops->get_regs(ds, dp->index, regs, _p);
}

static int dsa_slave_nway_reset(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_nway_reset(dp->pl);
}

static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->cd && ds->cd->eeprom_len)
		return ds->cd->eeprom_len;

	if (ds->ops->get_eeprom_len)
		return ds->ops->get_eeprom_len(ds);

	return 0;
}

static int dsa_slave_get_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eeprom)
		return ds->ops->get_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static int dsa_slave_set_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->set_eeprom)
		return ds->ops->set_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static void dsa_slave_get_strings(struct net_device *dev,
				  uint32_t stringset, uint8_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strncpy(data, "tx_packets", len);
		strncpy(data + len, "tx_bytes", len);
		strncpy(data + 2 * len, "rx_packets", len);
		strncpy(data + 3 * len, "rx_bytes", len);
		if (ds->ops->get_strings)
			ds->ops->get_strings(ds, dp->index, stringset,
					     data + 4 * len);
	} else if (stringset == ETH_SS_TEST) {
		net_selftest_get_strings(data);
	}

}

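/* ethtool statistics: slots 0-3 are the software tx/rx packet and byte
 * counters accumulated from the per-CPU dev->tstats, and any driver-provided
 * counters follow at offset 4 (see dsa_slave_get_strings() and
 * dsa_slave_get_sset_count()).
 */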
static void dsa_slave_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats,
					uint64_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(dev->tstats, i);
		do {
			start = u64_stats_fetch_begin(&s->syncp);
			tx_packets = u64_stats_read(&s->tx_packets);
			tx_bytes = u64_stats_read(&s->tx_bytes);
			rx_packets = u64_stats_read(&s->rx_packets);
			rx_bytes = u64_stats_read(&s->rx_bytes);
		} while (u64_stats_fetch_retry(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}

static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (sset == ETH_SS_STATS) {
		int count = 0;

		if (ds->ops->get_sset_count) {
			count = ds->ops->get_sset_count(ds, dp->index, sset);
			if (count < 0)
				return count;
		}

		return count + 4;
	} else if (sset == ETH_SS_TEST) {
		return net_selftest_get_count();
	}

	return -EOPNOTSUPP;
}

static void dsa_slave_get_eth_phy_stats(struct net_device *dev,
					struct ethtool_eth_phy_stats *phy_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_phy_stats)
		ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats);
}

static void dsa_slave_get_eth_mac_stats(struct net_device *dev,
					struct ethtool_eth_mac_stats *mac_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_mac_stats)
		ds->ops->get_eth_mac_stats(ds, dp->index, mac_stats);
}

static void
dsa_slave_get_eth_ctrl_stats(struct net_device *dev,
			     struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_ctrl_stats)
		ds->ops->get_eth_ctrl_stats(ds, dp->index, ctrl_stats);
}

static void
dsa_slave_get_rmon_stats(struct net_device *dev,
			 struct ethtool_rmon_stats *rmon_stats,
			 const struct ethtool_rmon_hist_range **ranges)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_rmon_stats)
		ds->ops->get_rmon_stats(ds, dp->index, rmon_stats, ranges);
}

static void dsa_slave_net_selftest(struct net_device *ndev,
				   struct ethtool_test *etest, u64 *buf)
{
	struct dsa_port *dp = dsa_slave_to_port(ndev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->self_test) {
		ds->ops->self_test(ds, dp->index, etest, buf);
		return;
	}

	net_selftest(ndev, etest, buf);
}

static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	phylink_ethtool_get_wol(dp->pl, w);

	if (ds->ops->get_wol)
		ds->ops->get_wol(ds, dp->index, w);
}

static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret = -EOPNOTSUPP;

	phylink_ethtool_set_wol(dp->pl, w);

	if (ds->ops->set_wol)
		ret = ds->ops->set_wol(ds, dp->index, w);

	return ret;
}

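/* EEE is configured in two steps: the MAC side through the switch driver's
 * set_mac_eee/get_mac_eee ops, and the PHY side through phylink.
 */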
static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->set_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->set_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_set_eee(dp->pl, e);
}

static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->get_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->get_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_get_eee(dp->pl, e);
}

static int dsa_slave_get_link_ksettings(struct net_device *dev,
					struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_get(dp->pl, cmd);
}

static int dsa_slave_set_link_ksettings(struct net_device *dev,
					const struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_set(dp->pl, cmd);
}

static void dsa_slave_get_pause_stats(struct net_device *dev,
				      struct ethtool_pause_stats *pause_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_pause_stats)
		ds->ops->get_pause_stats(ds, dp->index, pause_stats);
}

static void dsa_slave_get_pauseparam(struct net_device *dev,
				     struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	phylink_ethtool_get_pauseparam(dp->pl, pause);
}

static int dsa_slave_set_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_set_pauseparam(dp->pl, pause);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_slave_netpoll_setup(struct net_device *dev,
				   struct netpoll_info *ni)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	if (!netpoll)
		return -ENOMEM;

	err = __netpoll_setup(netpoll, master);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	p->netpoll = netpoll;
out:
	return err;
}

static void dsa_slave_netpoll_cleanup(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll = p->netpoll;

	if (!netpoll)
		return;

	p->netpoll = NULL;

	__netpoll_free(netpoll);
}

static void dsa_slave_poll_controller(struct net_device *dev)
{
}
#endif

static struct dsa_mall_tc_entry *
dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

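/* tc matchall offload: mirroring requires the destination to be another DSA
 * user port, and policing is only accepted on the ingress qdisc with a single
 * policer per port. Each offloaded entry is tracked in p->mall_tc_list, keyed
 * by the tc cookie, so it can be removed later.
 */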
static int
dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_mirror_tc_entry *mirror;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	struct dsa_port *to_dp;
	int err;

	if (!ds->ops->port_mirror_add)
		return -EOPNOTSUPP;

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	act = &cls->rule->action.entries[0];

	if (!act->dev)
		return -EINVAL;

	if (!dsa_slave_dev_check(act->dev))
		return -EOPNOTSUPP;

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
	mirror = &mall_tc_entry->mirror;

	to_dp = dsa_slave_to_port(act->dev);

	mirror->to_local_port = to_dp->index;
	mirror->ingress = ingress;

	err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress, extack);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

static int
dsa_slave_add_cls_matchall_police(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_policer_tc_entry *policer;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	int err;

	if (!ds->ops->port_policer_add) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Policing offload not implemented");
		return -EOPNOTSUPP;
	}

	if (!ingress) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only supported on ingress qdisc");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
		if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only one port policer allowed");
			return -EEXIST;
		}
	}

	act = &cls->rule->action.entries[0];

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_POLICER;
	policer = &mall_tc_entry->policer;
	policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
	policer->burst = act->police.burst;

	err = ds->ops->port_policer_add(ds, dp->index, policer);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

static int dsa_slave_add_cls_matchall(struct net_device *dev,
				      struct tc_cls_matchall_offload *cls,
				      bool ingress)
{
	int err = -EOPNOTSUPP;

	if (cls->common.protocol == htons(ETH_P_ALL) &&
	    flow_offload_has_one_action(&cls->rule->action) &&
	    cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
		err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
	else if (flow_offload_has_one_action(&cls->rule->action) &&
		 cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
		err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);

	return err;
}

static void dsa_slave_del_cls_matchall(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;

	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
	if (!mall_tc_entry)
		return;

	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case DSA_PORT_MALL_MIRROR:
		if (ds->ops->port_mirror_del)
			ds->ops->port_mirror_del(ds, dp->index,
						 &mall_tc_entry->mirror);
		break;
	case DSA_PORT_MALL_POLICER:
		if (ds->ops->port_policer_del)
			ds->ops->port_policer_del(ds, dp->index);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
					   struct tc_cls_matchall_offload *cls,
					   bool ingress)
{
	if (cls->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dsa_slave_add_cls_matchall(dev, cls, ingress);
	case TC_CLSMATCHALL_DESTROY:
		dsa_slave_del_cls_matchall(dev, cls);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_add_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_add)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_add(ds, port, cls, ingress);
}

static int dsa_slave_del_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_del)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_del(ds, port, cls, ingress);
}

static int dsa_slave_stats_cls_flower(struct net_device *dev,
				      struct flow_cls_offload *cls,
				      bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_stats)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_stats(ds, port, cls, ingress);
}

static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
					 struct flow_cls_offload *cls,
					 bool ingress)
{
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		return dsa_slave_add_cls_flower(dev, cls, ingress);
	case FLOW_CLS_DESTROY:
		return dsa_slave_del_cls_flower(dev, cls, ingress);
	case FLOW_CLS_STATS:
		return dsa_slave_stats_cls_flower(dev, cls, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

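/* tc offload entry points: ingress and egress flow blocks are bound to the
 * same callback with an "ingress" flag, which then dispatches matchall and
 * flower rules to the handlers above; other classifier types are rejected.
 * Setup types other than blocks and flowtables are passed to the switch
 * driver's port_setup_tc().
 */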
static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				       void *cb_priv, bool ingress)
{
	struct net_device *dev = cb_priv;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
	case TC_SETUP_CLSFLOWER:
		return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
}

static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
}

static LIST_HEAD(dsa_slave_block_cb_list);

static int dsa_slave_setup_tc_block(struct net_device *dev,
				    struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		cb = dsa_slave_setup_tc_block_cb_ig;
	else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		cb = dsa_slave_setup_tc_block_cb_eg;
	else
		return -EOPNOTSUPP;

	f->driver_block_list = &dsa_slave_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_ft_block(struct dsa_switch *ds, int port,
				    void *type_data)
{
	struct net_device *master = dsa_port_to_master(dsa_to_port(ds, port));

	if (!master->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	return master->netdev_ops->ndo_setup_tc(master, TC_SETUP_FT, type_data);
}

static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	switch (type) {
	case TC_SETUP_BLOCK:
		return dsa_slave_setup_tc_block(dev, type_data);
	case TC_SETUP_FT:
		return dsa_slave_setup_ft_block(ds, dp->index, type_data);
	default:
		break;
	}

	if (!ds->ops->port_setup_tc)
		return -EOPNOTSUPP;

	return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
}

static int dsa_slave_get_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}

static int dsa_slave_set_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->set_rxnfc(ds, dp->index, nfc);
}

static int dsa_slave_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *ts)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;

	if (!ds->ops->get_ts_info)
		return -EOPNOTSUPP;

	return ds->ops->get_ts_info(ds, p->dp->index, ts);
}

static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
				     u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	struct netlink_ext_ack extack = {0};
	int ret;

	/* User port... */
	ret = dsa_port_vlan_add(dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "%s\n", extack._msg);
		return ret;
	}

	/* And CPU port... */
	ret = dsa_port_host_vlan_add(dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index,
				   extack._msg);
		return ret;
	}

	return 0;
}

static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
				      u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.vid = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	int err;

	err = dsa_port_vlan_del(dp, &vlan);
	if (err)
		return err;

	return dsa_port_host_vlan_del(dp, &vlan);
}

static int dsa_slave_restore_vlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);

	return dsa_slave_vlan_rx_add_vid(arg, proto, vid);
}

static int dsa_slave_clear_vlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);

	return dsa_slave_vlan_rx_kill_vid(arg, proto, vid);
}

/* Keep the VLAN RX filtering list in sync with the hardware only if VLAN
 * filtering is enabled. The baseline is that only ports that offload a
 * VLAN-aware bridge are VLAN-aware, and standalone ports are VLAN-unaware,
 * but there are exceptions for quirky hardware.
 *
 * If ds->vlan_filtering_is_global = true, then standalone ports which share
 * the same switch with other ports that offload a VLAN-aware bridge are also
 * inevitably VLAN-aware.
 *
 * To summarize, a DSA switch port offloads:
 *
 * - If standalone (this includes software bridge, software LAG):
 *   - if ds->needs_standalone_vlan_filtering = true, OR if
 *     (ds->vlan_filtering_is_global = true AND there are bridges spanning
 *     this switch chip which have vlan_filtering=1)
 *     - the 8021q upper VLANs
 *   - else (standalone VLAN filtering is not needed, VLAN filtering is not
 *     global, or it is, but no port is under a VLAN-aware bridge):
 *     - no VLAN (any 8021q upper is a software VLAN)
 *
 * - If under a vlan_filtering=0 bridge which it offloads:
 *   - if ds->configure_vlan_while_not_filtering = true (default):
 *     - the bridge VLANs. These VLANs are committed to hardware but inactive.
 *   - else (deprecated):
 *     - no VLAN. The bridge VLANs are not restored when VLAN awareness is
 *       enabled, so this behavior is broken and discouraged.
 *
 * - If under a vlan_filtering=1 bridge which it offloads:
 *   - the bridge VLANs
 *   - the 8021q upper VLANs
 */
int dsa_slave_manage_vlan_filtering(struct net_device *slave,
				    bool vlan_filtering)
{
	int err;

	if (vlan_filtering) {
		slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

		err = vlan_for_each(slave, dsa_slave_restore_vlan, slave);
		if (err) {
			vlan_for_each(slave, dsa_slave_clear_vlan, slave);
			slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
			return err;
		}
	} else {
		err = vlan_for_each(slave, dsa_slave_clear_vlan, slave);
		if (err)
			return err;

		slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	return 0;
}

struct dsa_hw_port {
	struct list_head list;
	struct net_device *dev;
	int old_mtu;
};

static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
{
	const struct dsa_hw_port *p;
	int err;

	list_for_each_entry(p, hw_port_list, list) {
		if (p->dev->mtu == mtu)
			continue;

		err = dev_set_mtu(p->dev, mtu);
		if (err)
			goto rollback;
	}

	return 0;

rollback:
	list_for_each_entry_continue_reverse(p, hw_port_list, list) {
		if (p->dev->mtu == p->old_mtu)
			continue;

		if (dev_set_mtu(p->dev, p->old_mtu))
			netdev_err(p->dev, "Failed to restore MTU\n");
	}

	return err;
}

static void dsa_hw_port_list_free(struct list_head *hw_port_list)
{
	struct dsa_hw_port *p, *n;

	list_for_each_entry_safe(p, n, hw_port_list, list)
		kfree(p);
}

/* Make the hardware datapath to/from @dev limited to a common MTU */
static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
{
	struct list_head hw_port_list;
	struct dsa_switch_tree *dst;
	int min_mtu = ETH_MAX_MTU;
	struct dsa_port *other_dp;
	int err;

	if (!dp->ds->mtu_enforcement_ingress)
		return;

	if (!dp->bridge)
		return;

	INIT_LIST_HEAD(&hw_port_list);

	/* Populate the list of ports that are part of the same bridge
	 * as the newly added/modified port
	 */
	list_for_each_entry(dst, &dsa_tree_list, list) {
		list_for_each_entry(other_dp, &dst->ports, list) {
			struct dsa_hw_port *hw_port;
			struct net_device *slave;

			if (other_dp->type != DSA_PORT_TYPE_USER)
				continue;

			if (!dsa_port_bridge_same(dp, other_dp))
				continue;

			if (!other_dp->ds->mtu_enforcement_ingress)
				continue;

			slave = other_dp->slave;

			if (min_mtu > slave->mtu)
				min_mtu = slave->mtu;

			hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
			if (!hw_port)
				goto out;

			hw_port->dev = slave;
			hw_port->old_mtu = slave->mtu;

			list_add(&hw_port->list, &hw_port_list);
		}
	}

	/* Attempt to configure the entire hardware bridge to the newly added
	 * interface's MTU first, regardless of whether the intention of the
	 * user was to raise or lower it.
	 */
	err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
	if (!err)
		goto out;

	/* Clearly that didn't work out so well, so just set the minimum MTU on
	 * all hardware bridge ports now. If this fails too, then all ports will
	 * still have their old MTU rolled back anyway.
	 */
	dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);

out:
	dsa_hw_port_list_free(&hw_port_list);
}

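/* Change the MTU of a user port. The DSA master must be able to carry the
 * largest user port MTU plus the tagging protocol overhead, so the master and
 * CPU port MTUs are updated first (and rolled back if the switch port itself
 * rejects the new MTU), then the per-port MTU is applied and bridge MTU
 * normalization is re-run.
 */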
int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	int largest_mtu = 0;
	int new_master_mtu;
	int old_master_mtu;
	int mtu_limit;
	int cpu_mtu;
	int err;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	dsa_tree_for_each_user_port(other_dp, ds->dst) {
		int slave_mtu;

		/* During probe, this function will be called for each slave
		 * device, while not all of them have been allocated. That's
		 * ok, it doesn't change what the maximum is, so ignore it.
		 */
		if (!other_dp->slave)
			continue;

		/* Pretend that we already applied the setting, which we
		 * actually haven't (still haven't done all integrity checks)
		 */
		if (dp == other_dp)
			slave_mtu = new_mtu;
		else
			slave_mtu = other_dp->slave->mtu;

		if (largest_mtu < slave_mtu)
			largest_mtu = slave_mtu;
	}

	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
	old_master_mtu = master->mtu;
	new_master_mtu = largest_mtu + dsa_tag_protocol_overhead(cpu_dp->tag_ops);
	if (new_master_mtu > mtu_limit)
		return -ERANGE;

	/* If the master MTU isn't over limit, there's no need to check the CPU
	 * MTU, since that surely isn't either.
	 */
	cpu_mtu = largest_mtu;

	/* Start applying stuff */
	if (new_master_mtu != old_master_mtu) {
		err = dev_set_mtu(master, new_master_mtu);
		if (err < 0)
			goto out_master_failed;

		/* We only need to propagate the MTU of the CPU port to
		 * upstream switches, so emit a notifier which updates them.
		 */
		err = dsa_port_mtu_change(cpu_dp, cpu_mtu);
		if (err)
			goto out_cpu_failed;
	}

	err = ds->ops->port_change_mtu(ds, dp->index, new_mtu);
	if (err)
		goto out_port_failed;

	dev->mtu = new_mtu;

	dsa_bridge_mtu_normalization(dp);

	return 0;

out_port_failed:
	if (new_master_mtu != old_master_mtu)
		dsa_port_mtu_change(cpu_dp, old_master_mtu -
				    dsa_tag_protocol_overhead(cpu_dp->tag_ops));
out_cpu_failed:
	if (new_master_mtu != old_master_mtu)
		dev_set_mtu(master, old_master_mtu);
out_master_failed:
	return err;
}

static int __maybe_unused
dsa_slave_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	unsigned long mask, new_prio;
	int err, port = dp->index;

	if (!ds->ops->port_set_default_prio)
		return -EOPNOTSUPP;

	err = dcb_ieee_setapp(dev, app);
	if (err)
		return err;

	mask = dcb_ieee_getapp_mask(dev, app);
	new_prio = __fls(mask);

	err = ds->ops->port_set_default_prio(ds, port, new_prio);
	if (err) {
		dcb_ieee_delapp(dev, app);
		return err;
	}

	return 0;
}

static int __maybe_unused
dsa_slave_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	unsigned long mask, new_prio;
	int err, port = dp->index;
	u8 dscp = app->protocol;

	if (!ds->ops->port_add_dscp_prio)
		return -EOPNOTSUPP;

	if (dscp >= 64) {
		netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n",
			   dscp);
		return -EINVAL;
	}

	err = dcb_ieee_setapp(dev, app);
	if (err)
		return err;

	mask = dcb_ieee_getapp_mask(dev, app);
	new_prio = __fls(mask);

	err = ds->ops->port_add_dscp_prio(ds, port, dscp, new_prio);
	if (err) {
		dcb_ieee_delapp(dev, app);
		return err;
	}

	return 0;
}

static int __maybe_unused dsa_slave_dcbnl_ieee_setapp(struct net_device *dev,
						       struct dcb_app *app)
{
	switch (app->selector) {
	case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
		switch (app->protocol) {
		case 0:
			return dsa_slave_dcbnl_set_default_prio(dev, app);
		default:
			return -EOPNOTSUPP;
		}
		break;
	case IEEE_8021QAZ_APP_SEL_DSCP:
		return dsa_slave_dcbnl_add_dscp_prio(dev, app);
	default:
		return -EOPNOTSUPP;
	}
}

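/* dcbnl APP table removal mirrors the add path: the entry is deleted from the
 * APP table first, the remaining highest priority (or 0 if none is left) is
 * re-programmed as the port default, and the entry is restored on failure.
 */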
__fls(mask) : 0; 2027 2028 err = ds->ops->port_set_default_prio(ds, port, new_prio); 2029 if (err) { 2030 dcb_ieee_setapp(dev, app); 2031 return err; 2032 } 2033 2034 return 0; 2035 } 2036 2037 static int __maybe_unused 2038 dsa_slave_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app) 2039 { 2040 struct dsa_port *dp = dsa_slave_to_port(dev); 2041 struct dsa_switch *ds = dp->ds; 2042 int err, port = dp->index; 2043 u8 dscp = app->protocol; 2044 2045 if (!ds->ops->port_del_dscp_prio) 2046 return -EOPNOTSUPP; 2047 2048 err = dcb_ieee_delapp(dev, app); 2049 if (err) 2050 return err; 2051 2052 err = ds->ops->port_del_dscp_prio(ds, port, dscp, app->priority); 2053 if (err) { 2054 dcb_ieee_setapp(dev, app); 2055 return err; 2056 } 2057 2058 return 0; 2059 } 2060 2061 static int __maybe_unused dsa_slave_dcbnl_ieee_delapp(struct net_device *dev, 2062 struct dcb_app *app) 2063 { 2064 switch (app->selector) { 2065 case IEEE_8021QAZ_APP_SEL_ETHERTYPE: 2066 switch (app->protocol) { 2067 case 0: 2068 return dsa_slave_dcbnl_del_default_prio(dev, app); 2069 default: 2070 return -EOPNOTSUPP; 2071 } 2072 break; 2073 case IEEE_8021QAZ_APP_SEL_DSCP: 2074 return dsa_slave_dcbnl_del_dscp_prio(dev, app); 2075 default: 2076 return -EOPNOTSUPP; 2077 } 2078 } 2079 2080 /* Pre-populate the DCB application priority table with the priorities 2081 * configured during switch setup, which we read from hardware here. 2082 */ 2083 static int dsa_slave_dcbnl_init(struct net_device *dev) 2084 { 2085 struct dsa_port *dp = dsa_slave_to_port(dev); 2086 struct dsa_switch *ds = dp->ds; 2087 int port = dp->index; 2088 int err; 2089 2090 if (ds->ops->port_get_default_prio) { 2091 int prio = ds->ops->port_get_default_prio(ds, port); 2092 struct dcb_app app = { 2093 .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE, 2094 .protocol = 0, 2095 .priority = prio, 2096 }; 2097 2098 if (prio < 0) 2099 return prio; 2100 2101 err = dcb_ieee_setapp(dev, &app); 2102 if (err) 2103 return err; 2104 } 2105 2106 if (ds->ops->port_get_dscp_prio) { 2107 int protocol; 2108 2109 for (protocol = 0; protocol < 64; protocol++) { 2110 struct dcb_app app = { 2111 .selector = IEEE_8021QAZ_APP_SEL_DSCP, 2112 .protocol = protocol, 2113 }; 2114 int prio; 2115 2116 prio = ds->ops->port_get_dscp_prio(ds, port, protocol); 2117 if (prio == -EOPNOTSUPP) 2118 continue; 2119 if (prio < 0) 2120 return prio; 2121 2122 app.priority = prio; 2123 2124 err = dcb_ieee_setapp(dev, &app); 2125 if (err) 2126 return err; 2127 } 2128 } 2129 2130 return 0; 2131 } 2132 2133 static const struct ethtool_ops dsa_slave_ethtool_ops = { 2134 .get_drvinfo = dsa_slave_get_drvinfo, 2135 .get_regs_len = dsa_slave_get_regs_len, 2136 .get_regs = dsa_slave_get_regs, 2137 .nway_reset = dsa_slave_nway_reset, 2138 .get_link = ethtool_op_get_link, 2139 .get_eeprom_len = dsa_slave_get_eeprom_len, 2140 .get_eeprom = dsa_slave_get_eeprom, 2141 .set_eeprom = dsa_slave_set_eeprom, 2142 .get_strings = dsa_slave_get_strings, 2143 .get_ethtool_stats = dsa_slave_get_ethtool_stats, 2144 .get_sset_count = dsa_slave_get_sset_count, 2145 .get_eth_phy_stats = dsa_slave_get_eth_phy_stats, 2146 .get_eth_mac_stats = dsa_slave_get_eth_mac_stats, 2147 .get_eth_ctrl_stats = dsa_slave_get_eth_ctrl_stats, 2148 .get_rmon_stats = dsa_slave_get_rmon_stats, 2149 .set_wol = dsa_slave_set_wol, 2150 .get_wol = dsa_slave_get_wol, 2151 .set_eee = dsa_slave_set_eee, 2152 .get_eee = dsa_slave_get_eee, 2153 .get_link_ksettings = dsa_slave_get_link_ksettings, 2154 .set_link_ksettings = dsa_slave_set_link_ksettings, 2155 
.get_pause_stats = dsa_slave_get_pause_stats, 2156 .get_pauseparam = dsa_slave_get_pauseparam, 2157 .set_pauseparam = dsa_slave_set_pauseparam, 2158 .get_rxnfc = dsa_slave_get_rxnfc, 2159 .set_rxnfc = dsa_slave_set_rxnfc, 2160 .get_ts_info = dsa_slave_get_ts_info, 2161 .self_test = dsa_slave_net_selftest, 2162 }; 2163 2164 static const struct dcbnl_rtnl_ops __maybe_unused dsa_slave_dcbnl_ops = { 2165 .ieee_setapp = dsa_slave_dcbnl_ieee_setapp, 2166 .ieee_delapp = dsa_slave_dcbnl_ieee_delapp, 2167 }; 2168 2169 static void dsa_slave_get_stats64(struct net_device *dev, 2170 struct rtnl_link_stats64 *s) 2171 { 2172 struct dsa_port *dp = dsa_slave_to_port(dev); 2173 struct dsa_switch *ds = dp->ds; 2174 2175 if (ds->ops->get_stats64) 2176 ds->ops->get_stats64(ds, dp->index, s); 2177 else 2178 dev_get_tstats64(dev, s); 2179 } 2180 2181 static int dsa_slave_fill_forward_path(struct net_device_path_ctx *ctx, 2182 struct net_device_path *path) 2183 { 2184 struct dsa_port *dp = dsa_slave_to_port(ctx->dev); 2185 struct net_device *master = dsa_port_to_master(dp); 2186 struct dsa_port *cpu_dp = dp->cpu_dp; 2187 2188 path->dev = ctx->dev; 2189 path->type = DEV_PATH_DSA; 2190 path->dsa.proto = cpu_dp->tag_ops->proto; 2191 path->dsa.port = dp->index; 2192 ctx->dev = master; 2193 2194 return 0; 2195 } 2196 2197 static const struct net_device_ops dsa_slave_netdev_ops = { 2198 .ndo_open = dsa_slave_open, 2199 .ndo_stop = dsa_slave_close, 2200 .ndo_start_xmit = dsa_slave_xmit, 2201 .ndo_change_rx_flags = dsa_slave_change_rx_flags, 2202 .ndo_set_rx_mode = dsa_slave_set_rx_mode, 2203 .ndo_set_mac_address = dsa_slave_set_mac_address, 2204 .ndo_fdb_dump = dsa_slave_fdb_dump, 2205 .ndo_eth_ioctl = dsa_slave_ioctl, 2206 .ndo_get_iflink = dsa_slave_get_iflink, 2207 #ifdef CONFIG_NET_POLL_CONTROLLER 2208 .ndo_netpoll_setup = dsa_slave_netpoll_setup, 2209 .ndo_netpoll_cleanup = dsa_slave_netpoll_cleanup, 2210 .ndo_poll_controller = dsa_slave_poll_controller, 2211 #endif 2212 .ndo_setup_tc = dsa_slave_setup_tc, 2213 .ndo_get_stats64 = dsa_slave_get_stats64, 2214 .ndo_vlan_rx_add_vid = dsa_slave_vlan_rx_add_vid, 2215 .ndo_vlan_rx_kill_vid = dsa_slave_vlan_rx_kill_vid, 2216 .ndo_change_mtu = dsa_slave_change_mtu, 2217 .ndo_fill_forward_path = dsa_slave_fill_forward_path, 2218 }; 2219 2220 static struct device_type dsa_type = { 2221 .name = "dsa", 2222 }; 2223 2224 void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up) 2225 { 2226 const struct dsa_port *dp = dsa_to_port(ds, port); 2227 2228 if (dp->pl) 2229 phylink_mac_change(dp->pl, up); 2230 } 2231 EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change); 2232 2233 static void dsa_slave_phylink_fixed_state(struct phylink_config *config, 2234 struct phylink_link_state *state) 2235 { 2236 struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); 2237 struct dsa_switch *ds = dp->ds; 2238 2239 /* No need to check that this operation is valid, the callback would 2240 * not be called if it was not. 
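 * (get_fixed_state is only populated in dsa_slave_phy_setup() when the
 * switch implements ds->ops->phylink_fixed_state)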
2241 */ 2242 ds->ops->phylink_fixed_state(ds, dp->index, state); 2243 } 2244 2245 /* slave device setup *******************************************************/ 2246 static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr, 2247 u32 flags) 2248 { 2249 struct dsa_port *dp = dsa_slave_to_port(slave_dev); 2250 struct dsa_switch *ds = dp->ds; 2251 2252 slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr); 2253 if (!slave_dev->phydev) { 2254 netdev_err(slave_dev, "no phy at %d\n", addr); 2255 return -ENODEV; 2256 } 2257 2258 slave_dev->phydev->dev_flags |= flags; 2259 2260 return phylink_connect_phy(dp->pl, slave_dev->phydev); 2261 } 2262 2263 static int dsa_slave_phy_setup(struct net_device *slave_dev) 2264 { 2265 struct dsa_port *dp = dsa_slave_to_port(slave_dev); 2266 struct device_node *port_dn = dp->dn; 2267 struct dsa_switch *ds = dp->ds; 2268 u32 phy_flags = 0; 2269 int ret; 2270 2271 dp->pl_config.dev = &slave_dev->dev; 2272 dp->pl_config.type = PHYLINK_NETDEV; 2273 2274 /* The get_fixed_state callback takes precedence over polling the 2275 * link GPIO in PHYLINK (see phylink_get_fixed_state). Only set 2276 * this if the switch provides such a callback. 2277 */ 2278 if (ds->ops->phylink_fixed_state) { 2279 dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state; 2280 dp->pl_config.poll_fixed_state = true; 2281 } 2282 2283 ret = dsa_port_phylink_create(dp); 2284 if (ret) 2285 return ret; 2286 2287 if (ds->ops->get_phy_flags) 2288 phy_flags = ds->ops->get_phy_flags(ds, dp->index); 2289 2290 ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags); 2291 if (ret == -ENODEV && ds->slave_mii_bus) { 2292 /* We could not connect to a designated PHY or SFP, so try to 2293 * use the switch internal MDIO bus instead 2294 */ 2295 ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags); 2296 } 2297 if (ret) { 2298 netdev_err(slave_dev, "failed to connect to PHY: %pe\n", 2299 ERR_PTR(ret)); 2300 dsa_port_phylink_destroy(dp); 2301 } 2302 2303 return ret; 2304 } 2305 2306 void dsa_slave_setup_tagger(struct net_device *slave) 2307 { 2308 struct dsa_port *dp = dsa_slave_to_port(slave); 2309 struct net_device *master = dsa_port_to_master(dp); 2310 struct dsa_slave_priv *p = netdev_priv(slave); 2311 const struct dsa_port *cpu_dp = dp->cpu_dp; 2312 const struct dsa_switch *ds = dp->ds; 2313 2314 slave->needed_headroom = cpu_dp->tag_ops->needed_headroom; 2315 slave->needed_tailroom = cpu_dp->tag_ops->needed_tailroom; 2316 /* Try to save one extra realloc later in the TX path (in the master) 2317 * by also inheriting the master's needed headroom and tailroom. 2318 * The 8021q driver also does this. 
2319 */ 2320 slave->needed_headroom += master->needed_headroom; 2321 slave->needed_tailroom += master->needed_tailroom; 2322 2323 p->xmit = cpu_dp->tag_ops->xmit; 2324 2325 slave->features = master->vlan_features | NETIF_F_HW_TC; 2326 slave->hw_features |= NETIF_F_HW_TC; 2327 slave->features |= NETIF_F_LLTX; 2328 if (slave->needed_tailroom) 2329 slave->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST); 2330 if (ds->needs_standalone_vlan_filtering) 2331 slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 2332 } 2333 2334 int dsa_slave_suspend(struct net_device *slave_dev) 2335 { 2336 struct dsa_port *dp = dsa_slave_to_port(slave_dev); 2337 2338 if (!netif_running(slave_dev)) 2339 return 0; 2340 2341 netif_device_detach(slave_dev); 2342 2343 rtnl_lock(); 2344 phylink_stop(dp->pl); 2345 rtnl_unlock(); 2346 2347 return 0; 2348 } 2349 2350 int dsa_slave_resume(struct net_device *slave_dev) 2351 { 2352 struct dsa_port *dp = dsa_slave_to_port(slave_dev); 2353 2354 if (!netif_running(slave_dev)) 2355 return 0; 2356 2357 netif_device_attach(slave_dev); 2358 2359 rtnl_lock(); 2360 phylink_start(dp->pl); 2361 rtnl_unlock(); 2362 2363 return 0; 2364 } 2365 2366 int dsa_slave_create(struct dsa_port *port) 2367 { 2368 struct net_device *master = dsa_port_to_master(port); 2369 struct dsa_switch *ds = port->ds; 2370 struct net_device *slave_dev; 2371 struct dsa_slave_priv *p; 2372 const char *name; 2373 int assign_type; 2374 int ret; 2375 2376 if (!ds->num_tx_queues) 2377 ds->num_tx_queues = 1; 2378 2379 if (port->name) { 2380 name = port->name; 2381 assign_type = NET_NAME_PREDICTABLE; 2382 } else { 2383 name = "eth%d"; 2384 assign_type = NET_NAME_ENUM; 2385 } 2386 2387 slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name, 2388 assign_type, ether_setup, 2389 ds->num_tx_queues, 1); 2390 if (slave_dev == NULL) 2391 return -ENOMEM; 2392 2393 slave_dev->rtnl_link_ops = &dsa_link_ops; 2394 slave_dev->ethtool_ops = &dsa_slave_ethtool_ops; 2395 #if IS_ENABLED(CONFIG_DCB) 2396 slave_dev->dcbnl_ops = &dsa_slave_dcbnl_ops; 2397 #endif 2398 if (!is_zero_ether_addr(port->mac)) 2399 eth_hw_addr_set(slave_dev, port->mac); 2400 else 2401 eth_hw_addr_inherit(slave_dev, master); 2402 slave_dev->priv_flags |= IFF_NO_QUEUE; 2403 if (dsa_switch_supports_uc_filtering(ds)) 2404 slave_dev->priv_flags |= IFF_UNICAST_FLT; 2405 slave_dev->netdev_ops = &dsa_slave_netdev_ops; 2406 if (ds->ops->port_max_mtu) 2407 slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index); 2408 SET_NETDEV_DEVTYPE(slave_dev, &dsa_type); 2409 2410 SET_NETDEV_DEV(slave_dev, port->ds->dev); 2411 SET_NETDEV_DEVLINK_PORT(slave_dev, &port->devlink_port); 2412 slave_dev->dev.of_node = port->dn; 2413 slave_dev->vlan_features = master->vlan_features; 2414 2415 p = netdev_priv(slave_dev); 2416 slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 2417 if (!slave_dev->tstats) { 2418 free_netdev(slave_dev); 2419 return -ENOMEM; 2420 } 2421 2422 ret = gro_cells_init(&p->gcells, slave_dev); 2423 if (ret) 2424 goto out_free; 2425 2426 p->dp = port; 2427 INIT_LIST_HEAD(&p->mall_tc_list); 2428 port->slave = slave_dev; 2429 dsa_slave_setup_tagger(slave_dev); 2430 2431 netif_carrier_off(slave_dev); 2432 2433 ret = dsa_slave_phy_setup(slave_dev); 2434 if (ret) { 2435 netdev_err(slave_dev, 2436 "error %d setting up PHY for tree %d, switch %d, port %d\n", 2437 ret, ds->dst->index, ds->index, port->index); 2438 goto out_gcells; 2439 } 2440 2441 rtnl_lock(); 2442 2443 ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN); 2444 if (ret && ret != -EOPNOTSUPP) 
2445 dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n", 2446 ret, ETH_DATA_LEN, port->index); 2447 2448 ret = register_netdevice(slave_dev); 2449 if (ret) { 2450 netdev_err(master, "error %d registering interface %s\n", 2451 ret, slave_dev->name); 2452 rtnl_unlock(); 2453 goto out_phy; 2454 } 2455 2456 if (IS_ENABLED(CONFIG_DCB)) { 2457 ret = dsa_slave_dcbnl_init(slave_dev); 2458 if (ret) { 2459 netdev_err(slave_dev, 2460 "failed to initialize DCB: %pe\n", 2461 ERR_PTR(ret)); 2462 rtnl_unlock(); 2463 goto out_unregister; 2464 } 2465 } 2466 2467 ret = netdev_upper_dev_link(master, slave_dev, NULL); 2468 2469 rtnl_unlock(); 2470 2471 if (ret) 2472 goto out_unregister; 2473 2474 return 0; 2475 2476 out_unregister: 2477 unregister_netdev(slave_dev); 2478 out_phy: 2479 rtnl_lock(); 2480 phylink_disconnect_phy(p->dp->pl); 2481 rtnl_unlock(); 2482 dsa_port_phylink_destroy(p->dp); 2483 out_gcells: 2484 gro_cells_destroy(&p->gcells); 2485 out_free: 2486 free_percpu(slave_dev->tstats); 2487 free_netdev(slave_dev); 2488 port->slave = NULL; 2489 return ret; 2490 } 2491 2492 void dsa_slave_destroy(struct net_device *slave_dev) 2493 { 2494 struct net_device *master = dsa_slave_to_master(slave_dev); 2495 struct dsa_port *dp = dsa_slave_to_port(slave_dev); 2496 struct dsa_slave_priv *p = netdev_priv(slave_dev); 2497 2498 netif_carrier_off(slave_dev); 2499 rtnl_lock(); 2500 netdev_upper_dev_unlink(master, slave_dev); 2501 unregister_netdevice(slave_dev); 2502 phylink_disconnect_phy(dp->pl); 2503 rtnl_unlock(); 2504 2505 dsa_port_phylink_destroy(dp); 2506 gro_cells_destroy(&p->gcells); 2507 free_percpu(slave_dev->tstats); 2508 free_netdev(slave_dev); 2509 } 2510 2511 int dsa_slave_change_master(struct net_device *dev, struct net_device *master, 2512 struct netlink_ext_ack *extack) 2513 { 2514 struct net_device *old_master = dsa_slave_to_master(dev); 2515 struct dsa_port *dp = dsa_slave_to_port(dev); 2516 struct dsa_switch *ds = dp->ds; 2517 struct net_device *upper; 2518 struct list_head *iter; 2519 int err; 2520 2521 if (master == old_master) 2522 return 0; 2523 2524 if (!ds->ops->port_change_master) { 2525 NL_SET_ERR_MSG_MOD(extack, 2526 "Driver does not support changing DSA master"); 2527 return -EOPNOTSUPP; 2528 } 2529 2530 if (!netdev_uses_dsa(master)) { 2531 NL_SET_ERR_MSG_MOD(extack, 2532 "Interface not eligible as DSA master"); 2533 return -EOPNOTSUPP; 2534 } 2535 2536 netdev_for_each_upper_dev_rcu(master, upper, iter) { 2537 if (dsa_slave_dev_check(upper)) 2538 continue; 2539 if (netif_is_bridge_master(upper)) 2540 continue; 2541 NL_SET_ERR_MSG_MOD(extack, "Cannot join master with unknown uppers"); 2542 return -EOPNOTSUPP; 2543 } 2544 2545 /* Since we allow live-changing the DSA master, plus we auto-open the 2546 * DSA master when the user port opens => we need to ensure that the 2547 * new DSA master is open too. 
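 * (this only matters when the user port itself is up, hence the
 * IFF_UP check below)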
2548 */ 2549 if (dev->flags & IFF_UP) { 2550 err = dev_open(master, extack); 2551 if (err) 2552 return err; 2553 } 2554 2555 netdev_upper_dev_unlink(old_master, dev); 2556 2557 err = netdev_upper_dev_link(master, dev, extack); 2558 if (err) 2559 goto out_revert_old_master_unlink; 2560 2561 err = dsa_port_change_master(dp, master, extack); 2562 if (err) 2563 goto out_revert_master_link; 2564 2565 /* Update the MTU of the new CPU port through cross-chip notifiers */ 2566 err = dsa_slave_change_mtu(dev, dev->mtu); 2567 if (err && err != -EOPNOTSUPP) { 2568 netdev_warn(dev, 2569 "nonfatal error updating MTU with new master: %pe\n", 2570 ERR_PTR(err)); 2571 } 2572 2573 /* If the port doesn't have its own MAC address and relies on the DSA 2574 * master's one, inherit it again from the new DSA master. 2575 */ 2576 if (is_zero_ether_addr(dp->mac)) 2577 eth_hw_addr_inherit(dev, master); 2578 2579 return 0; 2580 2581 out_revert_master_link: 2582 netdev_upper_dev_unlink(master, dev); 2583 out_revert_old_master_unlink: 2584 netdev_upper_dev_link(old_master, dev, NULL); 2585 return err; 2586 } 2587 2588 bool dsa_slave_dev_check(const struct net_device *dev) 2589 { 2590 return dev->netdev_ops == &dsa_slave_netdev_ops; 2591 } 2592 EXPORT_SYMBOL_GPL(dsa_slave_dev_check); 2593 2594 static int dsa_slave_changeupper(struct net_device *dev, 2595 struct netdev_notifier_changeupper_info *info) 2596 { 2597 struct dsa_port *dp = dsa_slave_to_port(dev); 2598 struct netlink_ext_ack *extack; 2599 int err = NOTIFY_DONE; 2600 2601 if (!dsa_slave_dev_check(dev)) 2602 return err; 2603 2604 extack = netdev_notifier_info_to_extack(&info->info); 2605 2606 if (netif_is_bridge_master(info->upper_dev)) { 2607 if (info->linking) { 2608 err = dsa_port_bridge_join(dp, info->upper_dev, extack); 2609 if (!err) 2610 dsa_bridge_mtu_normalization(dp); 2611 if (err == -EOPNOTSUPP) { 2612 if (extack && !extack->_msg) 2613 NL_SET_ERR_MSG_MOD(extack, 2614 "Offloading not supported"); 2615 err = 0; 2616 } 2617 err = notifier_from_errno(err); 2618 } else { 2619 dsa_port_bridge_leave(dp, info->upper_dev); 2620 err = NOTIFY_OK; 2621 } 2622 } else if (netif_is_lag_master(info->upper_dev)) { 2623 if (info->linking) { 2624 err = dsa_port_lag_join(dp, info->upper_dev, 2625 info->upper_info, extack); 2626 if (err == -EOPNOTSUPP) { 2627 NL_SET_ERR_MSG_MOD(info->info.extack, 2628 "Offloading not supported"); 2629 err = 0; 2630 } 2631 err = notifier_from_errno(err); 2632 } else { 2633 dsa_port_lag_leave(dp, info->upper_dev); 2634 err = NOTIFY_OK; 2635 } 2636 } else if (is_hsr_master(info->upper_dev)) { 2637 if (info->linking) { 2638 err = dsa_port_hsr_join(dp, info->upper_dev); 2639 if (err == -EOPNOTSUPP) { 2640 NL_SET_ERR_MSG_MOD(info->info.extack, 2641 "Offloading not supported"); 2642 err = 0; 2643 } 2644 err = notifier_from_errno(err); 2645 } else { 2646 dsa_port_hsr_leave(dp, info->upper_dev); 2647 err = NOTIFY_OK; 2648 } 2649 } 2650 2651 return err; 2652 } 2653 2654 static int dsa_slave_prechangeupper(struct net_device *dev, 2655 struct netdev_notifier_changeupper_info *info) 2656 { 2657 struct dsa_port *dp = dsa_slave_to_port(dev); 2658 2659 if (!dsa_slave_dev_check(dev)) 2660 return NOTIFY_DONE; 2661 2662 if (netif_is_bridge_master(info->upper_dev) && !info->linking) 2663 dsa_port_pre_bridge_leave(dp, info->upper_dev); 2664 else if (netif_is_lag_master(info->upper_dev) && !info->linking) 2665 dsa_port_pre_lag_leave(dp, info->upper_dev); 2666 /* dsa_port_pre_hsr_leave is not yet necessary since hsr cannot be 2667 * meaningfully enslaved to a 
bridge yet 2668 */ 2669 2670 return NOTIFY_DONE; 2671 } 2672 2673 static int 2674 dsa_slave_lag_changeupper(struct net_device *dev, 2675 struct netdev_notifier_changeupper_info *info) 2676 { 2677 struct net_device *lower; 2678 struct list_head *iter; 2679 int err = NOTIFY_DONE; 2680 struct dsa_port *dp; 2681 2682 if (!netif_is_lag_master(dev)) 2683 return err; 2684 2685 netdev_for_each_lower_dev(dev, lower, iter) { 2686 if (!dsa_slave_dev_check(lower)) 2687 continue; 2688 2689 dp = dsa_slave_to_port(lower); 2690 if (!dp->lag) 2691 /* Software LAG */ 2692 continue; 2693 2694 err = dsa_slave_changeupper(lower, info); 2695 if (notifier_to_errno(err)) 2696 break; 2697 } 2698 2699 return err; 2700 } 2701 2702 /* Same as dsa_slave_lag_changeupper() except that it calls 2703 * dsa_slave_prechangeupper() 2704 */ 2705 static int 2706 dsa_slave_lag_prechangeupper(struct net_device *dev, 2707 struct netdev_notifier_changeupper_info *info) 2708 { 2709 struct net_device *lower; 2710 struct list_head *iter; 2711 int err = NOTIFY_DONE; 2712 struct dsa_port *dp; 2713 2714 if (!netif_is_lag_master(dev)) 2715 return err; 2716 2717 netdev_for_each_lower_dev(dev, lower, iter) { 2718 if (!dsa_slave_dev_check(lower)) 2719 continue; 2720 2721 dp = dsa_slave_to_port(lower); 2722 if (!dp->lag) 2723 /* Software LAG */ 2724 continue; 2725 2726 err = dsa_slave_prechangeupper(lower, info); 2727 if (notifier_to_errno(err)) 2728 break; 2729 } 2730 2731 return err; 2732 } 2733 2734 static int 2735 dsa_prevent_bridging_8021q_upper(struct net_device *dev, 2736 struct netdev_notifier_changeupper_info *info) 2737 { 2738 struct netlink_ext_ack *ext_ack; 2739 struct net_device *slave, *br; 2740 struct dsa_port *dp; 2741 2742 ext_ack = netdev_notifier_info_to_extack(&info->info); 2743 2744 if (!is_vlan_dev(dev)) 2745 return NOTIFY_DONE; 2746 2747 slave = vlan_dev_real_dev(dev); 2748 if (!dsa_slave_dev_check(slave)) 2749 return NOTIFY_DONE; 2750 2751 dp = dsa_slave_to_port(slave); 2752 br = dsa_port_bridge_dev_get(dp); 2753 if (!br) 2754 return NOTIFY_DONE; 2755 2756 /* Deny enslaving a VLAN device into a VLAN-aware bridge */ 2757 if (br_vlan_enabled(br) && 2758 netif_is_bridge_master(info->upper_dev) && info->linking) { 2759 NL_SET_ERR_MSG_MOD(ext_ack, 2760 "Cannot enslave VLAN device into VLAN aware bridge"); 2761 return notifier_from_errno(-EINVAL); 2762 } 2763 2764 return NOTIFY_DONE; 2765 } 2766 2767 static int 2768 dsa_slave_check_8021q_upper(struct net_device *dev, 2769 struct netdev_notifier_changeupper_info *info) 2770 { 2771 struct dsa_port *dp = dsa_slave_to_port(dev); 2772 struct net_device *br = dsa_port_bridge_dev_get(dp); 2773 struct bridge_vlan_info br_info; 2774 struct netlink_ext_ack *extack; 2775 int err = NOTIFY_DONE; 2776 u16 vid; 2777 2778 if (!br || !br_vlan_enabled(br)) 2779 return NOTIFY_DONE; 2780 2781 extack = netdev_notifier_info_to_extack(&info->info); 2782 vid = vlan_dev_vlan_id(info->upper_dev); 2783 2784 /* br_vlan_get_info() returns -EINVAL or -ENOENT if the 2785 * device, respectively the VID is not found, returning 2786 * 0 means success, which is a failure for us here. 
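 * Put differently, only a return value of 0 (the bridge already has
 * this VID configured) is treated as a conflict below.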
2787 */ 2788 err = br_vlan_get_info(br, vid, &br_info); 2789 if (err == 0) { 2790 NL_SET_ERR_MSG_MOD(extack, 2791 "This VLAN is already configured by the bridge"); 2792 return notifier_from_errno(-EBUSY); 2793 } 2794 2795 return NOTIFY_DONE; 2796 } 2797 2798 static int 2799 dsa_slave_prechangeupper_sanity_check(struct net_device *dev, 2800 struct netdev_notifier_changeupper_info *info) 2801 { 2802 struct dsa_switch *ds; 2803 struct dsa_port *dp; 2804 int err; 2805 2806 if (!dsa_slave_dev_check(dev)) 2807 return dsa_prevent_bridging_8021q_upper(dev, info); 2808 2809 dp = dsa_slave_to_port(dev); 2810 ds = dp->ds; 2811 2812 if (ds->ops->port_prechangeupper) { 2813 err = ds->ops->port_prechangeupper(ds, dp->index, info); 2814 if (err) 2815 return notifier_from_errno(err); 2816 } 2817 2818 if (is_vlan_dev(info->upper_dev)) 2819 return dsa_slave_check_8021q_upper(dev, info); 2820 2821 return NOTIFY_DONE; 2822 } 2823 2824 /* To be eligible as a DSA master, a LAG must have all lower interfaces be 2825 * eligible DSA masters. Additionally, all LAG slaves must be DSA masters of 2826 * switches in the same switch tree. 2827 */ 2828 static int dsa_lag_master_validate(struct net_device *lag_dev, 2829 struct netlink_ext_ack *extack) 2830 { 2831 struct net_device *lower1, *lower2; 2832 struct list_head *iter1, *iter2; 2833 2834 netdev_for_each_lower_dev(lag_dev, lower1, iter1) { 2835 netdev_for_each_lower_dev(lag_dev, lower2, iter2) { 2836 if (!netdev_uses_dsa(lower1) || 2837 !netdev_uses_dsa(lower2)) { 2838 NL_SET_ERR_MSG_MOD(extack, 2839 "All LAG ports must be eligible as DSA masters"); 2840 return notifier_from_errno(-EINVAL); 2841 } 2842 2843 if (lower1 == lower2) 2844 continue; 2845 2846 if (!dsa_port_tree_same(lower1->dsa_ptr, 2847 lower2->dsa_ptr)) { 2848 NL_SET_ERR_MSG_MOD(extack, 2849 "LAG contains DSA masters of disjoint switch trees"); 2850 return notifier_from_errno(-EINVAL); 2851 } 2852 } 2853 } 2854 2855 return NOTIFY_DONE; 2856 } 2857 2858 static int 2859 dsa_master_prechangeupper_sanity_check(struct net_device *master, 2860 struct netdev_notifier_changeupper_info *info) 2861 { 2862 struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info); 2863 2864 if (!netdev_uses_dsa(master)) 2865 return NOTIFY_DONE; 2866 2867 if (!info->linking) 2868 return NOTIFY_DONE; 2869 2870 /* Allow DSA switch uppers */ 2871 if (dsa_slave_dev_check(info->upper_dev)) 2872 return NOTIFY_DONE; 2873 2874 /* Allow bridge uppers of DSA masters, subject to further 2875 * restrictions in dsa_bridge_prechangelower_sanity_check() 2876 */ 2877 if (netif_is_bridge_master(info->upper_dev)) 2878 return NOTIFY_DONE; 2879 2880 /* Allow LAG uppers, subject to further restrictions in 2881 * dsa_lag_master_prechangelower_sanity_check() 2882 */ 2883 if (netif_is_lag_master(info->upper_dev)) 2884 return dsa_lag_master_validate(info->upper_dev, extack); 2885 2886 NL_SET_ERR_MSG_MOD(extack, 2887 "DSA master cannot join unknown upper interfaces"); 2888 return notifier_from_errno(-EBUSY); 2889 } 2890 2891 static int 2892 dsa_lag_master_prechangelower_sanity_check(struct net_device *dev, 2893 struct netdev_notifier_changeupper_info *info) 2894 { 2895 struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info); 2896 struct net_device *lag_dev = info->upper_dev; 2897 struct net_device *lower; 2898 struct list_head *iter; 2899 2900 if (!netdev_uses_dsa(lag_dev) || !netif_is_lag_master(lag_dev)) 2901 return NOTIFY_DONE; 2902 2903 if (!info->linking) 2904 return NOTIFY_DONE; 2905 2906 if 
(!netdev_uses_dsa(dev)) { 2907 NL_SET_ERR_MSG(extack, 2908 "Only DSA masters can join a LAG DSA master"); 2909 return notifier_from_errno(-EINVAL); 2910 } 2911 2912 netdev_for_each_lower_dev(lag_dev, lower, iter) { 2913 if (!dsa_port_tree_same(dev->dsa_ptr, lower->dsa_ptr)) { 2914 NL_SET_ERR_MSG(extack, 2915 "Interface is DSA master for a different switch tree than this LAG"); 2916 return notifier_from_errno(-EINVAL); 2917 } 2918 2919 break; 2920 } 2921 2922 return NOTIFY_DONE; 2923 } 2924 2925 /* Don't allow bridging of DSA masters, since the bridge layer rx_handler 2926 * prevents the DSA fake ethertype handler to be invoked, so we don't get the 2927 * chance to strip off and parse the DSA switch tag protocol header (the bridge 2928 * layer just returns RX_HANDLER_CONSUMED, stopping RX processing for these 2929 * frames). 2930 * The only case where that would not be an issue is when bridging can already 2931 * be offloaded, such as when the DSA master is itself a DSA or plain switchdev 2932 * port, and is bridged only with other ports from the same hardware device. 2933 */ 2934 static int 2935 dsa_bridge_prechangelower_sanity_check(struct net_device *new_lower, 2936 struct netdev_notifier_changeupper_info *info) 2937 { 2938 struct net_device *br = info->upper_dev; 2939 struct netlink_ext_ack *extack; 2940 struct net_device *lower; 2941 struct list_head *iter; 2942 2943 if (!netif_is_bridge_master(br)) 2944 return NOTIFY_DONE; 2945 2946 if (!info->linking) 2947 return NOTIFY_DONE; 2948 2949 extack = netdev_notifier_info_to_extack(&info->info); 2950 2951 netdev_for_each_lower_dev(br, lower, iter) { 2952 if (!netdev_uses_dsa(new_lower) && !netdev_uses_dsa(lower)) 2953 continue; 2954 2955 if (!netdev_port_same_parent_id(lower, new_lower)) { 2956 NL_SET_ERR_MSG(extack, 2957 "Cannot do software bridging with a DSA master"); 2958 return notifier_from_errno(-EINVAL); 2959 } 2960 } 2961 2962 return NOTIFY_DONE; 2963 } 2964 2965 static void dsa_tree_migrate_ports_from_lag_master(struct dsa_switch_tree *dst, 2966 struct net_device *lag_dev) 2967 { 2968 struct net_device *new_master = dsa_tree_find_first_master(dst); 2969 struct dsa_port *dp; 2970 int err; 2971 2972 dsa_tree_for_each_user_port(dp, dst) { 2973 if (dsa_port_to_master(dp) != lag_dev) 2974 continue; 2975 2976 err = dsa_slave_change_master(dp->slave, new_master, NULL); 2977 if (err) { 2978 netdev_err(dp->slave, 2979 "failed to restore master to %s: %pe\n", 2980 new_master->name, ERR_PTR(err)); 2981 } 2982 } 2983 } 2984 2985 static int dsa_master_lag_join(struct net_device *master, 2986 struct net_device *lag_dev, 2987 struct netdev_lag_upper_info *uinfo, 2988 struct netlink_ext_ack *extack) 2989 { 2990 struct dsa_port *cpu_dp = master->dsa_ptr; 2991 struct dsa_switch_tree *dst = cpu_dp->dst; 2992 struct dsa_port *dp; 2993 int err; 2994 2995 err = dsa_master_lag_setup(lag_dev, cpu_dp, uinfo, extack); 2996 if (err) 2997 return err; 2998 2999 dsa_tree_for_each_user_port(dp, dst) { 3000 if (dsa_port_to_master(dp) != master) 3001 continue; 3002 3003 err = dsa_slave_change_master(dp->slave, lag_dev, extack); 3004 if (err) 3005 goto restore; 3006 } 3007 3008 return 0; 3009 3010 restore: 3011 dsa_tree_for_each_user_port_continue_reverse(dp, dst) { 3012 if (dsa_port_to_master(dp) != lag_dev) 3013 continue; 3014 3015 err = dsa_slave_change_master(dp->slave, master, NULL); 3016 if (err) { 3017 netdev_err(dp->slave, 3018 "failed to restore master to %s: %pe\n", 3019 master->name, ERR_PTR(err)); 3020 } 3021 } 3022 3023 
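/* Also undo the hardware LAG join of the CPU port performed by
 * dsa_master_lag_setup() above.
 */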
dsa_master_lag_teardown(lag_dev, master->dsa_ptr); 3024 3025 return err; 3026 } 3027 3028 static void dsa_master_lag_leave(struct net_device *master, 3029 struct net_device *lag_dev) 3030 { 3031 struct dsa_port *dp, *cpu_dp = lag_dev->dsa_ptr; 3032 struct dsa_switch_tree *dst = cpu_dp->dst; 3033 struct dsa_port *new_cpu_dp = NULL; 3034 struct net_device *lower; 3035 struct list_head *iter; 3036 3037 netdev_for_each_lower_dev(lag_dev, lower, iter) { 3038 if (netdev_uses_dsa(lower)) { 3039 new_cpu_dp = lower->dsa_ptr; 3040 break; 3041 } 3042 } 3043 3044 if (new_cpu_dp) { 3045 /* Update the CPU port of the user ports still under the LAG 3046 * so that dsa_port_to_master() continues to work properly 3047 */ 3048 dsa_tree_for_each_user_port(dp, dst) 3049 if (dsa_port_to_master(dp) == lag_dev) 3050 dp->cpu_dp = new_cpu_dp; 3051 3052 /* Update the index of the virtual CPU port to match the lowest 3053 * physical CPU port 3054 */ 3055 lag_dev->dsa_ptr = new_cpu_dp; 3056 wmb(); 3057 } else { 3058 /* If the LAG DSA master has no ports left, migrate back all 3059 * user ports to the first physical CPU port 3060 */ 3061 dsa_tree_migrate_ports_from_lag_master(dst, lag_dev); 3062 } 3063 3064 /* This DSA master has left its LAG in any case, so let 3065 * the CPU port leave the hardware LAG as well 3066 */ 3067 dsa_master_lag_teardown(lag_dev, master->dsa_ptr); 3068 } 3069 3070 static int dsa_master_changeupper(struct net_device *dev, 3071 struct netdev_notifier_changeupper_info *info) 3072 { 3073 struct netlink_ext_ack *extack; 3074 int err = NOTIFY_DONE; 3075 3076 if (!netdev_uses_dsa(dev)) 3077 return err; 3078 3079 extack = netdev_notifier_info_to_extack(&info->info); 3080 3081 if (netif_is_lag_master(info->upper_dev)) { 3082 if (info->linking) { 3083 err = dsa_master_lag_join(dev, info->upper_dev, 3084 info->upper_info, extack); 3085 err = notifier_from_errno(err); 3086 } else { 3087 dsa_master_lag_leave(dev, info->upper_dev); 3088 err = NOTIFY_OK; 3089 } 3090 } 3091 3092 return err; 3093 } 3094 3095 static int dsa_slave_netdevice_event(struct notifier_block *nb, 3096 unsigned long event, void *ptr) 3097 { 3098 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3099 3100 switch (event) { 3101 case NETDEV_PRECHANGEUPPER: { 3102 struct netdev_notifier_changeupper_info *info = ptr; 3103 int err; 3104 3105 err = dsa_slave_prechangeupper_sanity_check(dev, info); 3106 if (notifier_to_errno(err)) 3107 return err; 3108 3109 err = dsa_master_prechangeupper_sanity_check(dev, info); 3110 if (notifier_to_errno(err)) 3111 return err; 3112 3113 err = dsa_lag_master_prechangelower_sanity_check(dev, info); 3114 if (notifier_to_errno(err)) 3115 return err; 3116 3117 err = dsa_bridge_prechangelower_sanity_check(dev, info); 3118 if (notifier_to_errno(err)) 3119 return err; 3120 3121 err = dsa_slave_prechangeupper(dev, ptr); 3122 if (notifier_to_errno(err)) 3123 return err; 3124 3125 err = dsa_slave_lag_prechangeupper(dev, ptr); 3126 if (notifier_to_errno(err)) 3127 return err; 3128 3129 break; 3130 } 3131 case NETDEV_CHANGEUPPER: { 3132 int err; 3133 3134 err = dsa_slave_changeupper(dev, ptr); 3135 if (notifier_to_errno(err)) 3136 return err; 3137 3138 err = dsa_slave_lag_changeupper(dev, ptr); 3139 if (notifier_to_errno(err)) 3140 return err; 3141 3142 err = dsa_master_changeupper(dev, ptr); 3143 if (notifier_to_errno(err)) 3144 return err; 3145 3146 break; 3147 } 3148 case NETDEV_CHANGELOWERSTATE: { 3149 struct netdev_notifier_changelowerstate_info *info = ptr; 3150 struct dsa_port *dp; 3151 int err = 0; 3152 
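/* A LAG lower changed state: propagate the new lower state
 * (link up, tx_enabled) to the switch driver.
 */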
3153 if (dsa_slave_dev_check(dev)) { 3154 dp = dsa_slave_to_port(dev); 3155 3156 err = dsa_port_lag_change(dp, info->lower_state_info); 3157 } 3158 3159 /* Mirror LAG port events on DSA masters that are in 3160 * a LAG towards their respective switch CPU ports 3161 */ 3162 if (netdev_uses_dsa(dev)) { 3163 dp = dev->dsa_ptr; 3164 3165 err = dsa_port_lag_change(dp, info->lower_state_info); 3166 } 3167 3168 return notifier_from_errno(err); 3169 } 3170 case NETDEV_CHANGE: 3171 case NETDEV_UP: { 3172 /* Track state of master port. 3173 * DSA driver may require the master port (and indirectly 3174 * the tagger) to be available for some special operation. 3175 */ 3176 if (netdev_uses_dsa(dev)) { 3177 struct dsa_port *cpu_dp = dev->dsa_ptr; 3178 struct dsa_switch_tree *dst = cpu_dp->ds->dst; 3179 3180 /* Track when the master port is UP */ 3181 dsa_tree_master_oper_state_change(dst, dev, 3182 netif_oper_up(dev)); 3183 3184 /* Track when the master port is ready and can accept 3185 * packets. 3186 * NETDEV_UP event is not enough to flag a port as ready. 3187 * We also have to wait for linkwatch_do_dev to dev_activate 3188 * and emit a NETDEV_CHANGE event. 3189 * We check if a master port is ready by checking if the dev 3190 * has a qdisc assigned and it is not the noop qdisc. 3191 */ 3192 dsa_tree_master_admin_state_change(dst, dev, 3193 !qdisc_tx_is_noop(dev)); 3194 3195 return NOTIFY_OK; 3196 } 3197 3198 return NOTIFY_DONE; 3199 } 3200 case NETDEV_GOING_DOWN: { 3201 struct dsa_port *dp, *cpu_dp; 3202 struct dsa_switch_tree *dst; 3203 LIST_HEAD(close_list); 3204 3205 if (!netdev_uses_dsa(dev)) 3206 return NOTIFY_DONE; 3207 3208 cpu_dp = dev->dsa_ptr; 3209 dst = cpu_dp->ds->dst; 3210 3211 dsa_tree_master_admin_state_change(dst, dev, false); 3212 3213 list_for_each_entry(dp, &dst->ports, list) { 3214 if (!dsa_port_is_user(dp)) 3215 continue; 3216 3217 if (dp->cpu_dp != cpu_dp) 3218 continue; 3219 3220 list_add(&dp->slave->close_list, &close_list); 3221 } 3222 3223 dev_close_many(&close_list, true); 3224 3225 return NOTIFY_OK; 3226 } 3227 default: 3228 break; 3229 } 3230 3231 return NOTIFY_DONE; 3232 } 3233 3234 static void 3235 dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work) 3236 { 3237 struct switchdev_notifier_fdb_info info = {}; 3238 3239 info.addr = switchdev_work->addr; 3240 info.vid = switchdev_work->vid; 3241 info.offloaded = true; 3242 call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, 3243 switchdev_work->orig_dev, &info.info, NULL); 3244 } 3245 3246 static void dsa_slave_switchdev_event_work(struct work_struct *work) 3247 { 3248 struct dsa_switchdev_event_work *switchdev_work = 3249 container_of(work, struct dsa_switchdev_event_work, work); 3250 const unsigned char *addr = switchdev_work->addr; 3251 struct net_device *dev = switchdev_work->dev; 3252 u16 vid = switchdev_work->vid; 3253 struct dsa_switch *ds; 3254 struct dsa_port *dp; 3255 int err; 3256 3257 dp = dsa_slave_to_port(dev); 3258 ds = dp->ds; 3259 3260 switch (switchdev_work->event) { 3261 case SWITCHDEV_FDB_ADD_TO_DEVICE: 3262 if (switchdev_work->host_addr) 3263 err = dsa_port_bridge_host_fdb_add(dp, addr, vid); 3264 else if (dp->lag) 3265 err = dsa_port_lag_fdb_add(dp, addr, vid); 3266 else 3267 err = dsa_port_fdb_add(dp, addr, vid); 3268 if (err) { 3269 dev_err(ds->dev, 3270 "port %d failed to add %pM vid %d to fdb: %d\n", 3271 dp->index, addr, vid, err); 3272 break; 3273 } 3274 dsa_fdb_offload_notify(switchdev_work); 3275 break; 3276 3277 case SWITCHDEV_FDB_DEL_TO_DEVICE: 3278 if (switchdev_work->host_addr) 3279 err
= dsa_port_bridge_host_fdb_del(dp, addr, vid); 3280 else if (dp->lag) 3281 err = dsa_port_lag_fdb_del(dp, addr, vid); 3282 else 3283 err = dsa_port_fdb_del(dp, addr, vid); 3284 if (err) { 3285 dev_err(ds->dev, 3286 "port %d failed to delete %pM vid %d from fdb: %d\n", 3287 dp->index, addr, vid, err); 3288 } 3289 3290 break; 3291 } 3292 3293 kfree(switchdev_work); 3294 } 3295 3296 static bool dsa_foreign_dev_check(const struct net_device *dev, 3297 const struct net_device *foreign_dev) 3298 { 3299 const struct dsa_port *dp = dsa_slave_to_port(dev); 3300 struct dsa_switch_tree *dst = dp->ds->dst; 3301 3302 if (netif_is_bridge_master(foreign_dev)) 3303 return !dsa_tree_offloads_bridge_dev(dst, foreign_dev); 3304 3305 if (netif_is_bridge_port(foreign_dev)) 3306 return !dsa_tree_offloads_bridge_port(dst, foreign_dev); 3307 3308 /* Everything else is foreign */ 3309 return true; 3310 } 3311 3312 static int dsa_slave_fdb_event(struct net_device *dev, 3313 struct net_device *orig_dev, 3314 unsigned long event, const void *ctx, 3315 const struct switchdev_notifier_fdb_info *fdb_info) 3316 { 3317 struct dsa_switchdev_event_work *switchdev_work; 3318 struct dsa_port *dp = dsa_slave_to_port(dev); 3319 bool host_addr = fdb_info->is_local; 3320 struct dsa_switch *ds = dp->ds; 3321 3322 if (ctx && ctx != dp) 3323 return 0; 3324 3325 if (!dp->bridge) 3326 return 0; 3327 3328 if (switchdev_fdb_is_dynamically_learned(fdb_info)) { 3329 if (dsa_port_offloads_bridge_port(dp, orig_dev)) 3330 return 0; 3331 3332 /* FDB entries learned by the software bridge or by foreign 3333 * bridge ports should be installed as host addresses only if 3334 * the driver requests assisted learning. 3335 */ 3336 if (!ds->assisted_learning_on_cpu_port) 3337 return 0; 3338 } 3339 3340 /* Also treat FDB entries on foreign interfaces bridged with us as host 3341 * addresses. 3342 */ 3343 if (dsa_foreign_dev_check(dev, orig_dev)) 3344 host_addr = true; 3345 3346 /* Check early that we're not doing work in vain. 3347 * Host addresses on LAG ports still require regular FDB ops, 3348 * since the CPU port isn't in a LAG. 3349 */ 3350 if (dp->lag && !host_addr) { 3351 if (!ds->ops->lag_fdb_add || !ds->ops->lag_fdb_del) 3352 return -EOPNOTSUPP; 3353 } else { 3354 if (!ds->ops->port_fdb_add || !ds->ops->port_fdb_del) 3355 return -EOPNOTSUPP; 3356 } 3357 3358 switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); 3359 if (!switchdev_work) 3360 return -ENOMEM; 3361 3362 netdev_dbg(dev, "%s FDB entry towards %s, addr %pM vid %d%s\n", 3363 event == SWITCHDEV_FDB_ADD_TO_DEVICE ? "Adding" : "Deleting", 3364 orig_dev->name, fdb_info->addr, fdb_info->vid, 3365 host_addr ? 
" as host address" : ""); 3366 3367 INIT_WORK(&switchdev_work->work, dsa_slave_switchdev_event_work); 3368 switchdev_work->event = event; 3369 switchdev_work->dev = dev; 3370 switchdev_work->orig_dev = orig_dev; 3371 3372 ether_addr_copy(switchdev_work->addr, fdb_info->addr); 3373 switchdev_work->vid = fdb_info->vid; 3374 switchdev_work->host_addr = host_addr; 3375 3376 dsa_schedule_work(&switchdev_work->work); 3377 3378 return 0; 3379 } 3380 3381 /* Called under rcu_read_lock() */ 3382 static int dsa_slave_switchdev_event(struct notifier_block *unused, 3383 unsigned long event, void *ptr) 3384 { 3385 struct net_device *dev = switchdev_notifier_info_to_dev(ptr); 3386 int err; 3387 3388 switch (event) { 3389 case SWITCHDEV_PORT_ATTR_SET: 3390 err = switchdev_handle_port_attr_set(dev, ptr, 3391 dsa_slave_dev_check, 3392 dsa_slave_port_attr_set); 3393 return notifier_from_errno(err); 3394 case SWITCHDEV_FDB_ADD_TO_DEVICE: 3395 case SWITCHDEV_FDB_DEL_TO_DEVICE: 3396 err = switchdev_handle_fdb_event_to_device(dev, event, ptr, 3397 dsa_slave_dev_check, 3398 dsa_foreign_dev_check, 3399 dsa_slave_fdb_event); 3400 return notifier_from_errno(err); 3401 default: 3402 return NOTIFY_DONE; 3403 } 3404 3405 return NOTIFY_OK; 3406 } 3407 3408 static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused, 3409 unsigned long event, void *ptr) 3410 { 3411 struct net_device *dev = switchdev_notifier_info_to_dev(ptr); 3412 int err; 3413 3414 switch (event) { 3415 case SWITCHDEV_PORT_OBJ_ADD: 3416 err = switchdev_handle_port_obj_add_foreign(dev, ptr, 3417 dsa_slave_dev_check, 3418 dsa_foreign_dev_check, 3419 dsa_slave_port_obj_add); 3420 return notifier_from_errno(err); 3421 case SWITCHDEV_PORT_OBJ_DEL: 3422 err = switchdev_handle_port_obj_del_foreign(dev, ptr, 3423 dsa_slave_dev_check, 3424 dsa_foreign_dev_check, 3425 dsa_slave_port_obj_del); 3426 return notifier_from_errno(err); 3427 case SWITCHDEV_PORT_ATTR_SET: 3428 err = switchdev_handle_port_attr_set(dev, ptr, 3429 dsa_slave_dev_check, 3430 dsa_slave_port_attr_set); 3431 return notifier_from_errno(err); 3432 } 3433 3434 return NOTIFY_DONE; 3435 } 3436 3437 static struct notifier_block dsa_slave_nb __read_mostly = { 3438 .notifier_call = dsa_slave_netdevice_event, 3439 }; 3440 3441 struct notifier_block dsa_slave_switchdev_notifier = { 3442 .notifier_call = dsa_slave_switchdev_event, 3443 }; 3444 3445 struct notifier_block dsa_slave_switchdev_blocking_notifier = { 3446 .notifier_call = dsa_slave_switchdev_blocking_event, 3447 }; 3448 3449 int dsa_slave_register_notifier(void) 3450 { 3451 struct notifier_block *nb; 3452 int err; 3453 3454 err = register_netdevice_notifier(&dsa_slave_nb); 3455 if (err) 3456 return err; 3457 3458 err = register_switchdev_notifier(&dsa_slave_switchdev_notifier); 3459 if (err) 3460 goto err_switchdev_nb; 3461 3462 nb = &dsa_slave_switchdev_blocking_notifier; 3463 err = register_switchdev_blocking_notifier(nb); 3464 if (err) 3465 goto err_switchdev_blocking_nb; 3466 3467 return 0; 3468 3469 err_switchdev_blocking_nb: 3470 unregister_switchdev_notifier(&dsa_slave_switchdev_notifier); 3471 err_switchdev_nb: 3472 unregister_netdevice_notifier(&dsa_slave_nb); 3473 return err; 3474 } 3475 3476 void dsa_slave_unregister_notifier(void) 3477 { 3478 struct notifier_block *nb; 3479 int err; 3480 3481 nb = &dsa_slave_switchdev_blocking_notifier; 3482 err = unregister_switchdev_blocking_notifier(nb); 3483 if (err) 3484 pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err); 3485 3486 err = 
unregister_switchdev_notifier(&dsa_slave_switchdev_notifier); 3487 if (err) 3488 pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err); 3489 3490 err = unregister_netdevice_notifier(&dsa_slave_nb); 3491 if (err) 3492 pr_err("DSA: failed to unregister slave notifier (%d)\n", err); 3493 } 3494