// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/slave.c - Slave device handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/selftests.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/if_hsr.h>
#include <net/dcbnl.h>
#include <linux/netpoll.h>

#include "dsa_priv.h"

static void dsa_slave_standalone_event_work(struct work_struct *work)
{
	struct dsa_standalone_event_work *standalone_work =
		container_of(work, struct dsa_standalone_event_work, work);
	const unsigned char *addr = standalone_work->addr;
	struct net_device *dev = standalone_work->dev;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_mdb mdb;
	struct dsa_switch *ds = dp->ds;
	u16 vid = standalone_work->vid;
	int err;

	switch (standalone_work->event) {
	case DSA_UC_ADD:
		err = dsa_port_standalone_host_fdb_add(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to fdb: %d\n",
				dp->index, addr, vid, err);
			break;
		}
		break;

	case DSA_UC_DEL:
		err = dsa_port_standalone_host_fdb_del(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from fdb: %d\n",
				dp->index, addr, vid, err);
		}

		break;
	case DSA_MC_ADD:
		ether_addr_copy(mdb.addr, addr);
		mdb.vid = vid;

		err = dsa_port_standalone_host_mdb_add(dp, &mdb);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to mdb: %d\n",
				dp->index, addr, vid, err);
			break;
		}
		break;
	case DSA_MC_DEL:
		ether_addr_copy(mdb.addr, addr);
		mdb.vid = vid;

		err = dsa_port_standalone_host_mdb_del(dp, &mdb);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from mdb: %d\n",
				dp->index, addr, vid, err);
		}

		break;
	}

	kfree(standalone_work);
}

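/* The work item above is scheduled rather than run inline because the
 * sync/unsync callers below run in atomic context (hence the GFP_ATOMIC
 * allocation here), while programming the switch typically requires a
 * sleepable context, e.g. for MDIO access.
 */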
static int dsa_slave_schedule_standalone_work(struct net_device *dev,
					      enum dsa_standalone_event event,
					      const unsigned char *addr,
					      u16 vid)
{
	struct dsa_standalone_event_work *standalone_work;

	standalone_work = kzalloc(sizeof(*standalone_work), GFP_ATOMIC);
	if (!standalone_work)
		return -ENOMEM;

	INIT_WORK(&standalone_work->work, dsa_slave_standalone_event_work);
	standalone_work->event = event;
	standalone_work->dev = dev;

	ether_addr_copy(standalone_work->addr, addr);
	standalone_work->vid = vid;

	dsa_schedule_work(&standalone_work->work);

	return 0;
}

static int dsa_slave_sync_uc(struct net_device *dev,
			     const unsigned char *addr)
{
	return dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD, addr, 0);
}

static int dsa_slave_unsync_uc(struct net_device *dev,
			       const unsigned char *addr)
{
	return dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL, addr, 0);
}

static int dsa_slave_sync_mc(struct net_device *dev,
			     const unsigned char *addr)
{
	return dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, addr, 0);
}

static int dsa_slave_unsync_mc(struct net_device *dev,
			       const unsigned char *addr)
{
	return dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, addr, 0);
}

/* slave mii_bus handling ***************************************************/
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_read(ds, addr, reg);

	return 0xffff;
}

static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_write(ds, addr, reg, val);

	return 0;
}

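/* Set up the switch-internal MDIO bus. Reads and writes outside of
 * ds->phys_mii_mask are not forwarded to the driver, and phy_mask
 * additionally keeps the MDIO core from probing those addresses.
 */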
void dsa_slave_mii_bus_init(struct dsa_switch *ds)
{
	ds->slave_mii_bus->priv = (void *)ds;
	ds->slave_mii_bus->name = "dsa slave smi";
	ds->slave_mii_bus->read = dsa_slave_phy_read;
	ds->slave_mii_bus->write = dsa_slave_phy_write;
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
		 ds->dst->index, ds->index);
	ds->slave_mii_bus->parent = ds->dev;
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}


/* slave device handling ****************************************************/
static int dsa_slave_get_iflink(const struct net_device *dev)
{
	return dsa_slave_to_master(dev)->ifindex;
}

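/* Opening a user port also opens the DSA master, since all frames for this
 * port flow through it. On switches with unicast filtering support, the
 * port's MAC address is additionally installed as a standalone host FDB
 * entry, so that frames addressed to it keep reaching the CPU.
 */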
static int dsa_slave_open(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int err;

	err = dev_open(master, NULL);
	if (err < 0) {
		netdev_err(dev, "failed to open master %s\n", master->name);
		goto out;
	}

	if (dsa_switch_supports_uc_filtering(ds)) {
		err = dsa_port_standalone_host_fdb_add(dp, dev->dev_addr, 0);
		if (err)
			goto out;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
		err = dev_uc_add(master, dev->dev_addr);
		if (err < 0)
			goto del_host_addr;
	}

	err = dsa_port_enable_rt(dp, dev->phydev);
	if (err)
		goto del_unicast;

	return 0;

del_unicast:
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);
del_host_addr:
	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
out:
	return err;
}

static int dsa_slave_close(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	dsa_port_disable_rt(dp);

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);

	return 0;
}

/* Keep flooding enabled towards this port's CPU port as long as it serves at
 * least one port in the tree that requires it.
 */
static void dsa_port_manage_cpu_flood(struct dsa_port *dp)
{
	struct switchdev_brport_flags flags = {
		.mask = BR_FLOOD | BR_MCAST_FLOOD,
	};
	struct dsa_switch_tree *dst = dp->ds->dst;
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_port *other_dp;
	int err;

	list_for_each_entry(other_dp, &dst->ports, list) {
		if (!dsa_port_is_user(other_dp))
			continue;

		if (other_dp->cpu_dp != cpu_dp)
			continue;

		if (other_dp->slave->flags & IFF_ALLMULTI)
			flags.val |= BR_MCAST_FLOOD;
		if (other_dp->slave->flags & IFF_PROMISC)
			flags.val |= BR_FLOOD;
	}

	err = dsa_port_pre_bridge_flags(dp, flags, NULL);
	if (err)
		return;

	dsa_port_bridge_flags(cpu_dp, flags, NULL);
}

static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(master,
				 dev->flags & IFF_ALLMULTI ? 1 : -1);
	if (change & IFF_PROMISC)
		dev_set_promiscuity(master,
				    dev->flags & IFF_PROMISC ? 1 : -1);

	if (dsa_switch_supports_uc_filtering(ds) &&
	    dsa_switch_supports_mc_filtering(ds))
		dsa_port_manage_cpu_flood(dp);
}

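/* Secondary addresses are synced to the master so that frames for them are
 * accepted there, and, on switches with address filtering support, are also
 * installed as standalone host FDB/MDB entries via the deferred workers
 * above.
 */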
static void dsa_slave_set_rx_mode(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	dev_mc_sync(master, dev);
	dev_uc_sync(master, dev);
	if (dsa_switch_supports_mc_filtering(ds))
		__dev_mc_sync(dev, dsa_slave_sync_mc, dsa_slave_unsync_mc);
	if (dsa_switch_supports_uc_filtering(ds))
		__dev_uc_sync(dev, dsa_slave_sync_uc, dsa_slave_unsync_uc);
}

static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* If the port is down, the address isn't synced yet to hardware or
	 * to the DSA master, so there is nothing to change.
	 */
	if (!(dev->flags & IFF_UP))
		goto out_change_dev_addr;

	if (dsa_switch_supports_uc_filtering(ds)) {
		err = dsa_port_standalone_host_fdb_add(dp, addr->sa_data, 0);
		if (err)
			return err;
	}

	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			goto del_unicast;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);

out_change_dev_addr:
	eth_hw_addr_set(dev, addr->sa_data);

	return 0;

del_unicast:
	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, addr->sa_data, 0);

	return err;
}

struct dsa_slave_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			   bool is_static, void *data)
{
	struct dsa_slave_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = NTF_SELF;
	ndm->ndm_type = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

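/* .ndo_fdb_dump: walk the hardware FDB of this port and emit one
 * RTM_NEWNEIGH message per entry through the callback above.
 */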
static int
dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
		   struct net_device *dev, struct net_device *filter_dev,
		   int *idx)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_dump_ctx dump = {
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
	*idx = dump.idx;

	return err;
}

static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	int port = p->dp->index;

	/* Pass through to switch driver if it supports timestamping */
	switch (cmd) {
	case SIOCGHWTSTAMP:
		if (ds->ops->port_hwtstamp_get)
			return ds->ops->port_hwtstamp_get(ds, port, ifr);
		break;
	case SIOCSHWTSTAMP:
		if (ds->ops->port_hwtstamp_set)
			return ds->ops->port_hwtstamp_set(ds, port, ifr);
		break;
	}

	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}

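/* switchdev attribute handler. The orig_dev checks make sure we only act on
 * attributes targeting a bridge (or bridge port) that this port actually
 * offloads.
 */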
static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
				   const struct switchdev_attr *attr,
				   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int ret;

	if (ctx && ctx != dp)
		return 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_set_state(dp, attr->u.stp_state, true);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MST_STATE:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_set_mst_state(dp, &attr->u.mst_state, extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
					      extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MST:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_mst_enable(dp, attr->u.mst, extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
						extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
		break;
	case SWITCHDEV_ATTR_ID_VLAN_MSTI:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_vlan_msti(dp, &attr->u.vlan_msti);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/* Must be called under rcu_read_lock() */
static int
dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
				      const struct switchdev_obj_port_vlan *vlan)
{
	struct net_device *upper_dev;
	struct list_head *iter;

	netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
		u16 vid;

		if (!is_vlan_dev(upper_dev))
			continue;

		vid = vlan_dev_vlan_id(upper_dev);
		if (vid == vlan->vid)
			return -EBUSY;
	}

	return 0;
}

static int dsa_slave_vlan_add(struct net_device *dev,
			      const struct switchdev_obj *obj,
			      struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;
	int err;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
	 * the same VID.
	 */
	if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) {
		rcu_read_lock();
		err = dsa_slave_vlan_check_for_8021q_uppers(dev, vlan);
		rcu_read_unlock();
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Port already has a VLAN upper with this VID");
			return err;
		}
	}

	return dsa_port_vlan_add(dp, vlan, extack);
}

/* Offload a VLAN installed on the bridge or on a foreign interface by
 * installing it as a VLAN towards the CPU port.
 */
static int dsa_slave_host_vlan_add(struct net_device *dev,
				   const struct switchdev_obj *obj,
				   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan;

	/* Do nothing if this is a software bridge */
	if (!dp->bridge)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Even though drivers often handle CPU membership in special ways,
	 * it doesn't make sense to program a PVID, so clear this flag.
	 */
	vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;

	return dsa_port_host_vlan_add(dp, &vlan, extack);
}

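/* switchdev object add handler. Port VLANs are the special case: when
 * orig_dev is not an offloaded bridge port, the VLAN comes from the bridge
 * itself or from a foreign interface, and is programmed towards the CPU port
 * instead.
 */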
static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
				  const struct switchdev_obj *obj,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_slave_vlan_add(dev, obj, extack);
		else
			err = dsa_slave_host_vlan_add(dev, obj, extack);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_slave_vlan_del(struct net_device *dev,
			      const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	return dsa_port_vlan_del(dp, vlan);
}

static int dsa_slave_host_vlan_del(struct net_device *dev,
				   const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;

	/* Do nothing if this is a software bridge */
	if (!dp->bridge)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	return dsa_port_host_vlan_del(dp, vlan);
}

static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_slave_vlan_del(dev, obj);
		else
			err = dsa_slave_host_vlan_del(dev, obj);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
						     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_slave_priv *p = netdev_priv(dev);

	return netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
	return NETDEV_TX_OK;
#endif
}

static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
				 struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return;

	if (!ds->ops->port_txtstamp)
		return;

	ds->ops->port_txtstamp(ds, p->dp->index, skb);
}

netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
{
	/* SKBs for netpoll still need to be mangled with the protocol-specific
	 * tag to be successfully transmitted
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_slave_netpoll_send_skb(dev, skb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	skb->dev = dsa_slave_to_master(dev);
	dev_queue_xmit(skb);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);

static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
{
	int needed_headroom = dev->needed_headroom;
	int needed_tailroom = dev->needed_tailroom;

	/* For tail taggers, we need to pad short frames ourselves, to ensure
	 * that the tail tag does not fail at its role of being at the end of
	 * the packet, once the master interface pads the frame. Account for
	 * that pad length here, and pad later.
	 */
	if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
		needed_tailroom += ETH_ZLEN - skb->len;
	/* skb_headroom() returns unsigned int... */
	needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);

	if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
		/* No reallocation needed, yay! */
		return 0;

	return pskb_expand_head(skb, needed_headroom, needed_tailroom,
				GFP_ATOMIC);
}

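/* TX path: account the packet, give the driver a chance to take a hardware
 * TX timestamp, make room for the tagging protocol's headers, then hand the
 * tagged skb to the master interface via dsa_enqueue_skb().
 */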
static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct sk_buff *nskb;

	dev_sw_netstats_tx_add(dev, 1, skb->len);

	memset(skb->cb, 0, sizeof(skb->cb));

	/* Handle tx timestamp if any */
	dsa_skb_tx_timestamp(p, skb);

	if (dsa_realloc_skb(skb, dev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* needed_tailroom should still be 'warm' in the cache line from
	 * dsa_realloc_skb(), which has also ensured that padding is safe.
	 */
	if (dev->needed_tailroom)
		eth_skb_pad(skb);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	return dsa_enqueue_skb(nskb, dev);
}

/* ethtool operations *******************************************************/

static void dsa_slave_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}

static int dsa_slave_get_regs_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs_len)
		return ds->ops->get_regs_len(ds, dp->index);

	return -EOPNOTSUPP;
}

static void
dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs)
		ds->ops->get_regs(ds, dp->index, regs, _p);
}

static int dsa_slave_nway_reset(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_nway_reset(dp->pl);
}

static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->cd && ds->cd->eeprom_len)
		return ds->cd->eeprom_len;

	if (ds->ops->get_eeprom_len)
		return ds->ops->get_eeprom_len(ds);

	return 0;
}

static int dsa_slave_get_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eeprom)
		return ds->ops->get_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static int dsa_slave_set_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->set_eeprom)
		return ds->ops->set_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static void dsa_slave_get_strings(struct net_device *dev,
				  uint32_t stringset, uint8_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strncpy(data, "tx_packets", len);
		strncpy(data + len, "tx_bytes", len);
		strncpy(data + 2 * len, "rx_packets", len);
		strncpy(data + 3 * len, "rx_bytes", len);
		if (ds->ops->get_strings)
			ds->ops->get_strings(ds, dp->index, stringset,
					     data + 4 * len);
	} else if (stringset == ETH_SS_TEST) {
		net_selftest_get_strings(data);
	}
}

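/* The first four u64 slots are the software counters whose strings are set
 * by dsa_slave_get_strings(); hardware counters from the driver follow at
 * data + 4, matching dsa_slave_get_sset_count() below.
 */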
static void dsa_slave_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats,
					uint64_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(dev->tstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = s->tx_packets;
			tx_bytes = s->tx_bytes;
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}

static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (sset == ETH_SS_STATS) {
		int count = 0;

		if (ds->ops->get_sset_count) {
			count = ds->ops->get_sset_count(ds, dp->index, sset);
			if (count < 0)
				return count;
		}

		return count + 4;
	} else if (sset == ETH_SS_TEST) {
		return net_selftest_get_count();
	}

	return -EOPNOTSUPP;
}

static void dsa_slave_get_eth_phy_stats(struct net_device *dev,
					struct ethtool_eth_phy_stats *phy_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_phy_stats)
		ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats);
}

static void dsa_slave_get_eth_mac_stats(struct net_device *dev,
					struct ethtool_eth_mac_stats *mac_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_mac_stats)
		ds->ops->get_eth_mac_stats(ds, dp->index, mac_stats);
}

static void
dsa_slave_get_eth_ctrl_stats(struct net_device *dev,
			     struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_ctrl_stats)
		ds->ops->get_eth_ctrl_stats(ds, dp->index, ctrl_stats);
}

static void dsa_slave_net_selftest(struct net_device *ndev,
				   struct ethtool_test *etest, u64 *buf)
{
	struct dsa_port *dp = dsa_slave_to_port(ndev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->self_test) {
		ds->ops->self_test(ds, dp->index, etest, buf);
		return;
	}

	net_selftest(ndev, etest, buf);
}

static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	phylink_ethtool_get_wol(dp->pl, w);

	if (ds->ops->get_wol)
		ds->ops->get_wol(ds, dp->index, w);
}

static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret = -EOPNOTSUPP;

	phylink_ethtool_set_wol(dp->pl, w);

	if (ds->ops->set_wol)
		ret = ds->ops->set_wol(ds, dp->index, w);

	return ret;
}

static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->set_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->set_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_set_eee(dp->pl, e);
}

static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->get_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->get_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_get_eee(dp->pl, e);
}

static int dsa_slave_get_link_ksettings(struct net_device *dev,
					struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_get(dp->pl, cmd);
}

static int dsa_slave_set_link_ksettings(struct net_device *dev,
					const struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_set(dp->pl, cmd);
}

static void dsa_slave_get_pauseparam(struct net_device *dev,
				     struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	phylink_ethtool_get_pauseparam(dp->pl, pause);
}

static int dsa_slave_set_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_set_pauseparam(dp->pl, pause);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_slave_netpoll_setup(struct net_device *dev,
				   struct netpoll_info *ni)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	if (!netpoll)
		return -ENOMEM;

	err = __netpoll_setup(netpoll, master);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	p->netpoll = netpoll;
out:
	return err;
}

static void dsa_slave_netpoll_cleanup(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll = p->netpoll;

	if (!netpoll)
		return;

	p->netpoll = NULL;

	__netpoll_free(netpoll);
}

static void dsa_slave_poll_controller(struct net_device *dev)
{
}
#endif

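/* tc matchall offload: mirroring and policing rules are tracked in
 * p->mall_tc_list, keyed by the classifier cookie so they can be found
 * again on deletion.
 */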
static struct dsa_mall_tc_entry *
dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_mirror_tc_entry *mirror;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	struct dsa_port *to_dp;
	int err;

	if (!ds->ops->port_mirror_add)
		return -EOPNOTSUPP;

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	act = &cls->rule->action.entries[0];

	if (!act->dev)
		return -EINVAL;

	if (!dsa_slave_dev_check(act->dev))
		return -EOPNOTSUPP;

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
	mirror = &mall_tc_entry->mirror;

	to_dp = dsa_slave_to_port(act->dev);

	mirror->to_local_port = to_dp->index;
	mirror->ingress = ingress;

	err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress, extack);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

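/* Same as above, but for a FLOW_ACTION_POLICE action: only supported on
 * ingress, and only one policer per port.
 */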
static int
dsa_slave_add_cls_matchall_police(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_policer_tc_entry *policer;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	int err;

	if (!ds->ops->port_policer_add) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Policing offload not implemented");
		return -EOPNOTSUPP;
	}

	if (!ingress) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only supported on ingress qdisc");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
		if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only one port policer allowed");
			return -EEXIST;
		}
	}

	act = &cls->rule->action.entries[0];

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_POLICER;
	policer = &mall_tc_entry->policer;
	policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
	policer->burst = act->police.burst;

	err = ds->ops->port_policer_add(ds, dp->index, policer);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

static int dsa_slave_add_cls_matchall(struct net_device *dev,
				      struct tc_cls_matchall_offload *cls,
				      bool ingress)
{
	int err = -EOPNOTSUPP;

	if (cls->common.protocol == htons(ETH_P_ALL) &&
	    flow_offload_has_one_action(&cls->rule->action) &&
	    cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
		err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
	else if (flow_offload_has_one_action(&cls->rule->action) &&
		 cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
		err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);

	return err;
}

static void dsa_slave_del_cls_matchall(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;

	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
	if (!mall_tc_entry)
		return;

	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case DSA_PORT_MALL_MIRROR:
		if (ds->ops->port_mirror_del)
			ds->ops->port_mirror_del(ds, dp->index,
						 &mall_tc_entry->mirror);
		break;
	case DSA_PORT_MALL_POLICER:
		if (ds->ops->port_policer_del)
			ds->ops->port_policer_del(ds, dp->index);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
					   struct tc_cls_matchall_offload *cls,
					   bool ingress)
{
	if (cls->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dsa_slave_add_cls_matchall(dev, cls, ingress);
	case TC_CLSMATCHALL_DESTROY:
		dsa_slave_del_cls_matchall(dev, cls);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_add_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_add)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_add(ds, port, cls, ingress);
}

static int dsa_slave_del_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_del)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_del(ds, port, cls, ingress);
}

static int dsa_slave_stats_cls_flower(struct net_device *dev,
				      struct flow_cls_offload *cls,
				      bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_stats)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_stats(ds, port, cls, ingress);
}

static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
					 struct flow_cls_offload *cls,
					 bool ingress)
{
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		return dsa_slave_add_cls_flower(dev, cls, ingress);
	case FLOW_CLS_DESTROY:
		return dsa_slave_del_cls_flower(dev, cls, ingress);
	case FLOW_CLS_STATS:
		return dsa_slave_stats_cls_flower(dev, cls, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				       void *cb_priv, bool ingress)
{
	struct net_device *dev = cb_priv;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
	case TC_SETUP_CLSFLOWER:
		return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
}

static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
}

static LIST_HEAD(dsa_slave_block_cb_list);

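/* Bind a flow block callback for either the ingress or the egress qdisc; the
 * shared dsa_slave_block_cb_list lets flow_block_cb_is_busy() reject double
 * binds.
 */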
static int dsa_slave_setup_tc_block(struct net_device *dev,
				    struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		cb = dsa_slave_setup_tc_block_cb_ig;
	else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		cb = dsa_slave_setup_tc_block_cb_eg;
	else
		return -EOPNOTSUPP;

	f->driver_block_list = &dsa_slave_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_ft_block(struct dsa_switch *ds, int port,
				    void *type_data)
{
	struct dsa_port *cpu_dp = dsa_to_port(ds, port)->cpu_dp;
	struct net_device *master = cpu_dp->master;

	if (!master->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	return master->netdev_ops->ndo_setup_tc(master, TC_SETUP_FT, type_data);
}

static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	switch (type) {
	case TC_SETUP_BLOCK:
		return dsa_slave_setup_tc_block(dev, type_data);
	case TC_SETUP_FT:
		return dsa_slave_setup_ft_block(ds, dp->index, type_data);
	default:
		break;
	}

	if (!ds->ops->port_setup_tc)
		return -EOPNOTSUPP;

	return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
}

static int dsa_slave_get_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}

static int dsa_slave_set_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->set_rxnfc(ds, dp->index, nfc);
}

static int dsa_slave_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *ts)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;

	if (!ds->ops->get_ts_info)
		return -EOPNOTSUPP;

	return ds->ops->get_ts_info(ds, p->dp->index, ts);
}

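/* .ndo_vlan_rx_add_vid: called for 8021q uppers when the port is VLAN-aware
 * (NETIF_F_HW_VLAN_CTAG_FILTER). The VID is installed on the user port as
 * well as on the CPU port, since both must be members for the VLAN to pass
 * through the switch.
 */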
static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
				     u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	struct netlink_ext_ack extack = {0};
	int ret;

	/* User port... */
	ret = dsa_port_vlan_add(dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "%s\n", extack._msg);
		return ret;
	}

	/* And CPU port... */
	ret = dsa_port_host_vlan_add(dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index,
				   extack._msg);
		return ret;
	}

	return 0;
}

static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
				      u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.vid = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	int err;

	err = dsa_port_vlan_del(dp, &vlan);
	if (err)
		return err;

	return dsa_port_host_vlan_del(dp, &vlan);
}

static int dsa_slave_restore_vlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);

	return dsa_slave_vlan_rx_add_vid(arg, proto, vid);
}

static int dsa_slave_clear_vlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);

	return dsa_slave_vlan_rx_kill_vid(arg, proto, vid);
}

/* Keep the VLAN RX filtering list in sync with the hardware only if VLAN
 * filtering is enabled. The baseline is that only ports that offload a
 * VLAN-aware bridge are VLAN-aware, and standalone ports are VLAN-unaware,
 * but there are exceptions for quirky hardware.
 *
 * If ds->vlan_filtering_is_global = true, then standalone ports which share
 * the same switch with other ports that offload a VLAN-aware bridge are also
 * inevitably VLAN-aware.
 *
 * To summarize, a DSA switch port offloads:
 *
 * - If standalone (this includes software bridge, software LAG):
 *     - if ds->needs_standalone_vlan_filtering = true, OR if
 *       (ds->vlan_filtering_is_global = true AND there are bridges spanning
 *       this switch chip which have vlan_filtering=1)
 *         - the 8021q upper VLANs
 *     - else (standalone VLAN filtering is not needed, VLAN filtering is not
 *       global, or it is, but no port is under a VLAN-aware bridge):
 *         - no VLAN (any 8021q upper is a software VLAN)
 *
 * - If under a vlan_filtering=0 bridge which it offloads:
 *     - if ds->configure_vlan_while_not_filtering = true (default):
 *         - the bridge VLANs. These VLANs are committed to hardware but inactive.
 *     - else (deprecated):
 *         - no VLAN. The bridge VLANs are not restored when VLAN awareness is
 *           enabled, so this behavior is broken and discouraged.
 *
 * - If under a vlan_filtering=1 bridge which it offloads:
 *     - the bridge VLANs
 *     - the 8021q upper VLANs
 */
int dsa_slave_manage_vlan_filtering(struct net_device *slave,
				    bool vlan_filtering)
{
	int err;

	if (vlan_filtering) {
		slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

		err = vlan_for_each(slave, dsa_slave_restore_vlan, slave);
		if (err) {
			vlan_for_each(slave, dsa_slave_clear_vlan, slave);
			slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
			return err;
		}
	} else {
		err = vlan_for_each(slave, dsa_slave_clear_vlan, slave);
		if (err)
			return err;

		slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	return 0;
}

struct dsa_hw_port {
	struct list_head list;
	struct net_device *dev;
	int old_mtu;
};

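/* Apply @mtu to every port in the list, rolling back the ports already
 * changed if one of them fails.
 */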
static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
{
	const struct dsa_hw_port *p;
	int err;

	list_for_each_entry(p, hw_port_list, list) {
		if (p->dev->mtu == mtu)
			continue;

		err = dev_set_mtu(p->dev, mtu);
		if (err)
			goto rollback;
	}

	return 0;

rollback:
	list_for_each_entry_continue_reverse(p, hw_port_list, list) {
		if (p->dev->mtu == p->old_mtu)
			continue;

		if (dev_set_mtu(p->dev, p->old_mtu))
			netdev_err(p->dev, "Failed to restore MTU\n");
	}

	return err;
}

static void dsa_hw_port_list_free(struct list_head *hw_port_list)
{
	struct dsa_hw_port *p, *n;

	list_for_each_entry_safe(p, n, hw_port_list, list)
		kfree(p);
}

/* Make the hardware datapath to/from @dev limited to a common MTU */
static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
{
	struct list_head hw_port_list;
	struct dsa_switch_tree *dst;
	int min_mtu = ETH_MAX_MTU;
	struct dsa_port *other_dp;
	int err;

	if (!dp->ds->mtu_enforcement_ingress)
		return;

	if (!dp->bridge)
		return;

	INIT_LIST_HEAD(&hw_port_list);

	/* Populate the list of ports that are part of the same bridge
	 * as the newly added/modified port
	 */
	list_for_each_entry(dst, &dsa_tree_list, list) {
		list_for_each_entry(other_dp, &dst->ports, list) {
			struct dsa_hw_port *hw_port;
			struct net_device *slave;

			if (other_dp->type != DSA_PORT_TYPE_USER)
				continue;

			if (!dsa_port_bridge_same(dp, other_dp))
				continue;

			if (!other_dp->ds->mtu_enforcement_ingress)
				continue;

			slave = other_dp->slave;

			if (min_mtu > slave->mtu)
				min_mtu = slave->mtu;

			hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
			if (!hw_port)
				goto out;

			hw_port->dev = slave;
			hw_port->old_mtu = slave->mtu;

			list_add(&hw_port->list, &hw_port_list);
		}
	}

	/* Attempt to configure the entire hardware bridge to the newly added
	 * interface's MTU first, regardless of whether the intention of the
	 * user was to raise or lower it.
	 */
	err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
	if (!err)
		goto out;

	/* Clearly that didn't work out so well, so just set the minimum MTU on
	 * all hardware bridge ports now. If this fails too, then all ports will
	 * still have their old MTU rolled back anyway.
	 */
	dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);

out:
	dsa_hw_port_list_free(&hw_port_list);
}

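/* Changing the MTU of a user port may also require changing the MTU of the
 * master (largest user port MTU plus tagger overhead) and of the CPU port,
 * with rollback in reverse order if any step fails.
 */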
int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	struct dsa_port *dp_iter;
	struct dsa_port *cpu_dp;
	int port = p->dp->index;
	int largest_mtu = 0;
	int new_master_mtu;
	int old_master_mtu;
	int mtu_limit;
	int cpu_mtu;
	int err;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	list_for_each_entry(dp_iter, &ds->dst->ports, list) {
		int slave_mtu;

		if (!dsa_port_is_user(dp_iter))
			continue;

		/* During probe, this function will be called for each slave
		 * device, while not all of them have been allocated. That's
		 * ok, it doesn't change what the maximum is, so ignore it.
		 */
		if (!dp_iter->slave)
			continue;

		/* Pretend that we already applied the setting, which we
		 * actually haven't (still haven't done all integrity checks)
		 */
		if (dp_iter == dp)
			slave_mtu = new_mtu;
		else
			slave_mtu = dp_iter->slave->mtu;

		if (largest_mtu < slave_mtu)
			largest_mtu = slave_mtu;
	}

	cpu_dp = dsa_to_port(ds, port)->cpu_dp;

	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
	old_master_mtu = master->mtu;
	new_master_mtu = largest_mtu + dsa_tag_protocol_overhead(cpu_dp->tag_ops);
	if (new_master_mtu > mtu_limit)
		return -ERANGE;

	/* If the master MTU isn't over limit, there's no need to check the CPU
	 * MTU, since that surely isn't either.
	 */
	cpu_mtu = largest_mtu;

	/* Start applying stuff */
	if (new_master_mtu != old_master_mtu) {
		err = dev_set_mtu(master, new_master_mtu);
		if (err < 0)
			goto out_master_failed;

		/* We only need to propagate the MTU of the CPU port to
		 * upstream switches, so create a non-targeted notifier which
		 * updates all switches.
		 */
		err = dsa_port_mtu_change(cpu_dp, cpu_mtu, false);
		if (err)
			goto out_cpu_failed;
	}

	err = dsa_port_mtu_change(dp, new_mtu, true);
	if (err)
		goto out_port_failed;

	dev->mtu = new_mtu;

	dsa_bridge_mtu_normalization(dp);

	return 0;

out_port_failed:
	if (new_master_mtu != old_master_mtu)
		dsa_port_mtu_change(cpu_dp, old_master_mtu -
				    dsa_tag_protocol_overhead(cpu_dp->tag_ops),
				    false);
out_cpu_failed:
	if (new_master_mtu != old_master_mtu)
		dev_set_mtu(master, old_master_mtu);
out_master_failed:
	return err;
}

static int __maybe_unused
dsa_slave_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	unsigned long mask, new_prio;
	int err, port = dp->index;

	if (!ds->ops->port_set_default_prio)
		return -EOPNOTSUPP;

	err = dcb_ieee_setapp(dev, app);
	if (err)
		return err;

	mask = dcb_ieee_getapp_mask(dev, app);
	new_prio = __fls(mask);

	err = ds->ops->port_set_default_prio(ds, port, new_prio);
	if (err) {
		dcb_ieee_delapp(dev, app);
		return err;
	}

	return 0;
}

static int __maybe_unused
dsa_slave_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	unsigned long mask, new_prio;
	int err, port = dp->index;
	u8 dscp = app->protocol;

	if (!ds->ops->port_add_dscp_prio)
		return -EOPNOTSUPP;

	if (dscp >= 64) {
		netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n",
			   dscp);
		return -EINVAL;
	}

	err = dcb_ieee_setapp(dev, app);
	if (err)
		return err;

	mask = dcb_ieee_getapp_mask(dev, app);
	new_prio = __fls(mask);

	err = ds->ops->port_add_dscp_prio(ds, port, dscp, new_prio);
	if (err) {
		dcb_ieee_delapp(dev, app);
		return err;
	}

	return 0;
}

static int __maybe_unused dsa_slave_dcbnl_ieee_setapp(struct net_device *dev,
						      struct dcb_app *app)
{
	switch (app->selector) {
	case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
		switch (app->protocol) {
		case 0:
			return dsa_slave_dcbnl_set_default_prio(dev, app);
		default:
			return -EOPNOTSUPP;
		}
		break;
	case IEEE_8021QAZ_APP_SEL_DSCP:
		return dsa_slave_dcbnl_add_dscp_prio(dev, app);
	default:
		return -EOPNOTSUPP;
	}
}

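/* Deletion mirrors the addition above: on hardware failure, the dcbnl APP
 * entry is re-added so the table stays in sync with the switch.
 */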
static int __maybe_unused
dsa_slave_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	unsigned long mask, new_prio;
	int err, port = dp->index;

	if (!ds->ops->port_set_default_prio)
		return -EOPNOTSUPP;

	err = dcb_ieee_delapp(dev, app);
	if (err)
		return err;

	mask = dcb_ieee_getapp_mask(dev, app);
	new_prio = mask ? __fls(mask) : 0;

	err = ds->ops->port_set_default_prio(ds, port, new_prio);
	if (err) {
		dcb_ieee_setapp(dev, app);
		return err;
	}

	return 0;
}

static int __maybe_unused
dsa_slave_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int err, port = dp->index;
	u8 dscp = app->protocol;

	if (!ds->ops->port_del_dscp_prio)
		return -EOPNOTSUPP;

	err = dcb_ieee_delapp(dev, app);
	if (err)
		return err;

	err = ds->ops->port_del_dscp_prio(ds, port, dscp, app->priority);
	if (err) {
		dcb_ieee_setapp(dev, app);
		return err;
	}

	return 0;
}

static int __maybe_unused dsa_slave_dcbnl_ieee_delapp(struct net_device *dev,
						      struct dcb_app *app)
{
	switch (app->selector) {
	case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
		switch (app->protocol) {
		case 0:
			return dsa_slave_dcbnl_del_default_prio(dev, app);
		default:
			return -EOPNOTSUPP;
		}
		break;
	case IEEE_8021QAZ_APP_SEL_DSCP:
		return dsa_slave_dcbnl_del_dscp_prio(dev, app);
	default:
		return -EOPNOTSUPP;
	}
}

/* Pre-populate the DCB application priority table with the priorities
 * configured during switch setup, which we read from hardware here.
 */
static int dsa_slave_dcbnl_init(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_get_default_prio) {
		int prio = ds->ops->port_get_default_prio(ds, port);
		struct dcb_app app = {
			.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
			.protocol = 0,
			.priority = prio,
		};

		if (prio < 0)
			return prio;

		err = dcb_ieee_setapp(dev, &app);
		if (err)
			return err;
	}

	if (ds->ops->port_get_dscp_prio) {
		int protocol;

		for (protocol = 0; protocol < 64; protocol++) {
			struct dcb_app app = {
				.selector = IEEE_8021QAZ_APP_SEL_DSCP,
				.protocol = protocol,
			};
			int prio;

			prio = ds->ops->port_get_dscp_prio(ds, port, protocol);
			if (prio == -EOPNOTSUPP)
				continue;
			if (prio < 0)
				return prio;

			app.priority = prio;

			err = dcb_ieee_setapp(dev, &app);
			if (err)
				return err;
		}
	}

	return 0;
}


static const struct ethtool_ops dsa_slave_ethtool_ops = {
	.get_drvinfo		= dsa_slave_get_drvinfo,
	.get_regs_len		= dsa_slave_get_regs_len,
	.get_regs		= dsa_slave_get_regs,
	.nway_reset		= dsa_slave_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= dsa_slave_get_eeprom_len,
	.get_eeprom		= dsa_slave_get_eeprom,
	.set_eeprom		= dsa_slave_set_eeprom,
	.get_strings		= dsa_slave_get_strings,
	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
	.get_sset_count		= dsa_slave_get_sset_count,
	.get_eth_phy_stats	= dsa_slave_get_eth_phy_stats,
	.get_eth_mac_stats	= dsa_slave_get_eth_mac_stats,
	.get_eth_ctrl_stats	= dsa_slave_get_eth_ctrl_stats,
	.set_wol		= dsa_slave_set_wol,
	.get_wol		= dsa_slave_get_wol,
	.set_eee		= dsa_slave_set_eee,
	.get_eee		= dsa_slave_get_eee,
	.get_link_ksettings	= dsa_slave_get_link_ksettings,
	.set_link_ksettings	= dsa_slave_set_link_ksettings,
	.get_pauseparam		= dsa_slave_get_pauseparam,
	.set_pauseparam		= dsa_slave_set_pauseparam,
	.get_rxnfc		= dsa_slave_get_rxnfc,
	.set_rxnfc		= dsa_slave_set_rxnfc,
	.get_ts_info		= dsa_slave_get_ts_info,
	.self_test		= dsa_slave_net_selftest,
};

static const struct dcbnl_rtnl_ops __maybe_unused dsa_slave_dcbnl_ops = {
	.ieee_setapp		= dsa_slave_dcbnl_ieee_setapp,
	.ieee_delapp		= dsa_slave_dcbnl_ieee_delapp,
};

static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return &dp->devlink_port;
}

static void dsa_slave_get_stats64(struct net_device *dev,
				  struct rtnl_link_stats64 *s)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_stats64)
		ds->ops->get_stats64(ds, dp->index, s);
	else
		dev_get_tstats64(dev, s);
}

static int dsa_slave_fill_forward_path(struct net_device_path_ctx *ctx,
				       struct net_device_path *path)
{
	struct dsa_port *dp = dsa_slave_to_port(ctx->dev);
	struct dsa_port *cpu_dp = dp->cpu_dp;

	path->dev = ctx->dev;
	path->type = DEV_PATH_DSA;
	path->dsa.proto = cpu_dp->tag_ops->proto;
	path->dsa.port = dp->index;
	ctx->dev = cpu_dp->master;

	return 0;
}

static const struct net_device_ops dsa_slave_netdev_ops = {
	.ndo_open		= dsa_slave_open,
	.ndo_stop		= dsa_slave_close,
	.ndo_start_xmit		= dsa_slave_xmit,
	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
	.ndo_set_mac_address	= dsa_slave_set_mac_address,
	.ndo_fdb_dump		= dsa_slave_fdb_dump,
	.ndo_eth_ioctl		= dsa_slave_ioctl,
	.ndo_get_iflink		= dsa_slave_get_iflink,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	= dsa_slave_netpoll_setup,
	.ndo_netpoll_cleanup	= dsa_slave_netpoll_cleanup,
	.ndo_poll_controller	= dsa_slave_poll_controller,
#endif
	.ndo_setup_tc		= dsa_slave_setup_tc,
	.ndo_get_stats64	= dsa_slave_get_stats64,
	.ndo_vlan_rx_add_vid	= dsa_slave_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= dsa_slave_vlan_rx_kill_vid,
	.ndo_get_devlink_port	= dsa_slave_get_devlink_port,
	.ndo_change_mtu		= dsa_slave_change_mtu,
	.ndo_fill_forward_path	= dsa_slave_fill_forward_path,
};

static struct device_type dsa_type = {
	.name	= "dsa",
};

void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
{
	const struct dsa_port *dp = dsa_to_port(ds, port);

	if (dp->pl)
		phylink_mac_change(dp->pl, up);
}
EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
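
/* Driver-side sketch (hypothetical, for illustration only): a switch whose
 * port is hardwired up at 1Gbps full duplex could back the polled
 * .phylink_fixed_state op, which dsa_slave_phylink_fixed_state() below
 * dispatches to, with something like:
 *
 *	static void foo_phylink_fixed_state(struct dsa_switch *ds, int port,
 *					    struct phylink_link_state *state)
 *	{
 *		state->link = 1;
 *		state->speed = SPEED_1000;
 *		state->duplex = DUPLEX_FULL;
 *	}
 */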

static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
					  struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	/* No need to check that this operation is valid, the callback would
	 * not be called if it was not.
	 */
	ds->ops->phylink_fixed_state(ds, dp->index, state);
}

/* slave device setup *******************************************************/
static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr,
				 u32 flags)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_switch *ds = dp->ds;

	slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
	if (!slave_dev->phydev) {
		netdev_err(slave_dev, "no phy at %d\n", addr);
		return -ENODEV;
	}

	slave_dev->phydev->dev_flags |= flags;

	return phylink_connect_phy(dp->pl, slave_dev->phydev);
}

static int dsa_slave_phy_setup(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct device_node *port_dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	u32 phy_flags = 0;
	int ret;

	dp->pl_config.dev = &slave_dev->dev;
	dp->pl_config.type = PHYLINK_NETDEV;

	/* The get_fixed_state callback takes precedence over polling the
	 * link GPIO in PHYLINK (see phylink_get_fixed_state). Only set
	 * this if the switch provides such a callback.
	 */
	if (ds->ops->phylink_fixed_state) {
		dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
		dp->pl_config.poll_fixed_state = true;
	}

	ret = dsa_port_phylink_create(dp);
	if (ret)
		return ret;

	if (ds->ops->get_phy_flags)
		phy_flags = ds->ops->get_phy_flags(ds, dp->index);

	ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
	if (ret == -ENODEV && ds->slave_mii_bus) {
		/* We could not connect to a designated PHY or SFP, so try to
		 * use the switch internal MDIO bus instead
		 */
		ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags);
	}
	if (ret) {
		netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
			   ERR_PTR(ret));
		phylink_destroy(dp->pl);
	}

	return ret;
}
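
/* Worked example for the headroom math in dsa_slave_setup_tagger() below
 * (the numbers are illustrative): with a tagger that needs 8 bytes of
 * headroom and a master that itself requests 2, the slave ends up asking
 * for 8 + 2 = 10 bytes, so a well-behaved caller allocates enough skb
 * headroom for both the DSA tag insertion and the master's own needs in
 * one go, avoiding a reallocation in either xmit path.
 */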

void dsa_slave_setup_tagger(struct net_device *slave)
{
	struct dsa_port *dp = dsa_slave_to_port(slave);
	struct dsa_slave_priv *p = netdev_priv(slave);
	const struct dsa_port *cpu_dp = dp->cpu_dp;
	struct net_device *master = cpu_dp->master;
	const struct dsa_switch *ds = dp->ds;

	slave->needed_headroom = cpu_dp->tag_ops->needed_headroom;
	slave->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
	/* Try to save one extra realloc later in the TX path (in the master)
	 * by also inheriting the master's needed headroom and tailroom.
	 * The 8021q driver also does this.
	 */
	slave->needed_headroom += master->needed_headroom;
	slave->needed_tailroom += master->needed_tailroom;

	p->xmit = cpu_dp->tag_ops->xmit;

	slave->features = master->vlan_features | NETIF_F_HW_TC;
	slave->hw_features |= NETIF_F_HW_TC;
	slave->features |= NETIF_F_LLTX;
	if (slave->needed_tailroom)
		slave->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
	if (ds->needs_standalone_vlan_filtering)
		slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}

int dsa_slave_suspend(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);

	if (!netif_running(slave_dev))
		return 0;

	netif_device_detach(slave_dev);

	rtnl_lock();
	phylink_stop(dp->pl);
	rtnl_unlock();

	return 0;
}

int dsa_slave_resume(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);

	if (!netif_running(slave_dev))
		return 0;

	netif_device_attach(slave_dev);

	rtnl_lock();
	phylink_start(dp->pl);
	rtnl_unlock();

	return 0;
}
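
/* Usage sketch (assumed caller, not defined in this file): dsa_slave_suspend()
 * and dsa_slave_resume() are meant to be driven per user port from the switch
 * driver's system PM path, typically via dsa_switch_suspend() and
 * dsa_switch_resume(), e.g.:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_priv *priv = dev_get_drvdata(dev);
 *
 *		return dsa_switch_suspend(priv->ds);
 *	}
 */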

int dsa_slave_create(struct dsa_port *port)
{
	const struct dsa_port *cpu_dp = port->cpu_dp;
	struct net_device *master = cpu_dp->master;
	struct dsa_switch *ds = port->ds;
	const char *name = port->name;
	struct net_device *slave_dev;
	struct dsa_slave_priv *p;
	int ret;

	if (!ds->num_tx_queues)
		ds->num_tx_queues = 1;

	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
				     NET_NAME_UNKNOWN, ether_setup,
				     ds->num_tx_queues, 1);
	if (slave_dev == NULL)
		return -ENOMEM;

	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
#if IS_ENABLED(CONFIG_DCB)
	slave_dev->dcbnl_ops = &dsa_slave_dcbnl_ops;
#endif
	if (!is_zero_ether_addr(port->mac))
		eth_hw_addr_set(slave_dev, port->mac);
	else
		eth_hw_addr_inherit(slave_dev, master);
	slave_dev->priv_flags |= IFF_NO_QUEUE;
	if (dsa_switch_supports_uc_filtering(ds))
		slave_dev->priv_flags |= IFF_UNICAST_FLT;
	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
	if (ds->ops->port_max_mtu)
		slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);

	SET_NETDEV_DEV(slave_dev, port->ds->dev);
	slave_dev->dev.of_node = port->dn;
	slave_dev->vlan_features = master->vlan_features;

	p = netdev_priv(slave_dev);
	slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!slave_dev->tstats) {
		free_netdev(slave_dev);
		return -ENOMEM;
	}

	ret = gro_cells_init(&p->gcells, slave_dev);
	if (ret)
		goto out_free;

	p->dp = port;
	INIT_LIST_HEAD(&p->mall_tc_list);
	port->slave = slave_dev;
	dsa_slave_setup_tagger(slave_dev);

	netif_carrier_off(slave_dev);

	ret = dsa_slave_phy_setup(slave_dev);
	if (ret) {
		netdev_err(slave_dev,
			   "error %d setting up PHY for tree %d, switch %d, port %d\n",
			   ret, ds->dst->index, ds->index, port->index);
		goto out_gcells;
	}

	rtnl_lock();

	ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
	if (ret && ret != -EOPNOTSUPP)
		dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
			 ret, ETH_DATA_LEN, port->index);

	ret = register_netdevice(slave_dev);
	if (ret) {
		netdev_err(master, "error %d registering interface %s\n",
			   ret, slave_dev->name);
		rtnl_unlock();
		goto out_phy;
	}

	if (IS_ENABLED(CONFIG_DCB)) {
		ret = dsa_slave_dcbnl_init(slave_dev);
		if (ret) {
			netdev_err(slave_dev,
				   "failed to initialize DCB: %pe\n",
				   ERR_PTR(ret));
			rtnl_unlock();
			goto out_unregister;
		}
	}

	ret = netdev_upper_dev_link(master, slave_dev, NULL);

	rtnl_unlock();

	if (ret)
		goto out_unregister;

	return 0;

out_unregister:
	unregister_netdev(slave_dev);
out_phy:
	rtnl_lock();
	phylink_disconnect_phy(p->dp->pl);
	rtnl_unlock();
	phylink_destroy(p->dp->pl);
out_gcells:
	gro_cells_destroy(&p->gcells);
out_free:
	free_percpu(slave_dev->tstats);
	free_netdev(slave_dev);
	port->slave = NULL;
	return ret;
}

void dsa_slave_destroy(struct net_device *slave_dev)
{
	struct net_device *master = dsa_slave_to_master(slave_dev);
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_slave_priv *p = netdev_priv(slave_dev);

	netif_carrier_off(slave_dev);
	rtnl_lock();
	netdev_upper_dev_unlink(master, slave_dev);
	unregister_netdevice(slave_dev);
	phylink_disconnect_phy(dp->pl);
	rtnl_unlock();

	phylink_destroy(dp->pl);
	gro_cells_destroy(&p->gcells);
	free_percpu(slave_dev->tstats);
	free_netdev(slave_dev);
}

bool dsa_slave_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &dsa_slave_netdev_ops;
}
EXPORT_SYMBOL_GPL(dsa_slave_dev_check);

static int dsa_slave_changeupper(struct net_device *dev,
				 struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct netlink_ext_ack *extack;
	int err = NOTIFY_DONE;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_bridge_join(dp, info->upper_dev, extack);
			if (!err)
				dsa_bridge_mtu_normalization(dp);
			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Offloading not supported");
				err = 0;
			}
			err = notifier_from_errno(err);
		} else {
			dsa_port_bridge_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	} else if (netif_is_lag_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_lag_join(dp, info->upper_dev,
						info->upper_info, extack);
			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Offloading not supported");
				err = 0;
			}
			err = notifier_from_errno(err);
		} else {
			dsa_port_lag_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	} else if (is_hsr_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_hsr_join(dp, info->upper_dev);
			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Offloading not supported");
				err = 0;
			}
			err = notifier_from_errno(err);
		} else {
			dsa_port_hsr_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	}

	return err;
}
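
/* Note on the notifier return convention used above and below: handlers
 * encode an errno with notifier_from_errno(), which callers recover via
 * notifier_to_errno(); notifier_from_errno(0) collapses to NOTIFY_DONE,
 * so only genuine failures veto the operation. For example,
 * notifier_from_errno(-EBUSY) both stops the notifier chain and carries
 * -EBUSY back to the caller that initiated the topology change.
 */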

static int dsa_slave_prechangeupper(struct net_device *dev,
				    struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	if (netif_is_bridge_master(info->upper_dev) && !info->linking)
		dsa_port_pre_bridge_leave(dp, info->upper_dev);
	else if (netif_is_lag_master(info->upper_dev) && !info->linking)
		dsa_port_pre_lag_leave(dp, info->upper_dev);
	/* dsa_port_pre_hsr_leave is not yet necessary since hsr cannot be
	 * meaningfully enslaved to a bridge yet
	 */

	return NOTIFY_DONE;
}

static int
dsa_slave_lag_changeupper(struct net_device *dev,
			  struct netdev_notifier_changeupper_info *info)
{
	struct net_device *lower;
	struct list_head *iter;
	int err = NOTIFY_DONE;
	struct dsa_port *dp;

	netdev_for_each_lower_dev(dev, lower, iter) {
		if (!dsa_slave_dev_check(lower))
			continue;

		dp = dsa_slave_to_port(lower);
		if (!dp->lag)
			/* Software LAG */
			continue;

		err = dsa_slave_changeupper(lower, info);
		if (notifier_to_errno(err))
			break;
	}

	return err;
}

/* Same as dsa_slave_lag_changeupper() except that it calls
 * dsa_slave_prechangeupper()
 */
static int
dsa_slave_lag_prechangeupper(struct net_device *dev,
			     struct netdev_notifier_changeupper_info *info)
{
	struct net_device *lower;
	struct list_head *iter;
	int err = NOTIFY_DONE;
	struct dsa_port *dp;

	netdev_for_each_lower_dev(dev, lower, iter) {
		if (!dsa_slave_dev_check(lower))
			continue;

		dp = dsa_slave_to_port(lower);
		if (!dp->lag)
			/* Software LAG */
			continue;

		err = dsa_slave_prechangeupper(lower, info);
		if (notifier_to_errno(err))
			break;
	}

	return err;
}

static int
dsa_prevent_bridging_8021q_upper(struct net_device *dev,
				 struct netdev_notifier_changeupper_info *info)
{
	struct netlink_ext_ack *ext_ack;
	struct net_device *slave, *br;
	struct dsa_port *dp;

	ext_ack = netdev_notifier_info_to_extack(&info->info);

	if (!is_vlan_dev(dev))
		return NOTIFY_DONE;

	slave = vlan_dev_real_dev(dev);
	if (!dsa_slave_dev_check(slave))
		return NOTIFY_DONE;

	dp = dsa_slave_to_port(slave);
	br = dsa_port_bridge_dev_get(dp);
	if (!br)
		return NOTIFY_DONE;

	/* Deny enslaving a VLAN device into a VLAN-aware bridge */
	if (br_vlan_enabled(br) &&
	    netif_is_bridge_master(info->upper_dev) && info->linking) {
		NL_SET_ERR_MSG_MOD(ext_ack,
				   "Cannot enslave VLAN device into VLAN aware bridge");
		return notifier_from_errno(-EINVAL);
	}

	return NOTIFY_DONE;
}
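
/* Concrete example of the rule above (illustrative): with swp0 being a DSA
 * user port enslaved to a VLAN-aware bridge,
 *
 *	ip link add link swp0 name swp0.100 type vlan id 100
 *	ip link set swp0.100 master br1
 *
 * is rejected, since the hardware cannot keep the 8021q upper's VID 100
 * separate from the VLAN table that the VLAN-aware bridge under swp0
 * already manages.
 */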

static int
dsa_slave_check_8021q_upper(struct net_device *dev,
			    struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct bridge_vlan_info br_info;
	struct netlink_ext_ack *extack;
	int err = NOTIFY_DONE;
	u16 vid;

	if (!br || !br_vlan_enabled(br))
		return NOTIFY_DONE;

	extack = netdev_notifier_info_to_extack(&info->info);
	vid = vlan_dev_vlan_id(info->upper_dev);

	/* br_vlan_get_info() returns -EINVAL or -ENOENT if the device or
	 * the VID is not found, respectively. Returning 0 means the VID is
	 * already configured on the bridge, which is a failure for us here.
	 */
	err = br_vlan_get_info(br, vid, &br_info);
	if (err == 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "This VLAN is already configured by the bridge");
		return notifier_from_errno(-EBUSY);
	}

	return NOTIFY_DONE;
}

static int
dsa_slave_prechangeupper_sanity_check(struct net_device *dev,
				      struct netdev_notifier_changeupper_info *info)
{
	struct dsa_switch *ds;
	struct dsa_port *dp;
	int err;

	if (!dsa_slave_dev_check(dev))
		return dsa_prevent_bridging_8021q_upper(dev, info);

	dp = dsa_slave_to_port(dev);
	ds = dp->ds;

	if (ds->ops->port_prechangeupper) {
		err = ds->ops->port_prechangeupper(ds, dp->index, info);
		if (err)
			return notifier_from_errno(err);
	}

	if (is_vlan_dev(info->upper_dev))
		return dsa_slave_check_8021q_upper(dev, info);

	return NOTIFY_DONE;
}
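
/* Driver-side sketch (hypothetical): port_prechangeupper, consulted by
 * dsa_slave_prechangeupper_sanity_check() above, lets a driver veto an
 * upper it cannot offload before any state changes, e.g.:
 *
 *	static int foo_port_prechangeupper(struct dsa_switch *ds, int port,
 *					   struct netdev_notifier_changeupper_info *info)
 *	{
 *		if (netif_is_lag_master(info->upper_dev))
 *			return -EOPNOTSUPP;
 *
 *		return 0;
 *	}
 */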

static int dsa_slave_netdevice_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_PRECHANGEUPPER: {
		struct netdev_notifier_changeupper_info *info = ptr;
		int err;

		err = dsa_slave_prechangeupper_sanity_check(dev, info);
		if (err != NOTIFY_DONE)
			return err;

		if (dsa_slave_dev_check(dev))
			return dsa_slave_prechangeupper(dev, ptr);

		if (netif_is_lag_master(dev))
			return dsa_slave_lag_prechangeupper(dev, ptr);

		break;
	}
	case NETDEV_CHANGEUPPER:
		if (dsa_slave_dev_check(dev))
			return dsa_slave_changeupper(dev, ptr);

		if (netif_is_lag_master(dev))
			return dsa_slave_lag_changeupper(dev, ptr);

		break;
	case NETDEV_CHANGELOWERSTATE: {
		struct netdev_notifier_changelowerstate_info *info = ptr;
		struct dsa_port *dp;
		int err;

		if (!dsa_slave_dev_check(dev))
			break;

		dp = dsa_slave_to_port(dev);

		err = dsa_port_lag_change(dp, info->lower_state_info);
		return notifier_from_errno(err);
	}
	case NETDEV_CHANGE:
	case NETDEV_UP: {
		/* Track state of master port.
		 * DSA driver may require the master port (and indirectly
		 * the tagger) to be available for some special operation.
		 */
		if (netdev_uses_dsa(dev)) {
			struct dsa_port *cpu_dp = dev->dsa_ptr;
			struct dsa_switch_tree *dst = cpu_dp->ds->dst;

			/* Track when the master port is UP */
			dsa_tree_master_oper_state_change(dst, dev,
							  netif_oper_up(dev));

			/* Track when the master port is ready and can accept
			 * packets.
			 * The NETDEV_UP event is not enough to flag a port as
			 * ready. We also have to wait for linkwatch_do_dev to
			 * dev_activate and emit a NETDEV_CHANGE event.
			 * We check whether a master port is ready by checking
			 * whether the dev has a qdisc assigned and it is not
			 * the noop qdisc.
			 */
			dsa_tree_master_admin_state_change(dst, dev,
							   !qdisc_tx_is_noop(dev));

			return NOTIFY_OK;
		}

		return NOTIFY_DONE;
	}
	case NETDEV_GOING_DOWN: {
		struct dsa_port *dp, *cpu_dp;
		struct dsa_switch_tree *dst;
		LIST_HEAD(close_list);

		if (!netdev_uses_dsa(dev))
			return NOTIFY_DONE;

		cpu_dp = dev->dsa_ptr;
		dst = cpu_dp->ds->dst;

		dsa_tree_master_admin_state_change(dst, dev, false);

		list_for_each_entry(dp, &dst->ports, list) {
			if (!dsa_port_is_user(dp))
				continue;

			list_add(&dp->slave->close_list, &close_list);
		}

		dev_close_many(&close_list, true);

		return NOTIFY_OK;
	}
	default:
		break;
	}

	return NOTIFY_DONE;
}

static void
dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
{
	struct switchdev_notifier_fdb_info info = {};

	info.addr = switchdev_work->addr;
	info.vid = switchdev_work->vid;
	info.offloaded = true;
	call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
				 switchdev_work->orig_dev, &info.info, NULL);
}

static void dsa_slave_switchdev_event_work(struct work_struct *work)
{
	struct dsa_switchdev_event_work *switchdev_work =
		container_of(work, struct dsa_switchdev_event_work, work);
	const unsigned char *addr = switchdev_work->addr;
	struct net_device *dev = switchdev_work->dev;
	u16 vid = switchdev_work->vid;
	struct dsa_switch *ds;
	struct dsa_port *dp;
	int err;

	dp = dsa_slave_to_port(dev);
	ds = dp->ds;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (switchdev_work->host_addr)
			err = dsa_port_bridge_host_fdb_add(dp, addr, vid);
		else if (dp->lag)
			err = dsa_port_lag_fdb_add(dp, addr, vid);
		else
			err = dsa_port_fdb_add(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to fdb: %d\n",
				dp->index, addr, vid, err);
			break;
		}
		dsa_fdb_offload_notify(switchdev_work);
		break;

	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (switchdev_work->host_addr)
			err = dsa_port_bridge_host_fdb_del(dp, addr, vid);
		else if (dp->lag)
			err = dsa_port_lag_fdb_del(dp, addr, vid);
		else
			err = dsa_port_fdb_del(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from fdb: %d\n",
				dp->index, addr, vid, err);
		}

		break;
	}

	kfree(switchdev_work);
}
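
/* Example of the "foreign" classification below (illustrative): if swp0 is
 * bridged with a non-DSA NIC eth1 in br0, addresses learned on eth1 are
 * foreign to swp0's switch tree, so dsa_slave_fdb_event() turns them into
 * host FDB entries that point at the CPU port rather than entries towards
 * a user port.
 */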

static bool dsa_foreign_dev_check(const struct net_device *dev,
				  const struct net_device *foreign_dev)
{
	const struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch_tree *dst = dp->ds->dst;

	if (netif_is_bridge_master(foreign_dev))
		return !dsa_tree_offloads_bridge_dev(dst, foreign_dev);

	if (netif_is_bridge_port(foreign_dev))
		return !dsa_tree_offloads_bridge_port(dst, foreign_dev);

	/* Everything else is foreign */
	return true;
}

static int dsa_slave_fdb_event(struct net_device *dev,
			       struct net_device *orig_dev,
			       unsigned long event, const void *ctx,
			       const struct switchdev_notifier_fdb_info *fdb_info)
{
	struct dsa_switchdev_event_work *switchdev_work;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	bool host_addr = fdb_info->is_local;
	struct dsa_switch *ds = dp->ds;

	if (ctx && ctx != dp)
		return 0;

	if (!dp->bridge)
		return 0;

	if (switchdev_fdb_is_dynamically_learned(fdb_info)) {
		if (dsa_port_offloads_bridge_port(dp, orig_dev))
			return 0;

		/* FDB entries learned by the software bridge or by foreign
		 * bridge ports should be installed as host addresses only if
		 * the driver requests assisted learning.
		 */
		if (!ds->assisted_learning_on_cpu_port)
			return 0;
	}

	/* Also treat FDB entries on foreign interfaces bridged with us as host
	 * addresses.
	 */
	if (dsa_foreign_dev_check(dev, orig_dev))
		host_addr = true;

	/* Check early that we're not doing work in vain.
	 * Host addresses on LAG ports still require regular FDB ops,
	 * since the CPU port isn't in a LAG.
	 */
	if (dp->lag && !host_addr) {
		if (!ds->ops->lag_fdb_add || !ds->ops->lag_fdb_del)
			return -EOPNOTSUPP;
	} else {
		if (!ds->ops->port_fdb_add || !ds->ops->port_fdb_del)
			return -EOPNOTSUPP;
	}

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return -ENOMEM;

	netdev_dbg(dev, "%s FDB entry towards %s, addr %pM vid %d%s\n",
		   event == SWITCHDEV_FDB_ADD_TO_DEVICE ? "Adding" : "Deleting",
		   orig_dev->name, fdb_info->addr, fdb_info->vid,
		   host_addr ? " as host address" : "");

	INIT_WORK(&switchdev_work->work, dsa_slave_switchdev_event_work);
	switchdev_work->event = event;
	switchdev_work->dev = dev;
	switchdev_work->orig_dev = orig_dev;

	ether_addr_copy(switchdev_work->addr, fdb_info->addr);
	switchdev_work->vid = fdb_info->vid;
	switchdev_work->host_addr = host_addr;

	dsa_schedule_work(&switchdev_work->work);

	return 0;
}

/* Called under rcu_read_lock() */
static int dsa_slave_switchdev_event(struct notifier_block *unused,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_slave_dev_check,
						     dsa_slave_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
							   dsa_slave_dev_check,
							   dsa_foreign_dev_check,
							   dsa_slave_fdb_event);
		return notifier_from_errno(err);
	default:
		return NOTIFY_DONE;
	}
}

static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
					      unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add_foreign(dev, ptr,
							    dsa_slave_dev_check,
							    dsa_foreign_dev_check,
							    dsa_slave_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del_foreign(dev, ptr,
							    dsa_slave_dev_check,
							    dsa_foreign_dev_check,
							    dsa_slave_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_slave_dev_check,
						     dsa_slave_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}
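
/* The three notifier blocks below match the three event sources handled in
 * this file: netdevice events (delivered under rtnl_lock), atomic switchdev
 * events (delivered under rcu_read_lock, which is why the FDB work above is
 * deferred to dsa_slave_switchdev_event_work()), and blocking switchdev
 * events, which run in process context and may sleep.
 */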
static struct notifier_block dsa_slave_nb __read_mostly = {
	.notifier_call	= dsa_slave_netdevice_event,
};

struct notifier_block dsa_slave_switchdev_notifier = {
	.notifier_call	= dsa_slave_switchdev_event,
};

struct notifier_block dsa_slave_switchdev_blocking_notifier = {
	.notifier_call	= dsa_slave_switchdev_blocking_event,
};

int dsa_slave_register_notifier(void)
{
	struct notifier_block *nb;
	int err;

	err = register_netdevice_notifier(&dsa_slave_nb);
	if (err)
		return err;

	err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		goto err_switchdev_nb;

	nb = &dsa_slave_switchdev_blocking_notifier;
	err = register_switchdev_blocking_notifier(nb);
	if (err)
		goto err_switchdev_blocking_nb;

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
err_switchdev_nb:
	unregister_netdevice_notifier(&dsa_slave_nb);
	return err;
}

void dsa_slave_unregister_notifier(void)
{
	struct notifier_block *nb;
	int err;

	nb = &dsa_slave_switchdev_blocking_notifier;
	err = unregister_switchdev_blocking_notifier(nb);
	if (err)
		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);

	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);

	err = unregister_netdevice_notifier(&dsa_slave_nb);
	if (err)
		pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
}