// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/slave.c - Slave device handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/selftests.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/if_hsr.h>
#include <net/dcbnl.h>
#include <linux/netpoll.h>

#include "dsa_priv.h"

static void dsa_slave_standalone_event_work(struct work_struct *work)
{
	struct dsa_standalone_event_work *standalone_work =
		container_of(work, struct dsa_standalone_event_work, work);
	const unsigned char *addr = standalone_work->addr;
	struct net_device *dev = standalone_work->dev;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_mdb mdb;
	struct dsa_switch *ds = dp->ds;
	u16 vid = standalone_work->vid;
	int err;

	switch (standalone_work->event) {
	case DSA_UC_ADD:
		err = dsa_port_standalone_host_fdb_add(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to fdb: %d\n",
				dp->index, addr, vid, err);
			break;
		}
		break;

	case DSA_UC_DEL:
		err = dsa_port_standalone_host_fdb_del(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from fdb: %d\n",
				dp->index, addr, vid, err);
		}

		break;
	case DSA_MC_ADD:
		ether_addr_copy(mdb.addr, addr);
		mdb.vid = vid;

		err = dsa_port_standalone_host_mdb_add(dp, &mdb);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to mdb: %d\n",
				dp->index, addr, vid, err);
			break;
		}
		break;
	case DSA_MC_DEL:
		ether_addr_copy(mdb.addr, addr);
		mdb.vid = vid;

		err = dsa_port_standalone_host_mdb_del(dp, &mdb);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from mdb: %d\n",
				dp->index, addr, vid, err);
		}

		break;
	}

	kfree(standalone_work);
}

static int dsa_slave_schedule_standalone_work(struct net_device *dev,
					      enum dsa_standalone_event event,
					      const unsigned char *addr,
					      u16 vid)
{
	struct dsa_standalone_event_work *standalone_work;

	standalone_work = kzalloc(sizeof(*standalone_work), GFP_ATOMIC);
	if (!standalone_work)
		return -ENOMEM;

	INIT_WORK(&standalone_work->work, dsa_slave_standalone_event_work);
	standalone_work->event = event;
	standalone_work->dev = dev;

	ether_addr_copy(standalone_work->addr, addr);
	standalone_work->vid = vid;

	dsa_schedule_work(&standalone_work->work);

	return 0;
}
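
/* The sync/unsync callbacks below are passed to __dev_uc_sync() and
 * __dev_mc_sync() from .ndo_set_rx_mode, which runs in atomic context
 * (the netdev address lists are spinlock-protected). Mirroring an address
 * to the DSA master is fine there, but programming the switch may sleep,
 * hence the deferral to the workqueue above.
 */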
static int dsa_slave_sync_uc(struct net_device *dev,
			     const unsigned char *addr)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	dev_uc_add(master, addr);

	if (!dsa_switch_supports_uc_filtering(dp->ds))
		return 0;

	return dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD, addr, 0);
}

static int dsa_slave_unsync_uc(struct net_device *dev,
			       const unsigned char *addr)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	dev_uc_del(master, addr);

	if (!dsa_switch_supports_uc_filtering(dp->ds))
		return 0;

	return dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL, addr, 0);
}

static int dsa_slave_sync_mc(struct net_device *dev,
			     const unsigned char *addr)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	dev_mc_add(master, addr);

	if (!dsa_switch_supports_mc_filtering(dp->ds))
		return 0;

	return dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, addr, 0);
}

static int dsa_slave_unsync_mc(struct net_device *dev,
			       const unsigned char *addr)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	dev_mc_del(master, addr);

	if (!dsa_switch_supports_mc_filtering(dp->ds))
		return 0;

	return dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, addr, 0);
}

/* slave mii_bus handling ***************************************************/
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_read(ds, addr, reg);

	return 0xffff;
}

static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_write(ds, addr, reg, val);

	return 0;
}

void dsa_slave_mii_bus_init(struct dsa_switch *ds)
{
	ds->slave_mii_bus->priv = (void *)ds;
	ds->slave_mii_bus->name = "dsa slave smi";
	ds->slave_mii_bus->read = dsa_slave_phy_read;
	ds->slave_mii_bus->write = dsa_slave_phy_write;
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
		 ds->dst->index, ds->index);
	ds->slave_mii_bus->parent = ds->dev;
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}


/* slave device handling ****************************************************/
static int dsa_slave_get_iflink(const struct net_device *dev)
{
	return dsa_slave_to_master(dev)->ifindex;
}

static int dsa_slave_open(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int err;

	err = dev_open(master, NULL);
	if (err < 0) {
		netdev_err(dev, "failed to open master %s\n", master->name);
		goto out;
	}

	if (dsa_switch_supports_uc_filtering(ds)) {
		err = dsa_port_standalone_host_fdb_add(dp, dev->dev_addr, 0);
		if (err)
			goto out;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
		err = dev_uc_add(master, dev->dev_addr);
		if (err < 0)
			goto del_host_addr;
	}

	err = dsa_port_enable_rt(dp, dev->phydev);
	if (err)
		goto del_unicast;

	return 0;

del_unicast:
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);
del_host_addr:
	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
out:
	return err;
}

static int dsa_slave_close(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	dsa_port_disable_rt(dp);

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);

	return 0;
}
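
/* On switches that can filter host addresses, IFF_PROMISC and IFF_ALLMULTI
 * are honoured by opening up the CPU port's flooding settings: promiscuous
 * mode lifts both unicast and multicast filtering towards the host, while
 * all-multi lifts only multicast. Without such filtering support, all
 * traffic already reaches the CPU and nothing needs to change.
 */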
static void dsa_slave_manage_host_flood(struct net_device *dev)
{
	bool mc = dev->flags & (IFF_PROMISC | IFF_ALLMULTI);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	bool uc = dev->flags & IFF_PROMISC;

	dsa_port_set_host_flood(dp, uc, mc);
}

static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(master,
				 dev->flags & IFF_ALLMULTI ? 1 : -1);
	if (change & IFF_PROMISC)
		dev_set_promiscuity(master,
				    dev->flags & IFF_PROMISC ? 1 : -1);

	if (dsa_switch_supports_uc_filtering(ds) &&
	    dsa_switch_supports_mc_filtering(ds))
		dsa_slave_manage_host_flood(dev);
}

static void dsa_slave_set_rx_mode(struct net_device *dev)
{
	__dev_mc_sync(dev, dsa_slave_sync_mc, dsa_slave_unsync_mc);
	__dev_uc_sync(dev, dsa_slave_sync_uc, dsa_slave_unsync_uc);
}

static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* If the port is down, the address isn't synced yet to hardware or
	 * to the DSA master, so there is nothing to change.
	 */
	if (!(dev->flags & IFF_UP))
		goto out_change_dev_addr;

	if (dsa_switch_supports_uc_filtering(ds)) {
		err = dsa_port_standalone_host_fdb_add(dp, addr->sa_data, 0);
		if (err)
			return err;
	}

	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			goto del_unicast;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);

out_change_dev_addr:
	eth_hw_addr_set(dev, addr->sa_data);

	return 0;

del_unicast:
	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, addr->sa_data, 0);

	return err;
}
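
/* FDB dump helpers: every entry reported by the switch driver is translated
 * into an RTM_NEWNEIGH message of an NLM_F_MULTI dump, which is what
 * "bridge fdb show" consumes. cb->args[2] counts entries emitted by earlier
 * passes of the same dump, so those are skipped here.
 */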
struct dsa_slave_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			   bool is_static, void *data)
{
	struct dsa_slave_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = NTF_SELF;
	ndm->ndm_type = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

static int
dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
		   struct net_device *dev, struct net_device *filter_dev,
		   int *idx)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_dump_ctx dump = {
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
	*idx = dump.idx;

	return err;
}

static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	int port = p->dp->index;

	/* Pass through to switch driver if it supports timestamping */
	switch (cmd) {
	case SIOCGHWTSTAMP:
		if (ds->ops->port_hwtstamp_get)
			return ds->ops->port_hwtstamp_get(ds, port, ifr);
		break;
	case SIOCSHWTSTAMP:
		if (ds->ops->port_hwtstamp_set)
			return ds->ops->port_hwtstamp_set(ds, port, ifr);
		break;
	}

	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}

static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
				   const struct switchdev_attr *attr,
				   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int ret;

	if (ctx && ctx != dp)
		return 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_set_state(dp, attr->u.stp_state, true);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MST_STATE:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_set_mst_state(dp, &attr->u.mst_state, extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
					      extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MST:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_mst_enable(dp, attr->u.mst, extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
						extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
		break;
	case SWITCHDEV_ATTR_ID_VLAN_MSTI:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_vlan_msti(dp, &attr->u.vlan_msti);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/* Must be called under rcu_read_lock() */
static int
dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
				      const struct switchdev_obj_port_vlan *vlan)
{
	struct net_device *upper_dev;
	struct list_head *iter;

	netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
		u16 vid;

		if (!is_vlan_dev(upper_dev))
			continue;

		vid = vlan_dev_vlan_id(upper_dev);
		if (vid == vlan->vid)
			return -EBUSY;
	}

	return 0;
}
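
/* Example of the conflict detected by the helper above (hypothetical port
 * names): with "ip link add link swp0 name swp0.100 type vlan id 100" in
 * place, "bridge vlan add dev swp0 vid 100" on a VLAN-aware bridge is
 * denied with -EBUSY, since the 8021q upper and the bridge VLAN would
 * clash in hardware.
 */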
static int dsa_slave_vlan_add(struct net_device *dev,
			      const struct switchdev_obj *obj,
			      struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;
	int err;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
	 * the same VID.
	 */
	if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) {
		rcu_read_lock();
		err = dsa_slave_vlan_check_for_8021q_uppers(dev, vlan);
		rcu_read_unlock();
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Port already has a VLAN upper with this VID");
			return err;
		}
	}

	return dsa_port_vlan_add(dp, vlan, extack);
}

/* Offload a VLAN installed on the bridge or on a foreign interface by
 * installing it as a VLAN towards the CPU port.
 */
static int dsa_slave_host_vlan_add(struct net_device *dev,
				   const struct switchdev_obj *obj,
				   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan;

	/* Do nothing if this is a software bridge */
	if (!dp->bridge)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Even though drivers often handle CPU membership in special ways,
	 * it doesn't make sense to program a PVID, so clear this flag.
	 */
	vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;

	return dsa_port_host_vlan_add(dp, &vlan, extack);
}
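
/* Blocking switchdev object handlers. obj->orig_dev is the device on which
 * the object was originally notified: a bridge port that this port offloads,
 * the bridge device itself, or a foreign interface. Objects that do not
 * concern this port are declined with -EOPNOTSUPP, leaving them to the
 * software datapath.
 */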
static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
				  const struct switchdev_obj *obj,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_slave_vlan_add(dev, obj, extack);
		else
			err = dsa_slave_host_vlan_add(dev, obj, extack);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_slave_vlan_del(struct net_device *dev,
			      const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	return dsa_port_vlan_del(dp, vlan);
}

static int dsa_slave_host_vlan_del(struct net_device *dev,
				   const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;

	/* Do nothing if this is a software bridge */
	if (!dp->bridge)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	return dsa_port_host_vlan_del(dp, vlan);
}

static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_slave_vlan_del(dev, obj);
		else
			err = dsa_slave_host_vlan_del(dev, obj);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
						     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_slave_priv *p = netdev_priv(dev);

	return netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
	return NETDEV_TX_OK;
#endif
}
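
/* Hand the skb to the switch driver so it can initiate a hardware TX
 * timestamp for PTP packets that requested one; drivers typically clone
 * the skb and complete the timestamp asynchronously.
 */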
static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
				 struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return;

	if (!ds->ops->port_txtstamp)
		return;

	ds->ops->port_txtstamp(ds, p->dp->index, skb);
}

netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
{
	/* SKBs for netpoll still need to be mangled with the protocol-specific
	 * tag to be successfully transmitted
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_slave_netpoll_send_skb(dev, skb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	skb->dev = dsa_slave_to_master(dev);
	dev_queue_xmit(skb);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);

static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
{
	int needed_headroom = dev->needed_headroom;
	int needed_tailroom = dev->needed_tailroom;

	/* For tail taggers, we need to pad short frames ourselves, to ensure
	 * that the tail tag does not fail at its role of being at the end of
	 * the packet, once the master interface pads the frame. Account for
	 * that pad length here, and pad later.
	 */
	if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
		needed_tailroom += ETH_ZLEN - skb->len;
	/* skb_headroom() returns unsigned int... */
	needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);

	if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
		/* No reallocation needed, yay! */
		return 0;

	return pskb_expand_head(skb, needed_headroom, needed_tailroom,
				GFP_ATOMIC);
}

static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct sk_buff *nskb;

	dev_sw_netstats_tx_add(dev, 1, skb->len);

	memset(skb->cb, 0, sizeof(skb->cb));

	/* Handle tx timestamp if any */
	dsa_skb_tx_timestamp(p, skb);

	if (dsa_realloc_skb(skb, dev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* needed_tailroom should still be 'warm' in the cache line from
	 * dsa_realloc_skb(), which has also ensured that padding is safe.
	 */
	if (dev->needed_tailroom)
		eth_skb_pad(skb);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	return dsa_enqueue_skb(nskb, dev);
}

/* ethtool operations *******************************************************/

static void dsa_slave_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}

static int dsa_slave_get_regs_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs_len)
		return ds->ops->get_regs_len(ds, dp->index);

	return -EOPNOTSUPP;
}

static void
dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs)
		ds->ops->get_regs(ds, dp->index, regs, _p);
}

static int dsa_slave_nway_reset(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_nway_reset(dp->pl);
}

static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->cd && ds->cd->eeprom_len)
		return ds->cd->eeprom_len;

	if (ds->ops->get_eeprom_len)
		return ds->ops->get_eeprom_len(ds);

	return 0;
}

static int dsa_slave_get_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eeprom)
		return ds->ops->get_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static int dsa_slave_set_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->set_eeprom)
		return ds->ops->set_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}
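
/* The first four "ethtool -S" counters are software statistics maintained
 * by DSA itself from the per-CPU tstats; driver-provided strings and
 * counters follow at offset 4 of the string and value arrays.
 */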
static void dsa_slave_get_strings(struct net_device *dev,
				  uint32_t stringset, uint8_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strncpy(data, "tx_packets", len);
		strncpy(data + len, "tx_bytes", len);
		strncpy(data + 2 * len, "rx_packets", len);
		strncpy(data + 3 * len, "rx_bytes", len);
		if (ds->ops->get_strings)
			ds->ops->get_strings(ds, dp->index, stringset,
					     data + 4 * len);
	} else if (stringset == ETH_SS_TEST) {
		net_selftest_get_strings(data);
	}
}

static void dsa_slave_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats,
					uint64_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(dev->tstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = s->tx_packets;
			tx_bytes = s->tx_bytes;
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}

static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (sset == ETH_SS_STATS) {
		int count = 0;

		if (ds->ops->get_sset_count) {
			count = ds->ops->get_sset_count(ds, dp->index, sset);
			if (count < 0)
				return count;
		}

		return count + 4;
	} else if (sset == ETH_SS_TEST) {
		return net_selftest_get_count();
	}

	return -EOPNOTSUPP;
}

static void dsa_slave_get_eth_phy_stats(struct net_device *dev,
					struct ethtool_eth_phy_stats *phy_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_phy_stats)
		ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats);
}

static void dsa_slave_get_eth_mac_stats(struct net_device *dev,
					struct ethtool_eth_mac_stats *mac_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_mac_stats)
		ds->ops->get_eth_mac_stats(ds, dp->index, mac_stats);
}

static void
dsa_slave_get_eth_ctrl_stats(struct net_device *dev,
			     struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_ctrl_stats)
		ds->ops->get_eth_ctrl_stats(ds, dp->index, ctrl_stats);
}

static void dsa_slave_net_selftest(struct net_device *ndev,
				   struct ethtool_test *etest, u64 *buf)
{
	struct dsa_port *dp = dsa_slave_to_port(ndev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->self_test) {
		ds->ops->self_test(ds, dp->index, etest, buf);
		return;
	}

	net_selftest(ndev, etest, buf);
}

static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	phylink_ethtool_get_wol(dp->pl, w);

	if (ds->ops->get_wol)
		ds->ops->get_wol(ds, dp->index, w);
}

static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret = -EOPNOTSUPP;

	phylink_ethtool_set_wol(dp->pl, w);

	if (ds->ops->set_wol)
		ret = ds->ops->set_wol(ds, dp->index, w);

	return ret;
}

static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->set_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->set_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_set_eee(dp->pl, e);
}

static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->get_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->get_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_get_eee(dp->pl, e);
}

static int dsa_slave_get_link_ksettings(struct net_device *dev,
					struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_get(dp->pl, cmd);
}

static int dsa_slave_set_link_ksettings(struct net_device *dev,
					const struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_set(dp->pl, cmd);
}

static void dsa_slave_get_pauseparam(struct net_device *dev,
				     struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	phylink_ethtool_get_pauseparam(dp->pl, pause);
}

static int dsa_slave_set_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_set_pauseparam(dp->pl, pause);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_slave_netpoll_setup(struct net_device *dev,
				   struct netpoll_info *ni)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	if (!netpoll)
		return -ENOMEM;

	err = __netpoll_setup(netpoll, master);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	p->netpoll = netpoll;
out:
	return err;
}

static void dsa_slave_netpoll_cleanup(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll = p->netpoll;

	if (!netpoll)
		return;

	p->netpoll = NULL;

	__netpoll_free(netpoll);
}

static void dsa_slave_poll_controller(struct net_device *dev)
{
}
#endif
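
/* tc-matchall offload: mirroring and policing rules on the port's clsact
 * qdisc are offered to the switch driver. A hypothetical example:
 *
 *   tc qdisc add dev swp0 clsact
 *   tc filter add dev swp0 ingress matchall skip_sw \
 *           action mirred egress mirror dev swp1
 *
 * ends up in dsa_slave_add_cls_matchall_mirred() below.
 */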
static struct dsa_mall_tc_entry *
dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_mirror_tc_entry *mirror;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	struct dsa_port *to_dp;
	int err;

	if (!ds->ops->port_mirror_add)
		return -EOPNOTSUPP;

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	act = &cls->rule->action.entries[0];

	if (!act->dev)
		return -EINVAL;

	if (!dsa_slave_dev_check(act->dev))
		return -EOPNOTSUPP;

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
	mirror = &mall_tc_entry->mirror;

	to_dp = dsa_slave_to_port(act->dev);

	mirror->to_local_port = to_dp->index;
	mirror->ingress = ingress;

	err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress, extack);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

static int
dsa_slave_add_cls_matchall_police(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_policer_tc_entry *policer;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	int err;

	if (!ds->ops->port_policer_add) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Policing offload not implemented");
		return -EOPNOTSUPP;
	}

	if (!ingress) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only supported on ingress qdisc");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
		if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only one port policer allowed");
			return -EEXIST;
		}
	}

	act = &cls->rule->action.entries[0];

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_POLICER;
	policer = &mall_tc_entry->policer;
	policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
	policer->burst = act->police.burst;

	err = ds->ops->port_policer_add(ds, dp->index, policer);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

static int dsa_slave_add_cls_matchall(struct net_device *dev,
				      struct tc_cls_matchall_offload *cls,
				      bool ingress)
{
	int err = -EOPNOTSUPP;

	if (cls->common.protocol == htons(ETH_P_ALL) &&
	    flow_offload_has_one_action(&cls->rule->action) &&
	    cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
		err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
	else if (flow_offload_has_one_action(&cls->rule->action) &&
		 cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
		err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);

	return err;
}

static void dsa_slave_del_cls_matchall(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;

	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
	if (!mall_tc_entry)
		return;

	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case DSA_PORT_MALL_MIRROR:
		if (ds->ops->port_mirror_del)
			ds->ops->port_mirror_del(ds, dp->index,
						 &mall_tc_entry->mirror);
		break;
	case DSA_PORT_MALL_POLICER:
		if (ds->ops->port_policer_del)
			ds->ops->port_policer_del(ds, dp->index);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
					   struct tc_cls_matchall_offload *cls,
					   bool ingress)
{
	if (cls->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dsa_slave_add_cls_matchall(dev, cls, ingress);
	case TC_CLSMATCHALL_DESTROY:
		dsa_slave_del_cls_matchall(dev, cls);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_add_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_add)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_add(ds, port, cls, ingress);
}

static int dsa_slave_del_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_del)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_del(ds, port, cls, ingress);
}

static int dsa_slave_stats_cls_flower(struct net_device *dev,
				      struct flow_cls_offload *cls,
				      bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_stats)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_stats(ds, port, cls, ingress);
}

static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
					 struct flow_cls_offload *cls,
					 bool ingress)
{
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		return dsa_slave_add_cls_flower(dev, cls, ingress);
	case FLOW_CLS_DESTROY:
		return dsa_slave_del_cls_flower(dev, cls, ingress);
	case FLOW_CLS_STATS:
		return dsa_slave_stats_cls_flower(dev, cls, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				       void *cb_priv, bool ingress)
{
	struct net_device *dev = cb_priv;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
	case TC_SETUP_CLSFLOWER:
		return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
}

static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
}

static LIST_HEAD(dsa_slave_block_cb_list);

static int dsa_slave_setup_tc_block(struct net_device *dev,
				    struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		cb = dsa_slave_setup_tc_block_cb_ig;
	else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		cb = dsa_slave_setup_tc_block_cb_eg;
	else
		return -EOPNOTSUPP;

	f->driver_block_list = &dsa_slave_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
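
/* TC_SETUP_FT: flowtable offload requests cannot be satisfied by the user
 * port itself and are instead forwarded to the DSA master's .ndo_setup_tc,
 * if the master implements one.
 */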
static int dsa_slave_setup_ft_block(struct dsa_switch *ds, int port,
				    void *type_data)
{
	struct dsa_port *cpu_dp = dsa_to_port(ds, port)->cpu_dp;
	struct net_device *master = cpu_dp->master;

	if (!master->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	return master->netdev_ops->ndo_setup_tc(master, TC_SETUP_FT, type_data);
}

static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	switch (type) {
	case TC_SETUP_BLOCK:
		return dsa_slave_setup_tc_block(dev, type_data);
	case TC_SETUP_FT:
		return dsa_slave_setup_ft_block(ds, dp->index, type_data);
	default:
		break;
	}

	if (!ds->ops->port_setup_tc)
		return -EOPNOTSUPP;

	return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
}

static int dsa_slave_get_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}

static int dsa_slave_set_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->set_rxnfc(ds, dp->index, nfc);
}

static int dsa_slave_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *ts)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;

	if (!ds->ops->get_ts_info)
		return -EOPNOTSUPP;

	return ds->ops->get_ts_info(ds, p->dp->index, ts);
}
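
/* These back the NETIF_F_HW_VLAN_CTAG_FILTER feature: VLANs of 8021q uppers
 * are programmed as tagged, non-PVID entries on both the user port and its
 * dedicated CPU port, so that tagged traffic can flow in both directions.
 */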
static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
				     u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	struct netlink_ext_ack extack = {0};
	int ret;

	/* User port... */
	ret = dsa_port_vlan_add(dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "%s\n", extack._msg);
		return ret;
	}

	/* And CPU port... */
	ret = dsa_port_host_vlan_add(dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index,
				   extack._msg);
		return ret;
	}

	return 0;
}

static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
				      u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.vid = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	int err;

	err = dsa_port_vlan_del(dp, &vlan);
	if (err)
		return err;

	return dsa_port_host_vlan_del(dp, &vlan);
}

static int dsa_slave_restore_vlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);

	return dsa_slave_vlan_rx_add_vid(arg, proto, vid);
}

static int dsa_slave_clear_vlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);

	return dsa_slave_vlan_rx_kill_vid(arg, proto, vid);
}

/* Keep the VLAN RX filtering list in sync with the hardware only if VLAN
 * filtering is enabled. The baseline is that only ports that offload a
 * VLAN-aware bridge are VLAN-aware, and standalone ports are VLAN-unaware,
 * but there are exceptions for quirky hardware.
 *
 * If ds->vlan_filtering_is_global = true, then standalone ports which share
 * the same switch with other ports that offload a VLAN-aware bridge are also
 * inevitably VLAN-aware.
 *
 * To summarize, a DSA switch port offloads:
 *
 * - If standalone (this includes software bridge, software LAG):
 *   - if ds->needs_standalone_vlan_filtering = true, OR if
 *     (ds->vlan_filtering_is_global = true AND there are bridges spanning
 *     this switch chip which have vlan_filtering=1)
 *     - the 8021q upper VLANs
 *   - else (standalone VLAN filtering is not needed, VLAN filtering is not
 *     global, or it is, but no port is under a VLAN-aware bridge):
 *     - no VLAN (any 8021q upper is a software VLAN)
 *
 * - If under a vlan_filtering=0 bridge which it offloads:
 *   - if ds->configure_vlan_while_not_filtering = true (default):
 *     - the bridge VLANs. These VLANs are committed to hardware but inactive.
 *   - else (deprecated):
 *     - no VLAN. The bridge VLANs are not restored when VLAN awareness is
 *       enabled, so this behavior is broken and discouraged.
 *
 * - If under a vlan_filtering=1 bridge which it offloads:
 *   - the bridge VLANs
 *   - the 8021q upper VLANs
 */
int dsa_slave_manage_vlan_filtering(struct net_device *slave,
				    bool vlan_filtering)
{
	int err;

	if (vlan_filtering) {
		slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

		err = vlan_for_each(slave, dsa_slave_restore_vlan, slave);
		if (err) {
			vlan_for_each(slave, dsa_slave_clear_vlan, slave);
			slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
			return err;
		}
	} else {
		err = vlan_for_each(slave, dsa_slave_clear_vlan, slave);
		if (err)
			return err;

		slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	return 0;
}

struct dsa_hw_port {
	struct list_head list;
	struct net_device *dev;
	int old_mtu;
};

static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
{
	const struct dsa_hw_port *p;
	int err;

	list_for_each_entry(p, hw_port_list, list) {
		if (p->dev->mtu == mtu)
			continue;

		err = dev_set_mtu(p->dev, mtu);
		if (err)
			goto rollback;
	}

	return 0;

rollback:
	list_for_each_entry_continue_reverse(p, hw_port_list, list) {
		if (p->dev->mtu == p->old_mtu)
			continue;

		if (dev_set_mtu(p->dev, p->old_mtu))
			netdev_err(p->dev, "Failed to restore MTU\n");
	}

	return err;
}

static void dsa_hw_port_list_free(struct list_head *hw_port_list)
{
	struct dsa_hw_port *p, *n;

	list_for_each_entry_safe(p, n, hw_port_list, list)
		kfree(p);
}

/* Make the hardware datapath to/from @dev limited to a common MTU */
static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
{
	struct list_head hw_port_list;
	struct dsa_switch_tree *dst;
	int min_mtu = ETH_MAX_MTU;
	struct dsa_port *other_dp;
	int err;

	if (!dp->ds->mtu_enforcement_ingress)
		return;

	if (!dp->bridge)
		return;

	INIT_LIST_HEAD(&hw_port_list);

	/* Populate the list of ports that are part of the same bridge
	 * as the newly added/modified port
	 */
	list_for_each_entry(dst, &dsa_tree_list, list) {
		list_for_each_entry(other_dp, &dst->ports, list) {
			struct dsa_hw_port *hw_port;
			struct net_device *slave;

			if (other_dp->type != DSA_PORT_TYPE_USER)
				continue;

			if (!dsa_port_bridge_same(dp, other_dp))
				continue;

			if (!other_dp->ds->mtu_enforcement_ingress)
				continue;

			slave = other_dp->slave;

			if (min_mtu > slave->mtu)
				min_mtu = slave->mtu;

			hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
			if (!hw_port)
				goto out;

			hw_port->dev = slave;
			hw_port->old_mtu = slave->mtu;

			list_add(&hw_port->list, &hw_port_list);
		}
	}

	/* Attempt to configure the entire hardware bridge to the newly added
	 * interface's MTU first, regardless of whether the intention of the
	 * user was to raise or lower it.
	 */
	err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
	if (!err)
		goto out;

	/* Clearly that didn't work out so well, so just set the minimum MTU on
	 * all hardware bridge ports now. If this fails too, then all ports will
	 * still have their old MTU rolled back anyway.
	 */
	dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);

out:
	dsa_hw_port_list_free(&hw_port_list);
}
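
/* The master must fit the largest slave MTU plus the tagging protocol
 * overhead: e.g. (hypothetical numbers) a 1500 byte slave MTU and an 8 byte
 * tag require a 1508 byte master MTU. The checks below compute this across
 * all user ports of the tree before committing anything to hardware.
 */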
int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	int largest_mtu = 0;
	int new_master_mtu;
	int old_master_mtu;
	int mtu_limit;
	int cpu_mtu;
	int err;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	dsa_tree_for_each_user_port(other_dp, ds->dst) {
		int slave_mtu;

		/* During probe, this function will be called for each slave
		 * device, while not all of them have been allocated. That's
		 * ok, it doesn't change what the maximum is, so ignore it.
		 */
		if (!other_dp->slave)
			continue;

		/* Pretend that we already applied the setting, which we
		 * actually haven't (still haven't done all integrity checks)
		 */
		if (dp == other_dp)
			slave_mtu = new_mtu;
		else
			slave_mtu = other_dp->slave->mtu;

		if (largest_mtu < slave_mtu)
			largest_mtu = slave_mtu;
	}

	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
	old_master_mtu = master->mtu;
	new_master_mtu = largest_mtu + dsa_tag_protocol_overhead(cpu_dp->tag_ops);
	if (new_master_mtu > mtu_limit)
		return -ERANGE;

	/* If the master MTU isn't over limit, there's no need to check the CPU
	 * MTU, since that surely isn't either.
	 */
	cpu_mtu = largest_mtu;

	/* Start applying stuff */
	if (new_master_mtu != old_master_mtu) {
		err = dev_set_mtu(master, new_master_mtu);
		if (err < 0)
			goto out_master_failed;

		/* We only need to propagate the MTU of the CPU port to
		 * upstream switches, so emit a notifier which updates them.
		 */
		err = dsa_port_mtu_change(cpu_dp, cpu_mtu);
		if (err)
			goto out_cpu_failed;
	}

	err = ds->ops->port_change_mtu(ds, dp->index, new_mtu);
	if (err)
		goto out_port_failed;

	dev->mtu = new_mtu;

	dsa_bridge_mtu_normalization(dp);

	return 0;

out_port_failed:
	if (new_master_mtu != old_master_mtu)
		dsa_port_mtu_change(cpu_dp, old_master_mtu -
				    dsa_tag_protocol_overhead(cpu_dp->tag_ops));
out_cpu_failed:
	if (new_master_mtu != old_master_mtu)
		dev_set_mtu(master, old_master_mtu);
out_master_failed:
	return err;
}
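
/* DCB APP table handlers. An ETHERTYPE-selector entry with protocol 0 sets
 * the port's default priority, and DSCP-selector entries map a DSCP value
 * to a priority; when several entries share a key, the numerically highest
 * priority (__fls of the APP mask) wins. For instance, with the iproute2
 * dcb tool (example port name): "dcb app add dev swp0 default-prio 3" or
 * "dcb app add dev swp0 dscp-prio 24:3".
 */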
static int __maybe_unused
dsa_slave_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	unsigned long mask, new_prio;
	int err, port = dp->index;

	if (!ds->ops->port_set_default_prio)
		return -EOPNOTSUPP;

	err = dcb_ieee_setapp(dev, app);
	if (err)
		return err;

	mask = dcb_ieee_getapp_mask(dev, app);
	new_prio = __fls(mask);

	err = ds->ops->port_set_default_prio(ds, port, new_prio);
	if (err) {
		dcb_ieee_delapp(dev, app);
		return err;
	}

	return 0;
}

static int __maybe_unused
dsa_slave_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	unsigned long mask, new_prio;
	int err, port = dp->index;
	u8 dscp = app->protocol;

	if (!ds->ops->port_add_dscp_prio)
		return -EOPNOTSUPP;

	if (dscp >= 64) {
		netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n",
			   dscp);
		return -EINVAL;
	}

	err = dcb_ieee_setapp(dev, app);
	if (err)
		return err;

	mask = dcb_ieee_getapp_mask(dev, app);
	new_prio = __fls(mask);

	err = ds->ops->port_add_dscp_prio(ds, port, dscp, new_prio);
	if (err) {
		dcb_ieee_delapp(dev, app);
		return err;
	}

	return 0;
}

static int __maybe_unused dsa_slave_dcbnl_ieee_setapp(struct net_device *dev,
						       struct dcb_app *app)
{
	switch (app->selector) {
	case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
		switch (app->protocol) {
		case 0:
			return dsa_slave_dcbnl_set_default_prio(dev, app);
		default:
			return -EOPNOTSUPP;
		}
		break;
	case IEEE_8021QAZ_APP_SEL_DSCP:
		return dsa_slave_dcbnl_add_dscp_prio(dev, app);
	default:
		return -EOPNOTSUPP;
	}
}

static int __maybe_unused
dsa_slave_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	unsigned long mask, new_prio;
	int err, port = dp->index;

	if (!ds->ops->port_set_default_prio)
		return -EOPNOTSUPP;

	err = dcb_ieee_delapp(dev, app);
	if (err)
		return err;

	mask = dcb_ieee_getapp_mask(dev, app);
	new_prio = mask ? __fls(mask) : 0;

	err = ds->ops->port_set_default_prio(ds, port, new_prio);
	if (err) {
		dcb_ieee_setapp(dev, app);
		return err;
	}

	return 0;
}

static int __maybe_unused
dsa_slave_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int err, port = dp->index;
	u8 dscp = app->protocol;

	if (!ds->ops->port_del_dscp_prio)
		return -EOPNOTSUPP;

	err = dcb_ieee_delapp(dev, app);
	if (err)
		return err;

	err = ds->ops->port_del_dscp_prio(ds, port, dscp, app->priority);
	if (err) {
		dcb_ieee_setapp(dev, app);
		return err;
	}

	return 0;
}

static int __maybe_unused dsa_slave_dcbnl_ieee_delapp(struct net_device *dev,
						       struct dcb_app *app)
{
	switch (app->selector) {
	case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
		switch (app->protocol) {
		case 0:
			return dsa_slave_dcbnl_del_default_prio(dev, app);
		default:
			return -EOPNOTSUPP;
		}
		break;
	case IEEE_8021QAZ_APP_SEL_DSCP:
		return dsa_slave_dcbnl_del_dscp_prio(dev, app);
	default:
		return -EOPNOTSUPP;
	}
}

/* Pre-populate the DCB application priority table with the priorities
 * configured during switch setup, which we read from hardware here.
 */
static int dsa_slave_dcbnl_init(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_get_default_prio) {
		int prio = ds->ops->port_get_default_prio(ds, port);
		struct dcb_app app = {
			.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
			.protocol = 0,
			.priority = prio,
		};

		if (prio < 0)
			return prio;

		err = dcb_ieee_setapp(dev, &app);
		if (err)
			return err;
	}

	if (ds->ops->port_get_dscp_prio) {
		int protocol;

		for (protocol = 0; protocol < 64; protocol++) {
			struct dcb_app app = {
				.selector = IEEE_8021QAZ_APP_SEL_DSCP,
				.protocol = protocol,
			};
			int prio;

			prio = ds->ops->port_get_dscp_prio(ds, port, protocol);
			if (prio == -EOPNOTSUPP)
				continue;
			if (prio < 0)
				return prio;

			app.priority = prio;

			err = dcb_ieee_setapp(dev, &app);
			if (err)
				return err;
		}
	}

	return 0;
}
/* Pre-populate the DCB application priority table with the priorities
 * configured during switch setup, which we read from hardware here.
 */
static int dsa_slave_dcbnl_init(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_get_default_prio) {
		int prio = ds->ops->port_get_default_prio(ds, port);
		struct dcb_app app = {
			.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
			.protocol = 0,
			.priority = prio,
		};

		if (prio < 0)
			return prio;

		err = dcb_ieee_setapp(dev, &app);
		if (err)
			return err;
	}

	if (ds->ops->port_get_dscp_prio) {
		int protocol;

		for (protocol = 0; protocol < 64; protocol++) {
			struct dcb_app app = {
				.selector = IEEE_8021QAZ_APP_SEL_DSCP,
				.protocol = protocol,
			};
			int prio;

			prio = ds->ops->port_get_dscp_prio(ds, port, protocol);
			if (prio == -EOPNOTSUPP)
				continue;
			if (prio < 0)
				return prio;

			app.priority = prio;

			err = dcb_ieee_setapp(dev, &app);
			if (err)
				return err;
		}
	}

	return 0;
}

static const struct ethtool_ops dsa_slave_ethtool_ops = {
	.get_drvinfo		= dsa_slave_get_drvinfo,
	.get_regs_len		= dsa_slave_get_regs_len,
	.get_regs		= dsa_slave_get_regs,
	.nway_reset		= dsa_slave_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= dsa_slave_get_eeprom_len,
	.get_eeprom		= dsa_slave_get_eeprom,
	.set_eeprom		= dsa_slave_set_eeprom,
	.get_strings		= dsa_slave_get_strings,
	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
	.get_sset_count		= dsa_slave_get_sset_count,
	.get_eth_phy_stats	= dsa_slave_get_eth_phy_stats,
	.get_eth_mac_stats	= dsa_slave_get_eth_mac_stats,
	.get_eth_ctrl_stats	= dsa_slave_get_eth_ctrl_stats,
	.set_wol		= dsa_slave_set_wol,
	.get_wol		= dsa_slave_get_wol,
	.set_eee		= dsa_slave_set_eee,
	.get_eee		= dsa_slave_get_eee,
	.get_link_ksettings	= dsa_slave_get_link_ksettings,
	.set_link_ksettings	= dsa_slave_set_link_ksettings,
	.get_pauseparam		= dsa_slave_get_pauseparam,
	.set_pauseparam		= dsa_slave_set_pauseparam,
	.get_rxnfc		= dsa_slave_get_rxnfc,
	.set_rxnfc		= dsa_slave_set_rxnfc,
	.get_ts_info		= dsa_slave_get_ts_info,
	.self_test		= dsa_slave_net_selftest,
};

static const struct dcbnl_rtnl_ops __maybe_unused dsa_slave_dcbnl_ops = {
	.ieee_setapp		= dsa_slave_dcbnl_ieee_setapp,
	.ieee_delapp		= dsa_slave_dcbnl_ieee_delapp,
};

static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return &dp->devlink_port;
}

static void dsa_slave_get_stats64(struct net_device *dev,
				  struct rtnl_link_stats64 *s)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_stats64)
		ds->ops->get_stats64(ds, dp->index, s);
	else
		dev_get_tstats64(dev, s);
}
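
/* dsa_slave_fill_forward_path() below describes one hop of a netdev
 * transmit path for users such as the netfilter flowtable offload. A
 * minimal sketch of how such a path is resolved (local names assumed):
 *
 *	struct net_device_path_stack stack;
 *
 *	err = dev_fill_forward_path(slave_dev, dst_mac, &stack);
 *	// On success, stack contains a DEV_PATH_DSA entry carrying the
 *	// tagging protocol and switch port, with the context advanced
 *	// to the DSA master for the next hop.
 */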
static int dsa_slave_fill_forward_path(struct net_device_path_ctx *ctx,
				       struct net_device_path *path)
{
	struct dsa_port *dp = dsa_slave_to_port(ctx->dev);
	struct dsa_port *cpu_dp = dp->cpu_dp;

	path->dev = ctx->dev;
	path->type = DEV_PATH_DSA;
	path->dsa.proto = cpu_dp->tag_ops->proto;
	path->dsa.port = dp->index;
	ctx->dev = cpu_dp->master;

	return 0;
}

static const struct net_device_ops dsa_slave_netdev_ops = {
	.ndo_open		= dsa_slave_open,
	.ndo_stop		= dsa_slave_close,
	.ndo_start_xmit		= dsa_slave_xmit,
	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
	.ndo_set_mac_address	= dsa_slave_set_mac_address,
	.ndo_fdb_dump		= dsa_slave_fdb_dump,
	.ndo_eth_ioctl		= dsa_slave_ioctl,
	.ndo_get_iflink		= dsa_slave_get_iflink,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	= dsa_slave_netpoll_setup,
	.ndo_netpoll_cleanup	= dsa_slave_netpoll_cleanup,
	.ndo_poll_controller	= dsa_slave_poll_controller,
#endif
	.ndo_setup_tc		= dsa_slave_setup_tc,
	.ndo_get_stats64	= dsa_slave_get_stats64,
	.ndo_vlan_rx_add_vid	= dsa_slave_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= dsa_slave_vlan_rx_kill_vid,
	.ndo_get_devlink_port	= dsa_slave_get_devlink_port,
	.ndo_change_mtu		= dsa_slave_change_mtu,
	.ndo_fill_forward_path	= dsa_slave_fill_forward_path,
};

static struct device_type dsa_type = {
	.name	= "dsa",
};

void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
{
	const struct dsa_port *dp = dsa_to_port(ds, port);

	if (dp->pl)
		phylink_mac_change(dp->pl, up);
}
EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);

static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
					  struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	/* No need to check that this operation is valid, the callback would
	 * not be called if it was not.
	 */
	ds->ops->phylink_fixed_state(ds, dp->index, state);
}

/* slave device setup *******************************************************/
static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr,
				 u32 flags)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_switch *ds = dp->ds;

	slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
	if (!slave_dev->phydev) {
		netdev_err(slave_dev, "no phy at %d\n", addr);
		return -ENODEV;
	}

	slave_dev->phydev->dev_flags |= flags;

	return phylink_connect_phy(dp->pl, slave_dev->phydev);
}

static int dsa_slave_phy_setup(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct device_node *port_dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	u32 phy_flags = 0;
	int ret;

	dp->pl_config.dev = &slave_dev->dev;
	dp->pl_config.type = PHYLINK_NETDEV;

	/* The get_fixed_state callback takes precedence over polling the
	 * link GPIO in PHYLINK (see phylink_get_fixed_state). Only set
	 * this if the switch provides such a callback.
	 */
	if (ds->ops->phylink_fixed_state) {
		dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
		dp->pl_config.poll_fixed_state = true;
	}

	ret = dsa_port_phylink_create(dp);
	if (ret)
		return ret;

	if (ds->ops->get_phy_flags)
		phy_flags = ds->ops->get_phy_flags(ds, dp->index);

	ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
	if (ret == -ENODEV && ds->slave_mii_bus) {
		/* We could not connect to a designated PHY or SFP, so try to
		 * use the switch internal MDIO bus instead
		 */
		ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags);
	}
	if (ret) {
		netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
			   ERR_PTR(ret));
		phylink_destroy(dp->pl);
	}

	return ret;
}
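
/* Worked example for dsa_slave_setup_tagger() below, with assumed values
 * rather than ones taken from a real tagger: if the tagging protocol
 * prepends a 4-byte header (needed_headroom = 4, needed_tailroom = 0)
 * and the master itself advertises 2 bytes of needed_headroom, then:
 *
 *	slave->needed_headroom = 4 + 2;	// tag + inherited master headroom
 *	slave->needed_tailroom = 0;
 *
 * so skbs sized against the slave can usually have the tag pushed and be
 * handed to the master without a reallocation.
 */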
void dsa_slave_setup_tagger(struct net_device *slave)
{
	struct dsa_port *dp = dsa_slave_to_port(slave);
	struct dsa_slave_priv *p = netdev_priv(slave);
	const struct dsa_port *cpu_dp = dp->cpu_dp;
	struct net_device *master = cpu_dp->master;
	const struct dsa_switch *ds = dp->ds;

	slave->needed_headroom = cpu_dp->tag_ops->needed_headroom;
	slave->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
	/* Try to save one extra realloc later in the TX path (in the master)
	 * by also inheriting the master's needed headroom and tailroom.
	 * The 8021q driver also does this.
	 */
	slave->needed_headroom += master->needed_headroom;
	slave->needed_tailroom += master->needed_tailroom;

	p->xmit = cpu_dp->tag_ops->xmit;

	slave->features = master->vlan_features | NETIF_F_HW_TC;
	slave->hw_features |= NETIF_F_HW_TC;
	slave->features |= NETIF_F_LLTX;
	if (slave->needed_tailroom)
		slave->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
	if (ds->needs_standalone_vlan_filtering)
		slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}

int dsa_slave_suspend(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);

	if (!netif_running(slave_dev))
		return 0;

	netif_device_detach(slave_dev);

	rtnl_lock();
	phylink_stop(dp->pl);
	rtnl_unlock();

	return 0;
}

int dsa_slave_resume(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);

	if (!netif_running(slave_dev))
		return 0;

	netif_device_attach(slave_dev);

	rtnl_lock();
	phylink_start(dp->pl);
	rtnl_unlock();

	return 0;
}
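
/* dsa_slave_create() strings the pieces above together: allocate the
 * netdev with its per-port private data, inherit the MAC address and
 * vlan_features from the master, set up the tagger, connect the PHY
 * through phylink, then register the netdev and link it as a lower of
 * the master. The error unwind at the bottom releases the same
 * resources in reverse order.
 */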
int dsa_slave_create(struct dsa_port *port)
{
	const struct dsa_port *cpu_dp = port->cpu_dp;
	struct net_device *master = cpu_dp->master;
	struct dsa_switch *ds = port->ds;
	const char *name = port->name;
	struct net_device *slave_dev;
	struct dsa_slave_priv *p;
	int ret;

	if (!ds->num_tx_queues)
		ds->num_tx_queues = 1;

	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
				     NET_NAME_UNKNOWN, ether_setup,
				     ds->num_tx_queues, 1);
	if (slave_dev == NULL)
		return -ENOMEM;

	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
#if IS_ENABLED(CONFIG_DCB)
	slave_dev->dcbnl_ops = &dsa_slave_dcbnl_ops;
#endif
	if (!is_zero_ether_addr(port->mac))
		eth_hw_addr_set(slave_dev, port->mac);
	else
		eth_hw_addr_inherit(slave_dev, master);
	slave_dev->priv_flags |= IFF_NO_QUEUE;
	if (dsa_switch_supports_uc_filtering(ds))
		slave_dev->priv_flags |= IFF_UNICAST_FLT;
	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
	if (ds->ops->port_max_mtu)
		slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);

	SET_NETDEV_DEV(slave_dev, port->ds->dev);
	slave_dev->dev.of_node = port->dn;
	slave_dev->vlan_features = master->vlan_features;

	p = netdev_priv(slave_dev);
	slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!slave_dev->tstats) {
		free_netdev(slave_dev);
		return -ENOMEM;
	}

	ret = gro_cells_init(&p->gcells, slave_dev);
	if (ret)
		goto out_free;

	p->dp = port;
	INIT_LIST_HEAD(&p->mall_tc_list);
	port->slave = slave_dev;
	dsa_slave_setup_tagger(slave_dev);

	netif_carrier_off(slave_dev);

	ret = dsa_slave_phy_setup(slave_dev);
	if (ret) {
		netdev_err(slave_dev,
			   "error %d setting up PHY for tree %d, switch %d, port %d\n",
			   ret, ds->dst->index, ds->index, port->index);
		goto out_gcells;
	}

	rtnl_lock();

	ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
	if (ret && ret != -EOPNOTSUPP)
		dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
			 ret, ETH_DATA_LEN, port->index);

	ret = register_netdevice(slave_dev);
	if (ret) {
		netdev_err(master, "error %d registering interface %s\n",
			   ret, slave_dev->name);
		rtnl_unlock();
		goto out_phy;
	}

	if (IS_ENABLED(CONFIG_DCB)) {
		ret = dsa_slave_dcbnl_init(slave_dev);
		if (ret) {
			netdev_err(slave_dev,
				   "failed to initialize DCB: %pe\n",
				   ERR_PTR(ret));
			rtnl_unlock();
			goto out_unregister;
		}
	}

	ret = netdev_upper_dev_link(master, slave_dev, NULL);

	rtnl_unlock();

	if (ret)
		goto out_unregister;

	return 0;

out_unregister:
	unregister_netdev(slave_dev);
out_phy:
	rtnl_lock();
	phylink_disconnect_phy(p->dp->pl);
	rtnl_unlock();
	phylink_destroy(p->dp->pl);
out_gcells:
	gro_cells_destroy(&p->gcells);
out_free:
	free_percpu(slave_dev->tstats);
	free_netdev(slave_dev);
	port->slave = NULL;
	return ret;
}

void dsa_slave_destroy(struct net_device *slave_dev)
{
	struct net_device *master = dsa_slave_to_master(slave_dev);
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_slave_priv *p = netdev_priv(slave_dev);

	netif_carrier_off(slave_dev);
	rtnl_lock();
	netdev_upper_dev_unlink(master, slave_dev);
	unregister_netdevice(slave_dev);
	phylink_disconnect_phy(dp->pl);
	rtnl_unlock();

	phylink_destroy(dp->pl);
	gro_cells_destroy(&p->gcells);
	free_percpu(slave_dev->tstats);
	free_netdev(slave_dev);
}

bool dsa_slave_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &dsa_slave_netdev_ops;
}
EXPORT_SYMBOL_GPL(dsa_slave_dev_check);
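
/* Joining an upper is offloaded at the switch level where possible; when
 * the driver returns -EOPNOTSUPP, the error is squashed so that the
 * bridge, LAG or HSR upper keeps working, just in software through the
 * CPU port instead of being offloaded.
 */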
static int dsa_slave_changeupper(struct net_device *dev,
				 struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct netlink_ext_ack *extack;
	int err = NOTIFY_DONE;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_bridge_join(dp, info->upper_dev, extack);
			if (!err)
				dsa_bridge_mtu_normalization(dp);
			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Offloading not supported");
				err = 0;
			}
			err = notifier_from_errno(err);
		} else {
			dsa_port_bridge_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	} else if (netif_is_lag_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_lag_join(dp, info->upper_dev,
						info->upper_info, extack);
			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Offloading not supported");
				err = 0;
			}
			err = notifier_from_errno(err);
		} else {
			dsa_port_lag_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	} else if (is_hsr_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_hsr_join(dp, info->upper_dev);
			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Offloading not supported");
				err = 0;
			}
			err = notifier_from_errno(err);
		} else {
			dsa_port_hsr_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	}

	return err;
}

static int dsa_slave_prechangeupper(struct net_device *dev,
				    struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	if (netif_is_bridge_master(info->upper_dev) && !info->linking)
		dsa_port_pre_bridge_leave(dp, info->upper_dev);
	else if (netif_is_lag_master(info->upper_dev) && !info->linking)
		dsa_port_pre_lag_leave(dp, info->upper_dev);
	/* dsa_port_pre_hsr_leave is not yet necessary since hsr cannot be
	 * meaningfully enslaved to a bridge yet
	 */

	return NOTIFY_DONE;
}

static int
dsa_slave_lag_changeupper(struct net_device *dev,
			  struct netdev_notifier_changeupper_info *info)
{
	struct net_device *lower;
	struct list_head *iter;
	int err = NOTIFY_DONE;
	struct dsa_port *dp;

	netdev_for_each_lower_dev(dev, lower, iter) {
		if (!dsa_slave_dev_check(lower))
			continue;

		dp = dsa_slave_to_port(lower);
		if (!dp->lag)
			/* Software LAG */
			continue;

		err = dsa_slave_changeupper(lower, info);
		if (notifier_to_errno(err))
			break;
	}

	return err;
}

/* Same as dsa_slave_lag_changeupper() except that it calls
 * dsa_slave_prechangeupper()
 */
static int
dsa_slave_lag_prechangeupper(struct net_device *dev,
			     struct netdev_notifier_changeupper_info *info)
{
	struct net_device *lower;
	struct list_head *iter;
	int err = NOTIFY_DONE;
	struct dsa_port *dp;

	netdev_for_each_lower_dev(dev, lower, iter) {
		if (!dsa_slave_dev_check(lower))
			continue;

		dp = dsa_slave_to_port(lower);
		if (!dp->lag)
			/* Software LAG */
			continue;

		err = dsa_slave_prechangeupper(lower, info);
		if (notifier_to_errno(err))
			break;
	}

	return err;
}
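
/* The two checks below cover the 802.1Q upper corner cases: a VLAN upper
 * of a slave must not use a VID that the slave's VLAN-aware bridge has
 * already installed on the port, and a VLAN device on top of a slave
 * that is in a VLAN-aware bridge must not itself be enslaved to a
 * bridge, since the hardware could not keep the two usages of that VID
 * apart.
 */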
static int
dsa_prevent_bridging_8021q_upper(struct net_device *dev,
				 struct netdev_notifier_changeupper_info *info)
{
	struct netlink_ext_ack *ext_ack;
	struct net_device *slave, *br;
	struct dsa_port *dp;

	ext_ack = netdev_notifier_info_to_extack(&info->info);

	if (!is_vlan_dev(dev))
		return NOTIFY_DONE;

	slave = vlan_dev_real_dev(dev);
	if (!dsa_slave_dev_check(slave))
		return NOTIFY_DONE;

	dp = dsa_slave_to_port(slave);
	br = dsa_port_bridge_dev_get(dp);
	if (!br)
		return NOTIFY_DONE;

	/* Deny enslaving a VLAN device into a VLAN-aware bridge */
	if (br_vlan_enabled(br) &&
	    netif_is_bridge_master(info->upper_dev) && info->linking) {
		NL_SET_ERR_MSG_MOD(ext_ack,
				   "Cannot enslave VLAN device into VLAN aware bridge");
		return notifier_from_errno(-EINVAL);
	}

	return NOTIFY_DONE;
}

static int
dsa_slave_check_8021q_upper(struct net_device *dev,
			    struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct bridge_vlan_info br_info;
	struct netlink_ext_ack *extack;
	int err = NOTIFY_DONE;
	u16 vid;

	if (!br || !br_vlan_enabled(br))
		return NOTIFY_DONE;

	extack = netdev_notifier_info_to_extack(&info->info);
	vid = vlan_dev_vlan_id(info->upper_dev);

	/* br_vlan_get_info() returns -EINVAL when the device is not a
	 * bridge port, and -ENOENT when the VID is not found on it. A
	 * return value of 0 means the VID is already configured on the
	 * bridge, which is a failure for us here.
	 */
	err = br_vlan_get_info(br, vid, &br_info);
	if (err == 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "This VLAN is already configured by the bridge");
		return notifier_from_errno(-EBUSY);
	}

	return NOTIFY_DONE;
}

static int
dsa_slave_prechangeupper_sanity_check(struct net_device *dev,
				      struct netdev_notifier_changeupper_info *info)
{
	struct dsa_switch *ds;
	struct dsa_port *dp;
	int err;

	if (!dsa_slave_dev_check(dev))
		return dsa_prevent_bridging_8021q_upper(dev, info);

	dp = dsa_slave_to_port(dev);
	ds = dp->ds;

	if (ds->ops->port_prechangeupper) {
		err = ds->ops->port_prechangeupper(ds, dp->index, info);
		if (err)
			return notifier_from_errno(err);
	}

	if (is_vlan_dev(info->upper_dev))
		return dsa_slave_check_8021q_upper(dev, info);

	return NOTIFY_DONE;
}
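
/* Central netdevice notifier: dispatches the sanity checks and
 * (pre)changeupper handling above for slaves and their LAG uppers,
 * reacts to LAG lower state changes, and tracks the administrative and
 * operational state of DSA masters.
 */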
static int dsa_slave_netdevice_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_PRECHANGEUPPER: {
		struct netdev_notifier_changeupper_info *info = ptr;
		int err;

		err = dsa_slave_prechangeupper_sanity_check(dev, info);
		if (err != NOTIFY_DONE)
			return err;

		if (dsa_slave_dev_check(dev))
			return dsa_slave_prechangeupper(dev, ptr);

		if (netif_is_lag_master(dev))
			return dsa_slave_lag_prechangeupper(dev, ptr);

		break;
	}
	case NETDEV_CHANGEUPPER:
		if (dsa_slave_dev_check(dev))
			return dsa_slave_changeupper(dev, ptr);

		if (netif_is_lag_master(dev))
			return dsa_slave_lag_changeupper(dev, ptr);

		break;
	case NETDEV_CHANGELOWERSTATE: {
		struct netdev_notifier_changelowerstate_info *info = ptr;
		struct dsa_port *dp;
		int err;

		if (!dsa_slave_dev_check(dev))
			break;

		dp = dsa_slave_to_port(dev);

		err = dsa_port_lag_change(dp, info->lower_state_info);
		return notifier_from_errno(err);
	}
	case NETDEV_CHANGE:
	case NETDEV_UP: {
		/* Track state of master port.
		 * DSA drivers may require the master port (and indirectly
		 * the tagger) to be available for some special operations.
		 */
		if (netdev_uses_dsa(dev)) {
			struct dsa_port *cpu_dp = dev->dsa_ptr;
			struct dsa_switch_tree *dst = cpu_dp->ds->dst;

			/* Track when the master port is UP */
			dsa_tree_master_oper_state_change(dst, dev,
							  netif_oper_up(dev));

			/* Track when the master port is ready and can
			 * accept packets.
			 * The NETDEV_UP event is not enough to flag a port
			 * as ready. We also have to wait for
			 * linkwatch_do_dev to dev_activate and emit a
			 * NETDEV_CHANGE event. We consider a master port
			 * ready once it has a qdisc assigned which is not
			 * the noop qdisc.
			 */
			dsa_tree_master_admin_state_change(dst, dev,
							   !qdisc_tx_is_noop(dev));

			return NOTIFY_OK;
		}

		return NOTIFY_DONE;
	}
	case NETDEV_GOING_DOWN: {
		struct dsa_port *dp, *cpu_dp;
		struct dsa_switch_tree *dst;
		LIST_HEAD(close_list);

		if (!netdev_uses_dsa(dev))
			return NOTIFY_DONE;

		cpu_dp = dev->dsa_ptr;
		dst = cpu_dp->ds->dst;

		dsa_tree_master_admin_state_change(dst, dev, false);

		list_for_each_entry(dp, &dst->ports, list) {
			if (!dsa_port_is_user(dp))
				continue;

			list_add(&dp->slave->close_list, &close_list);
		}

		dev_close_many(&close_list, true);

		return NOTIFY_OK;
	}
	default:
		break;
	}

	return NOTIFY_DONE;
}

static void
dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
{
	struct switchdev_notifier_fdb_info info = {};

	info.addr = switchdev_work->addr;
	info.vid = switchdev_work->vid;
	info.offloaded = true;
	call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
				 switchdev_work->orig_dev, &info.info, NULL);
}

static void dsa_slave_switchdev_event_work(struct work_struct *work)
{
	struct dsa_switchdev_event_work *switchdev_work =
		container_of(work, struct dsa_switchdev_event_work, work);
	const unsigned char *addr = switchdev_work->addr;
	struct net_device *dev = switchdev_work->dev;
	u16 vid = switchdev_work->vid;
	struct dsa_switch *ds;
	struct dsa_port *dp;
	int err;

	dp = dsa_slave_to_port(dev);
	ds = dp->ds;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (switchdev_work->host_addr)
			err = dsa_port_bridge_host_fdb_add(dp, addr, vid);
		else if (dp->lag)
			err = dsa_port_lag_fdb_add(dp, addr, vid);
		else
			err = dsa_port_fdb_add(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to fdb: %d\n",
				dp->index, addr, vid, err);
			break;
		}
		dsa_fdb_offload_notify(switchdev_work);
		break;

	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (switchdev_work->host_addr)
			err = dsa_port_bridge_host_fdb_del(dp, addr, vid);
		else if (dp->lag)
			err = dsa_port_lag_fdb_del(dp, addr, vid);
		else
			err = dsa_port_fdb_del(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from fdb: %d\n",
				dp->index, addr, vid, err);
		}

		break;
	}

	kfree(switchdev_work);
}
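
/* "Foreign" interfaces are those whose FDB entries DSA must treat as
 * host addresses, trapped to the CPU port, rather than something it can
 * offload forwarding towards: bridges this switch tree does not offload,
 * ports of such bridges, and any other non-DSA netdevice.
 */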
static bool dsa_foreign_dev_check(const struct net_device *dev,
				  const struct net_device *foreign_dev)
{
	const struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch_tree *dst = dp->ds->dst;

	if (netif_is_bridge_master(foreign_dev))
		return !dsa_tree_offloads_bridge_dev(dst, foreign_dev);

	if (netif_is_bridge_port(foreign_dev))
		return !dsa_tree_offloads_bridge_port(dst, foreign_dev);

	/* Everything else is foreign */
	return true;
}

static int dsa_slave_fdb_event(struct net_device *dev,
			       struct net_device *orig_dev,
			       unsigned long event, const void *ctx,
			       const struct switchdev_notifier_fdb_info *fdb_info)
{
	struct dsa_switchdev_event_work *switchdev_work;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	bool host_addr = fdb_info->is_local;
	struct dsa_switch *ds = dp->ds;

	if (ctx && ctx != dp)
		return 0;

	if (!dp->bridge)
		return 0;

	if (switchdev_fdb_is_dynamically_learned(fdb_info)) {
		if (dsa_port_offloads_bridge_port(dp, orig_dev))
			return 0;

		/* FDB entries learned by the software bridge or by foreign
		 * bridge ports should be installed as host addresses only if
		 * the driver requests assisted learning.
		 */
		if (!ds->assisted_learning_on_cpu_port)
			return 0;
	}

	/* Also treat FDB entries on foreign interfaces bridged with us as host
	 * addresses.
	 */
	if (dsa_foreign_dev_check(dev, orig_dev))
		host_addr = true;

	/* Check early that we're not doing work in vain.
	 * Host addresses on LAG ports still require regular FDB ops,
	 * since the CPU port isn't in a LAG.
	 */
	if (dp->lag && !host_addr) {
		if (!ds->ops->lag_fdb_add || !ds->ops->lag_fdb_del)
			return -EOPNOTSUPP;
	} else {
		if (!ds->ops->port_fdb_add || !ds->ops->port_fdb_del)
			return -EOPNOTSUPP;
	}

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return -ENOMEM;

	netdev_dbg(dev, "%s FDB entry towards %s, addr %pM vid %d%s\n",
		   event == SWITCHDEV_FDB_ADD_TO_DEVICE ? "Adding" : "Deleting",
		   orig_dev->name, fdb_info->addr, fdb_info->vid,
		   host_addr ? " as host address" : "");

	INIT_WORK(&switchdev_work->work, dsa_slave_switchdev_event_work);
	switchdev_work->event = event;
	switchdev_work->dev = dev;
	switchdev_work->orig_dev = orig_dev;

	ether_addr_copy(switchdev_work->addr, fdb_info->addr);
	switchdev_work->vid = fdb_info->vid;
	switchdev_work->host_addr = host_addr;

	dsa_schedule_work(&switchdev_work->work);

	return 0;
}

/* Called under rcu_read_lock() */
static int dsa_slave_switchdev_event(struct notifier_block *unused,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_slave_dev_check,
						     dsa_slave_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
							   dsa_slave_dev_check,
							   dsa_foreign_dev_check,
							   dsa_slave_fdb_event);
		return notifier_from_errno(err);
	default:
		return NOTIFY_DONE;
	}
}

static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
					      unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add_foreign(dev, ptr,
							    dsa_slave_dev_check,
							    dsa_foreign_dev_check,
							    dsa_slave_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del_foreign(dev, ptr,
							    dsa_slave_dev_check,
							    dsa_foreign_dev_check,
							    dsa_slave_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_slave_dev_check,
						     dsa_slave_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}
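
/* Three notifiers tie the above together: netdevice events for uppers
 * and masters, atomic switchdev events (whose FDB work is deferred to
 * process context, since switch drivers may sleep, e.g. on MDIO), and
 * blocking switchdev events for port objects and attributes.
 */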
static struct notifier_block dsa_slave_nb __read_mostly = {
	.notifier_call	= dsa_slave_netdevice_event,
};

struct notifier_block dsa_slave_switchdev_notifier = {
	.notifier_call	= dsa_slave_switchdev_event,
};

struct notifier_block dsa_slave_switchdev_blocking_notifier = {
	.notifier_call	= dsa_slave_switchdev_blocking_event,
};

int dsa_slave_register_notifier(void)
{
	struct notifier_block *nb;
	int err;

	err = register_netdevice_notifier(&dsa_slave_nb);
	if (err)
		return err;

	err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		goto err_switchdev_nb;

	nb = &dsa_slave_switchdev_blocking_notifier;
	err = register_switchdev_blocking_notifier(nb);
	if (err)
		goto err_switchdev_blocking_nb;

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
err_switchdev_nb:
	unregister_netdevice_notifier(&dsa_slave_nb);
	return err;
}

void dsa_slave_unregister_notifier(void)
{
	struct notifier_block *nb;
	int err;

	nb = &dsa_slave_switchdev_blocking_notifier;
	err = unregister_switchdev_blocking_notifier(nb);
	if (err)
		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);

	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);

	err = unregister_netdevice_notifier(&dsa_slave_nb);
	if (err)
		pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
}